Diffstat:
-rw-r--r--  CMakeLists.txt | 1
-rw-r--r--  README.md | 7
-rw-r--r--  TODO.md | 2
-rw-r--r--  cloud-tenant-base-dependencies-enforcer/pom.xml | 2
-rw-r--r--  clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java | 2
-rw-r--r--  clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java | 2
-rw-r--r--  clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java | 2
-rw-r--r--  config-application-package/src/main/java/com/yahoo/config/application/Xml.java | 10
-rw-r--r--  config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java | 7
-rw-r--r--  config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java | 11
-rw-r--r--  config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java | 5
-rw-r--r--  config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java | 17
-rw-r--r--  config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java | 12
-rw-r--r--  config-model-api/abi-spec.json | 10
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java | 30
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java | 2
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java | 4
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java | 9
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java | 7
-rw-r--r--  config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java | 4
-rw-r--r--  config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java | 15
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java | 18
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java | 33
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java | 81
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java | 41
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java | 27
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/LargeRankExpressions.java | 38
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionBody.java | 10
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFile.java | 35
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFiles.java | 47
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/Search.java | 8
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java | 21
-rw-r--r--  config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java | 31
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java | 5
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java | 20
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java | 13
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java | 8
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java | 10
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java | 10
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java | 12
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java | 4
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java | 2
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java | 1
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidator.java | 45
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java | 44
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/Content.java | 20
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java | 11
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java | 18
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java | 6
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java | 16
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java | 11
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java | 16
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java | 15
-rw-r--r--  config-model/src/main/resources/schema/content.rnc | 6
-rw-r--r--  config-model/src/test/derived/namecollision/collision.sd | 8
-rw-r--r--  config-model/src/test/derived/namecollision/collisionstruct.sd | 15
-rw-r--r--  config-model/src/test/derived/namecollision/documentmanager.cfg | 55
-rw-r--r--  config-model/src/test/derived/rankexpression/rankexpression.sd | 4
-rw-r--r--  config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java | 2
-rw-r--r--  config-model/src/test/java/com/yahoo/config/model/MockModelContext.java | 5
-rw-r--r--  config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java | 5
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java | 3
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java | 20
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java | 2
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java | 3
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java | 1
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java | 49
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java | 2
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexAttributeFieldsValidatorTestCase.java | 9
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java | 8
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidatorTest.java | 64
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidatorTest.java | 64
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java | 58
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java | 5
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java | 5
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java | 1
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java | 154
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java | 338
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java | 7
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java | 3
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTestCase.java | 16
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java | 5
-rw-r--r--  config-model/src/test/java/com/yahoo/vespa/model/test/utils/ApplicationPackageUtils.java | 6
-rw-r--r--  config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java | 6
-rw-r--r--  config-proxy/pom.xml | 6
-rw-r--r--  config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java | 2
-rw-r--r--  config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java | 6
-rw-r--r--  config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java | 6
-rw-r--r--  config-proxy/src/main/sh/vespa-config-loadtester.sh | 2
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java | 12
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java | 186
-rw-r--r--  config/src/vespa/config/common/configvalue.cpp | 12
-rw-r--r--  config/src/vespa/config/subscription/configsubscriptionset.cpp | 4
-rw-r--r--  configd/src/apps/cmd/main.cpp | 98
-rw-r--r--  configd/src/apps/sentinel/CMakeLists.txt | 3
-rw-r--r--  configd/src/apps/sentinel/cc-result.h | 9
-rw-r--r--  configd/src/apps/sentinel/config-owner.cpp | 31
-rw-r--r--  configd/src/apps/sentinel/config-owner.h | 11
-rw-r--r--  configd/src/apps/sentinel/connectivity.cpp | 213
-rw-r--r--  configd/src/apps/sentinel/connectivity.h | 46
-rw-r--r--  configd/src/apps/sentinel/env.cpp | 122
-rw-r--r--  configd/src/apps/sentinel/env.h | 4
-rw-r--r--  configd/src/apps/sentinel/manager.cpp | 3
-rw-r--r--  configd/src/apps/sentinel/model-owner.cpp | 66
-rw-r--r--  configd/src/apps/sentinel/model-owner.h | 32
-rw-r--r--  configd/src/apps/sentinel/output-connection.cpp | 2
-rw-r--r--  configd/src/apps/sentinel/outward-check.cpp | 23
-rw-r--r--  configd/src/apps/sentinel/outward-check.h | 17
-rw-r--r--  configd/src/apps/sentinel/peer-check.cpp | 12
-rw-r--r--  configd/src/apps/sentinel/peer-check.h | 4
-rw-r--r--  configd/src/apps/sentinel/report-connectivity.cpp | 53
-rw-r--r--  configd/src/apps/sentinel/report-connectivity.h | 33
-rw-r--r--  configd/src/apps/sentinel/rpchooks.cpp | 27
-rw-r--r--  configd/src/apps/sentinel/rpchooks.h | 5
-rw-r--r--  configd/src/apps/sentinel/rpcserver.cpp | 6
-rw-r--r--  configd/src/apps/sentinel/rpcserver.h | 3
-rw-r--r--  configd/src/apps/sentinel/sentinel.cpp | 24
-rw-r--r--  configd/src/apps/sentinel/service.cpp | 2
-rw-r--r--  configdefinitions/src/vespa/dispatch.def | 4
-rw-r--r--  configdefinitions/src/vespa/sentinel.def | 11
-rw-r--r--  configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java | 1
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java | 6
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java | 9
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java | 2
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java | 15
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java | 20
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java | 2
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java | 3
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java | 27
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java | 21
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java | 6
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java | 4
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java | 2
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java | 17
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java | 19
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java | 4
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainer.java | 11
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/SessionsMaintainer.java | 4
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/TenantsMaintainer.java | 6
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java | 10
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java | 13
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java | 7
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java | 3
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java | 2
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java | 42
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java | 12
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackage.java | 17
-rw-r--r--  configserver/src/test/apps/app_sdbundles/services.xml | 2
-rw-r--r--  configserver/src/test/apps/deprecated-features-app/hosts.xml | 7
-rw-r--r--  configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd | 50
-rw-r--r--  configserver/src/test/apps/deprecated-features-app/services.xml | 38
-rw-r--r--  configserver/src/test/apps/hosted-no-write-access-control/services.xml | 2
-rw-r--r--  configserver/src/test/apps/hosted/services.xml | 2
-rw-r--r--  configserver/src/test/apps/zkapp/services.xml | 2
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java | 4
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java | 2
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java | 2
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java | 9
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java | 12
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java | 5
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java | 22
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java | 82
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java | 3
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java | 6
-rw-r--r--  configserver/src/test/resources/deploy/advancedapp/services.xml | 2
-rw-r--r--  configserver/src/test/resources/metrics/clustercontroller_metrics.json | 42
-rw-r--r--  container-apache-http-client-bundle/CMakeLists.txt | 2
-rw-r--r--  container-apache-http-client-bundle/README.md | 3
-rw-r--r--  container-apache-http-client-bundle/pom.xml | 80
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/async/methods/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/async/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/classic/methods/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/classic/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/config/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/entity/mime/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/entity/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/async/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/classic/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/io/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/nio/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/io/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/nio/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/protocol/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/routing/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/socket/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/ssl/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/concurrent/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/config/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/entity/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/support/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/message/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/ssl/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/support/classic/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/support/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/protocol/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http2/config/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http2/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/net/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/reactor/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/util/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/annotation/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/auth/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/client/config/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/client/methods/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/client/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/client/protocol/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/client/utils/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/config/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/conn/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/conn/routing/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/conn/socket/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/conn/ssl/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/entity/mime/content/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/entity/mime/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/entity/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/impl/client/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/impl/conn/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/impl/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/message/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/params/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/pool/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/protocol/package-info.java | 8
-rw-r--r--  container-apache-http-client-bundle/src/main/java/org/apache/http/util/package-info.java | 8
-rw-r--r--  container-core/abi-spec.json | 38
-rw-r--r--  container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java | 2
-rw-r--r--  container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java | 4
-rw-r--r--  container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java | 8
-rw-r--r--  container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java | 10
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java | 11
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java | 5
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java | 11
-rw-r--r--  container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java | 4
-rw-r--r--  container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def | 4
-rw-r--r--  container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java | 3
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java | 81
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java | 54
-rw-r--r--  container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java | 15
-rw-r--r--  container-disc/pom.xml | 8
-rw-r--r--  container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/search/Searcher.java | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java | 2
-rw-r--r--  container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java | 5
-rw-r--r--  container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java | 5
-rw-r--r--  container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java | 7
-rw-r--r--  container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java | 9
-rw-r--r--  container-search/src/main/java/com/yahoo/search/query/Presentation.java | 4
-rw-r--r--  container-search/src/main/java/com/yahoo/search/searchchain/Execution.java | 1
-rw-r--r--  container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java | 44
-rw-r--r--  container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java | 1
-rw-r--r--  container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java | 26
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java | 7
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java | 9
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java | 6
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AccessControlService.java | 18
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java | 57
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/MockAccessControlService.java | 34
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java | 23
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java | 4
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java | 15
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonClient.java | 25
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonResponse.java | 36
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/MockHorizonClient.java | 48
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/package-info.java | 5
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java | 3
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java | 24
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java | 31
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java | 9
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java | 6
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java | 3
-rw-r--r--  controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobTypeTest.java | 28
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java | 15
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java | 17
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java | 31
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntry.java | 27
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntrySerializer.java | 63
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationMetaDataGarbageCollector.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java | 37
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java | 80
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContactInformationMaintainer.java | 10
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContainerImageExpirer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java | 19
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java | 10
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java | 35
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdater.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/InfrastructureUpgrader.java | 14
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunner.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/NameServiceDispatcher.java | 7
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java | 20
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java | 19
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdater.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReadyJobsTrigger.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReindexingTriggerer.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainer.java | 66
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdater.java | 17
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VersionStatusUpdater.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java | 7
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java | 65
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java | 44
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/AuditLogSerializer.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ControllerVersionSerializer.java | 3
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java | 22
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/LogSerializer.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NameServiceQueueSerializer.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NodeVersionSerializer.java | 3
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java | 28
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionTargetSerializer.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java | 5
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java | 19
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/Serializers.java | 51
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java | 69
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java | 6
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java | 3
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java | 22
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java | 38
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/AccessRequestResponse.java | 28
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java | 60
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java | 121
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java | 112
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java | 23
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/support/access/SupportAccessControl.java | 19
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java | 31
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java | 14
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java | 28
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java | 11
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java | 18
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java | 8
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java | 93
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java | 16
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java | 10
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java | 11
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java | 12
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java | 70
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java | 13
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java | 30
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java | 39
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java | 36
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java | 37
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java | 24
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java | 59
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java | 54
-rw-r--r--  controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.json | 37
-rw-r--r--  controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.operator.json | 32
-rw-r--r--  controller-server/src/test/resources/horizon/filter-in-execution-graph.json | 21
-rw-r--r--  controller-server/src/test/resources/horizon/filters-complex.expected.json | 56
-rw-r--r--  controller-server/src/test/resources/horizon/filters-complex.json | 46
-rw-r--r--  controller-server/src/test/resources/horizon/filters-meta-query.expected.json | 39
-rw-r--r--  controller-server/src/test/resources/horizon/filters-meta-query.json | 29
-rw-r--r--  controller-server/src/test/resources/horizon/no-filters.expected.json | 32
-rw-r--r--  controller-server/src/test/resources/horizon/no-filters.json | 16
-rw-r--r--  default_build_settings.cmake | 10
-rw-r--r--  dist/vespa.spec | 49
-rw-r--r--  document/src/main/java/com/yahoo/document/StructDataType.java | 2
-rw-r--r--  document/src/main/java/com/yahoo/document/StructuredDataType.java | 2
-rw-r--r--  document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java | 3
-rwxr-xr-x  documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java | 2
-rw-r--r--  documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java | 92
-rw-r--r--  eval/CMakeLists.txt | 1
-rw-r--r--  eval/src/apps/analyze_onnx_model/.gitignore | 1
-rw-r--r--  eval/src/apps/analyze_onnx_model/CMakeLists.txt | 8
-rw-r--r--  eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp | 208
-rw-r--r--  eval/src/apps/tensor_conformance/generate.cpp | 28
-rw-r--r--  eval/src/tests/eval/inline_operation/inline_operation_test.cpp | 1
-rw-r--r--  eval/src/tests/eval/node_tools/node_tools_test.cpp | 1
-rw-r--r--  eval/src/tests/eval/node_types/node_types_test.cpp | 1
-rw-r--r--  eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp | 20
-rw-r--r--  eval/src/vespa/eval/eval/aggr.h | 8
-rw-r--r--  eval/src/vespa/eval/eval/array_array_map.h | 2
-rw-r--r--  eval/src/vespa/eval/eval/call_nodes.cpp | 1
-rw-r--r--  eval/src/vespa/eval/eval/call_nodes.h | 1
-rw-r--r--  eval/src/vespa/eval/eval/extract_bit.h | 13
-rw-r--r--  eval/src/vespa/eval/eval/key_gen.cpp | 1
-rw-r--r--  eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp | 5
-rw-r--r--  eval/src/vespa/eval/eval/llvm/llvm_wrapper.h | 1
-rw-r--r--  eval/src/vespa/eval/eval/make_tensor_function.cpp | 3
-rw-r--r--  eval/src/vespa/eval/eval/node_tools.cpp | 1
-rw-r--r--  eval/src/vespa/eval/eval/node_types.cpp | 1
-rw-r--r--  eval/src/vespa/eval/eval/node_visitor.h | 2
-rw-r--r--  eval/src/vespa/eval/eval/operation.cpp | 3
-rw-r--r--  eval/src/vespa/eval/eval/operation.h | 1
-rw-r--r--  eval/src/vespa/eval/eval/test/eval_spec.cpp | 11
-rw-r--r--  eval/src/vespa/eval/eval/test/reference_evaluation.cpp | 3
-rw-r--r--  eval/src/vespa/eval/eval/visit_stuff.cpp | 1
-rw-r--r--  eval/src/vespa/eval/onnx/CMakeLists.txt | 1
-rw-r--r--  eval/src/vespa/eval/onnx/onnx_model_cache.cpp | 51
-rw-r--r--  eval/src/vespa/eval/onnx/onnx_model_cache.h | 58
-rw-r--r--  eval/src/vespa/eval/onnx/onnx_wrapper.cpp | 48
-rw-r--r--  eval/src/vespa/eval/onnx/onnx_wrapper.h | 2
-rw-r--r--  fbench/src/fbench/client.cpp | 44
-rw-r--r--  fbench/src/fbench/client.h | 25
-rw-r--r--  fbench/src/fbench/fbench.cpp | 50
-rw-r--r--  fbench/src/fbench/fbench.h | 50
-rw-r--r--  filedistribution/src/main/java/com/yahoo/vespa/filedistribution/Downloads.java | 133
-rw-r--r--  filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java | 67
-rw-r--r--  filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java | 26
-rw-r--r--  filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java | 142
-rw-r--r--  filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java | 92
-rw-r--r--  filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java | 6
-rw-r--r--  flags/pom.xml | 5
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 72
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java | 24
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java | 7
-rw-r--r--  fnet/src/vespa/fnet/connection.cpp | 8
-rw-r--r--  fnet/src/vespa/fnet/connection.h | 5
-rw-r--r--  fnet/src/vespa/fnet/frt/supervisor.cpp | 4
-rw-r--r--  fnet/src/vespa/fnet/frt/supervisor.h | 1
-rw-r--r--  indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java | 14
-rw-r--r--  indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java | 27
-rw-r--r--  jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java | 79
-rw-r--r--  jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java | 13
-rw-r--r--  jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java | 4
-rw-r--r--  jdisc_core/src/main/java/com/yahoo/jdisc/test/ServerProviderConformanceTest.java | 2
-rw-r--r--  jrt/src/com/yahoo/jrt/Buffer.java | 19
-rw-r--r--  jrt/src/com/yahoo/jrt/Connection.java | 40
-rw-r--r--  jrt/src/com/yahoo/jrt/Supervisor.java | 23
-rw-r--r--  jrt/src/com/yahoo/jrt/TlsCryptoSocket.java | 8
-rw-r--r--  jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java | 2
-rw-r--r--  jrt/tests/com/yahoo/jrt/BufferTest.java | 58
-rw-r--r--  linguistics/src/main/java/com/yahoo/language/process/Token.java | 8
-rw-r--r--  linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java | 11
-rw-r--r--  linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java | 4
-rw-r--r--  messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java | 2
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java | 2
-rw-r--r--  metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java | 2
-rw-r--r--  metrics/src/tests/summetrictest.cpp | 41
-rw-r--r--  metrics/src/vespa/metrics/countmetric.h | 2
-rw-r--r--  metrics/src/vespa/metrics/metric.cpp | 7
-rw-r--r--  metrics/src/vespa/metrics/metric.h | 2
-rw-r--r--  metrics/src/vespa/metrics/metricvalueset.h | 9
-rw-r--r--  metrics/src/vespa/metrics/metricvalueset.hpp | 8
-rw-r--r--  metrics/src/vespa/metrics/summetric.h | 1
-rw-r--r--  metrics/src/vespa/metrics/summetric.hpp | 13
-rw-r--r--  model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java | 2
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java | 13
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java | 2
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java | 24
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java | 6
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java | 23
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java | 2
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java | 4
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java | 41
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java | 2
-rw-r--r--  node-repository/pom.xml | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java | 69
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java | 11
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java | 3
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java | 49
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java | 12
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java | 5
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java | 14
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java | 14
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java | 14
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java (renamed from node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java) | 30
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java | 6
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java | 21
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java | 14
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java | 16
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java | 17
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java | 21
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java | 19
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivator.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java | 30
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java | 13
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java | 11
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java | 25
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java | 1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java | 10
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java | 44
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java | 25
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java | 4
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java | 2
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java | 51
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java | 100
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java | 12
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java | 1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java | 1
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java | 5
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java | 3
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeSerializer.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java | 10
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java | 27
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java | 13
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java | 3
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java (renamed from node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java) | 10
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java | 24
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainerTest.java | 2
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java | 50
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java | 5
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java | 5
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java | 14
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java | 14
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java | 4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java | 7
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java | 7
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json | 1
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json | 1
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json | 6
-rw-r--r--  parent/pom.xml | 9
-rw-r--r--  pom.xml | 1
-rwxr-xr-x  screwdriver/build-vespa.sh | 8
-rw-r--r--  searchcore/src/tests/proton/documentdb/documentdb_test.cpp | 124
-rw-r--r--  searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp | 2
-rw-r--r--  searchcore/src/vespa/searchcore/config/proton.def | 4
-rw-r--r--  searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp | 10
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/documentdb.cpp | 14
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp | 37
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h | 44
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp | 33
-rw-r--r--  searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h | 27
-rw-r--r--  searchlib/abi-spec.json | 6
-rw-r--r--  searchlib/src/apps/docstore/benchmarkdatastore.cpp | 2
-rw-r--r--  searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java | 40
-rwxr-xr-x  searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java | 14
-rw-r--r--  searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/TensorValue.java | 1
-rw-r--r--  searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/Function.java | 3
-rwxr-xr-x  searchlib/src/main/javacc/RankingExpressionParser.jj | 4
-rw-r--r--  searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java | 2
-rwxr-xr-x  searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java | 18
-rw-r--r--  searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java | 18
-rw-r--r--  searchlib/src/tests/attribute/attribute_test.cpp | 12
-rw-r--r--  searchlib/src/tests/attribute/changevector/changevector_test.cpp | 68
-rw-r--r--  searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp | 18
-rw-r--r--  searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp | 4
-rw-r--r--  searchlib/src/tests/predicate/predicate_index_test.cpp | 50
-rw-r--r--  searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp | 4
-rw-r--r--  searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp | 4
-rw-r--r--  searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp | 4
-rw-r--r--  searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp | 56
-rw-r--r--  searchlib/src/vespa/searchlib/aggregation/group.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/attributevector.hpp | 54
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/changevector.h | 91
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/changevector.hpp | 67
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp | 76
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h | 38
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/enumattribute.hpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/enumstore.h | 10
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/enumstore.hpp | 18
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/i_enum_store.h | 12
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h | 20
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/multivalueattribute.h | 1
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/multivalueattribute.hpp | 14
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/postingstore.cpp | 5
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp | 27
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/predicate_attribute.h | 1
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp | 8
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/singlenumericattribute.hpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/singlesmallnumericattribute.cpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp | 2
-rw-r--r--  searchlib/src/vespa/searchlib/common/bitvector.cpp | 8
-rw-r--r--  searchlib/src/vespa/searchlib/common/bitvectorcache.cpp | 4
-rw-r--r--  searchlib/src/vespa/searchlib/common/bitvectorcache.h | 12
-rw-r--r--  searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp | 6
-rw-r--r--  searchlib/src/vespa/searchlib/common/condensedbitvectors.h | 4
-rw-r--r--  searchlib/src/vespa/searchlib/common/indexmetainfo.cpp | 54
-rw-r--r--  searchlib/src/vespa/searchlib/common/partialbitvector.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/common/tunefileinfo.hpp | 1
-rw-r--r--  searchlib/src/vespa/searchlib/features/onnx_feature.cpp | 15
-rw-r--r--  searchlib/src/vespa/searchlib/features/onnx_feature.h | 8
-rw-r--r--  searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp | 14
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/document_features_store.cpp | 60
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/document_features_store.h | 16
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h | 6
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_index.cpp | 50
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_index.h | 24
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp | 6
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h | 11
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp | 15
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h | 5
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h | 6
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h | 6
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/simple_index.cpp | 6
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/simple_index.h | 20
-rw-r--r--  searchlib/src/vespa/searchlib/predicate/simple_index.hpp | 71
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp | 114
-rw-r--r--  searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h | 31
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp | 4
-rw-r--r--  service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java | 2
-rw-r--r--slobrok/src/tests/registerapi/registerapi.cpp2
-rw-r--r--slobrok/src/vespa/slobrok/sbmirror.cpp1
-rw-r--r--slobrok/src/vespa/slobrok/sbmirror.h1
-rw-r--r--staging_vespalib/src/vespa/vespalib/util/programoptions.cpp1
-rw-r--r--standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java11
-rw-r--r--storage/src/tests/distributor/CMakeLists.txt4
-rw-r--r--storage/src/tests/distributor/blockingoperationstartertest.cpp85
-rw-r--r--storage/src/tests/distributor/distributor_host_info_reporter_test.cpp68
-rw-r--r--storage/src/tests/distributor/distributor_message_sender_stub.h5
-rw-r--r--storage/src/tests/distributor/distributortestutil.cpp13
-rw-r--r--storage/src/tests/distributor/distributortestutil.h6
-rw-r--r--storage/src/tests/distributor/idealstatemanagertest.cpp76
-rw-r--r--storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp (renamed from storage/src/tests/distributor/bucketdbupdatertest.cpp)186
-rw-r--r--storage/src/tests/distributor/legacy_distributor_test.cpp (renamed from storage/src/tests/distributor/distributortest.cpp)139
-rw-r--r--storage/src/tests/distributor/maintenancemocks.h2
-rw-r--r--storage/src/tests/distributor/mergeoperationtest.cpp28
-rw-r--r--storage/src/tests/distributor/simplemaintenancescannertest.cpp85
-rw-r--r--storage/src/tests/distributor/splitbuckettest.cpp18
-rw-r--r--storage/src/tests/storageserver/mergethrottlertest.cpp2
-rw-r--r--storage/src/vespa/storage/config/distributorconfiguration.h4
-rw-r--r--storage/src/vespa/storage/distributor/CMakeLists.txt4
-rw-r--r--storage/src/vespa/storage/distributor/blockingoperationstarter.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/blockingoperationstarter.h12
-rw-r--r--storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.cpp40
-rw-r--r--storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h21
-rw-r--r--storage/src/vespa/storage/distributor/distributor.cpp125
-rw-r--r--storage/src/vespa/storage/distributor/distributor.h24
-rw-r--r--storage/src/vespa/storage/distributor/distributor_operation_context.h2
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.cpp7
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.h9
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.h3
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_interface.h1
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h3
-rw-r--r--storage/src/vespa/storage/distributor/distributor_total_metrics.cpp54
-rw-r--r--storage/src/vespa/storage/distributor/distributor_total_metrics.h29
-rw-r--r--storage/src/vespa/storage/distributor/distributormessagesender.h1
-rw-r--r--storage/src/vespa/storage/distributor/externaloperationhandler.cpp3
-rw-r--r--storage/src/vespa/storage/distributor/ideal_state_total_metrics.cpp51
-rw-r--r--storage/src/vespa/storage/distributor/ideal_state_total_metrics.h28
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.cpp29
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.h26
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp33
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h7
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp22
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h5
-rw-r--r--storage/src/vespa/storage/distributor/min_replica_provider.cpp19
-rw-r--r--storage/src/vespa/storage/distributor/min_replica_provider.h10
-rw-r--r--storage/src/vespa/storage/distributor/operationowner.h4
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp48
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h6
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h2
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h2
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp4
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h2
-rw-r--r--storage/src/vespa/storage/distributor/operations/operation.h3
-rw-r--r--storage/src/vespa/storage/storageserver/mergethrottler.cpp2
-rw-r--r--storage/src/vespa/storage/storageserver/mergethrottler.h3
-rw-r--r--storageapi/src/vespa/storageapi/message/bucket.cpp9
-rw-r--r--storageapi/src/vespa/storageapi/message/bucket.h3
-rw-r--r--storageapi/src/vespa/storageapi/message/visitor.cpp11
-rw-r--r--storageapi/src/vespa/storageapi/message/visitor.h3
-rw-r--r--vdslib/src/vespa/vdslib/distribution/redundancygroupdistribution.cpp1
-rw-r--r--vdstestlib/src/vespa/vdstestlib/config/dirconfig.cpp1
-rw-r--r--vespa-athenz/pom.xml7
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java11
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzGroup.java41
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java77
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java14
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipEntity.java81
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipResponseEntity.java28
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/RoleEntity.java54
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/DefaultZtsClient.java2
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateRequestEntity.java2
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateResponseEntity.java5
-rw-r--r--vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java2
-rw-r--r--vespa-feed-client-cli/CMakeLists.txt2
-rw-r--r--vespa-feed-client-cli/pom.xml37
-rw-r--r--vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java60
-rwxr-xr-xvespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh9
-rwxr-xr-xvespa-feed-client-cli/src/main/sh/vespa-feed-client.sh2
-rw-r--r--vespa-feed-client-cli/src/maven/create-zip.xml24
-rw-r--r--vespa-feed-client/abi-spec.json386
-rw-r--r--vespa-feed-client/pom.xml9
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java165
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java102
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java21
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java86
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java86
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java154
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java39
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java71
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java253
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java42
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java333
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java16
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java484
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java364
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java15
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java96
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java22
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java2
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java14
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java30
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java45
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java9
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java60
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java101
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java203
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java124
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonStreamFeederTest.java67
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java92
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java117
-rw-r--r--vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java34
-rw-r--r--vespa-hadoop/pom.xml11
-rw-r--r--vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java18
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/LegacyVespaRecordWriter.java235
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java6
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java292
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java2
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java34
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java18
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java10
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java26
-rw-r--r--vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java4
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java12
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java2
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java41
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java115
-rwxr-xr-xvespaclient-java/src/main/sh/vespa-visit.sh13
-rw-r--r--vespajlib/abi-spec.json1
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java33
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java36
-rw-r--r--vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java41
-rw-r--r--vespajlib/src/main/java/com/yahoo/tensor/Tensor.java1
-rw-r--r--vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java16
-rw-r--r--vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java43
-rw-r--r--vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java6
-rw-r--r--vespalib/src/tests/datastore/datastore/datastore_test.cpp33
-rw-r--r--vespalib/src/tests/stllike/hashtable_test.cpp75
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.h6
-rw-r--r--vespalib/src/vespa/vespalib/datastore/entryref.h1
-rw-r--r--vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h2
-rw-r--r--vespalib/src/vespa/vespalib/stllike/hashtable.h4
-rw-r--r--vespalib/src/vespa/vespalib/stllike/hashtable.hpp2
-rw-r--r--vespalib/src/vespa/vespalib/util/alloc.h12
-rw-r--r--vespalib/src/vespa/vespalib/util/exception.h3
-rw-r--r--vespalib/src/vespa/vespalib/util/optimized.h6
797 files changed, 13404 insertions, 5031 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7bf698b7a98..09d060f5d74 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -62,6 +62,7 @@ add_subdirectory(configgen)
add_subdirectory(configserver)
add_subdirectory(configserver-flags)
add_subdirectory(configutil)
+add_subdirectory(container-apache-http-client-bundle)
add_subdirectory(container-core)
add_subdirectory(container-disc)
add_subdirectory(container-jersey2)
diff --git a/README.md b/README.md
index 1a42eb2ff68..7d3be172cb5 100644
--- a/README.md
+++ b/README.md
@@ -8,9 +8,10 @@ over big data at serving time.
This is the primary repository for Vespa where all development is happening.
New production releases from this repository's master branch are made each weekday from Monday to Thursday.
-Home page: [https://vespa.ai](https://vespa.ai)
-Documentation: [https://docs.vespa.ai/](https://docs.vespa.ai/)
-Run applications in the cloud for free: [https://cloud.vespa.ai](https://cloud.vespa.ai)
+* Home page: [https://vespa.ai](https://vespa.ai)
+* Documentation: [https://docs.vespa.ai](https://docs.vespa.ai)
+* Continuous build: [https://factory.vespa.oath.cloud](https://factory.vespa.oath.cloud)
+* Run applications in the cloud for free: [https://cloud.vespa.ai](https://cloud.vespa.ai)
Vespa build status: [![Vespa Build Status](https://cd.screwdriver.cd/pipelines/6386/build-vespa/badge)](https://cd.screwdriver.cd/pipelines/6386)
diff --git a/TODO.md b/TODO.md
index eec3e3767b6..c633a1bf38a 100644
--- a/TODO.md
+++ b/TODO.md
@@ -17,6 +17,8 @@ bundles of parameters accessible to Searchers processing queries. Writes go thro
Document Processors, but have no equivalent support for parametrization. This is to allow configuration of document
processor profiles by reusing the query profile support also for document processors.
+See [slack discussion](https://vespatalk.slack.com/archives/C01QNBPPNT1/p1624176344102300) for more details.
+
**Code pointers:**
- [Query profiles](https://github.com/vespa-engine/vespa/blob/master/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfile.java)
- [Document processors](https://github.com/vespa-engine/vespa/blob/master/docproc/src/main/java/com/yahoo/docproc/DocumentProcessor.java)
diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml
index 16045d5dc75..340123ae659 100644
--- a/cloud-tenant-base-dependencies-enforcer/pom.xml
+++ b/cloud-tenant-base-dependencies-enforcer/pom.xml
@@ -34,7 +34,7 @@
<jetty-alpn.version>1.1.3.v20160715</jetty-alpn.version>
<junit5.version>5.7.0</junit5.version>
<junit5.platform.version>1.7.0</junit5.platform.version>
- <onnxruntime.version>1.7.0</onnxruntime.version>
+ <onnxruntime.version>1.8.0</onnxruntime.version>
<org.lz4.version>1.7.1</org.lz4.version>
<org.json.version>20090211</org.json.version><!-- TODO Vespa 8: remove as provided dependency -->
<slf4j.version>1.7.30</slf4j.version>
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java
index bb45de37ce3..ec05ac1ed29 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RPCCommunicator.java
@@ -54,7 +54,7 @@ public class RPCCommunicator implements Communicator {
private final int fleetControllerIndex;
public static Supervisor createRealSupervisor() {
- return new Supervisor(new Transport("rpc-communicator")).useSmallBuffers();
+ return new Supervisor(new Transport("rpc-communicator")).setDropEmptyBuffers(true);
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
index 73597e995d4..ce710a29180 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
@@ -99,7 +99,7 @@ public class RpcServer {
disconnect();
log.log(Level.FINE, () -> "Fleetcontroller " + fleetControllerIndex + ": Connecting RPC server.");
if (supervisor != null) disconnect();
- supervisor = new Supervisor(new Transport("rpc" + port)).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("rpc" + port)).setDropEmptyBuffers(true);
addMethods();
log.log(Level.FINE, () -> "Fleetcontroller " + fleetControllerIndex + ": Attempting to bind to port " + port);
acceptor = supervisor.listen(new Spec(port));
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
index b4e9a760d8e..3fa1b32cada 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/SlobrokClient.java
@@ -53,7 +53,7 @@ public class SlobrokClient implements NodeLookup {
this.connectionSpecs = slobrokConnectionSpecs;
shutdown();
supervisor = new Supervisor(new Transport("slobrok-client"));
- supervisor.useSmallBuffers();
+ supervisor.setDropEmptyBuffers(true);
SlobrokList slist = new SlobrokList();
slist.setup(slobrokConnectionSpecs);
mirror = new Mirror(supervisor, slist);
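
The three cluster-controller hunks above all replace Supervisor.useSmallBuffers() with setDropEmptyBuffers(true). A minimal sketch of the resulting construction pattern, assuming the Supervisor and Transport classes live in com.yahoo.jrt as in typical Vespa RPC code; only calls visible in the hunks are used:

import com.yahoo.jrt.Supervisor;
import com.yahoo.jrt.Transport;

class RpcSupervisors {
    // Builds a supervisor that releases its I/O buffers once they become empty,
    // mirroring the replacement for useSmallBuffers() shown above.
    static Supervisor dropEmptyBuffers(String transportName) {
        return new Supervisor(new Transport(transportName)).setDropEmptyBuffers(true);
    }
}
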
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/Xml.java b/config-application-package/src/main/java/com/yahoo/config/application/Xml.java
index c48a41083c7..f2a837026ea 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/Xml.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/Xml.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application;
import com.yahoo.config.application.api.ApplicationPackage;
@@ -75,10 +75,6 @@ public class Xml {
return factory.newDocumentBuilder();
}
- static File getServices(File app) {
- return new File(app, "services.xml"); // TODO Do not hard-code
- }
-
static Document copyDocument(Document input) throws TransformerException {
Transformer transformer = TransformerFactory.newInstance().newTransformer();
DOMSource source = new DOMSource(input);
@@ -142,9 +138,7 @@ public class Xml {
List<Element> children = XML.getChildren(parent, name);
List<Element> allFromFiles = allElemsFromPath(app, pathFromAppRoot);
for (Element fromFile : allFromFiles) {
- for (Element inThatFile : XML.getChildren(fromFile, name)) {
- children.add(inThatFile);
- }
+ children.addAll(XML.getChildren(fromFile, name));
}
return children;
}
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java
index db875f669fe..81fbc764bb6 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java
@@ -301,7 +301,7 @@ public class FilesApplicationPackage implements ApplicationPackage {
}
@Override
- public Collection<NamedReader> searchDefinitionContents() {
+ public Collection<NamedReader> getSchemas() {
Set<NamedReader> ret = new LinkedHashSet<>();
try {
for (File f : getSearchDefinitionFiles()) {
@@ -575,11 +575,6 @@ public class FilesApplicationPackage implements ApplicationPackage {
IOUtils.writeFile(metaFile, metaData.asJsonBytes());
}
- @Override
- public Collection<NamedReader> getSearchDefinitions() {
- return searchDefinitionContents();
- }
-
private void preprocessXML(File destination, File inputXml, Zone zone) throws IOException {
if ( ! inputXml.exists()) return;
try {
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java
index b3d2b061430..5d71376aa5b 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java
@@ -4,7 +4,9 @@ package com.yahoo.config.model.application.provider;
import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.net.HostName;
+import net.jpountz.xxhash.XXHashFactory;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
@@ -33,4 +35,13 @@ public class MockFileRegistry implements FileRegistry {
throw new IllegalArgumentException("FileReference addUri(String uri) is not implemented for " + getClass().getCanonicalName());
}
+ @Override
+ public FileReference addBlob(ByteBuffer blob) {
+ long blobHash = XXHashFactory.fastestJavaInstance().hash64().hash(blob, 0);
+ String relativePath = Long.toHexString(blobHash) + ".blob";
+ FileReference fileReference = new FileReference(relativePath);
+ entries.add(new Entry(relativePath, fileReference));
+ return fileReference;
+ }
+
}
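
The addBlob implementation above names each blob after a 64-bit xxhash of its contents. A self-contained sketch of that naming scheme, assuming the lz4-java dependency that provides net.jpountz.xxhash and the seed 0 used in the hunk; the value in the comments is the one asserted in PreGeneratedFileRegistryTestCase further down:

import net.jpountz.xxhash.XXHashFactory;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

class BlobNaming {
    // Derives the registry path for a blob, e.g. "Some blob" -> "c5674b55c15c9c95.blob".
    static String relativePathOf(ByteBuffer blob) {
        long blobHash = XXHashFactory.fastestJavaInstance().hash64().hash(blob, 0);
        return Long.toHexString(blobHash) + ".blob";
    }

    public static void main(String[] args) {
        ByteBuffer blob = ByteBuffer.wrap("Some blob".getBytes(StandardCharsets.UTF_8));
        System.out.println(relativePathOf(blob)); // c5674b55c15c9c95.blob
    }
}
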
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java
index e779d59ba24..75482ded05d 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java
@@ -7,6 +7,7 @@ import com.yahoo.config.application.api.FileRegistry;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
+import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
@@ -81,6 +82,10 @@ public class PreGeneratedFileRegistry implements FileRegistry {
public FileReference addUri(String uri) {
return new FileReference(path2Hash.get(uri));
}
+ @Override
+ public FileReference addBlob(ByteBuffer blob) {
+ return new FileReference(path2Hash.get(blob));
+ }
@Override
public String fileSourceHost() {
diff --git a/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java b/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java
index 562970c266f..f8484a8e455 100644
--- a/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java
+++ b/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java
@@ -1,15 +1,16 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application;
+import com.yahoo.config.application.api.ApplicationPackage;
import org.junit.Test;
import org.w3c.dom.Document;
import org.xml.sax.SAXException;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.stream.XMLStreamException;
-import javax.xml.transform.*;
-import java.io.*;
+import javax.xml.transform.TransformerException;
+import java.io.File;
+import java.io.IOException;
import java.nio.file.NoSuchFileException;
/**
@@ -72,7 +73,7 @@ public class IncludeProcessorTest {
" </nodes>\n" +
"</container></services>";
- Document doc = new IncludeProcessor(app).process(docBuilder.parse(Xml.getServices(app)));
+ Document doc = new IncludeProcessor(app).process(docBuilder.parse(getServices(app)));
// System.out.println(Xml.documentAsString(doc));
TestBase.assertDocument(expected, doc);
}
@@ -81,7 +82,11 @@ public class IncludeProcessorTest {
public void testRequiredIncludeIsDefault() throws ParserConfigurationException, IOException, SAXException, TransformerException {
File app = new File("src/test/resources/multienvapp_failrequired");
DocumentBuilder docBuilder = Xml.getPreprocessDocumentBuilder();
- new IncludeProcessor(app).process(docBuilder.parse(Xml.getServices(app)));
+ new IncludeProcessor(app).process(docBuilder.parse(getServices(app)));
+ }
+
+ static File getServices(File app) {
+ return new File(app, ApplicationPackage.SERVICES);
}
}
diff --git a/config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java b/config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java
index 4b2f5890a4e..7996efaa60e 100644
--- a/config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java
+++ b/config-application-package/src/test/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistryTestCase.java
@@ -1,35 +1,37 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.model.application.provider;
-import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.FileRegistry;
import org.junit.Test;
import java.io.StringReader;
-import java.util.List;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
import java.util.Set;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
/**
* @author Tony Vaagenes
*/
public class PreGeneratedFileRegistryTestCase {
+ private static final String BLOB = "Some blob";
@Test
public void importAndExport() {
FileRegistry fileRegistry = new MockFileRegistry();
fileRegistry.addFile("1234");
+ fileRegistry.addBlob(ByteBuffer.wrap(BLOB.getBytes(StandardCharsets.UTF_8)));
String serializedRegistry = PreGeneratedFileRegistry.exportRegistry(fileRegistry);
PreGeneratedFileRegistry importedRegistry = PreGeneratedFileRegistry.importRegistry(new StringReader(serializedRegistry));
- assertEquals(Set.of("1234"), importedRegistry.getPaths());
+ assertEquals(Set.of("1234", "c5674b55c15c9c95.blob"), importedRegistry.getPaths());
- assertEquals(1, importedRegistry.getPaths().size());
+ assertEquals(2, importedRegistry.getPaths().size());
checkConsistentEntry(fileRegistry.export().get(0), importedRegistry);
+ checkConsistentEntry(fileRegistry.export().get(1), importedRegistry);
assertEquals(fileRegistry.fileSourceHost(), importedRegistry.fileSourceHost());
}
diff --git a/config-model-api/abi-spec.json b/config-model-api/abi-spec.json
index bdf2b53bc92..735778f4d46 100644
--- a/config-model-api/abi-spec.json
+++ b/config-model-api/abi-spec.json
@@ -103,7 +103,7 @@
"public abstract java.io.Reader getHosts()",
"public java.util.List getUserIncludeDirs()",
"public void validateIncludeDir(java.lang.String)",
- "public abstract java.util.Collection searchDefinitionContents()",
+ "public java.util.Collection searchDefinitionContents()",
"public abstract java.util.Map getAllExistingConfigDefs()",
"public abstract java.util.List getFiles(com.yahoo.path.Path, java.lang.String, boolean)",
"public java.util.List getFiles(com.yahoo.path.Path, java.lang.String)",
@@ -127,7 +127,8 @@
"public void writeMetaData()",
"public java.util.Optional getAllocatedHosts()",
"public java.util.Map getFileRegistries()",
- "public abstract java.util.Collection getSearchDefinitions()",
+ "public java.util.Collection getSearchDefinitions()",
+ "public abstract java.util.Collection getSchemas()",
"public com.yahoo.config.application.api.ApplicationPackage preprocess(com.yahoo.config.provision.Zone, com.yahoo.config.application.api.DeployLogger)"
],
"fields": [
@@ -421,6 +422,7 @@
"methods": [
"public abstract com.yahoo.config.FileReference addFile(java.lang.String)",
"public abstract com.yahoo.config.FileReference addUri(java.lang.String)",
+ "public abstract com.yahoo.config.FileReference addBlob(java.nio.ByteBuffer)",
"public com.yahoo.config.FileReference addApplicationPackage()",
"public abstract java.lang.String fileSourceHost()",
"public abstract java.util.List export()"
@@ -537,7 +539,9 @@
"public static final enum com.yahoo.config.application.api.ValidationId configModelVersionMismatch",
"public static final enum com.yahoo.config.application.api.ValidationId skipOldConfigModels",
"public static final enum com.yahoo.config.application.api.ValidationId accessControl",
- "public static final enum com.yahoo.config.application.api.ValidationId globalEndpointChange"
+ "public static final enum com.yahoo.config.application.api.ValidationId globalEndpointChange",
+ "public static final enum com.yahoo.config.application.api.ValidationId redundancyIncrease",
+ "public static final enum com.yahoo.config.application.api.ValidationId redundancyOne"
]
},
"com.yahoo.config.application.api.ValidationOverrides$Allow": {
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java
index d97ff5ca774..2aefc985f4b 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application.api;
import com.yahoo.component.Version;
@@ -79,7 +79,7 @@ public interface ApplicationPackage {
* @return the name of the application (i.e the directory where the application package was deployed from)
* @deprecated do not use
*/
- @Deprecated // TODO: Remove on Vespa 8
+ @Deprecated // TODO: Remove in Vespa 8
String getApplicationName();
ApplicationId getApplicationId();
@@ -87,14 +87,14 @@ public interface ApplicationPackage {
/**
* Contents of services.xml. Caller must close reader after use.
*
- * @return a Reader, or null if no services.xml/vespa-services.xml present
+ * @return a Reader, or null if no services.xml present
*/
Reader getServices();
/**
* Contents of hosts.xml. Caller must close reader after use.
*
- * @return a Reader, or null if no hosts.xml/vespa-hosts.xml present
+ * @return a Reader, or null if no hosts.xml present
*/
Reader getHosts();
@@ -113,9 +113,12 @@ public interface ApplicationPackage {
/**
* Readers for all the search definition files for this.
+ * @deprecated use {@link #getSchemas()} instead
* @return a list of readers for search definitions
*/
- Collection<NamedReader> searchDefinitionContents();
+ @Deprecated
+ // TODO: Remove in Vespa 8
+ default Collection<NamedReader> searchDefinitionContents() { return getSchemas(); }
/**
* Returns all the config definitions available in this package as unparsed data.
@@ -143,10 +146,11 @@ public interface ApplicationPackage {
/** Returns the major version this application is valid for, or empty if it is valid for all versions */
default Optional<Integer> getMajorVersion() {
- if ( ! getDeployment().isPresent()) return Optional.empty();
+ if (getDeployment().isEmpty()) return Optional.empty();
Element deployElement = XML.getDocument(getDeployment().get()).getDocumentElement();
if (deployElement == null) return Optional.empty();
+
String majorVersionString = deployElement.getAttribute("major-version");
if (majorVersionString == null || majorVersionString.isEmpty())
return Optional.empty();
@@ -178,7 +182,6 @@ public interface ApplicationPackage {
/** Returns handle for the file containing client certificate authorities */
default ApplicationFile getClientSecurityFile() { return getFile(SECURITY_DIR.append("clients.pem")); }
- //For generating error messages
String getHostSource();
String getServicesSource();
@@ -235,7 +238,18 @@ public interface ApplicationPackage {
return Collections.emptyMap();
}
- Collection<NamedReader> getSearchDefinitions();
+ /**
+ * @deprecated use {@link #getSchemas()} instead
+ */
+ @Deprecated
+ // TODO: Remove in Vespa 8
+ default Collection<NamedReader> getSearchDefinitions() { return getSchemas(); }
+
+ /**
+ * Readers for all the schema files.
+ * @return a collection of readers for schemas
+ */
+ Collection<NamedReader> getSchemas();
/**
* Preprocess an application for a given zone and return a new application package pointing to the preprocessed
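
The ApplicationPackage changes above deprecate searchDefinitionContents() and getSearchDefinitions() and turn them into default methods that delegate to the new getSchemas(). A stripped-down illustration of that rename-with-default-delegation pattern, using a hypothetical SchemaSource interface rather than the full ApplicationPackage API:

import java.util.Collection;
import java.util.List;

// Hypothetical, simplified stand-in: implementers provide only getSchemas(),
// while callers of the old names keep compiling via the deprecated defaults.
interface SchemaSource {

    Collection<String> getSchemas();

    @Deprecated // TODO: Remove in Vespa 8
    default Collection<String> getSearchDefinitions() { return getSchemas(); }

    @Deprecated // TODO: Remove in Vespa 8
    default Collection<String> searchDefinitionContents() { return getSchemas(); }
}

class SingleSchemaSource implements SchemaSource {
    @Override public Collection<String> getSchemas() { return List.of("schema music { document music {} }"); }
}
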
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java b/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java
index 8415781b827..9d049ae0847 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application.api;
+import java.nio.ByteBuffer;
import java.util.List;
import com.yahoo.config.FileReference;
@@ -13,6 +14,7 @@ public interface FileRegistry {
FileReference addFile(String relativePath);
FileReference addUri(String uri);
+ FileReference addBlob(ByteBuffer blob);
default FileReference addApplicationPackage() { return addFile(""); }
/**
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java
index 4c76d42a17e..7aa6788b86d 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationId.java
@@ -23,7 +23,9 @@ public enum ValidationId {
configModelVersionMismatch("config-model-version-mismatch"), // Internal use
skipOldConfigModels("skip-old-config-models"), // Internal use
accessControl("access-control"), // Internal use, used in zones where there should be no access-control
- globalEndpointChange("global-endpoint-change"); // Changing global endpoints
+ globalEndpointChange("global-endpoint-change"), // Changing global endpoints
+ redundancyIncrease("redundancy-increase"), // Increasing redundancy - may easily cause feed blocked
+ redundancyOne("redundancy-one"); // redundancy=1 requires a validation override on first deployment
private final String id;
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
index 8845431c71b..3221df38d4f 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.application.api;
import com.google.common.collect.ImmutableList;
@@ -67,7 +67,7 @@ public class ValidationOverrides {
public boolean allows(String validationIdString, Instant now) {
Optional<ValidationId> validationId = ValidationId.from(validationIdString);
- if ( ! validationId.isPresent()) return false; // unknown id -> not allowed
+ if (validationId.isEmpty()) return false; // unknown id -> not allowed
return allows(validationId.get(), now);
}
@@ -125,8 +125,8 @@ public class ValidationOverrides {
.atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
.plus(Duration.ofDays(1)); // Make the override valid *on* the "until" date
Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
- if (validationId.isPresent()) // skip unknown ids as they may be valid for other model versions
- overrides.add(new ValidationOverrides.Allow(validationId.get(), until));
+ // skip unknown ids as they may be valid for other model versions
+ validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
}
return new ValidationOverrides(overrides, xmlForm);
}
@@ -177,6 +177,7 @@ public class ValidationOverrides {
}
+ // TODO: Remove this class after June 2021
public static class AllowAllValidationOverrides extends ValidationOverrides {
private final DeployLogger logger;
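
With the two new validation ids above (redundancy-increase and redundancy-one), a deployment that trips either check needs a matching allow entry. A hedged usage sketch; the validation-overrides.xml layout and the fromXml(Reader) overload are assumptions based on the element names and StringReader usage visible elsewhere in this diff:

import com.yahoo.config.application.api.ValidationOverrides;

import java.io.StringReader;
import java.time.Instant;

class ValidationOverridesExample {
    public static void main(String[] args) {
        // Assumed XML form: an <allow until="..."> element whose text is the validation id.
        String xml =
                "<validation-overrides>\n" +
                "  <allow until='2021-08-01'>redundancy-increase</allow>\n" +
                "</validation-overrides>";
        ValidationOverrides overrides = ValidationOverrides.fromXml(new StringReader(xml));
        System.out.println(overrides.allows("redundancy-increase", Instant.now()));
    }
}
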
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 42ef763fc62..81ee0a4c4c3 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -81,11 +81,14 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean useAsyncMessageHandlingOnSchedule() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}) default double feedConcurrency() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean useBucketExecutorForPruneRemoved() { return true; }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default int largeRankExpressionLimit() { return 0x10000; }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean useExternalRankExpressions() { return false; }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean distributeExternalRankExpressions() { return false; }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default int maxConcurrentMergesPerNode() { throw new UnsupportedOperationException("TODO specify default value"); }
+ @ModelFeatureFlag(owners = {"baldersheim"}) default int maxMergeQueueSize() { throw new UnsupportedOperationException("TODO specify default value"); }
@ModelFeatureFlag(owners = {"geirst"}) default boolean enableFeedBlockInDistributor() { return true; }
@ModelFeatureFlag(owners = {"hmusum"}, removeAfter = "7.406") default int clusterControllerMaxHeapSizeInMb() { return 128; }
- @ModelFeatureFlag(owners = {"hmusum"}) default int metricsProxyMaxHeapSizeInMb(ClusterSpec.Type type) { return 256; }
+ @ModelFeatureFlag(owners = {"hmusum"}, removeAfter = "7.422") default int metricsProxyMaxHeapSizeInMb(ClusterSpec.Type type) { return 256; }
@ModelFeatureFlag(owners = {"bjorncs", "tokle"}) default List<String> allowedAthenzProxyIdentities() { return List.of(); }
@ModelFeatureFlag(owners = {"tokle"}) default boolean tenantIamRole() { return false; }
@ModelFeatureFlag(owners = {"vekterli"}) default int maxActivationInhibitedOutOfSyncGroups() { return 0; }
@@ -93,6 +96,8 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"bjorncs", "jonmv"}, removeAfter = "7.409") default boolean enableJdiscHttp2() { return true; }
@ModelFeatureFlag(owners = {"tokle", "bjorncs"}) default boolean enableCustomAclMapping() { return false; }
@ModelFeatureFlag(owners = {"geirst", "vekterli"}) default int numDistributorStripes() { return 0; }
+ @ModelFeatureFlag(owners = {"arnej"}) default boolean requireConnectivityCheck() { return false; }
+ @ModelFeatureFlag(owners = {"hmusum"}) default boolean throwIfResourceLimitsSpecified() { return false; }
}
/** Warning: As elsewhere in this package, do not make backwards incompatible changes that will break old config models! */
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java b/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
index 00d194a37a2..bb88bcf75d9 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
@@ -35,9 +35,9 @@ public class Quota {
}
public static Quota fromSlime(Inspector inspector) {
- var clusterSize = SlimeUtils.optionalLong(inspector.field("clusterSize"));
+ var clusterSize = SlimeUtils.optionalInteger(inspector.field("clusterSize"));
var budget = budgetFromSlime(inspector.field("budget"));
- return new Quota(clusterSize.map(Long::intValue), budget, true);
+ return new Quota(clusterSize.stream().boxed().findFirst(), budget, true);
}
public Quota withBudget(BigDecimal budget) {
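
Quota.fromSlime above now reads clusterSize with SlimeUtils.optionalInteger and converts the result into the Optional<Integer> the constructor expects via stream().boxed().findFirst(). A JDK-only sketch of that conversion idiom (that optionalInteger returns an OptionalInt is an assumption here):

import java.util.Optional;
import java.util.OptionalInt;

class OptionalIntConversion {
    // Boxes an OptionalInt into Optional<Integer> without an isPresent()/getAsInt() dance.
    static Optional<Integer> box(OptionalInt value) {
        return value.stream().boxed().findFirst();
    }

    public static void main(String[] args) {
        System.out.println(box(OptionalInt.of(3)));   // Optional[3]
        System.out.println(box(OptionalInt.empty())); // Optional.empty
    }
}
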
diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java
index 2943b0bab34..f7ef059c5f2 100644
--- a/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java
+++ b/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java
@@ -4,16 +4,10 @@ package com.yahoo.config.application.api;
import com.yahoo.test.ManualClock;
import org.junit.Assert;
import org.junit.Test;
-import org.xml.sax.SAXException;
-
-import java.io.IOException;
import java.io.StringReader;
-import java.time.Clock;
import java.time.Instant;
-import java.util.Optional;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
/**
* @author bratseth
@@ -82,15 +76,6 @@ public class ValidationOverrideTest {
assertEquals(empty.xmlForm(), emptyReserialized.xmlForm());
}
- @Test
- public void testAll() {
- ValidationOverrides all = ValidationOverrides.all;
- assertTrue(all.allows(ValidationId.deploymentRemoval, Clock.systemUTC().instant()));
- assertTrue(all.allows(ValidationId.contentClusterRemoval, Clock.systemUTC().instant()));
- assertTrue(all.allows(ValidationId.indexModeChange, Clock.systemUTC().instant()));
- assertTrue(all.allows(ValidationId.fieldTypeChange, Clock.systemUTC().instant()));
- }
-
private void assertOverridden(String validationId, ValidationOverrides overrides, Instant now) {
overrides.invalid(ValidationId.from(validationId).get(), "message", now); // should not throw exception
}
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
index 1d0541b67d1..dd66861f2ce 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
@@ -22,7 +22,7 @@ import com.yahoo.config.model.api.ValidationParameters;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.application.provider.MockFileRegistry;
import com.yahoo.config.model.provision.HostsXmlProvisioner;
-import com.yahoo.config.model.provision.SingleNodeProvisioner;
+import com.yahoo.config.model.provision .SingleNodeProvisioner;
import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.Zone;
@@ -142,12 +142,8 @@ public class DeployState implements ConfigDefinitionStore {
this.semanticRules = semanticRules; // TODO: Remove this by seeing how pagetemplates are propagated
this.importedModels = importMlModels(applicationPackage, modelImporters, deployLogger);
- ValidationOverrides suppliedValidationOverrides = applicationPackage.getValidationOverrides().map(ValidationOverrides::fromXml)
- .orElse(ValidationOverrides.empty);
- this.validationOverrides =
- zone.environment().isManuallyDeployed() // // Warn but allow in manually deployed zones
- ? new ValidationOverrides.AllowAllValidationOverrides(suppliedValidationOverrides, deployLogger)
- : suppliedValidationOverrides;
+ this.validationOverrides = applicationPackage.getValidationOverrides().map(ValidationOverrides::fromXml)
+ .orElse(ValidationOverrides.empty);
this.wantedNodeVespaVersion = wantedNodeVespaVersion;
this.now = now;
@@ -460,7 +456,7 @@ public class DeployState implements ConfigDefinitionStore {
private SearchDocumentModel createSearchDocumentModel(RankProfileRegistry rankProfileRegistry,
QueryProfiles queryProfiles,
ValidationParameters validationParameters) {
- Collection<NamedReader> readers = applicationPackage.getSearchDefinitions();
+ Collection<NamedReader> readers = applicationPackage.getSchemas();
Map<String, String> names = new LinkedHashMap<>();
SearchBuilder builder = new SearchBuilder(applicationPackage, logger, properties, rankProfileRegistry, queryProfiles.getRegistry());
for (NamedReader reader : readers) {
@@ -470,14 +466,14 @@ public class DeployState implements ConfigDefinitionStore {
String sdName = stripSuffix(readerName, ApplicationPackage.SD_NAME_SUFFIX);
names.put(topLevelName, sdName);
if ( ! sdName.equals(topLevelName)) {
- throw new IllegalArgumentException("Schema definition file name ('" + sdName + "') and name of " +
+ throw new IllegalArgumentException("Schema file name ('" + sdName + "') and name of " +
"top level element ('" + topLevelName +
"') are not equal for file '" + readerName + "'");
}
} catch (ParseException e) {
- throw new IllegalArgumentException("Could not parse sd file '" + reader.getName() + "'", e);
+ throw new IllegalArgumentException("Could not parse schema file '" + reader.getName() + "'", e);
} catch (IOException e) {
- throw new IllegalArgumentException("Could not read sd file '" + reader.getName() + "'", e);
+ throw new IllegalArgumentException("Could not read schema file '" + reader.getName() + "'", e);
} finally {
closeIgnoreException(reader.getReader());
}
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index 304855e545d..fe1bf93f32b 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -42,6 +42,7 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private double defaultTermwiseLimit = 1.0;
private String jvmGCOptions = null;
private String sequencerType = "LATENCY";
+ private boolean firstTimeDeployment = false;
private String responseSequencerType = "ADAPTIVE";
private int responseNumThreads = 2;
private Optional<EndpointCertificateSecrets> endpointCertificateSecrets = Optional.empty();
@@ -54,11 +55,13 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private boolean enableFeedBlockInDistributor = true;
private boolean useExternalRankExpression = false;
private int clusterControllerMaxHeapSizeInMb = 128;
- private int metricsProxyMaxHeapSizeInMb = 256;
private int maxActivationInhibitedOutOfSyncGroups = 0;
private List<TenantSecretStore> tenantSecretStores = Collections.emptyList();
private String jvmOmitStackTraceInFastThrowOption;
private int numDistributorStripes = 0;
+ private int maxConcurrentMergesPerNode = 16;
+ private int maxMergeQueueSize = 1024;
+ private int largeRankExpressionLimit = 0x10000;
private boolean allowDisableMtls = true;
private List<X509Certificate> operatorCertificates = Collections.emptyList();
@@ -75,7 +78,7 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public String jvmGCOptions(Optional<ClusterSpec.Type> clusterType) { return jvmGCOptions; }
@Override public String feedSequencerType() { return sequencerType; }
@Override public boolean isBootstrap() { return false; }
- @Override public boolean isFirstTimeDeployment() { return false; }
+ @Override public boolean isFirstTimeDeployment() { return firstTimeDeployment; }
@Override public boolean useDedicatedNodeForLogserver() { return useDedicatedNodeForLogserver; }
@Override public Optional<EndpointCertificateSecrets> endpointCertificateSecrets() { return endpointCertificateSecrets; }
@Override public double defaultTermwiseLimit() { return defaultTermwiseLimit; }
@@ -93,7 +96,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public double feedConcurrency() { return feedConcurrency; }
@Override public boolean enableFeedBlockInDistributor() { return enableFeedBlockInDistributor; }
@Override public int clusterControllerMaxHeapSizeInMb() { return clusterControllerMaxHeapSizeInMb; }
- @Override public int metricsProxyMaxHeapSizeInMb(ClusterSpec.Type type) { return metricsProxyMaxHeapSizeInMb; }
@Override public int maxActivationInhibitedOutOfSyncGroups() { return maxActivationInhibitedOutOfSyncGroups; }
@Override public List<TenantSecretStore> tenantSecretStores() { return tenantSecretStores; }
@Override public String jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type type) { return jvmOmitStackTraceInFastThrowOption; }
@@ -102,11 +104,18 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public List<X509Certificate> operatorCertificates() { return operatorCertificates; }
@Override public boolean useExternalRankExpressions() { return useExternalRankExpression; }
@Override public boolean distributeExternalRankExpressions() { return useExternalRankExpression; }
+ @Override public int largeRankExpressionLimit() { return largeRankExpressionLimit; }
+ @Override public int maxConcurrentMergesPerNode() { return maxConcurrentMergesPerNode; }
+ @Override public int maxMergeQueueSize() { return maxMergeQueueSize; }
public TestProperties useExternalRankExpression(boolean value) {
useExternalRankExpression = value;
return this;
}
+ public TestProperties largeRankExpressionLimit(int value) {
+ largeRankExpressionLimit = value;
+ return this;
+ }
public TestProperties setFeedConcurrency(double feedConcurrency) {
this.feedConcurrency = feedConcurrency;
return this;
@@ -129,11 +138,24 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
responseSequencerType = type;
return this;
}
+ public TestProperties setFirstTimeDeployment(boolean firstTimeDeployment) {
+ this.firstTimeDeployment = firstTimeDeployment;
+ return this;
+ }
public TestProperties setResponseNumThreads(int numThreads) {
responseNumThreads = numThreads;
return this;
}
+ public TestProperties setMaxConcurrentMergesPerNode(int maxConcurrentMergesPerNode) {
+ this.maxConcurrentMergesPerNode = maxConcurrentMergesPerNode;
+ return this;
+ }
+ public TestProperties setMaxMergeQueueSize(int maxMergeQueueSize) {
+ this.maxMergeQueueSize = maxMergeQueueSize;
+ return this;
+ }
+
public TestProperties setDefaultTermwiseLimit(double limit) {
defaultTermwiseLimit = limit;
return this;
@@ -209,11 +231,6 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
return this;
}
- public TestProperties metricsProxyMaxHeapSizeInMb(int heapSize) {
- metricsProxyMaxHeapSizeInMb = heapSize;
- return this;
- }
-
public TestProperties maxActivationInhibitedOutOfSyncGroups(int nGroups) {
maxActivationInhibitedOutOfSyncGroups = nGroups;
return this;
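
TestProperties above gains fluent setters for the new flags (largeRankExpressionLimit, setMaxConcurrentMergesPerNode, setMaxMergeQueueSize, setFirstTimeDeployment). A small usage sketch chaining only the methods added in this change; that TestProperties has a no-argument constructor is assumed from its builder style:

import com.yahoo.config.model.deploy.TestProperties;

class TestPropertiesUsage {
    // Each setter returns the same TestProperties instance, so the calls chain.
    static TestProperties mergeTuningProperties() {
        return new TestProperties()
                .setFirstTimeDeployment(true)
                .largeRankExpressionLimit(0x20000)
                .setMaxConcurrentMergesPerNode(32)
                .setMaxMergeQueueSize(2048);
    }
}
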
diff --git a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
index e9fe0824f30..411a37bb70a 100644
--- a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
+++ b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
@@ -115,7 +115,7 @@ public class MockApplicationPackage implements ApplicationPackage {
}
@Override
- public List<NamedReader> getSearchDefinitions() {
+ public List<NamedReader> getSchemas() {
ArrayList<NamedReader> readers = new ArrayList<>();
SearchBuilder searchBuilder = new SearchBuilder(this,
new BaseDeployLogger(),
@@ -134,11 +134,6 @@ public class MockApplicationPackage implements ApplicationPackage {
}
@Override
- public List<NamedReader> searchDefinitionContents() {
- return new ArrayList<>();
- }
-
- @Override
public Map<ConfigDefinitionKey, UnparsedConfigDefinition> getAllExistingConfigDefs() {
return Collections.emptyMap();
}
diff --git a/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java b/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java
index 38d831a0b28..da338ad3107 100644
--- a/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java
+++ b/config-model/src/main/java/com/yahoo/documentmodel/NewDocumentType.java
@@ -3,7 +3,6 @@ package com.yahoo.documentmodel;
import com.yahoo.document.DataType;
import com.yahoo.document.Document;
-import com.yahoo.document.DocumentId;
import com.yahoo.document.Field;
import com.yahoo.document.StructDataType;
import com.yahoo.document.StructuredDataType;
@@ -32,34 +31,6 @@ import static java.util.Collections.emptySet;
*/
public final class NewDocumentType extends StructuredDataType implements DataTypeCollection {
- public static final class Name {
-
- private final String name;
- private final int id;
-
- public Name(String name) {
- this(name.hashCode(), name);
- }
-
- public Name(int id, String name) {
- this.id = id;
- this.name = name;
- }
-
- public String toString() { return name; }
-
- public final String getName() { return name; }
-
- public final int getId() { return id; }
-
- public int hashCode() { return name.hashCode(); }
-
- public boolean equals(Object other) {
- if ( ! (other instanceof Name)) return false;
- return name.equals(((Name)other).getName());
- }
- }
-
private final Name name;
private final DataTypeRepo dataTypes = new DataTypeRepo();
private final Map<Integer, NewDocumentType> inherits = new LinkedHashMap<>();
@@ -139,7 +110,7 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
}
@Override
- public Class getValueClass() {
+ public Class<Document> getValueClass() {
return Document.class;
}
@@ -148,7 +119,8 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
if (!(value instanceof Document)) {
return false;
}
- /** Temporary disabled due to clash with document and covariant return type
+ /*
+ Temporarily disabled due to clash with document and covariant return type
Document doc = (Document) value;
if (((NewDocumentType) doc.getDataType()).inherits(this)) {
//the value is of this type; or the supertype of the value is of this type, etc....
@@ -162,28 +134,31 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
for (Field f : getFields()) {
Field inhF = inherited.getField(f.getName());
if (inhF != null && !inhF.equals(f)) {
- throw new IllegalArgumentException("Inherited document '" + inherited.toString() + "' already contains field '" +
- inhF.getName() + "'. Can not override with '" + f.getName() + "'.");
+ throw new IllegalArgumentException("Inherited document '" + inherited + "' already contains field '" +
+ inhF.getName() + "'. Can not override with '" + f.getName() + "'.");
}
}
for (Field f : inherited.getAllFields()) {
for (NewDocumentType side : inherits.values()) {
Field sideF = side.getField(f.getName());
if (sideF != null && !sideF.equals(f)) {
- throw new IllegalArgumentException("Inherited document '" + side.toString() + "' already contains field '" +
- sideF.getName() + "'. Document '" + inherited.toString() + "' also defines field '" + f.getName() +
- "'.Multiple inheritance must be disjunctive.");
+ throw new IllegalArgumentException("Inherited document '" + side + "' already contains field '" +
+ sideF.getName() + "'. Document '" + inherited +
+ "' also defines field '" + f.getName() +
+ "'. Multiple inheritance must be disjunctive.");
}
}
}
return true;
}
+
public void inherit(NewDocumentType inherited) {
if ( ! inherits.containsKey(inherited.getId())) {
verifyInheritance(inherited);
inherits.put(inherited.getId(), inherited);
}
}
+
public boolean inherits(NewDocumentType superType) {
if (getId() == superType.getId()) return true;
for (NewDocumentType type : inherits.values()) {
@@ -243,7 +218,7 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
@Override
public Document createFieldValue() {
- return new Document(null, (DocumentId)null);
+ throw new RuntimeException("Cannot create an instance of " + this);
}
@Override
@@ -375,4 +350,36 @@ public final class NewDocumentType extends StructuredDataType implements DataTyp
return importedFieldNames;
}
+ public static final class Name {
+
+ private final String name;
+ private final int id;
+
+ public Name(String name) {
+ this(name.hashCode(), name);
+ }
+
+ public Name(int id, String name) {
+ this.id = id;
+ this.name = name;
+ }
+
+ @Override
+ public String toString() { return name; }
+
+ public final String getName() { return name; }
+
+ public final int getId() { return id; }
+
+ @Override
+ public int hashCode() { return name.hashCode(); }
+
+ @Override
+ public boolean equals(Object other) {
+ if ( ! (other instanceof Name)) return false;
+ return name.equals(((Name)other).getName());
+ }
+
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java b/config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java
index 77ce2dd41b5..ffa9cbe9ba5 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/DistributableResource.java
@@ -5,15 +5,17 @@ import com.yahoo.path.Path;
import com.yahoo.vespa.model.AbstractService;
import com.yahoo.vespa.model.utils.FileSender;
+import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Objects;
public class DistributableResource {
- public enum PathType { FILE, URI };
+ public enum PathType { FILE, URI, BLOB };
/** The search definition-unique name of this constant */
private final String name;
- private String path = null;
+ private final ByteBuffer blob;
+ private String path;
private String fileReference = "";
private PathType pathType = PathType.FILE;
@@ -22,11 +24,20 @@ public class DistributableResource {
}
public DistributableResource(String name) {
- this(name, null);
+ this.name = name;
+ blob = null;
}
public DistributableResource(String name, String path) {
this.name = name;
this.path = path;
+ blob = null;
+ }
+ public DistributableResource(String name, ByteBuffer blob) {
+ Objects.requireNonNull(name, "Blob name cannot be null");
+ Objects.requireNonNull(blob, "Blob cannot be null");
+ this.name = name;
+ this.blob = blob;
+ pathType = PathType.BLOB;
}
public void setFileName(String fileName) {
@@ -41,16 +52,24 @@ public class DistributableResource {
this.pathType = PathType.URI;
}
- protected void setFileReference(String fileReference) { this.fileReference = fileReference; }
/** Initiate sending of this constant to some services over file distribution */
public void sendTo(Collection<? extends AbstractService> services) {
- FileReference reference = (pathType == PathType.FILE)
- ? FileSender.sendFileToServices(path, services)
- : FileSender.sendUriToServices(path, services);
- this.fileReference = reference.value();
+ fileReference = sendToServices(services).value();
+ }
+ private FileReference sendToServices(Collection<? extends AbstractService> services) {
+ switch (pathType) {
+ case FILE:
+ return FileSender.sendFileToServices(path, services);
+ case URI:
+ return FileSender.sendUriToServices(path, services);
+ case BLOB:
+ return FileSender.sendBlobToServices(blob, services);
+ }
+ throw new IllegalArgumentException("Unknown path type " + pathType);
}
public String getName() { return name; }
+ public ByteBuffer getBlob() { return blob; }
public String getFileName() { return path; }
public Path getFilePath() { return Path.fromString(path); }
public String getUri() { return path; }
@@ -63,10 +82,8 @@ public class DistributableResource {
public String toString() {
StringBuilder b = new StringBuilder();
- b.append("resource '").append(name)
- .append(pathType == PathType.FILE ? "' from file '" : " from uri ").append(path)
- .append("' with ref '").append(fileReference)
- .append("'");
+ b.append("resource '").append(name).append("' of type '").append(pathType)
+ .append("' with ref '").append(fileReference).append("'");
return b.toString();
}
}
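
A brief sketch, not part of the patch, of how the new BLOB-backed constructor above could be used; the resource name and expression text are invented for illustration:

    // Wrap an expression body in a BLOB-typed resource; sendTo(...) then takes the
    // BLOB branch and calls FileSender.sendBlobToServices(blob, services).
    ByteBuffer body = ByteBuffer.wrap("attribute(popularity) * 2"
                                              .getBytes(java.nio.charset.StandardCharsets.UTF_8));
    DistributableResource resource = new DistributableResource("myprofile.firstphase", body);
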
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
index fed35382b21..9b752c4179f 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/DocumentModelBuilder.java
@@ -209,17 +209,13 @@ public class DocumentModelBuilder {
private static DataType resolveTemporariesRecurse(DataType type, DataTypeCollection repo,
Collection<NewDocumentType> docs) {
if (type instanceof TemporaryStructuredDataType) {
- NewDocumentType docType = getDocumentType(docs, type.getId());
- if (docType != null) {
- type = docType;
- return type;
- }
- DataType real = repo.getDataType(type.getId());
- if (real == null) {
- throw new NullPointerException("Can not find type '" + type.toString() + "', impossible.");
- }
- type = real;
- } else if (type instanceof StructDataType) {
+ DataType struct = repo.getDataType(type.getId());
+ if (struct != null)
+ type = struct;
+ else
+ type = getDocumentType(docs, type.getId());
+ }
+ else if (type instanceof StructDataType) {
StructDataType dt = (StructDataType) type;
for (com.yahoo.document.Field field : dt.getFields()) {
if (field.getDataType() != type) {
@@ -227,14 +223,17 @@ public class DocumentModelBuilder {
field.setDataType(resolveTemporariesRecurse(field.getDataType(), repo, docs));
}
}
- } else if (type instanceof MapDataType) {
+ }
+ else if (type instanceof MapDataType) {
MapDataType t = (MapDataType) type;
t.setKeyType(resolveTemporariesRecurse(t.getKeyType(), repo, docs));
t.setValueType(resolveTemporariesRecurse(t.getValueType(), repo, docs));
- } else if (type instanceof CollectionDataType) {
+ }
+ else if (type instanceof CollectionDataType) {
CollectionDataType t = (CollectionDataType) type;
t.setNestedType(resolveTemporariesRecurse(t.getNestedType(), repo, docs));
- } else if (type instanceof ReferenceDataType) {
+ }
+ else if (type instanceof ReferenceDataType) {
ReferenceDataType t = (ReferenceDataType) type;
if (t.getTargetType() instanceof TemporaryStructuredDataType) {
DataType targetType = resolveTemporariesRecurse(t.getTargetType(), repo, docs);
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java b/config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java
index 6b40289e17d..24bc081aded 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/ImmutableSearch.java
@@ -32,7 +32,7 @@ public interface ImmutableSearch {
DeployLogger getDeployLogger();
ModelContext.Properties getDeployProperties();
RankingConstants rankingConstants();
- RankExpressionFiles rankExpressionFiles();
+ LargeRankExpressions rankExpressionFiles();
OnnxModels onnxModels();
Stream<ImmutableSDField> allImportedFields();
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/LargeRankExpressions.java b/config-model/src/main/java/com/yahoo/searchdefinition/LargeRankExpressions.java
new file mode 100644
index 00000000000..6fadcb39d11
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/LargeRankExpressions.java
@@ -0,0 +1,38 @@
+package com.yahoo.searchdefinition;
+
+import com.yahoo.vespa.model.AbstractService;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+public class LargeRankExpressions {
+ private final Map<String, RankExpressionBody> expressions = new HashMap<>();
+
+ public void add(RankExpressionBody expression) {
+ expression.validate();
+ String name = expression.getName();
+ if (expressions.containsKey(name)) {
+ throw new IllegalArgumentException("Rank expression '" + name +
+ "' defined twice. Previous blob with " + expressions.get(name).getBlob().remaining() +
+ " bytes, while current has " + expression.getBlob().remaining() + " bytes");
+ }
+ expressions.put(name, expression);
+ }
+
+ /** Returns the rank expression with the given name, or null if not present */
+ public RankExpressionBody get(String name) {
+ return expressions.get(name);
+ }
+
+ /** Returns a read-only map of the rank expressions in this, indexed by name */
+ public Map<String, RankExpressionBody> asMap() {
+ return Collections.unmodifiableMap(expressions);
+ }
+
+ /** Initiate sending of these expressions to some services over file distribution */
+ public void sendTo(Collection<? extends AbstractService> services) {
+ expressions.values().forEach(expression -> expression.sendTo(services));
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionBody.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionBody.java
new file mode 100644
index 00000000000..8c6830de815
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionBody.java
@@ -0,0 +1,10 @@
+package com.yahoo.searchdefinition;
+
+import java.nio.ByteBuffer;
+
+public class RankExpressionBody extends DistributableResource {
+
+ public RankExpressionBody(String name, ByteBuffer body) {
+ super(name, body);
+ }
+}
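
A short usage sketch, again not part of the patch, tying the two new classes together; the expression name and body are invented, and ByteBuffer/StandardCharsets imports are assumed:

    LargeRankExpressions expressions = new LargeRankExpressions();
    expressions.add(new RankExpressionBody("myprofile.secondphase",
            ByteBuffer.wrap("sum(attribute(weights))".getBytes(StandardCharsets.UTF_8))));
    // Adding another body under the same name throws IllegalArgumentException,
    // reporting the sizes of the existing and the new blob.
    // expressions.sendTo(services) distributes all blobs over file distribution.
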
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFile.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFile.java
deleted file mode 100644
index 56385efeb0b..00000000000
--- a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFile.java
+++ /dev/null
@@ -1,35 +0,0 @@
-package com.yahoo.searchdefinition;
-
-import com.yahoo.config.application.api.ApplicationPackage;
-import com.yahoo.vespa.model.AbstractService;
-import com.yahoo.vespa.model.utils.FileSender;
-
-import java.util.Collection;
-
-public class RankExpressionFile extends DistributableResource {
-
- public RankExpressionFile(String name, String path) {
- super(name, path);
- validate();
- }
-
- @Override
- public void sendTo(Collection<? extends AbstractService> services) {
- /*
- * TODO This is a very dirty hack due to using both SEARCH_DEFINITIONS_DIR and SCHEMA_DIR
- * and doing so inconsistently, combined with using both fields from application package on disk and in zookeeper.
- * The mess is spread out nicely, but ZookeeperClient, and writeSearchDefinitions and ZkApplicationPackage and FilesApplicationPackage
- * should be consolidated
- */
- try {
- setFileReference(FileSender.sendFileToServices(ApplicationPackage.SCHEMAS_DIR + "/" + getFileName(), services).value());
- } catch (IllegalArgumentException e1) {
- try {
- setFileReference(FileSender.sendFileToServices(ApplicationPackage.SEARCH_DEFINITIONS_DIR + "/" + getFileName(), services).value());
- } catch (IllegalArgumentException e2) {
- throw new IllegalArgumentException("Failed to find expression file '" + getFileName() + "' in '"
- + ApplicationPackage.SEARCH_DEFINITIONS_DIR + "' or '" + ApplicationPackage.SCHEMAS_DIR + "'.", e2);
- }
- }
- }
-}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFiles.java b/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFiles.java
deleted file mode 100644
index 34ad912dd00..00000000000
--- a/config-model/src/main/java/com/yahoo/searchdefinition/RankExpressionFiles.java
+++ /dev/null
@@ -1,47 +0,0 @@
-package com.yahoo.searchdefinition;
-
-import com.yahoo.config.application.api.DeployLogger;
-import com.yahoo.vespa.model.AbstractService;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.logging.Level;
-
-public class RankExpressionFiles {
- private final Map<String, RankExpressionFile> expressions = new HashMap<>();
-
- //TODO Deploy logger should not be necessary, as redefinition is illegal, but legacy prevents enforcement starting now.
- public void add(RankExpressionFile expression, DeployLogger deployLogger) {
- expression.validate();
- String name = expression.getName();
- if (expressions.containsKey(name)) {
- if ( expressions.get(name).getFileName().equals(expression.getFileName()) ) {
- //TODO Throw instead, No later than Vespa 8
- deployLogger.logApplicationPackage(Level.WARNING, "Rank expression file '" + name +
- "' defined twice with identical expression (illegal and will be enforced soon) '" + expression.getFileName() + "'.");
- } else {
- throw new IllegalArgumentException("Rank expression file '" + name +
- "' defined twice (illegal but not enforced), but redefinition is not matching (illegal and enforced), " +
- "previous = '" + expressions.get(name).getFileName() + "', new = '" + expression.getFileName() + "'.");
- }
- }
- expressions.put(name, expression);
- }
-
- /** Returns the ranking constant with the given name, or null if not present */
- public RankExpressionFile get(String name) {
- return expressions.get(name);
- }
-
- /** Returns a read-only map of the ranking constants in this indexed by name */
- public Map<String, RankExpressionFile> asMap() {
- return Collections.unmodifiableMap(expressions);
- }
-
- /** Initiate sending of these constants to some services over file distribution */
- public void sendTo(Collection<? extends AbstractService> services) {
- expressions.values().forEach(constant -> constant.sendTo(services));
- }
-}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
index f11afef0eb2..9ce1b8bb330 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
@@ -83,7 +83,7 @@ public class Search implements ImmutableSearch {
private final Map<String, DocumentSummary> summaries = new LinkedHashMap<>();
/** External rank expression files of this */
- private final RankExpressionFiles rankExpressionFiles = new RankExpressionFiles();
+ private final LargeRankExpressions largeRankExpressions = new LargeRankExpressions();
/** Ranking constants of this */
private final RankingConstants rankingConstants = new RankingConstants();
@@ -98,7 +98,7 @@ public class Search implements ImmutableSearch {
private final DeployLogger deployLogger;
private final ModelContext.Properties properties;
- /** Testin only */
+ /** Testing only */
public Search(String name) {
this(name, null, new BaseDeployLogger(), new TestProperties());
}
@@ -188,7 +188,7 @@ public class Search implements ImmutableSearch {
}
@Override
- public RankExpressionFiles rankExpressionFiles() { return rankExpressionFiles; }
+ public LargeRankExpressions rankExpressionFiles() { return largeRankExpressions; }
@Override
public RankingConstants rankingConstants() { return rankingConstants; }
@@ -198,7 +198,7 @@ public class Search implements ImmutableSearch {
public void sendTo(Collection<? extends AbstractService> services) {
rankingConstants.sendTo(services);
- rankExpressionFiles.sendTo(services);
+ largeRankExpressions.sendTo(services);
onnxModels.sendTo(services);
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
index d414b9ed79f..7c533cce006 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RankProfileList.java
@@ -6,8 +6,7 @@ import com.yahoo.config.model.api.ModelContext;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.searchdefinition.OnnxModel;
import com.yahoo.searchdefinition.OnnxModels;
-import com.yahoo.searchdefinition.RankExpressionFile;
-import com.yahoo.searchdefinition.RankExpressionFiles;
+import com.yahoo.searchdefinition.LargeRankExpressions;
import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.RankingConstant;
import com.yahoo.searchdefinition.RankingConstants;
@@ -34,14 +33,14 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
private final Map<String, RawRankProfile> rankProfiles = new java.util.LinkedHashMap<>();
private final RankingConstants rankingConstants;
- private final RankExpressionFiles rankExpressionFiles;
+ private final LargeRankExpressions largeRankExpressions;
private final OnnxModels onnxModels;
public static RankProfileList empty = new RankProfileList();
private RankProfileList() {
rankingConstants = new RankingConstants();
- rankExpressionFiles = new RankExpressionFiles();
+ largeRankExpressions = new LargeRankExpressions();
onnxModels = new OnnxModels();
}
@@ -53,7 +52,7 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
*/
public RankProfileList(Search search,
RankingConstants rankingConstants,
- RankExpressionFiles rankExpressionFiles,
+ LargeRankExpressions largeRankExpressions,
AttributeFields attributeFields,
RankProfileRegistry rankProfileRegistry,
QueryProfileRegistry queryProfiles,
@@ -61,7 +60,7 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
ModelContext.Properties deployProperties) {
setName(search == null ? "default" : search.getName());
this.rankingConstants = rankingConstants;
- this.rankExpressionFiles = rankExpressionFiles;
+ this.largeRankExpressions = largeRankExpressions;
onnxModels = search == null ? new OnnxModels() : search.onnxModels(); // as ONNX models come from parsing rank expressions
deriveRankProfiles(rankProfileRegistry, queryProfiles, importedModels, search, attributeFields, deployProperties);
}
@@ -74,7 +73,8 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
ModelContext.Properties deployProperties) {
if (search != null) { // profiles belonging to a search have a default profile
RawRankProfile defaultProfile = new RawRankProfile(rankProfileRegistry.get(search, "default"),
- queryProfiles, importedModels, attributeFields, deployProperties);
+ largeRankExpressions, queryProfiles, importedModels,
+ attributeFields, deployProperties);
rankProfiles.put(defaultProfile.getName(), defaultProfile);
}
@@ -84,7 +84,8 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
this.onnxModels.add(rank.onnxModels());
}
- RawRankProfile rawRank = new RawRankProfile(rank, queryProfiles, importedModels, attributeFields, deployProperties);
+ RawRankProfile rawRank = new RawRankProfile(rank, largeRankExpressions, queryProfiles, importedModels,
+ attributeFields, deployProperties);
rankProfiles.put(rawRank.getName(), rawRank);
}
}
@@ -100,7 +101,7 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
public void sendTo(Collection<? extends AbstractService> services) {
rankingConstants.sendTo(services);
- rankExpressionFiles.sendTo(services);
+ largeRankExpressions.sendTo(services);
onnxModels.sendTo(services);
}
@@ -115,7 +116,7 @@ public class RankProfileList extends Derived implements RankProfilesConfig.Produ
}
public void getConfig(RankingExpressionsConfig.Builder builder) {
- rankExpressionFiles.asMap().values().forEach((expr) -> builder.expression.add(new RankingExpressionsConfig.Expression.Builder().name(expr.getName()).fileref(expr.getFileReference())));
+ largeRankExpressions.asMap().values().forEach((expr) -> builder.expression.add(new RankingExpressionsConfig.Expression.Builder().name(expr.getName()).fileref(expr.getFileReference())));
}
public void getConfig(RankingConstantsConfig.Builder builder) {
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java
index 6b589a22de5..97d695cead9 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/RawRankProfile.java
@@ -9,6 +9,8 @@ import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.search.query.profile.QueryProfileRegistry;
import com.yahoo.searchdefinition.OnnxModel;
+import com.yahoo.searchdefinition.LargeRankExpressions;
+import com.yahoo.searchdefinition.RankExpressionBody;
import com.yahoo.searchdefinition.document.RankType;
import com.yahoo.searchdefinition.RankProfile;
import com.yahoo.searchdefinition.expressiontransforms.OnnxModelTransformer;
@@ -20,6 +22,7 @@ import com.yahoo.searchlib.rankingexpression.rule.SerializationContext;
import com.yahoo.tensor.TensorType;
import com.yahoo.vespa.config.search.RankProfilesConfig;
+import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashSet;
@@ -27,6 +30,7 @@ import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
+import java.util.ListIterator;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
@@ -55,19 +59,20 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
/**
* Creates a raw rank profile from the given rank profile
*/
- public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
+ public RawRankProfile(RankProfile rankProfile, LargeRankExpressions largeExpressions,
+ QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
AttributeFields attributeFields, ModelContext.Properties deployProperties) {
this.name = rankProfile.getName();
compressedProperties = compress(new Deriver(rankProfile.compile(queryProfiles, importedModels),
- attributeFields, deployProperties).derive());
+ attributeFields, deployProperties).derive(largeExpressions));
}
/**
* Only for testing
*/
- public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles,
- ImportedMlModels importedModels, AttributeFields attributeFields) {
- this(rankProfile, queryProfiles, importedModels, attributeFields, new TestProperties());
+ public RawRankProfile(RankProfile rankProfile, QueryProfileRegistry queryProfiles, ImportedMlModels importedModels,
+ AttributeFields attributeFields) {
+ this(rankProfile, new LargeRankExpressions(), queryProfiles, importedModels, attributeFields, new TestProperties());
}
private Compressor.Compression compress(List<Pair<String, String>> properties) {
@@ -142,6 +147,9 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
private final int numSearchPartitions;
private final double termwiseLimit;
private final double rankScoreDropLimit;
+ private final int largeRankExpressionLimit;
+ private final boolean distributeLargeRankExpressions;
+ private final boolean useDistributedRankExpressions;
/**
* The rank type definitions used to derive settings for the native rank features
@@ -150,6 +158,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
private final Map<String, String> attributeTypes;
private final Map<String, String> queryFeatureTypes;
private final Set<String> filterFields = new java.util.LinkedHashSet<>();
+ private final String rankprofileName;
private RankingExpression firstPhaseRanking;
private RankingExpression secondPhaseRanking;
@@ -159,6 +168,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
*/
Deriver(RankProfile compiled, AttributeFields attributeFields, ModelContext.Properties deployProperties)
{
+ rankprofileName = compiled.getName();
attributeTypes = compiled.getAttributeTypes();
queryFeatureTypes = compiled.getQueryFeatureTypes();
firstPhaseRanking = compiled.getFirstPhaseRanking();
@@ -174,6 +184,9 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
keepRankCount = compiled.getKeepRankCount();
rankScoreDropLimit = compiled.getRankScoreDropLimit();
ignoreDefaultRankFeatures = compiled.getIgnoreDefaultRankFeatures();
+ largeRankExpressionLimit = deployProperties.featureFlags().largeRankExpressionLimit();
+ distributeLargeRankExpressions = deployProperties.featureFlags().distributeExternalRankExpressions();
+ useDistributedRankExpressions = deployProperties.featureFlags().useExternalRankExpressions();
rankProperties = new ArrayList<>(compiled.getRankProperties());
Map<String, RankProfile.RankingExpressionFunction> functions = compiled.getFunctions();
@@ -319,10 +332,10 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
}
/** Derives the properties this produces */
- public List<Pair<String, String>> derive() {
+ public List<Pair<String, String>> derive(LargeRankExpressions largeRankExpressions) {
List<Pair<String, String>> properties = new ArrayList<>();
for (RankProfile.RankProperty property : rankProperties) {
- if (("rankingExpression(" + RankProfile.FIRST_PHASE + ").rankingScript").equals(property.getName())) {
+ if (RankingExpression.propertyName(RankProfile.FIRST_PHASE).equals(property.getName())) {
// Could have been set by function expansion. Set expressions, then skip this property.
try {
firstPhaseRanking = new RankingExpression(property.getValue());
@@ -330,7 +343,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
throw new IllegalArgumentException("Could not parse first phase expression", e);
}
}
- else if (("rankingExpression(" + RankProfile.SECOND_PHASE + ").rankingScript").equals(property.getName())) {
+ else if (RankingExpression.propertyName(RankProfile.SECOND_PHASE).equals(property.getName())) {
try {
secondPhaseRanking = new RankingExpression(property.getValue());
} catch (ParseException e) {
@@ -419,7 +432,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
properties.add(new Pair<>("vespa.rank." + phase, expression.getRoot().toString()));
} else {
properties.add(new Pair<>("vespa.rank." + phase, "rankingExpression(" + name + ")"));
- properties.add(new Pair<>("rankingExpression(" + name + ").rankingScript", expression.getRoot().toString()));
+ properties.add(new Pair<>(RankingExpression.propertyName(name), expression.getRoot().toString()));
}
return properties;
}
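
The two property-name hunks above replace a hand-built key with RankingExpression.propertyName. Judging from the literal being replaced, the intended equivalence is the following; this is an inference from the diff, not a statement of that method's contract:

    String name = RankProfile.FIRST_PHASE;
    assert RankingExpression.propertyName(name)
            .equals("rankingExpression(" + name + ").rankingScript");
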
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java b/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java
index 8dea1b65079..ea0452a6c49 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java
@@ -8,6 +8,7 @@ import com.yahoo.config.model.api.ServiceInfo;
import com.yahoo.config.model.producer.AbstractConfigProducer;
import com.yahoo.vespa.defaults.Defaults;
+import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
@@ -470,6 +471,10 @@ public abstract class AbstractService extends AbstractConfigProducer<AbstractCon
return getRoot().getFileDistributor().sendUriToHost(uri, getHost().getHost());
}
+ public FileReference sendBlob(ByteBuffer blob) {
+ return getRoot().getFileDistributor().sendBlobToHost(blob, getHost().getHost());
+ }
+
/** The service HTTP port for health status */
public int getHealthPort() { return -1;}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java b/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java
index d15db6b4a55..dd35787571e 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/ConfigSentinel.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model;
+import com.yahoo.config.model.api.ModelContext;
import com.yahoo.cloud.config.SentinelConfig;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Zone;
@@ -18,16 +19,20 @@ public class ConfigSentinel extends AbstractService implements SentinelConfig.Pr
private final ApplicationId applicationId;
private final Zone zone;
+ private final boolean requireConnectivityCheck;
/**
* Constructs a new ConfigSentinel for the given host.
*
* @param host Physical host on which to run.
*/
- public ConfigSentinel(Host host, ApplicationId applicationId, Zone zone) {
+ public ConfigSentinel(Host host, ApplicationId applicationId, Zone zone,
+ ModelContext.FeatureFlags featureFlags)
+ {
super(host, "sentinel");
this.applicationId = applicationId;
this.zone = zone;
+ this.requireConnectivityCheck = featureFlags.requireConnectivityCheck();
portsMeta.on(0).tag("rpc").tag("admin");
portsMeta.on(1).tag("telnet").tag("interactive").tag("http").tag("state");
setProp("clustertype", "hosts");
@@ -75,6 +80,19 @@ public class ConfigSentinel extends AbstractService implements SentinelConfig.Pr
builder.service(getServiceConfig(s));
}
}
+ builder.connectivity(getConnectivityConfig(requireConnectivityCheck));
+ }
+
+ private SentinelConfig.Connectivity.Builder getConnectivityConfig(boolean enable) {
+ var builder = new SentinelConfig.Connectivity.Builder();
+ if (enable) {
+ builder.minOkPercent(50);
+ builder.maxBadCount(1);
+ } else {
+ builder.minOkPercent(0);
+ builder.maxBadCount(Integer.MAX_VALUE);
+ }
+ return builder;
}
private SentinelConfig.Application.Builder getApplicationConfig() {
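
For reference, a sketch of the two connectivity configurations getConnectivityConfig can produce, with the builder calls copied from the hunk above; whether the check is enabled follows the requireConnectivityCheck feature flag:

    var checked   = new SentinelConfig.Connectivity.Builder().minOkPercent(50).maxBadCount(1);
    var unchecked = new SentinelConfig.Connectivity.Builder().minOkPercent(0).maxBadCount(Integer.MAX_VALUE);
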
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
index 09bbd446803..53f42866d8d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
@@ -47,16 +47,15 @@ public class HostSystem extends AbstractConfigProducer<Host> {
void checkName(String hostname) {
// Give a warning if the host does not exist
try {
- @SuppressWarnings("unused")
- Object ignore = java.net.InetAddress.getByName(hostname);
+ var inetAddr = java.net.InetAddress.getByName(hostname);
+ String canonical = inetAddr.getCanonicalHostName();
+ if (! hostname.equals(canonical)) {
+ deployLogger.logApplicationPackage(Level.WARNING, "Host named '" + hostname + "' may not receive any config " +
+ "since it differs from its canonical hostname '" + canonical + "' (check DNS and /etc/hosts).");
+ }
} catch (UnknownHostException e) {
deployLogger.logApplicationPackage(Level.WARNING, "Unable to lookup IP address of host: " + hostname);
}
- if (! hostname.contains(".")) {
- deployLogger.logApplicationPackage(Level.WARNING, "Host named '" + hostname + "' may not receive any config " +
- "since it is not a canonical hostname. " +
- "Disregard this warning when testing in a Docker container.");
- }
}
/**
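
A standalone sketch of the canonical-hostname check introduced above, mirroring the hunk's behaviour; the hostname is an example value and the warning is printed instead of logged:

    String hostname = "node1.example.com";
    try {
        String canonical = java.net.InetAddress.getByName(hostname).getCanonicalHostName();
        if ( ! hostname.equals(canonical))
            System.out.println("'" + hostname + "' differs from its canonical hostname '" + canonical + "'");
    } catch (java.net.UnknownHostException e) {
        System.out.println("Unable to lookup IP address of host: " + hostname);
    }
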
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
index ab00e9d295f..d20247b79fc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
@@ -33,7 +33,7 @@ import com.yahoo.container.QrConfig;
import com.yahoo.path.Path;
import com.yahoo.searchdefinition.OnnxModel;
import com.yahoo.searchdefinition.OnnxModels;
-import com.yahoo.searchdefinition.RankExpressionFiles;
+import com.yahoo.searchdefinition.LargeRankExpressions;
import com.yahoo.searchdefinition.RankProfile;
import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.RankingConstants;
@@ -131,7 +131,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
private final RankingConstants rankingConstants = new RankingConstants();
/** External rank expression files of this */
- private final RankExpressionFiles rankExpressionFiles = new RankExpressionFiles();
+ private final LargeRankExpressions largeRankExpressions = new LargeRankExpressions();
/** The validation overrides of this. This is never null. */
private final ValidationOverrides validationOverrides;
@@ -187,7 +187,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
deployState.rankProfileRegistry(), deployState.getQueryProfiles());
rankProfileList = new RankProfileList(null, // null search -> global
rankingConstants,
- rankExpressionFiles,
+ largeRankExpressions,
AttributeFields.empty,
deployState.rankProfileRegistry(),
deployState.getQueryProfiles().getRegistry(),
@@ -266,7 +266,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
/** Returns the global ranking constants of this */
public RankingConstants rankingConstants() { return rankingConstants; }
- public RankExpressionFiles rankExpressionFiles() { return rankExpressionFiles; }
+ public LargeRankExpressions rankExpressionFiles() { return largeRankExpressions; }
/** Creates a mutable model with no services instantiated */
public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
index d6673cd49e9..b576d1cb5d2 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
@@ -6,6 +6,7 @@ import com.google.inject.Inject;
import com.yahoo.component.Version;
import com.yahoo.component.provider.ComponentRegistry;
import com.yahoo.config.application.api.ApplicationPackage;
+import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.model.ConfigModelRegistry;
import com.yahoo.config.model.MapConfigModelRegistry;
import com.yahoo.config.model.NullConfigModelRegistry;
@@ -23,6 +24,7 @@ import com.yahoo.config.provision.TransientException;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.VespaVersion;
import com.yahoo.vespa.model.application.validation.Validation;
+import com.yahoo.yolean.Exceptions;
import org.xml.sax.SAXException;
import java.io.IOException;
@@ -31,6 +33,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.logging.Level;
import java.util.logging.Logger;
/**
@@ -170,6 +173,13 @@ public class VespaModelFactory implements ModelFactory {
private List<ConfigChangeAction> validateModel(VespaModel model, DeployState deployState, ValidationParameters validationParameters) {
try {
return Validation.validate(model, validationParameters, deployState);
+ } catch (ValidationOverrides.ValidationException e) {
+ if (deployState.isHosted() && zone.environment().isManuallyDeployed())
+ deployState.getDeployLogger().logApplicationPackage(Level.WARNING,
+ "Auto-overriding validation which would be disallowed in production: " +
+ Exceptions.toMessageString(e));
+ else
+ rethrowUnlessIgnoreErrors(e, validationParameters.ignoreValidationErrors());
} catch (IllegalArgumentException | TransientException e) {
rethrowUnlessIgnoreErrors(e, validationParameters.ignoreValidationErrors());
} catch (Exception e) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
index e080ce43730..a2a6ada9093 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.admin;
+import com.yahoo.config.model.api.ModelContext;
import com.yahoo.cloud.config.SlobroksConfig;
import com.yahoo.cloud.config.ZookeepersConfig;
import com.yahoo.cloud.config.log.LogdConfig;
@@ -242,7 +243,8 @@ public class Admin extends AbstractConfigProducer<Admin> implements Serializable
}
private void addCommonServices(HostResource host, DeployState deployState) {
- addConfigSentinel(deployState.getDeployLogger(), host, deployState.getProperties().applicationId(), deployState.zone());
+ addConfigSentinel(deployState.getDeployLogger(), host, deployState.getProperties().applicationId(), deployState.zone(),
+ deployState.featureFlags());
addLogd(deployState.getDeployLogger(), host);
addConfigProxy(deployState.getDeployLogger(), host);
addFileDistribution(host);
@@ -262,8 +264,10 @@ public class Admin extends AbstractConfigProducer<Admin> implements Serializable
}
}
- private void addConfigSentinel(DeployLogger deployLogger, HostResource host, ApplicationId applicationId, Zone zone) {
- ConfigSentinel configSentinel = new ConfigSentinel(host.getHost(), applicationId, zone);
+ private void addConfigSentinel(DeployLogger deployLogger, HostResource host,
+ ApplicationId applicationId, Zone zone, ModelContext.FeatureFlags featureFlags)
+ {
+ ConfigSentinel configSentinel = new ConfigSentinel(host.getHost(), applicationId, zone, featureFlags);
addAndInitializeService(deployLogger, host, configSentinel);
host.getHost().setConfigSentinel(configSentinel);
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
index 234892c5cc3..9dec27e17fe 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainer.java
@@ -45,16 +45,13 @@ public class MetricsProxyContainer extends Container implements
final boolean isHostedVespa;
private final Optional<ClusterMembership> clusterMembership;
- private final ModelContext.FeatureFlags featureFlags;
private final MetricsProxyContainerCluster cluster;
private final String jvmGCOptions;
-
public MetricsProxyContainer(MetricsProxyContainerCluster cluster, HostResource host, int index, DeployState deployState) {
super(cluster, host.getHostname(), index, deployState);
this.isHostedVespa = deployState.isHosted();
this.clusterMembership = host.spec().membership();
- this.featureFlags = deployState.featureFlags();
this.cluster = cluster;
this.jvmGCOptions = deployState.getProperties().jvmGCOptions(clusterMembership.map(membership -> membership.cluster().type()));
setProp("clustertype", "admin");
@@ -157,7 +154,9 @@ public class MetricsProxyContainer extends Container implements
cluster.getConfig(builder);
if (clusterMembership.isPresent()) {
- int maxHeapSize = featureFlags.metricsProxyMaxHeapSizeInMb(clusterMembership.get().cluster().type());
+ int maxHeapSize = clusterMembership.get().cluster().type() == ClusterSpec.Type.admin
+ ? 128
+ : 256;
builder.jvm
.gcopts(jvmGCOptions)
.heapsize(maxHeapSize);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index 034bf772ffc..114a3e380ef 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -256,8 +256,11 @@ public class VespaMetricSet {
metrics.add(new Metric("cluster-controller.node-event.count"));
metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.last"));
+ metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.last"));
+ metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.last"));
+ metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.max"));
metrics.add(new Metric("cluster-controller.resource_usage.disk_limit.last"));
metrics.add(new Metric("cluster-controller.resource_usage.memory_limit.last"));
@@ -762,6 +765,15 @@ public class VespaMetricSet {
metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count"));
+ metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.max"));
+ metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.sum"));
+ metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.count"));
+ metrics.add(new Metric("vds.mergethrottler.queuesize.max"));
+ metrics.add(new Metric("vds.mergethrottler.queuesize.sum"));
+ metrics.add(new Metric("vds.mergethrottler.queuesize.count"));
+ metrics.add(new Metric("vds.mergethrottler.bounced_due_to_back_pressure.rate"));
+ metrics.add(new Metric("vds.mergethrottler.locallyexecutedmerges.ok.rate"));
+ metrics.add(new Metric("vds.mergethrottler.mergechains.ok.rate"));
return metrics;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java
index d87c6596fa4..52dccbe96b5 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/RankSetupValidator.java
@@ -8,7 +8,7 @@ import com.yahoo.log.InvalidLogFormatException;
import com.yahoo.log.LogMessage;
import com.yahoo.path.Path;
import com.yahoo.searchdefinition.OnnxModel;
-import com.yahoo.searchdefinition.RankExpressionFile;
+import com.yahoo.searchdefinition.RankExpressionBody;
import com.yahoo.vespa.config.search.core.RankingExpressionsConfig;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.yolean.Exceptions;
@@ -165,7 +165,7 @@ public class RankSetupValidator extends Validator {
config.add(String.format("file[%d].path \"%s\"", config.size() / 2, modelPath));
}
- for (RankExpressionFile expr : db.getDerivedConfiguration().getSearch().rankExpressionFiles().asMap().values()) {
+ for (RankExpressionBody expr : db.getDerivedConfiguration().getSearch().rankExpressionFiles().asMap().values()) {
String modelPath = getFileRepositoryPath(expr.getFilePath(), expr.getFileReference());
config.add(String.format("file[%d].ref \"%s\"", config.size() / 2, expr.getFileReference()));
config.add(String.format("file[%d].path \"%s\"", config.size() / 2, modelPath));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
index 55443d4b260..84c7a48a998 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validation.java
@@ -20,10 +20,12 @@ import com.yahoo.vespa.model.application.validation.change.GlobalDocumentChangeV
import com.yahoo.vespa.model.application.validation.change.IndexedSearchClusterChangeValidator;
import com.yahoo.vespa.model.application.validation.change.IndexingModeChangeValidator;
import com.yahoo.vespa.model.application.validation.change.NodeResourceChangeValidator;
+import com.yahoo.vespa.model.application.validation.change.RedundancyIncreaseValidator;
import com.yahoo.vespa.model.application.validation.change.ResourcesReductionValidator;
import com.yahoo.vespa.model.application.validation.change.StartupCommandChangeValidator;
import com.yahoo.vespa.model.application.validation.change.StreamingSearchClusterChangeValidator;
import com.yahoo.vespa.model.application.validation.first.AccessControlOnFirstDeploymentValidator;
+import com.yahoo.vespa.model.application.validation.first.RedundancyOnFirstDeploymentValidator;
import java.time.Instant;
import java.util.Arrays;
@@ -53,6 +55,7 @@ public class Validation {
* between the previous and current model
*
* @return a list of required changes needed to make this configuration live
+ * @throws ValidationOverrides.ValidationException if the change fails validation
*/
public static List<ConfigChangeAction> validate(VespaModel model, ValidationParameters validationParameters, DeployState deployState) {
if (validationParameters.checkRouting()) {
@@ -105,7 +108,8 @@ public class Validation {
new ClusterSizeReductionValidator(),
new ResourcesReductionValidator(),
new ContainerRestartValidator(),
- new NodeResourceChangeValidator()
+ new NodeResourceChangeValidator(),
+ new RedundancyIncreaseValidator()
};
List<ConfigChangeAction> actions = Arrays.stream(validators)
.flatMap(v -> v.validate(currentModel, nextModel, overrides, now).stream())
@@ -122,6 +126,7 @@ public class Validation {
private static void validateFirstTimeDeployment(VespaModel model, DeployState deployState) {
new AccessControlOnFirstDeploymentValidator().validate(model, deployState);
+ new RedundancyOnFirstDeploymentValidator().validate(model, deployState);
}
private static void deferConfigChangesForClustersToBeRestarted(List<ConfigChangeAction> actions, VespaModel model) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java
index f3bebbe7fb9..fee63828670 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/Validator.java
@@ -15,7 +15,7 @@ public abstract class Validator {
* Validates the input vespamodel
*
* @param model a VespaModel object
- * @param deployState The {@link DeployState} built from building the model
+ * @param deployState the {@link DeployState} built from building the model
*/
public abstract void validate(VespaModel model, DeployState deployState);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java
index b720cc13f42..4222d22563d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/ChangeValidator.java
@@ -25,6 +25,7 @@ public interface ChangeValidator {
* @param now the instant to use as now
* @return a list of actions specifying what needs to be done in order to activate the new model.
* Return an empty list if nothing needs to be done
+ * @throws IllegalArgumentException if the change fails validation
*/
List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidator.java
new file mode 100644
index 00000000000..dcf16222d35
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidator.java
@@ -0,0 +1,45 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.change;
+
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.model.api.ConfigChangeAction;
+import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.content.cluster.ContentCluster;
+
+import java.time.Instant;
+import java.util.List;
+
+/**
+ * Checks that redundancy is not increased (without a validation override),
+ * as that may easily cause the cluster to run out of resources.
+ *
+ * @author bratseth
+ */
+public class RedundancyIncreaseValidator implements ChangeValidator {
+
+ @Override
+ public List<ConfigChangeAction> validate(VespaModel current, VespaModel next, ValidationOverrides overrides, Instant now) {
+ for (ContentCluster currentCluster : current.getContentClusters().values()) {
+ ContentCluster nextCluster = next.getContentClusters().get(currentCluster.getSubId());
+ if (nextCluster == null) continue;
+ if (redundancyOf(nextCluster) > redundancyOf(currentCluster)) {
+ overrides.invalid(ValidationId.redundancyIncrease,
+ "Increasing redundancy from " + redundancyOf(currentCluster) + " to " +
+ redundancyOf(nextCluster) + " in '" + currentCluster + "'. " +
+ "This is a safe operation but verify that you have room for a " +
+ redundancyOf(nextCluster) + "/" + redundancyOf(currentCluster) + "x increase " +
+ "in content size",
+ now);
+ }
+ }
+ return List.of();
+ }
+
+ private int redundancyOf(ContentCluster cluster) {
+ return cluster.redundancy().finalRedundancy();
+ }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java
new file mode 100644
index 00000000000..e6117299269
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidator.java
@@ -0,0 +1,44 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.first;
+
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.model.ConfigModelContext.ApplicationType;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.application.validation.Validator;
+import com.yahoo.vespa.model.container.ApplicationContainerCluster;
+import com.yahoo.vespa.model.container.Container;
+import com.yahoo.vespa.model.container.ContainerCluster;
+import com.yahoo.vespa.model.content.cluster.ContentCluster;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static com.yahoo.collections.CollectionUtil.mkString;
+import static com.yahoo.config.provision.InstanceName.defaultName;
+import static com.yahoo.vespa.model.container.http.AccessControl.hasHandlerThatNeedsProtection;
+
+/**
+ * Validates that applications in prod zones do not have redundancy 1 (without a validation override).
+ *
+ * @author bratseth
+ */
+public class RedundancyOnFirstDeploymentValidator extends Validator {
+
+ @Override
+ public void validate(VespaModel model, DeployState deployState) {
+ if ( ! deployState.isHosted()) return;
+ if ( ! deployState.zone().environment().isProduction()) return;
+
+ for (ContentCluster cluster : model.getContentClusters().values()) {
+ if (cluster.redundancy().finalRedundancy() == 1
+ && cluster.redundancy().totalNodes() > cluster.redundancy().groups())
+ deployState.validationOverrides().invalid(ValidationId.redundancyOne,
+ cluster + " has redundancy 1, which will cause it to lose data " +
+ "if a node fails. This requires an override on first deployment " +
+ "in a production zone",
+ deployState.now());
+ }
+ }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java
index 70f2acd3c7b..638864d85bb 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterResourceLimits.java
@@ -37,13 +37,16 @@ public class ClusterResourceLimits {
private final boolean enableFeedBlockInDistributor;
private final boolean hostedVespa;
+ private final boolean throwIfSpecified;
private final DeployLogger deployLogger;
+
private ResourceLimits.Builder ctrlBuilder = new ResourceLimits.Builder();
private ResourceLimits.Builder nodeBuilder = new ResourceLimits.Builder();
- public Builder(boolean enableFeedBlockInDistributor, boolean hostedVespa, DeployLogger deployLogger) {
+ public Builder(boolean enableFeedBlockInDistributor, boolean hostedVespa, boolean throwIfSpecified, DeployLogger deployLogger) {
this.enableFeedBlockInDistributor = enableFeedBlockInDistributor;
this.hostedVespa = hostedVespa;
+ this.throwIfSpecified = throwIfSpecified;
this.deployLogger = deployLogger;
}
@@ -58,7 +61,7 @@ public class ClusterResourceLimits {
private ResourceLimits.Builder createBuilder(ModelElement element) {
return element == null
? new ResourceLimits.Builder()
- : DomResourceLimitsBuilder.createBuilder(element, hostedVespa, deployLogger);
+ : DomResourceLimitsBuilder.createBuilder(element, hostedVespa, throwIfSpecified, deployLogger);
}
public void setClusterControllerBuilder(ResourceLimits.Builder builder) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
index ea52f9689ff..4a8002ba3dc 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/Content.java
@@ -111,13 +111,15 @@ public class Content extends ConfigModel {
return null;
}
- private static void checkThatExplicitIndexingChainInheritsCorrectly(ComponentRegistry<DocprocChain> allChains, ChainSpecification chainSpec) {
+ private static void checkThatExplicitIndexingChainInheritsCorrectly(ComponentRegistry<DocprocChain> allChains,
+ ChainSpecification chainSpec) {
ChainSpecification.Inheritance inheritance = chainSpec.inheritance;
for (ComponentSpecification componentSpec : inheritance.chainSpecifications) {
ChainSpecification parentSpec = getChainSpec(allChains, componentSpec);
if (containsIndexingChain(allChains, parentSpec)) return;
}
- throw new IllegalArgumentException("Docproc chain '" + chainSpec.componentId + "' does not inherit from 'indexing' chain.");
+ throw new IllegalArgumentException("Docproc chain '" + chainSpec.componentId +
+ "' must inherit from the 'indexing' chain");
}
public static List<Content> getContent(ConfigModelRepo pc) {
@@ -261,9 +263,17 @@ public class Content extends ConfigModel {
if (cluster.hasExplicitIndexingChain()) {
indexingChain = allChains.getComponent(cluster.getIndexingChainName());
if (indexingChain == null) {
- throw new RuntimeException("Indexing cluster " + cluster.getClusterName() + " refers to docproc " +
- "chain " + cluster.getIndexingChainName() + " for indexing, which does not exist.");
- } else {
+ throw new IllegalArgumentException(cluster + " refers to docproc " +
+ "chain '" + cluster.getIndexingChainName() +
+ "' for indexing, but this chain does not exist");
+ }
+ else if (indexingChain.getId().getName().equals("default")) {
+ throw new IllegalArgumentException(cluster + " specifies the chain " +
+ "'default' as indexing chain. As the 'default' chain is run by default, " +
+ "using it as the indexing chain will run it twice. " +
+ "Use a different name for the indexing chain.");
+ }
+ else {
checkThatExplicitIndexingChainInheritsCorrectly(allChains, indexingChain.getChainSpecification());
}
} else {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
index 51949e78838..efb47e97ccb 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
@@ -316,12 +316,8 @@ public class ContentSearchCluster extends AbstractConfigProducer<SearchCluster>
}
public void handleRedundancy(Redundancy redundancy) {
- if (hasIndexedCluster()) {
- if (usesHierarchicDistribution()) {
- indexedCluster.setMaxNodesDownPerFixedRow((redundancy.effectiveFinalRedundancy() / groupToSpecMap.size()) - 1);
- }
+ if (hasIndexedCluster())
indexedCluster.setSearchableCopies(redundancy.readyCopies());
- }
this.redundancy = redundancy;
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java b/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java
index 3b694f8986c..786d032578f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/DispatchTuning.java
@@ -15,7 +15,6 @@ public class DispatchTuning {
private final Integer maxHitsPerPartition;
private DispatchPolicy dispatchPolicy;
- private final Double minGroupCoverage;
private final Double minActiveDocsCoverage;
public Double getTopkProbability() {
@@ -27,7 +26,6 @@ public class DispatchTuning {
private DispatchTuning(Builder builder) {
maxHitsPerPartition = builder.maxHitsPerPartition;
dispatchPolicy = builder.dispatchPolicy;
- minGroupCoverage = builder.minGroupCoverage;
minActiveDocsCoverage = builder.minActiveDocsCoverage;
topkProbability = builder.topKProbability;
}
@@ -41,9 +39,6 @@ public class DispatchTuning {
@SuppressWarnings("unused")
public void setDispatchPolicy(DispatchPolicy dispatchPolicy) { this.dispatchPolicy = dispatchPolicy; }
- /** Returns the percentage of nodes in a group which must be up for that group to receive queries */
- public Double getMinGroupCoverage() { return minGroupCoverage; }
-
/** Returns the percentage of documents which must be available in a group for that group to receive queries */
public Double getMinActiveDocsCoverage() { return minActiveDocsCoverage; }
@@ -51,7 +46,6 @@ public class DispatchTuning {
private Integer maxHitsPerPartition;
private DispatchPolicy dispatchPolicy;
- private Double minGroupCoverage;
private Double minActiveDocsCoverage;
private Double topKProbability;
@@ -81,11 +75,6 @@ public class DispatchTuning {
}
}
- public Builder setMinGroupCoverage(Double minGroupCoverage) {
- this.minGroupCoverage = minGroupCoverage;
- return this;
- }
-
public Builder setMinActiveDocsCoverage(Double minCoverage) {
this.minActiveDocsCoverage = minCoverage;
return this;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
index e0d311e6df6..c298b7f5f5a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
@@ -123,6 +123,7 @@ public class ContentCluster extends AbstractConfigProducer<AbstractConfigProduce
boolean enableFeedBlockInDistributor = deployState.getProperties().featureFlags().enableFeedBlockInDistributor();
var resourceLimits = new ClusterResourceLimits.Builder(enableFeedBlockInDistributor,
stateIsHosted(deployState),
+ deployState.featureFlags().throwIfResourceLimitsSpecified(),
deployState.getDeployLogger())
.build(contentElement);
c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterId(contentElement),
@@ -674,4 +675,9 @@ public class ContentCluster extends AbstractConfigProducer<AbstractConfigProduce
// TODO
}
+ @Override
+ public String toString() {
+ return "content cluster '" + clusterId + "'";
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java
index 9f4852629d0..37adb73bc15 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomResourceLimitsBuilder.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.content.cluster;
import com.yahoo.config.application.api.DeployLogger;
@@ -14,18 +14,22 @@ import java.util.logging.Level;
*/
public class DomResourceLimitsBuilder {
- public static ResourceLimits.Builder createBuilder(ModelElement contentXml, boolean hostedVespa, DeployLogger deployLogger) {
+ public static ResourceLimits.Builder createBuilder(ModelElement contentXml,
+ boolean hostedVespa,
+ boolean throwIfSpecified,
+ DeployLogger deployLogger) {
ResourceLimits.Builder builder = new ResourceLimits.Builder();
ModelElement resourceLimits = contentXml.child("resource-limits");
if (resourceLimits == null) { return builder; }
if (hostedVespa) {
- deployLogger.logApplicationPackage(Level.WARNING, "Element " + resourceLimits +
- " is not allowed, default limits will be used");
- // TODO: Throw exception when we are sure nobody is using this
- //throw new IllegalArgumentException("Element " + element + " is not allowed to be set, default limits will be used");
- return builder;
+ String message = "Element '" + resourceLimits + "' is not allowed to be set";
+ if (throwIfSpecified)
+ throw new IllegalArgumentException(message);
+ else
+ deployLogger.logApplicationPackage(Level.WARNING, message);
}
+
if (resourceLimits.child("disk") != null) {
builder.setDiskLimit(resourceLimits.childAsDouble("disk"));
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java
index 64911acae1f..f429e40baa9 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/DomTuningDispatchBuilder.java
@@ -25,12 +25,14 @@ public class DomTuningDispatchBuilder {
builder.setMaxHitsPerPartition(dispatchElement.childAsInteger("max-hits-per-partition"));
builder.setTopKProbability(dispatchElement.childAsDouble("top-k-probability"));
builder.setDispatchPolicy(dispatchElement.childAsString("dispatch-policy"));
- builder.setMinGroupCoverage(dispatchElement.childAsDouble("min-group-coverage"));
builder.setMinActiveDocsCoverage(dispatchElement.childAsDouble("min-active-docs-coverage"));
+ if (dispatchElement.child("min-group-coverage") != null)
+ logger.logApplicationPackage(Level.WARNING, "Attribute 'min-group-coverage' is deprecated and ignored: " +
+ "Use min-active-docs-coverage instead.");
if (dispatchElement.child("use-local-node") != null)
logger.logApplicationPackage(Level.WARNING, "Attribute 'use-local-node' is deprecated and ignored: " +
- "The local node will automatically be preferred when appropriate.");
+ "The local node will automatically be preferred when appropriate.");
return builder.build();
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
index 40a634fbfe8..e89d45e8b83 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorServerProducer.java
@@ -6,6 +6,8 @@ import com.yahoo.vespa.config.content.core.StorServerConfig;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
+import java.util.Optional;
+
/**
* Serves config for stor-server for storage clusters (clusters of storage nodes).
*/
@@ -14,7 +16,7 @@ public class StorServerProducer implements StorServerConfig.Producer {
StorServerProducer build(ModelContext.Properties properties, ModelElement element) {
ModelElement tuning = element.child("tuning");
- StorServerProducer producer = new StorServerProducer(ContentCluster.getClusterId(element));
+ StorServerProducer producer = new StorServerProducer(ContentCluster.getClusterId(element), properties.featureFlags());
if (tuning == null) return producer;
ModelElement merges = tuning.child("merges");
@@ -32,11 +34,15 @@ public class StorServerProducer implements StorServerConfig.Producer {
private Integer bucketDBStripeBits;
private StorServerProducer setMaxMergesPerNode(Integer value) {
- maxMergesPerNode = value;
+ if (value != null) {
+ maxMergesPerNode = value;
+ }
return this;
}
private StorServerProducer setMaxQueueSize(Integer value) {
- queueSize = value;
+ if (value != null) {
+ queueSize = value;
+ }
return this;
}
private StorServerProducer setBucketDBStripeBits(Integer value) {
@@ -44,8 +50,10 @@ public class StorServerProducer implements StorServerConfig.Producer {
return this;
}
- public StorServerProducer(String clusterName) {
+ StorServerProducer(String clusterName, ModelContext.FeatureFlags featureFlags) {
this.clusterName = clusterName;
+ maxMergesPerNode = featureFlags.maxConcurrentMergesPerNode();
+ queueSize = featureFlags.maxMergeQueueSize();
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
index 5bb57f4ff6c..d8da911e32f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
@@ -8,6 +8,7 @@ import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.vespa.model.ConfigProxy;
import com.yahoo.vespa.model.Host;
+import java.nio.ByteBuffer;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
@@ -58,6 +59,16 @@ public class FileDistributor {
return addFileReference(fileRegistry.addUri(uri), host);
}
+ /**
+ * Adds the given blob to the associated application package's registry of files and marks the file
+ * for distribution to the given host.
+ *
+ * @return the file reference created by the application package
+ */
+ public FileReference sendBlobToHost(ByteBuffer blob, Host host) {
+ return addFileReference(fileRegistry.addBlob(blob), host);
+ }
+
private FileReference addFileReference(FileReference reference, Host host) {
filesToHosts.computeIfAbsent(reference, k -> new HashSet<>()).add(host);
return reference;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java b/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java
index 384f77737c1..3e70bda216b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/DispatchGroup.java
@@ -54,19 +54,18 @@ public class DispatchGroup {
public int getSearchableCopies() { return sc.getSearchableCopies(); }
- public int getMaxNodesDownPerFixedRow() {
- return sc.getMaxNodesDownPerFixedRow();
- }
-
static class Iterator implements java.util.Iterator<SearchInterface> {
+
private java.util.Iterator<Map<Integer, SearchInterface>> it1;
private java.util.Iterator<SearchInterface> it2;
+
Iterator(Map<Integer, Map<Integer, SearchInterface> > s) {
it1 = s.values().iterator();
if (it1.hasNext()) {
it2 = it1.next().values().iterator();
}
}
+
@Override
public boolean hasNext() {
if (it2 == null) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
index 99f1b3ad34e..c99549e82e9 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/IndexedSearchCluster.java
@@ -46,7 +46,6 @@ public class IndexedSearchCluster extends SearchCluster
private String routingSelector = null;
private final List<DocumentDatabase> documentDbs = new LinkedList<>();
private final UnionConfiguration unionCfg;
- private int maxNodesDownPerFixedRow = 0;
private int searchableCopies = 1;
@@ -261,13 +260,6 @@ public class IndexedSearchCluster extends SearchCluster
return false;
}
- int getMaxNodesDownPerFixedRow() {
- return maxNodesDownPerFixedRow;
- }
-
- public void setMaxNodesDownPerFixedRow(int value) {
- maxNodesDownPerFixedRow = value;
- }
public int getSearchableCopies() {
return searchableCopies;
}
@@ -305,8 +297,6 @@ public class IndexedSearchCluster extends SearchCluster
}
if (tuning.dispatch.getMinActiveDocsCoverage() != null)
builder.minActivedocsPercentage(tuning.dispatch.getMinActiveDocsCoverage());
- if (tuning.dispatch.getMinGroupCoverage() != null)
- builder.minGroupCoverage(tuning.dispatch.getMinGroupCoverage());
if (tuning.dispatch.getDispatchPolicy() != null) {
switch (tuning.dispatch.getDispatchPolicy()) {
case ADAPTIVE:
@@ -320,7 +310,6 @@ public class IndexedSearchCluster extends SearchCluster
if (tuning.dispatch.getMaxHitsPerPartition() != null)
builder.maxHitsPerNode(tuning.dispatch.getMaxHitsPerPartition());
- builder.maxNodesDownPerGroup(rootDispatch.getMaxNodesDownPerFixedRow());
builder.searchableCopies(rootDispatch.getSearchableCopies());
if (searchCoverage != null) {
if (searchCoverage.getMinimum() != null)
@@ -336,6 +325,11 @@ public class IndexedSearchCluster extends SearchCluster
@Override
public int getRowBits() { return 8; }
+ @Override
+ public String toString() {
+ return "Indexing cluster '" + getClusterName() + "'";
+ }
+
/**
* Class used to retrieve combined configuration from multiple document databases.
* It is not a {@link com.yahoo.config.ConfigInstance.Producer} of those configs,
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
index 5e7ac0cabec..52edec7114b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
@@ -12,6 +12,7 @@ import com.yahoo.vespa.config.ConfigPayloadBuilder;
import com.yahoo.vespa.model.AbstractService;
import java.io.Serializable;
+import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
@@ -61,6 +62,20 @@ public class FileSender implements Serializable {
return fileref;
}
+ public static FileReference sendBlobToServices(ByteBuffer blob, Collection<? extends AbstractService> services) {
+ if (services.isEmpty()) {
+ throw new IllegalStateException("No service instances. Probably a standalone cluster setting up <nodes> " +
+ "using 'count' instead of <node> tags.");
+ }
+
+ FileReference fileref = null;
+ for (AbstractService service : services) {
+ // The same reference will be returned from each call.
+ fileref = service.sendBlob(blob);
+ }
+ return fileref;
+ }
+
/**
* Sends all user configured files for a producer to all given services.
*/
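A corresponding sketch for the service-wide variant added to FileSender above; the services collection is whatever AbstractService instances the caller already holds (illustrative only, not part of this change set).

    import com.yahoo.config.FileReference;
    import com.yahoo.vespa.model.AbstractService;
    import com.yahoo.vespa.model.utils.FileSender;

    import java.nio.ByteBuffer;
    import java.util.Collection;

    /** Illustrative sketch only. */
    class BlobToServicesSketch {

        // Each service.sendBlob(...) call behind sendBlobToServices returns the same
        // reference, so distributing to N services still yields a single FileReference.
        static FileReference distributeToAll(ByteBuffer blob, Collection<? extends AbstractService> services) {
            return FileSender.sendBlobToServices(blob, services);
        }
    }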
diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc
index 7f52eae6da8..91e9e069e17 100644
--- a/config-model/src/main/resources/schema/content.rnc
+++ b/config-model/src/main/resources/schema/content.rnc
@@ -83,7 +83,7 @@ ClusterControllerTuning = element cluster-controller {
DispatchTuning = element dispatch {
element max-hits-per-partition { xsd:nonNegativeInteger }? &
element dispatch-policy { string "round-robin" | string "adaptive" | string "random" }? &
- element min-group-coverage { xsd:double }? &
+ element min-group-coverage { xsd:double }? & # TODO: Ignored, remove on Vespa 8
element min-active-docs-coverage { xsd:double }? &
element top-k-probability { xsd:double }? &
element use-local-node { string "true" | string "false" }?
@@ -376,8 +376,8 @@ Tuning = element tuning {
}
TuningIoOptionsLight = string "normal" | string "directio"
-TuningIoOptionsFull = string "normal" | string "directio" | string "mmap" | string "mlock" | string "populate"
-TuningIoOptionsSearch = string "mmap" | string "mlock" | string "populate"
+TuningIoOptionsFull = string "normal" | string "directio" | string "mmap" | string "populate"
+TuningIoOptionsSearch = string "mmap" | string "populate"
TuningCompression = element compression {
element type { string "none" | string "lz4" | string "zstd" }? &
diff --git a/config-model/src/test/derived/namecollision/collision.sd b/config-model/src/test/derived/namecollision/collision.sd
new file mode 100644
index 00000000000..43dd4830204
--- /dev/null
+++ b/config-model/src/test/derived/namecollision/collision.sd
@@ -0,0 +1,8 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+search collision {
+
+ document collision {
+
+ }
+
+}
diff --git a/config-model/src/test/derived/namecollision/collisionstruct.sd b/config-model/src/test/derived/namecollision/collisionstruct.sd
new file mode 100644
index 00000000000..c98efb0b582
--- /dev/null
+++ b/config-model/src/test/derived/namecollision/collisionstruct.sd
@@ -0,0 +1,15 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+search collisionstruct {
+
+ document collisionstruct {
+
+ struct collision {
+ }
+
+ field structarray type array<collision> {
+ indexing: summary
+ }
+
+ }
+
+}
diff --git a/config-model/src/test/derived/namecollision/documentmanager.cfg b/config-model/src/test/derived/namecollision/documentmanager.cfg
new file mode 100644
index 00000000000..8d0d89dde35
--- /dev/null
+++ b/config-model/src/test/derived/namecollision/documentmanager.cfg
@@ -0,0 +1,55 @@
+enablecompression false
+datatype[].id 1381038251
+datatype[].structtype[].name "position"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].structtype[].field[].name "x"
+datatype[].structtype[].field[].datatype 0
+datatype[].structtype[].field[].detailedtype ""
+datatype[].structtype[].field[].name "y"
+datatype[].structtype[].field[].datatype 0
+datatype[].structtype[].field[].detailedtype ""
+datatype[].id -379118517
+datatype[].structtype[].name "collision.header"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].id 1557022836
+datatype[].documenttype[].name "collision"
+datatype[].documenttype[].version 0
+datatype[].documenttype[].inherits[].name "document"
+datatype[].documenttype[].inherits[].version 0
+datatype[].documenttype[].headerstruct -379118517
+datatype[].documenttype[].bodystruct 0
+datatype[].id 1557022836
+datatype[].structtype[].name "collision"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].id -1730522993
+datatype[].arraytype[].datatype 1557022836
+datatype[].id -1270379114
+datatype[].structtype[].name "collisionstruct.header"
+datatype[].structtype[].version 0
+datatype[].structtype[].compresstype NONE
+datatype[].structtype[].compresslevel 0
+datatype[].structtype[].compressthreshold 95
+datatype[].structtype[].compressminsize 800
+datatype[].structtype[].field[].name "structarray"
+datatype[].structtype[].field[].datatype -1730522993
+datatype[].structtype[].field[].detailedtype ""
+datatype[].id -1723079287
+datatype[].documenttype[].name "collisionstruct"
+datatype[].documenttype[].version 0
+datatype[].documenttype[].inherits[].name "document"
+datatype[].documenttype[].inherits[].version 0
+datatype[].documenttype[].headerstruct -1270379114
+datatype[].documenttype[].bodystruct 0
+datatype[].documenttype[].fieldsets{[]}.fields[] "structarray"
diff --git a/config-model/src/test/derived/rankexpression/rankexpression.sd b/config-model/src/test/derived/rankexpression/rankexpression.sd
index 20f9c7a9160..e615a1a7671 100644
--- a/config-model/src/test/derived/rankexpression/rankexpression.sd
+++ b/config-model/src/test/derived/rankexpression/rankexpression.sd
@@ -115,8 +115,8 @@ search rankexpression {
expression {
exp(0) +
mysum(attribute(foo),
- "attribute( bar )",
- "attribute( \"baz\" )")
+ "attribute( bar )",
+ "attribute( \"baz\" )")
}
rerank-count: 101
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
index e7622816603..8f751631fb5 100644
--- a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
@@ -54,7 +54,7 @@ public class ApplicationDeployTest {
@Test
public void testVespaModel() throws SAXException, IOException {
ApplicationPackageTester tester = ApplicationPackageTester.create(TESTDIR + "app1");
- VespaModel model = new VespaModel(tester.app());
+ new VespaModel(tester.app());
List<NamedSchema> schemas = tester.getSchemas();
assertEquals(schemas.size(), 5);
for (NamedSchema searchDefinition : schemas) {
diff --git a/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java b/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java
index f8469aa6fa1..59af3193b79 100644
--- a/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java
+++ b/config-model/src/test/java/com/yahoo/config/model/MockModelContext.java
@@ -20,8 +20,8 @@ import com.yahoo.config.model.test.MockApplicationPackage;
import java.util.Optional;
/**
-* @author hmusum
-*/
+ * @author hmusum
+ */
public class MockModelContext implements ModelContext {
private final ApplicationPackage applicationPackage;
@@ -82,4 +82,5 @@ public class MockModelContext implements ModelContext {
public Properties properties() {
return new TestProperties();
}
+
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index b1b386924d1..b0ddadf11bd 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -49,6 +49,7 @@ import static com.yahoo.config.model.test.TestUtil.joinLines;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
import static com.yahoo.vespa.model.search.NodeResourcesTuning.GB;
import static com.yahoo.vespa.model.search.NodeResourcesTuning.reservedMemoryGb;
+import static com.yahoo.vespa.model.test.utils.ApplicationPackageUtils.generateSchemas;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -2015,7 +2016,7 @@ public class ModelProvisioningTest {
}
private VespaModel createNonProvisionedModel(boolean multitenant, String hosts, String services) {
- VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
+ VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(hosts, services, generateSchemas("type1"));
ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg;
DeployState deployState = new DeployState.Builder().applicationPackage(appPkg).
properties((new TestProperties()).setMultitenant(multitenant)).
@@ -2023,7 +2024,7 @@ public class ModelProvisioningTest {
return modelCreatorWithMockPkg.create(false, deployState);
}
- private int physicalMemoryPercentage(ContainerCluster cluster) {
+ private int physicalMemoryPercentage(ContainerCluster<?> cluster) {
QrStartConfig.Builder b = new QrStartConfig.Builder();
cluster.getConfig(b);
return b.build().jvm().heapSizeAsPercentageOfPhysicalMemory();
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java
index 91e8640308a..d5ef3779493 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/RankProfileTestCase.java
@@ -102,7 +102,8 @@ public class RankProfileTestCase extends SchemaTestCase {
assertEquals(8, rankProfile.getNumThreadsPerSearch());
assertEquals(70, rankProfile.getMinHitsPerThread());
assertEquals(1200, rankProfile.getNumSearchPartitions());
- RawRankProfile rawRankProfile = new RawRankProfile(rankProfile, new QueryProfileRegistry(), new ImportedMlModels(), attributeFields, deployProperties);
+ RawRankProfile rawRankProfile = new RawRankProfile(rankProfile, new LargeRankExpressions(), new QueryProfileRegistry(),
+ new ImportedMlModels(), attributeFields, deployProperties);
if (expectedTermwiseLimit != null) {
assertTrue(findProperty(rawRankProfile.configProperties(), "vespa.matching.termwise_limit").isPresent());
assertEquals(String.valueOf(expectedTermwiseLimit), findProperty(rawRankProfile.configProperties(), "vespa.matching.termwise_limit").get());
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java
new file mode 100644
index 00000000000..fda9e6327ce
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/derived/NameCollisionTestCase.java
@@ -0,0 +1,20 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.searchdefinition.derived;
+
+import org.junit.Test;
+
+/**
+ * Verifies that a struct in a document type is preferred over another document type
+ * of the same name.
+ *
+ * @author bratseth
+ */
+public class NameCollisionTestCase extends AbstractExportingTestCase {
+
+ @Test
+ public void testNameCollision() throws Exception {
+ assertCorrectDeriving("namecollision", "collisionstruct", new TestableDeployLogger());
+ }
+
+}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java
index 2d8630e3ff7..e285d796882 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionWithTransformerTokensTestCase.java
@@ -91,7 +91,7 @@ public class RankingExpressionWithTransformerTokensTestCase {
searchBuilder.build();
Search search = searchBuilder.getSearch();
RankProfile rp = rankProfileRegistry.get(search, "my_profile");
- return new RankProfileTransformContext(rp, queryProfileRegistry, Collections.EMPTY_MAP, null, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
+ return new RankProfileTransformContext(rp, queryProfileRegistry, Collections.emptyMap(), null, Collections.emptyMap(), Collections.emptyMap());
}
}
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java
index 85ef70132b5..021d2931414 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankingExpressionsTestCase.java
@@ -7,6 +7,7 @@ import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.search.query.profile.QueryProfileRegistry;
+import com.yahoo.searchdefinition.LargeRankExpressions;
import com.yahoo.searchdefinition.RankProfile;
import com.yahoo.searchdefinition.RankProfileRegistry;
import com.yahoo.searchdefinition.SchemaTestCase;
@@ -49,7 +50,7 @@ public class RankingExpressionsTestCase extends SchemaTestCase {
functions.get("artistmatch").function().getBody().getRoot().toString());
assertEquals(0, functions.get("artistmatch").function().arguments().size());
- RawRankProfile rawRankProfile = new RawRankProfile(functionsRankProfile, new QueryProfileRegistry(),
+ RawRankProfile rawRankProfile = new RawRankProfile(functionsRankProfile, new LargeRankExpressions(), new QueryProfileRegistry(),
new ImportedMlModels(), new AttributeFields(search), deployProperties);
List<Pair<String, String>> rankProperties = rawRankProfile.configProperties();
assertEquals(6, rankProperties.size());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java b/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
index e3e0edd7896..a3e3a768b05 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/VespaModelFactoryTest.java
@@ -19,7 +19,6 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.ProvisionLogger;
-import com.yahoo.vespa.model.builder.xml.dom.NodesSpecification;
import org.junit.Before;
import org.junit.Test;
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java
index 1a7258db7e2..413daefdf75 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerClusterTest.java
@@ -1,8 +1,4 @@
-// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/*
- * Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
- */
-
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.admin.metricsproxy;
import ai.vespa.metricsproxy.http.application.ApplicationMetricsHandler;
@@ -13,14 +9,10 @@ import ai.vespa.metricsproxy.http.yamas.YamasHandler;
import ai.vespa.metricsproxy.metric.dimensions.ApplicationDimensionsConfig;
import ai.vespa.metricsproxy.metric.dimensions.PublicDimensions;
import com.yahoo.component.ComponentSpecification;
-import com.yahoo.config.model.api.HostInfo;
-import com.yahoo.config.model.deploy.DeployState;
-import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.provision.Zone;
import com.yahoo.container.core.ApplicationMetadataConfig;
import com.yahoo.container.di.config.PlatformBundlesConfig;
-import com.yahoo.search.config.QrStartConfig;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyContainerCluster.AppDimensionNames;
import com.yahoo.vespa.model.container.component.Component;
@@ -40,14 +32,11 @@ import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.T
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getApplicationDimensionsConfig;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getMetricsNodesConfig;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getModel;
-import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getQrStartConfig;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.servicesWithAdminOnly;
-import static com.yahoo.vespa.model.container.ContainerCluster.G1GC;
import static java.util.stream.Collectors.toList;
import static org.hamcrest.CoreMatchers.endsWith;
import static org.hamcrest.CoreMatchers.hasItem;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
@@ -77,42 +66,6 @@ public class MetricsProxyContainerClusterTest {
assertEquals(MockApplicationPackage.DEPLOYED_BY_USER, config.user());
}
- private void metrics_proxy_has_expected_qr_start_options(MetricsProxyModelTester.TestMode mode) {
- metrics_proxy_has_expected_qr_start_options(mode, 0);
- }
-
- private void metrics_proxy_has_expected_qr_start_options(MetricsProxyModelTester.TestMode mode, int maxHeapForAdminClusterNodes) {
- DeployState.Builder builder = new DeployState.Builder();
- if (maxHeapForAdminClusterNodes > 0) {
- builder.properties(new TestProperties().metricsProxyMaxHeapSizeInMb(maxHeapForAdminClusterNodes));
- }
-
- VespaModel model = getModel(servicesWithAdminOnly(), mode, builder);
- for (HostInfo host : model.getHosts()) {
- QrStartConfig qrStartConfig = getQrStartConfig(model, host.getHostname());
- assertEquals(32, qrStartConfig.jvm().minHeapsize());
- assertEquals(maxHeapForAdminClusterNodes > 0 ? maxHeapForAdminClusterNodes : 512, qrStartConfig.jvm().heapsize());
- assertEquals(0, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory());
- assertEquals(2, qrStartConfig.jvm().availableProcessors());
- assertFalse(qrStartConfig.jvm().verbosegc());
- assertEquals(G1GC, qrStartConfig.jvm().gcopts());
- assertEquals(512, qrStartConfig.jvm().stacksize());
- assertEquals(0, qrStartConfig.jvm().directMemorySizeCache());
- assertEquals(32, qrStartConfig.jvm().compressedClassSpaceSize());
- assertEquals(75, qrStartConfig.jvm().baseMaxDirectMemorySize());
- }
- }
-
- @Test
- public void metrics_proxy_has_expected_qr_start_options() {
- metrics_proxy_has_expected_qr_start_options(self_hosted);
- metrics_proxy_has_expected_qr_start_options(hosted);
-
- // With max heap from feature flag
- metrics_proxy_has_expected_qr_start_options(self_hosted, 123);
- metrics_proxy_has_expected_qr_start_options(hosted, 123);
- }
-
@Test
public void http_handlers_are_set_up() {
VespaModel model = getModel(servicesWithAdminOnly(), self_hosted);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
index 7c31802fb4d..01167e40411 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyModelTester.java
@@ -15,8 +15,6 @@ import com.yahoo.vespa.model.admin.monitoring.Metric;
import com.yahoo.vespa.model.admin.monitoring.MetricsConsumer;
import com.yahoo.vespa.model.test.VespaModelTester;
-import java.util.Optional;
-
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.hosted;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.TestMode.self_hosted;
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexAttributeFieldsValidatorTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexAttributeFieldsValidatorTestCase.java
index 76f34cf4a81..341a90c6618 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexAttributeFieldsValidatorTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexAttributeFieldsValidatorTestCase.java
@@ -15,6 +15,7 @@ import org.junit.rules.ExpectedException;
import org.xml.sax.SAXException;
import java.io.IOException;
+import java.util.List;
import static com.yahoo.config.model.test.TestUtil.joinLines;
@@ -99,17 +100,17 @@ public class ComplexAttributeFieldsValidatorTestCase {
"}"));
}
- private static void createModelAndValidate(String searchDefinition) throws IOException, SAXException {
- DeployState deployState = createDeployState(servicesXml(), searchDefinition);
+ private static void createModelAndValidate(String schema) throws IOException, SAXException {
+ DeployState deployState = createDeployState(servicesXml(), schema);
VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
ValidationParameters validationParameters = new ValidationParameters(CheckRouting.FALSE);
Validation.validate(model, validationParameters, deployState);
}
- private static DeployState createDeployState(String servicesXml, String searchDefinition) {
+ private static DeployState createDeployState(String servicesXml, String schema) {
ApplicationPackage app = new MockApplicationPackage.Builder()
.withServices(servicesXml)
- .withSearchDefinition(searchDefinition)
+ .withSchemas(List.of(schema))
.build();
return new DeployState.Builder().applicationPackage(app).build();
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java
index c8fdb8348c3..45f3b0fcf60 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContentTypeRemovalValidatorTest.java
@@ -45,14 +45,6 @@ public class ContentTypeRemovalValidatorTest {
tester.deploy(previous, getServices("book"), Environment.prod, removalOverride); // Allowed due to override
}
- @Test
- public void testNoOverrideNeededinDev() {
- ValidationTester tester = new ValidationTester();
-
- VespaModel previous = tester.deploy(null, getServices("music"), Environment.prod, null).getFirst();
- tester.deploy(previous, getServices("book"), Environment.dev, null);
- }
-
private static String getServices(String documentType) {
return "<services version='1.0'>" +
" <content id='test' version='1.0'>" +
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidatorTest.java
new file mode 100644
index 00000000000..ddeada8b33f
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/RedundancyIncreaseValidatorTest.java
@@ -0,0 +1,64 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.change;
+
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.vespa.model.VespaModel;
+import com.yahoo.vespa.model.application.validation.ValidationTester;
+import com.yahoo.yolean.Exceptions;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/**
+ * @author bratseth
+ */
+public class RedundancyIncreaseValidatorTest {
+
+ private final ValidationTester tester = new ValidationTester(7);
+
+ @Test
+ public void testRedundancyIncreaseValidation() {
+ VespaModel previous = tester.deploy(null, getServices(2), Environment.prod, null).getFirst();
+ try {
+ tester.deploy(previous, getServices(3), Environment.prod, null);
+ fail("Expected exception due to redundancy increase");
+ }
+ catch (IllegalArgumentException expected) {
+ assertEquals("redundancy-increase: " +
+ "Increasing redundancy from 2 to 3 in 'content cluster 'contentClusterId'. " +
+ "This is a safe operation but verify that you have room for a 3/2x increase in content size. " +
+ ValidationOverrides.toAllowMessage(ValidationId.redundancyIncrease),
+ Exceptions.toMessageString(expected));
+ }
+ }
+
+ @Test
+ public void testOverridingContentRemovalValidation() {
+ VespaModel previous = tester.deploy(null, getServices(2), Environment.prod, null).getFirst();
+ tester.deploy(previous, getServices(3), Environment.prod, redundancyIncreaseOverride); // Allowed due to override
+ }
+
+ private static String getServices(int redundancy) {
+ return "<services version='1.0'>" +
+ " <content id='contentClusterId' version='1.0'>" +
+ " <redundancy>" + redundancy + "</redundancy>" +
+ " <engine>" +
+ " <proton/>" +
+ " </engine>" +
+ " <documents>" +
+ " <document type='music' mode='index'/>" +
+ " </documents>" +
+ " <nodes count='3'/>" +
+ " </content>" +
+ "</services>";
+ }
+
+ private static final String redundancyIncreaseOverride =
+ "<validation-overrides>\n" +
+ " <allow until='2000-01-03'>redundancy-increase</allow>\n" +
+ "</validation-overrides>\n";
+
+}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidatorTest.java
new file mode 100644
index 00000000000..d59b2f7227c
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/first/RedundancyOnFirstDeploymentValidatorTest.java
@@ -0,0 +1,64 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation.first;
+
+import com.yahoo.config.application.api.ValidationId;
+import com.yahoo.config.application.api.ValidationOverrides;
+import com.yahoo.config.model.deploy.TestProperties;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.vespa.model.application.validation.ValidationTester;
+import com.yahoo.yolean.Exceptions;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/**
+ * @author bratseth
+ */
+public class RedundancyOnFirstDeploymentValidatorTest {
+
+ private final ValidationTester tester = new ValidationTester(7, false,
+ new TestProperties().setFirstTimeDeployment(true)
+ .setHostedVespa(true));
+
+ @Test
+ public void testRedundancyOnFirstDeploymentValidation() {
+ try {
+ tester.deploy(null, getServices(1), Environment.prod, null);
+ fail("Expected exception due to redundancy 1");
+ }
+ catch (IllegalArgumentException expected) {
+ assertEquals("redundancy-one: " +
+ "content cluster 'contentClusterId' has redundancy 1, which will cause it to lose data if a node fails. " +
+ "This requires an override on first deployment in a production zone. " +
+ ValidationOverrides.toAllowMessage(ValidationId.redundancyOne),
+ Exceptions.toMessageString(expected));
+ }
+ }
+
+ @Test
+ public void testOverridingRedundancyOnFirstDeploymentValidation() {
+ tester.deploy(null, getServices(1), Environment.prod, redundancyOneOverride); // Allowed due to override
+ }
+
+ private static String getServices(int redundancy) {
+ return "<services version='1.0'>" +
+ " <content id='contentClusterId' version='1.0'>" +
+ " <redundancy>" + redundancy + "</redundancy>" +
+ " <engine>" +
+ " <proton/>" +
+ " </engine>" +
+ " <documents>" +
+ " <document type='music' mode='index'/>" +
+ " </documents>" +
+ " <nodes count='3'/>" +
+ " </content>" +
+ "</services>";
+ }
+
+ private static final String redundancyOneOverride =
+ "<validation-overrides>\n" +
+ " <allow until='2000-01-03'>redundancy-one</allow>\n" +
+ "</validation-overrides>\n";
+
+}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java
index 469e4649c14..4324f257922 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterResourceLimitsTest.java
@@ -6,10 +6,14 @@ import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.searchdefinition.derived.TestableDeployLogger;
import com.yahoo.text.XML;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
+import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.w3c.dom.Document;
import java.util.Optional;
+import static org.hamcrest.CoreMatchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -48,13 +52,19 @@ public class ClusterResourceLimitsTest {
return this;
}
public ClusterResourceLimits build() {
- var builder = new ClusterResourceLimits.Builder(enableFeedBlockInDistributor, false, new BaseDeployLogger());
+ var builder = new ClusterResourceLimits.Builder(enableFeedBlockInDistributor,
+ false,
+ false,
+ new BaseDeployLogger());
builder.setClusterControllerBuilder(ctrlBuilder);
builder.setContentNodeBuilder(nodeBuilder);
return builder.build();
}
}
+ @Rule
+ public ExpectedException expectedException = ExpectedException.none();
+
@Test
public void content_node_limits_are_derived_from_cluster_controller_limits_if_not_set() {
assertLimits(0.4, 0.7, 0.7, 0.85,
@@ -120,26 +130,40 @@ public class ClusterResourceLimitsTest {
}
@Test
- // TODO: Change to expect exception being thrown when no one uses this in hosted
- public void default_resource_limits_when_hosted_and_warning_is_logged() {
+ public void exception_is_thrown_when_resource_limits_are_specified() {
TestableDeployLogger logger = new TestableDeployLogger();
- final boolean hosted = true;
- ClusterResourceLimits.Builder builder = new ClusterResourceLimits.Builder(true, hosted, logger);
- ClusterResourceLimits limits = builder.build(new ModelElement(XML.getDocument("<cluster id=\"test\">" +
- " <tuning>\n" +
- " <resource-limits>\n" +
- " <memory>0.92</memory>\n" +
- " </resource-limits>\n" +
- " </tuning>\n" +
- "</cluster>")
- .getDocumentElement()));
+ buildClusterResourceLimitsAndLogIfSpecified(logger);
+ assertEquals(1, logger.warnings.size());
+ assertEquals("Element 'resource-limits' is not allowed to be set", logger.warnings.get(0));
- assertLimits(0.8, 0.8, limits.getClusterControllerLimits());
- assertLimits(0.9, 0.9, limits.getContentNodeLimits());
+ expectedException.expect(IllegalArgumentException.class);
+ expectedException.expectMessage(containsString("Element 'resource-limits' is not allowed to be set"));
+ buildClusterResourceLimitsAndThrowIfSpecified(logger);
+ }
- assertEquals(1, logger.warnings.size());
- assertEquals("Element resource-limits is not allowed, default limits will be used", logger.warnings.get(0));
+ private void buildClusterResourceLimitsAndThrowIfSpecified(DeployLogger deployLogger) {
+ buildClusterResourceLimits(true, deployLogger);
+ }
+
+ private void buildClusterResourceLimitsAndLogIfSpecified(DeployLogger deployLogger) {
+ buildClusterResourceLimits(false, deployLogger);
+ }
+
+ private void buildClusterResourceLimits(boolean throwIfSpecified, DeployLogger deployLogger) {
+ Document clusterXml = XML.getDocument("<cluster id=\"test\">" +
+ " <tuning>\n" +
+ " <resource-limits>\n" +
+ " <memory>0.92</memory>\n" +
+ " </resource-limits>\n" +
+ " </tuning>\n" +
+ "</cluster>");
+
+ ClusterResourceLimits.Builder builder = new ClusterResourceLimits.Builder(true,
+ true,
+ throwIfSpecified,
+ deployLogger);
+ builder.build(new ModelElement(clusterXml.getDocumentElement()));
}
private void assertLimits(Double expCtrlDisk, Double expCtrlMemory, Double expNodeDisk, Double expNodeMemory, Fixture f) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java
index 8a46aaaa230..27a01750210 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java
@@ -17,12 +17,10 @@ public class DispatchTuningTest {
DispatchTuning dispatch = new DispatchTuning.Builder()
.setMaxHitsPerPartition(69)
.setDispatchPolicy("round-robin")
- .setMinGroupCoverage(7.5)
.setMinActiveDocsCoverage(12.5)
.setTopKProbability(18.3)
.build();
assertEquals(69, dispatch.getMaxHitsPerPartition().intValue());
- assertEquals(7.5, dispatch.getMinGroupCoverage().doubleValue(), 0.0);
assertEquals(12.5, dispatch.getMinActiveDocsCoverage().doubleValue(), 0.0);
assertTrue(DispatchTuning.DispatchPolicy.ROUNDROBIN == dispatch.getDispatchPolicy());
assertEquals(18.3, dispatch.getTopkProbability(), 0.0);
@@ -33,7 +31,6 @@ public class DispatchTuningTest {
.setDispatchPolicy("random")
.build();
assertTrue(DispatchTuning.DispatchPolicy.ADAPTIVE == dispatch.getDispatchPolicy());
- assertNull(dispatch.getMinGroupCoverage());
assertNull(dispatch.getMinActiveDocsCoverage());
}
@@ -43,7 +40,6 @@ public class DispatchTuningTest {
.setDispatchPolicy("adaptive")
.build();
assertTrue(DispatchTuning.DispatchPolicy.ADAPTIVE == dispatch.getDispatchPolicy());
- assertNull(dispatch.getMinGroupCoverage());
assertNull(dispatch.getMinActiveDocsCoverage());
}
@@ -53,7 +49,6 @@ public class DispatchTuningTest {
assertNull(dispatch.getMaxHitsPerPartition());
assertNull(dispatch.getDispatchPolicy());
assertNull(dispatch.getMinActiveDocsCoverage());
- assertNull(dispatch.getMinGroupCoverage());
assertNull(dispatch.getTopkProbability());
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
index 22e38b30959..10bb00168bb 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
@@ -4,9 +4,9 @@ package com.yahoo.vespa.model.content;
import com.yahoo.config.model.application.provider.BaseDeployLogger;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
-import com.yahoo.vespa.config.content.FleetcontrollerConfig;
import com.yahoo.config.model.test.MockRoot;
import com.yahoo.text.XML;
+import com.yahoo.vespa.config.content.FleetcontrollerConfig;
import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
import org.junit.Test;
import org.w3c.dom.Document;
@@ -26,6 +26,7 @@ public class FleetControllerClusterTest {
clusterElement,
new ClusterResourceLimits.Builder(enableFeedBlockInDistributor,
false,
+ false,
new BaseDeployLogger())
.build(clusterElement).getClusterControllerLimits())
.build(root.getDeployState(), root, clusterElement.getXml());
@@ -115,7 +116,7 @@ public class FleetControllerClusterTest {
assertLimits(0.8, 0.7, getConfigForResourceLimitsTuning(null, 0.7));
}
- private static double DELTA = 0.00001;
+ private static final double DELTA = 0.00001;
private void assertLimits(double expDisk, double expMemory, FleetcontrollerConfig config) {
var limits = config.cluster_feed_block_limit();
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java
index 3be592e54e7..6c8cb393d3f 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java
@@ -201,7 +201,6 @@ public class IndexedHierarchicDistributionTest {
assertEquals(8, dg.getRowBits());
assertEquals(3, dg.getNumPartitions());
assertEquals(true, dg.useFixedRowInDispatch());
- assertEquals(1, dg.getMaxNodesDownPerFixedRow());
ArrayList<SearchInterface> list = new ArrayList<>();
for(SearchInterface si : dg.getSearchersIterable()) {
list.add(si);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
index 177b86c953e..e16862230fc 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexingAndDocprocRoutingTest.java
@@ -6,7 +6,6 @@ import com.yahoo.messagebus.routing.HopBlueprint;
import com.yahoo.messagebus.routing.PolicyDirective;
import com.yahoo.messagebus.routing.Route;
import com.yahoo.messagebus.routing.RoutingTable;
-import com.yahoo.searchdefinition.parser.ParseException;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.container.ContainerCluster;
import com.yahoo.vespa.model.container.docproc.ContainerDocproc;
@@ -17,50 +16,48 @@ import com.yahoo.vespa.model.routing.Routing;
import com.yahoo.vespa.model.test.utils.ApplicationPackageUtils;
import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithMockPkg;
import org.junit.Test;
-import org.xml.sax.SAXException;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.*;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
/**
* @author Einar M R Rosenvinge
*/
public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
+
@Test
- public void oneContentOneDoctypeImplicitIndexingClusterImplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void oneContentOneDoctypeImplicitIndexingClusterImplicitIndexingChain() {
final String CLUSTERNAME = "musiccluster";
SearchClusterSpec searchCluster = new SearchClusterSpec(CLUSTERNAME, null, null);
searchCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
- VespaModel model = getIndexedContentVespaModel(Collections.<DocprocClusterSpec>emptyList(), Arrays.asList(searchCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(), List.of(searchCluster));
assertIndexing(model, new DocprocClusterSpec("container", new DocprocChainSpec("container/chain.indexing")));
assertFeedingRoute(model, CLUSTERNAME, "container/chain.indexing");
}
@Test
- public void oneContentTwoDoctypesImplicitIndexingClusterImplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void oneContentTwoDoctypesImplicitIndexingClusterImplicitIndexingChain() {
final String CLUSTERNAME = "musicandbookscluster";
SearchClusterSpec searchCluster = new SearchClusterSpec(CLUSTERNAME, null, null);
searchCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
searchCluster.searchDefs.add(new SearchDefSpec("book", "author", "title"));
- VespaModel model = getIndexedContentVespaModel(Collections.<DocprocClusterSpec>emptyList(), Arrays.asList(searchCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(), List.of(searchCluster));
assertIndexing(model, new DocprocClusterSpec("container", new DocprocChainSpec("container/chain.indexing")));
assertFeedingRoute(model, CLUSTERNAME, "container/chain.indexing");
}
@Test
- public void twoContentTwoDoctypesImplicitIndexingClusterImplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void twoContentTwoDoctypesImplicitIndexingClusterImplicitIndexingChain() {
final String MUSIC = "musiccluster";
SearchClusterSpec musicCluster = new SearchClusterSpec(MUSIC, null, null);
musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
@@ -69,10 +66,10 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
SearchClusterSpec booksCluster = new SearchClusterSpec(BOOKS, null, null);
booksCluster.searchDefs.add(new SearchDefSpec("book", "author", "title"));
- VespaModel model = getIndexedContentVespaModel(Collections.<DocprocClusterSpec>emptyList(), Arrays.asList(musicCluster, booksCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(), List.of(musicCluster, booksCluster));
assertIndexing(model,
- new DocprocClusterSpec("container", new DocprocChainSpec("container/chain.indexing")));
+ new DocprocClusterSpec("container", new DocprocChainSpec("container/chain.indexing")));
assertFeedingRoute(model, MUSIC, "container/chain.indexing");
assertFeedingRoute(model, BOOKS, "container/chain.indexing");
@@ -80,19 +77,17 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
@Test
- public void oneContentOneDoctypeExplicitIndexingClusterImplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void oneContentOneDoctypeExplicitIndexingClusterImplicitIndexingChain() {
final String CLUSTERNAME = "musiccluster";
SearchClusterSpec searchCluster = new SearchClusterSpec(CLUSTERNAME, "dpcluster", null);
searchCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
- VespaModel model = getIndexedContentVespaModel(Arrays.asList(new DocprocClusterSpec("dpcluster")), Arrays.asList(searchCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(new DocprocClusterSpec("dpcluster")), List.of(searchCluster));
assertIndexing(model, new DocprocClusterSpec("dpcluster", new DocprocChainSpec("dpcluster/chain.indexing")));
assertFeedingRoute(model, CLUSTERNAME, "dpcluster/chain.indexing");
}
@Test
- public void oneSearchOneDoctypeExplicitIndexingClusterExplicitIndexingChain()
- throws IOException, SAXException, ParseException {
+ public void oneSearchOneDoctypeExplicitIndexingClusterExplicitIndexingChain() {
String xml =
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" +
"<services version=\"1.0\">\n" +
@@ -130,8 +125,7 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
}
@Test
- public void twoContentTwoDoctypesExplicitIndexingInSameIndexingCluster()
- throws IOException, SAXException, ParseException {
+ public void twoContentTwoDoctypesExplicitIndexingInSameIndexingCluster() {
final String MUSIC = "musiccluster";
SearchClusterSpec musicCluster = new SearchClusterSpec(MUSIC, "dpcluster", null);
musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
@@ -140,8 +134,8 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
SearchClusterSpec booksCluster = new SearchClusterSpec(BOOKS, "dpcluster", null);
booksCluster.searchDefs.add(new SearchDefSpec("book", "author", "title"));
- VespaModel model = getIndexedContentVespaModel(Arrays.asList(new DocprocClusterSpec("dpcluster")),
- Arrays.asList(musicCluster, booksCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(new DocprocClusterSpec("dpcluster")),
+ List.of(musicCluster, booksCluster));
assertIndexing(model, new DocprocClusterSpec("dpcluster", new DocprocChainSpec("dpcluster/chain.indexing")));
assertFeedingRoute(model, MUSIC, "dpcluster/chain.indexing");
@@ -165,14 +159,12 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
"</services>\n";
List<String> sds = ApplicationPackageUtils.generateSchemas("music", "title", "artist");
- VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(),
- services, sds).create();
+ VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), services, sds).create();
assertIndexing(model, new DocprocClusterSpec("dokprok"));
}
@Test
- public void twoContentTwoDoctypesExplicitIndexingInDifferentIndexingClustersExplicitChain()
- throws IOException, SAXException, ParseException {
+ public void twoContentTwoDoctypesExplicitIndexingInDifferentIndexingClustersExplicitChain() {
final String MUSIC = "musiccluster";
SearchClusterSpec musicCluster = new SearchClusterSpec(MUSIC, "dpmusiccluster", "dpmusicchain");
musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
@@ -183,12 +175,8 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
DocprocClusterSpec dpMusicCluster = new DocprocClusterSpec("dpmusiccluster", new DocprocChainSpec("dpmusicchain", "indexing"));
DocprocClusterSpec dpBooksCluster = new DocprocClusterSpec("dpbookscluster", new DocprocChainSpec("dpbookschain", "indexing"));
- VespaModel model = getIndexedContentVespaModel(Arrays.asList(
- dpMusicCluster,
- dpBooksCluster),
- Arrays.asList(
- musicCluster,
- booksCluster));
+ VespaModel model = getIndexedContentVespaModel(List.of(dpMusicCluster, dpBooksCluster),
+ List.of(musicCluster, booksCluster));
//after we generated model, add indexing chains for validation:
dpMusicCluster.chains.clear();
@@ -204,52 +192,52 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
assertFeedingRoute(model, BOOKS, "dpbookscluster/chain.dpbookschain");
}
- @Test(expected = IllegalArgumentException.class)
- public void twoContentTwoDoctypesExplicitIndexingInDifferentIndexingClustersExplicitChainIncorrectInheritance()
- throws IOException, SAXException, ParseException {
- final String MUSIC = "musiccluster";
- SearchClusterSpec musicCluster = new SearchClusterSpec(MUSIC, "dpmusiccluster", "dpmusicchain");
- musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
-
- final String BOOKS = "bookscluster";
- SearchClusterSpec booksCluster = new SearchClusterSpec(BOOKS, "dpbookscluster", "dpbookschain");
- booksCluster.searchDefs.add(new SearchDefSpec("book", "author", "title"));
-
- DocprocClusterSpec dpMusicCluster = new DocprocClusterSpec("dpmusiccluster", new DocprocChainSpec("dpmusicchain"));
- DocprocClusterSpec dpBooksCluster = new DocprocClusterSpec("dpbookscluster", new DocprocChainSpec("dpbookschain"));
- VespaModel model = getIndexedContentVespaModel(Arrays.asList(
- dpMusicCluster,
- dpBooksCluster),
- Arrays.asList(
- musicCluster,
- booksCluster));
-
- //after we generated model, add indexing chains for validation:
- dpMusicCluster.chains.clear();
- dpMusicCluster.chains.add(new DocprocChainSpec("dpmusiccluster/chain.indexing"));
- dpMusicCluster.chains.add(new DocprocChainSpec("dpmusiccluster/chain.dpmusicchain"));
-
- dpBooksCluster.chains.clear();
- dpBooksCluster.chains.add(new DocprocChainSpec("dpbookscluster/chain.indexing"));
- dpBooksCluster.chains.add(new DocprocChainSpec("dpbookscluster/chain.dpbookschain"));
+ @Test
+ public void requiresIndexingInheritance() {
+ try {
+ SearchClusterSpec musicCluster = new SearchClusterSpec("musiccluster",
+ "dpmusiccluster",
+ "dpmusicchain");
+ musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
+
+ DocprocClusterSpec dpMusicCluster = new DocprocClusterSpec("dpmusiccluster", new DocprocChainSpec("dpmusicchain"));
+ getIndexedContentVespaModel(List.of(dpMusicCluster), List.of(musicCluster));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertEquals("Docproc chain 'dpmusicchain' must inherit from the 'indexing' chain", e.getMessage());
+ }
+ }
- assertIndexing(model, dpMusicCluster, dpBooksCluster);
- assertFeedingRoute(model, MUSIC, "dpmusiccluster/chain.dpmusicchain");
- assertFeedingRoute(model, BOOKS, "dpbookscluster/chain.dpbookschain");
+ @Test
+ public void indexingChainShouldNotBeTheDefaultChain() {
+ try {
+ SearchClusterSpec musicCluster = new SearchClusterSpec("musiccluster",
+ "dpmusiccluster",
+ "default");
+ musicCluster.searchDefs.add(new SearchDefSpec("music", "artist", "album"));
+
+ DocprocClusterSpec dpMusicCluster = new DocprocClusterSpec("dpmusiccluster", new DocprocChainSpec("default", "indexing"));
+ getIndexedContentVespaModel(List.of(dpMusicCluster), List.of(musicCluster));
+ fail("Expected exception");
+ }
+ catch (IllegalArgumentException e) {
+ assertTrue(e.getMessage().startsWith("Indexing cluster 'musiccluster' specifies the chain 'default' as indexing chain"));
+ }
}
private void assertIndexing(VespaModel model, DocprocClusterSpec... expectedDocprocClusters) {
Map<String, ContainerCluster> docprocClusters = getDocprocClusters(model);
- assertThat(docprocClusters.size(), is(expectedDocprocClusters.length));
+ assertEquals(expectedDocprocClusters.length, docprocClusters.size());
for (DocprocClusterSpec expectedDocprocCluster : expectedDocprocClusters) {
ContainerCluster docprocCluster = docprocClusters.get(expectedDocprocCluster.name);
- assertThat(docprocCluster, not(nullValue()));
- assertThat(docprocCluster.getName(), is(expectedDocprocCluster.name));
+ assertNotNull(docprocCluster);
+ assertEquals(expectedDocprocCluster.name, docprocCluster.getName());
ContainerDocproc containerDocproc = docprocCluster.getDocproc();
- assertThat(containerDocproc, not(nullValue()));
+ assertNotNull(containerDocproc);
List<DocprocChain> chains = containerDocproc.getChains().allChains().allComponents();
- assertThat(chains.size(), is(expectedDocprocCluster.chains.size()));
+ assertEquals(expectedDocprocCluster.chains.size(), chains.size());
List<String> actualDocprocChains = new ArrayList<>();
for (DocprocChain chain : chains) {
actualDocprocChains.add(chain.getServiceName());
@@ -373,7 +361,8 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
return retval.toString();
}
- private String createVespaServicesWithContent(List<DocprocClusterSpec> docprocClusterSpecs, List<SearchClusterSpec> searchClusterSpecs) {
+ private String createVespaServicesWithContent(List<DocprocClusterSpec> docprocClusterSpecs,
+ List<SearchClusterSpec> searchClusterSpecs) {
String mainPre =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
"<services version='1.0'>\n" +
@@ -393,7 +382,7 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
String docprocCluster = "";
docprocCluster += " <container version='1.0' id='" + docprocClusterSpec.name + "'>\n";
- if (docprocClusterSpec.chains != null && docprocClusterSpec.chains.size() > 0) {
+ if (docprocClusterSpec.chains.size() > 0) {
docprocCluster += " <document-processing>\n";
for (DocprocChainSpec chain : docprocClusterSpec.chains) {
if (chain.inherits.isEmpty()) {
@@ -465,11 +454,12 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
createVespaServicesWithContent(docprocClusterSpecs, searchClusterSpecs), sds).create();
}
- private class SearchClusterSpec {
+ private static class SearchClusterSpec {
+
private final String name;
- private List<SearchDefSpec> searchDefs = new ArrayList<>(2);
- private String indexingClusterName;
- private String indexingChainName;
+ private final List<SearchDefSpec> searchDefs = new ArrayList<>(2);
+ private final String indexingClusterName;
+ private final String indexingChainName;
private SearchClusterSpec(String name, String indexingClusterName, String indexingChainName) {
this.name = name;
@@ -478,10 +468,11 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
}
}
- private class SearchDefSpec {
- private String typeName;
- private String field1Name;
- private String field2Name;
+ private static class SearchDefSpec {
+
+ private final String typeName;
+ private final String field1Name;
+ private final String field2Name;
private SearchDefSpec(String typeName, String field1Name, String field2Name) {
this.typeName = typeName;
@@ -491,6 +482,7 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
}
private class DocprocClusterSpec {
+
private final String name;
private final List<DocprocChainSpec> chains = new ArrayList<>();
@@ -500,7 +492,8 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
}
}
- private class DocprocChainSpec {
+ private static class DocprocChainSpec {
+
private final String name;
private final List<String> inherits = new ArrayList<>();
@@ -509,4 +502,5 @@ public class IndexingAndDocprocRoutingTest extends ContentBaseTest {
this.inherits.addAll(Arrays.asList(inherits));
}
}
+
}
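
Editorial illustration, not part of the diff above: the hunks in IndexingAndDocprocRoutingTest replace @Test(expected = IllegalArgumentException.class) with an explicit try/fail/catch so the test can also assert the exact error message. A minimal self-contained sketch of that pattern in JUnit 4; the buildModel helper and its message text are hypothetical stand-ins for the real model-building code.

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.fail;

    import org.junit.Test;

    public class ExpectedMessagePatternTest {

        // Hypothetical stand-in for getIndexedContentVespaModel(...): throws when the
        // docproc chain does not inherit from 'indexing'.
        private void buildModel(String indexingChain) {
            if ( ! "indexing".equals(indexingChain))
                throw new IllegalArgumentException(
                        "Docproc chain '" + indexingChain + "' must inherit from the 'indexing' chain");
        }

        @Test
        public void failsWithExpectedMessage() {
            try {
                buildModel("dpmusicchain");
                fail("Expected exception"); // only reached if nothing was thrown
            }
            catch (IllegalArgumentException e) {
                // Unlike @Test(expected = ...), this also pins down the exact message.
                assertEquals("Docproc chain 'dpmusicchain' must inherit from the 'indexing' chain",
                             e.getMessage());
            }
        }
    }
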
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
index 5cf57430f91..9a681003293 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
@@ -17,6 +17,7 @@ import com.yahoo.vespa.config.content.PersistenceConfig;
import com.yahoo.config.model.test.MockRoot;
import com.yahoo.documentmodel.NewDocumentType;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
+import static com.yahoo.config.model.test.TestUtil.joinLines;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
import com.yahoo.vespa.model.content.storagecluster.StorageCluster;
import com.yahoo.vespa.model.content.utils.ContentClusterUtils;
@@ -44,10 +45,17 @@ public class StorageClusterTest {
return parse(xml, root);
}
- StorageCluster parse(String xml) {
- MockRoot root = new MockRoot();
+ StorageCluster parse(String xml, ModelContext.Properties properties) {
+ MockRoot root = new MockRoot("",
+ new DeployState.Builder()
+ .properties(properties)
+ .applicationPackage(new MockApplicationPackage.Builder().build())
+ .build());
return parse(xml, root);
}
+ StorageCluster parse(String xml) {
+ return parse(xml, new TestProperties());
+ }
StorageCluster parse(String xml, MockRoot root) {
root.getDeployState().getDocumentModel().getDocumentManager().add(
new NewDocumentType(new NewDocumentType.Name("music"))
@@ -61,13 +69,23 @@ public class StorageClusterTest {
return cluster.getStorageNodes();
}
+ private static String group() {
+ return joinLines(
+ "<group>",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ "</group>");
+ }
+ private static String cluster(String clusterName, String insert) {
+ return joinLines(
+ "<content id=\"" + clusterName + "\">",
+ "<documents/>",
+ insert,
+ group(),
+ "</content>");
+ }
@Test
public void testBasics() {
- StorageCluster storage = parse("<content id=\"foofighters\"><documents/>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>\n");
+ StorageCluster storage = parse(cluster("foofighters", ""));
assertEquals(1, storage.getChildren().size());
StorServerConfig.Builder builder = new StorServerConfig.Builder();
@@ -79,11 +97,7 @@ public class StorageClusterTest {
}
@Test
public void testCommunicationManagerDefaults() {
- StorageCluster storage = parse("<content id=\"foofighters\"><documents/>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>\n");
+ StorageCluster storage = parse(cluster("foofighters", ""));
StorCommunicationmanagerConfig.Builder builder = new StorCommunicationmanagerConfig.Builder();
storage.getChildren().get("0").getConfig(builder);
StorCommunicationmanagerConfig config = new StorCommunicationmanagerConfig(builder);
@@ -97,40 +111,49 @@ public class StorageClusterTest {
}
@Test
+ public void testMergeDefaults() {
+ StorServerConfig.Builder builder = new StorServerConfig.Builder();
+ parse(cluster("foofighters", "")).getConfig(builder);
+
+ StorServerConfig config = new StorServerConfig(builder);
+ assertEquals(16, config.max_merges_per_node());
+ assertEquals(1024, config.max_merge_queue_size());
+ }
+
+ @Test
public void testMerges() {
StorServerConfig.Builder builder = new StorServerConfig.Builder();
- parse("" +
- "<content id=\"foofighters\">\n" +
- " <documents/>" +
- " <tuning>" +
- " <merges max-per-node=\"1K\" max-queue-size=\"10K\"/>\n" +
- " </tuning>" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>"
+ parse(cluster("foofighters", joinLines(
+ "<tuning>",
+ " <merges max-per-node=\"1K\" max-queue-size=\"10K\"/>",
+ "</tuning>")),
+ new TestProperties().setMaxMergeQueueSize(1919).setMaxConcurrentMergesPerNode(37)
).getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
assertEquals(1024, config.max_merges_per_node());
assertEquals(1024*10, config.max_merge_queue_size());
}
+ @Test
+ public void testMergeFeatureFlags() {
+ StorServerConfig.Builder builder = new StorServerConfig.Builder();
+ parse(cluster("foofighters", ""), new TestProperties().setMaxMergeQueueSize(1919).setMaxConcurrentMergesPerNode(37)).getConfig(builder);
+
+ StorServerConfig config = new StorServerConfig(builder);
+ assertEquals(37, config.max_merges_per_node());
+ assertEquals(1919, config.max_merge_queue_size());
+ }
@Test
public void testVisitors() {
StorVisitorConfig.Builder builder = new StorVisitorConfig.Builder();
- parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " <visitors thread-count=\"7\" max-queue-size=\"1000\">\n" +
- " <max-concurrent fixed=\"42\" variable=\"100\"/>\n" +
- " </visitors>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>"
+ parse(cluster("bees",
+ joinLines(
+ "<tuning>",
+ " <visitors thread-count=\"7\" max-queue-size=\"1000\">",
+ " <max-concurrent fixed=\"42\" variable=\"100\"/>",
+ " </visitors>",
+ "</tuning>"))
).getConfig(builder);
StorVisitorConfig config = new StorVisitorConfig(builder);
@@ -143,16 +166,10 @@ public class StorageClusterTest {
@Test
public void testPersistenceThreads() {
- StorageCluster stc = parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " <persistence-threads count=\"7\"/>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ StorageCluster stc = parse(cluster("bees",joinLines(
+ "<tuning>",
+ " <persistence-threads count=\"7\"/>",
+ "</tuning>")),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
@@ -178,16 +195,10 @@ public class StorageClusterTest {
@Test
public void testResponseThreads() {
- StorageCluster stc = parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " <persistence-threads count=\"7\"/>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ StorageCluster stc = parse(cluster("bees",joinLines(
+ "<tuning>",
+ " <persistence-threads count=\"7\"/>",
+ "</tuning>")),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
@@ -201,20 +212,14 @@ public class StorageClusterTest {
@Test
public void testPersistenceThreadsOld() {
- StorageCluster stc = parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " <persistence-threads>\n" +
- " <thread lowest-priority=\"VERY_LOW\" count=\"2\"/>\n" +
- " <thread lowest-priority=\"VERY_HIGH\" count=\"1\"/>\n" +
- " <thread count=\"1\"/>\n" +
- " </persistence-threads>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ StorageCluster stc = parse(cluster("bees", joinLines(
+ "<tuning>",
+ " <persistence-threads>",
+ " <thread lowest-priority=\"VERY_LOW\" count=\"2\"/>",
+ " <thread lowest-priority=\"VERY_HIGH\" count=\"1\"/>",
+ " <thread count=\"1\"/>",
+ " </persistence-threads>",
+ "</tuning>")),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
@@ -238,15 +243,7 @@ public class StorageClusterTest {
@Test
public void testNoPersistenceThreads() {
- StorageCluster stc = parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <tuning>\n" +
- " </tuning>\n" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ StorageCluster stc = parse(cluster("bees", ""),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build())
);
@@ -267,13 +264,7 @@ public class StorageClusterTest {
}
private StorageCluster simpleCluster(ModelContext.Properties properties) {
- return parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>",
+ return parse(cluster("bees", ""),
new Flavor(new FlavorsConfig.Flavor.Builder().name("test-flavor").minCpuCores(9).build()),
properties);
}
@@ -302,14 +293,7 @@ public class StorageClusterTest {
@Test
public void integrity_checker_explicitly_disabled_when_not_running_with_vds_provider() {
StorIntegritycheckerConfig.Builder builder = new StorIntegritycheckerConfig.Builder();
- parse(
- "<cluster id=\"bees\">\n" +
- " <documents/>" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</cluster>"
- ).getConfig(builder);
+ parse(cluster("bees", "")).getConfig(builder);
StorIntegritycheckerConfig config = new StorIntegritycheckerConfig(builder);
// '-' --> don't run on the given week day
assertEquals("-------", config.weeklycycle());
@@ -317,15 +301,15 @@ public class StorageClusterTest {
@Test
public void testCapacity() {
- String xml =
- "<cluster id=\"storage\">\n" +
- " <documents/>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\" capacity=\"1.5\"/>\n" +
- " <node distribution-key=\"2\" hostalias=\"mockhost\" capacity=\"2.0\"/>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <documents/>",
+ " <group>",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\" capacity=\"1.5\"/>",
+ " <node distribution-key=\"2\" hostalias=\"mockhost\" capacity=\"2.0\"/>",
+ " </group>",
+ "</cluster>");
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
@@ -341,15 +325,7 @@ public class StorageClusterTest {
@Test
public void testRootFolder() {
- String xml =
- "<cluster id=\"storage\">\n" +
- " <documents/>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</cluster>";
-
- ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
+ ContentCluster cluster = ContentClusterUtils.createCluster(cluster("storage", ""), new MockRoot());
StorageNode node = cluster.getStorageNodes().getChildren().get("0");
@@ -372,18 +348,18 @@ public class StorageClusterTest {
@Test
public void testGenericPersistenceTuning() {
- String xml =
- "<cluster id=\"storage\">\n" +
- "<documents/>" +
- "<engine>\n" +
- " <fail-partition-on-error>true</fail-partition-on-error>\n" +
- " <revert-time>34m</revert-time>\n" +
- " <recovery-time>5d</recovery-time>\n" +
- "</engine>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <documents/>",
+ " <engine>",
+ " <fail-partition-on-error>true</fail-partition-on-error>",
+ " <revert-time>34m</revert-time>",
+ " <recovery-time>5d</recovery-time>",
+ " </engine>",
+ " <group>",
+ " node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ "</cluster>");
ContentCluster cluster = ContentClusterUtils.createCluster(xml, new MockRoot());
@@ -398,21 +374,21 @@ public class StorageClusterTest {
@Test
public void requireThatUserDoesNotSpecifyBothGroupAndNodes() {
- String xml =
- "<cluster id=\"storage\">\n" +
- "<documents/>\n" +
- "<engine>\n" +
- " <fail-partition-on-error>true</fail-partition-on-error>\n" +
- " <revert-time>34m</revert-time>\n" +
- " <recovery-time>5d</recovery-time>\n" +
- "</engine>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " <nodes>\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </nodes>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <documents/>",
+ " <engine>",
+ " <fail-partition-on-error>true</fail-partition-on-error>",
+ " <revert-time>34m</revert-time>",
+ " <recovery-time>5d</recovery-time>",
+ " </engine>",
+ " <group>",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " <nodes>",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>",
+ " </nodes>",
+ "</cluster>");
try {
final MockRoot root = new MockRoot();
@@ -429,20 +405,20 @@ public class StorageClusterTest {
@Test
public void requireThatGroupNamesMustBeUniqueAmongstSiblings() {
- String xml =
- "<cluster id=\"storage\">\n" +
- " <redundancy>2</redundancy>" +
- " <documents/>\n" +
- " <group>\n" +
- " <distribution partitions=\"*\"/>\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <redundancy>2</redundancy>",
+ " <documents/>",
+ " <group>",
+ " <distribution partitions=\"*\"/>",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " </group>",
+ "</cluster>");
try {
ContentClusterUtils.createCluster(xml, new MockRoot());
@@ -455,24 +431,24 @@ public class StorageClusterTest {
@Test
public void requireThatGroupNamesCanBeDuplicatedAcrossLevels() {
- String xml =
- "<cluster id=\"storage\">\n" +
- " <redundancy>2</redundancy>" +
- "<documents/>\n" +
- " <group>\n" +
- " <distribution partitions=\"*\"/>\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <group distribution-key=\"0\" name=\"foo\">\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " </group>\n" +
- " <group distribution-key=\"0\" name=\"foo\">\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " </group>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <redundancy>2</redundancy>",
+ " <documents/>",
+ " <group>",
+ " <distribution partitions=\"*\"/>",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <group distribution-key=\"0\" name=\"foo\">",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " </group>",
+ " <group distribution-key=\"0\" name=\"foo\">",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " </group>",
+ " </group>",
+ "</cluster>");
// Should not throw.
ContentClusterUtils.createCluster(xml, new MockRoot());
@@ -480,18 +456,18 @@ public class StorageClusterTest {
@Test
public void requireThatNestedGroupsRequireDistribution() {
- String xml =
- "<cluster id=\"storage\">\n" +
- "<documents/>\n" +
- " <group>\n" +
- " <group distribution-key=\"0\" name=\"bar\">\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " <group distribution-key=\"0\" name=\"baz\">\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " </group>\n" +
- "</cluster>";
+ String xml = joinLines(
+ "<cluster id=\"storage\">",
+ " <documents/>",
+ " <group>",
+ " <group distribution-key=\"0\" name=\"bar\">",
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " <group distribution-key=\"0\" name=\"baz\">",
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>",
+ " </group>",
+ " </group>",
+ "</cluster>");
try {
ContentClusterUtils.createCluster(xml, new MockRoot());
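
Editorial illustration, not part of the diff above: the StorageClusterTest changes replace hand-concatenated XML strings with small helpers built on joinLines. A self-contained sketch of the idea; the local joinLines below is a stand-in for com.yahoo.config.model.test.TestUtil.joinLines, included only so the example runs on its own.

    public class XmlTestHelpers {

        // Stand-in for TestUtil.joinLines: joins the given lines with newlines.
        static String joinLines(String... lines) {
            return String.join("\n", lines);
        }

        // Mirrors the group() helper added to StorageClusterTest.
        static String group() {
            return joinLines(
                    "<group>",
                    "  <node distribution-key=\"0\" hostalias=\"mockhost\"/>",
                    "</group>");
        }

        // Mirrors the cluster() helper: wraps optional tuning/engine XML in a <content> element.
        static String cluster(String clusterName, String insert) {
            return joinLines(
                    "<content id=\"" + clusterName + "\">",
                    "<documents/>",
                    insert,
                    group(),
                    "</content>");
        }

        public static void main(String[] args) {
            // Produces the same kind of snippet that parse(cluster("foofighters", "")) consumes.
            System.out.println(cluster("foofighters", ""));
        }
    }
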
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java
index 7c93b4ef02b..1426b094971 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java
@@ -15,9 +15,7 @@ import java.util.List;
import static com.yahoo.config.model.test.TestUtil.joinLines;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
/**
* @author Simon Thoresen Hult
@@ -75,7 +73,6 @@ public class ClusterTest {
joinLines(
"<max-hits-per-partition>77</max-hits-per-partition>",
"<dispatch-policy>round-robin</dispatch-policy>",
- "<min-group-coverage>13</min-group-coverage>",
"<min-active-docs-coverage>93</min-active-docs-coverage>",
"<top-k-probability>0.777</top-k-probability>"),
false);
@@ -84,7 +81,6 @@ public class ClusterTest {
DispatchConfig config = new DispatchConfig(builder);
assertEquals(2, config.searchableCopies());
assertEquals(93.0, config.minActivedocsPercentage(), DELTA);
- assertEquals(13.0, config.minGroupCoverage(), DELTA);
assertEquals(DispatchConfig.DistributionPolicy.ROUNDROBIN, config.distributionPolicy());
assertEquals(77, config.maxHitsPerNode());
assertEquals(0.777, config.topKProbability(), DELTA);
@@ -99,7 +95,6 @@ public class ClusterTest {
DispatchConfig config = new DispatchConfig(builder);
assertEquals(2, config.searchableCopies());
assertEquals(DispatchConfig.DistributionPolicy.ADAPTIVE, config.distributionPolicy());
- assertEquals(0, config.maxNodesDownPerGroup());
assertEquals(1.0, config.maxWaitAfterCoverageFactor(), DELTA);
assertEquals(0, config.minWaitAfterCoverageFactor(), DELTA);
assertEquals(8, config.numJrtConnectionsPerNode());
@@ -179,7 +174,7 @@ public class ClusterTest {
" </tuning>",
" </content>",
"</services>"))
- .withSchemas(ApplicationPackageUtils.generateSearchDefinition("my_document"))
+ .withSchemas(ApplicationPackageUtils.generateSchemas("my_document"))
.build();
List<Content> contents = new TestDriver().buildModel(app).getConfigModels(Content.class);
assertEquals(1, contents.size());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java
index abfb03e41dd..7533bf1ef9d 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/DomDispatchTuningBuilderTest.java
@@ -44,7 +44,6 @@ public class DomDispatchTuningBuilderTest {
" </tuning>" +
"</content>");
assertNull(dispatch.getMaxHitsPerPartition());
- assertNull(dispatch.getMinGroupCoverage());
assertNull(dispatch.getMinActiveDocsCoverage());
assertNull(dispatch.getDispatchPolicy());
assertNull(dispatch.getTopkProbability());
@@ -57,14 +56,12 @@ public class DomDispatchTuningBuilderTest {
" <tuning>" +
" <dispatch>" +
" <max-hits-per-partition>69</max-hits-per-partition>" +
- " <min-group-coverage>7.5</min-group-coverage>" +
" <min-active-docs-coverage>12.5</min-active-docs-coverage>" +
" <top-k-probability>0.999</top-k-probability>" +
" </dispatch>" +
" </tuning>" +
"</content>");
assertEquals(69, dispatch.getMaxHitsPerPartition().intValue());
- assertEquals(7.5, dispatch.getMinGroupCoverage().doubleValue(), 0.0);
assertEquals(12.5, dispatch.getMinActiveDocsCoverage().doubleValue(), 0.0);
assertEquals(0.999, dispatch.getTopkProbability().doubleValue(), 0.0);
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTestCase.java
index 9eb7ca0ac02..2b36bfc47b2 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTestCase.java
@@ -192,7 +192,7 @@ public class VespaModelTestCase {
" </documents>" +
"</content>" +
"</services>",
- ApplicationPackageUtils.generateSearchDefinition("music"))
+ ApplicationPackageUtils.generateSchemas("music"))
.create();
MessagebusConfig.Builder mBusB = new MessagebusConfig.Builder();
model.getConfig(mBusB, "client");
@@ -316,10 +316,18 @@ public class VespaModelTestCase {
.build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
Validation.validate(model, new ValidationParameters(), deployState);
+ assertContainsWarning(logger.msgs, "Directory searchdefinitions/ should not be used for schemas, use schemas/ instead");
+ }
- assertEquals(3, logger.msgs.size());
- assertEquals("WARNING", logger.msgs.get(1).getFirst().getName());
- assertEquals("Directory searchdefinitions/ should not be used for schemas, use schemas/ instead", logger.msgs.get(1).getSecond());
+ private void assertContainsWarning(List<Pair<Level,String>> msgs, String text) {
+ boolean foundCorrectWarning = false;
+ for (var msg : msgs)
+ if (msg.getFirst().getName().equals("WARNING") && msg.getSecond().equals(text)) {
+ foundCorrectWarning = true;
+ }
+ if (! foundCorrectWarning) for (var msg : msgs) System.err.println("MSG: "+msg);
+ assertTrue(msgs.size() > 0);
+ assertTrue(foundCorrectWarning);
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
index ba975e52d1a..7e34e9efbbf 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/test/VespaModelTester.java
@@ -21,7 +21,6 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.ProvisionLogger;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.model.VespaModel;
-import com.yahoo.vespa.model.test.utils.ApplicationPackageUtils;
import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithMockPkg;
import java.util.ArrayList;
@@ -31,6 +30,8 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
+import static com.yahoo.vespa.model.test.utils.ApplicationPackageUtils.generateSchemas;
+
/**
* Helper class which sets up a system with multiple hosts.
* Usage:
@@ -168,7 +169,7 @@ public class VespaModelTester {
boolean alwaysReturnOneNode,
int startIndexForClusters, Optional<VespaModel> previousModel,
DeployState.Builder deployStatebuilder, String ... retiredHostNames) {
- VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(null, services, ApplicationPackageUtils.generateSearchDefinition("type1"));
+ VespaModelCreatorWithMockPkg modelCreatorWithMockPkg = new VespaModelCreatorWithMockPkg(null, services, generateSchemas("type1"));
ApplicationPackage appPkg = modelCreatorWithMockPkg.appPkg;
provisioner = hosted ? new ProvisionerAdapter(new InMemoryProvisioner(hostsByResources,
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/utils/ApplicationPackageUtils.java b/config-model/src/test/java/com/yahoo/vespa/model/test/utils/ApplicationPackageUtils.java
index df62a3bff07..1f7deaf1991 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/test/utils/ApplicationPackageUtils.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/test/utils/ApplicationPackageUtils.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.test.utils;
import java.util.ArrayList;
@@ -43,10 +43,6 @@ public class ApplicationPackageUtils {
"}";
}
- public static List<String> generateSearchDefinition(String name) {
- return generateSchemas(name);
- }
-
public static List<String> generateSchemas(String ... sdNames) {
return generateSchemas(Arrays.asList(sdNames));
}
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java b/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java
index e9fe669269c..8f71d4f5061 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/ApplicationLockException.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.provision;
/**
@@ -13,4 +13,8 @@ public class ApplicationLockException extends RuntimeException {
super(e);
}
+ public ApplicationLockException(String message) {
+ super(message);
+ }
+
}
diff --git a/config-proxy/pom.xml b/config-proxy/pom.xml
index a02915c49a0..124d72f092d 100644
--- a/config-proxy/pom.xml
+++ b/config-proxy/pom.xml
@@ -73,6 +73,12 @@
<artifactId>slf4j-api</artifactId>
<scope>compile</scope>
</dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>container-apache-http-client-bundle</artifactId>
+ <version>${project.version}</version>
+ <scope>compile</scope>
+ </dependency>
</dependencies>
<build>
<plugins>
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java
index 047cec87ed7..5ad9fabcb61 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/ProxyServer.java
@@ -56,7 +56,7 @@ public class ProxyServer implements Runnable {
ProxyServer(Spec spec, ConfigSourceSet source, MemoryCache memoryCache, ConfigSourceClient configClient) {
this.configSource = source;
- supervisor = new Supervisor(new Transport("proxy-server", JRT_TRANSPORT_THREADS)).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("proxy-server", JRT_TRANSPORT_THREADS)).setDropEmptyBuffers(true);
log.log(Level.FINE, () -> "Using config source '" + source);
this.memoryCache = memoryCache;
this.rpcServer = createRpcServer(spec);
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java
index 2767d2c8027..1dba56805a5 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionAndUrlDownload.java
@@ -1,4 +1,4 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.concurrent.DaemonThreadFactory;
@@ -26,7 +26,9 @@ public class FileDistributionAndUrlDownload {
new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("file references and downloads cleanup"));
public FileDistributionAndUrlDownload(Supervisor supervisor, ConfigSourceSet source) {
- fileDistributionRpcServer = new FileDistributionRpcServer(supervisor, new FileDownloader(new JRTConnectionPool(source)));
+ fileDistributionRpcServer =
+ new FileDistributionRpcServer(supervisor,
+ new FileDownloader(new JRTConnectionPool(source, "filedistribution-jrt-pool-")));
urlDownloadRpcServer = new UrlDownloadRpcServer(supervisor);
cleanupExecutor.scheduleAtFixedRate(new CachedFilesMaintainer(), delay.toSeconds(), delay.toSeconds(), TimeUnit.SECONDS);
}
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java
index a25e86926a1..ea3a69c54a2 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileDistributionRpcServer.java
@@ -1,4 +1,4 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.concurrent.DaemonThreadFactory;
@@ -41,7 +41,7 @@ class FileDistributionRpcServer {
FileDistributionRpcServer(Supervisor supervisor, FileDownloader downloader) {
this.supervisor = supervisor;
this.downloader = downloader;
- declareFileDistributionMethods();
+ declareMethods();
}
void close() {
@@ -53,7 +53,7 @@ class FileDistributionRpcServer {
}
}
- private void declareFileDistributionMethods() {
+ private void declareMethods() {
// Legacy method, needs to be the same name as used in filedistributor
supervisor.addMethod(new Method("waitFor", "s", "s", this::getFile)
.methodDesc("get path to file reference")
diff --git a/config-proxy/src/main/sh/vespa-config-loadtester.sh b/config-proxy/src/main/sh/vespa-config-loadtester.sh
index f7cecbf292f..38be1cf7b33 100644
--- a/config-proxy/src/main/sh/vespa-config-loadtester.sh
+++ b/config-proxy/src/main/sh/vespa-config-loadtester.sh
@@ -79,4 +79,4 @@ export ROOT
echo "# Using CLASSPATH=$CLASSPATH, args=$@"
-java -cp $CLASSPATH:$ROOT/lib/jars/config-proxy-jar-with-dependencies.jar com.yahoo.vespa.config.benchmark.LoadTester "$@"
+java -Xms1g -Xmx1g -cp $CLASSPATH:$ROOT/lib/jars/config-proxy-jar-with-dependencies.jar com.yahoo.vespa.config.benchmark.LoadTester "$@"
diff --git a/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java b/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
index 26eafb67c1b..b5147075972 100644
--- a/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
+++ b/config/src/main/java/com/yahoo/vespa/config/JRTConnectionPool.java
@@ -29,6 +29,7 @@ public class JRTConnectionPool implements ConnectionPool {
private final Supervisor supervisor;
private final Map<String, JRTConnection> connections = new LinkedHashMap<>();
+ private final String poolName;
// The config sources used by this connection pool.
private ConfigSourceSet sourceSet = null;
@@ -37,11 +38,16 @@ public class JRTConnectionPool implements ConnectionPool {
private volatile JRTConnection currentConnection;
public JRTConnectionPool(ConfigSourceSet sourceSet) {
- supervisor = new Supervisor(new Transport("config-jrtpool-" + sourceSet.hashCode())).useSmallBuffers();
+ this(sourceSet, "config-jrt-pool-" + sourceSet.hashCode());
+ }
+
+ public JRTConnectionPool(ConfigSourceSet sourceSet, String poolName) {
+ this.poolName = poolName;
+ supervisor = new Supervisor(new Transport(poolName)).setDropEmptyBuffers(true);
addSources(sourceSet);
}
- public JRTConnectionPool(List<String> addresses) {
+ JRTConnectionPool(List<String> addresses) {
this(new ConfigSourceSet(addresses));
}
@@ -131,7 +137,7 @@ public class JRTConnectionPool implements ConnectionPool {
}
public String toString() {
- StringBuilder sb = new StringBuilder();
+ StringBuilder sb = new StringBuilder(poolName + ": ");
synchronized (connections) {
for (JRTConnection conn : connections.values()) {
sb.append(conn.toString());
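
Editorial illustration, not part of the diff above: the JRTConnectionPool change introduces a pool name that feeds both the Transport thread name and toString(), with the old constructor delegating to a new one that takes the name explicitly. A minimal sketch of that constructor-delegation pattern; the class and field names below are illustrative, not the real JRT types.

    public class NamedPoolSketch {

        private final String poolName;

        // Default name derived from the source set, as in JRTConnectionPool(ConfigSourceSet).
        NamedPoolSketch(int sourceSetHash) {
            this(sourceSetHash, "config-jrt-pool-" + sourceSetHash);
        }

        // Explicit name, as in JRTConnectionPool(ConfigSourceSet, String); callers such as the
        // file distribution code can pass their own prefix, e.g. "filedistribution-jrt-pool-".
        NamedPoolSketch(int sourceSetHash, String poolName) {
            this.poolName = poolName;
        }

        @Override
        public String toString() {
            // The pool name now prefixes toString(), so log output is attributable to a specific pool.
            return poolName + ": <connections>";
        }
    }
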
diff --git a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java
index 3a8d80e5ffe..26c5889c579 100644
--- a/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java
+++ b/config/src/main/java/com/yahoo/vespa/config/benchmark/LoadTester.java
@@ -1,4 +1,4 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.benchmark;
import com.yahoo.collections.Tuple2;
@@ -33,8 +33,10 @@ import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ThreadLocalRandom;
+import static com.yahoo.vespa.config.ConfigKey.createFull;
+
/**
- * A config client for generating load against a config server or config proxy.
+ * A client for generating load (config requests) against a config server or config proxy.
* <p>
* Log messages from a run will have a # first in the line, the end result will not.
*
@@ -42,13 +44,30 @@ import java.util.concurrent.ThreadLocalRandom;
*/
public class LoadTester {
- private static boolean debug = false;
private final Transport transport = new Transport("rpc-client");
protected Supervisor supervisor = new Supervisor(transport);
private List<ConfigKey<?>> configs = new ArrayList<>();
private Map<ConfigDefinitionKey, Tuple2<String, String[]>> defs = new HashMap<>();
private final CompressionType compressionType = JRTConfigRequestFactory.getCompressionType();
+ private final String host;
+ private final int port;
+ private final int iterations;
+ private final int threads;
+ private final String configFile;
+ private final String defPath;
+ private final boolean debug;
+
+ LoadTester(String host, int port, int iterations, int threads, String configFile, String defPath, boolean debug) {
+ this.host = host;
+ this.port = port;
+ this.iterations = iterations;
+ this.threads = threads;
+ this.configFile = configFile;
+ this.defPath = defPath;
+ this.debug = debug;
+ }
+
/**
* @param args command-line arguments
*/
@@ -59,28 +78,29 @@ public class LoadTester {
parser.addRequiredBinarySwitch("-p", "port");
parser.addRequiredBinarySwitch("-i", "iterations per thread");
parser.addRequiredBinarySwitch("-t", "threads");
- parser.addLegalBinarySwitch("-l", "configs file, on form name,configid. (To get list: vespa-configproxy-cmd -m cache | cut -d ',' -f1-2)");
+ parser.addLegalBinarySwitch("-l", "config file, on form name,configid. (To get list: vespa-configproxy-cmd -m cache | cut -d ',' -f1-2)");
parser.addLegalBinarySwitch("-dd", "dir with def files, must be of form name.def");
parser.parse();
String host = parser.getBinarySwitches().get("-c");
int port = Integer.parseInt(parser.getBinarySwitches().get("-p"));
int iterations = Integer.parseInt(parser.getBinarySwitches().get("-i"));
int threads = Integer.parseInt(parser.getBinarySwitches().get("-t"));
- String configsList = parser.getBinarySwitches().get("-l");
+ String configFile = parser.getBinarySwitches().get("-l");
String defPath = parser.getBinarySwitches().get("-dd");
- debug = parser.getUnarySwitches().contains("-d");
- LoadTester loadTester = new LoadTester();
- loadTester.runLoad(host, port, iterations, threads, configsList, defPath);
+ boolean debug = parser.getUnarySwitches().contains("-d");
+ new LoadTester(host, port, iterations, threads, configFile, defPath, debug)
+ .runLoad();
}
- private void runLoad(String host, int port, int iterations, int threads,
- String configsList, String defPath) throws IOException, InterruptedException {
- configs = readConfigs(configsList);
+ private void runLoad() throws IOException, InterruptedException {
+ configs = readConfigs(configFile);
defs = readDefs(defPath);
+ validateConfigs(configs, defs);
+
List<LoadThread> threadList = new ArrayList<>();
- long start = System.currentTimeMillis();
Metrics m = new Metrics();
+ long startInNanos = System.nanoTime();
for (int i = 0; i < threads; i++) {
LoadThread lt = new LoadThread(iterations, host, port);
threadList.add(lt);
@@ -91,20 +111,25 @@ public class LoadTester {
lt.join();
m.merge(lt.metrics);
}
- printOutput(start, threads, iterations, m);
+ float durationInSeconds = (float) (System.nanoTime() - startInNanos) / 1_000_000_000f;
+
+ printResults(durationInSeconds, threads, iterations, m);
}
private Map<ConfigDefinitionKey, Tuple2<String, String[]>> readDefs(String defPath) throws IOException {
Map<ConfigDefinitionKey, Tuple2<String, String[]>> ret = new HashMap<>();
if (defPath == null) return ret;
+
File defDir = new File(defPath);
if (!defDir.isDirectory()) {
- System.out.println("# Given def file dir is not a directory: " + defDir.getPath() + " , will not send def contents in requests.");
+ System.out.println("# Given def file dir is not a directory: " +
+ defDir.getPath() + " , will not send def contents in requests.");
return ret;
}
- final File[] files = defDir.listFiles();
+ File[] files = defDir.listFiles();
if (files == null) {
- System.out.println("# Given def file dir has no files: " + defDir.getPath() + " , will not send def contents in requests.");
+ System.out.println("# Given def file dir has no files: " +
+ defDir.getPath() + " , will not send def contents in requests.");
return ret;
}
for (File f : files) {
@@ -118,20 +143,17 @@ public class LoadTester {
return ret;
}
- private void printOutput(long start, long threads, long iterations, Metrics metrics) {
- long stop = System.currentTimeMillis();
- float durSec = (float) (stop - start) / 1000f;
+ private void printResults(float durationInSeconds, long threads, long iterations, Metrics metrics) {
StringBuilder sb = new StringBuilder();
- sb.append("#reqs/sec #bytes/sec #avglatency #minlatency #maxlatency #failedrequests\n");
- sb.append(((float) (iterations * threads)) / durSec).append(",");
- sb.append((metrics.totBytes / durSec)).append(",");
- sb.append((metrics.totLatency / threads / iterations)).append(",");
+ sb.append("#reqs/sec #avglatency #minlatency #maxlatency #failedrequests\n");
+ sb.append(((float) (iterations * threads)) / durationInSeconds).append(",");
+ sb.append((metrics.latencyInMillis / threads / iterations)).append(",");
sb.append((metrics.minLatency)).append(",");
sb.append((metrics.maxLatency)).append(",");
sb.append((metrics.failedRequests));
sb.append("\n");
sb.append('#').append(TransportMetrics.getInstance().snapshot().toString()).append('\n');
- System.out.println(sb.toString());
+ System.out.println(sb);
}
private List<ConfigKey<?>> readConfigs(String configsList) throws IOException {
@@ -149,25 +171,32 @@ public class LoadTester {
return ret;
}
+ private void validateConfigs(List<ConfigKey<?>> configs, Map<ConfigDefinitionKey, Tuple2<String, String[]>> defs) {
+ for (ConfigKey<?> configKey : configs) {
+ ConfigDefinitionKey dKey = new ConfigDefinitionKey(configKey);
+ Tuple2<String, String[]> defContent = defs.get(dKey);
+ if (defContent == null)
+ throw new IllegalArgumentException("No matching config definition for " + configKey +
+ ", known config definitions: " + defs.keySet());
+ }
+ }
+
private static class Metrics {
- long totBytes = 0;
- long totLatency = 0;
+ long latencyInMillis = 0;
long failedRequests = 0;
long maxLatency = Long.MIN_VALUE;
long minLatency = Long.MAX_VALUE;
public void merge(Metrics m) {
- this.totBytes += m.totBytes;
- this.totLatency += m.totLatency;
+ this.latencyInMillis += m.latencyInMillis;
this.failedRequests += m.failedRequests;
updateMin(m.minLatency);
updateMax(m.maxLatency);
}
- public void update(long bytes, long latency) {
- this.totBytes += bytes;
- this.totLatency += latency;
+ public void update(long latency) {
+ this.latencyInMillis += latency;
updateMin(latency);
updateMax(latency);
}
@@ -189,83 +218,70 @@ public class LoadTester {
private class LoadThread extends Thread {
- int iterations = 0;
- String host = "";
- int port = 0;
- Metrics metrics = new Metrics();
+ private final int iterations;
+ private final Spec spec;
+ private final Metrics metrics = new Metrics();
LoadThread(int iterations, String host, int port) {
this.iterations = iterations;
- this.host = host;
- this.port = port;
+ this.spec = new Spec(host, port);
}
@Override
public void run() {
- Spec spec = new Spec(host, port);
Target target = connect(spec);
- ConfigKey<?> reqKey;
- JRTClientConfigRequest request;
- int totConfs = configs.size();
- boolean reconnCycle = false; // to log reconn message only once, for instance at restart
+ int numberOfConfigs = configs.size();
for (int i = 0; i < iterations; i++) {
- reqKey = configs.get(ThreadLocalRandom.current().nextInt(totConfs));
- ConfigDefinitionKey dKey = new ConfigDefinitionKey(reqKey);
- Tuple2<String, String[]> defContent = defs.get(dKey);
- if (defContent == null && defs.size() > 0) { // Only complain if we actually did run with a def dir
- System.out.println("# No def found for " + dKey + ", not sending in request.");
- }
- request = getRequest(ConfigKey.createFull(reqKey.getName(), reqKey.getConfigId(), reqKey.getNamespace(), defContent.first), defContent.second);
- if (debug) System.out.println("# Requesting: " + reqKey);
- long start = System.currentTimeMillis();
+ ConfigKey<?> reqKey = configs.get(ThreadLocalRandom.current().nextInt(numberOfConfigs));
+ JRTClientConfigRequest request = createRequest(reqKey);
+ if (debug)
+ System.out.println("# Requesting: " + reqKey);
+
+ long start = System.nanoTime();
target.invokeSync(request.getRequest(), 10.0);
- long end = System.currentTimeMillis();
+ long durationInMillis = (System.nanoTime() - start) / 1_000_000;
+
if (request.isError()) {
- if ("Connection lost".equals(request.errorMessage()) || "Connection down".equals(request.errorMessage())) {
- try {
- Thread.sleep(100);
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
- if (!reconnCycle) {
- System.out.println("# Connection lost, reconnecting...");
- reconnCycle = true;
- }
- target.close();
- target = connect(spec);
- } else {
- System.err.println(request.errorMessage());
- }
- metrics.incFailedRequests();
+ target = handleError(request, spec, target);
} else {
- if (reconnCycle) {
- reconnCycle = false;
- System.out.println("# Connection OK");
- }
- long duration = end - start;
-
- if (debug) {
- String payload = request.getNewPayload().toString();
- metrics.update(payload.length(), duration); // assume 8 bit...
- System.out.println("# Ret: " + payload);
- } else {
- metrics.update(0, duration);
- }
+ metrics.update(durationInMillis);
}
}
}
- private JRTClientConfigRequest getRequest(ConfigKey<?> reqKey, String[] defContent) {
- if (defContent == null) defContent = new String[0];
+ private JRTClientConfigRequest createRequest(ConfigKey<?> reqKey) {
+ ConfigDefinitionKey dKey = new ConfigDefinitionKey(reqKey);
+ Tuple2<String, String[]> defContent = defs.get(dKey);
+ ConfigKey<?> fullKey = createFull(reqKey.getName(), reqKey.getConfigId(), reqKey.getNamespace(), defContent.first);
+
final long serverTimeout = 1000;
- return JRTClientConfigRequestV3.createWithParams(reqKey, DefContent.fromList(Arrays.asList(defContent)),
- ConfigUtils.getCanonicalHostName(), "", 0, serverTimeout, Trace.createDummy(),
+ return JRTClientConfigRequestV3.createWithParams(fullKey, DefContent.fromList(List.of(defContent.second)),
+ ConfigUtils.getCanonicalHostName(), "",
+ 0, serverTimeout, Trace.createDummy(),
compressionType, Optional.empty());
}
private Target connect(Spec spec) {
return supervisor.connect(spec);
}
+
+ private Target handleError(JRTClientConfigRequest request, Spec spec, Target target) {
+ if (List.of("Connection lost", "Connection down").contains(request.errorMessage())) {
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ System.out.println("# Connection lost, reconnecting...");
+ target.close();
+ target = connect(spec);
+ } else {
+ System.err.println(request.errorMessage());
+ }
+ metrics.incFailedRequests();
+ return target;
+ }
+
}
}
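
Editorial illustration, not part of the diff above: the LoadTester rewrite switches from System.currentTimeMillis() to System.nanoTime() for both per-request latency and total run duration. A standalone sketch of the timing arithmetic as it appears in the new code; the Thread.sleep stands in for the actual RPC call.

    public class LatencyTimingSketch {

        public static void main(String[] args) throws InterruptedException {
            long startInNanos = System.nanoTime();

            Thread.sleep(25); // stand-in for target.invokeSync(request.getRequest(), 10.0)

            // Per-request latency in whole milliseconds, as passed to Metrics.update(long).
            long durationInMillis = (System.nanoTime() - startInNanos) / 1_000_000;

            // Total wall-clock time in seconds, as used when computing requests per second.
            float durationInSeconds = (float) (System.nanoTime() - startInNanos) / 1_000_000_000f;

            System.out.println("latency: " + durationInMillis + " ms, total: " + durationInSeconds + " s");
        }
    }
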
diff --git a/config/src/vespa/config/common/configvalue.cpp b/config/src/vespa/config/common/configvalue.cpp
index 8857eacc987..d5c0c2047df 100644
--- a/config/src/vespa/config/common/configvalue.cpp
+++ b/config/src/vespa/config/common/configvalue.cpp
@@ -57,8 +57,12 @@ ConfigValue::getLegacyFormat() const
const vespalib::string
ConfigValue::asJson() const {
- const vespalib::slime::Inspector & payload(_payload->getSlimePayload());
- return payload.toString();
+ if (_payload) {
+ const vespalib::slime::Inspector & payload(_payload->getSlimePayload());
+ return payload.toString();
+ } else {
+ return {};
+ }
}
void
@@ -74,7 +78,9 @@ ConfigValue::serializeV1(vespalib::slime::Cursor & cursor) const
void
ConfigValue::serializeV2(vespalib::slime::Cursor & cursor) const
{
- copySlimeObject(_payload->getSlimePayload(), cursor);
+ if (_payload) {
+ copySlimeObject(_payload->getSlimePayload(), cursor);
+ }
}
}
diff --git a/config/src/vespa/config/subscription/configsubscriptionset.cpp b/config/src/vespa/config/subscription/configsubscriptionset.cpp
index 949e36c046b..659458fb533 100644
--- a/config/src/vespa/config/subscription/configsubscriptionset.cpp
+++ b/config/src/vespa/config/subscription/configsubscriptionset.cpp
@@ -40,7 +40,7 @@ ConfigSubscriptionSet::acquireSnapshot(milliseconds timeoutInMillis, bool ignore
int64_t lastGeneration = _currentGeneration;
bool inSync = false;
- LOG(debug, "Going into nextConfig loop, time left is %" PRId64, timeLeft.count());
+ LOG(spam, "Going into nextConfig loop, time left is %" PRId64, timeLeft.count());
while (!isClosed() && (timeLeft.count() >= 0) && !inSync) {
size_t numChanged = 0;
size_t numGenerationChanged = 0;
@@ -62,7 +62,7 @@ ConfigSubscriptionSet::acquireSnapshot(milliseconds timeoutInMillis, bool ignore
} else {
LOG(spam, "Config subscription did not change, id(%s), defname(%s)", key.getConfigId().c_str(), key.getDefName().c_str());
}
- LOG(spam, "Previous generation is %" PRId64 ", updates is %" PRId64, generation, subscription->getGeneration());
+ LOG(spam, "Previous generation is %" PRId64 ", updates is %" PRId64, lastGeneration, subscription->getGeneration());
if (isGenerationNewer(subscription->getGeneration(), _currentGeneration)) {
numGenerationChanged++;
}
diff --git a/configd/src/apps/cmd/main.cpp b/configd/src/apps/cmd/main.cpp
index 33b4aa8111d..1b90483b65d 100644
--- a/configd/src/apps/cmd/main.cpp
+++ b/configd/src/apps/cmd/main.cpp
@@ -13,6 +13,24 @@
#include <vespa/log/log.h>
LOG_SETUP("vespa-sentinel-cmd");
+namespace {
+struct Method {
+ const char * name;
+ const char * rpcMethod;
+ bool noArgNeeded;
+ bool needsTimeoutArg;
+};
+const Method methods[] = {
+ { "list", "sentinel.ls", true, false },
+ { "restart", "sentinel.service.restart", false, false },
+ { "start", "sentinel.service.start", false, false },
+ { "stop", "sentinel.service.stop", false, false },
+ { "connectivity", "sentinel.report.connectivity", true, true }
+};
+
+}
+
+
class Cmd
{
private:
@@ -22,7 +40,7 @@ private:
public:
Cmd() : _server(), _target(nullptr) {}
~Cmd();
- int run(const char *cmd, const char *arg);
+ int run(const Method &cmd, const char *arg);
void initRPC(const char *spec);
void finiRPC();
};
@@ -41,6 +59,7 @@ void usage()
fprintf(stderr, " restart {service}\n");
fprintf(stderr, " start {service}\n");
fprintf(stderr, " stop {service}\n");
+ fprintf(stderr, " connectivity [milliseconds]\n");
}
void
@@ -63,7 +82,7 @@ Cmd::finiRPC()
int
-Cmd::run(const char *cmd, const char *arg)
+Cmd::run(const Method &cmd, const char *arg)
{
int retval = 0;
try {
@@ -74,33 +93,61 @@ Cmd::run(const char *cmd, const char *arg)
return 2;
}
FRT_RPCRequest *req = _server->supervisor().AllocRPCRequest();
- req->SetMethodName(cmd);
+ req->SetMethodName(cmd.rpcMethod);
- if (arg) {
+ int pingTimeoutMs = 5000;
+ if (cmd.needsTimeoutArg) {
+ if (arg) {
+ pingTimeoutMs = atoi(arg);
+ }
+ req->GetParams()->AddInt32(pingTimeoutMs);
+ } else if (arg) {
// one param
req->GetParams()->AddString(arg);
}
- _target->InvokeSync(req, 5.0);
+ _target->InvokeSync(req, 2 * pingTimeoutMs * 0.001);
if (req->IsError()) {
fprintf(stderr, "vespa-sentinel-cmd '%s' error %d: %s\n",
- cmd, req->GetErrorCode(), req->GetErrorMessage());
+ cmd.name, req->GetErrorCode(), req->GetErrorMessage());
retval = 1;
} else {
FRT_Values &answer = *(req->GetReturn());
const char *atypes = answer.GetTypeString();
- fprintf(stderr, "vespa-sentinel-cmd '%s' OK.\n", cmd);
- uint32_t idx = 0;
- while (atypes != nullptr && *atypes != '\0') {
- switch (*atypes) {
- case 's':
+ fprintf(stderr, "vespa-sentinel-cmd '%s' OK.\n", cmd.name);
+ if (atypes && (strcmp(atypes, "SS") == 0)) {
+ uint32_t numHosts = answer[0]._string_array._len;
+ uint32_t numStats = answer[1]._string_array._len;
+ FRT_StringValue *hosts = answer[0]._string_array._pt;
+ FRT_StringValue *stats = answer[1]._string_array._pt;
+ uint32_t ml = 0;
+ uint32_t j;
+ for (j = 0; j < numHosts; ++j) {
+ uint32_t hl = strlen(hosts[j]._str);
+ if (hl > ml) ml = hl;
+ }
+ for (j = 0; j < numHosts && j < numStats; ++j) {
+ printf("%-*s -> %s\n", ml, hosts[j]._str, stats[j]._str);
+ }
+ for (; j < numHosts; ++j) {
+ printf("Extra host: %s\n", hosts[j]._str);
+ }
+ for (; j < numStats; ++j) {
+ printf("Extra stat: %s\n", stats[j]._str);
+ }
+ } else {
+ uint32_t idx = 0;
+ while (atypes != nullptr && *atypes != '\0') {
+ switch (*atypes) {
+ case 's':
printf("%s\n", answer[idx]._string._str);
break;
- default:
+ default:
printf("BAD: unknown type %c\n", *atypes);
- }
- ++atypes;
+ }
+ ++atypes;
++idx;
+ }
}
}
req->SubRef();
@@ -108,19 +155,15 @@ Cmd::run(const char *cmd, const char *arg)
return retval;
}
-const char *
+const Method *
parseCmd(const char *arg)
{
- if (strcmp(arg, "list") == 0) {
- return "sentinel.ls";
- } else if (strcmp(arg, "restart") == 0) {
- return "sentinel.service.restart";
- } else if (strcmp(arg, "start") == 0) {
- return "sentinel.service.start";
- } else if (strcmp(arg, "stop") == 0) {
- return "sentinel.service.stop";
+ for (const auto & method : methods) {
+ if (strcmp(arg, method.name) == 0) {
+ return &method;
+ }
}
- return 0;
+ return nullptr;
}
void hookSignals() {
@@ -131,14 +174,15 @@ void hookSignals() {
int main(int argc, char** argv)
{
int retval = 1;
- const char *cmd = 0;
+ const Method *cmd = nullptr;
if (argc > 1) {
cmd = parseCmd(argv[1]);
}
- if (cmd) {
+ const char *extraArg = (argc > 2 ? argv[2] : nullptr);
+ if (cmd && (extraArg || cmd->noArgNeeded)) {
hookSignals();
Cmd runner;
- retval = runner.run(cmd, argc > 2 ? argv[2] : 0);
+ retval = runner.run(*cmd, extraArg);
} else {
usage();
}
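
For illustration only, not part of the change above: a minimal client-side sketch of calling the new sentinel.report.connectivity RPC and unpacking its "SS" return value, mirroring the handling added in Cmd::run. The method name, parameter type and return layout come from the diff; the variables orb and target (an already set up FRT_Supervisor and connected FRT_Target, as Cmd::initRPC provides), the 5000 ms ping timeout and the assumption of equal-length arrays are hypothetical.

    // sketch: query a sentinel for its connectivity report
    FRT_RPCRequest *req = orb.AllocRPCRequest();
    req->SetMethodName("sentinel.report.connectivity");
    req->GetParams()->AddInt32(5000);            // ping timeout in milliseconds
    target->InvokeSync(req, 10.0);               // RPC timeout in seconds
    if (!req->IsError()) {
        FRT_Values &answer = *(req->GetReturn());        // return type is "SS"
        uint32_t n = answer[0]._string_array._len;       // assume both arrays have the same length
        FRT_StringValue *hosts = answer[0]._string_array._pt;
        FRT_StringValue *stats = answer[1]._string_array._pt;
        for (uint32_t i = 0; i < n; ++i) {
            printf("%s -> %s\n", hosts[i]._str, stats[i]._str);
        }
    }
    req->SubRef();
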
diff --git a/configd/src/apps/sentinel/CMakeLists.txt b/configd/src/apps/sentinel/CMakeLists.txt
index e77abc19077..0323df2864f 100644
--- a/configd/src/apps/sentinel/CMakeLists.txt
+++ b/configd/src/apps/sentinel/CMakeLists.txt
@@ -4,13 +4,16 @@ vespa_add_executable(configd_config-sentinel_app
check-completion-handler.cpp
cmdq.cpp
config-owner.cpp
+ connectivity.cpp
env.cpp
line-splitter.cpp
manager.cpp
metrics.cpp
+ model-owner.cpp
output-connection.cpp
outward-check.cpp
peer-check.cpp
+ report-connectivity.cpp
rpchooks.cpp
rpcserver.cpp
sentinel.cpp
diff --git a/configd/src/apps/sentinel/cc-result.h b/configd/src/apps/sentinel/cc-result.h
new file mode 100644
index 00000000000..3468cf4324a
--- /dev/null
+++ b/configd/src/apps/sentinel/cc-result.h
@@ -0,0 +1,9 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace config::sentinel {
+
+enum class CcResult { UNKNOWN, CONN_FAIL, UNREACHABLE_UP, INDIRECT_PING_FAIL, INDIRECT_PING_UNAVAIL, ALL_OK };
+
+}
diff --git a/configd/src/apps/sentinel/config-owner.cpp b/configd/src/apps/sentinel/config-owner.cpp
index d5f06dff76b..89d5796ae79 100644
--- a/configd/src/apps/sentinel/config-owner.cpp
+++ b/configd/src/apps/sentinel/config-owner.cpp
@@ -6,11 +6,11 @@
#include <string>
#include <vespa/log/log.h>
-LOG_SETUP(".config-owner");
+LOG_SETUP(".sentinel.config-owner");
namespace config::sentinel {
-ConfigOwner::ConfigOwner() : _subscriber() {}
+ConfigOwner::ConfigOwner() = default;
ConfigOwner::~ConfigOwner() = default;
@@ -27,7 +27,7 @@ ConfigOwner::doConfigure()
_currGeneration = _subscriber.getGeneration();
const SentinelConfig& config(*_currConfig);
const auto & app = config.application;
- LOG(config, "Sentinel got %zd service elements [tenant(%s), application(%s), instance(%s)] for config generation %zd",
+ LOG(config, "Sentinel got %zd service elements [tenant(%s), application(%s), instance(%s)] for config generation %" PRId64,
config.service.size(), app.tenant.c_str(), app.name.c_str(), app.instance.c_str(), _currGeneration);
}
@@ -42,29 +42,4 @@ ConfigOwner::checkForConfigUpdate() {
return false;
}
-std::unique_ptr<ModelConfig>
-ConfigOwner::fetchModelConfig(std::chrono::milliseconds timeout)
-{
- std::unique_ptr<ModelConfig> modelConfig;
- ConfigSubscriber tempSubscriber;
- try {
- ConfigHandle<ModelConfig>::UP modelHandle =
- tempSubscriber.subscribe<ModelConfig>("admin/model", timeout);
- if (tempSubscriber.nextGenerationNow()) {
- modelConfig = modelHandle->getConfig();
- LOG(config, "Sentinel got model info [version %s] for %zd hosts [config generation %zd",
- modelConfig->vespaVersion.c_str(), modelConfig->hosts.size(),
- tempSubscriber.getGeneration());
- }
- } catch (ConfigTimeoutException & ex) {
- LOG(warning, "Timeout getting model config: %s [skipping connectivity checks]", ex.getMessage().c_str());
- } catch (InvalidConfigException& ex) {
- LOG(warning, "Invalid model config: %s [skipping connectivity checks]", ex.getMessage().c_str());
- } catch (ConfigRuntimeException& ex) {
- LOG(warning, "Runtime exception getting model config: %s [skipping connectivity checks]", ex.getMessage().c_str());
-
- }
- return modelConfig;
-}
-
}
diff --git a/configd/src/apps/sentinel/config-owner.h b/configd/src/apps/sentinel/config-owner.h
index 2850e6b3904..b72aed59271 100644
--- a/configd/src/apps/sentinel/config-owner.h
+++ b/configd/src/apps/sentinel/config-owner.h
@@ -7,10 +7,6 @@
#include <vespa/config/config.h>
using cloud::config::SentinelConfig;
-using cloud::config::ModelConfig;
-
-using config::ConfigSubscriber;
-using config::ConfigHandle;
namespace config::sentinel {
@@ -19,9 +15,9 @@ namespace config::sentinel {
**/
class ConfigOwner {
private:
- ConfigSubscriber _subscriber;
- ConfigHandle<SentinelConfig>::UP _sentinelHandle;
-
+ config::ConfigSubscriber _subscriber;
+ config::ConfigHandle<SentinelConfig>::UP _sentinelHandle;
+
int64_t _currGeneration = -1;
std::unique_ptr<SentinelConfig> _currConfig;
@@ -37,7 +33,6 @@ public:
bool hasConfig() const { return _currConfig.get() != nullptr; }
const SentinelConfig& getConfig() const { return *_currConfig; }
int64_t getGeneration() const { return _currGeneration; }
- static std::unique_ptr<ModelConfig> fetchModelConfig(std::chrono::milliseconds timeout);
};
}
diff --git a/configd/src/apps/sentinel/connectivity.cpp b/configd/src/apps/sentinel/connectivity.cpp
new file mode 100644
index 00000000000..5996d709c5d
--- /dev/null
+++ b/configd/src/apps/sentinel/connectivity.cpp
@@ -0,0 +1,213 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "config-owner.h"
+#include "connectivity.h"
+#include "outward-check.h"
+#include <vespa/defaults.h>
+#include <vespa/log/log.h>
+#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <thread>
+#include <chrono>
+
+LOG_SETUP(".sentinel.connectivity");
+
+using vespalib::make_string_short::fmt;
+using namespace std::chrono_literals;
+
+namespace config::sentinel {
+
+Connectivity::Connectivity() = default;
+Connectivity::~Connectivity() = default;
+
+namespace {
+
+std::string toString(CcResult value) {
+ switch (value) {
+ case CcResult::UNKNOWN: return "BAD: missing result"; // very very bad
+ case CcResult::INDIRECT_PING_FAIL: return "connect OK, but reverse check FAILED"; // very bad
+ case CcResult::UNREACHABLE_UP: return "unreachable from me, but up"; // very bad
+ case CcResult::CONN_FAIL: return "failed to connect"; // bad
+ case CcResult::INDIRECT_PING_UNAVAIL: return "connect OK (but reverse check unavailable)"; // unfortunate
+ case CcResult::ALL_OK: return "OK: both ways connectivity verified"; // good
+ }
+ LOG(error, "Unknown CcResult enum value: %d", (int)value);
+ LOG_ABORT("Unknown CcResult enum value");
+}
+
+using ConnectivityMap = std::map<std::string, OutwardCheck>;
+using HostAndPort = Connectivity::HostAndPort;
+using SpecMap = Connectivity::SpecMap;
+
+std::string spec(const SpecMap::value_type &host_and_port) {
+ return fmt("tcp/%s:%d", host_and_port.first.c_str(), host_and_port.second);
+}
+
+void classifyConnFails(ConnectivityMap &connectivityMap,
+ const SpecMap &specMap,
+ RpcServer &rpcServer)
+{
+ std::vector<HostAndPort> failedConnSpecs;
+ std::vector<HostAndPort> goodNeighborSpecs;
+ std::string myHostname = vespa::Defaults::vespaHostname();
+ for (auto & [hostname, check] : connectivityMap) {
+ if (hostname == myHostname) {
+ if (check.result() == CcResult::CONN_FAIL) {
+ check.classifyResult(CcResult::UNREACHABLE_UP);
+ }
+ } else {
+ auto iter = specMap.find(hostname);
+ LOG_ASSERT(iter != specMap.end());
+ if (check.result() == CcResult::ALL_OK) {
+ goodNeighborSpecs.push_back(*iter);
+ }
+ if (check.result() == CcResult::CONN_FAIL) {
+ failedConnSpecs.push_back(*iter);
+ }
+ }
+ }
+ if ((failedConnSpecs.size() == 0) || (goodNeighborSpecs.size() == 0)) {
+ return;
+ }
+ for (const auto & toClassify : failedConnSpecs) {
+ const auto & [ nameToCheck, portToCheck ] = toClassify;
+ auto cmIter = connectivityMap.find(nameToCheck);
+ LOG_ASSERT(cmIter != connectivityMap.end());
+ OutwardCheckContext cornerContext(goodNeighborSpecs.size(), nameToCheck, portToCheck, rpcServer.orb());
+ ConnectivityMap cornerProbes;
+ for (const auto & hp : goodNeighborSpecs) {
+ cornerProbes.try_emplace(hp.first, spec(hp), cornerContext);
+ }
+ cornerContext.latch.await();
+ size_t numReportsUp = 0;
+ size_t numReportsDown = 0;
+ for (const auto & [hostname, probe] : cornerProbes) {
+ if (probe.result() == CcResult::INDIRECT_PING_FAIL) ++numReportsDown;
+ if (probe.result() == CcResult::ALL_OK) ++numReportsUp;
+ }
+ if (numReportsUp > 0) {
+ LOG(debug, "Unreachable: %s is up according to %zd hosts (down according to me + %zd others)",
+ nameToCheck.c_str(), numReportsUp, numReportsDown);
+ OutwardCheckContext reverseContext(1,
+ myHostname,
+ rpcServer.getPort(),
+ rpcServer.orb());
+ OutwardCheck check(spec(toClassify), reverseContext);
+ reverseContext.latch.await();
+ auto secondResult = check.result();
+ if (secondResult == CcResult::CONN_FAIL) {
+ cmIter->second.classifyResult(CcResult::UNREACHABLE_UP);
+ } else {
+ LOG(debug, "Recheck %s gives new result: %s",
+ nameToCheck.c_str(), toString(secondResult).c_str());
+ cmIter->second.classifyResult(secondResult);
+ }
+ }
+ }
+}
+
+} // namespace <unnamed>
+
+SpecMap Connectivity::specsFrom(const ModelConfig &model) {
+ SpecMap checkSpecs;
+ for (const auto & h : model.hosts) {
+ bool foundSentinelPort = false;
+ for (const auto & s : h.services) {
+ if (s.name == "config-sentinel") {
+ for (const auto & p : s.ports) {
+ if (p.tags.find("rpc") != p.tags.npos) {
+ checkSpecs[h.name] = p.number;
+ foundSentinelPort = true;
+ }
+ }
+ }
+ }
+ if (! foundSentinelPort) {
+ LOG(warning, "Did not find 'config-sentinel' RPC port in model for host %s [%zd services]",
+ h.name.c_str(), h.services.size());
+ }
+ }
+ return checkSpecs;
+}
+
+void Connectivity::configure(const SentinelConfig::Connectivity &config,
+ const ModelConfig &model)
+{
+ _config = config;
+ LOG(config, "connectivity.maxBadCount = %d", _config.maxBadCount);
+ LOG(config, "connectivity.minOkPercent = %d", _config.minOkPercent);
+ _checkSpecs = specsFrom(model);
+}
+
+bool
+Connectivity::checkConnectivity(RpcServer &rpcServer) {
+ size_t clusterSize = _checkSpecs.size();
+ if (clusterSize == 0) {
+ LOG(warning, "could not get model config, skipping connectivity checks");
+ return true;
+ }
+ std::string myHostname = vespa::Defaults::vespaHostname();
+ OutwardCheckContext checkContext(clusterSize,
+ myHostname,
+ rpcServer.getPort(),
+ rpcServer.orb());
+ ConnectivityMap connectivityMap;
+ for (const auto &host_and_port : _checkSpecs) {
+ connectivityMap.try_emplace(host_and_port.first, spec(host_and_port), checkContext);
+ }
+ checkContext.latch.await();
+ classifyConnFails(connectivityMap, _checkSpecs, rpcServer);
+ Accumulator accumulated;
+ for (const auto & [hostname, check] : connectivityMap) {
+ std::string detail = toString(check.result());
+ std::string prev = _detailsPerHost[hostname];
+ if (prev != detail) {
+ LOG(info, "Connectivity check details: %s -> %s", hostname.c_str(), detail.c_str());
+ }
+ _detailsPerHost[hostname] = detail;
+ LOG_ASSERT(check.result() != CcResult::UNKNOWN);
+ accumulated.handleResult(check.result());
+ }
+ return accumulated.enoughOk(_config);
+}
+
+void Connectivity::Accumulator::handleResult(CcResult value) {
+ ++_numHandled;
+ switch (value) {
+ case CcResult::UNKNOWN:
+ case CcResult::UNREACHABLE_UP:
+ case CcResult::INDIRECT_PING_FAIL:
+ ++_numBad;
+ break;
+ case CcResult::CONN_FAIL:
+ // not OK, but not a serious issue either
+ break;
+ case CcResult::INDIRECT_PING_UNAVAIL:
+ case CcResult::ALL_OK:
+ ++_numOk;
+ break;
+ }
+}
+
+bool Connectivity::Accumulator::enoughOk(const SentinelConfig::Connectivity &config) const {
+ bool enough = true;
+ if (_numBad > size_t(config.maxBadCount)) {
+ LOG(warning, "%zu of %zu nodes up but with network connectivity problems (max is %d)",
+ _numBad, _numHandled, config.maxBadCount);
+ enough = false;
+ }
+ if (_numOk * 100.0 < config.minOkPercent * _numHandled) {
+ double pct = _numOk * 100.0 / _numHandled;
+ LOG(warning, "Only %zu of %zu nodes are up and OK, %.1f%% (min is %d%%)",
+ _numOk, _numHandled, pct, config.minOkPercent);
+ enough = false;
+ }
+ if (_numOk == _numHandled) {
+ LOG(info, "All connectivity checks OK, proceeding with service startup");
+ } else if (enough) {
+ LOG(info, "Enough connectivity checks OK, proceeding with service startup");
+ }
+ return enough;
+}
+
+}
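
Worked example with hypothetical limits, to show how Accumulator and enoughOk() above combine: with connectivity.minOkPercent=50 and connectivity.maxBadCount=1 on a 10-host cluster where 6 checks end as ALL_OK, 3 as CONN_FAIL and 1 as UNREACHABLE_UP, handleResult() yields _numHandled=10, _numOk=6 and _numBad=1 (CONN_FAIL counts as neither OK nor bad). Since 6*100 >= 50*10 and 1 <= 1, enoughOk() returns true and service startup proceeds.
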
diff --git a/configd/src/apps/sentinel/connectivity.h b/configd/src/apps/sentinel/connectivity.h
new file mode 100644
index 00000000000..8d923387ffa
--- /dev/null
+++ b/configd/src/apps/sentinel/connectivity.h
@@ -0,0 +1,46 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "rpcserver.h"
+#include "cc-result.h"
+#include <vespa/config-sentinel.h>
+#include <vespa/config-model.h>
+#include <string>
+#include <map>
+
+using cloud::config::SentinelConfig;
+using cloud::config::ModelConfig;
+
+namespace config::sentinel {
+
+/**
+ * Utility class for running connectivity checks.
+ **/
+class Connectivity {
+public:
+ using SpecMap = std::map<std::string, int>;
+ using HostAndPort = SpecMap::value_type;
+
+ Connectivity();
+ ~Connectivity();
+ void configure(const SentinelConfig::Connectivity &config,
+ const ModelConfig &model);
+ bool checkConnectivity(RpcServer &rpcServer);
+ static SpecMap specsFrom(const ModelConfig &model);
+private:
+ class Accumulator {
+ private:
+ size_t _numOk = 0;
+ size_t _numBad = 0;
+ size_t _numHandled = 0;
+ public:
+ void handleResult(CcResult value);
+ bool enoughOk(const SentinelConfig::Connectivity &config) const;
+ };
+ SentinelConfig::Connectivity _config;
+ SpecMap _checkSpecs;
+ std::map<std::string, std::string> _detailsPerHost;
+};
+
+}
diff --git a/configd/src/apps/sentinel/env.cpp b/configd/src/apps/sentinel/env.cpp
index e4174ee450d..c345de1df36 100644
--- a/configd/src/apps/sentinel/env.cpp
+++ b/configd/src/apps/sentinel/env.cpp
@@ -2,27 +2,41 @@
#include "env.h"
#include "check-completion-handler.h"
-#include "outward-check.h"
+#include "connectivity.h"
#include <vespa/defaults.h>
#include <vespa/log/log.h>
#include <vespa/config/common/exceptions.h>
#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/util/signalhandler.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <thread>
#include <chrono>
-LOG_SETUP(".env");
+LOG_SETUP(".sentinel.env");
using vespalib::make_string_short::fmt;
using namespace std::chrono_literals;
namespace config::sentinel {
+namespace {
+
+void maybeStopNow() {
+ if (vespalib::SignalHandler::INT.check() ||
+ vespalib::SignalHandler::TERM.check())
+ {
+ throw vespalib::FatalException("got signal during boot()");
+ }
+}
+
constexpr std::chrono::milliseconds CONFIG_TIMEOUT_MS = 3min;
-constexpr std::chrono::milliseconds MODEL_TIMEOUT_MS = 1500ms;
+constexpr int maxConnectivityRetries = 100;
+
+} // namespace <unnamed>
Env::Env()
: _cfgOwner(),
+ _modelOwner("admin/model"),
_rpcCommandQueue(),
_rpcServer(),
_stateApi(),
@@ -31,6 +45,7 @@ Env::Env()
_statePort(0)
{
_startMetrics.startedTime = vespalib::steady_clock::now();
+ _stateApi.myHealth.setFailed("initializing...");
}
Env::~Env() = default;
@@ -38,17 +53,41 @@ Env::~Env() = default;
void Env::boot(const std::string &configId) {
LOG(debug, "Reading configuration for ID: %s", configId.c_str());
_cfgOwner.subscribe(configId, CONFIG_TIMEOUT_MS);
- bool ok = _cfgOwner.checkForConfigUpdate();
+ _modelOwner.start(CONFIG_TIMEOUT_MS, true);
// subscribe() should throw if something is not OK
- LOG_ASSERT(ok && _cfgOwner.hasConfig());
- const auto & cfg = _cfgOwner.getConfig();
- LOG(config, "Booting sentinel '%s' with [stateserver port %d] and [rpc port %d]",
- configId.c_str(), cfg.port.telnet, cfg.port.rpc);
- rpcPort(cfg.port.rpc);
- statePort(cfg.port.telnet);
- if (auto up = ConfigOwner::fetchModelConfig(MODEL_TIMEOUT_MS)) {
- waitForConnectivity(*up);
+ Connectivity checker;
+ for (int retry = 0; retry < maxConnectivityRetries; ++retry) {
+ bool changed = _cfgOwner.checkForConfigUpdate();
+ LOG_ASSERT(changed || retry > 0);
+ if (changed) {
+ LOG_ASSERT(_cfgOwner.hasConfig());
+ const auto & cfg = _cfgOwner.getConfig();
+ LOG(config, "Booting sentinel '%s' with [stateserver port %d] and [rpc port %d]",
+ configId.c_str(), cfg.port.telnet, cfg.port.rpc);
+ rpcPort(cfg.port.rpc);
+ statePort(cfg.port.telnet);
+ _modelOwner.checkForUpdates();
+ auto model = _modelOwner.getModelConfig();
+ if (model.has_value()) {
+ checker.configure(cfg.connectivity, model.value());
+ }
+ }
+ if (checker.checkConnectivity(*_rpcServer)) {
+ _stateApi.myHealth.setOk();
+ return;
+ } else {
+ _stateApi.myHealth.setFailed("FAILED connectivity check");
+ if ((retry % 10) == 0) {
+ LOG(warning, "Bad network connectivity (try %d)", 1+retry);
+ }
+ for (int i = 0; i < 5; ++i) {
+ respondAsEmpty();
+ maybeStopNow();
+ std::this_thread::sleep_for(600ms);
+ }
+ }
}
+ throw vespalib::FatalException("Giving up - too many connectivity check failures");
}
void Env::rpcPort(int port) {
@@ -61,7 +100,7 @@ void Env::rpcPort(int port) {
if (_rpcServer && port == _rpcServer->getPort()) {
return; // ok already
}
- _rpcServer = std::make_unique<RpcServer>(port, _rpcCommandQueue);
+ _rpcServer = std::make_unique<RpcServer>(port, _rpcCommandQueue, _modelOwner);
}
void Env::statePort(int port) {
@@ -83,7 +122,6 @@ void Env::statePort(int port) {
void Env::notifyConfigUpdated() {
vespalib::ComponentConfigProducer::Config current("sentinel", _cfgOwner.getGeneration(), "ok");
_stateApi.myComponents.addConfig(current);
-
}
void Env::respondAsEmpty() {
@@ -93,60 +131,4 @@ void Env::respondAsEmpty() {
}
}
-namespace {
-
-const char *toString(CcResult value) {
- switch (value) {
- case CcResult::UNKNOWN: return "unknown";
- case CcResult::CONN_FAIL: return "failed to connect";
- case CcResult::REVERSE_FAIL: return "connect OK, but reverse check FAILED";
- case CcResult::REVERSE_UNAVAIL: return "connect OK, but reverse check unavailable";
- case CcResult::ALL_OK: return "both ways connectivity OK";
- }
- LOG(error, "Unknown CcResult enum value: %d", (int)value);
- LOG_ABORT("Unknown CcResult enum value");
-}
-
-std::map<std::string, std::string> specsFrom(const ModelConfig &model) {
- std::map<std::string, std::string> checkSpecs;
- for (const auto & h : model.hosts) {
- bool foundSentinelPort = false;
- for (const auto & s : h.services) {
- if (s.name == "config-sentinel") {
- for (const auto & p : s.ports) {
- if (p.tags.find("rpc") != p.tags.npos) {
- auto spec = fmt("tcp/%s:%d", h.name.c_str(), p.number);
- checkSpecs[h.name] = spec;
- foundSentinelPort = true;
- }
- }
- }
- }
- if (! foundSentinelPort) {
- LOG(warning, "Did not find 'config-sentinel' RPC port in model for host %s [%zd services]",
- h.name.c_str(), h.services.size());
- }
- }
- return checkSpecs;
-}
-
-}
-
-void Env::waitForConnectivity(const ModelConfig &model) {
- auto checkSpecs = specsFrom(model);
- OutwardCheckContext checkContext(checkSpecs.size(),
- vespa::Defaults::vespaHostname(),
- _rpcServer->getPort(),
- _rpcServer->orb());
- std::map<std::string, OutwardCheck> connectivityMap;
- for (const auto & [ hn, spec ] : checkSpecs) {
- connectivityMap.try_emplace(hn, spec, checkContext);
- }
- checkContext.latch.await();
- for (const auto & [hostname, check] : connectivityMap) {
- LOG(info, "outward check status for host %s is: %s",
- hostname.c_str(), toString(check.result()));
- }
-}
-
}
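
Timing note on the boot loop above: each failed connectivity attempt sleeps 5 x 600 ms (about 3 s) while calling respondAsEmpty() and checking for INT/TERM via maybeStopNow(), so with maxConnectivityRetries = 100 a sentinel that never passes the check gives up after roughly 100 attempts, on the order of five minutes plus the time spent in the checks themselves, and then throws vespalib::FatalException.
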
diff --git a/configd/src/apps/sentinel/env.h b/configd/src/apps/sentinel/env.h
index f117854f006..1bd3a7380ba 100644
--- a/configd/src/apps/sentinel/env.h
+++ b/configd/src/apps/sentinel/env.h
@@ -5,6 +5,7 @@
#include "cmdq.h"
#include "config-owner.h"
#include "metrics.h"
+#include "model-owner.h"
#include "rpcserver.h"
#include "state-api.h"
#include <vespa/vespalib/net/state_server.h>
@@ -22,6 +23,7 @@ public:
~Env();
ConfigOwner &configOwner() { return _cfgOwner; }
+ ModelOwner &modelOwner() { return _modelOwner; }
CommandQueue &commandQueue() { return _rpcCommandQueue; }
StartMetrics &metrics() { return _startMetrics; }
@@ -32,8 +34,8 @@ public:
void notifyConfigUpdated();
private:
void respondAsEmpty();
- void waitForConnectivity(const ModelConfig &model);
ConfigOwner _cfgOwner;
+ ModelOwner _modelOwner;
CommandQueue _rpcCommandQueue;
std::unique_ptr<RpcServer> _rpcServer;
StateApi _stateApi;
diff --git a/configd/src/apps/sentinel/manager.cpp b/configd/src/apps/sentinel/manager.cpp
index 6e0ed78211c..839f7c96ae2 100644
--- a/configd/src/apps/sentinel/manager.cpp
+++ b/configd/src/apps/sentinel/manager.cpp
@@ -11,7 +11,7 @@
#include <sys/wait.h>
#include <vespa/log/log.h>
-LOG_SETUP(".manager");
+LOG_SETUP(".sentinel.manager");
namespace config::sentinel {
@@ -116,6 +116,7 @@ Manager::doWork()
if (_env.configOwner().checkForConfigUpdate()) {
doConfigure();
}
+ _env.modelOwner().checkForUpdates();
handleRestarts();
handleCommands();
handleOutputs();
diff --git a/configd/src/apps/sentinel/model-owner.cpp b/configd/src/apps/sentinel/model-owner.cpp
new file mode 100644
index 00000000000..cfa9f1f6bf5
--- /dev/null
+++ b/configd/src/apps/sentinel/model-owner.cpp
@@ -0,0 +1,66 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "model-owner.h"
+#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/config/common/exceptions.h>
+#include <string>
+#include <chrono>
+#include <vespa/log/log.h>
+
+LOG_SETUP(".sentinel.model-owner");
+
+using namespace std::chrono_literals;
+
+namespace config::sentinel {
+
+std::optional<ModelConfig> ModelOwner::getModelConfig() {
+ std::lock_guard<std::mutex> guard(_lock);
+ if (_modelConfig) {
+ return ModelConfig(*_modelConfig);
+ } else {
+ return {};
+ }
+}
+
+
+ModelOwner::ModelOwner(const std::string &configId)
+ : _configId(configId)
+{}
+
+ModelOwner::~ModelOwner() = default;
+
+void
+ModelOwner::start(std::chrono::milliseconds timeout, bool firstTime) {
+ try {
+ _modelHandle =_subscriber.subscribe<ModelConfig>(_configId, timeout);
+ } catch (ConfigTimeoutException & ex) {
+ if (firstTime) {
+ LOG(warning, "Timeout getting model config: %s [skipping connectivity checks]", ex.message());
+ }
+ } catch (InvalidConfigException& ex) {
+ if (firstTime) {
+ LOG(warning, "Invalid model config: %s [skipping connectivity checks]", ex.message());
+ }
+ } catch (ConfigRuntimeException& ex) {
+ if (firstTime) {
+ LOG(warning, "Runtime exception getting model config: %s [skipping connectivity checks]", ex.message());
+ }
+ }
+}
+
+void
+ModelOwner::checkForUpdates() {
+ if (! _modelHandle) {
+ start(250ms, false);
+ }
+ if (_modelHandle && _subscriber.nextGenerationNow()) {
+ if (auto newModel = _modelHandle->getConfig()) {
+ LOG(config, "Sentinel got model info [version %s] for %zd hosts [config generation %" PRId64 "]",
+ newModel->vespaVersion.c_str(), newModel->hosts.size(), _subscriber.getGeneration());
+ std::lock_guard<std::mutex> guard(_lock);
+ _modelConfig = std::move(newModel);
+ }
+ }
+}
+
+}
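
Design note: ModelOwner keeps the latest ModelConfig behind a mutex and getModelConfig() hands out a copy taken under the lock, so the RPC thread building a connectivity report and the main loop calling checkForUpdates() from Manager::doWork() can use it concurrently without further synchronization.
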
diff --git a/configd/src/apps/sentinel/model-owner.h b/configd/src/apps/sentinel/model-owner.h
new file mode 100644
index 00000000000..0513463e955
--- /dev/null
+++ b/configd/src/apps/sentinel/model-owner.h
@@ -0,0 +1,32 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/config-model.h>
+#include <vespa/config/config.h>
+#include <optional>
+#include <mutex>
+
+using cloud::config::ModelConfig;
+
+namespace config::sentinel {
+
+/**
+ * Handles config subscription and keeps a snapshot of the current model config.
+ **/
+class ModelOwner {
+private:
+ std::string _configId;
+ config::ConfigSubscriber _subscriber;
+ config::ConfigHandle<ModelConfig>::UP _modelHandle;
+ std::mutex _lock;
+ std::unique_ptr<ModelConfig> _modelConfig;
+public:
+ ModelOwner(const std::string &configId);
+ ~ModelOwner();
+ void start(std::chrono::milliseconds timeout, bool firstTime);
+ void checkForUpdates();
+ std::optional<ModelConfig> getModelConfig();
+};
+
+}
diff --git a/configd/src/apps/sentinel/output-connection.cpp b/configd/src/apps/sentinel/output-connection.cpp
index 5dbe1c22f58..caf97c92eea 100644
--- a/configd/src/apps/sentinel/output-connection.cpp
+++ b/configd/src/apps/sentinel/output-connection.cpp
@@ -5,7 +5,7 @@
#include <cstring>
#include <vespa/log/log.h>
-LOG_SETUP("");
+LOG_SETUP(".sentinel.output-connection");
#include <vespa/log/llparser.h>
#include "output-connection.h"
diff --git a/configd/src/apps/sentinel/outward-check.cpp b/configd/src/apps/sentinel/outward-check.cpp
index 5fed69d0b6e..391e5fee8bf 100644
--- a/configd/src/apps/sentinel/outward-check.cpp
+++ b/configd/src/apps/sentinel/outward-check.cpp
@@ -3,10 +3,12 @@
#include "outward-check.h"
#include <vespa/log/log.h>
-LOG_SETUP(".outward-check");
+LOG_SETUP(".sentinel.outward-check");
namespace config::sentinel {
+OutwardCheckContext::~OutwardCheckContext() = default;
+
OutwardCheck::OutwardCheck(const std::string &spec, OutwardCheckContext &context)
: _spec(spec),
_context(context)
@@ -14,8 +16,8 @@ OutwardCheck::OutwardCheck(const std::string &spec, OutwardCheckContext &context
_target = context.orb.GetTarget(spec.c_str());
_req = context.orb.AllocRPCRequest();
_req->SetMethodName("sentinel.check.connectivity");
- _req->GetParams()->AddString(context.myHostname);
- _req->GetParams()->AddInt32(context.myPortnum);
+ _req->GetParams()->AddString(context.targetHostname.c_str());
+ _req->GetParams()->AddInt32(context.targetPortnum);
_req->GetParams()->AddInt32(500);
_target->InvokeAsync(_req, 1.500, this);
}
@@ -29,17 +31,21 @@ void OutwardCheck::RequestDone(FRT_RPCRequest *req) {
if (answer == "ok") {
LOG(debug, "ping to %s with reverse connectivity OK", _spec.c_str());
_result = CcResult::ALL_OK;
- } else {
+ } else if (answer == "bad") {
LOG(debug, "connected to %s, but reverse connectivity fails: %s",
_spec.c_str(), answer.c_str());
- _result = CcResult::REVERSE_FAIL;
+ _result = CcResult::INDIRECT_PING_FAIL;
+ } else {
+ LOG(warning, "connected to %s, but strange reverse connectivity: %s",
+ _spec.c_str(), answer.c_str());
+ _result = CcResult::INDIRECT_PING_UNAVAIL;
}
} else if (req->GetErrorCode() == FRTE_RPC_NO_SUCH_METHOD ||
req->GetErrorCode() == FRTE_RPC_WRONG_PARAMS ||
req->GetErrorCode() == FRTE_RPC_WRONG_RETURN)
{
LOG(debug, "Connected OK to %s but no reverse connectivity check available", _spec.c_str());
- _result = CcResult::REVERSE_UNAVAIL;
+ _result = CcResult::INDIRECT_PING_UNAVAIL;
} else {
LOG(debug, "error on request to %s : %s (%d)", _spec.c_str(),
req->GetErrorMessage(), req->GetErrorCode());
@@ -52,4 +58,9 @@ void OutwardCheck::RequestDone(FRT_RPCRequest *req) {
_context.latch.countDown();
}
+void OutwardCheck::classifyResult(CcResult value) {
+ LOG_ASSERT(_result == CcResult::CONN_FAIL);
+ _result = value;
+}
+
}
diff --git a/configd/src/apps/sentinel/outward-check.h b/configd/src/apps/sentinel/outward-check.h
index 01a298aee18..0e53b9010dc 100644
--- a/configd/src/apps/sentinel/outward-check.h
+++ b/configd/src/apps/sentinel/outward-check.h
@@ -1,5 +1,8 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include "cc-result.h"
#include <string>
#include <vespa/vespalib/util/count_down_latch.h>
#include <vespa/fnet/frt/supervisor.h>
@@ -12,22 +15,21 @@ namespace config::sentinel {
struct OutwardCheckContext {
vespalib::CountDownLatch latch;
- const char * myHostname;
- int myPortnum;
+ std::string targetHostname;
+ int targetPortnum;
FRT_Supervisor &orb;
OutwardCheckContext(size_t count,
- const char * hostname,
+ const std::string &hostname,
int portnumber,
FRT_Supervisor &supervisor)
: latch(count),
- myHostname(hostname),
- myPortnum(portnumber),
+ targetHostname(hostname),
+ targetPortnum(portnumber),
orb(supervisor)
{}
+ ~OutwardCheckContext();
};
-enum class CcResult { UNKNOWN, CONN_FAIL, REVERSE_FAIL, REVERSE_UNAVAIL, ALL_OK };
-
class OutwardCheck : public FRT_IRequestWait {
private:
CcResult _result = CcResult::UNKNOWN;
@@ -41,6 +43,7 @@ public:
void RequestDone(FRT_RPCRequest *req) override;
bool ok() const { return _result == CcResult::ALL_OK; }
CcResult result() const { return _result; }
+ void classifyResult(CcResult value);
};
}
diff --git a/configd/src/apps/sentinel/peer-check.cpp b/configd/src/apps/sentinel/peer-check.cpp
index 60c3d9c96c9..b8060742355 100644
--- a/configd/src/apps/sentinel/peer-check.cpp
+++ b/configd/src/apps/sentinel/peer-check.cpp
@@ -4,7 +4,7 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/log/log.h>
-LOG_SETUP(".peer-check");
+LOG_SETUP(".sentinel.peer-check");
using vespalib::make_string_short::fmt;
@@ -15,7 +15,8 @@ PeerCheck::PeerCheck(StatusCallback &callback, const std::string &host, int port
_hostname(host),
_portnum(port),
_target(nullptr),
- _req(nullptr)
+ _req(nullptr),
+ _statusOk(false)
{
auto spec = fmt("tcp/%s:%d", _hostname.c_str(), _portnum);
_target = orb.GetTarget(spec.c_str());
@@ -31,20 +32,19 @@ PeerCheck::~PeerCheck() {
void PeerCheck::RequestDone(FRT_RPCRequest *req) {
LOG_ASSERT(req == _req);
- bool statusOk = false;
if (req->IsError()) {
- LOG(warning, "error on ping to %s [port %d]: %s (%d)", _hostname.c_str(), _portnum,
+ LOG(debug, "error on ping to %s [port %d]: %s (%d)", _hostname.c_str(), _portnum,
req->GetErrorMessage(), req->GetErrorCode());
} else {
LOG(debug, "OK ping to %s [port %d]", _hostname.c_str(), _portnum);
- statusOk = true;
+ _statusOk = true;
}
_req->SubRef();
_req = nullptr;
_target->SubRef();
_target = nullptr;
// Note: will delete this object, so must be called as final step:
- _callback.returnStatus(statusOk);
+ _callback.returnStatus(_statusOk);
}
}
diff --git a/configd/src/apps/sentinel/peer-check.h b/configd/src/apps/sentinel/peer-check.h
index 096f304467b..ac124106387 100644
--- a/configd/src/apps/sentinel/peer-check.h
+++ b/configd/src/apps/sentinel/peer-check.h
@@ -17,6 +17,9 @@ public:
PeerCheck(StatusCallback &callback, const std::string &host, int portnum, FRT_Supervisor &orb, int timeout_ms);
~PeerCheck();
+ bool okStatus() const { return _statusOk; }
+ const std::string& getHostname() const { return _hostname; }
+
PeerCheck(const PeerCheck &) = delete;
PeerCheck(PeerCheck &&) = delete;
PeerCheck& operator= (const PeerCheck &) = delete;
@@ -30,6 +33,7 @@ private:
int _portnum;
FRT_Target *_target;
FRT_RPCRequest *_req;
+ bool _statusOk;
};
}
diff --git a/configd/src/apps/sentinel/report-connectivity.cpp b/configd/src/apps/sentinel/report-connectivity.cpp
new file mode 100644
index 00000000000..1ea7365aa3f
--- /dev/null
+++ b/configd/src/apps/sentinel/report-connectivity.cpp
@@ -0,0 +1,53 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "report-connectivity.h"
+#include "connectivity.h"
+#include <vespa/config/common/exceptions.h>
+#include <vespa/log/log.h>
+#include <chrono>
+
+LOG_SETUP(".sentinel.report-connectivity");
+
+using cloud::config::ModelConfig;
+using namespace std::chrono_literals;
+
+namespace config::sentinel {
+
+ReportConnectivity::ReportConnectivity(FRT_RPCRequest *req, int timeout_ms, FRT_Supervisor &orb, ModelOwner &modelOwner)
+ : _parentRequest(req),
+ _checks()
+{
+ auto cfg = modelOwner.getModelConfig();
+ if (cfg.has_value()) {
+ auto map = Connectivity::specsFrom(cfg.value());
+ LOG(debug, "making connectivity report for %zd peers", map.size());
+ _remaining = map.size();
+ for (const auto & [ hostname, port ] : map) {
+ _checks.emplace_back(std::make_unique<PeerCheck>(*this, hostname, port, orb, timeout_ms));
+ }
+ } else {
+ _parentRequest->SetError(FRTE_RPC_METHOD_FAILED, "failed getting model config");
+ _parentRequest->Return();
+ }
+}
+
+ReportConnectivity::~ReportConnectivity() = default;
+
+void ReportConnectivity::returnStatus(bool) {
+ if (--_remaining == 0) {
+ finish();
+ }
+}
+
+void ReportConnectivity::finish() const {
+ FRT_Values *dst = _parentRequest->GetReturn();
+ FRT_StringValue *pt_hn = dst->AddStringArray(_checks.size());
+ FRT_StringValue *pt_ss = dst->AddStringArray(_checks.size());
+ for (const auto & peer : _checks) {
+ dst->SetString(pt_hn++, peer->getHostname().c_str());
+ dst->SetString(pt_ss++, peer->okStatus() ? "ok" : "ping failed");
+ }
+ _parentRequest->Return();
+}
+
+}
diff --git a/configd/src/apps/sentinel/report-connectivity.h b/configd/src/apps/sentinel/report-connectivity.h
new file mode 100644
index 00000000000..1f243b73028
--- /dev/null
+++ b/configd/src/apps/sentinel/report-connectivity.h
@@ -0,0 +1,33 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/fnet/frt/rpcrequest.h>
+#include <vespa/fnet/frt/supervisor.h>
+#include <vespa/config-model.h>
+#include <vespa/config/helper/configfetcher.h>
+#include "model-owner.h"
+#include "peer-check.h"
+#include "status-callback.h"
+
+#include <atomic>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace config::sentinel {
+
+class ReportConnectivity : public StatusCallback
+{
+public:
+ ReportConnectivity(FRT_RPCRequest *req, int timeout_ms, FRT_Supervisor &orb, ModelOwner &modelOwner);
+ virtual ~ReportConnectivity();
+ void returnStatus(bool ok) override;
+private:
+ void finish() const;
+ FRT_RPCRequest *_parentRequest;
+ std::vector<std::unique_ptr<PeerCheck>> _checks;
+ std::atomic<size_t> _remaining;
+};
+
+}
diff --git a/configd/src/apps/sentinel/rpchooks.cpp b/configd/src/apps/sentinel/rpchooks.cpp
index 24e3cd53509..3e5509bc8c3 100644
--- a/configd/src/apps/sentinel/rpchooks.cpp
+++ b/configd/src/apps/sentinel/rpchooks.cpp
@@ -1,20 +1,22 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "rpchooks.h"
-#include "cmdq.h"
#include "check-completion-handler.h"
+#include "cmdq.h"
#include "peer-check.h"
+#include "report-connectivity.h"
#include <vespa/fnet/frt/supervisor.h>
#include <vespa/fnet/frt/rpcrequest.h>
#include <vespa/log/log.h>
-LOG_SETUP(".rpchooks");
+LOG_SETUP(".sentinel.rpchooks");
namespace config::sentinel {
-RPCHooks::RPCHooks(CommandQueue &commands, FRT_Supervisor &supervisor)
+RPCHooks::RPCHooks(CommandQueue &commands, FRT_Supervisor &supervisor, ModelOwner &modelOwner)
: _commands(commands),
- _orb(supervisor)
+ _orb(supervisor),
+ _modelOwner(modelOwner)
{
initRPC(&_orb);
}
@@ -53,6 +55,13 @@ RPCHooks::initRPC(FRT_Supervisor *supervisor)
rb.ParamDesc("timeout", "Timeout for check in milliseconds");
rb.ReturnDesc("status", "Status (ok, bad, or unknown) for peer");
//-------------------------------------------------------------------------
+ rb.DefineMethod("sentinel.report.connectivity", "i", "SS",
+ FRT_METHOD(RPCHooks::rpc_reportConnectivity), this);
+ rb.MethodDesc("report connectivity for peer sentinels");
+ rb.ParamDesc("timeout", "Timeout for check in milliseconds");
+ rb.ReturnDesc("hostnames", "Names of peers checked");
+ rb.ReturnDesc("peerstatus", "Status description for each peer");
+ //-------------------------------------------------------------------------
}
void
@@ -106,4 +115,14 @@ RPCHooks::rpc_checkConnectivity(FRT_RPCRequest *req)
req->getStash().create<PeerCheck>(completionHandler, hostname, portnum, _orb, timeout);
}
+void
+RPCHooks::rpc_reportConnectivity(FRT_RPCRequest *req)
+{
+ LOG(debug, "got reportConnectivity");
+ FRT_Values &args = *req->GetParams();
+ int timeout = args[0]._intval32;
+ req->Detach();
+ req->getStash().create<ReportConnectivity>(req, timeout, _orb, _modelOwner);
+}
+
} // namespace slobrok
diff --git a/configd/src/apps/sentinel/rpchooks.h b/configd/src/apps/sentinel/rpchooks.h
index 67f5804dcf7..292e8198b55 100644
--- a/configd/src/apps/sentinel/rpchooks.h
+++ b/configd/src/apps/sentinel/rpchooks.h
@@ -2,6 +2,7 @@
#pragma once
+#include "model-owner.h"
#include <vespa/fnet/frt/invokable.h>
#include <memory>
@@ -25,8 +26,9 @@ class RPCHooks : public FRT_Invokable
private:
CommandQueue &_commands;
FRT_Supervisor &_orb;
+ ModelOwner &_modelOwner;
public:
- RPCHooks(CommandQueue &commands, FRT_Supervisor &supervisor);
+ RPCHooks(CommandQueue &commands, FRT_Supervisor &supervisor, ModelOwner &modelOwner);
~RPCHooks() override;
private:
void initRPC(FRT_Supervisor *supervisor);
@@ -36,6 +38,7 @@ private:
void rpc_stopService(FRT_RPCRequest *req);
void rpc_startService(FRT_RPCRequest *req);
void rpc_checkConnectivity(FRT_RPCRequest *req);
+ void rpc_reportConnectivity(FRT_RPCRequest *req);
};
} // namespace config::sentinel
diff --git a/configd/src/apps/sentinel/rpcserver.cpp b/configd/src/apps/sentinel/rpcserver.cpp
index 80c3c81c826..6c0de35a9e2 100644
--- a/configd/src/apps/sentinel/rpcserver.cpp
+++ b/configd/src/apps/sentinel/rpcserver.cpp
@@ -3,13 +3,13 @@
#include "rpcserver.h"
#include <vespa/log/log.h>
-LOG_SETUP(".rpcserver");
+LOG_SETUP(".sentinel.rpcserver");
namespace config::sentinel {
-RpcServer::RpcServer(int portNumber, CommandQueue &cmdQ)
+RpcServer::RpcServer(int portNumber, CommandQueue &cmdQ, ModelOwner &modelOwner)
: _server(),
- _rpcHooks(cmdQ, _server.supervisor()),
+ _rpcHooks(cmdQ, _server.supervisor(), modelOwner),
_port(portNumber)
{
if (_server.supervisor().Listen(portNumber)) {
diff --git a/configd/src/apps/sentinel/rpcserver.h b/configd/src/apps/sentinel/rpcserver.h
index 4c6dea00ddf..8f60acce1ca 100644
--- a/configd/src/apps/sentinel/rpcserver.h
+++ b/configd/src/apps/sentinel/rpcserver.h
@@ -5,6 +5,7 @@
#include <memory>
#include "cmdq.h"
+#include "model-owner.h"
#include "rpchooks.h"
#include <vespa/fnet/frt/supervisor.h>
@@ -18,7 +19,7 @@ private:
int _port;
public:
- RpcServer(int port, CommandQueue &cmdQ);
+ RpcServer(int port, CommandQueue &cmdQ, ModelOwner &modelOwner);
~RpcServer();
int getPort() const { return _port; }
diff --git a/configd/src/apps/sentinel/sentinel.cpp b/configd/src/apps/sentinel/sentinel.cpp
index 18d4dc28f8a..32f4708188c 100644
--- a/configd/src/apps/sentinel/sentinel.cpp
+++ b/configd/src/apps/sentinel/sentinel.cpp
@@ -11,12 +11,10 @@
#include <sys/time.h>
#include <vespa/log/log.h>
-LOG_SETUP("config-sentinel");
+LOG_SETUP("sentinel.config-sentinel");
using namespace config;
-constexpr std::chrono::milliseconds CONFIG_TIMEOUT_MS(3 * 60 * 1000);
-
static bool stop()
{
return (vespalib::SignalHandler::INT.check() ||
@@ -65,16 +63,20 @@ main(int argc, char **argv)
LOG(debug, "Reading configuration");
try {
environment.boot(configId);
+ } catch (vespalib::FatalException& ex) {
+ LOG(error, "Stopping before boot complete: %s", ex.message());
+ EV_STOPPING("config-sentinel", ex.message());
+ return EXIT_FAILURE;
} catch (ConfigTimeoutException & ex) {
- LOG(warning, "Timeout getting config, please check your setup. Will exit and restart: %s", ex.getMessage().c_str());
- EV_STOPPING("config-sentinel", ex.what());
+ LOG(warning, "Timeout getting config, please check your setup. Will exit and restart: %s", ex.message());
+ EV_STOPPING("config-sentinel", ex.message());
return EXIT_FAILURE;
} catch (InvalidConfigException& ex) {
- LOG(error, "Fatal: Invalid configuration, please check your setup: %s", ex.getMessage().c_str());
- EV_STOPPING("config-sentinel", ex.what());
+ LOG(error, "Fatal: Invalid configuration, please check your setup: %s", ex.message());
+ EV_STOPPING("config-sentinel", ex.message());
return EXIT_FAILURE;
} catch (ConfigRuntimeException& ex) {
- LOG(error, "Fatal: Could not get config, please check your setup: %s", ex.getMessage().c_str());
+ LOG(error, "Fatal: Could not get config, please check your setup: %s", ex.message());
EV_STOPPING("config-sentinel", ex.what());
return EXIT_FAILURE;
}
@@ -86,13 +88,13 @@ main(int argc, char **argv)
vespalib::SignalHandler::CHLD.clear();
manager.doWork(); // Check for child procs & commands
} catch (InvalidConfigException& ex) {
- LOG(warning, "Configuration problem: (ignoring): %s", ex.what());
+ LOG(warning, "Configuration problem: (ignoring): %s", ex.message());
} catch (vespalib::PortListenException& ex) {
- LOG(error, "Fatal: %s", ex.getMessage().c_str());
+ LOG(error, "Fatal: %s", ex.message());
EV_STOPPING("config-sentinel", ex.what());
return EXIT_FAILURE;
} catch (vespalib::FatalException& ex) {
- LOG(error, "Fatal: %s", ex.getMessage().c_str());
+ LOG(error, "Fatal: %s", ex.message());
EV_STOPPING("config-sentinel", ex.what());
return EXIT_FAILURE;
}
diff --git a/configd/src/apps/sentinel/service.cpp b/configd/src/apps/sentinel/service.cpp
index 9c78894f1a7..d1dab4bb26f 100644
--- a/configd/src/apps/sentinel/service.cpp
+++ b/configd/src/apps/sentinel/service.cpp
@@ -12,7 +12,7 @@
#include <sys/wait.h>
#include <vespa/log/log.h>
-LOG_SETUP(".service");
+LOG_SETUP(".sentinel.service");
#include <vespa/log/llparser.h>
static bool stop()
diff --git a/configdefinitions/src/vespa/dispatch.def b/configdefinitions/src/vespa/dispatch.def
index aa40c317d75..150fe2e1603 100644
--- a/configdefinitions/src/vespa/dispatch.def
+++ b/configdefinitions/src/vespa/dispatch.def
@@ -7,10 +7,10 @@ namespace=vespa.config.search
# for that group to be included in queries
minActivedocsPercentage double default=97.0
-# Minimum coverage for allowing a group to be considered for serving
+# Not used. TODO: Remove on Vespa 8
minGroupCoverage double default=100
-# Maximum number of nodes allowed to be down for group to be considered for serving
+# Not used. TODO: Remove on Vespa 8
maxNodesDownPerGroup int default=0
# Distribution policy for group selection
diff --git a/configdefinitions/src/vespa/sentinel.def b/configdefinitions/src/vespa/sentinel.def
index d318a55cccf..cf19e701717 100644
--- a/configdefinitions/src/vespa/sentinel.def
+++ b/configdefinitions/src/vespa/sentinel.def
@@ -17,6 +17,17 @@ application.environment string default="default"
application.instance string default="default"
application.region string default="default"
+# Connectivity checks run before starting services and measure how
+# many nodes in the Vespa cluster we can connect to and how many of
+# those can connect back to us. We delay starting services
+# if we have more problems than the following limits allow:
+
+## Percentage of nodes that must be up and fully OK, minimum
+connectivity.minOkPercent int default=0
+
+## Absolute number of nodes with confirmed network connectivity problems, maximum
+connectivity.maxBadCount int default=999999999
+
## The command to run. This will be run by sh -c, and the following
## environment variables are defined: $ROOT, $VESPA_SERVICE_NAME,
## $VESPA_CONFIG_ID
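
Note on the defaults above: with minOkPercent=0 and maxBadCount=999999999, the enoughOk() test in connectivity.cpp effectively always passes, so out of the box the connectivity check only logs its findings; service startup is delayed only when these limits are tightened.
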
diff --git a/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java b/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java
index 956bc90380f..90fd6203a21 100644
--- a/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java
+++ b/configserver-client/src/test/java/ai/vespa/hosted/client/HttpConfigServerClientTest.java
@@ -92,6 +92,7 @@ class HttpConfigServerClientTest {
assertEquals("GET http://localhost:" + server.port() + "/ failed with status 409 and body 'hi'", thrown.getMessage());
server.verify(1, getRequestedFor(urlEqualTo("/")));
server.verify(1, anyRequestedFor(anyUrl()));
+ server.resetRequests();
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index 276eb51981c..818e65b6caf 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -1087,9 +1087,9 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
}
ReindexActions reindexActions = actions.getReindexActions();
if ( ! reindexActions.isEmpty()) {
- logger.logApplicationPackage(Level.WARNING,
- "Change(s) between active and new application that may require re-index:\n" +
- reindexActions.format());
+ logger.log(Level.WARNING,
+ "Change(s) between active and new application that may require re-index:\n" +
+ reindexActions.format());
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
index 772c2bf5125..4c2aa33a886 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigServerBootstrap.java
@@ -270,15 +270,20 @@ public class ConfigServerBootstrap extends AbstractComponent implements Runnable
}
});
if ( ! Duration.between(lastLogged, Instant.now()).minus(Duration.ofSeconds(10)).isNegative()) {
- log.log(Level.INFO, () -> finishedDeployments.size() + " of " + applicationCount + " apps redeployed " +
- "(" + failedDeployments.size() + " failed)");
+ logProgress(applicationCount, failedDeployments, finishedDeployments);
lastLogged = Instant.now();
}
} while (failedDeployments.size() + finishedDeployments.size() < applicationCount);
+ logProgress(applicationCount, failedDeployments, finishedDeployments);
return new ArrayList<>(failedDeployments);
}
+ private void logProgress(int applicationCount, Set<ApplicationId> failedDeployments, Set<ApplicationId> finishedDeployments) {
+ log.log(Level.INFO, () -> finishedDeployments.size() + " of " + applicationCount + " apps redeployed " +
+ "(" + failedDeployments.size() + " failed)");
+ }
+
private DeploymentStatus getDeploymentStatus(ApplicationId applicationId, Future<?> future) {
try {
future.get(1, TimeUnit.MILLISECONDS);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java
index 2d336267169..1ab667f8a01 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/Deployment.java
@@ -148,6 +148,8 @@ public class Deployment implements com.yahoo.config.provision.Deployment {
provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of()));
deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s",
hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", "))));
+ log.info(String.format("%sScheduled service restart of %d nodes: %s",
+ session.logPre(), hostnames.size(), restartActions.format()));
this.configChangeActions = new ConfigChangeActions(
new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions());
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index d110370e72b..94cfba12453 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -177,6 +177,11 @@ public class ModelContextImpl implements ModelContext {
private final boolean useExternalRankExpression;
private final boolean distributeExternalRankExpressions;
private final int numDistributorStripes;
+ private final boolean requireConnectivityCheck;
+ private final int maxConcurrentMergesPerContentNode;
+ private final int maxMergeQueueSize;
+ private final int largeRankExpressionLimit;
+ private final boolean throwIfResourceLimitsSpecified;
public FeatureFlags(FlagSource source, ApplicationId appId) {
this.dedicatedClusterControllerFlavor = parseDedicatedClusterControllerFlavor(flagValue(source, appId, Flags.DEDICATED_CLUSTER_CONTROLLER_FLAVOR));
@@ -199,6 +204,11 @@ public class ModelContextImpl implements ModelContext {
this.numDistributorStripes = flagValue(source, appId, Flags.NUM_DISTRIBUTOR_STRIPES);
this.useExternalRankExpression = flagValue(source, appId, Flags.USE_EXTERNAL_RANK_EXPRESSION);
this.distributeExternalRankExpressions = flagValue(source, appId, Flags.DISTRIBUTE_EXTERNAL_RANK_EXPRESSION);
+ this.largeRankExpressionLimit = flagValue(source, appId, Flags.LARGE_RANK_EXPRESSION_LIMIT);
+ this.requireConnectivityCheck = flagValue(source, appId, Flags.REQUIRE_CONNECTIVITY_CHECK);
+ this.maxConcurrentMergesPerContentNode = flagValue(source, appId, Flags.MAX_CONCURRENT_MERGES_PER_NODE);
+ this.maxMergeQueueSize = flagValue(source, appId, Flags.MAX_MERGE_QUEUE_SIZE);
+ this.throwIfResourceLimitsSpecified = flagValue(source, appId, Flags.THROW_EXCEPTION_IF_RESOURCE_LIMITS_SPECIFIED);
}
@Override public Optional<NodeResources> dedicatedClusterControllerFlavor() { return Optional.ofNullable(dedicatedClusterControllerFlavor); }
@@ -223,6 +233,11 @@ public class ModelContextImpl implements ModelContext {
@Override public int numDistributorStripes() { return numDistributorStripes; }
@Override public boolean useExternalRankExpressions() { return useExternalRankExpression; }
@Override public boolean distributeExternalRankExpressions() { return distributeExternalRankExpressions; }
+ @Override public int largeRankExpressionLimit() { return largeRankExpressionLimit; }
+ @Override public boolean requireConnectivityCheck() { return requireConnectivityCheck; }
+ @Override public int maxConcurrentMergesPerNode() { return maxConcurrentMergesPerContentNode; }
+ @Override public int maxMergeQueueSize() { return maxMergeQueueSize; }
+ @Override public boolean throwIfResourceLimitsSpecified() { return throwIfResourceLimitsSpecified; }
private static <V> V flagValue(FlagSource source, ApplicationId appId, UnboundFlag<? extends V, ?, ?> flag) {
return flag.bindTo(source)
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
index ca7489012e2..f0a63757477 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
@@ -35,7 +35,7 @@ import static com.yahoo.vespa.config.server.zookeeper.ConfigCurator.USERAPP_ZK_S
import static com.yahoo.vespa.config.server.zookeeper.ConfigCurator.USER_DEFCONFIGS_ZK_SUBPATH;
/**
- * A class used for reading and writing application data to zookeeper.
+ * Reads and writes application package to and from ZooKeeper.
*
* @author hmusum
*/
@@ -76,13 +76,13 @@ public class ZooKeeperClient {
*
* @param app the application package to feed to zookeeper
*/
- void write(ApplicationPackage app) {
+ void writeApplicationPackage(ApplicationPackage app) {
try {
writeUserDefs(app);
writeSomeOf(app);
- writeSearchDefinitions(app);
+ writeSchemas(app);
writeUserIncludeDirs(app, app.getUserIncludeDirs());
- write(app.getMetaData());
+ writeMetadata(app.getMetaData());
} catch (Exception e) {
throw new IllegalStateException("Unable to write vespa model to config server(s) " + System.getProperty("configsources") + "\n" +
"Please ensure that config server is started " +
@@ -90,8 +90,8 @@ public class ZooKeeperClient {
}
}
- private void writeSearchDefinitions(ApplicationPackage app) throws IOException {
- Collection<NamedReader> sds = app.getSearchDefinitions();
+ private void writeSchemas(ApplicationPackage app) throws IOException {
+ Collection<NamedReader> sds = app.getSchemas();
if (sds.isEmpty()) return;
Path zkPath = getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SCHEMAS_DIR);
@@ -153,7 +153,6 @@ public class ZooKeeperClient {
for (ApplicationFile file : listFiles(dir, filenameFilter)) {
String name = file.getPath().getName();
if (name.startsWith(".")) continue; //.svn , .git ...
- if ("CVS".equals(name)) continue;
if (file.isDirectory()) {
configCurator.createNode(path.append(name).getAbsolute());
if (recurse) {
@@ -198,7 +197,6 @@ public class ZooKeeperClient {
}
private void writeUserIncludeDirs(ApplicationPackage applicationPackage, List<String> userIncludeDirs) throws IOException {
- // User defined include directories
for (String userInclude : userIncludeDirs) {
ApplicationFile dir = applicationPackage.getFile(Path.fromString(userInclude));
final List<ApplicationFile> files = dir.listFiles();
@@ -238,12 +236,12 @@ public class ZooKeeperClient {
}
/**
- * Feeds application metadata to zookeeper. Used by vespamodel to create config
- * for application metadata (used by ApplicationStatusHandler)
+ * Feeds application metadata to zookeeper. Used by the config model to create config
+ * for application metadata.
*
* @param metaData The application metadata.
*/
- private void write(ApplicationMetaData metaData) {
+ private void writeMetadata(ApplicationMetaData metaData) {
configCurator.putData(getZooKeeperAppPath(META_ZK_PATH).getAbsolute(), metaData.asJsonBytes());
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java
index 12aa5b7cc35..8c7d6ea28dd 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java
@@ -33,7 +33,7 @@ public class ZooKeeperDeployer {
public void deploy(ApplicationPackage applicationPackage, Map<Version, FileRegistry> fileRegistryMap,
AllocatedHosts allocatedHosts) throws IOException {
zooKeeperClient.initialize();
- zooKeeperClient.write(applicationPackage);
+ zooKeeperClient.writeApplicationPackage(applicationPackage);
zooKeeperClient.write(fileRegistryMap);
zooKeeperClient.write(allocatedHosts);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java
index 2250f2dc579..163c19abe75 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/AddFileInterface.java
@@ -3,10 +3,13 @@ package com.yahoo.vespa.config.server.filedistribution;
import com.yahoo.config.FileReference;
+import java.nio.ByteBuffer;
+
/**
* @author baldersheim
*/
public interface AddFileInterface {
FileReference addUri(String uri, String relativePath);
FileReference addFile(String relativePath);
+ FileReference addBlob(ByteBuffer blob, String relativePath);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java
index 4152c92c289..a1907c01085 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/ApplicationFileManager.java
@@ -7,6 +7,7 @@ import java.io.FileOutputStream;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.net.URL;
+import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
@@ -35,6 +36,32 @@ public class ApplicationFileManager implements AddFileInterface {
return addFile(relativePath);
}
+ @Override
+ public FileReference addBlob(ByteBuffer blob, String relativePath) {
+ writeBlob(blob, relativePath);
+ return addFile(relativePath);
+ }
+
+ private void writeBlob(ByteBuffer blob, String relativePath) {
+ File file = new File(applicationDir, relativePath);
+ FileOutputStream fos = null;
+ try {
+ Files.createDirectories(file.toPath().getParent());
+ fos = new FileOutputStream(file.getAbsolutePath());
+ fos.write(blob.array(), blob.arrayOffset(), blob.remaining());
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Failed creating directory " + file.getParent(), e);
+ } finally {
+ try {
+ if (fos != null) {
+ fos.close();
+ }
+ } catch (IOException e) {
+ throw new IllegalArgumentException("Failed closing down after writing blob of size " + blob.remaining() + " to " + file.getAbsolutePath());
+ }
+ }
+ }
+
private void download(String uri, String relativePath) {
File file = new File(applicationDir, relativePath);
FileOutputStream fos = null;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java
index ce582a8a1a8..4605d5e5f5c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java
@@ -51,6 +51,22 @@ public class FileDBRegistry implements FileRegistry {
}
@Override
+ public FileReference addBlob(ByteBuffer blob) {
+ long blobHash = XXHashFactory.fastestJavaInstance().hash64().hash(blob, 0);
+ String blobName = Long.toHexString(blobHash);
+ String relativePath = blobToRelativeFile(blob, blobName);
+ synchronized (this) {
+ Optional<FileReference> cachedReference = Optional.ofNullable(fileReferenceCache.get(blobName));
+ return cachedReference.orElseGet(() -> {
+ FileReference newRef = manager.addBlob(blob, relativePath);
+ entries.add(new Entry(blobName, newRef));
+ fileReferenceCache.put(blobName, newRef);
+ return newRef;
+ });
+ }
+ }
+
+ @Override
public String fileSourceHost() {
return HostName.getLocalhost();
}
@@ -72,4 +88,9 @@ public class FileDBRegistry implements FileRegistry {
return relative;
}
+ private static String blobToRelativeFile(ByteBuffer blob, String blobName) {
+ return "blob/" + blobName;
+ }
+
}
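The FileDBRegistry change above makes blobs content-addressed: the payload's 64-bit XXHash, rendered as hex, is both the cache key and the relative path under blob/, so registering the same bytes twice yields the same FileReference. A minimal standalone sketch of that naming scheme (class and method names here are illustrative; it uses the byte[] overload of lz4-java's XXHash64, while the registry hashes the ByteBuffer directly):

import net.jpountz.xxhash.XXHash64;
import net.jpountz.xxhash.XXHashFactory;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class BlobNamingSketch {

    // Derives the relative path a blob would be registered under: blob/<hex of 64-bit xxhash>
    static String relativePathFor(ByteBuffer blob) {
        byte[] bytes = new byte[blob.remaining()];
        blob.duplicate().get(bytes);                        // copy without moving the caller's position
        XXHash64 hasher = XXHashFactory.fastestJavaInstance().hash64();
        long hash = hasher.hash(bytes, 0, bytes.length, 0); // seed 0, as in FileDBRegistry above
        return "blob/" + Long.toHexString(hash);
    }

    public static void main(String[] args) {
        ByteBuffer blob = ByteBuffer.wrap("firstPhase: a + b".getBytes(StandardCharsets.UTF_8));
        // Identical content always maps to the same name, so a second registration hits the cache
        System.out.println(relativePathFor(blob));
        System.out.println(relativePathFor(blob));
    }
}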
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java
index 33cd425d6aa..cfe7349a1c6 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDistributionUtil.java
@@ -40,7 +40,7 @@ public class FileDistributionUtil {
/**
* Returns a connection pool with all config servers except this one, or an empty pool if there
- * is only one config server.
+ * is only one config server (no point in trying to download from yourself).
*/
public static ConnectionPool createConnectionPool(ConfigserverConfig configserverConfig) {
List<String> configServers = ConfigServerSpec.fromConfig(configserverConfig)
@@ -49,7 +49,9 @@ public class FileDistributionUtil {
.map(spec -> "tcp/" + spec.getHostName() + ":" + spec.getConfigServerPort())
.collect(Collectors.toList());
- return configServers.size() > 0 ? new JRTConnectionPool(new ConfigSourceSet(configServers)) : emptyConnectionPool();
+ return configServers.size() > 0
+ ? new JRTConnectionPool(new ConfigSourceSet(configServers), "filedistribution-jrt-pool-")
+ : emptyConnectionPool();
}
public static boolean fileReferenceExistsOnDisk(File downloadDirectory, FileReference applicationPackageReference) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
index 93fabd8d6c0..3c91cea91e1 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
@@ -10,9 +10,9 @@ import com.yahoo.jrt.Request;
import com.yahoo.jrt.StringValue;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.filedistribution.CompressedFileReference;
+import com.yahoo.vespa.filedistribution.EmptyFileReferenceData;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceData;
-import com.yahoo.vespa.filedistribution.EmptyFileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
import com.yahoo.vespa.filedistribution.LazyFileReferenceData;
import com.yahoo.vespa.filedistribution.LazyTemporaryStorageFileReferenceData;
@@ -82,7 +82,7 @@ public class FileServer {
this.downloader = fileDownloader;
this.root = new FileDirectory(rootDir);
this.executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
- new DaemonThreadFactory("file server push"));
+ new DaemonThreadFactory("file-server-"));
}
boolean hasFile(String fileReference) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
index 5b520b10fcf..dfbce72d4ba 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/ApplicationApiHandler.java
@@ -82,7 +82,7 @@ public class ApplicationApiHandler extends SessionHandler {
.collect(Collectors.toMap(Part::getName, p -> p));
byte[] params = parts.get(MULTIPART_PARAMS).getInputStream().readAllBytes();
- log.log(Level.FINE, "Deploy parameters: [{}]", new String(params, StandardCharsets.UTF_8));
+ log.log(Level.FINE, "Deploy parameters: [{0}]", new String(params, StandardCharsets.UTF_8));
prepareParams = PrepareParams.fromJson(params, tenantName, zookeeperBarrierTimeout);
Part appPackagePart = parts.get(MULTIPART_APPLICATION_PACKAGE);
compressedStream = createFromCompressedStream(appPackagePart.getInputStream(), appPackagePart.getContentType());
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
index 062a21b1f80..cdfdce91500 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/DeploymentMetricsResponse.java
@@ -40,6 +40,11 @@ public class DeploymentMetricsResponse extends SlimeJsonResponse {
metrics.setDouble("diskUtil", disk.util());
metrics.setDouble("diskFeedBlockLimit", disk.feedBlockLimit());
});
+
+ aggregator.reindexingProgress().ifPresent(reindexingProgress -> {
+ Cursor progressObject = cluster.setObject("reindexingProgress");
+ reindexingProgress.forEach(progressObject::setDouble);
+ });
}
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
index 5519ffc1bdc..7352f71c032 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
@@ -11,6 +11,7 @@ import com.yahoo.vespa.config.server.session.SessionRepository;
import com.yahoo.vespa.config.server.tenant.Tenant;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.defaults.Defaults;
+import com.yahoo.vespa.filedistribution.Downloads;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.flags.FlagSource;
@@ -48,10 +49,11 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
}
@Override
- protected boolean maintain() {
- boolean success = true;
+ protected double maintain() {
+ int attempts = 0;
+ int failures = 0;
- try (var fileDownloader = new FileDownloader(connectionPool, downloadDirectory)) {
+ try (var fileDownloader = new FileDownloader(connectionPool, downloadDirectory, new Downloads())) {
for (var applicationId : applicationRepository.listApplications()) {
log.fine(() -> "Verifying application package for " + applicationId);
Session session = applicationRepository.getActiveSession(applicationId);
@@ -62,12 +64,13 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
log.fine(() -> "Verifying application package file reference " + applicationPackage + " for session " + sessionId);
if (applicationPackage != null) {
+ attempts++;
if (! fileReferenceExistsOnDisk(downloadDirectory, applicationPackage)) {
- log.fine(() -> "Downloading missing application package for application " + applicationId + " - session " + sessionId);
+ log.fine(() -> "Downloading missing application package for application " + applicationId + " (session " + sessionId + ")");
if (fileDownloader.getFile(applicationPackage).isEmpty()) {
- success = false;
- log.warning("Failed to download application package for application " + applicationId + " - session " + sessionId);
+ failures++;
+ log.warning("Failed to download application package for application " + applicationId + " (session " + sessionId + ")");
continue;
}
}
@@ -75,7 +78,7 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
}
}
}
- return success;
+ return asSuccessFactor(attempts, failures);
}
@Override
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java
index 4938f34131e..2d88ee77deb 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ConfigServerMaintainer.java
@@ -35,14 +35,23 @@ public abstract class ConfigServerMaintainer extends Maintainer {
ConfigServerMaintainer(ApplicationRepository applicationRepository, Curator curator, FlagSource flagSource,
Instant now, Duration interval) {
super(null, interval, now, new JobControl(new JobControlFlags(curator, flagSource)),
- jobMetrics(applicationRepository.metric()), cluster(curator), false);
+ new ConfigServerJobMetrics(applicationRepository.metric()), cluster(curator), false);
this.applicationRepository = applicationRepository;
}
- private static JobMetrics jobMetrics(Metric metric) {
- return new JobMetrics((job, consecutiveFailures) -> {
- metric.set("maintenance.consecutiveFailures", consecutiveFailures, metric.createContext(Map.of("job", job)));
- });
+ private static class ConfigServerJobMetrics extends JobMetrics {
+
+ private final Metric metric;
+
+ public ConfigServerJobMetrics(Metric metric) {
+ this.metric = metric;
+ }
+
+ @Override
+ public void completed(String job, double successFactor) {
+ metric.set("maintenance.successFactor", successFactor, metric.createContext(Map.of("job", job)));
+ }
+
}
private static class JobControlFlags implements JobControlState {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java
index b0876fb57e8..ca8db30c21f 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/FileDistributionMaintainer.java
@@ -33,9 +33,9 @@ public class FileDistributionMaintainer extends ConfigServerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir, maxUnusedFileReferenceAge);
- return true;
+ return 1.0;
}
}
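The maintainers in this module now report a success factor instead of a boolean: attempts and failures are counted and folded into a double that ConfigServerJobMetrics publishes as maintenance.successFactor. The asSuccessFactor helper they call is inherited from the maintainer base classes and is not part of this diff; presumably it is the fraction of successful attempts, roughly as sketched below (illustrative only):

// Illustrative stand-in for the inherited helper; the real definition lives outside this diff.
static double asSuccessFactor(int attempts, int failures) {
    // A run that attempted nothing counts as fully successful
    return attempts == 0 ? 1.0 : 1.0 - (double) failures / attempts;
}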
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainer.java
index 971c2c20ae9..af9ea917aaf 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ReindexingMaintainer.java
@@ -22,6 +22,7 @@ import java.util.Comparator;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;
import java.util.logging.Level;
@@ -51,8 +52,9 @@ public class ReindexingMaintainer extends ConfigServerMaintainer {
}
@Override
- protected boolean maintain() {
- AtomicBoolean success = new AtomicBoolean(true);
+ protected double maintain() {
+ AtomicInteger attempts = new AtomicInteger(0);
+ AtomicInteger failures = new AtomicInteger(0);
for (Tenant tenant : applicationRepository.tenantRepository().getAllTenants()) {
ApplicationCuratorDatabase database = tenant.getApplicationRepo().database();
for (ApplicationId id : database.activeApplications())
@@ -60,6 +62,7 @@ public class ReindexingMaintainer extends ConfigServerMaintainer {
.map(application -> application.getForVersionOrLatest(Optional.empty(), clock.instant()))
.ifPresent(application -> {
try {
+ attempts.incrementAndGet();
applicationRepository.modifyReindexing(id, reindexing -> {
reindexing = withNewReady(reindexing, lazyGeneration(application), clock.instant());
reindexing = withOnlyCurrentData(reindexing, application);
@@ -68,11 +71,11 @@ public class ReindexingMaintainer extends ConfigServerMaintainer {
}
catch (RuntimeException e) {
log.log(Level.INFO, "Failed to update reindexing status for " + id + ": " + Exceptions.toMessageString(e));
- success.set(false);
+ failures.incrementAndGet();
}
});
}
- return success.get();
+ return asSuccessFactor(attempts.get(), failures.get());
}
private Supplier<Long> lazyGeneration(Application application) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/SessionsMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/SessionsMaintainer.java
index 7482980e221..1f85dd4579d 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/SessionsMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/SessionsMaintainer.java
@@ -25,7 +25,7 @@ public class SessionsMaintainer extends ConfigServerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
if (iteration % 10 == 0)
log.log(Level.INFO, () -> "Running " + SessionsMaintainer.class.getSimpleName() + ", iteration " + iteration);
@@ -38,7 +38,7 @@ public class SessionsMaintainer extends ConfigServerMaintainer {
}
iteration++;
- return true;
+ return 1.0;
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/TenantsMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/TenantsMaintainer.java
index 7c01045ee72..0a7df2c9d21 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/TenantsMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/TenantsMaintainer.java
@@ -31,12 +31,12 @@ public class TenantsMaintainer extends ConfigServerMaintainer {
}
@Override
- protected boolean maintain() {
- if ( ! applicationRepository.configserverConfig().hostedVespa()) return true;
+ protected double maintain() {
+ if ( ! applicationRepository.configserverConfig().hostedVespa()) return 1.0;
Set<TenantName> tenants = applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, clock.instant());
if (tenants.size() > 0) log.log(Level.INFO, "Deleted tenants " + tenants);
- return true;
+ return 1.0;
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
index e1135063f97..77e2f923d4a 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetriever.java
@@ -127,8 +127,10 @@ public class ClusterDeploymentMetricsRetriever {
case VESPA_CONTAINER:
optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum ->
aggregator.get()
- .addContainerLatency(qlSum, values.field("query_latency.count").asDouble())
- .addFeedLatency(values.field("feed.latency.sum").asDouble(), values.field("feed.latency.count").asDouble()));
+ .addContainerLatency(qlSum, values.field("query_latency.count").asDouble()));
+ optionalDouble(values.field("feed.latency.sum")).ifPresent(flSum ->
+ aggregator.get()
+ .addFeedLatency(flSum, values.field("feed.latency.count").asDouble()));
break;
case VESPA_QRSERVER:
optionalDouble(values.field("query_latency.sum")).ifPresent(qlSum ->
@@ -146,6 +148,10 @@ public class ClusterDeploymentMetricsRetriever {
values.field("cluster-controller.resource_usage.memory_limit.last").asDouble())
.addDiskUsage(values.field("cluster-controller.resource_usage.max_disk_utilization.last").asDouble(),
values.field("cluster-controller.resource_usage.disk_limit.last").asDouble()));
+ optionalDouble(values.field("reindexing.progress.last")).ifPresent(progress -> {
+ if (progress < 0 || progress >= 1) return;
+ aggregator.get().addReindexingProgress(metric.field("dimensions").field("documenttype").asString(), progress);
+ });
break;
}
}
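The retriever change above only forwards reindexing progress that is actually in flight: values outside [0, 1) (presumably idle or already completed reindexing) are dropped before they reach the aggregator, which collects the rest per document type. A compact sketch of that guard and aggregation (class and method names are illustrative, not the production types):

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.OptionalDouble;

public class ReindexingProgressSketch {

    private final Map<String, Double> progressByDocumentType = new HashMap<>();

    // Mirrors the guard in ClusterDeploymentMetricsRetriever: only in-flight progress is kept
    void offer(String documentType, OptionalDouble progressLast) {
        progressLast.ifPresent(progress -> {
            if (progress < 0 || progress >= 1) return; // sentinel or completed: not reported
            progressByDocumentType.put(documentType, progress);
        });
    }

    // Empty when no document type is currently being reindexed, matching the aggregator's Optional
    Optional<Map<String, Double>> progress() {
        return progressByDocumentType.isEmpty() ? Optional.empty() : Optional.of(progressByDocumentType);
    }

    public static void main(String[] args) {
        var sketch = new ReindexingProgressSketch();
        sketch.offer("music", OptionalDouble.of(0.71));
        sketch.offer("announcements", OptionalDouble.of(1.0)); // done: dropped
        System.out.println(sketch.progress());                 // Optional[{music=0.71}]
    }
}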
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
index f27cf942dd8..7ce6d84ad8c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/metrics/DeploymentMetricsAggregator.java
@@ -1,6 +1,8 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.metrics;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Optional;
/**
@@ -15,6 +17,7 @@ public class DeploymentMetricsAggregator {
private Double documentCount;
private ResourceUsage memoryUsage;
private ResourceUsage diskUsage;
+ private Map<String, Double> reindexingProgress;
public synchronized DeploymentMetricsAggregator addFeedLatency(double sum, double count) {
this.feed = combineLatency(this.feed, sum, count);
@@ -46,6 +49,12 @@ public class DeploymentMetricsAggregator {
return this;
}
+ public synchronized DeploymentMetricsAggregator addReindexingProgress(String documentType, double progress) {
+ if (reindexingProgress == null) this.reindexingProgress = new HashMap<>();
+ this.reindexingProgress.put(documentType, progress);
+ return this;
+ }
+
public Optional<Double> aggregateFeedLatency() {
return Optional.ofNullable(feed).map(m -> m.sum / m.count).filter(num -> !num.isNaN());
}
@@ -80,6 +89,10 @@ public class DeploymentMetricsAggregator {
return Optional.ofNullable(diskUsage);
}
+ public Optional<Map<String, Double>> reindexingProgress() {
+ return Optional.ofNullable(reindexingProgress);
+ }
+
private static LatencyMetrics combineLatency(LatencyMetrics metricsOrNull, double sum, double count) {
return Optional.ofNullable(metricsOrNull).orElseGet(LatencyMetrics::new).combine(sum, count);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
index 2d4aa78bcf6..4c25708dca2 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
@@ -208,10 +208,11 @@ public abstract class ModelsBuilder<MODELRESULT extends ModelResult> {
builtModelVersions.add(modelVersion);
} catch (RuntimeException e) {
// allow failure to create old config models if there is a validation override that allows skipping old
- // config models (which is always true for manually deployed zones)
- if (builtModelVersions.size() > 0 && builtModelVersions.get(0).getModel().skipOldConfigModels(now))
+ // config models or we're manually deploying
+ if (builtModelVersions.size() > 0 &&
+ ( builtModelVersions.get(0).getModel().skipOldConfigModels(now) || zone().environment().isManuallyDeployed()))
log.log(Level.INFO, applicationId + ": Failed to build version " + version +
- ", but allow failure due to validation override ´skipOldConfigModels´");
+ ", but allow failure due to validation override or manual deployment");
else {
log.log(Level.SEVERE, applicationId + ": Failed to build version " + version);
throw e;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java
index 637fc421457..820f5c15318 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/GetConfigProcessor.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.config.server.rpc;
import com.yahoo.cloud.config.SentinelConfig;
import com.yahoo.collections.Pair;
import com.yahoo.component.Version;
+import com.yahoo.config.ConfigInstance;
import com.yahoo.config.provision.TenantName;
import com.yahoo.jrt.Request;
import com.yahoo.net.HostName;
@@ -164,7 +165,7 @@ class GetConfigProcessor implements Runnable {
private void returnEmpty(JRTServerConfigRequest request) {
log.log(Level.FINE, () -> "Returning empty sentinel config for request from " + request.getClientHostName());
- ConfigPayload emptyPayload = ConfigPayload.empty();
+ var emptyPayload = ConfigPayload.fromInstance(new SentinelConfig.Builder().build());
String configMd5 = ConfigUtils.getMd5(emptyPayload);
ConfigResponse config = SlimeConfigResponse.fromConfigPayload(emptyPayload, 0, false, configMd5);
request.addOkResponse(request.payloadFromResponse(config), config.getGeneration(), false, config.getConfigMd5());
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
index 1b43e57c01a..071a0dd8f0c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/PrepareParams.java
@@ -306,7 +306,7 @@ public final class PrepareParams {
.athenzDomain(SlimeUtils.optionalString(params.field(ATHENZ_DOMAIN)).orElse(null))
.applicationRoles(ApplicationRoles.fromString(SlimeUtils.optionalString(params.field(APPLICATION_HOST_ROLE)).orElse(null), SlimeUtils.optionalString(params.field(APPLICATION_CONTAINER_ROLE)).orElse(null)))
.quota(deserialize(params.field(QUOTA_PARAM_NAME), Quota::fromSlime))
- .tenantSecretStores(SlimeUtils.optionalString(params.field(TENANT_SECRET_STORES_PARAM_NAME)).orElse(null))
+ .tenantSecretStores(deserialize(params.field(TENANT_SECRET_STORES_PARAM_NAME), TenantSecretStoreSerializer::listFromSlime, List.of()))
.force(booleanValue(params, FORCE_PARAM_NAME))
.waitForResourcesInPrepare(booleanValue(params, WAIT_FOR_RESOURCES_IN_PREPARE))
.withOperatorCertificates(deserialize(params.field(OPERATOR_CERTIFICATES), PrepareParams::readOperatorCertificates, Collections.emptyList()))
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
index ac350db5c21..9778b1fc1f2 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
@@ -36,13 +36,14 @@ import com.yahoo.vespa.config.server.modelfactory.ModelFactoryRegistry;
import com.yahoo.vespa.config.server.monitoring.MetricUpdater;
import com.yahoo.vespa.config.server.monitoring.Metrics;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
-import com.yahoo.vespa.config.server.tenant.TenantListener;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.zookeeper.ConfigCurator;
import com.yahoo.vespa.config.server.zookeeper.SessionCounter;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.defaults.Defaults;
+import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.yolean.Exceptions;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.cache.ChildData;
@@ -58,6 +59,7 @@ import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@@ -121,6 +123,7 @@ public class SessionRepository {
private final Zone zone;
private final ModelFactoryRegistry modelFactoryRegistry;
private final ConfigDefinitionRepo configDefinitionRepo;
+ private final BooleanFlag rewriteSearchDefinitions;
public SessionRepository(TenantName tenantName,
TenantApplications applicationRepo,
@@ -161,17 +164,23 @@ public class SessionRepository {
this.zone = zone;
this.modelFactoryRegistry = modelFactoryRegistry;
this.configDefinitionRepo = configDefinitionRepo;
+ this.rewriteSearchDefinitions = Flags.MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR.bindTo(flagSource);
- loadSessions(); // Needs to be done before creating cache below
+ loadSessions(Flags.LOAD_LOCAL_SESSIONS_WHEN_BOOTSTRAPPING.bindTo(flagSource)); // Needs to be done before creating cache below
this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
this.directoryCache.addListener(this::childEvent);
this.directoryCache.start();
}
- private void loadSessions() {
+ private void loadSessions(BooleanFlag loadLocalSessions) {
ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
new DaemonThreadFactory("load-sessions-"));
- loadLocalSessions(executor);
+ loadSessions(loadLocalSessions.value(), executor);
+ }
+
+ void loadSessions(boolean loadLocalSessions, ExecutorService executor) {
+ if (loadLocalSessions)
+ loadLocalSessions(executor);
loadRemoteSessions(executor);
try {
executor.shutdown();
@@ -214,7 +223,7 @@ public class SessionRepository {
future.get();
log.log(Level.FINE, () -> "Local session " + sessionId + " loaded");
} catch (ExecutionException | InterruptedException e) {
- log.log(Level.WARNING, "Could not load session " + sessionId, e);
+ throw new RuntimeException("Could not load local session " + sessionId, e);
}
});
}
@@ -379,7 +388,7 @@ public class SessionRepository {
future.get();
log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded");
} catch (ExecutionException | InterruptedException e) {
- log.log(Level.WARNING, "Could not load session " + sessionId, e);
+ throw new RuntimeException("Could not load remote session " + sessionId, e);
}
});
}
@@ -676,6 +685,9 @@ public class SessionRepository {
tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
+ if (rewriteSearchDefinitions.value())
+ moveSearchDefinitionsToSchemasDir(tempDestinationDir);
+
log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
} finally {
@@ -685,6 +697,24 @@ public class SessionRepository {
}
}
+ // TODO: Remove in Vespa 8 (when we don't allow files in SEARCH_DEFINITIONS_DIR)
+ // Moves schemas from searchdefinitions/ to schemas/ if searchdefinitions/ exists
+ private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException {
+ File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile();
+ File sdDir = applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile();
+ if (sdDir.exists() && sdDir.isDirectory()) {
+ File[] sdFiles = sdDir.listFiles();
+ if (sdFiles != null) {
+ Files.createDirectories(schemasDir.toPath());
+ Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck(
+ () -> Files.move(file.toPath(),
+ schemasDir.toPath().resolve(file.toPath().getFileName()),
+ StandardCopyOption.REPLACE_EXISTING)));
+ }
+ Files.delete(sdDir.toPath());
+ }
+ }
+
/**
* Returns a new session instance for the given session id.
*/
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java
index 21db290d5e8..2f7b397cbd9 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRepository.java
@@ -467,12 +467,7 @@ public class TenantRepository {
*/
public static String logPre(ApplicationId app) {
if (DEFAULT_TENANT.equals(app.tenant())) return "";
- StringBuilder ret = new StringBuilder()
- .append(logPre(app.tenant()))
- .append("app:" + app.application().value())
- .append(":" + app.instance().value())
- .append(" ");
- return ret.toString();
+ return "app:" + app.toFullString() + " ";
}
/**
@@ -483,10 +478,7 @@ public class TenantRepository {
*/
public static String logPre(TenantName tenant) {
if (DEFAULT_TENANT.equals(tenant)) return "";
- StringBuilder ret = new StringBuilder()
- .append("tenant:" + tenant.value())
- .append(" ");
- return ret.toString();
+ return "tenant:" + tenant.value() + " ";
}
private void stateChanged(CuratorFramework framework, ConnectionState connectionState) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackage.java b/configserver/src/main/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackage.java
index 48c47587c8b..fa562c4813b 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackage.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackage.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.zookeeper;
import com.google.common.base.Joiner;
@@ -132,17 +132,12 @@ public class ZKApplicationPackage implements ApplicationPackage {
}
@Override
- public List<NamedReader> searchDefinitionContents() {
+ public List<NamedReader> getSchemas() {
List<NamedReader> schemas = new ArrayList<>();
for (String sd : zkApplication.getChildren(ConfigCurator.USERAPP_ZK_SUBPATH + "/" + SCHEMAS_DIR)) {
if (sd.endsWith(SD_NAME_SUFFIX))
schemas.add(new NamedReader(sd, new StringReader(zkApplication.getData(ConfigCurator.USERAPP_ZK_SUBPATH + "/" + SCHEMAS_DIR, sd))));
}
- // TODO: Remove when everything is written to SCHEMAS_DIR (July 2021)
- for (String sd : zkApplication.getChildren(ConfigCurator.USERAPP_ZK_SUBPATH + "/" + SEARCH_DEFINITIONS_DIR)) {
- if (sd.endsWith(SD_NAME_SUFFIX))
- schemas.add(new NamedReader(sd, new StringReader(zkApplication.getData(ConfigCurator.USERAPP_ZK_SUBPATH + "/" + SEARCH_DEFINITIONS_DIR, sd))));
- }
return schemas;
}
@@ -165,11 +160,6 @@ public class ZKApplicationPackage implements ApplicationPackage {
return fileRegistry;
}
- @Override
- public List<NamedReader> getSearchDefinitions() {
- return searchDefinitionContents();
- }
-
private Reader retrieveConfigDefReader(String def) {
try {
return zkApplication.getDataReader(ConfigCurator.DEFCONFIGS_ZK_SUBPATH, def);
@@ -262,8 +252,7 @@ public class ZKApplicationPackage implements ApplicationPackage {
@Override
public Reader getRankingExpression(String name) {
- Optional<Reader> reader = zkApplication.getOptionalDataReader(ConfigCurator.USERAPP_ZK_SUBPATH + "/" + SCHEMAS_DIR, name);
- return reader.orElseGet(() -> zkApplication.getDataReader(ConfigCurator.USERAPP_ZK_SUBPATH + "/" + SEARCH_DEFINITIONS_DIR, name));
+ return zkApplication.getDataReader(ConfigCurator.USERAPP_ZK_SUBPATH + "/" + SCHEMAS_DIR, name);
}
@Override
diff --git a/configserver/src/test/apps/app_sdbundles/services.xml b/configserver/src/test/apps/app_sdbundles/services.xml
index f1eabb7d1ef..29c736fb41b 100644
--- a/configserver/src/test/apps/app_sdbundles/services.xml
+++ b/configserver/src/test/apps/app_sdbundles/services.xml
@@ -11,7 +11,7 @@
</admin>
<content version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="music" mode="index"/>
</documents>
diff --git a/configserver/src/test/apps/deprecated-features-app/hosts.xml b/configserver/src/test/apps/deprecated-features-app/hosts.xml
new file mode 100644
index 00000000000..f4256c9fc81
--- /dev/null
+++ b/configserver/src/test/apps/deprecated-features-app/hosts.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<hosts>
+ <host name="mytesthost">
+ <alias>node1</alias>
+ </host>
+</hosts>
diff --git a/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd b/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd
new file mode 100644
index 00000000000..a2d4614c657
--- /dev/null
+++ b/configserver/src/test/apps/deprecated-features-app/searchdefinitions/music.sd
@@ -0,0 +1,50 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# A basic search definition - called music, should be saved to music.sd
+search music {
+
+ # It contains one document type only - called music as well
+ document music {
+
+ field title type string {
+ indexing: summary | index # How this field should be indexed
+ # index-to: title, default # Create two indexes
+ weight: 75 # Ranking importance of this field, used by the built-in nativeRank feature
+ }
+
+ field artist type string {
+ indexing: summary | attribute | index
+ # index-to: artist, default
+
+ weight: 25
+ }
+
+ field year type int {
+ indexing: summary | attribute
+ }
+
+ # Used to boost the rank score of popular documents (see the default rank profile)
+ field popularity type int {
+ indexing: summary | attribute
+ }
+
+ field url type uri {
+ indexing: summary | index
+ }
+
+ }
+
+ rank-profile default inherits default {
+ first-phase {
+ expression: nativeRank(title,artist) + attribute(popularity)
+ }
+
+ }
+
+ rank-profile textmatch inherits default {
+ first-phase {
+ expression: nativeRank(title,artist)
+ }
+
+ }
+
+}
diff --git a/configserver/src/test/apps/deprecated-features-app/services.xml b/configserver/src/test/apps/deprecated-features-app/services.xml
new file mode 100644
index 00000000000..509d7786be0
--- /dev/null
+++ b/configserver/src/test/apps/deprecated-features-app/services.xml
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<services version="1.0">
+
+ <admin version="2.0">
+ <adminserver hostalias="node1"/>
+ <logserver hostalias="node1" />
+ </admin>
+
+ <content version="1.0">
+ <redundancy>2</redundancy>
+ <documents>
+ <document type="music" mode="index"/>
+ </documents>
+ <nodes>
+ <node hostalias="node1" distribution-key="0"/>
+ </nodes>
+
+ </content>
+
+ <container version="1.0">
+ <document-processing compressdocuments="true">
+ <chain id="ContainerWrapperTest">
+ <documentprocessor id="com.yahoo.vespa.config.AppleDocProc"/>
+ </chain>
+ </document-processing>
+
+ <config name="project.specific">
+ <value>someval</value>
+ </config>
+
+ <nodes>
+ <node hostalias="node1" />
+ </nodes>
+
+ </container>
+
+</services>
diff --git a/configserver/src/test/apps/hosted-no-write-access-control/services.xml b/configserver/src/test/apps/hosted-no-write-access-control/services.xml
index b12f630ef80..429995c03a4 100644
--- a/configserver/src/test/apps/hosted-no-write-access-control/services.xml
+++ b/configserver/src/test/apps/hosted-no-write-access-control/services.xml
@@ -15,7 +15,7 @@
</container>
<content id="music" version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="music" mode="index" />
</documents>
diff --git a/configserver/src/test/apps/hosted/services.xml b/configserver/src/test/apps/hosted/services.xml
index a5c8fa1d26f..456a41c6994 100644
--- a/configserver/src/test/apps/hosted/services.xml
+++ b/configserver/src/test/apps/hosted/services.xml
@@ -18,7 +18,7 @@
</container>
<content id="music" version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="music" mode="index" />
</documents>
diff --git a/configserver/src/test/apps/zkapp/services.xml b/configserver/src/test/apps/zkapp/services.xml
index 58ecf41707d..037c8e75677 100644
--- a/configserver/src/test/apps/zkapp/services.xml
+++ b/configserver/src/test/apps/zkapp/services.xml
@@ -19,7 +19,7 @@
</container>
<content version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="music" mode="index"/>
</documents>
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
index 2cf4d7e7b69..e8dc08d4e8d 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
@@ -615,7 +615,7 @@ public class ApplicationRepositoryTest {
applicationRepository.prepare(sessionId2, prepareParams());
exceptionRule.expect(ActivationConflictException.class);
- exceptionRule.expectMessage(containsString("tenant:test1 app:testapp:default Cannot activate session 3 because the currently active session (4) has changed since session 3 was created (was 2 at creation time)"));
+ exceptionRule.expectMessage(containsString("app:test1.testapp.default Cannot activate session 3 because the currently active session (4) has changed since session 3 was created (was 2 at creation time)"));
applicationRepository.activate(applicationRepository.getTenant(applicationId()), sessionId2, timeoutBudget, false);
}
@@ -629,7 +629,7 @@ public class ApplicationRepositoryTest {
applicationRepository.prepare(sessionId, prepareParams());
exceptionRule.expect(IllegalArgumentException.class);
- exceptionRule.expectMessage(containsString("tenant:test1 app:testapp:default Session 2 is already active"));
+ exceptionRule.expectMessage(containsString("app:test1.testapp.default Session 2 is already active"));
applicationRepository.activate(applicationRepository.getTenant(applicationId()), sessionId, timeoutBudget, false);
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java
index f2722fb49e1..cca26cbb4f1 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/DeployTester.java
@@ -209,7 +209,7 @@ public class DeployTester {
@Override
public ModelCreateResult createAndValidateModel(ModelContext modelContext, ValidationParameters validationParameters) {
if ( ! validationParameters.ignoreValidationErrors())
- throw new IllegalArgumentException("Validation fails");
+ throw new IllegalArgumentException("Model building fails");
return new ModelCreateResult(createModel(modelContext), Collections.emptyList());
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java
index 7d14b1996b0..e20363af4e9 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java
@@ -64,7 +64,7 @@ public class ZooKeeperClientTest {
Map<Version, FileRegistry> fileRegistries = createFileRegistries();
app.writeMetaData();
zkc.initialize();
- zkc.write(app);
+ zkc.writeApplicationPackage(app);
zkc.write(fileRegistries);
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
index 6ce94aa8499..2b595992cdb 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
@@ -5,6 +5,7 @@ import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.config.FileReference;
import com.yahoo.io.IOUtils;
import com.yahoo.net.HostName;
+import com.yahoo.vespa.filedistribution.Downloads;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
@@ -85,7 +86,7 @@ public class FileServerTest {
.configServerDBDir(temporaryFolder.newFolder("serverdb").getAbsolutePath())
.configDefinitionsDir(temporaryFolder.newFolder("configdefinitions").getAbsolutePath());
FileServer fileServer = createFileServer(builder);
- assertEquals(0, fileServer.downloader().fileReferenceDownloader().connectionPool().getSize());
+ assertEquals(0, fileServer.downloader().connectionPool().getSize());
// Empty connection pool when only one server, no use in downloading from yourself
List<ConfigserverConfig.Zookeeperserver.Builder> servers = new ArrayList<>();
@@ -95,7 +96,7 @@ public class FileServerTest {
servers.add(serverBuilder);
builder.zookeeperserver(servers);
fileServer = createFileServer(builder);
- assertEquals(0, fileServer.downloader().fileReferenceDownloader().connectionPool().getSize());
+ assertEquals(0, fileServer.downloader().connectionPool().getSize());
// connection pool of size 1 when 2 servers
ConfigserverConfig.Zookeeperserver.Builder serverBuilder2 = new ConfigserverConfig.Zookeeperserver.Builder();
@@ -104,7 +105,7 @@ public class FileServerTest {
servers.add(serverBuilder2);
builder.zookeeperserver(servers);
fileServer = createFileServer(builder);
- assertEquals(1, fileServer.downloader().fileReferenceDownloader().connectionPool().getSize());
+ assertEquals(1, fileServer.downloader().connectionPool().getSize());
}
private void writeFile(String dir) throws IOException {
@@ -137,7 +138,7 @@ public class FileServerTest {
private static class MockFileDownloader extends FileDownloader {
public MockFileDownloader(File downloadDirectory) {
- super(emptyConnectionPool(), downloadDirectory, downloadDirectory, Duration.ofMillis(100), Duration.ofMillis(100));
+ super(emptyConnectionPool(), downloadDirectory, new Downloads(), Duration.ofMillis(100), Duration.ofMillis(100));
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java
index 890a31645fd..723adc1400b 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/MockFileRegistry.java
@@ -4,8 +4,10 @@ package com.yahoo.vespa.config.server.filedistribution;
import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.net.HostName;
+import net.jpountz.xxhash.XXHashFactory;
import java.io.File;
+import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
@@ -45,4 +47,14 @@ public class MockFileRegistry implements FileRegistry {
throw new IllegalArgumentException("FileReference addUri(String uri) is not implemented for " + getClass().getCanonicalName());
}
+ @Override
+ public FileReference addBlob(ByteBuffer blob) {
+ long blobHash = XXHashFactory.fastestJavaInstance().hash64().hash(blob, 0);
+ String relativePath = "./" + Long.toHexString(blobHash) + ".blob";
+ FileReference fileReference = addFileInterface.addBlob(blob, relativePath);
+
+ entries.add(new Entry(relativePath, fileReference));
+ return fileReference;
+ }
+
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
index 7fdfbcdbf03..b5bcae65009 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/metrics/ClusterDeploymentMetricsRetrieverTest.java
@@ -70,7 +70,9 @@ public class ClusterDeploymentMetricsRetrieverTest {
new DeploymentMetricsAggregator()
.addDocumentCount(6000.0)
.addMemoryUsage(0.89074, 0.8)
- .addDiskUsage(0.83517, 0.75),
+ .addDiskUsage(0.83517, 0.75)
+ .addReindexingProgress("test_artifacts", 0.71)
+ .addReindexingProgress("announcements", 0),
aggregatorMap.get(expectedContentCluster)
);
@@ -113,6 +115,7 @@ public class ClusterDeploymentMetricsRetrieverTest {
compareOptionals(expected.diskUsage(), actual.diskUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit()));
compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.util(), b.util()));
compareOptionals(expected.memoryUsage(), actual.memoryUsage(), (a, b) -> assertDoubles.accept(a.feedBlockLimit(), b.feedBlockLimit()));
+ assertEquals(expected.reindexingProgress(), actual.reindexingProgress());
}
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
index 08794cf0b78..f68e79ae266 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/PrepareParamsTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.config.server.session;
import com.yahoo.config.model.api.ApplicationRoles;
import com.yahoo.config.model.api.ContainerEndpoint;
import com.yahoo.config.model.api.EndpointCertificateMetadata;
+import com.yahoo.config.model.api.TenantSecretStore;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
import com.yahoo.container.jdisc.HttpRequest;
@@ -24,6 +25,7 @@ import com.yahoo.slime.SlimeInserter;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.config.server.tenant.ContainerEndpointSerializer;
import com.yahoo.vespa.config.server.tenant.EndpointCertificateMetadataSerializer;
+import com.yahoo.vespa.config.server.tenant.TenantSecretStoreSerializer;
import org.junit.Test;
import javax.security.auth.x500.X500Principal;
@@ -203,6 +205,26 @@ public class PrepareParamsTest {
assertEquals(certificate, prepareParams.operatorCertificates().get(0));
}
+ @Test
+ public void testSecretStores() throws IOException {
+ List<TenantSecretStore> secretStores = List.of(new TenantSecretStore("name", "awsId", "role"));
+ Slime secretStoreSlime = TenantSecretStoreSerializer.toSlime(secretStores);
+ String secretStoreParam = new String(SlimeUtils.toJsonBytes(secretStoreSlime), StandardCharsets.UTF_8);
+
+ var prepareParams = createParams(request + "&" + PrepareParams.TENANT_SECRET_STORES_PARAM_NAME + "=" + URLEncoder.encode(secretStoreParam, StandardCharsets.UTF_8), TenantName.from("foo"));
+ assertEquals(1, prepareParams.tenantSecretStores().size());
+ TenantSecretStore tenantSecretStore = prepareParams.tenantSecretStores().get(0);
+ assertEquals("name", tenantSecretStore.getName());
+ assertEquals("awsId", tenantSecretStore.getAwsId());
+ assertEquals("role", tenantSecretStore.getRole());
+
+ // Verify using json object
+ var root = SlimeUtils.jsonToSlime(json);
+ new Injector().inject(secretStoreSlime.get(), new ObjectInserter(root.get(), PrepareParams.TENANT_SECRET_STORES_PARAM_NAME));
+ PrepareParams prepareParamsJson = PrepareParams.fromJson(SlimeUtils.toJsonBytes(root), TenantName.from("foo"), Duration.ofSeconds(60));
+ assertPrepareParamsEqual(prepareParams, prepareParamsJson);
+ }
+
private void assertPrepareParamsEqual(PrepareParams urlParams, PrepareParams jsonParams) {
assertEquals(urlParams.ignoreValidationErrors(), jsonParams.ignoreValidationErrors());
assertEquals(urlParams.isDryRun(), jsonParams.isDryRun());
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java
index a3025cbf364..4790d8f4ae2 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java
@@ -3,6 +3,8 @@ package com.yahoo.vespa.config.server.session;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.component.Version;
+import com.yahoo.concurrent.InThreadExecutorService;
+import com.yahoo.config.application.api.ApplicationFile;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.NullConfigModelRegistry;
import com.yahoo.config.model.api.Model;
@@ -14,6 +16,8 @@ import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
+import com.yahoo.io.reader.NamedReader;
+import com.yahoo.path.Path;
import com.yahoo.text.Utf8;
import com.yahoo.vespa.config.server.ApplicationRepository;
import com.yahoo.vespa.config.server.MockProvisioner;
@@ -28,10 +32,13 @@ import com.yahoo.vespa.config.server.zookeeper.ConfigCurator;
import com.yahoo.vespa.config.util.ConfigUtils;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.VespaModelFactory;
import org.junit.Rule;
import org.junit.Test;
+import org.junit.rules.ExpectedException;
import org.junit.rules.TemporaryFolder;
import java.io.File;
@@ -42,15 +49,18 @@ import java.time.LocalDate;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.function.LongPredicate;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
/**
* @author Ulf Lilleengen
@@ -66,10 +76,14 @@ public class SessionRepositoryTest {
private TenantRepository tenantRepository;
private ApplicationRepository applicationRepository;
private SessionRepository sessionRepository;
+ private final InMemoryFlagSource flagSource = new InMemoryFlagSource();
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
+ @Rule
+ public ExpectedException expectedException = ExpectedException.none();
+
public void setup() throws Exception {
setup(new ModelFactoryRegistry(List.of(new VespaModelFactory(new NullConfigModelRegistry()))));
}
@@ -86,6 +100,7 @@ public class SessionRepositoryTest {
tenantRepository = new TestTenantRepository.Builder()
.withConfigserverConfig(configserverConfig)
.withCurator(curator)
+ .withFlagSource(flagSource)
.withFileDistributionFactory(new MockFileDistributionFactory(configserverConfig))
.withModelFactoryRegistry(modelFactoryRegistry)
.build();
@@ -94,6 +109,7 @@ public class SessionRepositoryTest {
.withTenantRepository(tenantRepository)
.withProvisioner(new MockProvisioner())
.withOrchestrator(new OrchestratorMock())
+ .withFlagSource(flagSource)
.build();
sessionRepository = tenantRepository.getTenant(tenantName).getSessionRepository();
}
@@ -113,6 +129,10 @@ public class SessionRepositoryTest {
assertEquals(applicationId.application(), applicationSet.getForVersionOrLatest(Optional.empty(), Instant.now()).getId().application());
assertNotNull(applicationSet.getForVersionOrLatest(Optional.empty(), Instant.now()).getModel());
+ LocalSession session = sessionRepository.getLocalSession(secondSessionId);
+ Collection<NamedReader> a = session.applicationPackage.get().getSchemas();
+ assertEquals(1, a.size());
+
sessionRepository.close();
// All created sessions are deleted
assertNull(sessionRepository.getLocalSession(firstSessionId));
@@ -157,21 +177,25 @@ public class SessionRepositoryTest {
assertStatusChange(sessionId, Session.Status.ACTIVATE);
}
- // If reading a session throws an exception it should be handled and not prevent other applications
- // from loading. In this test we just show that we end up with one session in remote session
- // repo even if it had bad data (by making getSessionIdForApplication() in FailingTenantApplications
- // throw an exception).
+ // If reading a session throws an exception when bootstrapping the SessionRepository, bootstrapping should fail,
+ // to make sure the config server does not come up and serve invalid/old config or, if this is hosted,
+ // serve empty config (which would take down services on all nodes belonging to an application)
@Test
- public void testBadApplicationRepoOnActivate() throws Exception {
+ public void testInvalidSessionWhenBootstrappingSessionRepo() throws Exception {
setup();
- long sessionId = 3L;
- TenantName mytenant = TenantName.from("mytenant");
- curator.set(TenantRepository.getApplicationsPath(mytenant).append("mytenant:appX:default"), new byte[0]); // Invalid data
- tenantRepository.addTenant(mytenant);
- curator.create(TenantRepository.getSessionsPath(mytenant));
+
+ // Create a session with invalid data and set the application's active session to this session
+ String sessionIdString = "3";
+ Path sessionPath = TenantRepository.getSessionsPath(tenantName).append(sessionIdString);
+ curator.create(sessionPath);
+ curator.set(sessionPath.append("applicationId"), new byte[0]); // Invalid data
+ Path applicationsPath = TenantRepository.getApplicationsPath(tenantName);
+ curator.set(applicationsPath.append(applicationId.serializedForm()), Utf8.toBytes(sessionIdString));
+
+ expectedException.expectMessage("Could not load remote session " + sessionIdString);
+ expectedException.expect(RuntimeException.class);
+ sessionRepository.loadSessions(false, new InThreadExecutorService());
assertThat(sessionRepository.getRemoteSessionsFromZooKeeper().size(), is(0));
- createSession(sessionId, true);
- assertThat(sessionRepository.getRemoteSessionsFromZooKeeper().size(), is(1));
}
@Test(expected = InvalidApplicationException.class)
@@ -241,6 +265,40 @@ public class SessionRepositoryTest {
// Does not cause an error because model version 3 is skipped
}
+ @Test
+ public void require_that_searchdefinitions_are_written_to_schemas_dir() throws Exception {
+ setup();
+
+ // App has schemas in searchdefinitions/, should NOT be moved to schemas/ on deploy
+ flagSource.withBooleanFlag(Flags.MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR.id(), false);
+ long sessionId = deploy(applicationId, new File("src/test/apps/deprecated-features-app"));
+ LocalSession session = sessionRepository.getLocalSession(sessionId);
+
+ assertEquals(1, session.applicationPackage.get().getSchemas().size());
+
+ ApplicationFile schema = getSchema(session, "schemas");
+ assertFalse(schema.exists());
+ ApplicationFile sd = getSchema(session, "searchdefinitions");
+ assertTrue(sd.exists());
+
+
+ // App has schemas in searchdefinitions/, should be moved to schemas/ on deploy
+ flagSource.withBooleanFlag(Flags.MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR.id(), true);
+ sessionId = deploy(applicationId, new File("src/test/apps/deprecated-features-app"));
+ session = sessionRepository.getLocalSession(sessionId);
+
+ assertEquals(1, session.applicationPackage.get().getSchemas().size());
+
+ schema = getSchema(session, "schemas");
+ assertTrue(schema.exists());
+ sd = getSchema(session, "searchdefinitions");
+ assertFalse(sd.exists());
+ }
+
+ ApplicationFile getSchema(Session session, String subDirectory) {
+ return session.applicationPackage.get().getFile(Path.fromString(subDirectory).append("music.sd"));
+ }
+
private void createSession(long sessionId, boolean wait) {
SessionZooKeeperClient zkc = new SessionZooKeeperClient(curator,
ConfigCurator.create(curator),
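The new searchdefinitions test above is driven entirely by the MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR feature flag. A minimal standalone sketch of that flag pattern follows; withBooleanFlag(...) and id() are taken from the test above, while bindTo(...)/value() are assumed to be the usual Vespa flag accessors rather than code from this change:

import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;

public class MoveSchemasFlagSketch {
    public static void main(String[] args) {
        // Program the in-memory flag source the same way the test does.
        InMemoryFlagSource flagSource = new InMemoryFlagSource();
        flagSource.withBooleanFlag(Flags.MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR.id(), true);

        // Assumed accessor pattern: bind the unbound flag to a source and read its value.
        BooleanFlag moveToSchemasDir = Flags.MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR.bindTo(flagSource);

        // true  -> schemas placed in searchdefinitions/ are rewritten to schemas/ on deploy
        // false -> they stay in searchdefinitions/
        System.out.println("move to schemas/: " + moveToSchemasDir.value());
    }
}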
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java
index 6f7e0541cc7..464b3d1ab64 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java
@@ -191,7 +191,6 @@ public class TenantRepositoryTest {
public void testFailingBootstrap() {
tenantRepository.close(); // stop using the one setup in Before method
- // Should get exception if config is true
expectedException.expect(RuntimeException.class);
expectedException.expectMessage("Could not create all tenants when bootstrapping, failed to create: [default]");
new FailingDuringBootstrapTenantRepository(configserverConfig);
@@ -213,7 +212,7 @@ public class TenantRepositoryTest {
Metrics.createTestMetrics(),
new StripedExecutor<>(new InThreadExecutorService()),
new StripedExecutor<>(new InThreadExecutorService()),
- new FileDistributionFactory(new ConfigserverConfig.Builder().build()),
+ new FileDistributionFactory(configserverConfig),
new InMemoryFlagSource(),
new InThreadExecutorService(),
new MockSecretStore(),
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java
index 80d01fa4d36..458cdb82066 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/zookeeper/ZKApplicationPackageTest.java
@@ -78,7 +78,7 @@ public class ZKApplicationPackageTest {
assertTrue(Pattern.compile(".*<slobroks>.*",Pattern.MULTILINE+Pattern.DOTALL).matcher(IOUtils.readAll(zkApp.getFile(Path.fromString("services.xml")).createReader())).matches());
DeployState deployState = new DeployState.Builder().applicationPackage(zkApp).build();
assertEquals(deployState.getSchemas().size(), 5);
- assertEquals(zkApp.searchDefinitionContents().size(), 5);
+ assertEquals(zkApp.getSchemas().size(), 5);
assertEquals(IOUtils.readAll(zkApp.getRankingExpression("foo.expression")), "foo()+1\n");
assertEquals(zkApp.getFiles(Path.fromString(""), "xml").size(), 3);
assertEquals(zkApp.getFileReference(Path.fromString("components/file.txt")).getAbsolutePath(), "/home/vespa/test/file.txt");
@@ -124,8 +124,7 @@ public class ZKApplicationPackageTest {
}
/**
- * Takes for instance the dir /app and puts the contents into the given ZK path. Ignores files starting with dot,
- * and dirs called CVS.
+ * Takes for instance the dir /app and puts the contents into the given ZK path. Ignores files starting with dot.
*
* @param dir directory which holds the summary class part files
* @param path zookeeper path
@@ -142,7 +141,6 @@ public class ZKApplicationPackageTest {
}
for (File file : listFiles(dir, filenameFilter)) {
if (file.getName().startsWith(".")) continue; //.svn , .git ...
- if ("CVS".equals(file.getName())) continue;
if (file.isFile()) {
String contents = IOUtils.readFile(file);
zk.putData(path, file.getName(), contents);
diff --git a/configserver/src/test/resources/deploy/advancedapp/services.xml b/configserver/src/test/resources/deploy/advancedapp/services.xml
index b8e93b14317..77fa426041f 100644
--- a/configserver/src/test/resources/deploy/advancedapp/services.xml
+++ b/configserver/src/test/resources/deploy/advancedapp/services.xml
@@ -18,7 +18,7 @@
</container>
<content version="1.0">
- <redundancy>1</redundancy>
+ <redundancy>2</redundancy>
<documents>
<document type="keyvalue" mode="index"/>
</documents>
diff --git a/configserver/src/test/resources/metrics/clustercontroller_metrics.json b/configserver/src/test/resources/metrics/clustercontroller_metrics.json
index 9afcb34d77d..65468749940 100644
--- a/configserver/src/test/resources/metrics/clustercontroller_metrics.json
+++ b/configserver/src/test/resources/metrics/clustercontroller_metrics.json
@@ -20,6 +20,48 @@
},
{
"values": {
+ "reindexing.progress.last": 0.71
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id",
+ "documenttype": "test_artifacts"
+ }
+ },
+ {
+ "values": {
+ "reindexing.progress.last": 1
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id",
+ "documenttype": "builds"
+ }
+ },
+ {
+ "values": {
+ "reindexing.progress.last": 0
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id",
+ "documenttype": "announcements",
+ "state": "running"
+ }
+ },
+ {
+ "values": {
+ "reindexing.progress.last": -1
+ },
+ "dimensions": {
+ "clustertype": "content",
+ "clusterid": "content_cluster_id",
+ "documenttype": "announcements",
+ "state": "successful"
+ }
+ },
+ {
+ "values": {
"some.other.metrics": 1
},
"dimensions": {
diff --git a/container-apache-http-client-bundle/CMakeLists.txt b/container-apache-http-client-bundle/CMakeLists.txt
new file mode 100644
index 00000000000..8daac7a5030
--- /dev/null
+++ b/container-apache-http-client-bundle/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(container-apache-http-client-bundle)
diff --git a/container-apache-http-client-bundle/README.md b/container-apache-http-client-bundle/README.md
new file mode 100644
index 00000000000..99606a95820
--- /dev/null
+++ b/container-apache-http-client-bundle/README.md
@@ -0,0 +1,3 @@
+# container-apache-http-client-bundle
+
+Apache HttpClient 4.x/5.x packaged as an OSGi bundle
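The packages exported by the package-info files below make up the public API surface of Apache HttpClient 4.x/5.x, so a container component can use the client directly. A minimal sketch against the stock HttpClient 5 API (not code from this change; the URL is illustrative):

import org.apache.hc.client5.http.classic.methods.HttpGet;
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
import org.apache.hc.client5.http.impl.classic.HttpClients;

public class BundleClientSketch {
    public static void main(String[] args) throws Exception {
        // Uses only packages that this bundle exports via the package-info files below.
        try (CloseableHttpClient client = HttpClients.createDefault()) {
            int status = client.execute(new HttpGet("https://example.com"),
                                        response -> response.getCode());
            System.out.println("status: " + status);
        }
    }
}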
diff --git a/container-apache-http-client-bundle/pom.xml b/container-apache-http-client-bundle/pom.xml
new file mode 100644
index 00000000000..5d7598d6f45
--- /dev/null
+++ b/container-apache-http-client-bundle/pom.xml
@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<!-- Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+ http://maven.apache.org/xsd/maven-4.0.0.xsd">
+ <modelVersion>4.0.0</modelVersion>
+ <artifactId>container-apache-http-client-bundle</artifactId>
+ <packaging>container-plugin</packaging>
+ <version>7-SNAPSHOT</version>
+
+ <parent>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>parent</artifactId>
+ <version>7-SNAPSHOT</version>
+ <relativePath>../parent/pom.xml</relativePath>
+ </parent>
+ <properties>
+ <maven.javadoc.skip>true</maven.javadoc.skip> <!-- Javadoc plugin fails because of no source code in module -->
+ </properties>
+ <dependencies>
+ <!-- provided -->
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>annotations</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <!-- Not directly used in this module, but needed to get Import-Packages for JDK packages it exports. -->
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>jdisc_core</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ <scope>provided</scope>
+ </dependency>
+
+ <!-- compile -->
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpmime</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.apache.httpcomponents.client5</groupId>
+ <artifactId>httpclient5</artifactId>
+ <scope>compile</scope>
+ <exclusions>
+ <exclusion>
+ <groupId>org.slf4j</groupId>
+ <artifactId>slf4j-api</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ </dependencies>
+
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>bundle-plugin</artifactId>
+ <extensions>true</extensions>
+ </plugin>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-compiler-plugin</artifactId>
+ </plugin>
+ </plugins>
+ </build>
+
+</project>
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/async/methods/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/async/methods/package-info.java
new file mode 100644
index 00000000000..050fc29c7dd
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/async/methods/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.async.methods;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/async/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/async/package-info.java
new file mode 100644
index 00000000000..f1387bb1464
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/async/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.async;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/classic/methods/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/classic/methods/package-info.java
new file mode 100644
index 00000000000..b014ce24aff
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/classic/methods/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.classic.methods;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/classic/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/classic/package-info.java
new file mode 100644
index 00000000000..90b162d3eec
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/classic/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.classic;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/config/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/config/package-info.java
new file mode 100644
index 00000000000..f1a450fbaf0
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/config/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.config;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/entity/mime/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/entity/mime/package-info.java
new file mode 100644
index 00000000000..489d168e19b
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/entity/mime/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.entity.mime;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/entity/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/entity/package-info.java
new file mode 100644
index 00000000000..f4542fa34dc
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/entity/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.entity;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/async/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/async/package-info.java
new file mode 100644
index 00000000000..e9acf08ff88
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/async/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.impl.async;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/classic/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/classic/package-info.java
new file mode 100644
index 00000000000..aeb608544ab
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/classic/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.impl.classic;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/io/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/io/package-info.java
new file mode 100644
index 00000000000..1106f18631a
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/io/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.impl.io;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/nio/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/nio/package-info.java
new file mode 100644
index 00000000000..64636ffec3d
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/nio/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.impl.nio;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/package-info.java
new file mode 100644
index 00000000000..5f0ae9df63b
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/impl/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.impl;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/io/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/io/package-info.java
new file mode 100644
index 00000000000..386f6399057
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/io/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.io;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/nio/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/nio/package-info.java
new file mode 100644
index 00000000000..41b75048e97
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/nio/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.nio;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/package-info.java
new file mode 100644
index 00000000000..ec4bab39b4d
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/protocol/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/protocol/package-info.java
new file mode 100644
index 00000000000..f72c2742b24
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/protocol/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.protocol;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/routing/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/routing/package-info.java
new file mode 100644
index 00000000000..412d824c38c
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/routing/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.routing;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/socket/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/socket/package-info.java
new file mode 100644
index 00000000000..ed95115cd6d
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/socket/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.socket;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/ssl/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/ssl/package-info.java
new file mode 100644
index 00000000000..c7184a484f7
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/client5/http/ssl/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.client5.http.ssl;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/concurrent/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/concurrent/package-info.java
new file mode 100644
index 00000000000..a61bb459a68
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/concurrent/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.concurrent;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/config/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/config/package-info.java
new file mode 100644
index 00000000000..f5a797131d2
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/config/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.config;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/entity/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/entity/package-info.java
new file mode 100644
index 00000000000..576666f8c08
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/entity/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.io.entity;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/package-info.java
new file mode 100644
index 00000000000..c7b0debd608
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.io;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/support/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/support/package-info.java
new file mode 100644
index 00000000000..71cb8438727
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/io/support/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.io.support;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/message/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/message/package-info.java
new file mode 100644
index 00000000000..27cf37f846f
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/message/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.message;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/package-info.java
new file mode 100644
index 00000000000..1b949e0a38a
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.nio;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/ssl/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/ssl/package-info.java
new file mode 100644
index 00000000000..ad657066eb7
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/ssl/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.nio.ssl;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/support/classic/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/support/classic/package-info.java
new file mode 100644
index 00000000000..98836b631df
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/support/classic/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.nio.support.classic;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/support/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/support/package-info.java
new file mode 100644
index 00000000000..3e089b8ca23
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/nio/support/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.nio.support;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/package-info.java
new file mode 100644
index 00000000000..30fac3d2de9
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/protocol/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/protocol/package-info.java
new file mode 100644
index 00000000000..c5b27fbb871
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http/protocol/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http.protocol;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http2/config/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http2/config/package-info.java
new file mode 100644
index 00000000000..d5482b5f2a2
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http2/config/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http2.config;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http2/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http2/package-info.java
new file mode 100644
index 00000000000..3c2d9389c74
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/http2/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.http2;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/net/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/net/package-info.java
new file mode 100644
index 00000000000..0009845386c
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/net/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.net;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/reactor/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/reactor/package-info.java
new file mode 100644
index 00000000000..e05e877f730
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/reactor/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.reactor;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/util/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/util/package-info.java
new file mode 100644
index 00000000000..75e38c99e65
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/hc/core5/util/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.hc.core5.util;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/annotation/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/annotation/package-info.java
new file mode 100644
index 00000000000..1599489de0f
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/annotation/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.annotation;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/auth/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/auth/package-info.java
new file mode 100644
index 00000000000..2bb0a297c86
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/auth/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.auth;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/client/config/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/config/package-info.java
new file mode 100644
index 00000000000..33b3ec7cae9
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/config/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.client.config;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/client/methods/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/methods/package-info.java
new file mode 100644
index 00000000000..3b8ad361db5
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/methods/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.client.methods;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/client/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/package-info.java
new file mode 100644
index 00000000000..6a62880648e
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.client;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/client/protocol/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/protocol/package-info.java
new file mode 100644
index 00000000000..3d8ffbbcd6f
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/protocol/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.client.protocol;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/client/utils/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/utils/package-info.java
new file mode 100644
index 00000000000..f7d6fac2b0b
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/client/utils/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.client.utils;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/config/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/config/package-info.java
new file mode 100644
index 00000000000..7fba26f38a7
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/config/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.config;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/package-info.java
new file mode 100644
index 00000000000..5e64cb7eab2
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.conn;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/routing/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/routing/package-info.java
new file mode 100644
index 00000000000..204dc62666b
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/routing/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.conn.routing;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/socket/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/socket/package-info.java
new file mode 100644
index 00000000000..7ac337e6ca6
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/socket/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.conn.socket;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/ssl/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/ssl/package-info.java
new file mode 100644
index 00000000000..23d1644ba84
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/conn/ssl/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.conn.ssl;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/entity/mime/content/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/entity/mime/content/package-info.java
new file mode 100644
index 00000000000..6e31d8e733a
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/entity/mime/content/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.entity.mime.content;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/entity/mime/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/entity/mime/package-info.java
new file mode 100644
index 00000000000..9a41d01fc4d
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/entity/mime/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.entity.mime;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/entity/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/entity/package-info.java
new file mode 100644
index 00000000000..e5d7bce4f01
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/entity/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.entity;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/impl/client/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/impl/client/package-info.java
new file mode 100644
index 00000000000..5ac4dc40fac
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/impl/client/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.impl.client;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/impl/conn/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/impl/conn/package-info.java
new file mode 100644
index 00000000000..b3a27272300
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/impl/conn/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.impl.conn;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/impl/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/impl/package-info.java
new file mode 100644
index 00000000000..ebc2aebb981
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/impl/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.impl;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/message/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/message/package-info.java
new file mode 100644
index 00000000000..5ede836011d
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/message/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.message;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/package-info.java
new file mode 100644
index 00000000000..dbf7b50caa9
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/params/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/params/package-info.java
new file mode 100644
index 00000000000..92123f26b4d
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/params/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.params;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/pool/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/pool/package-info.java
new file mode 100644
index 00000000000..3c2509cd960
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/pool/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.pool;
+
+import com.yahoo.osgi.annotation.ExportPackage; \ No newline at end of file
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/protocol/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/protocol/package-info.java
new file mode 100644
index 00000000000..1985b5b056a
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/protocol/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.protocol;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-apache-http-client-bundle/src/main/java/org/apache/http/util/package-info.java b/container-apache-http-client-bundle/src/main/java/org/apache/http/util/package-info.java
new file mode 100644
index 00000000000..90e4f8e1cff
--- /dev/null
+++ b/container-apache-http-client-bundle/src/main/java/org/apache/http/util/package-info.java
@@ -0,0 +1,8 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+@ExportPackage
+package org.apache.http.util;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/container-core/abi-spec.json b/container-core/abi-spec.json
index efe6701342f..02d43104a3f 100644
--- a/container-core/abi-spec.json
+++ b/container-core/abi-spec.json
@@ -1039,6 +1039,7 @@
"public com.yahoo.jdisc.http.ConnectorConfig$Builder maxRequestsPerConnection(int)",
"public com.yahoo.jdisc.http.ConnectorConfig$Builder maxConnectionLife(double)",
"public com.yahoo.jdisc.http.ConnectorConfig$Builder http2Enabled(boolean)",
+ "public com.yahoo.jdisc.http.ConnectorConfig$Builder http2(com.yahoo.jdisc.http.ConnectorConfig$Http2$Builder)",
"public final boolean dispatchGetConfig(com.yahoo.config.ConfigInstance$Producer)",
"public final java.lang.String getDefMd5()",
"public final java.lang.String getDefName()",
@@ -1053,7 +1054,8 @@
"public com.yahoo.jdisc.http.ConnectorConfig$TlsClientAuthEnforcer$Builder tlsClientAuthEnforcer",
"public com.yahoo.jdisc.http.ConnectorConfig$HealthCheckProxy$Builder healthCheckProxy",
"public com.yahoo.jdisc.http.ConnectorConfig$ProxyProtocol$Builder proxyProtocol",
- "public com.yahoo.jdisc.http.ConnectorConfig$SecureRedirect$Builder secureRedirect"
+ "public com.yahoo.jdisc.http.ConnectorConfig$SecureRedirect$Builder secureRedirect",
+ "public com.yahoo.jdisc.http.ConnectorConfig$Http2$Builder http2"
]
},
"com.yahoo.jdisc.http.ConnectorConfig$HealthCheckProxy$Builder": {
@@ -1089,6 +1091,37 @@
],
"fields": []
},
+ "com.yahoo.jdisc.http.ConnectorConfig$Http2$Builder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "com.yahoo.config.ConfigBuilder"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>()",
+ "public void <init>(com.yahoo.jdisc.http.ConnectorConfig$Http2)",
+ "public com.yahoo.jdisc.http.ConnectorConfig$Http2$Builder streamIdleTimeout(double)",
+ "public com.yahoo.jdisc.http.ConnectorConfig$Http2$Builder maxConcurrentStreams(int)",
+ "public com.yahoo.jdisc.http.ConnectorConfig$Http2 build()"
+ ],
+ "fields": []
+ },
+ "com.yahoo.jdisc.http.ConnectorConfig$Http2": {
+ "superClass": "com.yahoo.config.InnerNode",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final"
+ ],
+ "methods": [
+ "public void <init>(com.yahoo.jdisc.http.ConnectorConfig$Http2$Builder)",
+ "public double streamIdleTimeout()",
+ "public int maxConcurrentStreams()"
+ ],
+ "fields": []
+ },
"com.yahoo.jdisc.http.ConnectorConfig$Producer": {
"superClass": "java.lang.Object",
"interfaces": [
@@ -1361,7 +1394,8 @@
"public com.yahoo.jdisc.http.ConnectorConfig$SecureRedirect secureRedirect()",
"public int maxRequestsPerConnection()",
"public double maxConnectionLife()",
- "public boolean http2Enabled()"
+ "public boolean http2Enabled()",
+ "public com.yahoo.jdisc.http.ConnectorConfig$Http2 http2()"
],
"fields": [
"public static final java.lang.String CONFIG_DEF_MD5",
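The Http2 additions in the abi spec above translate into builder calls like the following hedged sketch; the outer ConnectorConfig(Builder) constructor and the chosen values are assumptions, only http2Enabled/http2/streamIdleTimeout/maxConcurrentStreams and the http2() accessor come from this diff:

import com.yahoo.jdisc.http.ConnectorConfig;

public class Http2ConfigSketch {
    public static void main(String[] args) {
        // Build a connector config with the new nested Http2 settings.
        ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()
                .http2Enabled(true)
                .http2(new ConnectorConfig.Http2.Builder()
                        .streamIdleTimeout(600.0)       // seconds, value chosen for illustration
                        .maxConcurrentStreams(4096)));  // value chosen for illustration

        System.out.println("http2 stream idle timeout: " + config.http2().streamIdleTimeout());
        System.out.println("http2 max concurrent streams: " + config.http2().maxConcurrentStreams());
    }
}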
diff --git a/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java
index 71d0e539b5a..179abba42c8 100644
--- a/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java
+++ b/container-core/src/main/java/com/yahoo/container/di/componentgraph/core/ComponentGraph.java
@@ -258,7 +258,7 @@ public class ComponentGraph {
if (component.isEmpty()) {
Object instance;
try {
- log.log(Level.INFO, "Trying the fallback injector to create" + messageForNoGlobalComponent(clazz, node));
+ log.log(Level.FINE, () -> "Trying the fallback injector to create" + messageForNoGlobalComponent(clazz, node));
instance = fallbackInjector.getInstance(key);
} catch (ConfigurationException e) {
throw removeStackTrace(new IllegalStateException(
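The ComponentGraph change above switches to the supplier overload of Logger.log, so the message string is only built when FINE is actually loggable. The standard java.util.logging pattern, shown on its own (class and message are illustrative):

import java.util.logging.Level;
import java.util.logging.Logger;

public class LazyLogSketch {
    private static final Logger log = Logger.getLogger(LazyLogSketch.class.getName());

    public static void main(String[] args) {
        String detail = "fallback injector";
        // The lambda is only evaluated if Level.FINE is enabled for this logger.
        log.log(Level.FINE, () -> "Trying the " + detail + " to create the component");
    }
}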
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java b/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java
index faa30bd109d..e976caf3f9f 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/RequestHandlerTestDriver.java
@@ -74,14 +74,14 @@ public class RequestHandlerTestDriver implements AutoCloseable {
}
public MockResponseHandler sendRequest(String uri, HttpRequest.Method method, ByteBuffer body) {
- responseHandler = new MockResponseHandler();
+ MockResponseHandler responseHandler = new MockResponseHandler();
Request request = HttpRequest.newServerRequest(driver, URI.create(uri), method);
request.context().put("contextVariable", 37); // TODO: Add a method for accepting a Request instead
ContentChannel requestContent = request.connect(responseHandler);
requestContent.write(body, null);
requestContent.close(null);
request.release();
- return responseHandler;
+ return this.responseHandler = responseHandler;
}
public MockResponseHandler sendRequest(String uri, HttpRequest.Method method, ByteBuffer body, String contentType) {
diff --git a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
index 5b30ce5963d..6f9d7840573 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/ConnectionLogEntry.java
@@ -33,6 +33,7 @@ public class ConnectionLogEntry {
private final Instant sslPeerNotAfter;
private final String sslSniServerName;
private final SslHandshakeFailure sslHandshakeFailure;
+ private final List<String> sslSubjectAlternativeNames;
private final String httpProtocol;
private final String proxyProtocolVersion;
@@ -59,6 +60,7 @@ public class ConnectionLogEntry {
this.sslPeerNotAfter = builder.sslPeerNotAfter;
this.sslSniServerName = builder.sslSniServerName;
this.sslHandshakeFailure = builder.sslHandshakeFailure;
+ this.sslSubjectAlternativeNames = builder.sslSubjectAlternativeNames;
this.httpProtocol = builder.httpProtocol;
this.proxyProtocolVersion = builder.proxyProtocolVersion;
}
@@ -88,6 +90,7 @@ public class ConnectionLogEntry {
public Optional<Instant> sslPeerNotAfter() { return Optional.ofNullable(sslPeerNotAfter); }
public Optional<String> sslSniServerName() { return Optional.ofNullable(sslSniServerName); }
public Optional<SslHandshakeFailure> sslHandshakeFailure() { return Optional.ofNullable(sslHandshakeFailure); }
+ public List<String> sslSubjectAlternativeNames() { return sslSubjectAlternativeNames == null ? List.of() : sslSubjectAlternativeNames; }
public Optional<String> httpProtocol() { return Optional.ofNullable(httpProtocol); }
public Optional<String> proxyProtocolVersion() { return Optional.ofNullable(proxyProtocolVersion); }
@@ -139,6 +142,7 @@ public class ConnectionLogEntry {
private Instant sslPeerNotAfter;
private String sslSniServerName;
private SslHandshakeFailure sslHandshakeFailure;
+ private List<String> sslSubjectAlternativeNames;
private String httpProtocol;
private String proxyProtocolVersion;
@@ -225,6 +229,10 @@ public class ConnectionLogEntry {
this.sslHandshakeFailure = sslHandshakeFailure;
return this;
}
+ public Builder withSslSubjectAlternativeNames(List<String> sslSubjectAlternativeNames) {
+ this.sslSubjectAlternativeNames = sslSubjectAlternativeNames;
+ return this;
+ }
public Builder withHttpProtocol(String protocol) {
this.httpProtocol = protocol;
return this;
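
A hypothetical use of the extended builder; the builder factory arguments (id and timestamp) are assumptions, while the accessor returning an empty list when no SANs were recorded follows directly from the code above:

    import com.yahoo.container.logging.ConnectionLogEntry;

    import java.time.Instant;
    import java.util.List;
    import java.util.UUID;

    class ConnectionLogEntrySketch {
        static void example() {
            ConnectionLogEntry entry = ConnectionLogEntry.builder(UUID.randomUUID(), Instant.now()) // assumed factory
                    .withSslSubjectAlternativeNames(List.of("example.com", "admin@example.com"))
                    .build();
            List<String> san = entry.sslSubjectAlternativeNames(); // never null, possibly empty
        }
    }
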
diff --git a/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java b/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
index dfdc5f1b55a..53aa79b9f8c 100644
--- a/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
+++ b/container-core/src/main/java/com/yahoo/container/logging/JsonConnectionLogWriter.java
@@ -11,6 +11,7 @@ import java.io.IOException;
import java.io.OutputStream;
import java.time.Instant;
import java.util.Arrays;
+import java.util.List;
import java.util.Objects;
import java.util.Optional;
@@ -68,6 +69,7 @@ class JsonConnectionLogWriter implements LogWriter<ConnectionLogEntry> {
Instant sslPeerNotAfter = unwrap(record.sslPeerNotAfter());
String sslSniServerName = unwrap(record.sslSniServerName());
ConnectionLogEntry.SslHandshakeFailure sslHandshakeFailure = unwrap(record.sslHandshakeFailure());
+ List<String> sslSubjectAlternativeNames = record.sslSubjectAlternativeNames();
if (isAnyValuePresent(
sslProtocol, sslSessionId, sslCipherSuite, sslPeerSubject, sslPeerNotBefore, sslPeerNotAfter,
@@ -95,7 +97,13 @@ class JsonConnectionLogWriter implements LogWriter<ConnectionLogEntry> {
generator.writeStringField("type", sslHandshakeFailure.type());
generator.writeEndObject();
}
-
+ if (!sslSubjectAlternativeNames.isEmpty()) {
+ generator.writeArrayFieldStart("san");
+ for (String sanEntry : sslSubjectAlternativeNames) {
+ generator.writeString(sanEntry);
+ }
+ generator.writeEndArray();
+ }
generator.writeEndObject();
}
}
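
For a connection with peer certificates, the resulting log record now carries a san array inside the ssl object, matching the expectation added to JsonConnectionLogWriterTest below; roughly (formatted for readability, unrelated ssl fields elided):

    {
      "id": "<uuid>",
      "timestamp": "<iso-8601 timestamp>",
      "ssl": {
        "san": ["sandns", "sanemail"]
      }
    }
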
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
index fc39de72018..92d2cc5d1cd 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
@@ -8,6 +8,7 @@ import com.yahoo.jdisc.http.ssl.SslContextFactoryProvider;
import com.yahoo.security.tls.MixedMode;
import com.yahoo.security.tls.TransportSecurityUtils;
import org.eclipse.jetty.alpn.server.ALPNServerConnectionFactory;
+import org.eclipse.jetty.http2.parser.RateControl;
import org.eclipse.jetty.http2.server.HTTP2ServerConnectionFactory;
import org.eclipse.jetty.server.ConnectionFactory;
import org.eclipse.jetty.server.DetectorConnectionFactory;
@@ -75,7 +76,7 @@ public class ConnectorFactory {
connector.setName(connectorConfig.name());
connector.setAcceptQueueSize(connectorConfig.acceptQueueSize());
connector.setReuseAddress(connectorConfig.reuseAddress());
- connector.setIdleTimeout(idleTimeoutInMillis());
+ connector.setIdleTimeout(toMillis(connectorConfig.idleTimeout()));
return connector;
}
@@ -162,8 +163,10 @@ public class ConnectorFactory {
private HTTP2ServerConnectionFactory newHttp2ConnectionFactory() {
HTTP2ServerConnectionFactory factory = new HTTP2ServerConnectionFactory(newHttpConfiguration());
- factory.setStreamIdleTimeout(idleTimeoutInMillis());
- factory.setMaxConcurrentStreams(4096);
+ factory.setStreamIdleTimeout(toMillis(connectorConfig.http2().streamIdleTimeout()));
+ factory.setMaxConcurrentStreams(connectorConfig.http2().maxConcurrentStreams());
+ factory.setInitialSessionRecvWindow(1 << 24);
+ factory.setInitialStreamRecvWindow(1 << 20);
return factory;
}
@@ -194,6 +197,6 @@ public class ConnectorFactory {
|| (config.implicitTlsEnabled() && TransportSecurityUtils.isTransportSecurityEnabled());
}
- private long idleTimeoutInMillis() { return (long) (connectorConfig.idleTimeout() * 1000.0); }
+ private static long toMillis(double seconds) { return (long)(seconds * 1000); }
}
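
The new Jetty HTTP/2 settings deserve spelling out: the stream idle timeout comes from the config in seconds and is converted to milliseconds, while the two receive windows are fixed powers of two. Illustrative arithmetic only:

    class Http2TuningSketch {
        static void values() {
            long defaultStreamIdleTimeoutMillis = (long) (600 * 1000.0); // config default 600 s -> 600000 ms for Jetty
            int sessionRecvWindow = 1 << 24;                             // 16_777_216 bytes = 16 MiB per HTTP/2 session
            int streamRecvWindow = 1 << 20;                              //  1_048_576 bytes =  1 MiB per stream
        }
    }
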
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java
index 7828751df5a..ba292062197 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/HttpRequestDispatch.java
@@ -26,6 +26,7 @@ import java.time.Instant;
import java.util.Arrays;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
+import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
@@ -119,6 +120,10 @@ class HttpRequestDispatch {
error,
() -> "Network connection was unexpectedly terminated: " + parent.jettyRequest.getRequestURI());
parent.metricReporter.prematurelyClosed();
+ } else if (isErrorOfType(error, TimeoutException.class)) {
+ log.log(Level.FINE,
+ error,
+ () -> "Request/stream was timed out by Jetty: " + parent.jettyRequest.getRequestURI());
} else if (!isErrorOfType(error, OverloadException.class, BindingNotFoundException.class, RequestException.class)) {
log.log(Level.WARNING, "Request failed: " + parent.jettyRequest.getRequestURI(), error);
}
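
Jetty surfaces an expired request or stream idle timeout as a TimeoutException somewhere in the failure's cause chain, so such failures are now logged at FINE instead of WARNING. A generic sketch of the kind of cause-chain check this relies on (the project's own isErrorOfType helper is not shown in this diff):

    class CauseChainCheck {
        // Walks the cause chain looking for an instance of the given exception type.
        static boolean hasCause(Throwable error, Class<? extends Throwable> type) {
            for (Throwable t = error; t != null; t = t.getCause()) {
                if (type.isInstance(t)) return true;
            }
            return false;
        }
    }
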
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
index d337131b313..88e68e7f2e6 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/JettyConnectionLogger.java
@@ -6,6 +6,8 @@ import com.yahoo.container.logging.ConnectionLogEntry;
import com.yahoo.container.logging.ConnectionLogEntry.SslHandshakeFailure.ExceptionEntry;
import com.yahoo.io.HexDump;
import com.yahoo.jdisc.http.ServerConfig;
+import com.yahoo.security.SubjectAlternativeName;
+import com.yahoo.security.X509CertificateUtils;
import org.eclipse.jetty.alpn.server.ALPNServerConnection;
import org.eclipse.jetty.http2.server.HTTP2ServerConnection;
import org.eclipse.jetty.io.Connection;
@@ -36,6 +38,7 @@ import java.util.List;
import java.util.UUID;
import java.util.logging.Level;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
/**
* Jetty integration for jdisc connection log ({@link ConnectionLog}).
@@ -247,6 +250,7 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
private Date sslPeerNotAfter;
private List<SNIServerName> sslSniServerNames;
private SSLHandshakeException sslHandshakeException;
+ private List<String> sslSubjectAlternativeNames;
private String proxyProtocolVersion;
private String httpProtocol;
@@ -300,6 +304,10 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0];
this.sslPeerNotBefore = peerCertificate.getNotBefore();
this.sslPeerNotAfter = peerCertificate.getNotAfter();
+ this.sslSubjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream()
+ .map(SubjectAlternativeName::getValue)
+ .collect(Collectors.toList());
+
} catch (SSLPeerUnverifiedException e) {
// Throw if peer is not authenticated (e.g when client auth is disabled)
// JSSE provides no means of checking for client authentication without catching this exception
@@ -362,6 +370,9 @@ class JettyConnectionLogger extends AbstractLifeCycle implements Connection.List
.withSslPeerNotAfter(sslPeerNotAfter.toInstant())
.withSslPeerNotBefore(sslPeerNotBefore.toInstant());
}
+ if (sslSubjectAlternativeNames != null && !sslSubjectAlternativeNames.isEmpty()) {
+ builder.withSslSubjectAlternativeNames(sslSubjectAlternativeNames);
+ }
if (sslHandshakeException != null) {
List<ExceptionEntry> exceptionChain = new ArrayList<>();
Throwable cause = sslHandshakeException;
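
The SAN values are read from the peer certificate through the vespa-security helper; a roughly equivalent extraction using only the JDK API would look like this (illustrative, not what the production code uses):

    import java.security.cert.CertificateParsingException;
    import java.security.cert.X509Certificate;
    import java.util.Collection;
    import java.util.List;
    import java.util.stream.Collectors;

    class SanExtractionSketch {
        static List<String> subjectAlternativeNames(X509Certificate certificate) throws CertificateParsingException {
            Collection<List<?>> sans = certificate.getSubjectAlternativeNames(); // null when the extension is absent
            if (sans == null) return List.of();
            return sans.stream()
                       .map(entry -> String.valueOf(entry.get(1))) // each entry is [Integer type, Object value]
                       .collect(Collectors.toList());
        }
    }
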
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java
index 31fa9e9ebaa..d61a3745653 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ServletResponseController.java
@@ -20,6 +20,7 @@ import java.nio.ByteBuffer;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -75,6 +76,9 @@ public class ServletResponseController {
return HttpServletResponse.SC_NOT_FOUND;
} else if (t instanceof RequestException) {
return ((RequestException)t).getResponseStatus();
+ } else if (t instanceof TimeoutException) {
+ // E.g. stream idle timeout for HTTP/2
+ return HttpServletResponse.SC_SERVICE_UNAVAILABLE;
} else {
return HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
}
diff --git a/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def b/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def
index 09b883a620e..0b01f690aea 100644
--- a/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def
+++ b/container-core/src/main/resources/configdefinitions/jdisc.http.jdisc.http.connector.def
@@ -128,3 +128,7 @@ maxConnectionLife double default=0.0
# Enable HTTP/2 (in addition to HTTP/1.1 using ALPN)
http2Enabled bool default=true
+
+http2.streamIdleTimeout double default=600
+
+http2.maxConcurrentStreams int default=4096
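
The two new tunables default to a 600 second stream idle timeout and 4096 concurrent streams per connection, and surface in the generated ConnectorConfig.Http2 classes listed in the abi spec above. A minimal sketch of building and reading such a config, assuming the outer builder exposes the usual generated inner-node setter http2(Http2.Builder):

    import com.yahoo.jdisc.http.ConnectorConfig;

    class Http2ConfigSketch {
        static void example() {
            ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder()
                    .http2(new ConnectorConfig.Http2.Builder()   // assumed inner-node setter
                            .streamIdleTimeout(300)              // seconds, as in the config definition
                            .maxConcurrentStreams(1024)));
            double streamIdleTimeoutSeconds = config.http2().streamIdleTimeout();
            int maxConcurrentStreams = config.http2().maxConcurrentStreams();
        }
    }
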
diff --git a/container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java b/container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java
index 75bc0c915d3..66b3da06ff2 100644
--- a/container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java
+++ b/container-core/src/test/java/com/yahoo/container/logging/JsonConnectionLogWriterTest.java
@@ -26,6 +26,7 @@ class JsonConnectionLogWriterTest {
List.of(
new ConnectionLogEntry.SslHandshakeFailure.ExceptionEntry("javax.net.ssl.SSLHandshakeException", "message"),
new ConnectionLogEntry.SslHandshakeFailure.ExceptionEntry("java.io.IOException", "cause message"))))
+ .withSslSubjectAlternativeNames(List.of("sandns", "sanemail"))
.build();
String expectedJson = "{" +
"\"id\":\""+id.toString()+"\"," +
@@ -34,7 +35,7 @@ class JsonConnectionLogWriterTest {
"\"ssl\":{\"handshake-failure\":{\"exception\":[" +
"{\"cause\":\"javax.net.ssl.SSLHandshakeException\",\"message\":\"message\"}," +
"{\"cause\":\"java.io.IOException\",\"message\":\"cause message\"}" +
- "],\"type\":\"UNKNOWN\"}}}";
+ "],\"type\":\"UNKNOWN\"},\"san\":[\"sandns\",\"sanemail\"]}}";
JsonConnectionLogWriter writer = new JsonConnectionLogWriter();
ByteArrayOutputStream out = new ByteArrayOutputStream();
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
index 93261a2401f..bb736122867 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
@@ -8,6 +8,8 @@ import com.yahoo.jdisc.http.ssl.impl.ConfiguredSslContextFactoryProvider;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.AbstractHandler;
+import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
import javax.servlet.http.HttpServletRequest;
@@ -16,46 +18,73 @@ import java.io.IOException;
import java.util.Map;
import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertEquals;
/**
* @author Einar M R Rosenvinge
+ * @author bjorncs
*/
public class ConnectorFactoryTest {
- @Test
- public void requireThatServerCanBindChannel() throws Exception {
- Server server = new Server();
+ private Server server;
+
+ @Before
+ public void createServer() {
+ server = new Server();
+ }
+
+ @After
+ public void stopServer() {
try {
- ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder());
- ConnectorFactory factory = createConnectorFactory(config);
- JettyConnectionLogger connectionLogger = new JettyConnectionLogger(
- new ServerConfig.ConnectionLog.Builder().enabled(false).build(),
- new VoidConnectionLog());
- DummyMetric metric = new DummyMetric();
- var connectionMetricAggregator = new ConnectionMetricAggregator(new ServerConfig(new ServerConfig.Builder()), metric);
- JDiscServerConnector connector =
- (JDiscServerConnector)factory.createConnector(metric, server, connectionLogger, connectionMetricAggregator);
- server.addConnector(connector);
- server.setHandler(new HelloWorldHandler());
- server.start();
-
- SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false);
- SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb");
- SimpleHttpClient.ResponseValidator val = ex.execute();
- val.expectContent(equalTo("Hello world"));
- } finally {
- try {
- server.stop();
- } catch (Exception e) {
- //ignore
- }
+ server.stop();
+ server = null;
+ } catch (Exception e) {
+ //ignore
}
}
+ @Test
+ public void requireThatServerCanBindChannel() throws Exception {
+ ConnectorConfig config = new ConnectorConfig(new ConnectorConfig.Builder());
+ ConnectorFactory factory = createConnectorFactory(config);
+ JDiscServerConnector connector = createConnectorFromFactory(factory);
+ server.addConnector(connector);
+ server.setHandler(new HelloWorldHandler());
+ server.start();
+
+ SimpleHttpClient client = new SimpleHttpClient(null, connector.getLocalPort(), false);
+ SimpleHttpClient.RequestExecutor ex = client.newGet("/blaasdfnb");
+ SimpleHttpClient.ResponseValidator val = ex.execute();
+ val.expectContent(equalTo("Hello world"));
+ }
+
+ @Test
+ public void constructed_connector_is_based_on_jdisc_connector_config() {
+ ConnectorConfig config = new ConnectorConfig.Builder()
+ .idleTimeout(25)
+ .name("my-server-name")
+ .listenPort(12345)
+ .build();
+ ConnectorFactory factory = createConnectorFactory(config);
+ JDiscServerConnector connector = createConnectorFromFactory(factory);
+ assertEquals(25000, connector.getIdleTimeout());
+ assertEquals(12345, connector.listenPort());
+ assertEquals("my-server-name", connector.getName());
+ }
+
private static ConnectorFactory createConnectorFactory(ConnectorConfig config) {
return new ConnectorFactory(config, new ConfiguredSslContextFactoryProvider(config));
}
+ private JDiscServerConnector createConnectorFromFactory(ConnectorFactory factory) {
+ JettyConnectionLogger connectionLogger = new JettyConnectionLogger(
+ new ServerConfig.ConnectionLog.Builder().enabled(false).build(),
+ new VoidConnectionLog());
+ DummyMetric metric = new DummyMetric();
+ var connectionMetricAggregator = new ConnectionMetricAggregator(new ServerConfig(new ServerConfig.Builder()), metric);
+ return (JDiscServerConnector)factory.createConnector(metric, server, connectionLogger, connectionMetricAggregator);
+ }
+
private static class HelloWorldHandler extends AbstractHandler {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException {
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
index 2183098da2b..4c45319daf6 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
@@ -12,12 +12,11 @@ import com.yahoo.jdisc.http.server.jetty.testutils.ConnectorFactoryRegistryModul
import com.yahoo.jdisc.test.ServerProviderConformanceTest;
import org.apache.http.HttpResponse;
import org.apache.http.HttpVersion;
-import org.apache.http.ProtocolVersion;
-import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;
import org.hamcrest.Description;
@@ -27,6 +26,7 @@ import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
+import java.io.IOException;
import java.net.URI;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
@@ -34,6 +34,7 @@ import java.util.Collections;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Pattern;
@@ -64,6 +65,8 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@SuppressWarnings("LoggerInitializedWithForeignClass")
private static Logger httpRequestDispatchLogger = Logger.getLogger(HttpRequestDispatch.class.getName());
private static Level httpRequestDispatchLoggerOriginalLevel;
+ private static CloseableHttpClient httpClient;
+ private static ExecutorService executorService;
/*
* Reduce logging of every stack trace for {@link ServerProviderConformanceTest.ConformanceException} thrown.
@@ -73,11 +76,16 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
public static void reduceExcessiveLogging() {
httpRequestDispatchLoggerOriginalLevel = httpRequestDispatchLogger.getLevel();
httpRequestDispatchLogger.setLevel(Level.SEVERE);
+ httpClient = HttpClientBuilder.create().build();
+ executorService = Executors.newSingleThreadExecutor();
}
@AfterClass
- public static void restoreExcessiveLogging() {
+ public static void restoreExcessiveLogging() throws IOException, InterruptedException {
httpRequestDispatchLogger.setLevel(httpRequestDispatchLoggerOriginalLevel);
+ httpClient.close();
+ executorService.shutdownNow();
+ executorService.awaitTermination(30, TimeUnit.SECONDS);
}
@AfterClass
@@ -742,20 +750,12 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
}
}
- private class TestRunner implements Adapter<JettyHttpServer, ClientProxy, Future<HttpResponse>> {
+ private class TestRunner implements Adapter<JettyHttpServer, Integer, Future<HttpResponse>> {
private Matcher<ResponseGist> expectedResponse = null;
- HttpVersion requestVersion;
- private final ExecutorService executorService = Executors.newSingleThreadExecutor();
void execute() throws Throwable {
- requestVersion = HttpVersion.HTTP_1_0;
runTest(this);
-
- requestVersion = HttpVersion.HTTP_1_1;
- runTest(this);
-
- executorService.shutdown();
}
TestRunner expect(final Matcher<ResponseGist> matcher) {
@@ -790,30 +790,27 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
}
@Override
- public ClientProxy newClient(final JettyHttpServer server) throws Throwable {
- return new ClientProxy(server.getListenPort(), requestVersion);
+ public Integer newClient(final JettyHttpServer server) throws Throwable {
+ return server.getListenPort();
}
@Override
public Future<HttpResponse> executeRequest(
- final ClientProxy client,
+ final Integer listenPort,
final boolean withRequestContent) throws Throwable {
final HttpUriRequest request;
- final URI requestUri = URI.create("http://localhost:" + client.listenPort + "/status.html");
+ final URI requestUri = URI.create("http://localhost:" + listenPort + "/status.html");
if (!withRequestContent) {
HttpGet httpGet = new HttpGet(requestUri);
- httpGet.setProtocolVersion(client.requestVersion);
+ httpGet.setProtocolVersion(HttpVersion.HTTP_1_1);
request = httpGet;
} else {
final HttpPost post = new HttpPost(requestUri);
post.setEntity(new StringEntity(REQUEST_CONTENT, StandardCharsets.UTF_8));
- post.setProtocolVersion(client.requestVersion);
+ post.setProtocolVersion(HttpVersion.HTTP_1_1);
request = post;
}
- log.fine(() -> "executorService:"
- + " .isShutDown()=" + executorService.isShutdown()
- + " .isTerminated()=" + executorService.isTerminated());
- return executorService.submit(() -> client.delegate.execute(request));
+ return executorService.submit(() -> httpClient.execute(request));
}
@Override
@@ -831,17 +828,4 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
assertThat(responseGist, expectedResponse);
}
}
-
- private static class ClientProxy {
-
- final HttpClient delegate;
- final int listenPort;
- final ProtocolVersion requestVersion;
-
- ClientProxy(final int listenPort, final HttpVersion requestVersion) {
- this.delegate = HttpClientBuilder.create().build();
- this.requestVersion = requestVersion;
- this.listenPort = listenPort;
- }
- }
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
index 0dec711f4c0..0f625b5c3df 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
@@ -41,6 +41,7 @@ import org.apache.hc.client5.http.entity.mime.FormBodyPart;
import org.apache.hc.client5.http.entity.mime.FormBodyPartBuilder;
import org.apache.hc.client5.http.entity.mime.StringBody;
import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
+import org.apache.hc.client5.http.impl.async.H2AsyncClientBuilder;
import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder;
import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder;
import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
@@ -969,14 +970,12 @@ public class HttpServerTest {
private static CloseableHttpAsyncClient createHttp2Client(JettyTestDriver driver) {
TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
- .setSslContext(driver.sslContext())
- .build();
- var client = HttpAsyncClientBuilder.create()
- .setVersionPolicy(HttpVersionPolicy.FORCE_HTTP_2)
- .disableConnectionState()
- .disableAutomaticRetries()
- .setConnectionManager(PoolingAsyncClientConnectionManagerBuilder.create().setTlsStrategy(tlsStrategy).build())
- .build();
+ .setSslContext(driver.sslContext())
+ .build();
+ var client = H2AsyncClientBuilder.create()
+ .disableAutomaticRetries()
+ .setTlsStrategy(tlsStrategy)
+ .build();
client.start();
return client;
}
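
H2AsyncClientBuilder produces a client that speaks HTTP/2 only, which is why the FORCE_HTTP_2 version policy and the pooling connection manager are no longer needed. A minimal request against such a client could look like this (URL invented, TLS setup elided; the convenience execute overload for SimpleHttpRequest is assumed to be available as in stock HttpClient 5):

    import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
    import org.apache.hc.client5.http.async.methods.SimpleRequestBuilder;
    import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
    import org.apache.hc.client5.http.impl.async.H2AsyncClientBuilder;

    import java.util.concurrent.Future;

    class H2ClientSketch {
        static SimpleHttpResponse get(String url) throws Exception {
            CloseableHttpAsyncClient client = H2AsyncClientBuilder.create()
                    .disableAutomaticRetries()
                    .build();
            client.start();
            try {
                Future<SimpleHttpResponse> response = client.execute(SimpleRequestBuilder.get(url).build(), null);
                return response.get();
            } finally {
                client.close();
            }
        }
    }
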
diff --git a/container-disc/pom.xml b/container-disc/pom.xml
index ae2e460094b..5446c9e1698 100644
--- a/container-disc/pom.xml
+++ b/container-disc/pom.xml
@@ -59,6 +59,12 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
+ <artifactId>container-apache-http-client-bundle</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
<artifactId>container-core</artifactId>
<version>${project.version}</version>
<exclusions>
@@ -178,6 +184,8 @@
defaults-jar-with-dependencies.jar,
zkfacade-jar-with-dependencies.jar,
zookeeper-server-jar-with-dependencies.jar,
+ <!-- Apache http client repackaged as bundle -->
+ container-apache-http-client-bundle-jar-with-dependencies.jar,
<!-- Jetty -->
alpn-api-${jetty-alpn.version}.jar,
http2-server-${jetty.version}.jar,
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java b/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
index fe8668427f4..0e4c4446778 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
@@ -155,7 +155,7 @@ public final class ConfiguredApplication implements Application {
if ( ! qrConfig.rpc().enabled()) return null;
// 1. Set up RPC server
- supervisor = new Supervisor(new Transport("slobrok")).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("slobrok")).setDropEmptyBuffers(true);
Spec listenSpec = new Spec(qrConfig.rpc().port());
try {
acceptor = supervisor.listen(listenSpec);
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java b/container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java
index 1f4afed451c..4fe977bff2b 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/NearestNeighborItem.java
@@ -2,7 +2,6 @@
package com.yahoo.prelude.query;
-import com.google.common.annotations.Beta;
import com.yahoo.compress.IntegerCompressor;
import com.yahoo.prelude.query.textualrepresentation.Discloser;
@@ -17,7 +16,6 @@ import java.nio.ByteBuffer;
*
* @author arnej
*/
-@Beta
public class NearestNeighborItem extends SimpleTaggableItem {
private int targetNumHits = 0;
diff --git a/container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java b/container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java
index 45034482bb6..5390f202ef0 100644
--- a/container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/searcher/FillSearcher.java
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.prelude.searcher;
-import com.yahoo.component.ComponentId;
import com.yahoo.search.Query;
import com.yahoo.search.Result;
import com.yahoo.search.Searcher;
diff --git a/container-search/src/main/java/com/yahoo/search/Searcher.java b/container-search/src/main/java/com/yahoo/search/Searcher.java
index 5fefe9d2468..cd6b7167f08 100644
--- a/container-search/src/main/java/com/yahoo/search/Searcher.java
+++ b/container-search/src/main/java/com/yahoo/search/Searcher.java
@@ -73,6 +73,7 @@ public abstract class Searcher extends Processor {
// Note to developers: If you think you should add something here you are probably wrong
// Create a subclass containing the new method instead.
+
private final Logger logger = Logger.getLogger(getClass().getName());
public Searcher() {}
diff --git a/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java b/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java
index 0d491d2f0c1..8eee7c11d3e 100644
--- a/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java
+++ b/container-search/src/main/java/com/yahoo/search/cluster/BaseNodeMonitor.java
@@ -44,7 +44,7 @@ public abstract class BaseNodeMonitor<T> {
protected MonitorConfiguration configuration;
/** Is the node we monitor part of an internal Vespa cluster or not */
- private boolean internal;
+ private final boolean internal;
public BaseNodeMonitor(boolean internal) {
this.internal=internal;
diff --git a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java
index 27d8bb27ee8..c9b8aeee417 100644
--- a/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java
+++ b/container-search/src/main/java/com/yahoo/search/cluster/ClusterMonitor.java
@@ -25,9 +25,9 @@ import java.util.logging.Logger;
*/
public class ClusterMonitor<T> {
- private final MonitorConfiguration configuration = new MonitorConfiguration();
+ private static final Logger log = Logger.getLogger(ClusterMonitor.class.getName());
- private static Logger log = Logger.getLogger(ClusterMonitor.class.getName());
+ private final MonitorConfiguration configuration = new MonitorConfiguration();
private final NodeManager<T> nodeManager;
diff --git a/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java b/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java
index a2fb982e3c5..95f51b374d6 100644
--- a/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java
+++ b/container-search/src/main/java/com/yahoo/search/cluster/MonitorConfiguration.java
@@ -9,7 +9,7 @@ package com.yahoo.search.cluster;
public class MonitorConfiguration {
/** The interval in ms between consecutive checks of the monitored nodes */
- private long checkInterval=1000;
+ private long checkInterval = 1000;
/** The number of milliseconds to attempt to complete a request before giving up */
private final long requestTimeout = 980;
@@ -18,6 +18,7 @@ public class MonitorConfiguration {
private long failLimit = 5000;
/** Sets the interval between each ping of idle or failing nodes. Default is 1000 ms. */
+ @Deprecated // TODO: Remove on Vespa 8
public void setCheckInterval(long intervalMs) { this.checkInterval = intervalMs; }
/** Returns the interval between each ping of idle or failing nodes. Default is 1000 ms. */
@@ -59,6 +60,7 @@ public class MonitorConfiguration {
* Sets the number of milliseconds a node is allowed to fail before we
* mark it as not working
*/
+ @Deprecated // TODO: Remove on Vespa 8
public void setFailLimit(long failLimit) { this.failLimit=failLimit; }
/**
@@ -86,6 +88,7 @@ public class MonitorConfiguration {
@Deprecated // TODO: Remove on Vespa 8
public void setQuarantineTime(long quarantineTime) { }
+ @Override
public String toString() {
return "monitor configuration [" +
"checkInterval: " + checkInterval +
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
index 159a42676ec..9ae25518969 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/searchcluster/SearchCluster.java
@@ -368,7 +368,7 @@ public class SearchCluster implements NodeManager<Node> {
*/
public boolean isPartialGroupCoverageSufficient(List<Node> nodes) {
if (orderedGroups().size() == 1)
- return nodes.size() >= wantedGroupSize() - dispatchConfig.maxNodesDownPerGroup();
+ return true;
long activeDocuments = nodes.stream().mapToLong(Node::getActiveDocuments).sum();
return isGroupCoverageSufficient(activeDocuments, medianDocumentsPerGroup());
}
@@ -378,7 +378,6 @@ public class SearchCluster implements NodeManager<Node> {
boolean changed = group.isFullCoverageStatusChanged(fullCoverage);
if (changed || (!fullCoverage && System.currentTimeMillis() > nextLogTime)) {
nextLogTime = System.currentTimeMillis() + 30 * 1000;
- int requiredNodes = group.nodes().size() - dispatchConfig.maxNodesDownPerGroup();
if (fullCoverage) {
log.info("Cluster " + clusterId + ": " + group + " has full coverage. " +
"Active documents: " + group.getActiveDocuments() + "/" + medianDocuments + ", " +
@@ -391,7 +390,7 @@ public class SearchCluster implements NodeManager<Node> {
}
log.warning("Cluster " + clusterId + ": " + group + " has reduced coverage: " +
"Active documents: " + group.getActiveDocuments() + "/" + medianDocuments + ", " +
- "working nodes: " + group.workingNodes() + "/" + group.nodes().size() + " required " + requiredNodes +
+ "working nodes: " + group.workingNodes() + "/" + group.nodes().size() +
", unresponsive nodes: " + (unresponsive.toString().isEmpty() ? " none" : unresponsive));
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java b/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java
index 8b73fa01128..499ed610d34 100644
--- a/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java
+++ b/container-search/src/main/java/com/yahoo/search/grouping/request/GroupingOperation.java
@@ -8,7 +8,12 @@ import com.yahoo.search.grouping.request.parser.GroupingParserInput;
import com.yahoo.search.grouping.request.parser.ParseException;
import com.yahoo.search.grouping.request.parser.TokenMgrException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
/**
* This class represents a single node in a grouping operation tree. You may manually construct this tree, or you may
diff --git a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
index ba9f1804d34..e2c15b6e35b 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
@@ -532,12 +532,13 @@ public class SearchHandler extends LoggingRequestHandler {
if (query.getHits() > maxHits) {
return new Result(query, ErrorMessage.createIllegalQuery(query.getHits() +
- " hits requested, configured limit: " + maxHits + "."));
+ " hits requested, configured limit: " + maxHits +
+ ". See https://docs.vespa.ai/en/reference/query-api-reference.html#native-execution-parameters"));
} else if (query.getOffset() > maxOffset) {
- return new Result(query,
- ErrorMessage.createIllegalQuery("Offset of " + query.getOffset() +
- " requested, configured limit: " + maxOffset + "."));
+ return new Result(query, ErrorMessage.createIllegalQuery("Offset of " + query.getOffset() +
+ " requested, configured limit: " + maxOffset +
+ ". See https://docs.vespa.ai/en/reference/query-api-reference.html#native-execution-parameters"));
}
return null;
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/Presentation.java b/container-search/src/main/java/com/yahoo/search/query/Presentation.java
index b10e8442a5f..db2fbf525e0 100644
--- a/container-search/src/main/java/com/yahoo/search/query/Presentation.java
+++ b/container-search/src/main/java/com/yahoo/search/query/Presentation.java
@@ -23,7 +23,7 @@ import java.util.Set;
public class Presentation implements Cloneable {
/** The type representing the property arguments consumed by this */
- private static QueryProfileType argumentType;
+ private static final QueryProfileType argumentType;
public static final String PRESENTATION = "presentation";
public static final String BOLDING = "bolding";
@@ -48,7 +48,7 @@ public class Presentation implements Cloneable {
public static QueryProfileType getArgumentType() { return argumentType; }
/** How the result should be highlighted */
- private Highlight highlight= null;
+ private Highlight highlight = null;
/** The terms to highlight in the result (only used by BoldingSearcher, may be removed later). */
private List<IndexedItem> boldingData = null;
diff --git a/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java b/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java
index 0574fc660c3..fac0d35d509 100644
--- a/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java
+++ b/container-search/src/main/java/com/yahoo/search/searchchain/Execution.java
@@ -523,7 +523,6 @@ public class Execution extends com.yahoo.processing.execution.Execution {
*
* @param result the result to fill
*/
- @SuppressWarnings("deprecation")
public void fillAttributes(Result result) {
fill(result, ATTRIBUTEPREFETCH);
}
diff --git a/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java b/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java
index ca9d17cb656..d22dd2e6af6 100644
--- a/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java
+++ b/container-search/src/main/java/com/yahoo/search/searchers/ValidateNearestNeighborSearcher.java
@@ -19,6 +19,7 @@ import com.yahoo.tensor.TensorType;
import com.yahoo.vespa.config.search.AttributesConfig;
import com.yahoo.yolean.chain.Before;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -32,15 +33,17 @@ import java.util.Optional;
@Before(GroupingExecutor.COMPONENT_NAME) // Must happen before query.prepare()
public class ValidateNearestNeighborSearcher extends Searcher {
- private final Map<String, TensorType> validAttributes = new HashMap<>();
+ private final Map<String, List<TensorType>> validAttributes = new HashMap<>();
public ValidateNearestNeighborSearcher(AttributesConfig attributesConfig) {
for (AttributesConfig.Attribute a : attributesConfig.attribute()) {
- TensorType tt = null;
+ if (! validAttributes.containsKey(a.name())) {
+ validAttributes.put(a.name(), new ArrayList<TensorType>());
+ }
if (a.datatype() == AttributesConfig.Attribute.Datatype.TENSOR) {
- tt = TensorType.fromSpec(a.tensortype());
+ TensorType tt = TensorType.fromSpec(a.tensortype());
+ validAttributes.get(a.name()).add(tt);
}
- validAttributes.put(a.name(), tt);
}
}
@@ -60,10 +63,10 @@ public class ValidateNearestNeighborSearcher extends Searcher {
public Optional<ErrorMessage> errorMessage = Optional.empty();
- private final Map<String, TensorType> validAttributes;
+ private final Map<String, List<TensorType>> validAttributes;
private final Query query;
- public NNVisitor(RankProperties rankProperties, Map<String, TensorType> validAttributes, Query query) {
+ public NNVisitor(RankProperties rankProperties, Map<String, List<TensorType>> validAttributes, Query query) {
this.validAttributes = validAttributes;
this.query = query;
}
@@ -101,17 +104,26 @@ public class ValidateNearestNeighborSearcher extends Searcher {
if (queryTensor.isEmpty())
return item + " requires a tensor rank feature " + queryFeatureName + " but this is not present";
- if ( ! validAttributes.containsKey(item.getIndexName()))
+ if ( ! validAttributes.containsKey(item.getIndexName())) {
return item + " field is not an attribute";
- TensorType fieldType = validAttributes.get(item.getIndexName());
- if (fieldType == null) return item + " field is not a tensor";
- if ( ! isDenseVector(fieldType))
- return item + " tensor type " + fieldType + " is not a dense vector";
-
- if ( ! isCompatible(fieldType, queryTensor.get().type()))
- return item + " field type " + fieldType + " does not match query type " + queryTensor.get().type();
-
- return null;
+ }
+ List<TensorType> allTensorTypes = validAttributes.get(item.getIndexName());
+ for (TensorType fieldType : allTensorTypes) {
+ if (isDenseVector(fieldType) && isCompatible(fieldType, queryTensor.get().type())) {
+ return null;
+ }
+ }
+ for (TensorType fieldType : allTensorTypes) {
+ if (isDenseVector(fieldType) && ! isCompatible(fieldType, queryTensor.get().type())) {
+ return item + " field type " + fieldType + " does not match query type " + queryTensor.get().type();
+ }
+ }
+ for (TensorType fieldType : allTensorTypes) {
+ if (! isDenseVector(fieldType)) {
+ return item + " tensor type " + fieldType + " is not a dense vector";
+ }
+ }
+ return item + " field is not a tensor";
}
@Override
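
Since an attribute name may now be declared with several tensor types, validation accepts the query tensor if any declared type is a compatible dense vector, and otherwise reports the most specific error it found. A condensed restatement of that ordering, with the searcher's own isDenseVector/isCompatible helpers passed in as predicates:

    import com.yahoo.tensor.TensorType;

    import java.util.List;
    import java.util.function.BiPredicate;
    import java.util.function.Predicate;

    class NearestNeighborValidationSketch {
        // Returns null when some declared type accepts the query tensor, otherwise the most specific error.
        static String validate(List<TensorType> fieldTypes, TensorType queryType,
                               Predicate<TensorType> isDenseVector,
                               BiPredicate<TensorType, TensorType> isCompatible) {
            for (TensorType t : fieldTypes)
                if (isDenseVector.test(t) && isCompatible.test(t, queryType)) return null;
            for (TensorType t : fieldTypes)
                if (isDenseVector.test(t) && ! isCompatible.test(t, queryType))
                    return "field type " + t + " does not match query type " + queryType;
            for (TensorType t : fieldTypes)
                if ( ! isDenseVector.test(t)) return "tensor type " + t + " is not a dense vector";
            return "field is not a tensor"; // the list was empty
        }
    }
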
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java b/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java
index 9d96b2302d7..8db54218e56 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/MockSearchCluster.java
@@ -119,7 +119,6 @@ public class MockSearchCluster extends SearchCluster {
DispatchConfig.Builder builder = new DispatchConfig.Builder();
builder.minActivedocsPercentage(88.0);
builder.minGroupCoverage(99.0);
- builder.maxNodesDownPerGroup(0);
builder.minSearchCoverage(minSearchCoverage);
builder.distributionPolicy(DispatchConfig.DistributionPolicy.Enum.ROUNDROBIN);
if (minSearchCoverage < 100.0) {
diff --git a/container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java b/container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java
index 72956b5b6eb..e5ed6f89fd4 100644
--- a/container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/searchers/ValidateNearestNeighborTestCase.java
@@ -51,10 +51,20 @@ public class ValidateNearestNeighborTestCase {
"attribute[3].tensortype tensor(x{})\n" +
"attribute[4].name matrix\n" +
"attribute[4].datatype TENSOR\n" +
- "attribute[4].tensortype tensor(x[3],y[1])\n"
+ "attribute[4].tensortype tensor(x[3],y[1])\n" +
+ "attribute[5].name threetypes\n" +
+ "attribute[5].datatype TENSOR\n" +
+ "attribute[5].tensortype tensor(x[42])\n" +
+ "attribute[6].name threetypes\n" +
+ "attribute[6].datatype TENSOR\n" +
+ "attribute[6].tensortype tensor(x[3])\n" +
+ "attribute[7].name threetypes\n" +
+ "attribute[7].datatype TENSOR\n" +
+ "attribute[7].tensortype tensor(x{})\n"
)));
}
+ private static TensorType tt_dense_dvector_42 = TensorType.fromSpec("tensor(x[42])");
private static TensorType tt_dense_dvector_3 = TensorType.fromSpec("tensor(x[3])");
private static TensorType tt_dense_dvector_2 = TensorType.fromSpec("tensor(x[2])");
private static TensorType tt_dense_fvector_3 = TensorType.fromSpec("tensor<float>(x[3])");
@@ -186,6 +196,20 @@ public class ValidateNearestNeighborTestCase {
}
@Test
+ public void testSeveralAttributesWithSameName() {
+ String q = makeQuery("threetypes", "qvector");
+ Tensor t1 = makeTensor(tt_dense_fvector_3);
+ Result r1 = doSearch(searcher, q, t1);
+ assertNull(r1.hits().getError());
+ Tensor t2 = makeTensor(tt_dense_dvector_42, 42);
+ Result r2 = doSearch(searcher, q, t2);
+ assertNull(r2.hits().getError());
+ Tensor t3 = makeTensor(tt_dense_dvector_2, 2);
+ Result r3 = doSearch(searcher, q, t3);
+ assertErrMsg(desc("threetypes", "qvector", 1, "field type tensor(x[42]) does not match query type tensor(x[2])"), r3);
+ }
+
+ @Test
public void testSparseTensor() {
String q = makeQuery("sparse", "qvector");
Tensor t = makeTensor(tt_sparse_vector_x);
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
index 0e11bcdccaf..ee74aca0e14 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/ClusterMetrics.java
@@ -23,11 +23,13 @@ public class ClusterMetrics {
private final String clusterId;
private final String clusterType;
private final Map<String, Double> metrics;
+ private final Map<String, Double> reindexingProgress;
- public ClusterMetrics(String clusterId, String clusterType, Map<String, Double> metrics) {
+ public ClusterMetrics(String clusterId, String clusterType, Map<String, Double> metrics, Map<String, Double> reindexingProgress) {
this.clusterId = clusterId;
this.clusterType = clusterType;
this.metrics = Map.copyOf(metrics);
+ this.reindexingProgress = Map.copyOf(reindexingProgress);
}
public String getClusterId() {
@@ -74,4 +76,7 @@ public class ClusterMetrics {
return Optional.ofNullable(metrics.get(DISK_FEED_BLOCK_LIMIT));
}
+ public Map<String, Double> reindexingProgress() {
+ return reindexingProgress;
+ }
}
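
A hypothetical construction of the extended ClusterMetrics (metric and document type names invented); since the fields are copied with Map.copyOf, callers with no reindexing in progress must pass an empty map rather than null:

    import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;

    import java.util.Map;

    class ClusterMetricsSketch {
        static void example() {
            ClusterMetrics metrics = new ClusterMetrics("my-cluster", "content",
                    Map.of("queriesPerSecond", 120.0),    // metrics (names invented)
                    Map.of("my_document_type", 0.25));    // reindexing progress per document type
            Map<String, Double> progress = metrics.reindexingProgress();
        }
    }
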
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java
index d0b9653bbf3..55e1e879ef7 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java
@@ -12,6 +12,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCe
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretStore;
+import java.security.cert.X509Certificate;
import java.util.List;
import java.util.Optional;
import java.util.Set;
@@ -36,6 +37,7 @@ public class DeploymentData {
private final Optional<TenantRoles> tenantRoles;
private final Quota quota;
private final List<TenantSecretStore> tenantSecretStores;
+ private final List<X509Certificate> operatorCertificates;
public DeploymentData(ApplicationId instance, ZoneId zone, byte[] applicationPackage, Version platform,
Set<ContainerEndpoint> containerEndpoints,
@@ -44,7 +46,8 @@ public class DeploymentData {
Optional<AthenzDomain> athenzDomain,
Optional<TenantRoles> tenantRoles,
Quota quota,
- List<TenantSecretStore> tenantSecretStores) {
+ List<TenantSecretStore> tenantSecretStores,
+ List<X509Certificate> operatorCertificates) {
this.instance = requireNonNull(instance);
this.zone = requireNonNull(zone);
this.applicationPackage = requireNonNull(applicationPackage);
@@ -56,6 +59,7 @@ public class DeploymentData {
this.tenantRoles = tenantRoles;
this.quota = quota;
this.tenantSecretStores = tenantSecretStores;
+ this.operatorCertificates = operatorCertificates;
}
public ApplicationId instance() {
@@ -102,4 +106,7 @@ public class DeploymentData {
return tenantSecretStores;
}
+ public List<X509Certificate> operatorCertificates() {
+ return operatorCertificates;
+ }
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java
index 98591ba41e2..fdd66c037d9 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/ServiceRegistry.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.controller.api.integration;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveService;
+import com.yahoo.vespa.hosted.controller.api.integration.athenz.AccessControlService;
import com.yahoo.vespa.hosted.controller.api.integration.aws.RoleService;
import com.yahoo.vespa.hosted.controller.api.integration.aws.AwsEventFetcher;
import com.yahoo.vespa.hosted.controller.api.integration.aws.ResourceTagger;
@@ -15,6 +16,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.ArtifactRepo
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud;
import com.yahoo.vespa.hosted.controller.api.integration.dns.NameService;
import com.yahoo.vespa.hosted.controller.api.integration.entity.EntityService;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonClient;
import com.yahoo.vespa.hosted.controller.api.integration.organization.ContactRetriever;
import com.yahoo.vespa.hosted.controller.api.integration.organization.DeploymentIssues;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueHandler;
@@ -93,4 +95,8 @@ public interface ServiceRegistry {
ArchiveService archiveService();
ChangeRequestClient changeRequestClient();
+
+ AccessControlService accessControlService();
+
+ HorizonClient horizonClient();
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AccessControlService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AccessControlService.java
new file mode 100644
index 00000000000..78c67236f78
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AccessControlService.java
@@ -0,0 +1,18 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.api.integration.athenz;
+
+import com.yahoo.vespa.athenz.api.AthenzUser;
+
+import java.time.Instant;
+import java.util.Collection;
+
+/**
+ * Manage operator data plane access control
+ *
+ * @author mortent
+ */
+public interface AccessControlService {
+ boolean approveDataPlaneAccess(AthenzUser user, Instant expiry);
+ Collection<AthenzUser> listMembers();
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java
new file mode 100644
index 00000000000..0be32165916
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/AthenzAccessControlService.java
@@ -0,0 +1,57 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.api.integration.athenz;
+
+import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.api.AthenzGroup;
+import com.yahoo.vespa.athenz.api.AthenzRole;
+import com.yahoo.vespa.athenz.api.AthenzUser;
+import com.yahoo.vespa.athenz.client.zms.ZmsClient;
+
+import java.time.Instant;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+
+public class AthenzAccessControlService implements AccessControlService {
+
+ private static final String ALLOWED_OPERATOR_GROUPNAME = "vespa-team";
+ private static final String DATAPLANE_ACCESS_ROLENAME = "operator-data-plane";
+ private final ZmsClient zmsClient;
+ private final AthenzRole dataPlaneAccessRole;
+ private final AthenzGroup vespaTeam;
+
+
+ public AthenzAccessControlService(ZmsClient zmsClient, AthenzDomain domain) {
+ this.zmsClient = zmsClient;
+ this.dataPlaneAccessRole = new AthenzRole(domain, DATAPLANE_ACCESS_ROLENAME);
+ this.vespaTeam = new AthenzGroup(domain, ALLOWED_OPERATOR_GROUPNAME);
+ }
+
+ @Override
+ public boolean approveDataPlaneAccess(AthenzUser user, Instant expiry) {
+ // Can only approve team members; other members must be manually approved
+ if(!isVespaTeamMember(user)) {
+ throw new IllegalArgumentException(String.format("User %s requires manual approval, please contact Vespa team", user.getName()));
+ }
+ List<AthenzUser> users = zmsClient.listPendingRoleApprovals(dataPlaneAccessRole);
+ if (users.contains(user)) {
+ zmsClient.approvePendingRoleMembership(dataPlaneAccessRole, user, expiry);
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ // Return list of approved members (users, excluding services) of data plane role
+ public Collection<AthenzUser> listMembers() {
+ return zmsClient.listMembers(dataPlaneAccessRole)
+ .stream().filter(AthenzUser.class::isInstance)
+ .map(AthenzUser.class::cast)
+ .collect(Collectors.toList());
+ }
+
+ public boolean isVespaTeamMember(AthenzUser user) {
+ return zmsClient.getGroupMembership(vespaTeam, user);
+ }
+}
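
A hypothetical exercise of the new interface, using the mock implementation added alongside it in this change (user id invented; AthenzUser.fromUserId is assumed to be the existing factory on that class):

    import com.yahoo.vespa.athenz.api.AthenzUser;
    import com.yahoo.vespa.hosted.controller.api.integration.athenz.MockAccessControlService;

    import java.time.Duration;
    import java.time.Instant;

    class AccessControlSketch {
        static void example() {
            MockAccessControlService accessControl = new MockAccessControlService();
            AthenzUser operator = AthenzUser.fromUserId("alice");  // assumed factory
            accessControl.addPendingMember(operator);
            boolean approved = accessControl.approveDataPlaneAccess(operator, Instant.now().plus(Duration.ofDays(1)));
            // approved == true; the user now appears in accessControl.listMembers()
        }
    }
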
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/MockAccessControlService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/MockAccessControlService.java
new file mode 100644
index 00000000000..81bc7725c7a
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/MockAccessControlService.java
@@ -0,0 +1,34 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.api.integration.athenz;
+
+import com.yahoo.vespa.athenz.api.AthenzUser;
+
+import java.time.Instant;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Set;
+
+public class MockAccessControlService implements AccessControlService {
+
+ private final Set<AthenzUser> pendingMembers = new HashSet<>();
+ private final Set<AthenzUser> members = new HashSet<>();
+
+ @Override
+ public boolean approveDataPlaneAccess(AthenzUser user, Instant expiry) {
+ if (pendingMembers.remove(user)) {
+ return members.add(user);
+ } else {
+ return false;
+ }
+ }
+
+ @Override
+ public Collection<AthenzUser> listMembers() {
+ return Set.copyOf(members);
+ }
+
+ public void addPendingMember(AthenzUser user) {
+ pendingMembers.add(user);
+ }
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java
index 942f0f35f58..396be0adf92 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/athenz/ZmsClientMock.java
@@ -2,9 +2,11 @@
package com.yahoo.vespa.hosted.controller.api.integration.athenz;
import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.api.AthenzGroup;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzResourceName;
import com.yahoo.vespa.athenz.api.AthenzRole;
+import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.athenz.api.OktaAccessToken;
import com.yahoo.vespa.athenz.api.OktaIdentityToken;
import com.yahoo.vespa.athenz.client.zms.RoleAction;
@@ -12,6 +14,7 @@ import com.yahoo.vespa.athenz.client.zms.ZmsClient;
import com.yahoo.vespa.athenz.client.zms.ZmsClientException;
import com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
@@ -74,7 +77,7 @@ public class ZmsClientMock implements ZmsClient {
}
@Override
- public void addRoleMember(AthenzRole role, AthenzIdentity member) {
+ public void addRoleMember(AthenzRole role, AthenzIdentity member, Optional<String> reason) {
if ( ! role.roleName().equals("tenancy.vespa.hosting.admin"))
throw new IllegalArgumentException("Mock only supports adding tenant admins, not " + role.roleName());
getDomainOrThrow(role.domain(), true).tenantAdmin(member);
@@ -96,6 +99,11 @@ public class ZmsClientMock implements ZmsClient {
}
@Override
+ public boolean getGroupMembership(AthenzGroup group, AthenzIdentity identity) {
+ return false;
+ }
+
+ @Override
public List<AthenzDomain> getDomainList(String prefix) {
log("getDomainList()");
return new ArrayList<>(athenz.domains.keySet());
@@ -145,6 +153,19 @@ public class ZmsClientMock implements ZmsClient {
return false;
}
+ @Override
+ public List<AthenzUser> listPendingRoleApprovals(AthenzRole athenzRole) {
+ return List.of();
+ }
+
+ @Override
+ public void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry) {
+ }
+
+ @Override
+ public List<AthenzIdentity> listMembers(AthenzRole athenzRole) {
+ return List.of();
+ }
@Override
public void close() {}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java
index b3317c7f268..ccfd3241810 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Cluster.java
@@ -23,6 +23,7 @@ public class Cluster {
private final Optional<ClusterResources> suggested;
private final Utilization utilization;
private final List<ScalingEvent> scalingEvents;
+ private final String autoscalingStatusCode;
private final String autoscalingStatus;
private final Duration scalingDuration;
private final double maxQueryGrowthRate;
@@ -37,6 +38,7 @@ public class Cluster {
Optional<ClusterResources> suggested,
Utilization utilization,
List<ScalingEvent> scalingEvents,
+ String autoscalingStatusCode,
String autoscalingStatus,
Duration scalingDuration,
double maxQueryGrowthRate,
@@ -50,6 +52,7 @@ public class Cluster {
this.suggested = suggested;
this.utilization = utilization;
this.scalingEvents = scalingEvents;
+ this.autoscalingStatusCode = autoscalingStatusCode;
this.autoscalingStatus = autoscalingStatus;
this.scalingDuration = scalingDuration;
this.maxQueryGrowthRate = maxQueryGrowthRate;
@@ -65,6 +68,7 @@ public class Cluster {
public Optional<ClusterResources> suggested() { return suggested; }
public Utilization utilization() { return utilization; }
public List<ScalingEvent> scalingEvents() { return scalingEvents; }
+ public String autoscalingStatusCode() { return autoscalingStatusCode; }
public String autoscalingStatus() { return autoscalingStatus; }
public Duration scalingDuration() { return scalingDuration; }
public double maxQueryGrowthRate() { return maxQueryGrowthRate; }
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java
index 35b1a238325..9bcb80f24ee 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobType.java
@@ -91,18 +91,22 @@ public enum JobType {
productionAwsApNortheast1a ("production-aws-ap-northeast-1a",
Map.of(Public, ZoneId.from("prod", "aws-ap-northeast-1a"))),
+ testAwsApNortheast1a ("test-aws-ap-northeast-1a",
+ Map.of(Public, ZoneId.from("prod", "aws-ap-northeast-1a")), true),
+
productionAwsEuWest1a ("production-aws-eu-west-1a",
Map.of(Public, ZoneId.from("prod", "aws-eu-west-1a"))),
- testAwsApNortheast1a ("test-aws-ap-northeast-1a",
- Map.of(Public, ZoneId.from("prod", "aws-ap-northeast-1a")), true),
+ testAwsEuWest1a ("test-aws-eu-west-1a",
+ Map.of(Public, ZoneId.from("prod", "aws-eu-west-1a")), true),
productionAwsUsWest2a ("production-aws-us-west-2a",
Map.of(main, ZoneId.from("prod", "aws-us-west-2a"),
Public, ZoneId.from("prod", "aws-us-west-2a"))),
testAwsUsWest2a ("test-aws-us-west-2a",
- Map.of(main, ZoneId.from("prod" , "aws-us-west-2a")), true),
+ Map.of(main, ZoneId.from("prod", "aws-us-west-2a"),
+ Public, ZoneId.from("prod", "aws-us-west-2a")), true),
productionAwsUsEast1b ("production-aws-us-east-1b",
Map.of(main, ZoneId.from("prod" , "aws-us-east-1b"))),
@@ -132,6 +136,9 @@ public enum JobType {
productionCdUsCentral2 ("production-cd-us-central-2",
Map.of(cd , ZoneId.from("prod" , "cd-us-central-2"))),
+ testCdUsCentral2 ("test-cd-us-central-2",
+ Map.of(cd , ZoneId.from("prod" , "cd-us-central-2")), true),
+
productionCdUsWest1 ("production-cd-us-west-1",
Map.of(cd , ZoneId.from("prod" , "cd-us-west-1"))),
@@ -155,7 +162,7 @@ public enum JobType {
Map.of(main, ZoneId.from("perf" , "us-east-3")));
private final String jobName;
- private final Map<SystemName, ZoneId> zones;
+ final Map<SystemName, ZoneId> zones;
private final boolean isProductionTest;
JobType(String jobName, Map<SystemName, ZoneId> zones, boolean isProductionTest) {
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonClient.java
new file mode 100644
index 00000000000..554d3e5b7fa
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonClient.java
@@ -0,0 +1,25 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.horizon;
+
+/**
+ * @author olaa
+ */
+public interface HorizonClient {
+
+ HorizonResponse getMetrics(byte[] query);
+
+ HorizonResponse getUser();
+
+ HorizonResponse getDashboard(String dashboardId);
+
+ HorizonResponse getFavorite(String userId);
+
+ HorizonResponse getTopFolders();
+
+ HorizonResponse getRecent(String userId);
+
+ HorizonResponse getClipboard(String dashboardId);
+
+ HorizonResponse getMetaData(byte[] query);
+
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonResponse.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonResponse.java
new file mode 100644
index 00000000000..5447b8c3b0b
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/HorizonResponse.java
@@ -0,0 +1,36 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.horizon;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * @author valerijf
+ */
+public class HorizonResponse implements AutoCloseable {
+
+ private final int code;
+ private final InputStream inputStream;
+
+ public HorizonResponse(int code, InputStream inputStream) {
+ this.code = code;
+ this.inputStream = inputStream;
+ }
+
+ public int code() {
+ return code;
+ }
+
+ public InputStream inputStream() {
+ return inputStream;
+ }
+
+ public static HorizonResponse empty() {
+ return new HorizonResponse(200, InputStream.nullInputStream());
+ }
+
+ @Override
+ public void close() throws IOException {
+ inputStream.close();
+ }
+}
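Since HorizonResponse is AutoCloseable and wraps an InputStream, callers would typically consume it in a try-with-resources block. A small sketch, assuming some HorizonClient instance is available:

    import java.io.IOException;

    class HorizonResponseSketch {
        void example(HorizonClient client) throws IOException {
            // The mock client further down returns empty 200 responses, so this also works against the mock.
            try (HorizonResponse response = client.getUser()) {
                int status = response.code();
                byte[] body = response.inputStream().readAllBytes();
            }
        }
    }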
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/MockHorizonClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/MockHorizonClient.java
new file mode 100644
index 00000000000..13a8c2ec079
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/MockHorizonClient.java
@@ -0,0 +1,48 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.horizon;
+
+/**
+ * @author olaa
+ */
+public class MockHorizonClient implements HorizonClient {
+
+ @Override
+ public HorizonResponse getMetrics(byte[] query) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getUser() {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getDashboard(String dashboardId) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getFavorite(String userId) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getTopFolders() {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getRecent(String userId) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getClipboard(String dashboardId) {
+ return HorizonResponse.empty();
+ }
+
+ @Override
+ public HorizonResponse getMetaData(byte[] query) {
+ return HorizonResponse.empty();
+ }
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/package-info.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/package-info.java
new file mode 100644
index 00000000000..80bb635089c
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/horizon/package-info.java
@@ -0,0 +1,5 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+package com.yahoo.vespa.hosted.controller.api.integration.horizon;
+
+import com.yahoo.osgi.annotation.ExportPackage;
\ No newline at end of file

diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java
index b6163809f26..6f9b2b496bf 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/ClusterData.java
@@ -35,6 +35,8 @@ public class ClusterData {
public ClusterUtilizationData utilization;
@JsonProperty("scalingEvents")
public List<ScalingEventData> scalingEvents;
+ @JsonProperty("autoscalingStatusCode")
+ public String autoscalingStatusCode;
@JsonProperty("autoscalingStatus")
public String autoscalingStatus;
@JsonProperty("scalingDuration")
@@ -55,6 +57,7 @@ public class ClusterData {
utilization == null ? Cluster.Utilization.empty() : utilization.toClusterUtilization(),
scalingEvents == null ? List.of()
: scalingEvents.stream().map(data -> data.toScalingEvent()).collect(Collectors.toList()),
+ autoscalingStatusCode,
autoscalingStatus,
scalingDuration == null ? Duration.ofMillis(0) : Duration.ofMillis(scalingDuration),
maxQueryGrowthRate == null ? -1 : maxQueryGrowthRate,
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java
index 624e4c61662..cf40ac00d64 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/noderepository/NodeHistory.java
@@ -17,7 +17,7 @@ public class NodeHistory {
@JsonProperty("at")
public Long at;
@JsonProperty("agent")
- public Agent agent;
+ public String agent;
@JsonProperty("event")
public String event;
@@ -25,7 +25,7 @@ public class NodeHistory {
return at;
}
- public Agent getAgent() {
+ public String getAgent() {
return agent;
}
@@ -33,24 +33,4 @@ public class NodeHistory {
return event;
}
- public enum Agent {
- operator,
- application,
- system,
- DirtyExpirer,
- DynamicProvisioningMaintainer,
- FailedExpirer,
- InactiveExpirer,
- NodeFailer,
- NodeHealthTracker,
- ProvisionedExpirer,
- Rebalancer,
- ReservationExpirer,
- RetiringUpgrader,
- RebuildingOsUpgrader,
- SpareCapacityMaintainer,
- SwitchRebalancer,
- HostEncrypter,
- }
-
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java
index dcce25bda95..d3ed804e546 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/user/User.java
@@ -1,6 +1,7 @@
// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration.user;
+import java.time.LocalDate;
import java.util.Objects;
/**
@@ -9,17 +10,34 @@ import java.util.Objects;
public class User {
public static final String ATTRIBUTE_NAME = "vespa.user.attributes";
+ public static final LocalDate NO_DATE = LocalDate.EPOCH;
private final String email;
private final String name;
private final String nickname;
private final String picture;
+ private final boolean isVerified;
+ private final int loginCount;
+ private final LocalDate lastLogin;
public User(String email, String name, String nickname, String picture) {
this.email = Objects.requireNonNull(email);
this.name = name;
this.nickname = nickname;
this.picture = picture;
+ this.isVerified = false;
+ this.loginCount = -1;
+ this.lastLogin = NO_DATE;
+ }
+
+ public User(String email, String name, String nickname, String picture, boolean isVerified, int loginCount, LocalDate lastLogin) {
+ this.email = Objects.requireNonNull(email);
+ this.name = name;
+ this.nickname = nickname;
+ this.picture = picture;
+ this.isVerified = isVerified;
+ this.loginCount = loginCount;
+ this.lastLogin = Objects.requireNonNull(lastLogin);
}
public String name() {
@@ -38,6 +56,12 @@ public class User {
return picture;
}
+ public LocalDate lastLogin() { return lastLogin; }
+
+ public boolean isVerified() { return isVerified; }
+
+ public int loginCount() { return loginCount; }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
@@ -46,11 +70,14 @@ public class User {
return Objects.equals(name, user.name) &&
Objects.equals(email, user.email) &&
Objects.equals(nickname, user.nickname) &&
- Objects.equals(picture, user.picture);
+ Objects.equals(picture, user.picture) &&
+ Objects.equals(lastLogin, user.lastLogin) &&
+ loginCount == user.loginCount &&
+ isVerified == user.isVerified;
}
@Override
public int hashCode() {
- return Objects.hash(name, email, nickname, picture);
+ return Objects.hash(name, email, nickname, picture, lastLogin, loginCount, isVerified);
}
}
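The two constructors differ only in the new profile fields; a sketch of the defaults applied by the legacy constructor, using illustrative values:

    import java.time.LocalDate;

    class UserSketch {
        void example() {
            // Legacy constructor: isVerified == false, loginCount == -1, lastLogin == User.NO_DATE.
            User legacy = new User("alice@example.com", "Alice", "alice", "https://example.com/alice.png");

            // Extended constructor sets the new fields explicitly.
            User full = new User("alice@example.com", "Alice", "alice", "https://example.com/alice.png",
                                 true, 42, LocalDate.of(2021, 6, 1));

            // The two are not equal: equals() and hashCode() now also cover isVerified, loginCount and lastLogin.
        }
    }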
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
index 5e5dfcd6aed..327175c19ed 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
@@ -202,10 +202,6 @@ enum PathGroup {
classifiedTenantInfo("/application/v4/",
"/application/v4/tenant/"),
- /** Paths which contain (not very strictly) classified information about, e.g., customers. */
- classifiedInfo("/",
- "/d/{*}"),
-
/** Paths providing public information. */
publicInfo("/user/v1/user", // Information about who you are.
"/badge/v1/{*}", // Badges for deployment jobs.
@@ -229,7 +225,10 @@ enum PathGroup {
endpointCertificateRequestInfo("/certificateRequests/"),
/** Path used for secret store management */
- secretStore(Matcher.tenant, "/application/v4/tenant/{tenant}/secret-store/{*}");
+ secretStore(Matcher.tenant, "/application/v4/tenant/{tenant}/secret-store/{*}"),
+
+ /** Paths used to proxy Horizon metric requests */
+ horizonProxy("/horizon/v1/{*}");
final List<String> pathSpecs;
final List<Matcher> matchers;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
index ee5f1d806ab..eae5ad5b685 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/Policy.java
@@ -201,7 +201,11 @@ enum Policy {
/** Secret store operations */
secretStoreOperations(Privilege.grant(Action.all())
.on(PathGroup.secretStore)
- .in(SystemName.PublicCd, SystemName.Public));
+ .in(SystemName.PublicCd, SystemName.Public)),
+
+ horizonProxyOperations(Privilege.grant(Action.all())
+ .on(PathGroup.horizonProxy)
+ .in(SystemName.PublicCd));
private final Set<Privilege> privileges;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java
index a0ee0fe3548..3b0e7222cf1 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/RoleDefinition.java
@@ -44,7 +44,8 @@ public enum RoleDefinition {
Policy.publicRead,
Policy.paymentInstrumentRead,
Policy.paymentInstrumentDelete,
- Policy.billingInformationRead),
+ Policy.billingInformationRead,
+ Policy.horizonProxyOperations),
/** User — the dev.ops. role for normal Vespa tenant users */
developer(Policy.applicationCreate,
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobTypeTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobTypeTest.java
new file mode 100644
index 00000000000..22486875a0b
--- /dev/null
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/JobTypeTest.java
@@ -0,0 +1,28 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.integration.deployment;
+
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author jonmv
+ */
+public class JobTypeTest {
+
+ @Test
+ public void test() {
+ for (JobType type : JobType.values()) {
+ if (type.isProduction()) {
+ boolean match = false;
+ for (JobType other : JobType.values())
+ match |= type != other
+ && type.isTest() == other.isDeployment()
+ && type.zones.equals(other.zones);
+
+ assertTrue(type + " should have matching job", match);
+ }
+ }
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index 0f9188d1f65..ff10f3b77ca 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -63,6 +63,7 @@ import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.security.AccessControl;
import com.yahoo.vespa.hosted.controller.security.Credentials;
+import com.yahoo.vespa.hosted.controller.support.access.SupportAccessGrant;
import com.yahoo.vespa.hosted.controller.tenant.AthenzTenant;
import com.yahoo.vespa.hosted.controller.tenant.CloudTenant;
import com.yahoo.vespa.hosted.controller.tenant.Tenant;
@@ -70,6 +71,7 @@ import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import com.yahoo.yolean.Exceptions;
import java.security.Principal;
+import java.security.cert.X509Certificate;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
@@ -440,6 +442,14 @@ public class ApplicationController {
if (applicationPackage.deploymentSpec().requireInstance(instance).concerns(Environment.prod))
application = controller.routing().assignRotations(application, instance);
+ // Validate new deployment spec thoroughly before storing it.
+ controller.jobController().deploymentStatus(application.get());
+
+ // Clear notifications for instances that are no longer declared
+ for (var name : existingInstances)
+ if ( ! declaredInstances.contains(name))
+ controller.notificationsDb().removeNotifications(NotificationSource.from(application.get().id().instance(name)));
+
store(application);
return application;
}
@@ -501,11 +511,14 @@ public class ApplicationController {
.filter(tenant-> tenant instanceof CloudTenant)
.map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
.orElse(List.of());
+ List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(new DeploymentId(application, zone)).stream()
+ .map(SupportAccessGrant::certificate)
+ .collect(toList());
ConfigServer.PreparedApplication preparedApplication =
configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
- tenantRoles, deploymentQuota, tenantSecretStores));
+ tenantRoles, deploymentQuota, tenantSecretStores, operatorCertificates));
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
applicationPackage.zippedContent().length);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
index 1b1df28c201..990549b6d8c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
@@ -72,7 +72,6 @@ public class RoutingController {
private final RoutingPolicies routingPolicies;
private final RotationRepository rotationRepository;
private final BooleanFlag hideSharedRoutingEndpoint;
- private final BooleanFlag vespaAppDomainInCertificate;
public RoutingController(Controller controller, RotationsConfig rotationsConfig) {
this.controller = Objects.requireNonNull(controller, "controller must be non-null");
@@ -80,7 +79,6 @@ public class RoutingController {
this.rotationRepository = new RotationRepository(rotationsConfig, controller.applications(),
controller.curator());
this.hideSharedRoutingEndpoint = Flags.HIDE_SHARED_ROUTING_ENDPOINT.bindTo(controller.flagSource());
- this.vespaAppDomainInCertificate = Flags.VESPA_APP_DOMAIN_IN_CERTIFICATE.bindTo(controller.flagSource());
}
public RoutingPolicies policies() {
@@ -180,7 +178,7 @@ public class RoutingController {
builder = builder.routingMethod(RoutingMethod.exclusive)
.on(Port.tls());
Endpoint endpoint = builder.in(controller.system());
- if (controller.system().isPublic() && vespaAppDomainInCertificate.with(FetchVector.Dimension.APPLICATION_ID, deployment.applicationId().serializedForm()).value()) {
+ if (controller.system().isPublic()) {
Endpoint legacyEndpoint = builder.legacy().in(controller.system());
endpointDnsNames.add(legacyEndpoint.dnsName());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java
index 2322b251fe0..4f01df21430 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java
@@ -1,15 +1,14 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.application;
-import com.google.common.collect.ImmutableList;
-
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.nio.file.Path;
-import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.function.Predicate;
import java.util.zip.ZipEntry;
@@ -21,19 +20,19 @@ import java.util.zip.ZipOutputStream;
*/
public class ZipStreamReader {
- private final ImmutableList<ZipEntryWithContent> entries;
+ private final List<ZipEntryWithContent> entries = new ArrayList<>();
private final int maxEntrySizeInBytes;
public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes) {
this.maxEntrySizeInBytes = maxEntrySizeInBytes;
try (ZipInputStream zipInput = new ZipInputStream(input)) {
- ImmutableList.Builder<ZipEntryWithContent> builder = new ImmutableList.Builder<>();
ZipEntry zipEntry;
+
while (null != (zipEntry = zipInput.getNextEntry())) {
if (!entryNameMatcher.test(requireName(zipEntry.getName()))) continue;
- builder.add(new ZipEntryWithContent(zipEntry, readContent(zipInput)));
+ entries.add(new ZipEntryWithContent(zipEntry, readContent(zipInput)));
}
- entries = builder.build();
+
} catch (IOException e) {
throw new UncheckedIOException("IO error reading zip content", e);
}
@@ -79,10 +78,10 @@ public class ZipStreamReader {
}
}
- public List<ZipEntryWithContent> entries() { return entries; }
+ public List<ZipEntryWithContent> entries() { return Collections.unmodifiableList(entries); }
private static String requireName(String name) {
- if (Arrays.asList(name.split("/")).contains("..") ||
+ if (List.of(name.split("/")).contains("..") ||
!trimTrailingSlash(name).equals(Path.of(name).normalize().toString())) {
throw new IllegalArgumentException("Unexpected non-normalized path found in zip content: '" + name + "'");
}
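A usage sketch of the rewritten reader; the entry filter and size limit are illustrative:

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;

    class ZipStreamReaderSketch {
        void example(byte[] zippedContent) {
            InputStream input = new ByteArrayInputStream(zippedContent);
            // Keep only deployment.xml entries, capped at 1 MB each.
            ZipStreamReader reader = new ZipStreamReader(input, name -> name.endsWith("deployment.xml"), 1024 * 1024);
            // entries() is now an unmodifiable snapshot of the matched entries; non-normalized paths are rejected.
            int matched = reader.entries().size();
        }
    }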
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java
index 073451f6309..cb3e5ea97c1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/athenz/impl/AthenzFacade.java
@@ -215,7 +215,7 @@ public class AthenzFacade implements AccessControl {
}
public void addTenantAdmin(AthenzDomain tenantDomain, AthenzUser user) {
- zmsClient.addRoleMember(new AthenzRole(tenantDomain, "tenancy." + service.getFullName() + ".admin"), user);
+ zmsClient.addRoleMember(new AthenzRole(tenantDomain, "tenancy." + service.getFullName() + ".admin"), user, Optional.empty());
}
private void deleteApplication(AthenzDomain domain, ApplicationName application, OktaIdentityToken identityToken, OktaAccessToken accessToken) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
index 53e88a9a5ac..4f4f4b59a2c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
@@ -5,6 +5,7 @@ import com.yahoo.config.application.api.DeploymentInstanceSpec;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.InstanceName;
+import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -32,6 +33,7 @@ import java.util.OptionalLong;
import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
+import java.util.stream.Collectors;
import static java.util.Comparator.comparing;
import static java.util.stream.Collectors.groupingBy;
@@ -199,6 +201,35 @@ public class DeploymentTrigger {
return List.copyOf(jobs.keySet());
}
+ /** Retriggers the job for the given deployment. If the job is already running, it is aborted and a retrigger is enqueued. */
+ public Optional<JobId> reTriggerOrAddToQueue(DeploymentId deployment) {
+ JobType jobType = JobType.from(controller.system(), deployment.zoneId())
+ .orElseThrow(() -> new IllegalArgumentException(String.format("No job to trigger for (system/zone): %s/%s", controller.system().value(), deployment.zoneId().value())));
+ Optional<Run> existingRun = controller.jobController().active(deployment.applicationId()).stream()
+ .filter(run -> run.id().type().equals(jobType))
+ .findFirst();
+
+ if (existingRun.isPresent()) {
+ Run run = existingRun.get();
+ try (Lock lock = controller.curator().lockDeploymentRetriggerQueue()) {
+ List<RetriggerEntry> retriggerEntries = controller.curator().readRetriggerEntries();
+ List<RetriggerEntry> newList = new ArrayList<>(retriggerEntries);
+ RetriggerEntry requiredEntry = new RetriggerEntry(new JobId(deployment.applicationId(), jobType), run.id().number() + 1);
+ if (newList.stream().noneMatch(entry -> entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun() >= requiredEntry.requiredRun())) {
+ newList.add(requiredEntry);
+ }
+ newList = newList.stream()
+ .filter(entry -> !(entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun() < requiredEntry.requiredRun()))
+ .collect(toList());
+ controller.curator().writeRetriggerEntries(newList);
+ }
+ controller.jobController().abort(run.id());
+ return Optional.empty();
+ } else {
+ return Optional.of(reTrigger(deployment.applicationId(), jobType));
+ }
+ }
+
/** Prevents jobs of the given type from starting, until the given time. */
public void pauseJob(ApplicationId id, JobType jobType, Instant until) {
if (until.isAfter(clock.instant().plus(maxPause)))
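The queue written by reTriggerOrAddToQueue keeps, per job, only the entry with the highest required run. An isolated illustration using the types from this change; the application id and job name are illustrative:

    import com.yahoo.config.provision.ApplicationId;
    import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
    import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;

    class RetriggerQueueSketch {
        void example() {
            JobId job = new JobId(ApplicationId.fromSerializedForm("tenant:app:default"),
                                  JobType.fromJobName("production-aws-us-east-1b"));
            RetriggerEntry older = new RetriggerEntry(job, 41);
            RetriggerEntry newer = new RetriggerEntry(job, 42);
            // After reTriggerOrAddToQueue, only 'newer' remains: entries for the same job with a
            // lower required run are filtered out before the queue is written back.
        }
    }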
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index c7270b6c426..10f96ff13cd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -340,7 +340,10 @@ public class InternalStepRunner implements StepRunner {
.map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown())))
.orElse(false)) {
if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0)
- failureReason = "No nodes allowed to suspend to progress installation for " + timeouts.noNodesDown().toMinutes() + " minutes.";
+ failureReason = "Timed out after waiting " + timeouts.noNodesDown().toMinutes() + " minutes for " +
+ "nodes to suspend. This is normal if the cluster is excessively busy. " +
+ "Nodes will continue to attempt suspension to progress installation independently of " +
+ "this run.";
else
failureReason = "Nodes not able to start with new application package.";
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntry.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntry.java
new file mode 100644
index 00000000000..9c16d80313e
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntry.java
@@ -0,0 +1,27 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.deployment;
+
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
+
+/**
+ * @author mortent
+ */
+public class RetriggerEntry {
+ private final JobId jobId;
+ private final long requiredRun;
+
+ public RetriggerEntry(JobId jobId, long requiredRun) {
+ this.jobId = jobId;
+ this.requiredRun = requiredRun;
+ }
+
+ public JobId jobId() {
+ return jobId;
+ }
+
+ public long requiredRun() {
+ return requiredRun;
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntrySerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntrySerializer.java
new file mode 100644
index 00000000000..6d9206d42b6
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/RetriggerEntrySerializer.java
@@ -0,0 +1,63 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.deployment;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * @author mortent
+ */
+public class RetriggerEntrySerializer {
+
+ private static final String JOB_ID_KEY = "jobId";
+ private static final String APPLICATION_ID_KEY = "applicationId";
+ private static final String JOB_TYPE_KEY = "jobType";
+ private static final String MIN_REQUIRED_RUN_ID_KEY = "minimumRunId";
+
+ public static List<RetriggerEntry> fromSlime(Slime slime) {
+ return SlimeUtils.entriesStream(slime.get().field("entries"))
+ .map(RetriggerEntrySerializer::deserializeEntry)
+ .collect(Collectors.toList());
+ }
+
+ public static Slime toSlime(List<RetriggerEntry> entryList) {
+ Slime slime = new Slime();
+ Cursor root = slime.setObject();
+ Cursor entries = root.setArray("entries");
+ entryList.forEach(e -> RetriggerEntrySerializer.serializeEntry(entries, e));
+ return slime;
+ }
+
+ private static void serializeEntry(Cursor array, RetriggerEntry entry) {
+ Cursor root = array.addObject();
+ Cursor jobid = root.setObject(JOB_ID_KEY);
+ jobid.setString(APPLICATION_ID_KEY, entry.jobId().application().serializedForm());
+ jobid.setString(JOB_TYPE_KEY, entry.jobId().type().jobName());
+ root.setLong(MIN_REQUIRED_RUN_ID_KEY, entry.requiredRun());
+ }
+
+ private static RetriggerEntry deserializeEntry(Inspector inspector) {
+ Inspector jobid = inspector.field(JOB_ID_KEY);
+ ApplicationId applicationId = ApplicationId.fromSerializedForm(require(jobid, APPLICATION_ID_KEY).asString());
+ JobType jobType = JobType.fromJobName(require(jobid, JOB_TYPE_KEY).asString());
+ long minRequiredRunId = require(inspector, MIN_REQUIRED_RUN_ID_KEY).asLong();
+ return new RetriggerEntry(new JobId(applicationId, jobType), minRequiredRunId);
+ }
+
+ private static Inspector require(Inspector inspector, String fieldName) {
+ Inspector field = inspector.field(fieldName);
+ if (!field.valid()) {
+ throw new IllegalStateException("Could not deserialize, field not found in json: " + fieldName);
+ }
+ return field;
+ }
+}
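A round-trip sketch for the serializer, using illustrative identifiers:

    import com.yahoo.config.provision.ApplicationId;
    import com.yahoo.slime.Slime;
    import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
    import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;

    import java.util.List;

    class RetriggerEntrySerializerSketch {
        void example() {
            RetriggerEntry entry = new RetriggerEntry(
                    new JobId(ApplicationId.fromSerializedForm("tenant:app:default"),
                              JobType.fromJobName("production-aws-us-east-1b")),
                    7);
            Slime slime = RetriggerEntrySerializer.toSlime(List.of(entry));
            List<RetriggerEntry> read = RetriggerEntrySerializer.fromSlime(slime);
            // 'read' contains one entry equivalent to the one serialized above.
        }
    }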
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationMetaDataGarbageCollector.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationMetaDataGarbageCollector.java
index 7d94a4c728f..9ec8e4d1a2d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationMetaDataGarbageCollector.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationMetaDataGarbageCollector.java
@@ -19,14 +19,14 @@ public class ApplicationMetaDataGarbageCollector extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
try {
controller().applications().applicationStore().pruneMeta(controller().clock().instant().minus(Duration.ofDays(365)));
- return true;
+ return 1.0;
}
catch (Exception e) {
log.log(Level.WARNING, "Exception pruning old application meta data", e);
- return false;
+ return 0.0;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
index 1f20e48edf5..69e0eb26f16 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
@@ -18,6 +18,7 @@ import com.yahoo.yolean.Exceptions;
import java.time.Duration;
import java.util.HashMap;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
/**
@@ -39,15 +40,17 @@ public class ApplicationOwnershipConfirmer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
- return confirmApplicationOwnerships() &
- ensureConfirmationResponses() &
- updateConfirmedApplicationOwners();
+ protected double maintain() {
+ return ( confirmApplicationOwnerships() +
+ ensureConfirmationResponses() +
+ updateConfirmedApplicationOwners() )
+ / 3;
}
/** File an ownership issue with the owners of all applications we know about. */
- private boolean confirmApplicationOwnerships() {
- AtomicBoolean success = new AtomicBoolean(true);
+ private double confirmApplicationOwnerships() {
+ AtomicInteger attempts = new AtomicInteger(0);
+ AtomicInteger failures = new AtomicInteger(0);
applications()
.withProjectId()
.withProductionDeployment()
@@ -56,6 +59,7 @@ public class ApplicationOwnershipConfirmer extends ControllerMaintainer {
.filter(application -> application.createdAt().isBefore(controller().clock().instant().minus(Duration.ofDays(90))))
.forEach(application -> {
try {
+ attempts.incrementAndGet();
// TODO jvenstad: Makes sense to require, and run this only in main?
tenantOf(application.id()).contact().flatMap(contact -> {
return ownershipIssues.confirmOwnership(application.ownershipIssueId(),
@@ -65,17 +69,17 @@ public class ApplicationOwnershipConfirmer extends ControllerMaintainer {
}).ifPresent(newIssueId -> store(newIssueId, application.id()));
}
catch (RuntimeException e) { // Catch errors due to wrong data in the controller, or issues client timeout.
- success.set(false);
+ failures.incrementAndGet();
log.log(Level.INFO, "Exception caught when attempting to file an issue for '" + application.id() + "': " + Exceptions.toMessageString(e));
}
});
- return success.get();
+ return asSuccessFactor(attempts.get(), failures.get());
}
private ApplicationSummary summaryOf(TenantAndApplicationId application) {
var app = applications.requireApplication(application);
var metrics = new HashMap<ZoneId, ApplicationSummary.Metric>();
- for (Instance instance : app.instances().values())
+ for (Instance instance : app.instances().values()) {
for (var kv : instance.deployments().entrySet()) {
var zone = kv.getKey();
var deploymentMetrics = kv.getValue().metrics();
@@ -83,28 +87,31 @@ public class ApplicationOwnershipConfirmer extends ControllerMaintainer {
deploymentMetrics.queriesPerSecond(),
deploymentMetrics.writesPerSecond()));
}
+ }
return new ApplicationSummary(app.id().defaultInstance(), app.activity().lastQueried(), app.activity().lastWritten(),
app.latestVersion().flatMap(version -> version.buildTime()), metrics);
}
/** Escalate ownership issues which have not been closed before a defined amount of time has passed. */
- private boolean ensureConfirmationResponses() {
- AtomicBoolean success = new AtomicBoolean(true);
+ private double ensureConfirmationResponses() {
+ AtomicInteger attempts = new AtomicInteger(0);
+ AtomicInteger failures = new AtomicInteger(0);
for (Application application : applications())
application.ownershipIssueId().ifPresent(issueId -> {
try {
+ attempts.incrementAndGet();
Tenant tenant = tenantOf(application.id());
ownershipIssues.ensureResponse(issueId, tenant.contact());
}
catch (RuntimeException e) {
- success.set(false);
+ failures.incrementAndGet();
log.log(Level.INFO, "Exception caught when attempting to escalate issue with id '" + issueId + "': " + Exceptions.toMessageString(e));
}
});
- return success.get();
+ return asSuccessFactor(attempts.get(), failures.get());
}
- private boolean updateConfirmedApplicationOwners() {
+ private double updateConfirmedApplicationOwners() {
applications()
.withProjectId()
.withProductionDeployment()
@@ -118,7 +125,7 @@ public class ApplicationOwnershipConfirmer extends ControllerMaintainer {
controller().applications().store(lockedApplication.withOwner(owner)));
});
});
- return true;
+ return 1.0;
}
private ApplicationList applications() {
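Several maintainers in this change now return asSuccessFactor(attempts, failures) instead of a boolean. That helper is not part of this diff (presumably it lives in the shared Maintainer base class); as an assumption only, its shape is roughly:

    // Assumed sketch, not the actual implementation: full success with no attempts,
    // otherwise the fraction of attempts that did not fail.
    static double asSuccessFactor(int attempts, int failures) {
        return attempts == 0 ? 1.0 : 1.0 - (double) failures / attempts;
    }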
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java
index 1a9889284e1..b096a853541 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveAccessMaintainer.java
@@ -37,8 +37,7 @@ public class ArchiveAccessMaintainer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
-
+ protected double maintain() {
// Count buckets - so we can alert if we get close to the account limit of 1000
zoneRegistry.zones().all().ids().forEach(zoneId ->
metric.set(bucketCountMetricName, archiveBucketDb.buckets(zoneId).size(),
@@ -59,6 +58,7 @@ public class ArchiveAccessMaintainer extends ControllerMaintainer {
)
);
- return true;
+ return 1.0;
}
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java
index d2141b097b3..ab8e5efa0bd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdater.java
@@ -38,7 +38,7 @@ public class ArchiveUriUpdater extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
Map<ZoneId, Set<TenantName>> tenantsByZone = new HashMap<>();
for (var application : applications.asList()) {
for (var instance : application.instances().values()) {
@@ -63,7 +63,7 @@ public class ArchiveUriUpdater extends ControllerMaintainer {
.forEach(tenant -> nodeRepository.removeArchiveUri(zone, tenant));
});
- return true;
+ return 1.0;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java
index 1f360c477b9..14e3e685a8a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ChangeRequestMaintainer.java
@@ -43,14 +43,14 @@ public class ChangeRequestMaintainer extends ControllerMaintainer {
@Override
- protected boolean maintain() {
+ protected double maintain() {
var currentChangeRequests = pruneOldChangeRequests();
var changeRequests = changeRequestClient.getChangeRequests(currentChangeRequests);
logger.fine(() -> "Found requests: " + changeRequests);
storeChangeRequests(changeRequests);
- return true;
+ return 1.0;
}
private void storeChangeRequests(List<ChangeRequest> changeRequests) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java
index d923db936cb..5acd0c63670 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudEventReporter.java
@@ -38,7 +38,7 @@ public class CloudEventReporter extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
for (var region : zonesByCloudNativeRegion.keySet()) {
List<CloudEvent> events = eventFetcher.getEvents(region);
for (var event : events) {
@@ -48,7 +48,7 @@ public class CloudEventReporter extends ControllerMaintainer {
deprovisionAffectedHosts(region, event);
}
}
- return true;
+ return 1.0;
}
/** Deprovision any host affected by given event */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java
new file mode 100644
index 00000000000..be8f4254b79
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirer.java
@@ -0,0 +1,80 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.vespa.flags.ListFlag;
+import com.yahoo.vespa.flags.PermanentFlags;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
+import com.yahoo.vespa.hosted.controller.tenant.LastLoginInfo;
+import com.yahoo.vespa.hosted.controller.tenant.Tenant;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.stream.Collectors;
+
+/**
+ * Expires unused tenants from Vespa Cloud.
+ *
+ * @author ogronnesby
+ */
+public class CloudTrialExpirer extends ControllerMaintainer {
+
+ private static final Duration loginExpiry = Duration.ofDays(14);
+ private final ListFlag<String> extendedTrialTenants;
+
+ public CloudTrialExpirer(Controller controller, Duration interval) {
+ super(controller, interval, null, SystemName.allOf(SystemName::isPublic));
+ this.extendedTrialTenants = PermanentFlags.EXTENDED_TRIAL_TENANTS.bindTo(controller().flagSource());
+ }
+
+ @Override
+ protected double maintain() {
+ var expiredTenants = controller().tenants().asList().stream()
+ .filter(this::tenantIsCloudTenant) // only valid for cloud tenants
+ .filter(this::tenantHasTrialPlan) // only valid to expire actual trial tenants
+ .filter(this::tenantIsNotExemptFromExpiry) // feature flag might exempt tenant from expiry
+ .filter(this::tenantReadersNotLoggedIn) // no user logged in last 14 days
+ .filter(this::tenantHasNoDeployments) // no running deployments active
+ .collect(Collectors.toList());
+
+ expireTenants(expiredTenants);
+
+ return 0;
+ }
+
+ private boolean tenantIsCloudTenant(Tenant tenant) {
+ return tenant.type() == Tenant.Type.cloud;
+ }
+
+ private boolean tenantReadersNotLoggedIn(Tenant tenant) {
+ return tenant.lastLoginInfo().get(LastLoginInfo.UserLevel.user)
+ .map(instant -> {
+ var sinceLastLogin = Duration.between(instant, controller().clock().instant());
+ return sinceLastLogin.compareTo(loginExpiry) > 0;
+ })
+ .orElse(false);
+ }
+
+ private boolean tenantHasTrialPlan(Tenant tenant) {
+ var planId = controller().serviceRegistry().billingController().getPlan(tenant.name());
+ return "trial".equals(planId.value());
+ }
+
+ private boolean tenantIsNotExemptFromExpiry(Tenant tenant) {
+ return ! extendedTrialTenants.value().contains(tenant.name().value());
+ }
+
+ private boolean tenantHasNoDeployments(Tenant tenant) {
+ return controller().applications().asList(tenant.name()).stream()
+ .flatMap(app -> app.instances().values().stream())
+ .mapToLong(instance -> instance.deployments().values().size())
+ .sum() == 0;
+ }
+
+ private void expireTenants(List<Tenant> tenants) {
+ tenants.forEach(tenant -> {
+ controller().serviceRegistry().billingController().setPlan(tenant.name(), PlanId.from("none"), false);
+ });
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContactInformationMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContactInformationMaintainer.java
index 7b846fa288c..5ee39f7c8f2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContactInformationMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContactInformationMaintainer.java
@@ -35,12 +35,14 @@ public class ContactInformationMaintainer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
TenantController tenants = controller().tenants();
- boolean success = true;
+ int attempts = 0;
+ int failures = 0;
for (Tenant tenant : tenants.asList()) {
log.log(FINE, () -> "Updating contact information for " + tenant);
try {
+ attempts++;
switch (tenant.type()) {
case athenz:
tenants.lockIfPresent(tenant.name(), LockedTenant.Athenz.class, lockedTenant -> {
@@ -56,13 +58,13 @@ public class ContactInformationMaintainer extends ControllerMaintainer {
throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
}
} catch (Exception e) {
- success = false;
+ failures++;
log.log(Level.WARNING, "Failed to update contact information for " + tenant + ": " +
Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
- return success;
+ return asSuccessFactor(attempts, failures);
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContainerImageExpirer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContainerImageExpirer.java
index ff5fc4d2051..f1574381a3d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContainerImageExpirer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ContainerImageExpirer.java
@@ -34,7 +34,7 @@ public class ContainerImageExpirer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
Instant now = controller().clock().instant();
VersionStatus versionStatus = controller().readVersionStatus();
List<ContainerImage> imagesToExpire = controller().serviceRegistry().containerRegistry().list().stream()
@@ -44,7 +44,7 @@ public class ContainerImageExpirer extends ControllerMaintainer {
log.log(Level.INFO, "Expiring " + imagesToExpire.size() + " container images: " + imagesToExpire);
controller().serviceRegistry().containerRegistry().deleteAll(imagesToExpire);
}
- return true;
+ return 1.0;
}
/** Returns whether given image is expired */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java
index 03a6268397e..f7c4a95baf1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainer.java
@@ -34,7 +34,7 @@ public abstract class ControllerMaintainer extends Maintainer {
public ControllerMaintainer(Controller controller, Duration interval, String name, Set<SystemName> activeSystems) {
super(name, interval, controller.clock().instant(), controller.jobControl(),
- jobMetrics(controller.metric()), controller.curator().cluster(), true);
+ new ControllerJobMetrics(controller.metric()), controller.curator().cluster(), true);
this.controller = controller;
this.activeSystems = Set.copyOf(Objects.requireNonNull(activeSystems));
}
@@ -47,10 +47,19 @@ public abstract class ControllerMaintainer extends Maintainer {
super.run();
}
- private static JobMetrics jobMetrics(Metric metric) {
- return new JobMetrics((job, consecutiveFailures) -> {
- metric.set("maintenance.consecutiveFailures", consecutiveFailures, metric.createContext(Map.of("job", job)));
- });
+ private static class ControllerJobMetrics extends JobMetrics {
+
+ private final Metric metric;
+
+ public ControllerJobMetrics(Metric metric) {
+ this.metric = metric;
+ }
+
+ @Override
+ public void completed(String job, double successFactor) {
+ metric.set("maintenance.successFactor", successFactor, metric.createContext(Map.of("job", job)));
+ }
+
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
index 5a7ef12b246..91dfed500e3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
@@ -10,6 +10,7 @@ import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.controller.Controller;
import java.time.Duration;
+import java.time.temporal.ChronoUnit;
import java.time.temporal.TemporalUnit;
import java.util.Collections;
import java.util.List;
@@ -70,6 +71,8 @@ public class ControllerMaintenance extends AbstractComponent {
maintainers.add(new TenantRoleMaintainer(controller, intervals.tenantRoleMaintainer));
maintainers.add(new ChangeRequestMaintainer(controller, intervals.changeRequestMaintainer));
maintainers.add(new VCMRMaintainer(controller, intervals.vcmrMaintainer));
+ maintainers.add(new CloudTrialExpirer(controller, intervals.defaultInterval));
+ maintainers.add(new RetriggerMaintainer(controller, intervals.retriggerMaintainer));
}
public Upgrader upgrader() { return upgrader; }
@@ -125,6 +128,7 @@ public class ControllerMaintenance extends AbstractComponent {
private final Duration tenantRoleMaintainer;
private final Duration changeRequestMaintainer;
private final Duration vcmrMaintainer;
+ private final Duration retriggerMaintainer;
public Intervals(SystemName system) {
this.system = Objects.requireNonNull(system);
@@ -157,6 +161,7 @@ public class ControllerMaintenance extends AbstractComponent {
this.tenantRoleMaintainer = duration(5, MINUTES);
this.changeRequestMaintainer = duration(1, HOURS);
this.vcmrMaintainer = duration(1, HOURS);
+ this.retriggerMaintainer = duration(1, MINUTES);
}
private Duration duration(long amount, TemporalUnit unit) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java
index 28b64b5bfe0..21cda09d92a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CostReportMaintainer.java
@@ -31,10 +31,10 @@ public class CostReportMaintainer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
var csv = CostCalculator.resourceShareByPropertyToCsv(nodeRepository, controller(), clock, consumer.fixedAllocations());
consumer.consume(csv);
- return true;
+ return 1.0;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java
index e5316788802..9e3da506ca8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirer.java
@@ -28,8 +28,9 @@ public class DeploymentExpirer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
- boolean success = true;
+ protected double maintain() {
+ int attempts = 0;
+ int failures = 0;
for (Application application : controller().applications().readable()) {
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values()) {
@@ -37,16 +38,17 @@ public class DeploymentExpirer extends ControllerMaintainer {
try {
log.log(Level.INFO, "Expiring deployment of " + instance.id() + " in " + deployment.zone());
+ attempts++;
controller().applications().deactivate(instance.id(), deployment.zone());
} catch (Exception e) {
- success = false;
+ failures++;
log.log(Level.WARNING, "Could not expire " + deployment + " of " + instance +
": " + Exceptions.toMessageString(e) + ". Retrying in " +
interval());
}
}
}
- return success;
+ return asSuccessFactor(attempts, failures);
}
/** Returns whether given deployment has expired according to its TTL */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
index a3070ef55a0..4e53e07f5af 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
@@ -21,6 +21,7 @@ import java.util.Collection;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import static com.yahoo.vespa.hosted.controller.versions.VespaVersion.Confidence.broken;
@@ -45,10 +46,11 @@ public class DeploymentIssueReporter extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
- return maintainDeploymentIssues(applications()) &
- maintainPlatformIssue(applications()) &
- escalateInactiveDeploymentIssues(applications());
+ protected double maintain() {
+ return ( maintainDeploymentIssues(applications()) +
+ maintainPlatformIssue(applications()) +
+ escalateInactiveDeploymentIssues(applications()))
+ / 3;
}
/** Returns the applications to maintain issue status for. */
@@ -63,7 +65,7 @@ public class DeploymentIssueReporter extends ControllerMaintainer {
* and store the issue id for the filed issues. Also, clear the issueIds of applications
* where deployment has not failed for this amount of time.
*/
- private boolean maintainDeploymentIssues(List<Application> applications) {
+ private double maintainDeploymentIssues(List<Application> applications) {
List<TenantAndApplicationId> failingApplications = controller().jobController().deploymentStatuses(ApplicationList.from(applications))
.failingApplicationChangeSince(controller().clock().instant().minus(maxFailureAge))
.mapToList(status -> status.application().id());
@@ -73,7 +75,7 @@ public class DeploymentIssueReporter extends ControllerMaintainer {
fileDeploymentIssueFor(application);
else
store(application.id(), null);
- return true;
+ return 1.0;
}
/**
@@ -81,27 +83,26 @@ public class DeploymentIssueReporter extends ControllerMaintainer {
* applications that have been failing the upgrade to the system version for
* longer than the set grace period, or update this list if the issue already exists.
*/
- private boolean maintainPlatformIssue(List<Application> applications) {
- boolean success = true;
+ private double maintainPlatformIssue(List<Application> applications) {
if (controller().system() == SystemName.cd)
- return success;
+ return 1.0;
VersionStatus versionStatus = controller().readVersionStatus();
Version systemVersion = controller().systemVersion(versionStatus);
if (versionStatus.version(systemVersion).confidence() != broken)
- return success;
+ return 1.0;
DeploymentStatusList statuses = controller().jobController().deploymentStatuses(ApplicationList.from(applications));
if (statuses.failingUpgradeToVersionSince(systemVersion, controller().clock().instant().minus(upgradeGracePeriod)).isEmpty())
- return success;
+ return 1.0;
List<ApplicationId> failingApplications = statuses.failingUpgradeToVersionSince(systemVersion, controller().clock().instant())
.mapToList(status -> status.application().id().defaultInstance());
// TODO jonmv: Send only tenant and application, here and elsewhere in this.
deploymentIssues.fileUnlessOpen(failingApplications, systemVersion);
- return success;
+ return 1.0;
}
private Tenant ownerOf(TenantAndApplicationId applicationId) {
@@ -126,21 +127,23 @@ public class DeploymentIssueReporter extends ControllerMaintainer {
}
/** Escalate issues for which there has been no activity for a certain amount of time. */
- private boolean escalateInactiveDeploymentIssues(Collection<Application> applications) {
- AtomicBoolean success = new AtomicBoolean(true);
+ private double escalateInactiveDeploymentIssues(Collection<Application> applications) {
+ AtomicInteger attempts = new AtomicInteger(0);
+ AtomicInteger failures = new AtomicInteger(0);
applications.forEach(application -> application.deploymentIssueId().ifPresent(issueId -> {
try {
+ attempts.incrementAndGet();
Tenant tenant = ownerOf(application.id());
deploymentIssues.escalateIfInactive(issueId,
maxInactivity,
tenant.type() == Tenant.Type.athenz ? tenant.contact() : Optional.empty());
}
catch (RuntimeException e) {
- success.set(false);
+ failures.incrementAndGet();
log.log(Level.INFO, "Exception caught when attempting to escalate issue with id '" + issueId + "': " + Exceptions.toMessageString(e));
}
}));
- return success.get();
+ return asSuccessFactor(attempts.get(), failures.get());
}
private void store(TenantAndApplicationId id, IssueId issueId) {
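DeploymentIssueReporter now averages the factors of its three sub-tasks with equal weight. As a worked example with illustrative numbers only: if deployment-issue filing and platform-issue handling both return 1.0, while escalation fails for one of four issues and (with the asSuccessFactor sketch above) returns 0.75, maintain() reports (1.0 + 1.0 + 0.75) / 3 ≈ 0.92.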
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
index a8214ac8a09..ba4aaf92fc8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainer.java
@@ -44,7 +44,7 @@ public class DeploymentMetricsMaintainer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
AtomicInteger failures = new AtomicInteger(0);
AtomicInteger attempts = new AtomicInteger(0);
AtomicReference<Exception> lastException = new AtomicReference<>(null);
@@ -69,7 +69,7 @@ public class DeploymentMetricsMaintainer extends ControllerMaintainer {
lockedInstance -> lockedInstance.with(existingDeployment.zone(), newMetrics)
.recordActivityAt(now, existingDeployment.zone())));
- controller().notificationsDb().setDeploymentFeedingBlockedNotifications(deploymentId, clusterMetrics);
+ controller().notificationsDb().setDeploymentMetricsNotifications(deploymentId, clusterMetrics);
});
} catch (Exception e) {
failures.incrementAndGet();
@@ -92,7 +92,7 @@ public class DeploymentMetricsMaintainer extends ControllerMaintainer {
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
- return lastException.get() == null;
+ return asSuccessFactor(attempts.get(), failures.get());
}
static DeploymentMetrics updateDeploymentMetrics(DeploymentMetrics current, List<ClusterMetrics> metrics) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java
index 55a957f0247..85a69b0f338 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainer.java
@@ -54,7 +54,7 @@ public class EndpointCertificateMaintainer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
try {
// In order of importance
deployRefreshedCertificates();
@@ -62,10 +62,10 @@ public class EndpointCertificateMaintainer extends ControllerMaintainer {
deleteUnusedCertificates();
} catch (Exception e) {
log.log(LogLevel.ERROR, "Exception caught while maintaining endpoint certificates", e);
- return false;
+ return 0.0;
}
- return true;
+ return 1.0;
}
private void updateRefreshedCertificates() {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdater.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdater.java
index 83ccda422e6..10e6f9eb039 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdater.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/HostInfoUpdater.java
@@ -38,7 +38,7 @@ public class HostInfoUpdater extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
Map<String, NodeEntity> nodeEntities = controller().serviceRegistry().entityService().listNodes().stream()
.collect(Collectors.toMap(NodeEntity::hostname,
Function.identity()));
@@ -62,7 +62,7 @@ public class HostInfoUpdater extends ControllerMaintainer {
LOG.info("Updated information for " + hostsUpdated + " hosts(s)");
}
}
- return true;
+ return 1.0;
}
private static Optional<String> buildModelName(NodeEntity nodeEntity) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/InfrastructureUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/InfrastructureUpgrader.java
index 9859d12510a..5101de73a33 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/InfrastructureUpgrader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/InfrastructureUpgrader.java
@@ -39,23 +39,28 @@ public abstract class InfrastructureUpgrader<VERSION> extends ControllerMaintain
}
@Override
- protected boolean maintain() {
- targetVersion().ifPresent(target -> upgradeAll(target, managedApplications));
- return true;
+ protected double maintain() {
+ if (targetVersion().isEmpty()) return 1.0;
+ return upgradeAll(targetVersion().get(), managedApplications);
}
/** Deploy a list of system applications until they converge on the given version */
- private void upgradeAll(VERSION target, List<SystemApplication> applications) {
+ private double upgradeAll(VERSION target, List<SystemApplication> applications) {
+ int attempts = 0;
+ int failures = 0;
for (List<ZoneApi> zones : upgradePolicy.asList()) {
boolean converged = true;
for (ZoneApi zone : zones) {
try {
+ attempts++;
converged &= upgradeAll(target, applications, zone);
} catch (UnreachableNodeRepositoryException e) {
+ failures++;
converged = false;
log.warning(String.format("%s: Failed to communicate with node repository in %s, continuing with next parallel zone: %s",
this, zone, Exceptions.toMessageString(e)));
} catch (Exception e) {
+ failures++;
converged = false;
log.warning(String.format("%s: Failed to upgrade zone: %s, continuing with next parallel zone: %s",
this, zone, Exceptions.toMessageString(e)));
@@ -65,6 +70,7 @@ public abstract class InfrastructureUpgrader<VERSION> extends ControllerMaintain
break;
}
}
+ return asSuccessFactor(attempts, failures);
}
/** Returns whether all applications have converged to the target version in zone */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunner.java
index b84cfe5af9b..25207b733f0 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunner.java
@@ -49,10 +49,10 @@ public class JobRunner extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
jobs.active().forEach(this::advance);
jobs.collectGarbage();
- return true;
+ return 1.0;
}
@Override
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java
index b26b94f0b28..3f65c2e49cd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporter.java
@@ -73,7 +73,7 @@ public class MetricsReporter extends ControllerMaintainer {
}
@Override
- public boolean maintain() {
+ public double maintain() {
reportDeploymentMetrics();
reportRemainingRotations();
reportQueuedNameServiceRequests();
@@ -82,7 +82,7 @@ public class MetricsReporter extends ControllerMaintainer {
reportAuditLog();
reportBrokenSystemVersion(versionStatus);
reportTenantMetrics();
- return true;
+ return 1.0;
}
private void reportBrokenSystemVersion(VersionStatus versionStatus) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/NameServiceDispatcher.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/NameServiceDispatcher.java
index e57affdc15d..fe20db00e05 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/NameServiceDispatcher.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/NameServiceDispatcher.java
@@ -37,13 +37,12 @@ public class NameServiceDispatcher extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
- boolean success = true;
+ protected double maintain() {
try (var lock = db.lockNameServiceQueue()) {
var queue = db.readNameServiceQueue();
var instant = clock.instant();
var remaining = queue.dispatchTo(nameService, requestCount);
- if (queue == remaining) return success; // Queue unchanged
+ if (queue == remaining) return 1.0; // Queue unchanged
var dispatched = queue.first(requestCount);
if (!dispatched.requests().isEmpty()) {
@@ -54,7 +53,7 @@ public class NameServiceDispatcher extends ControllerMaintainer {
}
db.writeNameServiceQueue(remaining);
}
- return success;
+ return 1.0;
}
private static int requestCount(SystemName system) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
index e1618f05a7d..a02937a03e3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
@@ -42,24 +42,24 @@ public class OsUpgradeScheduler extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
+ Instant now = controller().clock().instant();
+ if (!canTriggerAt(now)) return 1.0;
for (var cloud : supportedClouds()) {
- Optional<Version> newTarget = newTargetIn(cloud);
+ Optional<Version> newTarget = newTargetIn(cloud, now);
if (newTarget.isEmpty()) continue;
controller().upgradeOsIn(cloud, newTarget.get(), upgradeBudget(), false);
}
- return true;
+ return 1.0;
}
/** Returns the new target version for given cloud, if any */
- private Optional<Version> newTargetIn(CloudName cloud) {
+ private Optional<Version> newTargetIn(CloudName cloud, Instant now) {
Optional<Version> currentTarget = controller().osVersionTarget(cloud)
.map(OsVersionTarget::osVersion)
.map(OsVersion::version);
if (currentTarget.isEmpty()) return Optional.empty();
if (!hasExpired(currentTarget.get())) return Optional.empty();
-
- Instant now = controller().clock().instant();
String qualifier = LocalDate.ofInstant(now.minus(AVAILABILITY_INTERVAL), ZoneOffset.UTC)
.format(VERSION_DATE_PATTERN);
return Optional.of(new Version(currentTarget.get().getMajor(),
@@ -88,6 +88,14 @@ public class OsUpgradeScheduler extends ControllerMaintainer {
.collect(Collectors.toUnmodifiableSet());
}
+ private boolean canTriggerAt(Instant instant) {
+ int hourOfDay = instant.atZone(ZoneOffset.UTC).getHour();
+ int dayOfWeek = instant.atZone(ZoneOffset.UTC).getDayOfWeek().getValue();
+ // Upgrade can only be scheduled between 07:00 and 12:59 UTC, Monday-Thursday
+ return hourOfDay >= 7 && hourOfDay <= 12 &&
+ dayOfWeek < 5;
+ }
+
private Duration upgradeBudget() {
return controller().system().isCd() ? Duration.ofHours(1) : Duration.ofDays(14);
}
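The new canTriggerAt check restricts OS upgrade scheduling to 07:00-12:59 UTC, Monday through Thursday. A small standalone illustration of that window, using the same logic as the private helper added above (the class name and dates are made up for the example):

    import java.time.Instant;
    import java.time.LocalDateTime;
    import java.time.ZoneOffset;

    class TriggerWindowExample {
        // Mirrors the helper added to OsUpgradeScheduler above.
        static boolean canTriggerAt(Instant instant) {
            int hourOfDay = instant.atZone(ZoneOffset.UTC).getHour();
            int dayOfWeek = instant.atZone(ZoneOffset.UTC).getDayOfWeek().getValue(); // Monday = 1
            return hourOfDay >= 7 && hourOfDay <= 12 && dayOfWeek < 5;
        }

        public static void main(String[] args) {
            Instant mondayMorning = LocalDateTime.of(2021, 6, 7, 8, 30).toInstant(ZoneOffset.UTC);  // a Monday
            Instant fridayMorning = LocalDateTime.of(2021, 6, 11, 8, 30).toInstant(ZoneOffset.UTC); // a Friday
            System.out.println(canTriggerAt(mondayMorning)); // true: within 07-12 UTC, before Friday
            System.out.println(canTriggerAt(fridayMorning)); // false: Friday is excluded
        }
    }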
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java
index e71fcf12b23..203c8187c2c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgrader.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
import com.yahoo.config.provision.CloudName;
-import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
@@ -89,12 +88,18 @@ public class OsUpgrader extends InfrastructureUpgrader<OsVersionTarget> {
/** Returns the available upgrade budget for given zone */
private Duration zoneBudgetOf(Duration totalBudget, ZoneApi zone) {
- if (!zone.getEnvironment().isProduction()) return Duration.ZERO;
- long consecutiveProductionZones = upgradePolicy.asList().stream()
- .filter(parallelZones -> parallelZones.stream().map(ZoneApi::getEnvironment)
- .anyMatch(Environment::isProduction))
- .count();
- return totalBudget.dividedBy(consecutiveProductionZones);
+ if (!spendBudget(zone)) return Duration.ZERO;
+ long consecutiveZones = upgradePolicy.asList().stream()
+ .filter(parallelZones -> parallelZones.stream().anyMatch(this::spendBudget))
+ .count();
+ return totalBudget.dividedBy(consecutiveZones);
+ }
+
+ /** Returns whether to spend upgrade budget on given zone */
+ private boolean spendBudget(ZoneApi zone) {
+ if (!zone.getEnvironment().isProduction()) return false;
+ if (controller().zoneRegistry().systemZone().getVirtualId().equals(zone.getVirtualId())) return false; // Controller zone
+ return true;
}
/** Returns whether node is in a state where it can be upgraded */
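With this change, zoneBudgetOf splits the total budget evenly over the upgrade-policy steps that contain at least one budget-spending zone, and a production zone no longer spends budget if it hosts the controller. Illustrative arithmetic with assumed numbers: given the non-CD total budget of Duration.ofDays(14) from upgradeBudget() above and seven qualifying steps, each step is allotted Duration.ofDays(2), while the controller zone and non-production zones get Duration.ZERO.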
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdater.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdater.java
index cbd9207fda4..271dd277e1c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdater.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdater.java
@@ -18,16 +18,16 @@ public class OsVersionStatusUpdater extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
try {
OsVersionStatus newStatus = OsVersionStatus.compute(controller());
controller().updateOsVersionStatus(newStatus);
- return true;
+ return 1.0;
} catch (Exception e) {
log.log(Level.WARNING, "Failed to compute OS version status: " + Exceptions.toMessageString(e) +
". Retrying in " + interval());
}
- return false;
+ return 0.0;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java
index a032f266de5..9d93ac719b7 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployer.java
@@ -19,13 +19,13 @@ public class OutstandingChangeDeployer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
for (Application application : ApplicationList.from(controller().applications().readable())
.withProductionDeployment()
.withDeploymentSpec()
.asList())
controller().applications().deploymentTrigger().triggerNewRevision(application.id());
- return true;
+ return 1.0;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReadyJobsTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReadyJobsTrigger.java
index a626f21359a..ffe958cb63a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReadyJobsTrigger.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReadyJobsTrigger.java
@@ -17,9 +17,9 @@ public class ReadyJobsTrigger extends ControllerMaintainer {
}
@Override
- public boolean maintain() {
+ public double maintain() {
controller().applications().deploymentTrigger().triggerReadyJobs();
- return true;
+ return 1.0;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReindexingTriggerer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReindexingTriggerer.java
index 263a33cf266..0bd74c844ae 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReindexingTriggerer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ReindexingTriggerer.java
@@ -40,7 +40,7 @@ public class ReindexingTriggerer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
try {
Instant now = controller().clock().instant();
for (Application application : controller().applications().asList())
@@ -51,11 +51,11 @@ public class ReindexingTriggerer extends ControllerMaintainer {
&& reindexingIsReady(controller().applications().applicationReindexing(id, deployment.zone()), now))
controller().applications().reindex(id, deployment.zone(), List.of(), List.of(), true);
});
- return true;
+ return 1.0;
}
catch (RuntimeException e) {
log.log(Level.WARNING, "Failed to trigger reindexing: " + Exceptions.toMessageString(e));
- return false;
+ return 0.0;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
index aed2e637e4b..39ad233ce46 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
@@ -79,19 +79,19 @@ public class ResourceMeterMaintainer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
Collection<ResourceSnapshot> resourceSnapshots;
try {
resourceSnapshots = getAllResourceSnapshots();
} catch (Exception e) {
log.log(Level.WARNING, "Failed to collect resource snapshots. Retrying in " + interval() + ". Error: " +
Exceptions.toMessageString(e));
- return false;
+ return 0.0;
}
if (systemName.isPublic()) reportResourceSnapshots(resourceSnapshots);
updateDeploymentCost(resourceSnapshots);
- return true;
+ return 1.0;
}
void updateDeploymentCost(Collection<ResourceSnapshot> resourceSnapshots) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java
index c7bf7e765ed..ab988bcf0ac 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceTagMaintainer.java
@@ -28,7 +28,7 @@ public class ResourceTagMaintainer extends ControllerMaintainer {
}
@Override
- public boolean maintain() {
+ public double maintain() {
controller().zoneRegistry().zones()
.ofCloud(CloudName.from("aws"))
.reachable()
@@ -38,7 +38,7 @@ public class ResourceTagMaintainer extends ControllerMaintainer {
if (taggedResources > 0)
log.log(Level.INFO, "Tagged " + taggedResources + " resources in " + zone.getId());
});
- return true;
+ return 1.0;
}
private Map<HostName, Optional<ApplicationId>> getTenantOfParentHosts(ZoneId zoneId) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainer.java
new file mode 100644
index 00000000000..2cc3ac1bd6c
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainer.java
@@ -0,0 +1,66 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
+import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry;
+import com.yahoo.vespa.hosted.controller.deployment.Run;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.Optional;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+public class RetriggerMaintainer extends ControllerMaintainer {
+
+ private static final Logger logger = Logger.getLogger(RetriggerMaintainer.class.getName());
+
+ public RetriggerMaintainer(Controller controller, Duration interval) {
+ super(controller, interval);
+ }
+
+ @Override
+ protected double maintain() {
+ try (var lock = controller().curator().lockDeploymentRetriggerQueue()) {
+ List<RetriggerEntry> retriggerEntries = controller().curator().readRetriggerEntries();
+
+ // Trigger all jobs that still need triggering and are not already running
+ retriggerEntries.stream()
+ .filter(this::needsTrigger)
+ .filter(entry -> readyToTrigger(entry.jobId()))
+ .forEach(entry -> controller().applications().deploymentTrigger().reTrigger(entry.jobId().application(), entry.jobId().type()));
+
+ // Remove all jobs that have succeeded with the required job run and persist the list
+ List<RetriggerEntry> remaining = retriggerEntries.stream()
+ .filter(this::needsTrigger)
+ .collect(Collectors.toList());
+ controller().curator().writeRetriggerEntries(remaining);
+ } catch (Exception e) {
+ logger.log(Level.WARNING, "Exception while triggering jobs", e);
+ return 0.0;
+ }
+ return 1.0;
+ }
+
+ /*
+ Returns true if a job is ready to run, i.e. it is currently not running
+ */
+ private boolean readyToTrigger(JobId jobId) {
+ Optional<Run> existingRun = controller().jobController().active(jobId.application()).stream()
+ .filter(run -> run.id().type().equals(jobId.type()))
+ .findFirst();
+ return existingRun.isEmpty();
+ }
+
+ /*
+ Returns true if the job needs triggering, i.e. the last completed run is older than the run required by the queue entry.
+ */
+ private boolean needsTrigger(RetriggerEntry entry) {
+ return controller().jobController().lastCompleted(entry.jobId())
+ .filter(run -> run.id().number() < entry.requiredRun())
+ .isPresent();
+ }
+}
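RetriggerEntry and its serializer are defined elsewhere in this change (com.yahoo.vespa.hosted.controller.deployment) and are not shown in this section. From its use above (entry.jobId(), entry.requiredRun()), the entry presumably pairs a job with the run number it must reach before it can be dropped from the queue; an assumed sketch of that shape:

    import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;

    // Assumed shape only, not the actual class from this change.
    public class RetriggerEntry {
        private final JobId jobId;
        private final long requiredRun;

        public RetriggerEntry(JobId jobId, long requiredRun) {
            this.jobId = jobId;
            this.requiredRun = requiredRun;
        }

        public JobId jobId() { return jobId; }
        public long requiredRun() { return requiredRun; }
    }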
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainer.java
index 3b0a1fca4af..e40d772a673 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemRoutingPolicyMaintainer.java
@@ -21,14 +21,14 @@ public class SystemRoutingPolicyMaintainer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
for (var zone : controller().zoneRegistry().zones().all().ids()) {
for (var application : SystemApplication.values()) {
if (!application.hasEndpoint()) continue;
controller().routing().policies().refresh(application.id(), DeploymentSpec.empty, zone);
}
}
- return true;
+ return 1.0;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java
index 1265d687850..637ae10bcc6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainer.java
@@ -23,7 +23,7 @@ public class TenantRoleMaintainer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
var roleService = controller().serviceRegistry().roleService();
var tenants = controller().tenants().asList();
var tenantsWithRoles = tenants.stream()
@@ -31,7 +31,7 @@ public class TenantRoleMaintainer extends ControllerMaintainer {
.filter(this::hasProductionDeployment)
.collect(Collectors.toList());
roleService.maintainRoles(tenantsWithRoles);
- return true;
+ return 1.0;
}
private boolean hasProductionDeployment(TenantName tenant) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdater.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdater.java
index fbe9faa9754..0af0d01478b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdater.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdater.java
@@ -36,30 +36,34 @@ public class TrafficShareUpdater extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
- boolean success = false;
+ protected double maintain() {
Exception lastException = null;
+ int attempts = 0;
+ int failures = 0;
for (var application : applications.asList()) {
for (var instance : application.instances().values()) {
for (var deployment : instance.deployments().values()) {
if ( ! deployment.zone().environment().isProduction()) continue;
try {
- success |= updateTrafficFraction(instance, deployment);
+ attempts++;
+ updateTrafficFraction(instance, deployment);
}
catch (Exception e) {
// Some failures due to locked applications are expected and benign
+ failures++;
lastException = e;
}
}
}
}
- if ( ! success && lastException != null) // log on complete failure
+ double successFactor = asSuccessFactor(attempts, failures);
+ if ( successFactor == 0 )
log.log(Level.WARNING, "Could not update traffic share on any applications", lastException);
- return success;
+ return successFactor;
}
- private boolean updateTrafficFraction(Instance instance, Deployment deployment) {
+ private void updateTrafficFraction(Instance instance, Deployment deployment) {
double qpsInZone = deployment.metrics().queriesPerSecond();
double totalQps = instance.deployments().values().stream()
.filter(i -> i.zone().environment().isProduction())
@@ -73,7 +77,6 @@ public class TrafficShareUpdater extends ControllerMaintainer {
maxReadShare = currentReadShare; // distribution can be incorrect
nodeRepository.patchApplication(deployment.zone(), instance.id(), currentReadShare, maxReadShare);
- return true;
}
}
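Only the bookkeeping changes in TrafficShareUpdater: updateTrafficFraction no longer returns a per-deployment boolean, and the maintainer counts attempts and failures instead, logging the last exception only when every attempt failed. The read-share arithmetic itself is untouched; as an illustration with assumed numbers, a production deployment serving 30 of a total 100 queries per second across the instance's production zones would presumably patch a current read share of 0.3 to the node repository, with maxReadShare derived in lines outside this hunk.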
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java
index 8d5019904fa..2326f7b66ee 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java
@@ -51,7 +51,7 @@ public class Upgrader extends ControllerMaintainer {
* Schedule application upgrades. Note that this implementation must be idempotent.
*/
@Override
- public boolean maintain() {
+ public double maintain() {
// Determine target versions for each upgrade policy
VersionStatus versionStatus = controller().readVersionStatus();
Version canaryTarget = controller().systemVersion(versionStatus);
@@ -91,7 +91,7 @@ public class Upgrader extends ControllerMaintainer {
upgrade(instances.with(UpgradePolicy.canary), canaryTarget, targetMajorVersion, instances.size());
defaultTargets.forEach(target -> upgrade(instances.with(UpgradePolicy.defaultPolicy), target, targetMajorVersion, numberOfApplicationsToUpgrade()));
conservativeTargets.forEach(target -> upgrade(instances.with(UpgradePolicy.conservative), target, targetMajorVersion, numberOfApplicationsToUpgrade()));
- return true;
+ return 1.0;
}
/** Returns the target versions for given confidence, one per major version in the system */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java
index fedf3d90760..4cd24289676 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VCMRMaintainer.java
@@ -57,7 +57,7 @@ public class VCMRMaintainer extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
var changeRequests = curator.readChangeRequests()
.stream()
.filter(shouldUpdate())
@@ -81,7 +81,7 @@ public class VCMRMaintainer extends ControllerMaintainer {
});
}
});
- return true;
+ return 1.0;
}
/**
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VersionStatusUpdater.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VersionStatusUpdater.java
index a3e9672b715..e4866c43f13 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VersionStatusUpdater.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/VersionStatusUpdater.java
@@ -29,7 +29,7 @@ public class VersionStatusUpdater extends ControllerMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
try {
VersionStatus newStatus = VersionStatus.compute(controller());
controller().updateVersionStatus(newStatus);
@@ -37,12 +37,12 @@ public class VersionStatusUpdater extends ControllerMaintainer {
controller().serviceRegistry().systemMonitor().reportSystemVersion(version.versionNumber(),
convert(version.confidence()));
});
- return true;
+ return 1.0;
} catch (Exception e) {
log.log(Level.WARNING, "Failed to compute version status: " + Exceptions.toMessageString(e) +
". Retrying in " + interval());
}
- return false;
+ return 0.0;
}
static SystemMonitor.Confidence convert(VespaVersion.Confidence confidence) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
index ea0422ea9fc..b65a9290e43 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/Notification.java
@@ -62,7 +62,7 @@ public class Notification {
public enum Level {
// Must be ordered in order of importance
- warning, error
+ info, warning, error
}
public enum Type {
@@ -73,7 +73,10 @@ public class Notification {
deployment,
/** Application cluster is (near) external feed blocked */
- feedBlock;
+ feedBlock,
+
+ /** Application cluster is reindexing document(s) */
+ reindex;
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
index 21df0c01f0f..0cf1aebeba9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDb.java
@@ -95,31 +95,22 @@ public class NotificationsDb {
}
/**
- * Updates feeding blocked notifications for the given deployment based on current cluster metrics.
- * Will clear notifications of any cluster not reporting the metrics or whose metrics indicate feed is not blocked,
- * while setting notifications for cluster that are (Level.error) or are nearly (Level.warning) feed blocked.
+ * Updates notifications for the given deployment based on its current cluster metrics
+ * (e.g. feed block status and reindexing progress).
+ * Will clear notifications for any cluster not reporting the metrics, or whose metrics indicate that feed is not
+ * blocked or reindexing is no longer in progress. Will set notifications for clusters:
+ * - that are (Level.error) or are nearly (Level.warning) feed blocked,
+ * - that are (Level.info) currently reindexing at least 1 document type.
*/
- public void setDeploymentFeedingBlockedNotifications(DeploymentId deploymentId, List<ClusterMetrics> clusterMetrics) {
+ public void setDeploymentMetricsNotifications(DeploymentId deploymentId, List<ClusterMetrics> clusterMetrics) {
Instant now = clock.instant();
- List<Notification> feedBlockNotifications = clusterMetrics.stream()
+ List<Notification> newNotifications = clusterMetrics.stream()
.flatMap(metric -> {
- Optional<Pair<Level, String>> memoryStatus =
- resourceUtilToFeedBlockStatus("memory", metric.memoryUtil(), metric.memoryFeedBlockLimit());
- Optional<Pair<Level, String>> diskStatus =
- resourceUtilToFeedBlockStatus("disk", metric.diskUtil(), metric.diskFeedBlockLimit());
- if (memoryStatus.isEmpty() && diskStatus.isEmpty()) return Stream.empty();
-
- // Find the max among levels
- Level level = Stream.of(memoryStatus, diskStatus)
- .flatMap(status -> status.stream().map(Pair::getFirst))
- .max(Comparator.comparing(Enum::ordinal)).get();
- List<String> messages = Stream.concat(memoryStatus.stream(), diskStatus.stream())
- .filter(status -> status.getFirst() == level) // Do not mix message from different levels
- .map(Pair::getSecond)
- .collect(Collectors.toUnmodifiableList());
NotificationSource source = NotificationSource.from(deploymentId, ClusterSpec.Id.from(metric.getClusterId()));
- return Stream.of(new Notification(now, Type.feedBlock, level, source, messages));
+ return Stream.of(createFeedBlockNotification(source, now, metric),
+ createReindexNotification(source, now, metric));
})
+ .flatMap(Optional::stream)
.collect(Collectors.toUnmodifiableList());
NotificationSource deploymentSource = NotificationSource.from(deploymentId);
@@ -128,10 +119,11 @@ public class NotificationsDb {
List<Notification> updated = Stream.concat(
initial.stream()
.filter(notification ->
- // Filter out old feed block notifications for this deployment
- notification.type() != Type.feedBlock || !deploymentSource.contains(notification.source())),
+ // Filter out old feed block and reindex notifications for this deployment
+ (notification.type() != Type.feedBlock && notification.type() != Type.reindex) ||
+ !deploymentSource.contains(notification.source())),
// ... and add the new notifications for this deployment
- feedBlockNotifications.stream())
+ newNotifications.stream())
.collect(Collectors.toUnmodifiableList());
if (!initial.equals(updated))
@@ -139,6 +131,33 @@ public class NotificationsDb {
}
}
+ private static Optional<Notification> createFeedBlockNotification(NotificationSource source, Instant at, ClusterMetrics metric) {
+ Optional<Pair<Level, String>> memoryStatus =
+ resourceUtilToFeedBlockStatus("memory", metric.memoryUtil(), metric.memoryFeedBlockLimit());
+ Optional<Pair<Level, String>> diskStatus =
+ resourceUtilToFeedBlockStatus("disk", metric.diskUtil(), metric.diskFeedBlockLimit());
+ if (memoryStatus.isEmpty() && diskStatus.isEmpty()) return Optional.empty();
+
+ // Find the max among levels
+ Level level = Stream.of(memoryStatus, diskStatus)
+ .flatMap(status -> status.stream().map(Pair::getFirst))
+ .max(Comparator.comparing(Enum::ordinal)).get();
+ List<String> messages = Stream.concat(memoryStatus.stream(), diskStatus.stream())
+ .filter(status -> status.getFirst() == level) // Do not mix message from different levels
+ .map(Pair::getSecond)
+ .collect(Collectors.toUnmodifiableList());
+ return Optional.of(new Notification(at, Type.feedBlock, level, source, messages));
+ }
+
+ private static Optional<Notification> createReindexNotification(NotificationSource source, Instant at, ClusterMetrics metric) {
+ if (metric.reindexingProgress().isEmpty()) return Optional.empty();
+ List<String> messages = metric.reindexingProgress().entrySet().stream()
+ .map(entry -> String.format("document type '%s' (%.1f%% done)", entry.getKey(), 100 * entry.getValue()))
+ .sorted()
+ .collect(Collectors.toUnmodifiableList());
+ return Optional.of(new Notification(at, Type.reindex, Level.info, source, messages));
+ }
+
/**
* Returns a feed block summary for the given resource: the notification level and
* notification message for the given resource utilization wrt. given resource limit.
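Given the format string in createReindexNotification above, the resulting message for a hypothetical cluster reindexing one document type can be checked with a small standalone example (the document type name and progress value are made up; a Locale is passed here only to make the sample output deterministic, the code above uses the default):

    import java.util.Locale;

    class ReindexMessageExample {
        public static void main(String[] args) {
            String message = String.format(Locale.US, "document type '%s' (%.1f%% done)", "music", 100 * 0.475);
            System.out.println(message); // prints: document type 'music' (47.5% done)
        }
    }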
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
index 06442779b9c..26fb4be04af 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
@@ -295,18 +295,18 @@ public class ApplicationSerializer {
Inspector root = slime.get();
TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
- Instant createdAt = Serializers.instant(root.field(createdAtField));
+ Instant createdAt = SlimeUtils.instant(root.field(createdAtField));
DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
- Optional<IssueId> deploymentIssueId = Serializers.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
- Optional<IssueId> ownershipIssueId = Serializers.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
- Optional<User> owner = Serializers.optionalString(root.field(ownerField)).map(User::from);
- OptionalInt majorVersion = Serializers.optionalInteger(root.field(majorVersionField));
+ Optional<IssueId> deploymentIssueId = SlimeUtils.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
+ Optional<IssueId> ownershipIssueId = SlimeUtils.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
+ Optional<User> owner = SlimeUtils.optionalString(root.field(ownerField)).map(User::from);
+ OptionalInt majorVersion = SlimeUtils.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
List<Instance> instances = instancesFromSlime(id, root.field(instancesField));
- OptionalLong projectId = Serializers.optionalLong(root.field(projectIdField));
+ OptionalLong projectId = SlimeUtils.optionalLong(root.field(projectIdField));
Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
return new Application(id, createdAt, deploymentSpec, validationOverrides,
@@ -354,18 +354,18 @@ public class ApplicationSerializer {
return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
Version.fromString(deploymentObject.field(versionField).asString()),
- Serializers.instant(deploymentObject.field(deployTimeField)),
+ SlimeUtils.instant(deploymentObject.field(deployTimeField)),
deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
- DeploymentActivity.create(Serializers.optionalInstant(deploymentObject.field(lastQueriedField)),
- Serializers.optionalInstant(deploymentObject.field(lastWrittenField)),
- Serializers.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
- Serializers.optionalDouble(deploymentObject.field(lastWritesPerSecondField))),
- QuotaUsage.create(Serializers.optionalDouble(deploymentObject.field(quotaUsageRateField))),
- Serializers.optionalDouble(deploymentObject.field(deploymentCostField)));
+ DeploymentActivity.create(SlimeUtils.optionalInstant(deploymentObject.field(lastQueriedField)),
+ SlimeUtils.optionalInstant(deploymentObject.field(lastWrittenField)),
+ SlimeUtils.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
+ SlimeUtils.optionalDouble(deploymentObject.field(lastWritesPerSecondField))),
+ QuotaUsage.create(SlimeUtils.optionalDouble(deploymentObject.field(quotaUsageRateField))),
+ SlimeUtils.optionalDouble(deploymentObject.field(deploymentCostField)));
}
private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
- Optional<Instant> instant = Serializers.optionalInstant(object.field(deploymentMetricsUpdateTime));
+ Optional<Instant> instant = SlimeUtils.optionalInstant(object.field(deploymentMetricsUpdateTime));
return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
object.field(deploymentMetricsWPSField).asDouble(),
object.field(deploymentMetricsDocsField).asDouble(),
@@ -388,7 +388,7 @@ public class ApplicationSerializer {
object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
new RotationStatus.Targets(
singleRotationStatusFromSlime(statusObject.field(statusField)),
- Serializers.instant(statusObject.field(lastUpdatedField)))));
+ SlimeUtils.instant(statusObject.field(lastUpdatedField)))));
return RotationStatus.from(statusMap);
}
@@ -411,16 +411,16 @@ public class ApplicationSerializer {
private ApplicationVersion applicationVersionFromSlime(Inspector object) {
if ( ! object.valid()) return ApplicationVersion.unknown;
- OptionalLong applicationBuildNumber = Serializers.optionalLong(object.field(applicationBuildNumberField));
+ OptionalLong applicationBuildNumber = SlimeUtils.optionalLong(object.field(applicationBuildNumberField));
if (applicationBuildNumber.isEmpty())
return ApplicationVersion.unknown;
Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
- Optional<String> authorEmail = Serializers.optionalString(object.field(authorEmailField));
- Optional<Version> compileVersion = Serializers.optionalString(object.field(compileVersionField)).map(Version::fromString);
- Optional<Instant> buildTime = Serializers.optionalInstant(object.field(buildTimeField));
- Optional<String> sourceUrl = Serializers.optionalString(object.field(sourceUrlField));
- Optional<String> commit = Serializers.optionalString(object.field(commitField));
+ Optional<String> authorEmail = SlimeUtils.optionalString(object.field(authorEmailField));
+ Optional<Version> compileVersion = SlimeUtils.optionalString(object.field(compileVersionField)).map(Version::fromString);
+ Optional<Instant> buildTime = SlimeUtils.optionalInstant(object.field(buildTimeField));
+ Optional<String> sourceUrl = SlimeUtils.optionalString(object.field(sourceUrlField));
+ Optional<String> commit = SlimeUtils.optionalString(object.field(commitField));
return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit);
}
@@ -437,7 +437,7 @@ public class ApplicationSerializer {
object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) ->
JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString())
.ifPresent(jobType -> jobPauses.put(jobType,
- Serializers.instant(jobPauseObject.field(pausedUntilField)))));
+ SlimeUtils.instant(jobPauseObject.field(pausedUntilField)))));
return jobPauses;
}
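The serializer changes above replace the controller-local Serializers helpers with equivalents on com.yahoo.slime.SlimeUtils. Those SlimeUtils methods are not shown in this diff; a minimal sketch of what the optional-reading helpers presumably look like, assuming an invalid Inspector field means the value is absent:

    import com.yahoo.slime.Inspector;
    import java.time.Instant;
    import java.util.Optional;
    import java.util.OptionalLong;

    // Assumed shapes only, not the actual SlimeUtils source.
    final class SlimeUtilsSketch {
        static Optional<String> optionalString(Inspector field) {
            return field.valid() ? Optional.of(field.asString()) : Optional.empty();
        }

        static OptionalLong optionalLong(Inspector field) {
            return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty();
        }

        static Instant instant(Inspector field) {
            return Instant.ofEpochMilli(field.asLong());
        }
    }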
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/AuditLogSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/AuditLogSerializer.java
index 7ea722bf5de..e5253462730 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/AuditLogSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/AuditLogSerializer.java
@@ -5,6 +5,7 @@ import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLog;
import java.util.ArrayList;
@@ -51,11 +52,11 @@ public class AuditLogSerializer {
Cursor root = slime.get();
root.field(entriesField).traverse((ArrayTraverser) (i, entryObject) -> {
entries.add(new AuditLog.Entry(
- Serializers.instant(entryObject.field(atField)),
+ SlimeUtils.instant(entryObject.field(atField)),
entryObject.field(principalField).asString(),
methodFrom(entryObject.field(methodField)),
entryObject.field(resourceField).asString(),
- Serializers.optionalString(entryObject.field(dataField))
+ SlimeUtils.optionalString(entryObject.field(dataField))
));
});
return new AuditLog(entries);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ControllerVersionSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ControllerVersionSerializer.java
index 30fcc0e40c6..24a6ef72438 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ControllerVersionSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ControllerVersionSerializer.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.persistence;
import com.yahoo.component.Version;
import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.versions.ControllerVersion;
/**
@@ -36,7 +37,7 @@ public class ControllerVersionSerializer {
var root = slime.get();
var version = Version.fromString(root.field(VERSION_FIELD).asString());
var commitSha = root.field(COMMIT_SHA_FIELD).asString();
- var commitDate = Serializers.instant(root.field(COMMIT_DATE_FIELD));
+ var commitDate = SlimeUtils.instant(root.field(COMMIT_DATE_FIELD));
return new ControllerVersion(version, commitSha, commitDate);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
index 3736d18a01c..08f1900c6e8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
@@ -22,6 +22,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLog;
+import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry;
+import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntrySerializer;
import com.yahoo.vespa.hosted.controller.deployment.Run;
import com.yahoo.vespa.hosted.controller.deployment.Step;
import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue;
@@ -221,6 +223,10 @@ public class CuratorDb {
return curator.lock(lockRoot.append("supportAccess").append(deploymentId.dottedString()), defaultLockTimeout);
}
+ public Lock lockDeploymentRetriggerQueue() {
+ return curator.lock(lockRoot.append("deploymentRetriggerQueue"), defaultLockTimeout);
+ }
+
// -------------- Helpers ------------------------------------------
/** Try locking with a low timeout, meaning it is OK to fail lock acquisition.
@@ -632,7 +638,17 @@ public class CuratorDb {
/** Take lock before reading before writing */
public void writeSupportAccess(DeploymentId deploymentId, SupportAccess supportAccess) {
- curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess, true, Optional.empty())));
+ curator.set(supportAccessPath(deploymentId), asJson(SupportAccessSerializer.toSlime(supportAccess)));
+ }
+
+ // -------------- Job Retrigger entries -----------------------------------
+
+ public List<RetriggerEntry> readRetriggerEntries() {
+ return readSlime(deploymentRetriggerPath()).map(RetriggerEntrySerializer::fromSlime).orElse(List.of());
+ }
+
+ public void writeRetriggerEntries(List<RetriggerEntry> retriggerEntries) {
+ curator.set(deploymentRetriggerPath(), asJson(RetriggerEntrySerializer.toSlime(retriggerEntries)));
}
// -------------- Paths ---------------------------------------------------
@@ -772,4 +788,8 @@ public class CuratorDb {
return supportAccessRoot.append(deploymentId.dottedString());
}
+ private static Path deploymentRetriggerPath() {
+ return root.append("deploymentRetriggerQueue");
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/LogSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/LogSerializer.java
index 6416d077ce4..4e3ab293a02 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/LogSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/LogSerializer.java
@@ -92,7 +92,7 @@ class LogSerializer {
private LogEntry fromSlime(Inspector entryObject) {
return new LogEntry(entryObject.field(idField).asLong(),
- Serializers.instant(entryObject.field(timestampField)),
+ SlimeUtils.instant(entryObject.field(timestampField)),
typeOf(entryObject.field(typeField).asString()),
entryObject.field(messageField).asString());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NameServiceQueueSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NameServiceQueueSerializer.java
index ff1dd4d95c6..dd431d94d94 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NameServiceQueueSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NameServiceQueueSerializer.java
@@ -6,6 +6,7 @@ import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordData;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
@@ -113,8 +114,8 @@ public class NameServiceQueueSerializer {
private RemoveRecords removeRecordsFromSlime(Inspector object) {
var type = Record.Type.valueOf(object.field(typeField).asString());
- var name = Serializers.optionalString(object.field(nameField)).map(RecordName::from);
- var data = Serializers.optionalString(object.field(dataField)).map(RecordData::from);
+ var name = SlimeUtils.optionalString(object.field(nameField)).map(RecordName::from);
+ var data = SlimeUtils.optionalString(object.field(dataField)).map(RecordData::from);
return new RemoveRecords(type, name, data);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NodeVersionSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NodeVersionSerializer.java
index f9f8de96591..2861e8922a5 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NodeVersionSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NodeVersionSerializer.java
@@ -8,6 +8,7 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
+import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.versions.NodeVersion;
import com.yahoo.vespa.hosted.controller.versions.NodeVersions;
@@ -47,7 +48,7 @@ public class NodeVersionSerializer {
var hostname = HostName.from(entry.field(hostnameField).asString());
var zone = ZoneId.from(entry.field(zoneField).asString());
var wantedVersion = Version.fromString(entry.field(wantedVersionField).asString());
- var suspendedAt = Serializers.optionalInstant(entry.field(suspendedAtField));
+ var suspendedAt = SlimeUtils.optionalInstant(entry.field(suspendedAtField));
nodeVersions.put(hostname, new NodeVersion(hostname, zone, version, wantedVersion, suspendedAt));
});
return new NodeVersions(nodeVersions.build());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
index 54dc102d573..ba1c5350580 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/NotificationsSerializer.java
@@ -74,18 +74,18 @@ public class NotificationsSerializer {
private static Notification fromInspector(TenantName tenantName, Inspector inspector) {
return new Notification(
- Serializers.instant(inspector.field(atFieldName)),
- typeFrom(inspector.field(typeField)),
- levelFrom(inspector.field(levelField)),
- new NotificationSource(
- tenantName,
- Serializers.optionalString(inspector.field(applicationField)).map(ApplicationName::from),
- Serializers.optionalString(inspector.field(instanceField)).map(InstanceName::from),
- Serializers.optionalString(inspector.field(zoneField)).map(ZoneId::from),
- Serializers.optionalString(inspector.field(clusterIdField)).map(ClusterSpec.Id::from),
- Serializers.optionalString(inspector.field(jobTypeField)).map(JobType::fromJobName),
- Serializers.optionalLong(inspector.field(runNumberField))),
- SlimeUtils.entriesStream(inspector.field(messagesField)).map(Inspector::asString).collect(Collectors.toUnmodifiableList()));
+ SlimeUtils.instant(inspector.field(atFieldName)),
+ typeFrom(inspector.field(typeField)),
+ levelFrom(inspector.field(levelField)),
+ new NotificationSource(
+ tenantName,
+ SlimeUtils.optionalString(inspector.field(applicationField)).map(ApplicationName::from),
+ SlimeUtils.optionalString(inspector.field(instanceField)).map(InstanceName::from),
+ SlimeUtils.optionalString(inspector.field(zoneField)).map(ZoneId::from),
+ SlimeUtils.optionalString(inspector.field(clusterIdField)).map(ClusterSpec.Id::from),
+ SlimeUtils.optionalString(inspector.field(jobTypeField)).map(JobType::fromJobName),
+ SlimeUtils.optionalLong(inspector.field(runNumberField))),
+ SlimeUtils.entriesStream(inspector.field(messagesField)).map(Inspector::asString).collect(Collectors.toUnmodifiableList()));
}
private static String asString(Notification.Type type) {
@@ -93,6 +93,7 @@ public class NotificationsSerializer {
case applicationPackage: return "applicationPackage";
case deployment: return "deployment";
case feedBlock: return "feedBlock";
+ case reindex: return "reindex";
default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
}
@@ -102,12 +103,14 @@ public class NotificationsSerializer {
case "applicationPackage": return Notification.Type.applicationPackage;
case "deployment": return Notification.Type.deployment;
case "feedBlock": return Notification.Type.feedBlock;
+ case "reindex": return Notification.Type.reindex;
default: throw new IllegalArgumentException("Unknown serialized notification type value '" + field.asString() + "'");
}
}
private static String asString(Notification.Level level) {
switch (level) {
+ case info: return "info";
case warning: return "warning";
case error: return "error";
default: throw new IllegalArgumentException("No serialization defined for notification level " + level);
@@ -116,6 +119,7 @@ public class NotificationsSerializer {
private static Notification.Level levelFrom(Inspector field) {
switch (field.asString()) {
+ case "info": return Notification.Level.info;
case "warning": return Notification.Level.warning;
case "error": return Notification.Level.error;
default: throw new IllegalArgumentException("Unknown serialized notification level value '" + field.asString() + "'");
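Note: the new reindex type and info level are added to both directions of the mapping, keeping the paired switches symmetric. A minimal standalone sketch of that pattern and the round-trip property it is meant to guarantee (Level here is a stand-in for Notification.Level):

public class LevelCodecSketch {

    enum Level { info, warning, error }   // stand-in for Notification.Level

    static String asString(Level level) {
        switch (level) {
            case info:    return "info";
            case warning: return "warning";
            case error:   return "error";
            default: throw new IllegalArgumentException("No serialization defined for notification level " + level);
        }
    }

    static Level levelFrom(String value) {
        switch (value) {
            case "info":    return Level.info;
            case "warning": return Level.warning;
            case "error":   return Level.error;
            default: throw new IllegalArgumentException("Unknown serialized notification level value '" + value + "'");
        }
    }

    public static void main(String[] args) {
        // Every value that can be written can be read back; a constant missing from either
        // switch surfaces as an IllegalArgumentException instead of silent data loss.
        for (Level level : Level.values())
            assert levelFrom(asString(level)) == level;
    }
}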
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionTargetSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionTargetSerializer.java
index 6942fc0549f..7c27533c144 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionTargetSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/OsVersionTargetSerializer.java
@@ -42,9 +42,7 @@ public class OsVersionTargetSerializer {
Set<OsVersionTarget> osVersionTargets = new TreeSet<>();
array.traverse((ArrayTraverser) (i, inspector) -> {
OsVersion osVersion = osVersionSerializer.fromSlime(inspector);
- // TODO(mpolden): Require this field after 2021-05-01
- Duration upgradeBudget = Serializers.optionalDuration(inspector.field(upgradeBudgetField))
- .orElse(Duration.ZERO);
+ Duration upgradeBudget = Duration.ofMillis(inspector.field(upgradeBudgetField).asLong());
osVersionTargets.add(new OsVersionTarget(osVersion, upgradeBudget));
});
return Collections.unmodifiableSet(osVersionTargets);
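Note: the upgrade budget is now a required field read as epoch milliseconds. A small sketch of the assumed round trip; the write side is not shown in this hunk, so the toMillis encoding is an assumption derived from the read above:

import java.time.Duration;

public class UpgradeBudgetCodecSketch {

    // Assumed write-side encoding, matching the Duration.ofMillis read in the hunk above.
    public static long toSlimeValue(Duration upgradeBudget) {
        return upgradeBudget.toMillis();
    }

    public static Duration fromSlimeValue(long value) {
        return Duration.ofMillis(value);
    }

    public static void main(String[] args) {
        Duration budget = Duration.ofHours(2);
        assert fromSlimeValue(toSlimeValue(budget)).equals(budget);
    }
}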
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
index 2697651f61b..d14cd780a8c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
@@ -9,6 +9,7 @@ import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.routing.GlobalRouting;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy;
@@ -80,7 +81,7 @@ public class RoutingPolicySerializer {
ZoneId.from(inspect.field(zoneField).asString()));
policies.put(id, new RoutingPolicy(id,
HostName.from(inspect.field(canonicalNameField).asString()),
- Serializers.optionalString(inspect.field(dnsZoneField)),
+ SlimeUtils.optionalString(inspect.field(dnsZoneField)),
endpointIds,
new Status(inspect.field(loadBalancerActiveField).asBool(),
globalRoutingFromSlime(inspect.field(globalRoutingField)))));
@@ -97,7 +98,7 @@ public class RoutingPolicySerializer {
public GlobalRouting globalRoutingFromSlime(Inspector object) {
var status = GlobalRouting.Status.valueOf(object.field(statusField).asString());
var agent = GlobalRouting.Agent.valueOf(object.field(agentField).asString());
- var changedAt = Serializers.optionalInstant(object.field(changedAtField)).orElse(Instant.EPOCH);
+ var changedAt = SlimeUtils.optionalInstant(object.field(changedAtField)).orElse(Instant.EPOCH);
return new GlobalRouting(status, agent, changedAt);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
index 60d8afe0f5e..87527085237 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
@@ -9,6 +9,7 @@ import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.ObjectTraverser;
import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
@@ -121,7 +122,7 @@ class RunSerializer {
// For historical reasons, the step details are stored in a separate JSON structure from the step statuses.
Inspector stepDetailsField = detailsField.field(step);
Inspector startTimeValue = stepDetailsField.field(startTimeField);
- Optional<Instant> startTime = Serializers.optionalInstant(startTimeValue);
+ Optional<Instant> startTime = SlimeUtils.optionalInstant(startTimeValue);
steps.put(typedStep, new StepInfo(typedStep, stepStatusOf(status.asString()), startTime));
});
@@ -130,12 +131,12 @@ class RunSerializer {
runObject.field(numberField).asLong()),
steps,
versionsFromSlime(runObject.field(versionsField)),
- Serializers.instant(runObject.field(startField)),
- Serializers.optionalInstant(runObject.field(endField)),
+ SlimeUtils.instant(runObject.field(startField)),
+ SlimeUtils.optionalInstant(runObject.field(endField)),
runStatusOf(runObject.field(statusField).asString()),
runObject.field(lastTestRecordField).asLong(),
Instant.EPOCH.plus(runObject.field(lastVespaLogTimestampField).asLong(), ChronoUnit.MICROS),
- Serializers.optionalInstant(runObject.field(noNodesDownSinceField)),
+ SlimeUtils.optionalInstant(runObject.field(noNodesDownSinceField)),
convergenceSummaryFrom(runObject.field(convergenceSummaryField)),
Optional.of(runObject.field(testerCertificateField))
.filter(Inspector::valid)
@@ -166,11 +167,11 @@ class RunSerializer {
versionObject.field(branchField).asString(),
versionObject.field(commitField).asString()))
.filter(revision -> ! revision.commit().isBlank() && ! revision.repository().isBlank() && ! revision.branch().isBlank());
- Optional<String> authorEmail = Serializers.optionalString(versionObject.field(authorEmailField));
- Optional<Version> compileVersion = Serializers.optionalString(versionObject.field(compileVersionField)).map(Version::fromString);
- Optional<Instant> buildTime = Serializers.optionalInstant(versionObject.field(buildTimeField));
- Optional<String> sourceUrl = Serializers.optionalString(versionObject.field(sourceUrlField));
- Optional<String> commit = Serializers.optionalString(versionObject.field(commitField));
+ Optional<String> authorEmail = SlimeUtils.optionalString(versionObject.field(authorEmailField));
+ Optional<Version> compileVersion = SlimeUtils.optionalString(versionObject.field(compileVersionField)).map(Version::fromString);
+ Optional<Instant> buildTime = SlimeUtils.optionalInstant(versionObject.field(buildTimeField));
+ Optional<String> sourceUrl = SlimeUtils.optionalString(versionObject.field(sourceUrlField));
+ Optional<String> commit = SlimeUtils.optionalString(versionObject.field(commitField));
return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail,
compileVersion, buildTime, sourceUrl, commit);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/Serializers.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/Serializers.java
deleted file mode 100644
index 7c8a09e244e..00000000000
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/Serializers.java
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.persistence;
-
-import com.yahoo.slime.Inspector;
-import com.yahoo.slime.SlimeUtils;
-
-import java.time.Duration;
-import java.time.Instant;
-import java.util.Optional;
-import java.util.OptionalDouble;
-import java.util.OptionalInt;
-import java.util.OptionalLong;
-
-/**
- * Reusable serialization logic.
- *
- * @author mpolden
- */
-public class Serializers {
-
- private Serializers() {}
-
- public static Instant instant(Inspector field) {
- return Instant.ofEpochMilli(field.asLong());
- }
-
- public static OptionalLong optionalLong(Inspector field) {
- return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty();
- }
-
- public static OptionalInt optionalInteger(Inspector field) {
- return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty();
- }
-
- public static OptionalDouble optionalDouble(Inspector field) {
- return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty();
- }
-
- public static Optional<String> optionalString(Inspector field) {
- return SlimeUtils.optionalString(field);
- }
-
- public static Optional<Instant> optionalInstant(Inspector field) {
- return optionalLong(field).stream().mapToObj(Instant::ofEpochMilli).findFirst();
- }
-
- public static Optional<Duration> optionalDuration(Inspector field) {
- return optionalLong(field).stream().mapToObj(Duration::ofMillis).findFirst();
- }
-
-}
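Note: the helpers that the call sites above now expect on SlimeUtils mirror the implementations of the deleted class. A minimal, self-contained sketch of the same semantics on top of the Inspector API (the actual SlimeUtils signatures are assumed to match the usages shown in this diff):

import com.yahoo.slime.Inspector;

import java.time.Instant;
import java.util.Optional;
import java.util.OptionalLong;

// Hypothetical stand-in mirroring the helpers the deleted Serializers class provided,
// and which the call sites above now look up on SlimeUtils instead.
final class SlimeOptionalsSketch {

    private SlimeOptionalsSketch() {}

    /** Reads a required epoch-millisecond field as an Instant. */
    static Instant instant(Inspector field) {
        return Instant.ofEpochMilli(field.asLong());
    }

    /** Empty when the field is missing from the Slime object. */
    static OptionalLong optionalLong(Inspector field) {
        return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty();
    }

    /** Optional epoch-millisecond timestamp, e.g. the suspendedAt and changedAt fields above. */
    static Optional<Instant> optionalInstant(Inspector field) {
        return field.valid() ? Optional.of(Instant.ofEpochMilli(field.asLong())) : Optional.empty();
    }
}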
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java
index 74e2bfbb471..33596fce2bd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializer.java
@@ -45,33 +45,65 @@ public class SupportAccessSerializer {
private static final String certificateFieldName = "certificate";
- public static Slime toSlime(SupportAccess supportAccess, boolean includeCertificates, Optional<Instant> withCurrentState) {
+ public static Slime toSlime(SupportAccess supportAccess) {
Slime slime = new Slime();
Cursor root = slime.setObject();
- withCurrentState.ifPresent(now -> {
- Cursor status = root.setObject(stateFieldName);
- SupportAccess.CurrentStatus currentState = supportAccess.currentStatus(now);
- status.setString(supportAccessFieldName, currentState.state().name());
- if (currentState.state() == SupportAccess.State.ALLOWED) {
- status.setString(untilFieldName, serializeInstant(currentState.allowedUntil().orElseThrow()));
- status.setString(byFieldName, currentState.allowedBy().orElseThrow());
- }
- }
- );
-
- Cursor history = root.setArray(historyFieldName);
- for (SupportAccessChange change : supportAccess.changeHistory()) {
- Cursor historyObject = history.addObject();
+ serializeHistoricEvents(root, supportAccess.changeHistory(), List.of());
+ serializeGrants(root, supportAccess.grantHistory(), true);
+
+ return slime;
+ }
+
+ public static Slime serializeCurrentState(SupportAccess supportAccess, Instant currentTime) {
+ Slime slime = new Slime();
+ Cursor root = slime.setObject();
+
+ Cursor status = root.setObject(stateFieldName);
+ SupportAccess.CurrentStatus currentState = supportAccess.currentStatus(currentTime);
+ status.setString(supportAccessFieldName, currentState.state().name());
+ if (currentState.state() == SupportAccess.State.ALLOWED) {
+ status.setString(untilFieldName, serializeInstant(currentState.allowedUntil().orElseThrow()));
+ status.setString(byFieldName, currentState.allowedBy().orElseThrow());
+ }
+
+ List<SupportAccessGrant> inactiveGrants = supportAccess.grantHistory().stream()
+ .filter(grant -> currentTime.isAfter(grant.certificate().getNotAfter().toInstant()))
+ .collect(Collectors.toList());
+
+ serializeHistoricEvents(root, supportAccess.changeHistory(), inactiveGrants);
+
+ // Active grants should show up in the grant section
+ List<SupportAccessGrant> activeGrants = supportAccess.grantHistory().stream()
+ .filter(grant -> currentTime.isBefore(grant.certificate().getNotAfter().toInstant()))
+ .collect(Collectors.toList());
+ serializeGrants(root, activeGrants, false);
+ return slime;
+ }
+
+ private static void serializeHistoricEvents(Cursor root, List<SupportAccessChange> changeEvents, List<SupportAccessGrant> historicGrants) {
+ Cursor historyRoot = root.setArray(historyFieldName);
+ for (SupportAccessChange change : changeEvents) {
+ Cursor historyObject = historyRoot.addObject();
historyObject.setString(stateFieldName, change.accessAllowedUntil().isPresent() ? allowedStateName : disallowedStateName);
historyObject.setString(atFieldName, serializeInstant(change.changeTime()));
change.accessAllowedUntil().ifPresent(allowedUntil -> historyObject.setString(untilFieldName, serializeInstant(allowedUntil)));
historyObject.setString(byFieldName, change.madeBy());
}
- Cursor grants = root.setArray(grantFieldName);
- for (SupportAccessGrant grant : supportAccess.grantHistory()) {
- Cursor grantObject = grants.addObject();
+ for (SupportAccessGrant grant : historicGrants) {
+ Cursor historyObject = historyRoot.addObject();
+ historyObject.setString(stateFieldName, "grant");
+ historyObject.setString(atFieldName, serializeInstant(grant.certificate().getNotBefore().toInstant()));
+ historyObject.setString(untilFieldName, serializeInstant(grant.certificate().getNotAfter().toInstant()));
+ historyObject.setString(byFieldName, grant.requestor());
+ }
+ }
+
+ private static void serializeGrants(Cursor root, List<SupportAccessGrant> grants, boolean includeCertificates) {
+ Cursor grantsRoot = root.setArray(grantFieldName);
+ for (SupportAccessGrant grant : grants) {
+ Cursor grantObject = grantsRoot.addObject();
grantObject.setString(requestorFieldName, grant.requestor());
if (includeCertificates) {
grantObject.setString(certificateFieldName, X509CertificateUtils.toPem(grant.certificate()));
@@ -80,7 +112,6 @@ public class SupportAccessSerializer {
grantObject.setString(notAfterFieldName, serializeInstant(grant.certificate().getNotAfter().toInstant()));
}
- return slime;
}
private static String serializeInstant(Instant i) {
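Note: serializeCurrentState splits the grant history by certificate expiry: grants whose certificate is still valid at currentTime go to the grants array, while expired ones are folded into the history as "grant" events. A minimal sketch of that split, operating on bare certificates rather than SupportAccessGrant:

import java.security.cert.X509Certificate;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

final class GrantPartitionSketch {

    static Map<Boolean, List<X509Certificate>> splitByActivity(List<X509Certificate> grantCertificates,
                                                               Instant currentTime) {
        return grantCertificates.stream().collect(Collectors.partitioningBy(
                certificate -> currentTime.isBefore(certificate.getNotAfter().toInstant())));
        // get(true)  -> active grants, serialized under the "grants" array (without certificates)
        // get(false) -> expired grants, serialized as "grant" entries in the "history" array
    }
}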
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java
index 8e97368624d..6b167f26314 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/TenantSerializer.java
@@ -151,14 +151,14 @@ public class TenantSerializer {
Property property = new Property(tenantObject.field(propertyField).asString());
Optional<PropertyId> propertyId = SlimeUtils.optionalString(tenantObject.field(propertyIdField)).map(PropertyId::new);
Optional<Contact> contact = contactFrom(tenantObject.field(contactField));
- Instant createdAt = Serializers.instant(tenantObject.field(createdAtField));
+ Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
return new AthenzTenant(name, domain, property, propertyId, contact, createdAt, lastLoginInfo);
}
private CloudTenant cloudTenantFrom(Inspector tenantObject) {
TenantName name = TenantName.from(tenantObject.field(nameField).asString());
- Instant createdAt = Serializers.instant(tenantObject.field(createdAtField));
+ Instant createdAt = SlimeUtils.instant(tenantObject.field(createdAtField));
LastLoginInfo lastLoginInfo = lastLoginInfoFromSlime(tenantObject.field(lastLoginInfoField));
Optional<Principal> creator = SlimeUtils.optionalString(tenantObject.field(creatorField)).map(SimplePrincipal::new);
BiMap<PublicKey, Principal> developerKeys = developerKeysFromSlime(tenantObject.field(pemDeveloperKeysField));
@@ -227,7 +227,7 @@ public class TenantSerializer {
private LastLoginInfo lastLoginInfoFromSlime(Inspector lastLoginInfoObject) {
Map<LastLoginInfo.UserLevel, Instant> lastLoginByUserLevel = new HashMap<>();
lastLoginInfoObject.traverse((String name, Inspector value) ->
- lastLoginByUserLevel.put(userLevelOf(name), Serializers.instant(value)));
+ lastLoginByUserLevel.put(userLevelOf(name), SlimeUtils.instant(value)));
return new LastLoginInfo(lastLoginByUserLevel);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java
index 12d15aa7cdd..eccda7332e1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/VersionStatusSerializer.java
@@ -7,6 +7,7 @@ import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.versions.NodeVersions;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
@@ -106,7 +107,7 @@ public class VersionStatusSerializer {
var version = Version.fromString(object.field(deploymentStatisticsField).field(versionField).asString());
return new VespaVersion(version,
object.field(releaseCommitField).asString(),
- Serializers.instant(object.field(committedAtField)),
+ SlimeUtils.instant(object.field(committedAtField)),
object.field(isControllerVersionField).asBool(),
object.field(isSystemVersionField).asBool(),
object.field(isReleasedField).asBool(),
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index b72a6d2f820..1510bb05a62 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -525,12 +525,14 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
case applicationPackage: return "applicationPackage";
case deployment: return "deployment";
case feedBlock: return "feedBlock";
+ case reindex: return "reindex";
default: throw new IllegalArgumentException("No serialization defined for notification type " + type);
}
}
private static String notificationLevelAsString(Notification.Level level) {
switch (level) {
+ case info: return "info";
case warning: return "warning";
case error: return "error";
default: throw new IllegalArgumentException("No serialization defined for notification level " + level);
@@ -899,6 +901,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
cluster.suggested().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject("suggested")));
utilizationToSlime(cluster.utilization(), clusterObject.setObject("utilization"));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
+ clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatusCode());
clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
clusterObject.setLong("scalingDuration", cluster.scalingDuration().toMillis());
clusterObject.setDouble("maxQueryGrowthRate", cluster.maxQueryGrowthRate());
@@ -979,7 +982,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
private HttpResponse supportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, Map<String, String> queryParameters) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
SupportAccess supportAccess = controller.supportAccess().forDeployment(deployment);
- return new SlimeJsonResponse(SupportAccessSerializer.toSlime(supportAccess, false, Optional.of(controller.clock().instant())));
+ return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(supportAccess, controller.clock().instant()));
}
// TODO support access: only let tenants (not operators!) allow access
@@ -989,14 +992,15 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
Principal principal = requireUserPrincipal(request);
Instant now = controller.clock().instant();
SupportAccess allowed = controller.supportAccess().allow(deployment, now.plus(7, ChronoUnit.DAYS), principal.getName());
- return new SlimeJsonResponse(SupportAccessSerializer.toSlime(allowed, false, Optional.of(now)));
+ return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(allowed, now));
}
private HttpResponse disallowSupportAccess(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deployment = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName), requireZone(environment, region));
Principal principal = requireUserPrincipal(request);
SupportAccess disallowed = controller.supportAccess().disallow(deployment, principal.getName());
- return new SlimeJsonResponse(SupportAccessSerializer.toSlime(disallowed, false, Optional.of(controller.clock().instant())));
+ controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment);
+ return new SlimeJsonResponse(SupportAccessSerializer.serializeCurrentState(disallowed, controller.clock().instant()));
}
private HttpResponse metrics(String tenantName, String applicationName, String instanceName, String environment, String region) {
@@ -2091,14 +2095,16 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
for (Application application : applications) {
DeploymentStatus status = null;
for (Instance instance : showOnlyProductionInstances(request) ? application.productionInstances().values()
- : application.instances().values())
+ : application.instances().values()) {
+ if (showOnlyActiveInstances(request) && instance.deployments().isEmpty())
+ continue;
if (recurseOverApplications(request)) {
if (status == null) status = controller.jobController().deploymentStatus(application);
toSlime(applicationArray.addObject(), instance, status, request);
- }
- else {
+ } else {
toSlime(instance.id(), applicationArray.addObject(), request);
}
+ }
}
tenantMetaDataToSlime(tenant, applications, object.setObject("metaData"));
}
@@ -2389,6 +2395,10 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
return "true".equals(request.getProperty("production"));
}
+ private static boolean showOnlyActiveInstances(HttpRequest request) {
+ return "true".equals(request.getProperty("activeInstances"));
+ }
+
private static String tenantType(Tenant tenant) {
switch (tenant.type()) {
case athenz: return "ATHENS";
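Note: the new activeInstances request property works alongside the existing production property: when set, instances without deployments are skipped. A tiny sketch of the resulting predicate (the method name is illustrative):

public class ActiveInstanceFilterSketch {

    // The "production" flag is handled one level up by choosing productionInstances()
    // instead of instances(); this predicate covers only the new "activeInstances" flag.
    static boolean shouldList(boolean showOnlyActiveInstances, int deploymentCount) {
        return !showOnlyActiveInstances || deploymentCount > 0;
    }

    public static void main(String[] args) {
        assert shouldList(false, 0);    // flag off: empty instances are still listed
        assert !shouldList(true, 0);    // flag on: instances with no deployments are skipped
        assert shouldList(true, 3);     // flag on: deployed instances are kept
    }
}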
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
index ac9612a56c5..cffdd9fc928 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandler.java
@@ -16,7 +16,6 @@ import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.Controller;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequest;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.VespaChangeRequest;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLoggingRequestHandler;
@@ -134,7 +133,9 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
Inspector inspector = inspectorOrThrow(request);
// For now: mandatory fields
- Inspector hostArray = getInspectorFieldOrThrow(inspector, "hosts");
+ Inspector hostArray = inspector.field("hosts");
+ Inspector switchArray = inspector.field("switches");
+
// The impacted hostnames
List<String> hostNames = new ArrayList<>();
@@ -142,6 +143,15 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
hostArray.traverse((ArrayTraverser) (i, host) -> hostNames.add(host.asString()));
}
+ if (switchArray.valid()) {
+ List<String> switchNames = new ArrayList<>();
+ switchArray.traverse((ArrayTraverser) (i, switchName) -> switchNames.add(switchName.asString()));
+ hostNames.addAll(hostsOnSwitch(switchNames));
+ }
+
+ if (hostNames.isEmpty())
+ return ErrorResponse.badRequest("No prod hosts in provided host/switch list");
+
return doAssessment(hostNames);
}
@@ -272,13 +282,7 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
.map(HostName::from)
.collect(Collectors.toList());
- var potentialZones = controller.zoneRegistry()
- .zones()
- .reachable()
- .in(Environment.prod)
- .ids();
-
- for (var zone : potentialZones) {
+ for (var zone : getProdZones()) {
var affectedHostsInZone = controller.serviceRegistry().configServer().nodeRepository().list(zone, affectedHosts);
if (!affectedHostsInZone.isEmpty())
return Optional.of(zone);
@@ -287,4 +291,20 @@ public class ChangeManagementApiHandler extends AuditLoggingRequestHandler {
return Optional.empty();
}
+ private List<String> hostsOnSwitch(List<String> switches) {
+ return getProdZones().stream()
+ .flatMap(zone -> controller.serviceRegistry().configServer().nodeRepository().list(zone, false).stream())
+ .filter(node -> node.switchHostname().map(switches::contains).orElse(false))
+ .map(node -> node.hostname().value())
+ .collect(Collectors.toList());
+ }
+
+ private List<ZoneId> getProdZones() {
+ return controller.zoneRegistry()
+ .zones()
+ .reachable()
+ .in(Environment.prod)
+ .ids();
+ }
+
}
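Note: the assessment endpoint now accepts hosts, switches, or both, expands switch names to the prod hosts behind them, and rejects an empty combined list with 400. A sketch of a request body built with the same Slime API used elsewhere in this diff (hostnames are illustrative); the resulting JSON is {"hosts": ["host1.example.com"], "switches": ["switch1.example.com"]}:

import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;

public class AssessmentRequestSketch {

    public static Slime build() {
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        Cursor hosts = root.setArray("hosts");
        hosts.addString("host1.example.com");         // illustrative hostname
        Cursor switches = root.setArray("switches");
        switches.addString("switch1.example.com");    // illustrative switch name
        return slime;
    }
}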
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/AccessRequestResponse.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/AccessRequestResponse.java
new file mode 100644
index 00000000000..e17421764e5
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/AccessRequestResponse.java
@@ -0,0 +1,28 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.restapi.controller;
+
+import com.yahoo.restapi.SlimeJsonResponse;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Slime;
+import com.yahoo.vespa.athenz.api.AthenzIdentity;
+import com.yahoo.vespa.athenz.api.AthenzUser;
+
+import java.util.Collection;
+
+public class AccessRequestResponse extends SlimeJsonResponse {
+
+ public AccessRequestResponse(Collection<AthenzUser> members) {
+ super(toSlime(members));
+ }
+
+ private static Slime toSlime(Collection<AthenzUser> members) {
+ Slime slime = new Slime();
+ Cursor root = slime.setObject();
+ Cursor array = root.setArray("members");
+ members.stream()
+ .map(AthenzIdentity::getFullName)
+ .forEach(array::addString);
+ return slime;
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java
index 98a9ade1b16..59ae2a505bb 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java
@@ -2,27 +2,40 @@
package com.yahoo.vespa.hosted.controller.restapi.controller;
import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
import com.yahoo.io.IOUtils;
import com.yahoo.restapi.ErrorResponse;
+import com.yahoo.restapi.MessageResponse;
import com.yahoo.restapi.Path;
import com.yahoo.restapi.ResourceResponse;
+import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLoggingRequestHandler;
import com.yahoo.vespa.hosted.controller.maintenance.ControllerMaintenance;
import com.yahoo.vespa.hosted.controller.maintenance.Upgrader;
+import com.yahoo.vespa.hosted.controller.support.access.SupportAccess;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion.Confidence;
import com.yahoo.yolean.Exceptions;
+import javax.ws.rs.InternalServerErrorException;
import java.io.IOException;
import java.io.InputStream;
import java.io.UncheckedIOException;
+import java.security.Principal;
+import java.security.cert.X509Certificate;
+import java.time.Instant;
import java.util.Optional;
import java.util.Scanner;
+import java.util.function.Function;
import java.util.logging.Level;
/**
@@ -77,9 +90,51 @@ public class ControllerApiHandler extends AuditLoggingRequestHandler {
private HttpResponse post(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return overrideConfidence(request, path.get("version"));
+ if (path.matches("/controller/v1/access/requests/{user}")) return approveMembership(request, path.get("user"));
+ if (path.matches("/controller/v1/access/grants/{user}")) return grantAccess(request, path.get("user"));
return notFound(path);
}
+ private HttpResponse approveMembership(HttpRequest request, String user) {
+ AthenzUser athenzUser = AthenzUser.fromUserId(user);
+ byte[] jsonBytes = toJsonBytes(request.getData());
+ Inspector inspector = SlimeUtils.jsonToSlime(jsonBytes).get();
+ ApplicationId applicationId = requireField(inspector, "applicationId", ApplicationId::fromSerializedForm);
+ ZoneId zone = requireField(inspector, "zone", ZoneId::from);
+ if (controller.supportAccess().allowDataplaneMembership(athenzUser, new DeploymentId(applicationId, zone))) {
+ return new AccessRequestResponse(controller.serviceRegistry().accessControlService().listMembers());
+ } else {
+ return new MessageResponse(400, "Unable to approve membership request");
+ }
+ }
+
+ private HttpResponse grantAccess(HttpRequest request, String user) {
+ Principal principal = requireUserPrincipal(request);
+ Instant now = controller.clock().instant();
+
+ byte[] jsonBytes = toJsonBytes(request.getData());
+ Inspector requestObject = SlimeUtils.jsonToSlime(jsonBytes).get();
+ X509Certificate certificate = requireField(requestObject, "certificate", X509CertificateUtils::fromPem);
+ ApplicationId applicationId = requireField(requestObject, "applicationId", ApplicationId::fromSerializedForm);
+ ZoneId zone = requireField(requestObject, "zone", ZoneId::from);
+ DeploymentId deployment = new DeploymentId(applicationId, zone);
+
+ // Register grant
+ SupportAccess supportAccess = controller.supportAccess().registerGrant(deployment, principal.getName(), certificate);
+
+ // Trigger deployment to include operator cert
+ Optional<JobId> jobId = controller.applications().deploymentTrigger().reTriggerOrAddToQueue(deployment);
+ return new MessageResponse(
+ jobId.map(id -> String.format("Operator %s granted access and job %s triggered", principal.getName(), id.type().jobName()))
+ .orElseGet(() -> String.format("Operator %s granted access and job trigger queued", principal.getName())));
+ }
+
+ private <T> T requireField(Inspector inspector, String field, Function<String, T> mapper) {
+ return SlimeUtils.optionalString(inspector.field(field))
+ .map(mapper::apply)
+ .orElseThrow(() -> new IllegalArgumentException("Expected field \"" + field + "\" in request"));
+ }
+
private HttpResponse delete(HttpRequest request) {
Path path = new Path(request.getUri());
if (path.matches("/controller/v1/jobs/upgrader/confidence/{version}")) return removeConfidenceOverride(path.get("version"));
@@ -145,4 +200,9 @@ public class ControllerApiHandler extends AuditLoggingRequestHandler {
}
}
+ private static Principal requireUserPrincipal(HttpRequest request) {
+ Principal principal = request.getJDiscRequest().getUserPrincipal();
+ if (principal == null) throw new InternalServerErrorException("Expected a user principal");
+ return principal;
+ }
}
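Note: the grant endpoint reads three string fields from the request body through requireField. A sketch of such a body and how the two non-certificate fields are parsed; the values are illustrative, and the PEM is elided so it is not actually decoded here:

import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.SlimeUtils;

import java.nio.charset.StandardCharsets;

public class GrantAccessRequestSketch {

    public static void main(String[] args) {
        String json = "{"
                + "\"applicationId\":\"tenant1:app1:default\","      // illustrative serialized ApplicationId
                + "\"zone\":\"prod.us-east-3\","                      // illustrative zone
                + "\"certificate\":\"-----BEGIN CERTIFICATE-----...\""  // PEM elided, not parsed here
                + "}";
        Inspector root = SlimeUtils.jsonToSlime(json.getBytes(StandardCharsets.UTF_8)).get();
        ApplicationId applicationId = ApplicationId.fromSerializedForm(root.field("applicationId").asString());
        ZoneId zone = ZoneId.from(root.field("zone").asString());
        System.out.println(applicationId + " in " + zone);
    }
}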
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java
new file mode 100644
index 00000000000..6f5b1f30592
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java
@@ -0,0 +1,121 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.restapi.horizon;
+
+import com.google.inject.Inject;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.container.jdisc.LoggingRequestHandler;
+import com.yahoo.restapi.ErrorResponse;
+import com.yahoo.restapi.Path;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonClient;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonResponse;
+import com.yahoo.vespa.hosted.controller.api.role.SecurityContext;
+import com.yahoo.yolean.Exceptions;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Optional;
+import java.util.logging.Level;
+
+/**
+ * Proxies metrics requests from Horizon UI
+ *
+ * @author valerijf
+ */
+public class HorizonApiHandler extends LoggingRequestHandler {
+
+ private final SystemName systemName;
+ private final HorizonClient client;
+
+ @Inject
+ public HorizonApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
+ super(parentCtx);
+ this.systemName = controller.system();
+ this.client = controller.serviceRegistry().horizonClient();
+ }
+
+ @Override
+ public HttpResponse handle(HttpRequest request) {
+ try {
+ switch (request.getMethod()) {
+ case GET: return get(request);
+ case POST: return post(request);
+ case PUT: return put(request);
+ default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
+ }
+ }
+ catch (IllegalArgumentException e) {
+ return ErrorResponse.badRequest(Exceptions.toMessageString(e));
+ }
+ catch (RuntimeException e) {
+ log.log(Level.WARNING, "Unexpected error handling '" + request.getUri() + "'", e);
+ return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
+ }
+ }
+
+ private HttpResponse get(HttpRequest request) {
+ Path path = new Path(request.getUri());
+ if (path.matches("/horizon/v1/config/dashboard/topFolders")) return new JsonInputStreamResponse(client.getTopFolders());
+ if (path.matches("/horizon/v1/config/dashboard/file/{id}")) return new JsonInputStreamResponse(client.getDashboard(path.get("id")));
+ if (path.matches("/horizon/v1/config/dashboard/favorite")) return new JsonInputStreamResponse(client.getFavorite(request.getProperty("user")));
+ if (path.matches("/horizon/v1/config/dashboard/recent")) return new JsonInputStreamResponse(client.getRecent(request.getProperty("user")));
+ return ErrorResponse.notFoundError("Nothing at " + path);
+ }
+
+ private HttpResponse post(HttpRequest request) {
+ Path path = new Path(request.getUri());
+ if (path.matches("/horizon/v1/tsdb/api/query/graph")) return tsdbQuery(request, true);
+ if (path.matches("/horizon/v1/meta/search/timeseries")) return tsdbQuery(request, false);
+ return ErrorResponse.notFoundError("Nothing at " + path);
+ }
+
+ private HttpResponse put(HttpRequest request) {
+ Path path = new Path(request.getUri());
+ if (path.matches("/horizon/v1/config/user")) return new JsonInputStreamResponse(client.getUser());
+ return ErrorResponse.notFoundError("Nothing at " + path);
+ }
+
+ private HttpResponse tsdbQuery(HttpRequest request, boolean isMetricQuery) {
+ SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
+ try {
+ byte[] data = TsdbQueryRewriter.rewrite(request.getData().readAllBytes(), securityContext.roles(), systemName);
+ return new JsonInputStreamResponse(isMetricQuery ? client.getMetrics(data) : client.getMetaData(data));
+ } catch (TsdbQueryRewriter.UnauthorizedException e) {
+ return ErrorResponse.forbidden("Access denied");
+ } catch (IOException e) {
+ return ErrorResponse.badRequest("Failed to parse request body: " + e.getMessage());
+ }
+ }
+
+ private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> clazz) {
+ return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
+ .filter(clazz::isInstance)
+ .map(clazz::cast)
+ .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
+ }
+
+ private static class JsonInputStreamResponse extends HttpResponse {
+
+ private final HorizonResponse response;
+
+ public JsonInputStreamResponse(HorizonResponse response) {
+ super(response.code());
+ this.response = response;
+ }
+
+ @Override
+ public String getContentType() {
+ return "application/json";
+ }
+
+ @Override
+ public void render(OutputStream outputStream) throws IOException {
+ try (InputStream inputStream = response.inputStream()) {
+ inputStream.transferTo(outputStream);
+ }
+ }
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java
new file mode 100644
index 00000000000..e034be46063
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java
@@ -0,0 +1,112 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.restapi.horizon;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.hosted.controller.api.role.Role;
+import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition;
+import com.yahoo.vespa.hosted.controller.api.role.TenantRole;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * @author valerijf
+ */
+public class TsdbQueryRewriter {
+
+ private static final ObjectMapper mapper = new ObjectMapper();
+ private static final EnumSet<RoleDefinition> operatorRoleDefinitions =
+ EnumSet.of(RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter);
+
+ public static byte[] rewrite(byte[] data, Set<Role> roles, SystemName systemName) throws IOException {
+ boolean operator = roles.stream().map(Role::definition).anyMatch(operatorRoleDefinitions::contains);
+
+ // Anyone with any tenant relation can view metrics for apps within those tenants
+ Set<TenantName> authorizedTenants = roles.stream()
+ .filter(TenantRole.class::isInstance)
+ .map(role -> ((TenantRole) role).tenant())
+ .collect(Collectors.toUnmodifiableSet());
+ if (!operator && authorizedTenants.isEmpty())
+ throw new UnauthorizedException();
+
+ JsonNode root = mapper.readTree(data);
+ requireLegalType(root);
+ getField(root, "executionGraph", ArrayNode.class)
+ .ifPresent(graph -> rewriteQueryGraph(graph, authorizedTenants, operator, systemName));
+ getField(root, "filters", ArrayNode.class)
+ .ifPresent(filters -> rewriteFilters(filters, authorizedTenants, operator, systemName));
+ getField(root, "queries", ArrayNode.class)
+ .ifPresent(graph -> rewriteQueryGraph(graph, authorizedTenants, operator, systemName));
+
+ return mapper.writeValueAsBytes(root);
+ }
+
+ private static void rewriteQueryGraph(ArrayNode executionGraph, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
+ for (int i = 0; i < executionGraph.size(); i++) {
+ JsonNode execution = executionGraph.get(i);
+
+ // Will be handled by rewriteFilters()
+ if (execution.has("filterId")) continue;
+
+ rewriteFilter((ObjectNode) execution, tenantNames, operator, systemName);
+ }
+ }
+
+ private static void rewriteFilters(ArrayNode filters, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
+ for (int i = 0; i < filters.size(); i++)
+ rewriteFilter((ObjectNode) filters.get(i), tenantNames, operator, systemName);
+ }
+
+ private static void rewriteFilter(ObjectNode parent, Set<TenantName> tenantNames, boolean operator, SystemName systemName) {
+ ObjectNode prev = ((ObjectNode) parent.get("filter"));
+ ArrayNode filters;
+ // If we don't already have a filter object, or the existing filter is not an AND chain
+ if (prev == null || !"Chain".equals(prev.get("type").asText()) || prev.get("op") != null && !"AND".equals(prev.get("op").asText())) {
+ // Create new filter object
+ filters = parent.putObject("filter")
+ .put("type", "Chain")
+ .put("op", "AND")
+ .putArray("filters");
+
+ // Add the previous filter to the AND expression
+ if (prev != null) filters.add(prev);
+ } else filters = (ArrayNode) prev.get("filters");
+
+ // Make sure we only show metrics in the relevant system
+ ObjectNode systemFilter = filters.addObject();
+ systemFilter.put("type", "TagValueLiteralOr");
+ systemFilter.put("filter", systemName.name().toLowerCase());
+ systemFilter.put("tagKey", "system");
+
+ // Make sure non-operators cannot see metrics outside of their tenants
+ if (!operator) {
+ ObjectNode appFilter = filters.addObject();
+ appFilter.put("type", "TagValueRegex");
+ appFilter.put("filter",
+ tenantNames.stream().map(TenantName::value).sorted().collect(Collectors.joining("|", "^(", ")\\..*")));
+ appFilter.put("tagKey", "applicationId");
+ }
+ }
+
+ private static void requireLegalType(JsonNode root) {
+ Optional.ofNullable(root.get("type"))
+ .map(JsonNode::asText)
+ .filter(type -> !"TAG_KEYS_AND_VALUES".equals(type))
+ .ifPresent(type -> { throw new IllegalArgumentException("Illegal type " + type); });
+ }
+
+ private static <T extends JsonNode> Optional<T> getField(JsonNode object, String fieldName, Class<T> clazz) {
+ return Optional.ofNullable(object.get(fieldName)).filter(clazz::isInstance).map(clazz::cast);
+ }
+
+ static class UnauthorizedException extends RuntimeException { }
+
+}
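Note: for non-operators the rewriter appends a TagValueRegex filter on applicationId built from the caller's tenants. A small sketch of the regex that construction yields (tenant names are illustrative):

import java.util.List;
import java.util.stream.Collectors;

public class TenantRegexSketch {

    public static void main(String[] args) {
        List<String> tenants = List.of("vespa", "music");           // illustrative tenant names
        String regex = tenants.stream().sorted()
                .collect(Collectors.joining("|", "^(", ")\\..*"));
        System.out.println(regex);                                   // ^(music|vespa)\..*
        assert "music.album.default".matches(regex);                 // own tenant: allowed
        assert !"other.app.default".matches(regex);                  // foreign tenant: filtered out
    }
}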
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
index 6e069b2b5ec..e195401f03a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
@@ -40,6 +40,8 @@ import com.yahoo.vespa.hosted.controller.tenant.Tenant;
import com.yahoo.yolean.Exceptions;
import java.security.PublicKey;
+import java.time.LocalDateTime;
+import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -136,12 +138,16 @@ public class UserApiHandler extends LoggingRequestHandler {
RoleDefinition.hostedAccountant);
private HttpResponse userMetadata(HttpRequest request) {
- @SuppressWarnings("unchecked")
- Map<String, String> userAttributes = (Map<String, String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class);
- User user = new User(userAttributes.get("email"),
- userAttributes.get("name"),
- userAttributes.get("nickname"),
- userAttributes.get("picture"));
+ User user;
+ if (request.getJDiscRequest().context().get(User.ATTRIBUTE_NAME) instanceof User) {
+ user = getAttribute(request, User.ATTRIBUTE_NAME, User.class);
+ } else {
+ // Remove this after June 2021 (once all security filters are setting this)
+ @SuppressWarnings("unchecked")
+ Map<String, String> attr = (Map<String, String>) getAttribute(request, User.ATTRIBUTE_NAME, Map.class);
+ user = new User(attr.get("email"), attr.get("name"), attr.get("nickname"), attr.get("picture"));
+ }
+
Set<Role> roles = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class).roles();
Map<TenantName, List<TenantRole>> tenantRolesByTenantName = roles.stream()
@@ -241,6 +247,11 @@ public class UserApiHandler extends LoggingRequestHandler {
userObject.setString("email", user.email());
if (user.nickname() != null) userObject.setString("nickname", user.nickname());
if (user.picture() != null) userObject.setString("picture", user.picture());
+ userObject.setBool("verified", user.isVerified());
+ if (!user.lastLogin().equals(User.NO_DATE))
+ userObject.setString("lastLogin", user.lastLogin().format(DateTimeFormatter.ISO_DATE));
+ if (user.loginCount() > -1)
+ userObject.setLong("loginCount", user.loginCount());
}
private HttpResponse addTenantRoleMember(String tenantName, HttpRequest request) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/support/access/SupportAccessControl.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/support/access/SupportAccessControl.java
index 4a550ad3379..6bbec918ba9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/support/access/SupportAccessControl.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/support/access/SupportAccessControl.java
@@ -1,16 +1,20 @@
// Copyright 2021 Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.support.access;
+import com.yahoo.vespa.athenz.api.AthenzIdentity;
+import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import java.security.Principal;
import java.security.cert.X509Certificate;
import java.time.Instant;
import java.time.Period;
import java.util.List;
import java.util.stream.Collectors;
+import static com.yahoo.vespa.hosted.controller.support.access.SupportAccess.State.ALLOWED;
import static com.yahoo.vespa.hosted.controller.support.access.SupportAccess.State.NOT_ALLOWED;
/**
@@ -83,8 +87,19 @@ public class SupportAccessControl {
if (supportAccess.currentStatus(now).state() == NOT_ALLOWED) return List.of();
return supportAccess.grantHistory().stream()
- .filter(grant -> !grant.certificate().getNotBefore().toInstant().isBefore(now))
- .filter(grant -> !grant.certificate().getNotAfter().toInstant().isAfter(now))
+ .filter(grant -> now.isAfter(grant.certificate().getNotBefore().toInstant()))
+ .filter(grant -> now.isBefore(grant.certificate().getNotAfter().toInstant()))
.collect(Collectors.toUnmodifiableList());
}
+
+ public boolean allowDataplaneMembership(AthenzUser identity, DeploymentId deploymentId) {
+ Instant instant = controller.clock().instant();
+ SupportAccess supportAccess = forDeployment(deploymentId);
+ SupportAccess.CurrentStatus currentStatus = supportAccess.currentStatus(instant);
+ if (currentStatus.state() == ALLOWED) {
+ return controller.serviceRegistry().accessControlService().approveDataPlaneAccess(identity, currentStatus.allowedUntil().orElse(instant.plus(MAX_SUPPORT_ACCESS_TIME)));
+ } else {
+ return false;
+ }
+ }
}
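Note: the corrected filter treats a grant as active only while now lies between the certificate's notBefore and notAfter; the previous negated checks selected certificates that were simultaneously not yet valid and already expired, i.e. none. A minimal sketch of the fixed check:

import java.time.Instant;

final class GrantValiditySketch {

    static boolean isActive(Instant now, Instant notBefore, Instant notAfter) {
        return now.isAfter(notBefore) && now.isBefore(notAfter);
    }

    public static void main(String[] args) {
        Instant notBefore = Instant.parse("2021-04-01T00:00:00Z");
        Instant notAfter  = Instant.parse("2021-04-08T00:00:00Z");
        assert isActive(Instant.parse("2021-04-03T00:00:00Z"), notBefore, notAfter);
        assert !isActive(Instant.parse("2021-04-09T00:00:00Z"), notBefore, notAfter);   // expired
        assert !isActive(Instant.parse("2021-03-31T00:00:00Z"), notBefore, notAfter);   // not yet valid
    }
}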
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java
index d8544ff3947..a3580a9fda3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/certificate/EndpointCertificatesTest.java
@@ -13,8 +13,6 @@ import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.test.ManualClock;
-import com.yahoo.vespa.flags.Flags;
-import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateMetadata;
@@ -133,7 +131,6 @@ public class EndpointCertificatesTest {
@Test
public void provisions_new_certificate_in_public_prod() {
ControllerTester tester = new ControllerTester(SystemName.Public);
- ((InMemoryFlagSource) tester.controller().flagSource()).withBooleanFlag(Flags.VESPA_APP_DOMAIN_IN_CERTIFICATE.id(), true);
EndpointCertificateValidatorImpl endpointCertificateValidator = new EndpointCertificateValidatorImpl(secretStore, clock);
EndpointCertificates endpointCertificates = new EndpointCertificates(tester.controller(), endpointCertificateMock, endpointCertificateValidator);
List<String> expectedSans = List.of(
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
index fc7a99eb2f0..78f688f545b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
@@ -27,6 +27,7 @@ import java.util.Date;
import java.util.List;
import java.util.OptionalInt;
import java.util.StringJoiner;
+import java.util.zip.Deflater;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
@@ -273,27 +274,27 @@ public class ApplicationPackageBuilder {
}
ByteArrayOutputStream zip = new ByteArrayOutputStream();
try (ZipOutputStream out = new ZipOutputStream(zip)) {
- out.putNextEntry(new ZipEntry(dir + "deployment.xml"));
- out.write(deploymentSpec());
- out.closeEntry();
- out.putNextEntry(new ZipEntry(dir + "validation-overrides.xml"));
- out.write(validationOverrides());
- out.closeEntry();
- out.putNextEntry(new ZipEntry(dir + "search-definitions/test.sd"));
- out.write(searchDefinition());
- out.closeEntry();
- out.putNextEntry(new ZipEntry(dir + "build-meta.json"));
- out.write(buildMeta(compileVersion));
- out.closeEntry();
- out.putNextEntry(new ZipEntry(dir + "security/clients.pem"));
- out.write(X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8));
- out.closeEntry();
+ out.setLevel(Deflater.NO_COMPRESSION); // Test-only code: skip compression to keep package creation fast
+ writeZipEntry(out, dir + "deployment.xml", deploymentSpec());
+ writeZipEntry(out, dir + "validation-overrides.xml", validationOverrides());
+ writeZipEntry(out, dir + "search-definitions/test.sd", searchDefinition());
+ writeZipEntry(out, dir + "build-meta.json", buildMeta(compileVersion));
+ if (!trustedCertificates.isEmpty()) {
+ writeZipEntry(out, dir + "security/clients.pem", X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8));
+ }
} catch (IOException e) {
throw new UncheckedIOException(e);
}
return new ApplicationPackage(zip.toByteArray());
}
+ private void writeZipEntry(ZipOutputStream out, String name, byte[] content) throws IOException {
+ ZipEntry entry = new ZipEntry(name);
+ out.putNextEntry(entry);
+ out.write(content);
+ out.closeEntry();
+ }
+
private static String asIso8601Date(Instant instant) {
return new SimpleDateFormat("yyyy-MM-dd").format(Date.from(instant));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index c8b4eaa5236..7077e14a648 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -11,6 +11,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
+import org.junit.Assert;
import org.junit.Test;
import java.time.Duration;
@@ -1228,4 +1229,17 @@ public class DeploymentTriggerTest {
assertEquals(List.of(), tester.jobs().active());
}
+ @Test
+ public void testRetriggerQueue() {
+ var app = tester.newDeploymentContext().submit().deploy();
+ app.submit();
+ tester.triggerJobs();
+
+ tester.deploymentTrigger().reTrigger(app.instanceId(), productionUsEast3);
+ tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));
+ tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));
+
+ List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries();
+ Assert.assertEquals(1, retriggerEntries.size());
+ }
}
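Note: the test asserts that queuing a re-trigger twice for the same deployment leaves a single entry. A hypothetical sketch of that deduplication semantics using a keyed map; the real entries are persisted through the curator layer as RetriggerEntry objects, whose fields are not shown in this diff:

import java.util.LinkedHashMap;
import java.util.Map;

public class RetriggerQueueSketch {

    private final Map<String, Long> queuedRunsByDeployment = new LinkedHashMap<>();

    // One entry per deployment; repeated calls only raise the stored run number.
    public void reTriggerOrAddToQueue(String deploymentId, long minimumRun) {
        queuedRunsByDeployment.merge(deploymentId, minimumRun, Math::max);
    }

    public int size() { return queuedRunsByDeployment.size(); }

    public static void main(String[] args) {
        RetriggerQueueSketch queue = new RetriggerQueueSketch();
        queue.reTriggerOrAddToQueue("tenant.app.default:prod.us-east-3", 1);
        queue.reTriggerOrAddToQueue("tenant.app.default:prod.us-east-3", 2);
        assert queue.size() == 1;   // mirrors the assertEquals(1, retriggerEntries.size()) above
    }
}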
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index c3a527a0bd9..098282e4e89 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -64,7 +64,6 @@ import java.util.Set;
import java.util.UUID;
import java.util.logging.Level;
import java.util.stream.Collectors;
-import java.util.stream.IntStream;
import static com.yahoo.config.provision.NodeResources.DiskSpeed.slow;
import static com.yahoo.config.provision.NodeResources.StorageType.remote;
@@ -122,7 +121,8 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
current,
Instant.ofEpochMilli(1234),
Optional.of(Instant.ofEpochMilli(2234)))),
- "the autoscaling status",
+ "ideal",
+ "Cluster is ideally scaled",
Duration.ofMinutes(6),
0.7,
0.3);
@@ -167,18 +167,18 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
public void addNodes(List<ZoneId> zones, List<SystemApplication> applications) {
for (ZoneId zone : zones) {
for (SystemApplication application : applications) {
- List<Node> nodes = IntStream.rangeClosed(1, 3)
- .mapToObj(i -> new Node.Builder()
- .hostname(HostName.from("node-" + i + "-" + application.id().application()
- .value() + "-" + zone.value()))
- .state(Node.State.active)
- .type(application.nodeType())
- .owner(application.id())
- .currentVersion(initialVersion).wantedVersion(initialVersion)
- .currentOsVersion(Version.emptyVersion).wantedOsVersion(Version.emptyVersion)
- .build())
- .collect(Collectors.toList());
- nodeRepository().putNodes(zone, nodes);
+ for (int i = 1; i <= 3; i++) {
+ Node node = new Node.Builder()
+ .hostname(HostName.from("node-" + i + "-" + application.id().application()
+ .value() + "-" + zone.value()))
+ .state(Node.State.active)
+ .type(application.nodeType())
+ .owner(application.id())
+ .currentVersion(initialVersion).wantedVersion(initialVersion)
+ .currentOsVersion(Version.emptyVersion).wantedOsVersion(Version.emptyVersion)
+ .build();
+ nodeRepository().putNode(zone, node);
+ }
convergeServices(application.id(), zone);
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
index afb56f10c38..4079591730d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/NodeRepositoryMock.java
@@ -59,9 +59,14 @@ public class NodeRepositoryMock implements NodeRepository {
/** Add or update given nodes in zone */
public void putNodes(ZoneId zone, List<Node> nodes) {
- nodeRepository.putIfAbsent(zone, new HashMap<>());
- nodeRepository.get(zone).putAll(nodes.stream().collect(Collectors.toMap(Node::hostname,
- Function.identity())));
+ Map<HostName, Node> zoneNodes = nodeRepository.computeIfAbsent(zone, (k) -> new HashMap<>());
+ for (var node : nodes) {
+ zoneNodes.put(node.hostname(), node);
+ }
+ }
+
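+ /** Add or update a single node in zone */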
+ public void putNode(ZoneId zone, Node node) {
+ nodeRepository.computeIfAbsent(zone, (k) -> new HashMap<>()).put(node.hostname(), node);
}
public void putApplication(ZoneId zone, Application application) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java
index 702ce83d116..a4ce0316e25 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ServiceRegistryMock.java
@@ -9,11 +9,12 @@ import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.controller.api.integration.ServiceRegistry;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveService;
import com.yahoo.vespa.hosted.controller.api.integration.archive.MockArchiveService;
+import com.yahoo.vespa.hosted.controller.api.integration.athenz.AccessControlService;
+import com.yahoo.vespa.hosted.controller.api.integration.athenz.MockAccessControlService;
import com.yahoo.vespa.hosted.controller.api.integration.aws.MockRoleService;
import com.yahoo.vespa.hosted.controller.api.integration.aws.RoleService;
import com.yahoo.vespa.hosted.controller.api.integration.aws.MockAwsEventFetcher;
import com.yahoo.vespa.hosted.controller.api.integration.aws.MockResourceTagger;
-import com.yahoo.vespa.hosted.controller.api.integration.aws.NoopRoleService;
import com.yahoo.vespa.hosted.controller.api.integration.aws.ResourceTagger;
import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingController;
import com.yahoo.vespa.hosted.controller.api.integration.billing.MockBillingController;
@@ -22,6 +23,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCe
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateValidatorMock;
import com.yahoo.vespa.hosted.controller.api.integration.dns.MemoryNameService;
import com.yahoo.vespa.hosted.controller.api.integration.entity.MemoryEntityService;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonClient;
+import com.yahoo.vespa.hosted.controller.api.integration.horizon.MockHorizonClient;
import com.yahoo.vespa.hosted.controller.api.integration.organization.MockContactRetriever;
import com.yahoo.vespa.hosted.controller.api.integration.organization.MockIssueHandler;
import com.yahoo.vespa.hosted.controller.api.integration.resource.CostReportConsumerMock;
@@ -35,7 +38,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMailer;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockRunDataStore;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockTesterCloud;
-import com.yahoo.vespa.hosted.controller.api.integration.vcmr.ChangeRequestClient;
import com.yahoo.vespa.hosted.controller.api.integration.vcmr.MockChangeRequestClient;
/**
@@ -73,6 +75,8 @@ public class ServiceRegistryMock extends AbstractComponent implements ServiceReg
private final NoopTenantSecretService tenantSecretService = new NoopTenantSecretService();
private final ArchiveService archiveService = new MockArchiveService();
private final MockChangeRequestClient changeRequestClient = new MockChangeRequestClient();
+ private final AccessControlService accessControlService = new MockAccessControlService();
+ private final HorizonClient horizonClient = new MockHorizonClient();
public ServiceRegistryMock(SystemName system) {
this.zoneRegistryMock = new ZoneRegistryMock(system);
@@ -229,6 +233,16 @@ public class ServiceRegistryMock extends AbstractComponent implements ServiceReg
return changeRequestClient;
}
+ @Override
+ public AccessControlService accessControlService() {
+ return accessControlService;
+ }
+
+ @Override
+ public HorizonClient horizonClient() {
+ return horizonClient;
+ }
+
public ConfigServerMock configServerMock() {
return configServerMock;
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java
index 7fdbab49ba4..10fee56621c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneApiMock.java
@@ -78,8 +78,7 @@ public class ZoneApiMock implements ZoneApi {
public static class Builder {
- private final SystemName systemName = SystemName.defaultSystem();
-
+ private SystemName systemName = SystemName.defaultSystem();
private ZoneId id = ZoneId.defaultId();
private ZoneId virtualId ;
private CloudName cloudName = CloudName.defaultName();
@@ -90,6 +89,11 @@ public class ZoneApiMock implements ZoneApi {
return this;
}
+ public Builder withSystem(SystemName systemName) {
+ this.systemName = systemName;
+ return this;
+ }
+
public Builder withId(String id) {
return with(ZoneId.from(id));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java
new file mode 100644
index 00000000000..f3c4f9f7438
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CloudTrialExpirerTest.java
@@ -0,0 +1,93 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.flags.InMemoryFlagSource;
+import com.yahoo.vespa.flags.PermanentFlags;
+import com.yahoo.vespa.hosted.controller.ControllerTester;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
+import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
+import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
+import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
+import com.yahoo.vespa.hosted.controller.tenant.LastLoginInfo;
+import com.yahoo.vespa.hosted.controller.tenant.Tenant;
+import org.junit.Test;
+
+import java.time.Duration;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author ogronnesby
+ */
+public class CloudTrialExpirerTest {
+ private final ControllerTester tester = new ControllerTester(SystemName.Public);
+ private final DeploymentTester deploymentTester = new DeploymentTester(tester);
+ private final CloudTrialExpirer expirer = new CloudTrialExpirer(tester.controller(), Duration.ofMinutes(5));
+
+ @Test
+ public void expire_inactive_tenant() {
+ registerTenant("trial-tenant", "trial", Duration.ofDays(14).plusMillis(1));
+ expirer.maintain();
+ assertPlan("trial-tenant", "none");
+ }
+
+ @Test
+ public void keep_inactive_nontrial_tenants() {
+ registerTenant("not-a-trial-tenant", "pay-as-you-go", Duration.ofDays(30));
+ expirer.maintain();
+ assertPlan("not-a-trial-tenant", "pay-as-you-go");
+ }
+
+ @Test
+ public void keep_active_trial_tenants() {
+ registerTenant("active-trial-tenant", "trial", Duration.ofHours(14).minusMillis(1));
+ expirer.maintain();
+ assertPlan("active-trial-tenant", "trial");
+ }
+
+ @Test
+ public void keep_inactive_exempt_tenants() {
+ registerTenant("exempt-trial-tenant", "trial", Duration.ofDays(40));
+ ((InMemoryFlagSource) tester.controller().flagSource()).withListFlag(PermanentFlags.EXTENDED_TRIAL_TENANTS.id(), List.of("exempt-trial-tenant"), String.class);
+ expirer.maintain();
+ assertPlan("exempt-trial-tenant", "trial");
+ }
+
+ @Test
+ public void keep_inactive_trial_tenants_with_deployments() {
+ registerTenant("with-deployments", "trial", Duration.ofDays(30));
+ registerDeployment("with-deployments", "my-app", "default", "aws-us-east-1c");
+ expirer.maintain();
+ assertPlan("with-deployments", "trial");
+ }
+
+ private void registerTenant(String tenantName, String plan, Duration timeSinceLastLogin) {
+ var name = TenantName.from(tenantName);
+ tester.createTenant(tenantName, Tenant.Type.cloud);
+ tester.serviceRegistry().billingController().setPlan(name, PlanId.from(plan), false);
+ tester.controller().tenants().updateLastLogin(name, List.of(LastLoginInfo.UserLevel.user), tester.controller().clock().instant().minus(timeSinceLastLogin));
+ }
+
+ private void registerDeployment(String tenantName, String appName, String instanceName, String regionName) {
+ var zone = ZoneApiMock.newBuilder()
+ .withSystem(tester.zoneRegistry().system())
+ .withId("prod." + regionName)
+ .build();
+ tester.zoneRegistry().setZones(zone);
+ var app = tester.createApplication(tenantName, appName, instanceName);
+ var ctx = deploymentTester.newDeploymentContext(tenantName, appName, instanceName);
+ var pkg = new ApplicationPackageBuilder()
+ .instances("default")
+ .region(regionName)
+ .trustDefaultCertificate()
+ .build();
+ ctx.submit(pkg).deploy();
+ }
+
+ private void assertPlan(String tenant, String planId) {
+ assertEquals(planId, tester.serviceRegistry().billingController().getPlan(TenantName.from(tenant)).value());
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java
index 27b4f3744e7..4bdb657d3af 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintainerTest.java
@@ -38,14 +38,14 @@ public class ControllerMaintainerTest {
public void records_metric() {
TestControllerMaintainer maintainer = new TestControllerMaintainer(tester.controller(), SystemName.main, new AtomicInteger());
maintainer.run();
- assertEquals(0L, consecutiveFailuresMetric());
+ assertEquals(1.0, successFactorMetric(), 0.0000001);
maintainer.success = false;
maintainer.run();
maintainer.run();
- assertEquals(2L, consecutiveFailuresMetric());
+ assertEquals(0.0, successFactorMetric(), 0.0000001);
maintainer.success = true;
maintainer.run();
- assertEquals(0, consecutiveFailuresMetric());
+ assertEquals(1.0, successFactorMetric(), 0.0000001);
}
private long consecutiveFailuresMetric() {
@@ -54,6 +54,12 @@ public class ControllerMaintainerTest {
"maintenance.consecutiveFailures").get().longValue();
}
+ private double successFactorMetric() {
+ MetricsMock metrics = (MetricsMock) tester.controller().metric();
+ return metrics.getMetric((context) -> "TestControllerMaintainer".equals(context.get("job")),
+ "maintenance.successFactor").get().doubleValue();
+ }
+
private static class TestControllerMaintainer extends ControllerMaintainer {
private final AtomicInteger executions;
@@ -65,9 +71,9 @@ public class ControllerMaintainerTest {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
executions.incrementAndGet();
- return success;
+ return success ? 1.0 : 0.0;
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
index 232521c9609..ffc82f90ad4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
@@ -70,7 +70,7 @@ public class DeploymentExpirerTest {
assertEquals(1, permanentDeployments(prodApp.instance()));
// Dev application expires when enough time has passed since most recent attempt
- tester.clock().advance(Duration.ofDays(12));
+ tester.clock().advance(Duration.ofDays(12).plus(Duration.ofSeconds(1)));
expirer.maintain();
assertEquals(0, permanentDeployments(devApp.instance()));
assertEquals(1, permanentDeployments(prodApp.instance()));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
index 59fb5b596f1..c45aaa563e1 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
@@ -118,8 +118,8 @@ public class DeploymentMetricsMaintainerTest {
@Test
public void cluster_metric_aggregation_test() {
List<ClusterMetrics> clusterMetrics = List.of(
- new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0)),
- new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0)));
+ new ClusterMetrics("niceCluster", "container", Map.of("queriesPerSecond", 23.0, "queryLatency", 1337.0), Map.of()),
+ new ClusterMetrics("alsoNiceCluster", "container", Map.of("queriesPerSecond", 11.0, "queryLatency", 12.0), Map.of()));
DeploymentMetrics deploymentMetrics = DeploymentMetricsMaintainer.updateDeploymentMetrics(DeploymentMetrics.none, clusterMetrics);
@@ -131,7 +131,7 @@ public class DeploymentMetricsMaintainerTest {
}
private void setMetrics(ApplicationId application, Map<String, Double> metrics) {
- var clusterMetrics = new ClusterMetrics("default", "container", metrics);
+ var clusterMetrics = new ClusterMetrics("default", "container", metrics, Map.of());
tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, ZoneId.from("dev", "us-east-1")), clusterMetrics);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java
index 66bda66bbf9..ce219b8beed 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java
@@ -33,7 +33,7 @@ public class EndpointCertificateMaintainerTest {
@Test
public void old_and_unused_cert_is_deleted() {
tester.curator().writeEndpointCertificateMetadata(ApplicationId.defaultId(), exampleMetadata);
- assertTrue(maintainer.maintain());
+ assertEquals(1.0, maintainer.maintain(), 0.0000001);
assertTrue(tester.curator().readEndpointCertificateMetadata(ApplicationId.defaultId()).isEmpty());
}
@@ -41,7 +41,7 @@ public class EndpointCertificateMaintainerTest {
public void unused_but_recently_used_cert_is_not_deleted() {
EndpointCertificateMetadata recentlyRequestedCert = exampleMetadata.withLastRequested(tester.clock().instant().minusSeconds(3600).getEpochSecond());
tester.curator().writeEndpointCertificateMetadata(ApplicationId.defaultId(), recentlyRequestedCert);
- assertTrue(maintainer.maintain());
+ assertEquals(1.0, maintainer.maintain(), 0.0000001);
assertEquals(Optional.of(recentlyRequestedCert), tester.curator().readEndpointCertificateMetadata(ApplicationId.defaultId()));
}
@@ -53,7 +53,7 @@ public class EndpointCertificateMaintainerTest {
secretStore.setSecret(exampleMetadata.keyName(), "foo", 1);
secretStore.setSecret(exampleMetadata.certName(), "bar", 1);
- assertTrue(maintainer.maintain());
+ assertEquals(1.0, maintainer.maintain(), 0.0000001);
var updatedCert = Optional.of(recentlyRequestedCert.withLastRefreshed(tester.clock().instant().getEpochSecond()).withVersion(1));
@@ -77,7 +77,7 @@ public class EndpointCertificateMaintainerTest {
tester.curator().writeEndpointCertificateMetadata(appId, exampleMetadata);
- assertTrue(maintainer.maintain());
+ assertEquals(1.0, maintainer.maintain(), 0.0000001);
assertTrue(tester.curator().readEndpointCertificateMetadata(appId).isPresent()); // cert should not be deleted, the app is deployed!
}
@@ -97,7 +97,7 @@ public class EndpointCertificateMaintainerTest {
tester.curator().writeEndpointCertificateMetadata(appId, exampleMetadata);
- assertTrue(maintainer.maintain());
+ assertEquals(1.0, maintainer.maintain(), 0.0000001);
assertTrue(tester.curator().readEndpointCertificateMetadata(appId).isPresent()); // cert should not be deleted, the app is deployed!
tester.clock().advance(Duration.ofDays(3));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
index 7d512ba090c..7a0175845ca 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
@@ -24,8 +24,8 @@ public class OsUpgradeSchedulerTest {
public void maintain() {
ControllerTester tester = new ControllerTester();
OsUpgradeScheduler scheduler = new OsUpgradeScheduler(tester.controller(), Duration.ofDays(1));
- Instant initialTime = Instant.parse("2021-01-23T00:00:00.00Z");
- tester.clock().setInstant(initialTime);
+ Instant t0 = Instant.parse("2021-01-23T00:00:00.00Z"); // Outside trigger period
+ tester.clock().setInstant(t0);
CloudName cloud = CloudName.from("cloud");
ZoneApi zone = zone("prod.us-west-1", cloud);
@@ -50,7 +50,12 @@ public class OsUpgradeSchedulerTest {
Version version1 = Version.fromString("7.0.0.20210302");
tester.clock().advance(Duration.ofDays(15).plus(Duration.ofSeconds(1)));
scheduler.maintain();
- assertEquals("New target set", version1, tester.controller().osVersionTarget(cloud).get().osVersion().version());
+ assertEquals("Target is unchanged because we're outside trigger period", version0,
+ tester.controller().osVersionTarget(cloud).get().osVersion().version());
+ tester.clock().advance(Duration.ofHours(7)); // Put us inside the trigger period
+ scheduler.maintain();
+ assertEquals("New target set", version1,
+ tester.controller().osVersionTarget(cloud).get().osVersion().version());
// A few days pass and target remains unchanged
tester.clock().advance(Duration.ofDays(2));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java
index 3e2fd4ec0b9..664a1fdc83c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgraderTest.java
@@ -119,11 +119,13 @@ public class OsUpgraderTest {
@Test
public void upgrade_os_with_budget() {
CloudName cloud = CloudName.from("cloud");
+ ZoneApi zone0 = zone("prod.us-north-42", "prod.controller", cloud);
ZoneApi zone1 = zone("dev.us-east-1", cloud);
ZoneApi zone2 = zone("prod.us-west-1", cloud);
ZoneApi zone3 = zone("prod.us-central-1", cloud);
ZoneApi zone4 = zone("prod.eu-west-1", cloud);
UpgradePolicy upgradePolicy = UpgradePolicy.create()
+ .upgrade(zone0)
.upgrade(zone1)
.upgradeInParallel(zone2, zone3)
.upgrade(zone4);
@@ -133,6 +135,7 @@ public class OsUpgraderTest {
List<SystemApplication> nodeTypes = List.of(SystemApplication.configServerHost, SystemApplication.tenantHost);
tester.configServer().bootstrap(List.of(zone1.getId(), zone2.getId(), zone3.getId(), zone4.getId()),
nodeTypes);
+ tester.configServer().addNodes(List.of(zone0.getVirtualId()), List.of(SystemApplication.controllerHost));
// Upgrade with budget
Version version = Version.fromString("7.1");
@@ -141,7 +144,16 @@ public class OsUpgraderTest {
statusUpdater.maintain();
osUpgrader.maintain();
+ // Controllers upgrade first
+ osUpgrader.maintain();
+ assertWanted(version, SystemApplication.controllerHost, zone0);
+ assertEquals("Controller zone gets a zero budget", Duration.ZERO, upgradeBudget(zone0, SystemApplication.controllerHost, version));
+ completeUpgrade(version, SystemApplication.controllerHost, zone0);
+ statusUpdater.maintain();
+ assertEquals(3, nodesOn(version).size());
+
// First zone upgrades
+ osUpgrader.maintain();
for (var nodeType : nodeTypes) {
assertEquals("Dev zone gets a zero budget", Duration.ZERO, upgradeBudget(zone1, nodeType, version));
completeUpgrade(version, nodeType, zone1);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java
new file mode 100644
index 00000000000..df93efab893
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java
@@ -0,0 +1,70 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.zone.ZoneId;
+import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
+import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
+import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
+import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author mortent
+ */
+public class RetriggerMaintainerTest {
+
+ private final DeploymentTester tester = new DeploymentTester();
+
+ @Test
+ public void processes_queue() throws IOException {
+ RetriggerMaintainer maintainer = new RetriggerMaintainer(tester.controller(), Duration.ofDays(1));
+ ApplicationId applicationId = ApplicationId.from("tenant", "app", "default");
+ var devApp = tester.newDeploymentContext(applicationId);
+ ApplicationPackage appPackage = new ApplicationPackageBuilder()
+ .region("us-west-1")
+ .build();
+
+ // Deploy app
+ devApp.runJob(JobType.devUsEast1, appPackage);
+ devApp.completeRollout();
+
+ // Trigger a run (to simulate a running job)
+ tester.deploymentTrigger().reTrigger(applicationId, JobType.devUsEast1);
+
+ // Add a job to the queue
+ tester.deploymentTrigger().reTriggerOrAddToQueue(devApp.deploymentIdIn(ZoneId.from("dev", "us-east-1")));
+
+ // Should be 1 entry in the queue:
+ List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries();
+ assertEquals(1, retriggerEntries.size());
+
+ // Adding to queue triggers abort
+ devApp.jobAborted(JobType.devUsEast1);
+ assertEquals(0, tester.jobs().active(applicationId).size());
+
+ // The maintainer runs and triggers dev us-east-1, but keeps the entry in the queue until it can verify that the job actually ran
+ maintainer.maintain();
+ retriggerEntries = tester.controller().curator().readRetriggerEntries();
+ assertEquals(1, retriggerEntries.size());
+ assertEquals(1, tester.jobs().active(applicationId).size());
+
+ // Run outstanding jobs
+ devApp.runJob(JobType.devUsEast1);
+ assertEquals(0, tester.jobs().active(applicationId).size());
+
+ // Run maintainer again, should find that the job has already run successfully and will remove the entry.
+ maintainer.maintain();
+ retriggerEntries = tester.controller().curator().readRetriggerEntries();
+ assertEquals(0, retriggerEntries.size());
+ assertEquals(0, tester.jobs().active(applicationId).size());
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
index 2afa3a0faea..29d77c38b1a 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
@@ -16,7 +16,6 @@ import java.time.Duration;
import java.util.Map;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
/**
* Tests the traffic fraction updater. This also tests its dependency on DeploymentMetricsMaintainer.
@@ -39,7 +38,7 @@ public class TrafficShareUpdaterTest {
// Single zone
setQpsMetric(50.0, application.application().id().defaultInstance(), prod1, tester);
deploymentMetricsMaintainer.maintain();
- assertTrue(updater.maintain());
+ assertEquals(1.0, updater.maintain(), 0.0000001);
assertTrafficFraction(1.0, 1.0, application.instanceId(), prod1, tester);
// Two zones
@@ -48,14 +47,14 @@ public class TrafficShareUpdaterTest {
setQpsMetric(50.0, application.application().id().defaultInstance(), prod1, tester);
setQpsMetric(0.0, application.application().id().defaultInstance(), prod2, tester);
deploymentMetricsMaintainer.maintain();
- assertTrue(updater.maintain());
+ assertEquals(1.0, updater.maintain(), 0.0000001);
assertTrafficFraction(1.0, 1.0, application.instanceId(), prod1, tester);
assertTrafficFraction(0.0, 1.0, application.instanceId(), prod2, tester);
// - both hot
setQpsMetric(53.0, application.application().id().defaultInstance(), prod1, tester);
setQpsMetric(47.0, application.application().id().defaultInstance(), prod2, tester);
deploymentMetricsMaintainer.maintain();
- assertTrue(updater.maintain());
+ assertEquals(1.0, updater.maintain(), 0.0000001);
assertTrafficFraction(0.53, 1.0, application.instanceId(), prod1, tester);
assertTrafficFraction(0.47, 1.0, application.instanceId(), prod2, tester);
@@ -66,7 +65,7 @@ public class TrafficShareUpdaterTest {
setQpsMetric(47.0, application.application().id().defaultInstance(), prod2, tester);
setQpsMetric(0.0, application.application().id().defaultInstance(), prod3, tester);
deploymentMetricsMaintainer.maintain();
- assertTrue(updater.maintain());
+ assertEquals(1.0, updater.maintain(), 0.0000001);
assertTrafficFraction(0.53, 0.53, application.instanceId(), prod1, tester);
assertTrafficFraction(0.47, 0.50, application.instanceId(), prod2, tester);
assertTrafficFraction(0.00, 0.50, application.instanceId(), prod3, tester);
@@ -75,14 +74,14 @@ public class TrafficShareUpdaterTest {
setQpsMetric(25.0, application.application().id().defaultInstance(), prod2, tester);
setQpsMetric(25.0, application.application().id().defaultInstance(), prod3, tester);
deploymentMetricsMaintainer.maintain();
- assertTrue(updater.maintain());
+ assertEquals(1.0, updater.maintain(), 0.0000001);
assertTrafficFraction(0.50, 0.5, application.instanceId(), prod1, tester);
assertTrafficFraction(0.25, 0.5, application.instanceId(), prod2, tester);
assertTrafficFraction(0.25, 0.5, application.instanceId(), prod3, tester);
}
private void setQpsMetric(double qps, ApplicationId application, ZoneId zone, DeploymentTester tester) {
- var clusterMetrics = new ClusterMetrics("default", "container", Map.of(ClusterMetrics.QUERIES_PER_SECOND, qps));
+ var clusterMetrics = new ClusterMetrics("default", "container", Map.of(ClusterMetrics.QUERIES_PER_SECOND, qps), Map.of());
tester.controllerTester().serviceRegistry().configServerMock().setMetrics(new DeploymentId(application, zone), clusterMetrics);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index 484b471cbaa..326f4bf311e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -164,7 +164,6 @@ public class UpgraderTest {
tester.triggerJobs();
assertEquals("Upgrade with error should retry", 1, tester.jobs().active().size());
-
// --- Failing application is repaired by changing the application, causing confidence to move above 'high' threshold
// Deploy application change
default0.submit(applicationPackage("default"));
@@ -1114,11 +1113,32 @@ public class UpgraderTest {
assertEquals("Upgrade orders are distinct", versions.size(), upgradeOrders.size());
}
+ private static final ApplicationPackage canaryApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("canary")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage defaultApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("default")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage conservativeApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("conservative")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ /** Returns a prebuilt application package with the given upgrade policy, for efficiency */
private ApplicationPackage applicationPackage(String upgradePolicy) {
- return new ApplicationPackageBuilder().upgradePolicy(upgradePolicy)
- .region("us-west-1")
- .region("us-east-3")
- .build();
+ switch (upgradePolicy) {
+ case "canary" : return canaryApplicationPackage;
+ case "default" : return defaultApplicationPackage;
+ case "conservative" : return conservativeApplicationPackage;
+ default : throw new IllegalArgumentException("No upgrade policy '" + upgradePolicy + "'");
+ }
}
private DeploymentContext createAndDeploy(String applicationName, String upgradePolicy) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
index 5bd7d1db769..786809fb7b1 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/notification/NotificationsDbTest.java
@@ -24,13 +24,12 @@ import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
+import static com.yahoo.vespa.hosted.controller.notification.Notification.Level;
+import static com.yahoo.vespa.hosted.controller.notification.Notification.Type;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
-import static com.yahoo.vespa.hosted.controller.notification.Notification.Level;
-import static com.yahoo.vespa.hosted.controller.notification.Notification.Type;
-
/**
* @author freva
*/
@@ -105,54 +104,56 @@ public class NotificationsDbTest {
List<Notification> expected = new ArrayList<>(notifications);
// No metrics, no new notification
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of());
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of());
assertEquals(expected, curatorDb.readNotifications(tenant));
// Metrics that contain none of the feed block metrics does not create new notification
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", null, null, null, null, Map.of())));
assertEquals(expected, curatorDb.readNotifications(tenant));
// Metrics that only contain util or limit (should not be possible) should not cause any issues
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, null, null, 0.5)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, null, null, 0.5, Map.of())));
assertEquals(expected, curatorDb.readNotifications(tenant));
// One resource is at warning
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5, Map.of())));
expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 85.0%, feed block limit: 90.0%)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
// Both resources over the limit
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.3, 0.5, Map.of())));
expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1, "disk (usage: 95.0%, feed block limit: 90.0%)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
// One resource at warning, one at error: Only show error message
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.7, 0.5)));
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(clusterMetrics("cluster1", 0.95, 0.9, 0.7, 0.5, Map.of())));
expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster1,
"memory (usage: 70.0%, feed block limit: 50.0%)", "disk (usage: 95.0%, feed block limit: 90.0%)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
}
@Test
- public void feed_blocked_multiple_cluster_test() {
+ public void deployment_metrics_multiple_cluster_test() {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenant.value(), "app1", "instance1"), ZoneId.from("prod", "us-south-3"));
NotificationSource sourceCluster1 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster1"));
NotificationSource sourceCluster2 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster2"));
NotificationSource sourceCluster3 = NotificationSource.from(deploymentId, ClusterSpec.Id.from("cluster3"));
List<Notification> expected = new ArrayList<>(notifications);
- // Cluster1 and cluster2 are having issues
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(
- clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.1, 0.8, 0.2, 0.9)));
+ // Cluster1 and cluster2 are having feed block issues, cluster 3 is reindexing
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(
+ clusterMetrics("cluster1", 0.85, 0.9, 0.3, 0.5, Map.of()), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75, Map.of()), clusterMetrics("cluster3", 0.1, 0.8, 0.2, 0.9, Map.of("announcements", 0.75, "build", 0.5))));
expected.add(notification(12345, Type.feedBlock, Level.warning, sourceCluster1, "disk (usage: 85.0%, feed block limit: 90.0%)"));
expected.add(notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)"));
+ expected.add(notification(12345, Type.reindex, Level.info, sourceCluster3, "document type 'announcements' (75.0% done)", "document type 'build' (50.0% done)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
- // Cluster1 improves, while cluster3 starts having issues
- notificationsDb.setDeploymentFeedingBlockedNotifications(deploymentId, List.of(
- clusterMetrics("cluster1", 0.15, 0.9, 0.3, 0.5), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75), clusterMetrics("cluster3", 0.75, 0.8, 0.2, 0.9)));
+ // Cluster1 improves, while cluster3 starts having feed block issues and finishes reindexing 'build' documents
+ notificationsDb.setDeploymentMetricsNotifications(deploymentId, List.of(
+ clusterMetrics("cluster1", 0.15, 0.9, 0.3, 0.5, Map.of()), clusterMetrics("cluster2", 0.6, 0.8, 0.9, 0.75, Map.of()), clusterMetrics("cluster3", 0.75, 0.8, 0.2, 0.9, Map.of("announcements", 0.9))));
expected.set(6, notification(12345, Type.feedBlock, Level.error, sourceCluster2, "memory (usage: 90.0%, feed block limit: 75.0%)"));
expected.set(7, notification(12345, Type.feedBlock, Level.warning, sourceCluster3, "disk (usage: 75.0%, feed block limit: 80.0%)"));
+ expected.set(8, notification(12345, Type.reindex, Level.info, sourceCluster3, "document type 'announcements' (90.0% done)"));
assertEquals(expected, curatorDb.readNotifications(tenant));
}
@@ -169,12 +170,14 @@ public class NotificationsDbTest {
return new Notification(Instant.ofEpochSecond(secondsSinceEpoch), type, level, source, List.of(messages));
}
- private static ClusterMetrics clusterMetrics(String clusterId, Double diskUtil, Double diskLimit, Double memoryUtil, Double memoryLimit) {
+ private static ClusterMetrics clusterMetrics(String clusterId,
+ Double diskUtil, Double diskLimit, Double memoryUtil, Double memoryLimit,
+ Map<String, Double> reindexingProgress) {
Map<String, Double> metrics = new HashMap<>();
if (diskUtil != null) metrics.put(ClusterMetrics.DISK_UTIL, diskUtil);
if (diskLimit != null) metrics.put(ClusterMetrics.DISK_FEED_BLOCK_LIMIT, diskLimit);
if (memoryUtil != null) metrics.put(ClusterMetrics.MEMORY_UTIL, memoryUtil);
if (memoryLimit != null) metrics.put(ClusterMetrics.MEMORY_FEED_BLOCK_LIMIT, memoryLimit);
- return new ClusterMetrics(clusterId, "content", metrics);
+ return new ClusterMetrics(clusterId, "content", metrics, reindexingProgress);
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java
index 2a43f8cc4f3..97cf53d7b89 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java
@@ -7,7 +7,7 @@ import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.JsonFormat;
-import com.yahoo.slime.SlimeUtils;
+import com.yahoo.slime.Slime;
import com.yahoo.vespa.hosted.controller.support.access.SupportAccess;
import com.yahoo.vespa.hosted.controller.support.access.SupportAccessGrant;
import org.intellij.lang.annotations.Language;
@@ -21,7 +21,6 @@ import java.math.BigInteger;
import java.security.cert.X509Certificate;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
-import java.util.Optional;
import static org.junit.Assert.*;
@@ -80,7 +79,8 @@ public class SupportAccessSerializerTest {
@Test
public void serialize_default() {
- assertSerialized(SupportAccess.DISALLOWED_NO_HISTORY, true, Instant.EPOCH, "{\n" +
+ var slime = SupportAccessSerializer.serializeCurrentState(SupportAccess.DISALLOWED_NO_HISTORY, Instant.EPOCH);
+ assertSerialized(slime, "{\n" +
" \"state\": {\n" +
" \"supportAccess\": \"NOT_ALLOWED\"\n" +
" },\n" +
@@ -93,12 +93,14 @@ public class SupportAccessSerializerTest {
@Test
public void serialize_with_certificates() {
- assertSerialized(supportAccessExample, true, null, expectedWithCertificates);
+ var slime = SupportAccessSerializer.toSlime(supportAccessExample);
+ assertSerialized(slime, expectedWithCertificates);
}
@Test
public void serialize_with_status() {
- assertSerialized(supportAccessExample, false, hour(32),
+ var slime = SupportAccessSerializer.serializeCurrentState(supportAccessExample, hour(12));
+ assertSerialized(slime,
"{\n" +
" \"state\": {\n" +
" \"supportAccess\": \"ALLOWED\",\n" +
@@ -122,6 +124,12 @@ public class SupportAccessSerializerTest {
" \"at\": \"1970-01-01T02:00:00Z\",\n" +
" \"until\": \"1970-01-02T00:00:00Z\",\n" +
" \"by\": \"andreer\"\n" +
+ " },\n" +
+ " {\n" +
+ " \"state\": \"grant\",\n" +
+ " \"at\": \"1970-01-01T03:00:00Z\",\n" +
+ " \"until\": \"1970-01-01T04:00:00Z\",\n" +
+ " \"by\": \"mortent\"\n" +
" }\n" +
" ],\n" +
" \"grants\": [\n" +
@@ -129,28 +137,26 @@ public class SupportAccessSerializerTest {
" \"requestor\": \"mortent\",\n" +
" \"notBefore\": \"1970-01-01T07:00:00Z\",\n" +
" \"notAfter\": \"1970-01-01T19:00:00Z\"\n" +
- " },\n" +
- " {\n" +
- " \"requestor\": \"mortent\",\n" +
- " \"notBefore\": \"1970-01-01T03:00:00Z\",\n" +
- " \"notAfter\": \"1970-01-01T04:00:00Z\"\n" +
- " }\n" +
+ " }" +
+ "\n" +
" ]\n" +
"}\n");
}
@Test
public void deserialize() {
- assertEquals(supportAccessExample, SupportAccessSerializer.fromSlime(SlimeUtils.jsonToSlime(expectedWithCertificates)));
+ var slime = SupportAccessSerializer.toSlime(supportAccessExample);
+ assertSerialized(slime, expectedWithCertificates);
+
+ var deserialized = SupportAccessSerializer.fromSlime(slime);
+ assertEquals(supportAccessExample, deserialized);
}
private Instant hour(long h) {
return Instant.EPOCH.plus(h, ChronoUnit.HOURS);
}
- private void assertSerialized(SupportAccess supportAccess, boolean includeCertificates, Instant now, String expected) {
- var slime = SupportAccessSerializer.toSlime(supportAccess, includeCertificates, Optional.ofNullable(now));
-
+ private void assertSerialized(Slime slime, String expected) {
try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
new JsonFormat(false).encode(out, slime);
assertEquals(expected, out.toString());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 47aa3e6b9d4..ed81ce36600 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -18,6 +18,7 @@ import com.yahoo.security.KeyAlgorithm;
import com.yahoo.security.KeyUtils;
import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
+import com.yahoo.security.X509CertificateUtils;
import com.yahoo.vespa.athenz.api.AthenzDomain;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzPrincipal;
@@ -70,6 +71,7 @@ import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest;
import com.yahoo.vespa.hosted.controller.routing.GlobalRouting;
import com.yahoo.vespa.hosted.controller.security.AthenzCredentials;
import com.yahoo.vespa.hosted.controller.security.AthenzTenantSpec;
+import com.yahoo.vespa.hosted.controller.support.access.SupportAccessGrant;
import com.yahoo.vespa.hosted.controller.tenant.AthenzTenant;
import com.yahoo.vespa.hosted.controller.tenant.LastLoginInfo;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
@@ -82,6 +84,7 @@ import java.io.File;
import java.math.BigInteger;
import java.net.URI;
import java.security.cert.X509Certificate;
+import java.time.Duration;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
@@ -214,6 +217,11 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1", GET).userIdentity(USER_ID),
new File("tenant-with-application.json"));
+ tester.assertResponse(request("/application/v4/tenant/tenant1", GET)
+ .userIdentity(USER_ID)
+ .properties(Map.of("activeInstances", "true")),
+ new File("tenant-without-applications.json"));
+
// GET tenant applications
tester.assertResponse(request("/application/v4/tenant/tenant1/application/", GET).userIdentity(USER_ID),
new File("application-list.json"));
@@ -1501,6 +1509,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
var zone = ZoneId.from(Environment.prod, RegionName.from("us-west-1"));
deploymentTester.controllerTester().zoneRegistry().setRoutingMethod(ZoneApiMock.from(zone),
List.of(RoutingMethod.exclusive, RoutingMethod.shared));
+ addUserToHostedOperatorRole(HostedAthenzIdentities.from(HOSTED_VESPA_OPERATOR));
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.athenzIdentity(com.yahoo.config.provision.AthenzDomain.from("domain"), AthenzService.from("service"))
.compileVersion(RoutingController.DIRECT_ROUTING_MIN_VERSION)
@@ -1528,26 +1537,46 @@ public class ApplicationApiTest extends ControllerContainerTest {
// Grant access to support user
X509Certificate support_cert = grantCertificate(now, now.plusSeconds(3600));
- tester.controller().supportAccess().registerGrant(app.deploymentIdIn(zone), "user.andreer", support_cert);
+ String grantPayload = "{\n" +
+ " \"applicationId\": \"tenant1:application1:instance1\",\n" +
+ " \"zone\": \"prod.us-west-1\",\n" +
+ " \"certificate\":\""+X509CertificateUtils.toPem(support_cert)+ "\"\n" +
+ "}";
+ tester.assertResponse(request("/controller/v1/access/grants/"+HOSTED_VESPA_OPERATOR.id(), POST)
+ .data(grantPayload)
+ .userIdentity(HOSTED_VESPA_OPERATOR),
+ "{\"message\":\"Operator user.johnoperator granted access and job production-us-west-1 triggered\"}");
// GET shows grant
String grantResponse = allowedResponse.replaceAll("\"grants\":\\[]",
- "\"grants\":[{\"requestor\":\"user.andreer\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
+ "\"grants\":[{\"requestor\":\"user.johnoperator\",\"notBefore\":\"" + serializeInstant(now) + "\",\"notAfter\":\"" + serializeInstant(now.plusSeconds(3600)) + "\"}]");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", GET)
.userIdentity(USER_ID),
grantResponse, 200
);
+ // Should be 1 available grant
+ List<SupportAccessGrant> activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
+ assertEquals(1, activeGrants.size());
+
+ // Adding grant should trigger job
+ app.assertRunning(JobType.productionUsWest1);
+
// DELETE removes access
- System.out.println("grantresponse:\n"+grantResponse+"\n");
String disallowedResponse = grantResponse
.replaceAll("ALLOWED\".*?}", "NOT_ALLOWED\"}")
.replace("history\":[", "history\":[{\"state\":\"disallowed\",\"at\":\""+ serializeInstant(now) +"\",\"by\":\"user.myuser\"},");
- System.out.println("disallowedResponse:\n"+disallowedResponse+"\n");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/environment/prod/region/us-west-1/access/support", DELETE)
.userIdentity(USER_ID),
disallowedResponse, 200
);
+
+ // Revoking access should trigger job
+ app.assertRunning(JobType.productionUsWest1);
+
+ // Should be no available grant
+ activeGrants = tester.controller().supportAccess().activeGrantsFor(new DeploymentId(ApplicationId.fromSerializedForm("tenant1:application1:instance1"), zone));
+ assertEquals(0, activeGrants.size());
}
private static String serializeInstant(Instant i) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
index cd1fe5acf6a..fc40a9ce692 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-clusters.json
@@ -97,7 +97,8 @@
"completion": 2234
}
],
- "autoscalingStatus": "the autoscaling status",
+ "autoscalingStatusCode": "ideal",
+ "autoscalingStatus": "Cluster is ideally scaled",
"scalingDuration": 360000,
"maxQueryGrowthRate": 0.7,
"currentQueryFractionOfMax":0.3
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
index d87da62b8f2..80cee3af58b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/changemanagement/ChangeManagementApiHandlerTest.java
@@ -27,7 +27,6 @@ import java.time.Instant;
import java.time.ZonedDateTime;
import java.util.ArrayList;
import java.util.List;
-import java.util.UUID;
import static org.junit.Assert.assertEquals;
@@ -52,6 +51,7 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest {
@Test
public void test_api() {
assertFile(new Request("http://localhost:8080/changemanagement/v1/assessment", "{\"zone\":\"prod.us-east-3\", \"hosts\": [\"host1\"]}", Request.Method.POST), "initial.json");
+ assertFile(new Request("http://localhost:8080/changemanagement/v1/assessment", "{\"zone\":\"prod.us-east-3\", \"switches\": [\"switch1\"]}", Request.Method.POST), "initial.json");
assertFile(new Request("http://localhost:8080/changemanagement/v1/vcmr"), "vcmrs.json");
}
@@ -98,6 +98,7 @@ public class ChangeManagementApiHandlerTest extends ControllerContainerTest {
private Node createNode() {
return new Node.Builder()
.hostname(HostName.from("host1"))
+ .switchHostname("switch1")
.build();
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java
index 8f6988dbc27..fc83c58cc67 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java
@@ -6,8 +6,11 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.test.ManualClock;
+import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.athenz.MockAccessControlService;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Application;
import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot;
import com.yahoo.vespa.hosted.controller.auditlog.AuditLogger;
@@ -165,4 +168,25 @@ public class ControllerApiTest extends ControllerContainerTest {
);
}
+ @Test
+ public void testApproveMembership() {
+ ApplicationId applicationId = ApplicationId.from("tenant", "app", "instance");
+ DeploymentId deployment = new DeploymentId(applicationId, ZoneId.defaultId());
+ String requestBody = "{\n" +
+ " \"applicationId\": \"" + deployment.applicationId().serializedForm() + "\",\n" +
+ " \"zone\": \"" + deployment.zoneId().value() + "\"\n" +
+ "}";
+
+ MockAccessControlService accessControlService = (MockAccessControlService) tester.serviceRegistry().accessControlService();
+ tester.assertResponse(operatorRequest("http://localhost:8080/controller/v1/access/requests/"+hostedOperator.getName(), requestBody, Request.Method.POST),
+ "{\"message\":\"Unable to approve membership request\"}", 400);
+
+ accessControlService.addPendingMember(hostedOperator);
+ tester.assertResponse(operatorRequest("http://localhost:8080/controller/v1/access/requests/"+hostedOperator.getName(), requestBody, Request.Method.POST),
+ "{\"message\":\"Unable to approve membership request\"}", 400);
+
+ tester.controller().supportAccess().allow(deployment, Instant.now().plus(Duration.ofHours(1)), "tenantx");
+ tester.assertResponse(operatorRequest("http://localhost:8080/controller/v1/access/requests/"+hostedOperator.getName(), requestBody, Request.Method.POST),
+ "{\"members\":[\"user.alice\"]}");
+ }
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
index 3cf79977fb8..e906df94023 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
@@ -19,6 +19,9 @@
"name": "CloudEventReporter"
},
{
+ "name": "CloudTrialExpirer"
+ },
+ {
"name": "ContactInformationMaintainer"
},
{
@@ -76,6 +79,9 @@
"name": "ResourceTagMaintainer"
},
{
+ "name":"RetriggerMaintainer"
+ },
+ {
"name": "SystemRoutingPolicyMaintainer"
},
{
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java
new file mode 100644
index 00000000000..ab9d50f8eae
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java
@@ -0,0 +1,59 @@
+package com.yahoo.vespa.hosted.controller.restapi.horizon;
+
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.slime.JsonFormat;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.api.role.Role;
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author valerijf
+ */
+public class TsdbQueryRewriterTest {
+
+ @Test
+ public void rewrites_query() throws IOException {
+ assertRewrite("filters-complex.json", "filters-complex.expected.json", Role.reader(TenantName.from("tenant2")));
+
+ assertRewrite("filter-in-execution-graph.json",
+ "filter-in-execution-graph.expected.json",
+ Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
+
+ assertRewrite("filter-in-execution-graph.json",
+ "filter-in-execution-graph.expected.operator.json",
+ Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")), Role.hostedOperator());
+
+ assertRewrite("no-filters.json",
+ "no-filters.expected.json",
+ Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
+
+ assertRewrite("filters-meta-query.json",
+ "filters-meta-query.expected.json",
+ Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
+ }
+
+ @Test(expected = TsdbQueryRewriter.UnauthorizedException.class)
+ public void throws_if_no_roles() throws IOException {
+ assertRewrite("filters-complex.json", "filters-complex.expected.json");
+ }
+
+ private static void assertRewrite(String initialFilename, String expectedFilename, Role... roles) throws IOException {
+ byte[] data = Files.readAllBytes(Paths.get("src/test/resources/horizon", initialFilename));
+ data = TsdbQueryRewriter.rewrite(data, Set.of(roles), SystemName.Public);
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ new JsonFormat(false).encode(baos, SlimeUtils.jsonToSlime(data));
+ String expectedJson = Files.readString(Paths.get("src/test/resources/horizon", expectedFilename));
+
+ assertEquals(expectedJson, baos.toString());
+ }
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json
index 9bd66c16308..ca437dba761 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/application-roles.json
@@ -6,11 +6,13 @@
{
"name": "administrator@tenant",
"email": "administrator@tenant",
+ "verified": false,
"roles": {}
},
{
"name": "developer@tenant",
"email": "developer@tenant",
+ "verified": false,
"roles": {}
}
]
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json
index 6a1c4c88878..bc921e4bdf4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/tenant-roles.json
@@ -9,6 +9,7 @@
{
"name": "administrator@tenant",
"email": "administrator@tenant",
+ "verified": false,
"roles": {
"administrator": {
"explicit": true,
@@ -27,6 +28,7 @@
{
"name": "developer@tenant",
"email": "developer@tenant",
+ "verified": false,
"roles": {
"administrator": {
"explicit": false,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json
index 2ae3514bec3..5d3a38334ad 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-athenz.json
@@ -6,7 +6,8 @@
"user": {
"name": "Joe Developer",
"email": "dev@domail",
- "nickname": "dev"
+ "nickname": "dev",
+ "verified": false
},
"tenants": {
"sandbox": {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
index 2d2a137c2ca..ae3dc68d9e3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
@@ -6,7 +6,8 @@
"user": {
"name": "Joe Developer",
"email": "dev@domail",
- "nickname": "dev"
+ "nickname": "dev",
+ "verified":false
},
"tenants": {
"sandbox": {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json
index e03a18a1949..3bf999b490b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-applications.json
@@ -6,7 +6,8 @@
"user": {
"name": "Joe Developer",
"email": "dev@domail",
- "nickname": "dev"
+ "nickname": "dev",
+ "verified":false
},
"tenants": {},
"operator": [
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json
index a7410b14850..27242424579 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-without-trial-capacity-cloud.json
@@ -6,7 +6,8 @@
"user": {
"name": "Joe Developer",
"email": "dev@domail",
- "nickname": "dev"
+ "nickname": "dev",
+ "verified":false
},
"tenants": {}
} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index 047a4461f7c..79b564eee52 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -66,9 +66,9 @@ public class RoutingPoliciesTest {
private static final ZoneId zone3 = ZoneId.from("prod", "aws-us-east-1a");
private static final ZoneId zone4 = ZoneId.from("prod", "aws-us-east-1b");
- private final ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region())
- .region(zone2.region())
- .build();
+ private static final ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region())
+ .region(zone2.region())
+ .build();
@Test
public void global_routing_policies() {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
index 77ce86f1664..4dd283cf5d7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
@@ -127,11 +127,7 @@ public class VersionStatusTest {
@Test
public void testVersionStatusAfterApplicationUpdates() {
DeploymentTester tester = new DeploymentTester();
- ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
- .upgradePolicy("default")
- .region("us-west-1")
- .region("us-east-3")
- .build();
+ ApplicationPackage applicationPackage = applicationPackage("default");
Version version1 = new Version("6.2");
Version version2 = new Version("6.3");
@@ -216,10 +212,9 @@ public class VersionStatusTest {
Version version0 = new Version("6.2");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().maintain();
- var builder = new ApplicationPackageBuilder().region("us-west-1").region("us-east-3");
// Setup applications - all running on version0
- ApplicationPackage canaryPolicy = builder.upgradePolicy("canary").build();
+ ApplicationPackage canaryPolicy = applicationPackage("canary");
var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default")
.submit(canaryPolicy)
.deploy();
@@ -230,7 +225,7 @@ public class VersionStatusTest {
.submit(canaryPolicy)
.deploy();
- ApplicationPackage defaultPolicy = builder.upgradePolicy("default").build();
+ ApplicationPackage defaultPolicy = applicationPackage("default");
var default0 = tester.newDeploymentContext("tenant1", "default0", "default")
.submit(defaultPolicy)
.deploy();
@@ -262,7 +257,7 @@ public class VersionStatusTest {
.submit(defaultPolicy)
.deploy();
- ApplicationPackage conservativePolicy = builder.upgradePolicy("conservative").build();
+ ApplicationPackage conservativePolicy = applicationPackage("conservative");
var conservative0 = tester.newDeploymentContext("tenant1", "conservative0", "default")
.submit(conservativePolicy)
.deploy();
@@ -388,10 +383,10 @@ public class VersionStatusTest {
Version version0 = new Version("6.2");
tester.controllerTester().upgradeSystem(version0);
tester.upgrader().maintain();
- var appPackage = new ApplicationPackageBuilder().region("us-west-1").region("us-east-3").upgradePolicy("canary");
+ var appPackage = applicationPackage("canary");
var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default")
- .submit(appPackage.build())
+ .submit(appPackage)
.deploy();
assertEquals("All applications running on this version: High",
@@ -537,13 +532,13 @@ public class VersionStatusTest {
Version version0 = Version.fromString("7.1");
tester.controllerTester().upgradeSystem(version0);
var canary0 = tester.newDeploymentContext("tenant1", "canary0", "default")
- .submit(new ApplicationPackageBuilder().upgradePolicy("canary").region("us-west-1").build())
+ .submit(applicationPackage("canary"))
.deploy();
var canary1 = tester.newDeploymentContext("tenant1", "canary1", "default")
- .submit(new ApplicationPackageBuilder().upgradePolicy("canary").region("us-west-1").build())
+ .submit(applicationPackage("canary"))
.deploy();
var default0 = tester.newDeploymentContext("tenant1", "default0", "default")
- .submit(new ApplicationPackageBuilder().upgradePolicy("default").region("us-west-1").build())
+ .submit(applicationPackage("default"))
.deploy();
tester.controllerTester().computeVersionStatus();
assertSame(Confidence.high, tester.controller().readVersionStatus().version(version0).confidence());
@@ -609,12 +604,11 @@ public class VersionStatusTest {
public void testStatusIncludesIncompleteUpgrades() {
var tester = new DeploymentTester().atMondayMorning();
var version0 = Version.fromString("7.1");
- var applicationPackage = new ApplicationPackageBuilder().region("us-west-1").build();
// Application deploys on initial version
tester.controllerTester().upgradeSystem(version0);
var context = tester.newDeploymentContext("tenant1", "default0", "default");
- context.submit(applicationPackage).deploy();
+ context.submit(applicationPackage("default")).deploy();
// System is upgraded and application starts upgrading to next version
var version1 = Version.fromString("7.2");
@@ -688,4 +682,32 @@ public class VersionStatusTest {
.orElseThrow(() -> new IllegalArgumentException("Expected to find version: " + version));
}
+ private static final ApplicationPackage canaryApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("canary")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage defaultApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("default")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ private static final ApplicationPackage conservativeApplicationPackage =
+ new ApplicationPackageBuilder().upgradePolicy("conservative")
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+
+ /** Returns a prebuilt, empty application package with the given upgrade policy, for efficiency */
+ private ApplicationPackage applicationPackage(String upgradePolicy) {
+ switch (upgradePolicy) {
+ case "canary" : return canaryApplicationPackage;
+ case "default" : return defaultApplicationPackage;
+ case "conservative" : return conservativeApplicationPackage;
+ default : throw new IllegalArgumentException("No upgrade policy '" + upgradePolicy + "'");
+ }
+ }
+
}
diff --git a/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.json b/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.json
new file mode 100644
index 00000000000..a71fd812de9
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.json
@@ -0,0 +1,37 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.application1.instance1",
+ "tagKey": "applicationId"
+ },
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ },
+ {
+ "type": "TagValueRegex",
+ "filter": "^(tenant2|tenant3)\\..*",
+ "tagKey": "applicationId"
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.operator.json b/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.operator.json
new file mode 100644
index 00000000000..babf3219c6a
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filter-in-execution-graph.expected.operator.json
@@ -0,0 +1,32 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.application1.instance1",
+ "tagKey": "applicationId"
+ },
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/controller-server/src/test/resources/horizon/filter-in-execution-graph.json b/controller-server/src/test/resources/horizon/filter-in-execution-graph.json
new file mode 100644
index 00000000000..6a2512c3642
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filter-in-execution-graph.json
@@ -0,0 +1,21 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filter": {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.application1.instance1",
+ "tagKey": "applicationId"
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/controller-server/src/test/resources/horizon/filters-complex.expected.json b/controller-server/src/test/resources/horizon/filters-complex.expected.json
new file mode 100644
index 00000000000..b3416f8a410
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filters-complex.expected.json
@@ -0,0 +1,56 @@
+{
+ "start": 1623080040000,
+ "end": 1623166440000,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.qrserver.documents_covered.count"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filterId": "filter-ni8"
+ }
+ ],
+ "filters": [
+ {
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "NOT",
+ "filter": {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.app1.instance1",
+ "tagKey": "applicationId"
+ }
+ },
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ },
+ {
+ "type": "TagValueRegex",
+ "filter": "^(tenant2)\\..*",
+ "tagKey": "applicationId"
+ }
+ ]
+ },
+ "id": "filter-ni8"
+ }
+ ],
+ "serdesConfigs": [
+ {
+ "id": "JsonV3QuerySerdes",
+ "filter": [
+ "summarizer"
+ ]
+ }
+ ],
+ "logLevel": "ERROR",
+ "cacheMode": null
+}
diff --git a/controller-server/src/test/resources/horizon/filters-complex.json b/controller-server/src/test/resources/horizon/filters-complex.json
new file mode 100644
index 00000000000..3acc7fe5044
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filters-complex.json
@@ -0,0 +1,46 @@
+{
+ "start": 1623080040000,
+ "end": 1623166440000,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.qrserver.documents_covered.count"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filterId": "filter-ni8"
+ }
+ ],
+ "filters": [
+ {
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "NOT",
+ "filter": {
+ "type": "TagValueLiteralOr",
+ "filter": "tenant1.app1.instance1",
+ "tagKey": "applicationId"
+ }
+ }
+ ]
+ },
+ "id": "filter-ni8"
+ }
+ ],
+ "serdesConfigs": [
+ {
+ "id": "JsonV3QuerySerdes",
+ "filter": [
+ "summarizer"
+ ]
+ }
+ ],
+ "logLevel": "ERROR",
+ "cacheMode": null
+}
diff --git a/controller-server/src/test/resources/horizon/filters-meta-query.expected.json b/controller-server/src/test/resources/horizon/filters-meta-query.expected.json
new file mode 100644
index 00000000000..6c8cab217fa
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filters-meta-query.expected.json
@@ -0,0 +1,39 @@
+{
+ "from": 0,
+ "to": 1,
+ "order": "ASCENDING",
+ "type": "TAG_KEYS_AND_VALUES",
+ "source": "",
+ "aggregationSize": 1000,
+ "queries": [
+ {
+ "id": "id-0",
+ "namespace": "Vespa",
+ "filter": {
+ "type": "Chain",
+ "filters": [
+ {
+ "type": "TagValueRegex",
+ "filter": ".*",
+ "tagKey": "applicationId"
+ },
+ {
+ "type": "MetricLiteral",
+ "metric": "vespa.distributor.vds.distributor.docsstored.average|vespa.searchnode.content.proton.resource_usage.disk.average|vespa.searchnode.content.proton.resource_usage.memory.average|vespa.container.peak_qps.max"
+ },
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ },
+ {
+ "type": "TagValueRegex",
+ "filter": "^(tenant2|tenant3)\\..*",
+ "tagKey": "applicationId"
+ }
+ ]
+ }
+ }
+ ],
+ "aggregationField": "applicationId"
+}
diff --git a/controller-server/src/test/resources/horizon/filters-meta-query.json b/controller-server/src/test/resources/horizon/filters-meta-query.json
new file mode 100644
index 00000000000..ed59bef5eaa
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/filters-meta-query.json
@@ -0,0 +1,29 @@
+{
+ "from": 0,
+ "to": 1,
+ "order": "ASCENDING",
+ "type": "TAG_KEYS_AND_VALUES",
+ "source": "",
+ "aggregationSize": 1000,
+ "queries": [
+ {
+ "id": "id-0",
+ "namespace": "Vespa",
+ "filter": {
+ "type": "Chain",
+ "filters": [
+ {
+ "type": "TagValueRegex",
+ "filter": ".*",
+ "tagKey": "applicationId"
+ },
+ {
+ "type": "MetricLiteral",
+ "metric": "vespa.distributor.vds.distributor.docsstored.average|vespa.searchnode.content.proton.resource_usage.disk.average|vespa.searchnode.content.proton.resource_usage.memory.average|vespa.container.peak_qps.max"
+ }
+ ]
+ }
+ }
+ ],
+ "aggregationField": "applicationId"
+} \ No newline at end of file
diff --git a/controller-server/src/test/resources/horizon/no-filters.expected.json b/controller-server/src/test/resources/horizon/no-filters.expected.json
new file mode 100644
index 00000000000..35decea21db
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/no-filters.expected.json
@@ -0,0 +1,32 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false,
+ "filter": {
+ "type": "Chain",
+ "op": "AND",
+ "filters": [
+ {
+ "type": "TagValueLiteralOr",
+ "filter": "public",
+ "tagKey": "system"
+ },
+ {
+ "type": "TagValueRegex",
+ "filter": "^(tenant2|tenant3)\\..*",
+ "tagKey": "applicationId"
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/controller-server/src/test/resources/horizon/no-filters.json b/controller-server/src/test/resources/horizon/no-filters.json
new file mode 100644
index 00000000000..3ff80feba02
--- /dev/null
+++ b/controller-server/src/test/resources/horizon/no-filters.json
@@ -0,0 +1,16 @@
+{
+ "start": 1619301600000,
+ "end": 1623161217471,
+ "executionGraph": [
+ {
+ "id": "q1_m1",
+ "type": "TimeSeriesDataSource",
+ "metric": {
+ "type": "MetricLiteral",
+ "metric": "Vespa.vespa.distributor.vds.distributor.docsstored.average"
+ },
+ "sourceId": null,
+ "fetchLast": false
+ }
+ ]
+} \ No newline at end of file
diff --git a/default_build_settings.cmake b/default_build_settings.cmake
index 326481e25c7..a61410ebf31 100644
--- a/default_build_settings.cmake
+++ b/default_build_settings.cmake
@@ -31,11 +31,7 @@ endfunction()
function(setup_vespa_default_build_settings_centos_8)
message("-- Setting up default build settings for centos 8")
set(DEFAULT_EXTRA_INCLUDE_DIRECTORY "${VESPA_DEPS}/include" "/usr/include/openblas" PARENT_SCOPE)
- if (VESPA_OS_DISTRO_NAME STREQUAL "CentOS Stream")
- set(DEFAULT_VESPA_LLVM_VERSION "11" PARENT_SCOPE)
- else()
- set(DEFAULT_VESPA_LLVM_VERSION "10" PARENT_SCOPE)
- endif()
+ set(DEFAULT_VESPA_LLVM_VERSION "11" PARENT_SCOPE)
endfunction()
function(setup_vespa_default_build_settings_darwin)
@@ -302,8 +298,8 @@ function(vespa_use_default_cxx_compiler)
unset(DEFAULT_CMAKE_CXX_COMPILER)
if(NOT DEFINED VESPA_COMPILER_VARIANT OR VESPA_COMPILER_VARIANT STREQUAL "gcc")
if(APPLE)
- set(DEFAULT_CMAKE_C_COMPILER "/usr/local/bin/gcc-10")
- set(DEFAULT_CMAKE_CXX_COMPILER "/usr/local/bin/g++-10")
+ set(DEFAULT_CMAKE_C_COMPILER "/usr/local/bin/gcc-11")
+ set(DEFAULT_CMAKE_CXX_COMPILER "/usr/local/bin/g++-11")
elseif(VESPA_OS_DISTRO_COMBINED STREQUAL "amzn 2")
set(DEFAULT_CMAKE_C_COMPILER "/usr/bin/gcc10-gcc")
set(DEFAULT_CMAKE_CXX_COMPILER "/usr/bin/gcc10-g++")
diff --git a/dist/vespa.spec b/dist/vespa.spec
index 34afc46c80c..13ea9a733e1 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -16,6 +16,7 @@
%define _create_vespa_user 1
%define _create_vespa_service 1
%define _defattr_is_vespa_vespa 0
+%define _command_cmake cmake3
Name: vespa
Version: _VESPA_VERSION_
@@ -42,11 +43,11 @@ BuildRequires: maven
%define _java_home /usr/lib/jvm/java-11-amazon-corretto.%{?_arch}
BuildRequires: python3-pytest
%else
-BuildRequires: devtoolset-9-gcc-c++
-BuildRequires: devtoolset-9-libatomic-devel
-BuildRequires: devtoolset-9-binutils
+BuildRequires: devtoolset-10-gcc-c++
+BuildRequires: devtoolset-10-libatomic-devel
+BuildRequires: devtoolset-10-binutils
BuildRequires: rh-maven35
-%define _devtoolset_enable /opt/rh/devtoolset-9/enable
+%define _devtoolset_enable /opt/rh/devtoolset-10/enable
%define _rhmaven35_enable /opt/rh/rh-maven35/enable
BuildRequires: python36-pytest
%endif
@@ -54,19 +55,9 @@ BuildRequires: vespa-pybind11-devel
BuildRequires: python3-devel
%endif
%if 0%{?el8}
-%if 0%{?centos}
-%global _centos_stream %(grep -qs '^NAME="CentOS Stream"' /etc/os-release && echo 1 || echo 0)
-%endif
-%if 0%{?_centos_stream}
BuildRequires: gcc-toolset-10-gcc-c++
BuildRequires: gcc-toolset-10-binutils
%define _devtoolset_enable /opt/rh/gcc-toolset-10/enable
-BuildRequires: vespa-boost-devel >= 1.75.0-1
-%else
-BuildRequires: gcc-toolset-9-gcc-c++
-BuildRequires: gcc-toolset-9-binutils
-%define _devtoolset_enable /opt/rh/gcc-toolset-9/enable
-%endif
BuildRequires: maven
BuildRequires: pybind11-devel
BuildRequires: python3-pytest
@@ -82,7 +73,7 @@ BuildRequires: python3-devel
%if 0%{?el7}
BuildRequires: cmake3
BuildRequires: llvm7.0-devel
-BuildRequires: vespa-boost-devel >= 1.59.0-6
+BuildRequires: vespa-boost-devel >= 1.76.0-1
BuildRequires: vespa-gtest >= 1.8.1-1
BuildRequires: vespa-icu-devel >= 65.1.0-1
BuildRequires: vespa-lz4-devel >= 1.9.2-2
@@ -101,12 +92,15 @@ BuildRequires: vespa-libzstd-devel >= 1.4.5-2
%endif
%if 0%{?el8}
BuildRequires: cmake >= 3.11.4-3
-%if 0%{?_centos_stream}
-BuildRequires: llvm-devel >= 11.0.0
+%if 0%{?centos}
+# Current cmake on CentOS 8 is broken and requires manually installing libarchive
+BuildRequires: libarchive
+%define _command_cmake cmake
+BuildRequires: (llvm-devel >= 11.0.0 and llvm-devel < 12)
%else
-BuildRequires: llvm-devel >= 10.0.1
+BuildRequires: (llvm-devel >= 10.0.1 and llvm-devel < 11)
%endif
-BuildRequires: boost-devel >= 1.66
+BuildRequires: vespa-boost-devel >= 1.76.0-1
BuildRequires: openssl-devel
BuildRequires: vespa-gtest >= 1.8.1-1
BuildRequires: vespa-lz4-devel >= 1.9.2-2
@@ -152,7 +146,7 @@ BuildRequires: gmock-devel
%endif
%if 0%{?el7} && 0%{?amzn2}
BuildRequires: vespa-xxhash-devel = 0.8.0
-BuildRequires: vespa-openblas-devel = 0.3.12
+BuildRequires: vespa-openblas-devel = 0.3.15
BuildRequires: vespa-re2-devel = 20190801
%else
BuildRequires: xxhash-devel >= 0.8.0
@@ -225,7 +219,7 @@ Requires: vespa-valgrind >= 3.17.0-1
%endif
%endif
%if 0%{?el8}
-%if 0%{?_centos_stream}
+%if 0%{?centos}
%define _vespa_llvm_version 11
%else
%define _vespa_llvm_version 10
@@ -315,7 +309,7 @@ Requires: vespa-libzstd >= 1.4.5-2
Requires: openblas
%else
%if 0%{?amzn2}
-Requires: vespa-openblas
+Requires: vespa-openblas = 0.3.15
%else
Requires: openblas-serial
%endif
@@ -353,10 +347,10 @@ Requires: libicu
Requires: openssl-libs
%endif
%if 0%{?el8}
-%if 0%{?_centos_stream}
-Requires: llvm-libs >= 11.0.0
+%if 0%{?centos}
+Requires: (llvm-libs >= 11.0.0 and llvm-libs < 12)
%else
-Requires: llvm-libs >= 10.0.1
+Requires: (llvm-libs >= 10.0.1 and llvm-libs < 11)
%endif
Requires: vespa-protobuf = 3.7.0-5.el8
%endif
@@ -488,7 +482,7 @@ mvn --batch-mode -e -N io.takari:maven:wrapper -Dmaven=3.6.3
%endif
%{?_use_mvn_wrapper:env VESPA_MAVEN_COMMAND=$(pwd)/mvnw }sh bootstrap.sh java
%{?_use_mvn_wrapper:./mvnw}%{!?_use_mvn_wrapper:mvn} --batch-mode -nsu -T 1C install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true
-cmake3 -DCMAKE_INSTALL_PREFIX=%{_prefix} \
+%{_command_cmake} -DCMAKE_INSTALL_PREFIX=%{_prefix} \
-DJAVA_HOME=$JAVA_HOME \
-DCMAKE_PREFIX_PATH=%{_vespa_deps_prefix} \
-DEXTRA_LINK_DIRECTORY="%{_extra_link_directory}" \
@@ -731,7 +725,7 @@ fi
%{_prefix}/bin/vespa-feed-client
%{_prefix}/conf/vespa-feed-client/logging.properties
%{_prefix}/lib/jars/vespa-http-client-jar-with-dependencies.jar
-%{_prefix}/lib/jars/vespa-feed-client-cli.jar
+%{_prefix}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar
%files config-model-fat
%if %{_defattr_is_vespa_vespa}
@@ -775,6 +769,7 @@ fi
%{_prefix}/lib/jars/config-model-api-jar-with-dependencies.jar
%{_prefix}/lib/jars/config-model-jar-with-dependencies.jar
%{_prefix}/lib/jars/config-provisioning-jar-with-dependencies.jar
+%{_prefix}/lib/jars/container-apache-http-client-bundle-jar-with-dependencies.jar
%{_prefix}/lib/jars/container-disc-jar-with-dependencies.jar
%{_prefix}/lib/jars/container-jersey2-jar-with-dependencies.jar
%{_prefix}/lib/jars/container-search-and-docproc-jar-with-dependencies.jar
diff --git a/document/src/main/java/com/yahoo/document/StructDataType.java b/document/src/main/java/com/yahoo/document/StructDataType.java
index 73fe580308e..8a153856eff 100644
--- a/document/src/main/java/com/yahoo/document/StructDataType.java
+++ b/document/src/main/java/com/yahoo/document/StructDataType.java
@@ -22,7 +22,7 @@ public class StructDataType extends BaseStructDataType {
super(name);
}
- public StructDataType(int id,String name) {
+ public StructDataType(int id, String name) {
super(id, name);
}
diff --git a/document/src/main/java/com/yahoo/document/StructuredDataType.java b/document/src/main/java/com/yahoo/document/StructuredDataType.java
index e4bb94a5465..8a5f344e79e 100644
--- a/document/src/main/java/com/yahoo/document/StructuredDataType.java
+++ b/document/src/main/java/com/yahoo/document/StructuredDataType.java
@@ -10,8 +10,6 @@ import java.util.Collection;
import java.util.List;
/**
- * TODO: What is this and why
- *
 * @author Håkon Humberset
*/
public abstract class StructuredDataType extends DataType {
diff --git a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
index 0449612da6f..f4139a597d2 100644
--- a/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
+++ b/document/src/main/java/com/yahoo/document/TemporaryStructuredDataType.java
@@ -2,7 +2,8 @@
package com.yahoo.document;
/**
- * Internal class, DO NOT USE!!&nbsp;Only public because it must be used from com.yahoo.searchdefinition.parser.
+ * Internal class, DO NOT USE!!
+ * Only public because it must be used from com.yahoo.searchdefinition.parser.
*
* @author Einar M R Rosenvinge
*/
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java
index 94dfabb2c4f..26b7cb71f2d 100755
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/ExternPolicy.java
@@ -59,7 +59,7 @@ public class ExternPolicy implements DocumentProtocolRoutingPolicy {
pattern = args[1];
session = pattern.substring(pos);
orb = new Supervisor(new Transport("externpolicy"));
- orb.useSmallBuffers();
+ orb.setDropEmptyBuffers(true);
mirror = new Mirror(orb, slobroks);
error = null;
}
diff --git a/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java b/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java
index b3ae9d2bd0c..e4cddf0a606 100644
--- a/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java
+++ b/documentgen-test/src/test/java/com/yahoo/vespa/config/DocumentGenPluginTest.java
@@ -61,7 +61,6 @@ import com.yahoo.vespa.documentgen.test.annotation.Person;
import org.junit.Ignore;
import org.junit.Test;
-import java.lang.Class;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.nio.ByteBuffer;
@@ -151,18 +150,18 @@ public class DocumentGenPluginTest {
assertEquals(music.getFieldValue(artist), bingoAstroburger);
assertEquals(music.getFieldValue("artist"), bingoAstroburger);
- assertEquals(music.getFieldValue(new Field("nonexisting")), null);
- assertEquals(music.getFieldValue("nono"), null);
- assertEquals(music.getField("nope"), null);
- assertEquals(music.getFieldValue(new Field("nada")), null);
- assertEquals(music.getFieldValue("zilch"), null);
- assertEquals(music.getFieldValue("zero"), null);
-
- assertEquals(music.removeFieldValue("nothere"), null);
- assertEquals(music.removeFieldValue(new Field("nothereno")), null);
- assertEquals(music.removeFieldValue(new Field("invalid")), null);
- assertEquals(music.removeFieldValue("goner"), null);
- assertEquals(music.removeFieldValue("absent"), null);
+ assertNull(music.getFieldValue(new Field("nonexisting")));
+ assertNull(music.getFieldValue("nono"));
+ assertNull(music.getField("nope"));
+ assertNull(music.getFieldValue(new Field("nada")));
+ assertNull(music.getFieldValue("zilch"));
+ assertNull(music.getFieldValue("zero"));
+
+ assertNull(music.removeFieldValue("nothere"));
+ assertNull(music.removeFieldValue(new Field("nothereno")));
+ assertNull(music.removeFieldValue(new Field("invalid")));
+ assertNull(music.removeFieldValue("goner"));
+ assertNull(music.removeFieldValue("absent"));
}
@Test
@@ -193,12 +192,12 @@ public class DocumentGenPluginTest {
Book book = getBook();
book.setAuthor(null);
Field a = new Field("author", DataType.STRING);
- assertEquals(book.getFieldValue("author"), null);
- assertEquals(book.getFieldValue(a), null);
+ assertNull(book.getFieldValue("author"));
+ assertNull(book.getFieldValue(a));
assertEquals(book.getField("author"), a);
- assertEquals(book.getFieldValue(a), null);
- assertEquals(book.getFieldValue("author"), null);
- assertEquals(book.getFieldValue("author"), null);
+ assertNull(book.getFieldValue(a));
+ assertNull(book.getFieldValue("author"));
+ assertNull(book.getFieldValue("author"));
book.removeFieldValue("isbn");
book.removeFieldValue(new Field("year", DataType.INT));
@@ -207,26 +206,26 @@ public class DocumentGenPluginTest {
assertEquals(old.get(0), new IntegerFieldValue(10));
book.removeFieldValue("stringmap");
book.removeFieldValue("mywsfloat");
- assertEquals(book.getIsbn(), null);
- assertEquals(book.getYear(), null);
- assertEquals(book.getDescription(), null);
- assertEquals(book.getStringmap(), null);
- assertEquals(book.getMyarrayint(), null);
- assertEquals(book.getMywsfloat(), null);
+ assertNull(book.getIsbn());
+ assertNull(book.getYear());
+ assertNull(book.getDescription());
+ assertNull(book.getStringmap());
+ assertNull(book.getMyarrayint());
+ assertNull(book.getMywsfloat());
Music music = getMusicBasic();
Field artist = music.getField("artist");
Field year = music.getField("year");
music.removeFieldValue(artist);
- assertEquals(music.getArtist(), null);
+ assertNull(music.getArtist());
music.removeFieldValue("disp_song");
- assertEquals(music.getDisp_song(), null);
+ assertNull(music.getDisp_song());
music.removeFieldValue(year);
- assertEquals(music.getYear(), null);
+ assertNull(music.getYear());
music.removeFieldValue("uri");
- assertEquals(music.getUri(), null);
+ assertNull(music.getUri());
music.removeFieldValue("weight_src");
- assertEquals(music.getWeight_src(), null);
+ assertNull(music.getWeight_src());
}
@Test
@@ -393,12 +392,12 @@ public class DocumentGenPluginTest {
Person p2 = new Person();
p2.setName("H. Melville");
descTree.annotate(p2);
- book.setDescriptionSpanTrees(new HashMap<String, SpanTree>(){{ put(descTree.getName(), descTree); }});
+ book.setDescriptionSpanTrees(new HashMap<>(){{ put(descTree.getName(), descTree); }});
assertEquals(((Person) ((StringFieldValue) book.getFieldValue(book.getField("description"))).getSpanTrees().iterator().next().iterator().next()).getName(),
"H. Melville");
assertEquals(((Person) ((StringFieldValue) book.removeFieldValue("description")).getSpanTrees().iterator().next().iterator().next()).getName(), "H. Melville");
- assertEquals(book.descriptionSpanTrees(), null);
- assertEquals((book.getFieldValue("description")), null);
+ assertNull(book.descriptionSpanTrees());
+ assertNull((book.getFieldValue("description")));
Artist a = new Artist();
assertTrue(Person.class.isInstance(a));
assertEquals(((StructDataType) a.getType().getDataType()).getField("name").getDataType(), DataType.STRING);
@@ -553,13 +552,13 @@ public class DocumentGenPluginTest {
private Book newBookConcrete(int i) {
Book book = new Book(new DocumentId("id:book:book::"+i));
book.setAuthor("Melville");
- Date date = new Date().setExacttime(99l);
- book.setTitleSpanTrees(new HashMap<String, SpanTree>());
+ Date date = new Date().setExacttime(99L);
+ book.setTitleSpanTrees(new HashMap<>());
SpanTree t = new SpanTree().annotate(date);
book.titleSpanTrees().put(t.getName(), t);
book.setTitle("Moby Dick");
book.setYear(1851);
- book.setMystruct(new Ss1().setSs01(new Ss0().setS0("My s0").setD0(99d)).setS1("My s1").setL1(89l));//.setAl1(myAs1));
+ book.setMystruct(new Ss1().setSs01(new Ss0().setS0("My s0").setD0(99d)).setS1("My s1").setL1(89L));//.setAl1(myAs1));
Map<Float, Integer> wsFloat = new HashMap<>();
wsFloat.put(56f, 55);
wsFloat.put(57f, 54);
@@ -587,7 +586,7 @@ public class DocumentGenPluginTest {
AnnotationType dateType = mgr.getAnnotationTypeRegistry().getType("date");
Struct dateStruct = new Struct(mgr.getAnnotationTypeRegistry().getType("date").getDataType());
- dateStruct.setFieldValue("exacttime", new LongFieldValue(99l));
+ dateStruct.setFieldValue("exacttime", new LongFieldValue(99L));
Annotation date = new Annotation(dateType);
date.setFieldValue(dateStruct);
titleTree.annotate(date);
@@ -637,7 +636,7 @@ public class DocumentGenPluginTest {
AnnotationType dateType = mgr.getAnnotationTypeRegistry().getType("date");
Struct dateStruct = new Struct(mgr.getAnnotationTypeRegistry().getType("date").getDataType());
- dateStruct.setFieldValue("exacttime", new LongFieldValue(99l));
+ dateStruct.setFieldValue("exacttime", new LongFieldValue(99L));
Annotation date = new Annotation(dateType);
date.setFieldValue(dateStruct);
titleTree.annotate(date);
@@ -647,7 +646,7 @@ public class DocumentGenPluginTest {
assertEquals(titleCheck.getWrappedValue(), "Moby Dick");
SpanTree treeCheck = titleCheck.getSpanTrees().iterator().next();
Annotation titleAnnCheck = treeCheck.iterator().next();
- assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99l);
+ assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99L);
bookGeneric.setFieldValue("year", new IntegerFieldValue(1851));
Struct myS0 = new Struct(mgr.getDataType("ss0"));
@@ -689,7 +688,7 @@ public class DocumentGenPluginTest {
assertEquals(book.getMystruct().getAs1().get(1), "as1_2");
treeCheck = book.titleSpanTrees().values().iterator().next();
titleAnnCheck = treeCheck.iterator().next();
- assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99l);
+ assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99L);
Book book2 = new Book(book, book.getId());
assertEquals(book2.getId(), bookGeneric.getId());
@@ -704,7 +703,7 @@ public class DocumentGenPluginTest {
assertEquals(book2.getMystruct().getAs1().get(1), "as1_2");
treeCheck = book2.titleSpanTrees().values().iterator().next();
titleAnnCheck = treeCheck.iterator().next();
- assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99l);
+ assertEquals(((StructuredFieldValue) titleAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 99L);
}
@Test
@@ -712,13 +711,13 @@ public class DocumentGenPluginTest {
Book b = (Book) ConcreteDocumentFactory.getDocument("book", new DocumentId("id:book:book::10"));
b.setAuthor("Per Ulv");
final Date d = (Date) ConcreteDocumentFactory.getAnnotation("date");
- d.setExacttime(79l);
- b.setAuthorSpanTrees(new HashMap<String, SpanTree>() {{ put("root", new SpanTree("root").annotate(d)); }});
+ d.setExacttime(79L);
+ b.setAuthorSpanTrees(new HashMap<>() {{ put("root", new SpanTree("root").annotate(d)); }});
StringFieldValue authorCheck=(StringFieldValue) b.getFieldValue("author");
assertEquals(authorCheck.getWrappedValue(), "Per Ulv");
SpanTree treeCheck = authorCheck.getSpanTrees().iterator().next();
Annotation authorAnnCheck = treeCheck.iterator().next();
- assertEquals(((Struct) authorAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 79l);
+ assertEquals(((Struct) authorAnnCheck.getFieldValue()).getFieldValue("exacttime").getWrappedValue(), 79L);
b.setMystruct(((Ss1) ConcreteDocumentFactory.getStruct("ss1")).setS1("Test s1!"));
assertEquals(((Struct) b.getFieldValue("mystruct")).getFieldValue("s1").getWrappedValue(), "Test s1!");
@@ -761,7 +760,7 @@ public class DocumentGenPluginTest {
}
private String className(String s) {
- return s.substring(0, 1).toUpperCase()+s.substring(1, s.length());
+ return s.substring(0, 1).toUpperCase()+s.substring(1);
}
private Music getMusicBasic() {
@@ -799,8 +798,8 @@ public class DocumentGenPluginTest {
myArrInt.add(30);
book.setMyarrayint(myArrInt);
- List<Integer> intL = new ArrayList<Integer>(){{add(1);add(2);add(3);}};
- List<Integer> intL2 = new ArrayList<Integer>(){{add(9);add(10);add(11);}};
+ List<Integer> intL = new ArrayList<>(){{add(1);add(2);add(3);}};
+ List<Integer> intL2 = new ArrayList<>(){{add(9);add(10);add(11);}};
List<List<Integer>> doubleIntL = new ArrayList<>();
doubleIntL.add(intL);
doubleIntL.add(intL2);
@@ -861,7 +860,6 @@ public class DocumentGenPluginTest {
}
@Test
- @SuppressWarnings("deprecation")
public void testSerialization() {
final Book book = getBook();
assertEquals(book.getMystruct().getD1(), (Double)56.777);
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index 302b6768cea..16c9c72d8a5 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -5,6 +5,7 @@ vespa_define_module(
staging_vespalib
APPS
+ src/apps/analyze_onnx_model
src/apps/eval_expr
src/apps/make_tensor_binary_format_test_spec
src/apps/tensor_conformance
diff --git a/eval/src/apps/analyze_onnx_model/.gitignore b/eval/src/apps/analyze_onnx_model/.gitignore
new file mode 100644
index 00000000000..12ce20b03ba
--- /dev/null
+++ b/eval/src/apps/analyze_onnx_model/.gitignore
@@ -0,0 +1 @@
+/vespa-analyze-onnx-model
diff --git a/eval/src/apps/analyze_onnx_model/CMakeLists.txt b/eval/src/apps/analyze_onnx_model/CMakeLists.txt
new file mode 100644
index 00000000000..47cbb6504f4
--- /dev/null
+++ b/eval/src/apps/analyze_onnx_model/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(vespa-analyze-onnx-model
+ SOURCES
+ analyze_onnx_model.cpp
+ INSTALL bin
+ DEPENDS
+ vespaeval
+)
diff --git a/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp b/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp
new file mode 100644
index 00000000000..3f56610dcaa
--- /dev/null
+++ b/eval/src/apps/analyze_onnx_model/analyze_onnx_model.cpp
@@ -0,0 +1,208 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/onnx/onnx_wrapper.h>
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/vespalib/util/benchmark_timer.h>
+#include <vespa/vespalib/util/require.h>
+#include <vespa/vespalib/util/guard.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+using vespalib::make_string_short::fmt;
+
+using vespalib::FilePointer;
+using namespace vespalib::eval;
+
+bool read_line(FilePointer &file, vespalib::string &line) {
+ char line_buffer[1024];
+ char *res = fgets(line_buffer, sizeof(line_buffer), file.fp());
+ if (res == nullptr) {
+ line.clear();
+ return false;
+ }
+ line = line_buffer;
+ while (!line.empty() && isspace(line[line.size() - 1])) {
+ line.pop_back();
+ }
+ return true;
+}
+
+void extract(const vespalib::string &str, const vespalib::string &prefix, vespalib::string &dst) {
+ if (starts_with(str, prefix)) {
+ size_t pos = prefix.size();
+ while ((str.size() > pos) && isspace(str[pos])) {
+ ++pos;
+ }
+ dst = str.substr(pos);
+ }
+}
+
+void report_memory_usage(const vespalib::string &desc) {
+ vespalib::string vm_size = "unknown";
+ vespalib::string vm_rss = "unknown";
+ vespalib::string line;
+ FilePointer file(fopen("/proc/self/status", "r"));
+ while (read_line(file, line)) {
+ extract(line, "VmSize:", vm_size);
+ extract(line, "VmRSS:", vm_rss);
+ }
+ fprintf(stderr, "vm_size: %s, vm_rss: %s (%s)\n", vm_size.c_str(), vm_rss.c_str(), desc.c_str());
+}
+
+struct Options {
+ size_t pos = 0;
+ std::vector<vespalib::string> opt_list;
+ void add_option(const vespalib::string &opt) {
+ opt_list.push_back(opt);
+ }
+ vespalib::string get_option(const vespalib::string &desc, const vespalib::string &fallback) {
+ vespalib::string opt;
+ if (pos < opt_list.size()) {
+ opt = opt_list[pos];
+ fprintf(stderr, "option[%zu](%s): %s\n",
+ pos, desc.c_str(), opt.c_str());
+ } else {
+ opt = fallback;
+ fprintf(stderr, "unspecified option[%zu](%s), fallback: %s\n",
+ pos, desc.c_str(), fallback.c_str());
+ }
+ ++pos;
+ return opt;
+ }
+ bool get_bool_opt(const vespalib::string &desc, const vespalib::string &fallback) {
+ auto opt = get_option(desc, fallback);
+ REQUIRE((opt == "true") || (opt == "false"));
+ return (opt == "true");
+ }
+ size_t get_size_opt(const vespalib::string &desc, const vespalib::string &fallback) {
+ auto opt = get_option(desc, fallback);
+ size_t value = atoi(opt.c_str());
+ REQUIRE(value > 0);
+ return value;
+ }
+};
+
+void dump_model_info(const Onnx &model) {
+ fprintf(stderr, "model meta-data:\n");
+ for (size_t i = 0; i < model.inputs().size(); ++i) {
+ fprintf(stderr, " input[%zu]: '%s' %s\n", i, model.inputs()[i].name.c_str(), model.inputs()[i].type_as_string().c_str());
+ }
+ for (size_t i = 0; i < model.outputs().size(); ++i) {
+ fprintf(stderr, " output[%zu]: '%s' %s\n", i, model.outputs()[i].name.c_str(), model.outputs()[i].type_as_string().c_str());
+ }
+}
+
+void dump_wire_info(const Onnx::WireInfo &wire) {
+ fprintf(stderr, "test setup:\n");
+ REQUIRE_EQ(wire.vespa_inputs.size(), wire.onnx_inputs.size());
+ for (size_t i = 0; i < wire.vespa_inputs.size(); ++i) {
+ fprintf(stderr, " input[%zu]: %s -> %s\n", i, wire.vespa_inputs[i].to_spec().c_str(), wire.onnx_inputs[i].type_as_string().c_str());
+ }
+ REQUIRE_EQ(wire.onnx_outputs.size(), wire.vespa_outputs.size());
+ for (size_t i = 0; i < wire.onnx_outputs.size(); ++i) {
+ fprintf(stderr, " output[%zu]: %s -> %s\n", i, wire.onnx_outputs[i].type_as_string().c_str(), wire.vespa_outputs[i].to_spec().c_str());
+ }
+}
+
+struct MakeInputType {
+ Options &opts;
+ std::map<vespalib::string,int> symbolic_sizes;
+ MakeInputType(Options &opts_in) : opts(opts_in), symbolic_sizes() {}
+ ValueType operator()(const Onnx::TensorInfo &info) {
+ int d = 0;
+ std::vector<ValueType::Dimension> dim_list;
+ for (const auto &dim: info.dimensions) {
+ REQUIRE(d <= 9);
+ size_t size = 0;
+ if (dim.is_known()) {
+ size = dim.value;
+ } else if (dim.is_symbolic()) {
+ size = symbolic_sizes[dim.name];
+ if (size == 0) {
+ size = opts.get_size_opt(fmt("symbolic size '%s'", dim.name.c_str()), "1");
+ symbolic_sizes[dim.name] = size;
+ }
+ } else {
+ size = opts.get_size_opt(fmt("size of input '%s' dimension %d", info.name.c_str(), d), "1");
+ }
+ dim_list.emplace_back(fmt("d%d", d), size);
+ ++d;
+ }
+ return ValueType::make_type(Onnx::WirePlanner::best_cell_type(info.elements), std::move(dim_list));
+ }
+};
+
+Onnx::WireInfo make_plan(Options &opts, const Onnx &model) {
+ Onnx::WirePlanner planner;
+ MakeInputType make_input_type(opts);
+ for (const auto &input: model.inputs()) {
+ auto type = make_input_type(input);
+ REQUIRE(planner.bind_input_type(type, input));
+ }
+ for (const auto &output: model.outputs()) {
+ REQUIRE(!planner.make_output_type(output).is_error());
+ }
+ return planner.get_wire_info(model);
+}
+
+struct MyEval {
+ Onnx::EvalContext context;
+ std::vector<Value::UP> inputs;
+ MyEval(const Onnx &model, const Onnx::WireInfo &wire) : context(model, wire), inputs() {
+ for (const auto &input_type: wire.vespa_inputs) {
+ TensorSpec spec(input_type.to_spec());
+ inputs.push_back(value_from_spec(spec, FastValueBuilderFactory::get()));
+ }
+ }
+ void eval() {
+ for (size_t i = 0; i < inputs.size(); ++i) {
+ context.bind_param(i, *inputs[i]);
+ }
+ context.eval();
+ }
+};
+
+int usage(const char *self) {
+ fprintf(stderr, "usage: %s <onnx-model> [options...]\n", self);
+ fprintf(stderr, " load onnx model and report memory usage\n");
+ fprintf(stderr, " options are used to specify unknown values, like dimension sizes\n");
+ fprintf(stderr, " options are accepted in the order in which they are needed\n");
+ fprintf(stderr, " tip: run without options first, to see which you need\n");
+ return 1;
+}
+
+int main(int argc, char **argv) {
+ if (argc < 2) {
+ return usage(argv[0]);
+ }
+ Options opts;
+ for (int i = 2; i < argc; ++i) {
+ opts.add_option(argv[i]);
+ }
+ Onnx::Optimize optimize = opts.get_bool_opt("optimize model", "true")
+ ? Onnx::Optimize::ENABLE : Onnx::Optimize::DISABLE;
+ report_memory_usage("before loading model");
+ Onnx model(argv[1], optimize);
+ report_memory_usage("after loading model");
+ dump_model_info(model);
+ auto wire_info = make_plan(opts, model);
+ dump_wire_info(wire_info);
+ std::vector<std::unique_ptr<MyEval>> eval_list;
+ size_t max_concurrent = opts.get_size_opt("max concurrent evaluations", "1");
+ report_memory_usage("no evaluations yet");
+ for (size_t i = 1; i <= max_concurrent; ++i) {
+ eval_list.push_back(std::make_unique<MyEval>(model, wire_info));
+ eval_list.back()->eval();
+ if ((i % 8) == 0) {
+ report_memory_usage(fmt("concurrent evaluations: %zu", i));
+ }
+ }
+ if ((max_concurrent % 8) != 0) {
+ report_memory_usage(fmt("concurrent evaluations: %zu", max_concurrent));
+ }
+ eval_list.resize(1);
+ double min_time_s = vespalib::BenchmarkTimer::benchmark([&e = *eval_list.back()](){ e.eval(); }, 10.0);
+ fprintf(stderr, "estimated model evaluation time: %g ms\n", min_time_s * 1000.0);
+ return 0;
+}
diff --git a/eval/src/apps/tensor_conformance/generate.cpp b/eval/src/apps/tensor_conformance/generate.cpp
index 9ae33c1234f..8a596ad38d4 100644
--- a/eval/src/apps/tensor_conformance/generate.cpp
+++ b/eval/src/apps/tensor_conformance/generate.cpp
@@ -232,11 +232,24 @@ void generate_join_expr(const vespalib::string &expr, const Sequence &seq, TestB
}
}
+void generate_join_expr(const vespalib::string &expr, const Sequence &seq_a, const Sequence &seq_b, TestBuilder &dst) {
+ for (const auto &layouts: join_layouts) {
+ GenSpec a = GenSpec::from_desc(layouts.first).seq(seq_a);
+ GenSpec b = GenSpec::from_desc(layouts.second).seq(seq_b);
+ generate(expr, a, b, dst);
+ }
+}
+
void generate_op2_join(const vespalib::string &op2_expr, const Sequence &seq, TestBuilder &dst) {
generate_join_expr(op2_expr, seq, dst);
generate_join_expr(fmt("join(a,b,f(a,b)(%s))", op2_expr.c_str()), seq, dst);
}
+void generate_op2_join(const vespalib::string &op2_expr, const Sequence &seq_a, const Sequence &seq_b, TestBuilder &dst) {
+ generate_join_expr(op2_expr, seq_a, seq_b, dst);
+ generate_join_expr(fmt("join(a,b,f(a,b)(%s))", op2_expr.c_str()), seq_a, seq_b, dst);
+}
+
void generate_join(TestBuilder &dst) {
generate_op2_join("a+b", Div16(N()), dst);
generate_op2_join("a-b", Div16(N()), dst);
@@ -259,6 +272,7 @@ void generate_join(TestBuilder &dst) {
generate_op2_join("fmod(a,b)", Div16(N()), dst);
generate_op2_join("min(a,b)", Div16(N()), dst);
generate_op2_join("max(a,b)", Div16(N()), dst);
+ generate_op2_join("bit(a,b)", Seq({-128, -43, -1, 0, 85, 127}), Seq({0, 1, 2, 3, 4, 5, 6, 7}), dst);
// inverted lambda
generate_join_expr("join(a,b,f(a,b)(b-a))", Div16(N()), dst);
// custom lambda
@@ -276,11 +290,24 @@ void generate_merge_expr(const vespalib::string &expr, const Sequence &seq, Test
}
}
+void generate_merge_expr(const vespalib::string &expr, const Sequence &seq_a, const Sequence &seq_b, TestBuilder &dst) {
+ for (const auto &layouts: merge_layouts) {
+ GenSpec a = GenSpec::from_desc(layouts.first).seq(seq_a);
+ GenSpec b = GenSpec::from_desc(layouts.second).seq(seq_b);
+ generate(expr, a, b, dst);
+ }
+}
+
void generate_op2_merge(const vespalib::string &op2_expr, const Sequence &seq, TestBuilder &dst) {
generate_merge_expr(op2_expr, seq, dst);
generate_merge_expr(fmt("merge(a,b,f(a,b)(%s))", op2_expr.c_str()), seq, dst);
}
+void generate_op2_merge(const vespalib::string &op2_expr, const Sequence &seq_a, const Sequence &seq_b, TestBuilder &dst) {
+ generate_merge_expr(op2_expr, seq_a, seq_b, dst);
+ generate_merge_expr(fmt("merge(a,b,f(a,b)(%s))", op2_expr.c_str()), seq_a, seq_b, dst);
+}
+
void generate_merge(TestBuilder &dst) {
generate_op2_merge("a+b", Div16(N()), dst);
generate_op2_merge("a-b", Div16(N()), dst);
@@ -303,6 +330,7 @@ void generate_merge(TestBuilder &dst) {
generate_op2_merge("fmod(a,b)", Div16(N()), dst);
generate_op2_merge("min(a,b)", Div16(N()), dst);
generate_op2_merge("max(a,b)", Div16(N()), dst);
+ generate_op2_merge("bit(a,b)", Seq({-128, -43, -1, 0, 85, 127}), Seq({0, 1, 2, 3, 4, 5, 6, 7}), dst);
// inverted lambda
generate_merge_expr("merge(a,b,f(a,b)(b-a))", Div16(N()), dst);
// custom lambda
diff --git a/eval/src/tests/eval/inline_operation/inline_operation_test.cpp b/eval/src/tests/eval/inline_operation/inline_operation_test.cpp
index de5a3fbf395..ae5f503b680 100644
--- a/eval/src/tests/eval/inline_operation/inline_operation_test.cpp
+++ b/eval/src/tests/eval/inline_operation/inline_operation_test.cpp
@@ -115,6 +115,7 @@ TEST(InlineOperationTest, op2_lambdas_are_recognized) {
EXPECT_EQ(as_op2("fmod(a,b)"), &Mod::f);
EXPECT_EQ(as_op2("min(a,b)"), &Min::f);
EXPECT_EQ(as_op2("max(a,b)"), &Max::f);
+ EXPECT_EQ(as_op2("bit(a,b)"), &Bit::f);
}
TEST(InlineOperationTest, op2_lambdas_are_recognized_with_different_parameter_names) {
diff --git a/eval/src/tests/eval/node_tools/node_tools_test.cpp b/eval/src/tests/eval/node_tools/node_tools_test.cpp
index 13185065f57..e8296c01d73 100644
--- a/eval/src/tests/eval/node_tools/node_tools_test.cpp
+++ b/eval/src/tests/eval/node_tools/node_tools_test.cpp
@@ -100,6 +100,7 @@ TEST("require that call node types can be copied") {
TEST_DO(verify_copy("sigmoid(a)"));
TEST_DO(verify_copy("elu(a)"));
TEST_DO(verify_copy("erf(a)"));
+ TEST_DO(verify_copy("bit(a,b)"));
}
TEST("require that tensor node types can NOT be copied (yet)") {
diff --git a/eval/src/tests/eval/node_types/node_types_test.cpp b/eval/src/tests/eval/node_types/node_types_test.cpp
index 504f66ac717..b2373f0d8f5 100644
--- a/eval/src/tests/eval/node_types/node_types_test.cpp
+++ b/eval/src/tests/eval/node_types/node_types_test.cpp
@@ -218,6 +218,7 @@ TEST("require that various operations resolve appropriate type") {
TEST_DO(verify_op1("sigmoid(%s)")); // Sigmoid
TEST_DO(verify_op1("elu(%s)")); // Elu
TEST_DO(verify_op1("erf(%s)")); // Erf
+ TEST_DO(verify_op2("bit(%s,%s)")); // Bit
}
TEST("require that map resolves correct type") {
diff --git a/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp b/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
index 54f958f8111..6b45172ef80 100644
--- a/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
+++ b/eval/src/tests/tensor/onnx_wrapper/onnx_wrapper_test.cpp
@@ -3,6 +3,7 @@
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/int8float.h>
#include <vespa/eval/onnx/onnx_wrapper.h>
+#include <vespa/eval/onnx/onnx_model_cache.h>
#include <vespa/vespalib/util/bfloat16.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/gtest/gtest.h>
@@ -443,4 +444,23 @@ TEST(OnnxTest, default_allocator_type) {
fprintf(stderr, "default allocator type: %d\n", int(res));
}
+TEST(OnnxModelCacheTest, share_and_evict_onnx_models) {
+ {
+ auto simple1 = OnnxModelCache::load(simple_model);
+ auto simple2 = OnnxModelCache::load(simple_model);
+ auto dynamic1 = OnnxModelCache::load(dynamic_model);
+ auto dynamic2 = OnnxModelCache::load(dynamic_model);
+ auto dynamic3 = OnnxModelCache::load(dynamic_model);
+ EXPECT_EQ(simple1->get().inputs().size(), 3);
+ EXPECT_EQ(dynamic1->get().inputs().size(), 3);
+ EXPECT_EQ(&(simple1->get()), &(simple2->get()));
+ EXPECT_EQ(&(dynamic1->get()), &(dynamic2->get()));
+ EXPECT_EQ(&(dynamic2->get()), &(dynamic3->get()));
+ EXPECT_EQ(OnnxModelCache::num_cached(), 2);
+ EXPECT_EQ(OnnxModelCache::count_refs(), 5);
+ }
+ EXPECT_EQ(OnnxModelCache::num_cached(), 0);
+ EXPECT_EQ(OnnxModelCache::count_refs(), 0);
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/vespa/eval/eval/aggr.h b/eval/src/vespa/eval/eval/aggr.h
index 516ead0f0bf..fe70107af79 100644
--- a/eval/src/vespa/eval/eval/aggr.h
+++ b/eval/src/vespa/eval/eval/aggr.h
@@ -10,13 +10,10 @@
#include <algorithm>
#include <cmath>
-namespace vespalib {
+namespace vespalib { class Stash; }
-class Stash;
+namespace vespalib::eval {
-namespace eval {
-
-struct BinaryOperation;
/**
* Enumeration of all different aggregators that are allowed to be
@@ -237,4 +234,3 @@ struct TypifyAggr {
};
} // namespace vespalib::eval
-} // namespace vespalib
diff --git a/eval/src/vespa/eval/eval/array_array_map.h b/eval/src/vespa/eval/eval/array_array_map.h
index 89fa0c77819..f49e3e4edbb 100644
--- a/eval/src/vespa/eval/eval/array_array_map.h
+++ b/eval/src/vespa/eval/eval/array_array_map.h
@@ -116,7 +116,7 @@ private:
_keys.push_back(k);
}
}
- _values.resize(_values.size() + _values_per_entry, V{});
+ _values.resize(_values.size() + _values_per_entry);
auto [pos, was_inserted] = _map.insert(MyKey{{tag_id},hash});
assert(was_inserted);
return Tag{tag_id};
diff --git a/eval/src/vespa/eval/eval/call_nodes.cpp b/eval/src/vespa/eval/eval/call_nodes.cpp
index 2fc25bdbc77..798583cf89a 100644
--- a/eval/src/vespa/eval/eval/call_nodes.cpp
+++ b/eval/src/vespa/eval/eval/call_nodes.cpp
@@ -43,6 +43,7 @@ CallRepo::CallRepo() : _map() {
add(nodes::Sigmoid());
add(nodes::Elu());
add(nodes::Erf());
+ add(nodes::Bit());
}
} // namespace vespalib::eval::nodes
diff --git a/eval/src/vespa/eval/eval/call_nodes.h b/eval/src/vespa/eval/eval/call_nodes.h
index 2a7d4173e64..945aba69596 100644
--- a/eval/src/vespa/eval/eval/call_nodes.h
+++ b/eval/src/vespa/eval/eval/call_nodes.h
@@ -139,6 +139,7 @@ struct Relu : CallHelper<Relu> { Relu() : Helper("relu", 1) {} };
struct Sigmoid : CallHelper<Sigmoid> { Sigmoid() : Helper("sigmoid", 1) {} };
struct Elu : CallHelper<Elu> { Elu() : Helper("elu", 1) {} };
struct Erf : CallHelper<Erf> { Erf() : Helper("erf", 1) {} };
+struct Bit : CallHelper<Bit> { Bit() : Helper("bit", 2) {} };
//-----------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/extract_bit.h b/eval/src/vespa/eval/eval/extract_bit.h
new file mode 100644
index 00000000000..ecf56b33b02
--- /dev/null
+++ b/eval/src/vespa/eval/eval/extract_bit.h
@@ -0,0 +1,13 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace vespalib::eval {
+
+inline double extract_bit(double a, double b) {
+ int8_t value = (int8_t) a;
+ uint32_t n = (uint32_t) b;
+ return ((n < 8) && bool(value & (1 << n))) ? 1.0 : 0.0;
+}
+
+}
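
Note: the bit(a,b) expression function wired in throughout this diff (call_nodes, key_gen, the LLVM wrapper and the reference evaluators) bottoms out in this helper: a is reinterpreted as a signed 8-bit value and bit b (0 is the least significant bit, 7 the sign bit) is returned as 1.0 or 0.0, with out-of-range bit indices yielding 0.0. Below is a minimal sketch of the expected behavior, mirroring the -43 cases added to eval_spec.cpp later in this diff; the standalone main() and include path are assumptions, not part of the change.

    // Sketch only: exercises extract_bit directly, assuming the eval headers are available.
    #include <vespa/eval/eval/extract_bit.h>
    #include <cassert>

    int main() {
        using vespalib::eval::extract_bit;
        // -43 as int8 is 0b11010101, so bits 0, 2, 4, 6 and 7 are set.
        assert(extract_bit(-43, 0) == 1.0);
        assert(extract_bit(-43, 1) == 0.0);
        assert(extract_bit(-43, 7) == 1.0);  // sign bit
        assert(extract_bit(-43, 8) == 0.0);  // bit index out of range -> 0.0
        return 0;
    }
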
diff --git a/eval/src/vespa/eval/eval/key_gen.cpp b/eval/src/vespa/eval/eval/key_gen.cpp
index a8fb205f124..a40a8887119 100644
--- a/eval/src/vespa/eval/eval/key_gen.cpp
+++ b/eval/src/vespa/eval/eval/key_gen.cpp
@@ -87,6 +87,7 @@ struct KeyGen : public NodeVisitor, public NodeTraverser {
void visit(const Sigmoid &) override { add_byte(60); }
void visit(const Elu &) override { add_byte(61); }
void visit(const Erf &) override { add_byte(62); }
+ void visit(const Bit &) override { add_byte(63); }
// traverse
bool open(const Node &node) override { node.accept(*this); return true; }
diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
index 42911a56c14..2a9b7815aa8 100644
--- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
+++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.cpp
@@ -4,6 +4,7 @@
#include "llvm_wrapper.h"
#include <vespa/eval/eval/node_visitor.h>
#include <vespa/eval/eval/node_traverser.h>
+#include <vespa/eval/eval/extract_bit.h>
#include <llvm/IR/Verifier.h>
#include <llvm/Support/TargetSelect.h>
#include <llvm/IR/IRBuilder.h>
@@ -29,6 +30,7 @@ double vespalib_eval_approx(double a, double b) { return (vespalib::approx_equal
double vespalib_eval_relu(double a) { return std::max(a, 0.0); }
double vespalib_eval_sigmoid(double a) { return 1.0 / (1.0 + std::exp(-1.0 * a)); }
double vespalib_eval_elu(double a) { return (a < 0) ? std::exp(a) - 1.0 : a; }
+double vespalib_eval_bit(double a, double b) { return vespalib::eval::extract_bit(a, b); }
using vespalib::eval::gbdt::Forest;
using resolve_function = double (*)(void *ctx, size_t idx);
@@ -646,6 +648,9 @@ struct FunctionBuilder : public NodeVisitor, public NodeTraverser {
void visit(const Erf &) override {
make_call_1("erf");
}
+ void visit(const Bit &) override {
+ make_call_2("vespalib_eval_bit");
+ }
};
FunctionBuilder::~FunctionBuilder() { }
diff --git a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h
index 040c0bdb73f..e04b477750d 100644
--- a/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h
+++ b/eval/src/vespa/eval/eval/llvm/llvm_wrapper.h
@@ -19,6 +19,7 @@ extern "C" {
double vespalib_eval_relu(double a);
double vespalib_eval_sigmoid(double a);
double vespalib_eval_elu(double a);
+ double vespalib_eval_bit(double a, double b);
};
namespace vespalib::eval {
diff --git a/eval/src/vespa/eval/eval/make_tensor_function.cpp b/eval/src/vespa/eval/eval/make_tensor_function.cpp
index b65c3d5aaa7..498be2a738b 100644
--- a/eval/src/vespa/eval/eval/make_tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/make_tensor_function.cpp
@@ -357,6 +357,9 @@ struct TensorFunctionBuilder : public NodeVisitor, public NodeTraverser {
void visit(const Erf &node) override {
make_map(node, operation::Erf::f);
}
+ void visit(const Bit &node) override {
+ make_join(node, operation::Bit::f);
+ }
//-------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/node_tools.cpp b/eval/src/vespa/eval/eval/node_tools.cpp
index e7341bc1755..fa2d16a2271 100644
--- a/eval/src/vespa/eval/eval/node_tools.cpp
+++ b/eval/src/vespa/eval/eval/node_tools.cpp
@@ -182,6 +182,7 @@ struct CopyNode : NodeTraverser, NodeVisitor {
void visit(const Sigmoid &node) override { copy_call(node); }
void visit(const Elu &node) override { copy_call(node); }
void visit(const Erf &node) override { copy_call(node); }
+ void visit(const Bit &node) override { copy_call(node); }
// traverse nodes
bool open(const Node &) override { return !error; }
diff --git a/eval/src/vespa/eval/eval/node_types.cpp b/eval/src/vespa/eval/eval/node_types.cpp
index 63da6d79c6f..8622fd734f1 100644
--- a/eval/src/vespa/eval/eval/node_types.cpp
+++ b/eval/src/vespa/eval/eval/node_types.cpp
@@ -278,6 +278,7 @@ struct TypeResolver : public NodeVisitor, public NodeTraverser {
void visit(const Sigmoid &node) override { resolve_op1(node); }
void visit(const Elu &node) override { resolve_op1(node); }
void visit(const Erf &node) override { resolve_op1(node); }
+ void visit(const Bit &node) override { resolve_op2(node); }
//-------------------------------------------------------------------------
diff --git a/eval/src/vespa/eval/eval/node_visitor.h b/eval/src/vespa/eval/eval/node_visitor.h
index 172cd48fe2a..475bbf5405c 100644
--- a/eval/src/vespa/eval/eval/node_visitor.h
+++ b/eval/src/vespa/eval/eval/node_visitor.h
@@ -85,6 +85,7 @@ struct NodeVisitor {
virtual void visit(const nodes::Sigmoid &) = 0;
virtual void visit(const nodes::Elu &) = 0;
virtual void visit(const nodes::Erf &) = 0;
+ virtual void visit(const nodes::Bit &) = 0;
virtual ~NodeVisitor() {}
};
@@ -154,6 +155,7 @@ struct EmptyNodeVisitor : NodeVisitor {
void visit(const nodes::Sigmoid &) override {}
void visit(const nodes::Elu &) override {}
void visit(const nodes::Erf &) override {}
+ void visit(const nodes::Bit &) override {}
};
} // namespace vespalib::eval
diff --git a/eval/src/vespa/eval/eval/operation.cpp b/eval/src/vespa/eval/eval/operation.cpp
index b97ac3f2261..a82a79e6bc4 100644
--- a/eval/src/vespa/eval/eval/operation.cpp
+++ b/eval/src/vespa/eval/eval/operation.cpp
@@ -3,6 +3,7 @@
#include "operation.h"
#include "function.h"
#include "key_gen.h"
+#include "extract_bit.h"
#include <vespa/vespalib/util/approx.h>
#include <algorithm>
@@ -50,6 +51,7 @@ double Relu::f(double a) { return std::max(a, 0.0); }
double Sigmoid::f(double a) { return 1.0 / (1.0 + std::exp(-1.0 * a)); }
double Elu::f(double a) { return (a < 0) ? std::exp(a) - 1 : a; }
double Erf::f(double a) { return std::erf(a); }
+double Bit::f(double a, double b) { return extract_bit(a, b); }
//-----------------------------------------------------------------------------
double Inv::f(double a) { return (1.0 / a); }
double Square::f(double a) { return (a * a); }
@@ -143,6 +145,7 @@ std::map<vespalib::string,op2_t> make_op2_map() {
add_op2(map, "fmod(a,b)", Mod::f);
add_op2(map, "min(a,b)", Min::f);
add_op2(map, "max(a,b)", Max::f);
+ add_op2(map, "bit(a,b)", Bit::f);
return map;
}
diff --git a/eval/src/vespa/eval/eval/operation.h b/eval/src/vespa/eval/eval/operation.h
index 3170c868214..438b510b714 100644
--- a/eval/src/vespa/eval/eval/operation.h
+++ b/eval/src/vespa/eval/eval/operation.h
@@ -49,6 +49,7 @@ struct Relu { static double f(double a); };
struct Sigmoid { static double f(double a); };
struct Elu { static double f(double a); };
struct Erf { static double f(double a); };
+struct Bit { static double f(double a, double b); };
//-----------------------------------------------------------------------------
struct Inv { static double f(double a); };
struct Square { static double f(double a); };
diff --git a/eval/src/vespa/eval/eval/test/eval_spec.cpp b/eval/src/vespa/eval/eval/test/eval_spec.cpp
index 63a3a23d9ae..5d51a1d23b5 100644
--- a/eval/src/vespa/eval/eval/test/eval_spec.cpp
+++ b/eval/src/vespa/eval/eval/test/eval_spec.cpp
@@ -158,6 +158,17 @@ EvalSpec::add_function_call_cases() {
add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "fmod(a,b)", [](double a, double b){ return std::fmod(a, b); });
add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "min(a,b)", [](double a, double b){ return std::min(a, b); });
add_rule({"a", -1.0, 1.0}, {"b", -1.0, 1.0}, "max(a,b)", [](double a, double b){ return std::max(a, b); });
+ add_expression({"a", "b"}, "bit(a,b)")
+ .add_case({-128, 7}, 1.0).add_case({-128, 6}, 0.0).add_case({-128, 5}, 0.0).add_case({-128, 4}, 0.0)
+ .add_case({-128, 3}, 0.0).add_case({-128, 2}, 0.0).add_case({-128, 1}, 0.0).add_case({-128, 0}, 0.0)
+ .add_case({-43, 7}, 1.0).add_case({-43, 6}, 1.0).add_case({-43, 5}, 0.0).add_case({-43, 4}, 1.0)
+ .add_case({-43, 3}, 0.0).add_case({-43, 2}, 1.0).add_case({-43, 1}, 0.0).add_case({-43, 0}, 1.0)
+ .add_case({0, 7}, 0.0).add_case({0, 6}, 0.0).add_case({0, 5}, 0.0).add_case({0, 4}, 0.0)
+ .add_case({0, 3}, 0.0).add_case({0, 2}, 0.0).add_case({0, 1}, 0.0).add_case({0, 0}, 0.0)
+ .add_case({85, 7}, 0.0).add_case({85, 6}, 1.0).add_case({85, 5}, 0.0).add_case({85, 4}, 1.0)
+ .add_case({85, 3}, 0.0).add_case({85, 2}, 1.0).add_case({85, 1}, 0.0).add_case({85, 0}, 1.0)
+ .add_case({127, 7}, 0.0).add_case({127, 6}, 1.0).add_case({127, 5}, 1.0).add_case({127, 4}, 1.0)
+ .add_case({127, 3}, 1.0).add_case({127, 2}, 1.0).add_case({127, 1}, 1.0).add_case({127, 0}, 1.0);
}
void
diff --git a/eval/src/vespa/eval/eval/test/reference_evaluation.cpp b/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
index 4824751bb14..58e4b91f6d9 100644
--- a/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
+++ b/eval/src/vespa/eval/eval/test/reference_evaluation.cpp
@@ -335,6 +335,9 @@ struct EvalNode : public NodeVisitor {
void visit(const Erf &node) override {
eval_map(node.get_child(0), operation::Erf::f);
}
+ void visit(const Bit &node) override {
+ eval_join(node.get_child(0), node.get_child(1), operation::Bit::f);
+ }
};
TensorSpec eval_node(const Node &node, const std::vector<TensorSpec> &params) {
diff --git a/eval/src/vespa/eval/eval/visit_stuff.cpp b/eval/src/vespa/eval/eval/visit_stuff.cpp
index 9306a720837..786562d823f 100644
--- a/eval/src/vespa/eval/eval/visit_stuff.cpp
+++ b/eval/src/vespa/eval/eval/visit_stuff.cpp
@@ -59,6 +59,7 @@ vespalib::string name_of(join_fun_t fun) {
if (fun == operation::Ldexp::f) return "ldexp";
if (fun == operation::Min::f) return "min";
if (fun == operation::Max::f) return "max";
+ if (fun == operation::Bit::f) return "bit";
return "[other join function]";
}
diff --git a/eval/src/vespa/eval/onnx/CMakeLists.txt b/eval/src/vespa/eval/onnx/CMakeLists.txt
index 9b18557c036..40444936d02 100644
--- a/eval/src/vespa/eval/onnx/CMakeLists.txt
+++ b/eval/src/vespa/eval/onnx/CMakeLists.txt
@@ -2,5 +2,6 @@
vespa_add_library(eval_onnx OBJECT
SOURCES
+ onnx_model_cache.cpp
onnx_wrapper.cpp
)
diff --git a/eval/src/vespa/eval/onnx/onnx_model_cache.cpp b/eval/src/vespa/eval/onnx/onnx_model_cache.cpp
new file mode 100644
index 00000000000..01d5fdd9c84
--- /dev/null
+++ b/eval/src/vespa/eval/onnx/onnx_model_cache.cpp
@@ -0,0 +1,51 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "onnx_model_cache.h"
+
+namespace vespalib::eval {
+
+std::mutex OnnxModelCache::_lock{};
+OnnxModelCache::Map OnnxModelCache::_cached{};
+
+void
+OnnxModelCache::release(Map::iterator entry)
+{
+ std::lock_guard<std::mutex> guard(_lock);
+ if (--(entry->second.num_refs) == 0) {
+ _cached.erase(entry);
+ }
+}
+
+OnnxModelCache::Token::UP
+OnnxModelCache::load(const vespalib::string &model_file)
+{
+ std::lock_guard<std::mutex> guard(_lock);
+ auto pos = _cached.find(model_file);
+ if (pos == _cached.end()) {
+ auto model = std::make_unique<Onnx>(model_file, Onnx::Optimize::ENABLE);
+ auto res = _cached.emplace(model_file, std::move(model));
+ assert(res.second);
+ pos = res.first;
+ }
+ return std::make_unique<Token>(pos, ctor_tag());
+}
+
+size_t
+OnnxModelCache::num_cached()
+{
+ std::lock_guard<std::mutex> guard(_lock);
+ return _cached.size();
+}
+
+size_t
+OnnxModelCache::count_refs()
+{
+ std::lock_guard<std::mutex> guard(_lock);
+ size_t refs = 0;
+ for (const auto &entry: _cached) {
+ refs += entry.second.num_refs;
+ }
+ return refs;
+}
+
+}
diff --git a/eval/src/vespa/eval/onnx/onnx_model_cache.h b/eval/src/vespa/eval/onnx/onnx_model_cache.h
new file mode 100644
index 00000000000..35d5fefa061
--- /dev/null
+++ b/eval/src/vespa/eval/onnx/onnx_model_cache.h
@@ -0,0 +1,58 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "onnx_wrapper.h"
+#include <vespa/vespalib/stllike/string.h>
+#include <memory>
+#include <mutex>
+#include <map>
+
+namespace vespalib::eval {
+
+/**
+ * Cache used to share loaded onnx models between users. The cache
+ * itself will not keep anything alive, but will let you find loaded
+ * models that are currently in use by others.
+ **/
+class OnnxModelCache
+{
+private:
+ struct ctor_tag {};
+ using Key = vespalib::string;
+ struct Value {
+ size_t num_refs;
+ std::unique_ptr<Onnx> model;
+ Value(std::unique_ptr<Onnx> model_in) : num_refs(0), model(std::move(model_in)) {}
+ const Onnx &get() { return *model; }
+ };
+ using Map = std::map<Key,Value>;
+ static std::mutex _lock;
+ static Map _cached;
+
+ static void release(Map::iterator entry);
+
+public:
+ class Token
+ {
+ private:
+ OnnxModelCache::Map::iterator _entry;
+ public:
+ Token(Token &&) = delete;
+ Token(const Token &) = delete;
+ Token &operator=(Token &&) = delete;
+ Token &operator=(const Token &) = delete;
+ using UP = std::unique_ptr<Token>;
+ explicit Token(OnnxModelCache::Map::iterator entry, ctor_tag) : _entry(entry) {
+ ++_entry->second.num_refs;
+ }
+ const Onnx &get() const { return _entry->second.get(); }
+ ~Token() { OnnxModelCache::release(_entry); }
+ };
+
+ static Token::UP load(const vespalib::string &model_file);
+ static size_t num_cached();
+ static size_t count_refs();
+};
+
+}
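
Note: as the class comment above states, the cache itself keeps nothing alive; lifetime is carried entirely by the Token returned from load(), and release() erases the entry once the last token for a model is destroyed (see the share_and_evict_onnx_models test near the top of this diff for the authoritative behavior). A minimal usage sketch follows; the model path and the surrounding function are assumptions.

    #include <vespa/eval/onnx/onnx_model_cache.h>

    void use_model(const vespalib::string &model_file) {
        using vespalib::eval::OnnxModelCache;
        auto token_a = OnnxModelCache::load(model_file);    // loads the model and caches it
        auto token_b = OnnxModelCache::load(model_file);    // shares the already loaded instance
        const vespalib::eval::Onnx &model = token_a->get(); // same object as token_b->get()
        (void) model;
        // When token_a and token_b go out of scope the entry is evicted,
        // so OnnxModelCache::num_cached() drops back to its previous value.
    }
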
diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
index f848c421c9d..e2528fcb1c3 100644
--- a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
+++ b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
@@ -117,23 +117,6 @@ auto convert_optimize(Onnx::Optimize optimize) {
abort();
}
-CellType to_cell_type(Onnx::ElementType type) {
- switch (type) {
- case Onnx::ElementType::INT8: return CellType::INT8;
- case Onnx::ElementType::BFLOAT16: return CellType::BFLOAT16;
- case Onnx::ElementType::UINT8: [[fallthrough]];
- case Onnx::ElementType::INT16: [[fallthrough]];
- case Onnx::ElementType::UINT16: [[fallthrough]];
- case Onnx::ElementType::FLOAT: return CellType::FLOAT;
- case Onnx::ElementType::INT32: [[fallthrough]];
- case Onnx::ElementType::INT64: [[fallthrough]];
- case Onnx::ElementType::UINT32: [[fallthrough]];
- case Onnx::ElementType::UINT64: [[fallthrough]];
- case Onnx::ElementType::DOUBLE: return CellType::DOUBLE;
- }
- abort();
-}
-
Onnx::ElementType make_element_type(ONNXTensorElementDataType element_type) {
switch (element_type) {
case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8: return Onnx::ElementType::INT8;
@@ -245,12 +228,41 @@ Onnx::TensorInfo::type_as_string() const
Onnx::TensorInfo::~TensorInfo() = default;
+vespalib::string
+Onnx::TensorType::type_as_string() const
+{
+ vespalib::string res = type_name(elements);
+ for (const auto &size: dimensions) {
+ res += DimSize(size).as_string();
+ }
+ return res;
+}
+
//-----------------------------------------------------------------------------
Onnx::WireInfo::~WireInfo() = default;
Onnx::WirePlanner::~WirePlanner() = default;
+CellType
+Onnx::WirePlanner::best_cell_type(Onnx::ElementType type)
+{
+ switch (type) {
+ case Onnx::ElementType::INT8: return CellType::INT8;
+ case Onnx::ElementType::BFLOAT16: return CellType::BFLOAT16;
+ case Onnx::ElementType::UINT8: [[fallthrough]];
+ case Onnx::ElementType::INT16: [[fallthrough]];
+ case Onnx::ElementType::UINT16: [[fallthrough]];
+ case Onnx::ElementType::FLOAT: return CellType::FLOAT;
+ case Onnx::ElementType::INT32: [[fallthrough]];
+ case Onnx::ElementType::INT64: [[fallthrough]];
+ case Onnx::ElementType::UINT32: [[fallthrough]];
+ case Onnx::ElementType::UINT64: [[fallthrough]];
+ case Onnx::ElementType::DOUBLE: return CellType::DOUBLE;
+ }
+ abort();
+}
+
bool
Onnx::WirePlanner::bind_input_type(const ValueType &vespa_in, const TensorInfo &onnx_in)
{
@@ -309,7 +321,7 @@ Onnx::WirePlanner::make_output_type(const TensorInfo &onnx_out) const
}
dim_list.emplace_back(fmt("d%zu", dim_list.size()), dim_size);
}
- return ValueType::make_type(to_cell_type(elements), std::move(dim_list));
+ return ValueType::make_type(best_cell_type(elements), std::move(dim_list));
}
Onnx::WireInfo
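
Note: the file-local to_cell_type helper removed above is re-exposed as the public, static WirePlanner::best_cell_type (declaration added to onnx_wrapper.h just below), so callers can query which Vespa cell type an ONNX element type maps to. A small illustrative sketch of the mapping in the switch above; the wrapper function itself is an assumption, not part of the change.

    #include <vespa/eval/onnx/onnx_wrapper.h>
    #include <cassert>

    void check_cell_type_mapping() {
        using vespalib::eval::Onnx;
        using vespalib::eval::CellType;
        // 8-bit and bfloat16 keep dedicated cell types, narrower ints widen to FLOAT, 64-bit types to DOUBLE.
        assert(Onnx::WirePlanner::best_cell_type(Onnx::ElementType::INT8) == CellType::INT8);
        assert(Onnx::WirePlanner::best_cell_type(Onnx::ElementType::INT16) == CellType::FLOAT);
        assert(Onnx::WirePlanner::best_cell_type(Onnx::ElementType::INT64) == CellType::DOUBLE);
    }
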
diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.h b/eval/src/vespa/eval/onnx/onnx_wrapper.h
index 507d75efbd9..9392536eae7 100644
--- a/eval/src/vespa/eval/onnx/onnx_wrapper.h
+++ b/eval/src/vespa/eval/onnx/onnx_wrapper.h
@@ -68,6 +68,7 @@ public:
std::vector<int64_t> dimensions;
TensorType(ElementType elements_in, std::vector<int64_t> dimensions_in) noexcept
: elements(elements_in), dimensions(std::move(dimensions_in)) {}
+ vespalib::string type_as_string() const;
};
// how the model should be wired with inputs/outputs
@@ -88,6 +89,7 @@ public:
public:
WirePlanner() : _input_types(), _symbolic_sizes(), _bound_unknown_sizes() {}
~WirePlanner();
+ static CellType best_cell_type(Onnx::ElementType type);
bool bind_input_type(const ValueType &vespa_in, const TensorInfo &onnx_in);
ValueType make_output_type(const TensorInfo &onnx_out) const;
WireInfo get_wire_info(const Onnx &model) const;
diff --git a/fbench/src/fbench/client.cpp b/fbench/src/fbench/client.cpp
index c1b444e9a6b..108d44e2b9b 100644
--- a/fbench/src/fbench/client.cpp
+++ b/fbench/src/fbench/client.cpp
@@ -5,38 +5,32 @@
#include <util/clientstatus.h>
#include <httpclient/httpclient.h>
#include <util/filereader.h>
-#include <cassert>
#include <cstring>
#include <iostream>
#include <vespa/vespalib/encoding/base64.h>
using namespace vespalib;
-Client::Client(vespalib::CryptoEngine::SP engine, ClientArguments *args)
- : _args(args),
- _status(new ClientStatus()),
- _reqTimer(new Timer()),
- _cycleTimer(new Timer()),
- _masterTimer(new Timer()),
- _http(new HTTPClient(std::move(engine), _args->_hostname, _args->_port,
- _args->_keepAlive, _args->_headerBenchmarkdataCoverage,
- _args->_extraHeaders, _args->_authority)),
- _reader(new FileReader()),
+Client::Client(vespalib::CryptoEngine::SP engine, std::unique_ptr<ClientArguments> args)
+ : _args(std::move(args)),
+ _status(std::make_unique<ClientStatus>()),
+ _reqTimer(std::make_unique<Timer>()),
+ _cycleTimer(std::make_unique<Timer>()),
+ _masterTimer(std::make_unique<Timer>()),
+ _http(std::make_unique<HTTPClient>(std::move(engine), _args->_hostname, _args->_port, _args->_keepAlive,
+ _args->_headerBenchmarkdataCoverage, _args->_extraHeaders, _args->_authority)),
+ _reader(std::make_unique<FileReader>()),
_output(),
- _linebufsize(args->_maxLineSize),
- _linebuf(new char[_linebufsize]),
+ _linebufsize(_args->_maxLineSize),
+ _linebuf(std::make_unique<char[]>(_linebufsize)),
_stop(false),
_done(false),
_thread()
{
- assert(args != NULL);
_cycleTimer->SetMax(_args->_cycle);
}
-Client::~Client()
-{
- delete [] _linebuf;
-}
+Client::~Client() = default;
void Client::runMe(Client * me) {
me->run();
@@ -173,15 +167,15 @@ Client::run()
std::this_thread::sleep_for(std::chrono::milliseconds(_args->_delay));
// open query file
- snprintf(inputFilename, 1024, _args->_filenamePattern, _args->_myNum);
+ snprintf(inputFilename, 1024, _args->_filenamePattern.c_str(), _args->_myNum);
if (!_reader->Open(inputFilename)) {
printf("Client %d: ERROR: could not open file '%s' [read mode]\n",
_args->_myNum, inputFilename);
_status->SetError("Could not open query file.");
return;
}
- if (_args->_outputPattern != NULL) {
- snprintf(outputFilename, 1024, _args->_outputPattern, _args->_myNum);
+ if ( ! _args->_outputPattern.empty()) {
+ snprintf(outputFilename, 1024, _args->_outputPattern.c_str(), _args->_myNum);
_output = std::make_unique<std::ofstream>(outputFilename, std::ofstream::out | std::ofstream::binary);
if (_output->fail()) {
printf("Client %d: ERROR: could not open file '%s' [write mode]\n",
@@ -208,7 +202,7 @@ Client::run()
_cycleTimer->Start();
- linelen = urlSource.nextUrl(_linebuf, _linebufsize);
+ linelen = urlSource.nextUrl(_linebuf.get(), _linebufsize);
if (linelen > 0) {
++urlNumber;
} else {
@@ -222,11 +216,11 @@ Client::run()
if (linelen < _linebufsize) {
if (_output) {
_output->write("URL: ", strlen("URL: "));
- _output->write(_linebuf, linelen);
+ _output->write(_linebuf.get(), linelen);
_output->write("\n\n", 2);
}
if (linelen + (int)_args->_queryStringToAppend.length() < _linebufsize) {
- strcat(_linebuf, _args->_queryStringToAppend.c_str());
+ strcat(_linebuf.get(), _args->_queryStringToAppend.c_str());
}
int cLen = _args->_usePostMode ? urlSource.nextContent() : 0;
@@ -239,7 +233,7 @@ Client::run()
}
_reqTimer->Start();
- auto fetch_status = _http->Fetch(_linebuf, _output.get(), _args->_usePostMode, content, cLen);
+ auto fetch_status = _http->Fetch(_linebuf.get(), _output.get(), _args->_usePostMode, content, cLen);
_reqTimer->Stop();
_status->AddRequestStatus(fetch_status.RequestStatus());
if (fetch_status.Ok() && fetch_status.TotalHitCount() == 0)
diff --git a/fbench/src/fbench/client.h b/fbench/src/fbench/client.h
index 70d83f71971..3349a112fa2 100644
--- a/fbench/src/fbench/client.h
+++ b/fbench/src/fbench/client.h
@@ -21,23 +21,17 @@ struct ClientArguments
int _myNum;
/**
- * The total number of clients controlled by the parent fbench
- * application
- **/
- int _totNum;
-
- /**
* Pattern that combined with the client number will become the name
* of the file containing the urls this client should request.
**/
- const char *_filenamePattern;
+ std::string _filenamePattern;
/**
* Pattern that combined with the client number will become the name
* of the file this client should dump url content to. If this
* pattern is set to NULL no output file is generated.
**/
- const char *_outputPattern;
+ std::string _outputPattern;
/**
* The server the client should fetch urls from.
@@ -116,9 +110,9 @@ struct ClientArguments
std::string _extraHeaders;
std::string _authority;
- ClientArguments(int myNum, int totNum,
- const char *filenamePattern,
- const char *outputPattern,
+ ClientArguments(int myNum,
+ const std::string & filenamePattern,
+ const std::string & outputPattern,
const char *hostname, int port,
long cycle, long delay,
int ignoreCount, int byteLimit,
@@ -129,7 +123,6 @@ struct ClientArguments
const std::string & queryStringToAppend, const std::string & extraHeaders,
const std::string &authority, bool postMode)
: _myNum(myNum),
- _totNum(totNum),
_filenamePattern(filenamePattern),
_outputPattern(outputPattern),
_hostname(hostname),
@@ -181,13 +174,11 @@ private:
std::unique_ptr<FileReader> _reader;
std::unique_ptr<std::ofstream> _output;
int _linebufsize;
- char *_linebuf;
+ std::unique_ptr<char[]> _linebuf;
std::atomic<bool> _stop;
std::atomic<bool> _done;
std::thread _thread;
- Client(const Client &);
- Client &operator=(const Client &);
static void runMe(Client * client);
void run();
@@ -197,7 +188,9 @@ public:
* The client arguments given to this method becomes the
* responsibility of the client.
**/
- Client(vespalib::CryptoEngine::SP engine, ClientArguments *args);
+ Client(vespalib::CryptoEngine::SP engine, std::unique_ptr<ClientArguments> args);
+ Client(const Client &) = delete;
+ Client &operator=(const Client &) = delete;
/**
* Delete objects owned by this client, including the client arguments.
diff --git a/fbench/src/fbench/fbench.cpp b/fbench/src/fbench/fbench.cpp
index 57efb8a47e0..1ba49e9897a 100644
--- a/fbench/src/fbench/fbench.cpp
+++ b/fbench/src/fbench/fbench.cpp
@@ -1,4 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "fbench.h"
+#include "client.h"
+
#include <util/timer.h>
#include <httpclient/httpclient.h>
#include <util/filereader.h>
@@ -9,8 +13,6 @@
#include <vespa/vespalib/net/tls/tls_crypto_engine.h>
#include <vespa/vespalib/io/mapped_file_input.h>
#include <vespa/vespalib/util/size_literals.h>
-#include "client.h"
-#include "fbench.h"
#include <cstring>
#include <cmath>
#include <csignal>
@@ -19,7 +21,8 @@
namespace {
-std::string maybe_load(const std::string &file_name, bool &failed) {
+std::string
+maybe_load(const std::string &file_name, bool &failed) {
std::string content;
if (!file_name.empty()) {
vespalib::MappedFileInput file(file_name);
@@ -42,8 +45,8 @@ FBench::FBench()
_clients(),
_ignoreCount(0),
_cycle(0),
- _filenamePattern(NULL),
- _outputPattern(NULL),
+ _filenamePattern(),
+ _outputPattern(),
_byteLimit(0),
_restartLimit(0),
_maxLineSize(0),
@@ -58,8 +61,6 @@ FBench::FBench()
FBench::~FBench()
{
_clients.clear();
- free(_filenamePattern);
- free(_outputPattern);
}
bool
@@ -121,11 +122,13 @@ FBench::InitBenchmark(int numClients, int ignoreCount, int cycle,
_ignoreCount = ignoreCount;
_cycle = cycle;
- free(_filenamePattern);
- _filenamePattern = strdup(filenamePattern);
- free(_outputPattern);
- _outputPattern = (outputPattern == NULL) ?
- NULL : strdup(outputPattern);
+ _filenamePattern = filenamePattern;
+ if (outputPattern != nullptr) {
+ _outputPattern = outputPattern;
+ } else {
+ _outputPattern.clear();
+ }
+
_queryStringToAppend = queryStringToAppend;
_extraHeaders = extraHeaders;
_authority = authority;
@@ -154,15 +157,12 @@ FBench::CreateClients()
off_end = _queryfileOffset[i+1];
}
client = std::make_unique<Client>(_crypto_engine,
- new ClientArguments(i, _clients.size(), _filenamePattern,
- _outputPattern, _hostnames[i % _hostnames.size()].c_str(),
- _ports[i % _ports.size()], _cycle,
- random() % spread, _ignoreCount,
- _byteLimit, _restartLimit, _maxLineSize,
- _keepAlive, _base64Decode,
- _headerBenchmarkdataCoverage,
- off_beg, off_end,
- _singleQueryFile, _queryStringToAppend, _extraHeaders, _authority, _usePostMode));
+ std::make_unique<ClientArguments>(i, _filenamePattern, _outputPattern,
+ _hostnames[i % _hostnames.size()].c_str(),
+ _ports[i % _ports.size()], _cycle,random() % spread,
+ _ignoreCount, _byteLimit, _restartLimit, _maxLineSize, _keepAlive,
+ _base64Decode, _headerBenchmarkdataCoverage, off_beg, off_end,
+ _singleQueryFile, _queryStringToAppend, _extraHeaders, _authority, _usePostMode));
++i;
}
}
@@ -278,6 +278,8 @@ FBench::PrintSummary()
printf("utilization: %8.2f %%\n",
(maxRate > 0) ? 100 * (actualRate / maxRate) : 0);
printf("zero hit queries: %8ld\n", status._zeroHitQueries);
+ printf("zero hit percentage: %8.2f %%\n",
+ (status._requestCnt > 0) ? 100.0*(double(status._zeroHitQueries)/status._requestCnt) : 0.0);
printf("http request status breakdown:\n");
for (const auto& entry : status._requestStatusDistribution)
printf(" %8u : %8u \n", entry.first, entry.second);
@@ -345,7 +347,7 @@ FBench::Main(int argc, char *argv[])
const int minLineSize = 1024;
const char *queryFilePattern = "query%03d.txt";
- const char *outputFilePattern = NULL;
+ const char *outputFilePattern = nullptr;
std::string queryStringToAppend;
std::string extraHeaders;
std::string ca_certs_file_name; // -T
@@ -599,8 +601,8 @@ main(int argc, char** argv)
sigemptyset(&act.sa_mask);
act.sa_flags = 0;
- sigaction(SIGINT, &act, NULL);
- sigaction(SIGPIPE, &act, NULL);
+ sigaction(SIGINT, &act, nullptr);
+ sigaction(SIGPIPE, &act, nullptr);
FBench myApp;
return myApp.Main(argc, argv);
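
Note: the new "zero hit percentage" summary line added above reports zero-hit queries as a share of all requests; for example, 50 zero-hit queries out of 2000 requests print as 2.50 %, and the requestCnt > 0 guard keeps the report at 0.0 when no requests completed.
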
diff --git a/fbench/src/fbench/fbench.h b/fbench/src/fbench/fbench.h
index 362a463a4f1..e66fc28683d 100644
--- a/fbench/src/fbench/fbench.h
+++ b/fbench/src/fbench/fbench.h
@@ -1,6 +1,14 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include <vector>
+#include <string>
+#include <memory>
+
+class Client;
+
+namespace vespalib { class CryptoEngine; }
+
/**
* This is the application class of the fbench program. It controls
* the operation of the test clients and collects overall results.
@@ -10,29 +18,27 @@
class FBench
{
private:
- vespalib::CryptoEngine::SP _crypto_engine;
- std::vector<Client::UP> _clients;
- int _numClients;
- int _ignoreCount;
- int _cycle;
+ std::shared_ptr<vespalib::CryptoEngine> _crypto_engine;
+ std::vector<std::unique_ptr<Client>> _clients;
+ int _ignoreCount;
+ int _cycle;
std::vector<std::string> _hostnames;
- std::vector<int> _ports;
- char *_filenamePattern;
- char *_outputPattern;
- int _byteLimit;
- int _restartLimit;
- int _maxLineSize;
- bool _keepAlive;
- bool _base64Decode;
- bool _usePostMode;
- bool _headerBenchmarkdataCoverage;
- int _seconds;
- std::vector<uint64_t> _queryfileOffset;
- int _numberOfQueries;
- bool _singleQueryFile;
- std::string _queryStringToAppend;
- std::string _extraHeaders;
- std::string _authority;
+ std::vector<int> _ports;
+ std::string _filenamePattern;
+ std::string _outputPattern;
+ int _byteLimit;
+ int _restartLimit;
+ int _maxLineSize;
+ bool _keepAlive;
+ bool _base64Decode;
+ bool _usePostMode;
+ bool _headerBenchmarkdataCoverage;
+ int _seconds;
+ std::vector<uint64_t> _queryfileOffset;
+ bool _singleQueryFile;
+ std::string _queryStringToAppend;
+ std::string _extraHeaders;
+ std::string _authority;
bool init_crypto_engine(const std::string &ca_certs_file_name,
const std::string &cert_chain_file_name,
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/Downloads.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/Downloads.java
new file mode 100644
index 00000000000..eab6cd39352
--- /dev/null
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/Downloads.java
@@ -0,0 +1,133 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.filedistribution;
+
+import com.yahoo.config.FileReference;
+
+import java.io.File;
+import java.time.Instant;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+/**
+ * Keeps track of downloads and download status
+ *
+ * @author hmusum
+ */
+public class Downloads {
+
+ private static final Logger log = Logger.getLogger(Downloads.class.getName());
+
+ private final Map<FileReference, FileReferenceDownload> downloads = new ConcurrentHashMap<>();
+ private final DownloadStatuses downloadStatuses = new DownloadStatuses();
+
+ public DownloadStatuses downloadStatuses() { return downloadStatuses; }
+
+ void setDownloadStatus(FileReference fileReference, double completeness) {
+ downloadStatuses.put(fileReference, completeness);
+ }
+
+ void completedDownloading(FileReference fileReference, File file) {
+ Optional<FileReferenceDownload> download = get(fileReference);
+ setDownloadStatus(fileReference, 1.0);
+ if (download.isPresent()) {
+ downloads.remove(fileReference);
+ download.get().future().complete(Optional.of(file));
+ } else {
+ log.log(Level.FINE, () -> "Received '" + fileReference + "', which was not requested. Can be ignored if happening during upgrades/restarts");
+ }
+ }
+
+ void add(FileReferenceDownload fileReferenceDownload) {
+ downloads.put(fileReferenceDownload.fileReference(), fileReferenceDownload);
+ downloadStatuses.put(fileReferenceDownload.fileReference());
+ }
+
+ void remove(FileReference fileReference) {
+        downloadStatuses.get(fileReference).ifPresent(d -> downloadStatuses.put(d.fileReference(), 0.0));
+ downloads.remove(fileReference);
+ }
+
+ double downloadStatus(FileReference fileReference) {
+ double status = 0.0;
+ Optional<Downloads.DownloadStatus> downloadStatus = downloadStatuses.get(fileReference);
+ if (downloadStatus.isPresent()) {
+ status = downloadStatus.get().progress();
+ }
+ return status;
+ }
+
+ Map<FileReference, Double> downloadStatus() {
+ return downloadStatuses.all().values().stream().collect(Collectors.toMap(Downloads.DownloadStatus::fileReference, Downloads.DownloadStatus::progress));
+ }
+
+ Optional<FileReferenceDownload> get(FileReference fileReference) {
+ return Optional.ofNullable(downloads.get(fileReference));
+ }
+
+ /* Status for ongoing and completed downloads, keeps at most status for 100 last downloads */
+ static class DownloadStatuses {
+
+ private static final int maxEntries = 100;
+
+ private final Map<FileReference, DownloadStatus> downloadStatus = Collections.synchronizedMap(new HashMap<>());
+
+ void put(FileReference fileReference) {
+ put(fileReference, 0.0);
+ }
+
+ void put(FileReference fileReference, double progress) {
+ downloadStatus.put(fileReference, new DownloadStatus(fileReference, progress));
+ if (downloadStatus.size() > maxEntries) {
+ Map.Entry<FileReference, DownloadStatus> oldest =
+ Collections.min(downloadStatus.entrySet(), Comparator.comparing(e -> e.getValue().created));
+ downloadStatus.remove(oldest.getKey());
+ }
+ }
+
+ Optional<DownloadStatus> get(FileReference fileReference) {
+ return Optional.ofNullable(downloadStatus.get(fileReference));
+ }
+
+ Map<FileReference, DownloadStatus> all() {
+ return Map.copyOf(downloadStatus);
+ }
+
+ @Override
+ public String toString() {
+ return downloadStatus.entrySet().stream().map(entry -> entry.getKey().value() + "=>" + entry.getValue().progress).collect(Collectors.joining(", "));
+ }
+
+ }
+
+ static class DownloadStatus {
+ private final FileReference fileReference;
+ private final double progress; // between 0 and 1
+ private final Instant created;
+
+ DownloadStatus(FileReference fileReference, double progress) {
+ this.fileReference = fileReference;
+ this.progress = progress;
+ this.created = Instant.now();
+ }
+
+ public FileReference fileReference() {
+ return fileReference;
+ }
+
+ public double progress() {
+ return progress;
+ }
+
+ public Instant created() {
+ return created;
+ }
+ }
+
+}
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java
index b1e43e4cee1..292674497ed 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileDownloader.java
@@ -30,23 +30,30 @@ public class FileDownloader implements AutoCloseable {
private final static Logger log = Logger.getLogger(FileDownloader.class.getName());
public static File defaultDownloadDirectory = new File(Defaults.getDefaults().underVespaHome("var/db/vespa/filedistribution"));
+ private final ConnectionPool connectionPool;
private final File downloadDirectory;
private final Duration timeout;
private final FileReferenceDownloader fileReferenceDownloader;
+ private final Downloads downloads;
public FileDownloader(ConnectionPool connectionPool) {
- this(connectionPool, defaultDownloadDirectory );
+ this(connectionPool, defaultDownloadDirectory, new Downloads());
}
- public FileDownloader(ConnectionPool connectionPool, File downloadDirectory) {
+ public FileDownloader(ConnectionPool connectionPool, File downloadDirectory, Downloads downloads) {
// TODO: Reduce timeout even more, timeout is so long that we might get starvation
- this(connectionPool, downloadDirectory, downloadDirectory, Duration.ofMinutes(5), Duration.ofSeconds(10));
+ this(connectionPool, downloadDirectory, downloads, Duration.ofMinutes(5), Duration.ofSeconds(10));
}
- public FileDownloader(ConnectionPool connectionPool, File downloadDirectory, File tmpDirectory, Duration timeout, Duration sleepBetweenRetries) {
+ public FileDownloader(ConnectionPool connectionPool, File downloadDirectory, Downloads downloads,
+ Duration timeout, Duration sleepBetweenRetries) {
+ this.connectionPool = connectionPool;
this.downloadDirectory = downloadDirectory;
this.timeout = timeout;
- this.fileReferenceDownloader = new FileReferenceDownloader(downloadDirectory, tmpDirectory, connectionPool, timeout, sleepBetweenRetries);
+ // Needed to receive RPC calls receiveFile* from server after asking for files
+ new FileReceiver(connectionPool.getSupervisor(), downloads, downloadDirectory);
+ this.fileReferenceDownloader = new FileReferenceDownloader(connectionPool, downloads, timeout, sleepBetweenRetries);
+ this.downloads = downloads;
}
public Optional<File> getFile(FileReference fileReference) {
@@ -74,39 +81,39 @@ public class FileDownloader implements AutoCloseable {
: download(fileReferenceDownload);
}
- double downloadStatus(FileReference fileReference) {
- return fileReferenceDownloader.downloadStatus(fileReference.value());
- }
+ public Map<FileReference, Double> downloadStatus() { return downloads.downloadStatus(); }
- public Map<FileReference, Double> downloadStatus() {
- return fileReferenceDownloader.downloadStatus();
- }
+ public ConnectionPool connectionPool() { return connectionPool; }
File downloadDirectory() {
return downloadDirectory;
}
- // Files are moved atomically, so if file reference exists and is accessible we can use it
private Optional<File> getFileFromFileSystem(FileReference fileReference) {
File[] files = new File(downloadDirectory, fileReference.value()).listFiles();
- if (downloadDirectory.exists() && downloadDirectory.isDirectory() && files != null && files.length > 0) {
- File file = files[0];
- if (!file.exists()) {
- throw new RuntimeException("File reference '" + fileReference.value() + "' does not exist");
- } else if (!file.canRead()) {
- throw new RuntimeException("File reference '" + fileReference.value() + "'exists, but unable to read it");
- } else {
- log.log(Level.FINE, () -> "File reference '" + fileReference.value() + "' found: " + file.getAbsolutePath());
- fileReferenceDownloader.setDownloadStatus(fileReference, 1.0);
- return Optional.of(file);
- }
+ if (files == null) return Optional.empty();
+ if (files.length == 0) return Optional.empty();
+ if (files.length > 1) throw new RuntimeException("More than one file reference found for " + fileReference);
+
+ File file = files[0];
+ if (!file.exists()) {
+ throw new RuntimeException("File reference '" + fileReference.value() + "' does not exist");
+ } else if (!file.canRead()) {
+ throw new RuntimeException("File reference '" + fileReference.value() + "' exists, but unable to read it");
+ } else {
+ log.log(Level.FINE, () -> "File reference '" + fileReference.value() + "' found: " + file.getAbsolutePath());
+ downloads.setDownloadStatus(fileReference, 1.0);
+ return Optional.of(file);
}
- return Optional.empty();
}
- private boolean alreadyDownloaded(FileReference fileReference) {
+ boolean isDownloading(FileReference fileReference) {
+ return downloads.get(fileReference).isPresent();
+ }
+
+ private boolean alreadyDownloaded(FileReferenceDownload fileReferenceDownload) {
try {
- return (getFileFromFileSystem(fileReference).isPresent());
+ return getFileFromFileSystem(fileReferenceDownload.fileReference()).isPresent();
} catch (RuntimeException e) {
return false;
}
@@ -114,8 +121,7 @@ public class FileDownloader implements AutoCloseable {
/** Start a download, don't wait for result */
public void downloadIfNeeded(FileReferenceDownload fileReferenceDownload) {
- FileReference fileReference = fileReferenceDownload.fileReference();
- if (alreadyDownloaded(fileReference)) return;
+ if (alreadyDownloaded(fileReferenceDownload)) return;
download(fileReferenceDownload);
}
@@ -125,11 +131,8 @@ public class FileDownloader implements AutoCloseable {
return fileReferenceDownloader.download(fileReferenceDownload);
}
- public FileReferenceDownloader fileReferenceDownloader() {
- return fileReferenceDownloader;
- }
-
public void close() {
fileReferenceDownloader.close();
}
+
}
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
index 1bc44e0bed2..e1a8cf92513 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
@@ -8,7 +8,6 @@ import com.yahoo.jrt.Int32Value;
import com.yahoo.jrt.Method;
import com.yahoo.jrt.Request;
import com.yahoo.jrt.Supervisor;
-import java.util.logging.Level;
import net.jpountz.xxhash.StreamingXXHash64;
import net.jpountz.xxhash.XXHashFactory;
@@ -22,6 +21,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Level;
import java.util.logging.Logger;
/**
@@ -39,11 +39,8 @@ public class FileReceiver {
public final static String RECEIVE_EOF_METHOD = "filedistribution.receiveFileEof";
private final Supervisor supervisor;
- private final FileReferenceDownloader downloader;
+ private final Downloads downloads;
private final File downloadDirectory;
- // Should be on same partition as downloadDirectory to make sure moving files from tmpDirectory
- // to downloadDirectory is atomic
- private final File tmpDirectory;
private final AtomicInteger nextSessionId = new AtomicInteger(1);
private final Map<Integer, Session> sessions = new HashMap<>();
@@ -61,7 +58,7 @@ public class FileReceiver {
private final File tmpDir;
private final File inprogressFile;
- Session(File downloadDirectory, File tmpDirectory, int sessionId, FileReference reference,
+ Session(File downloadDirectory, int sessionId, FileReference reference,
FileReferenceData.Type fileType, String fileName, long fileSize)
{
this.hasher = XXHashFactory.fastestInstance().newStreamingHash64(0);
@@ -74,12 +71,12 @@ public class FileReceiver {
currentPartId = 0;
currentHash = 0;
fileReferenceDir = new File(downloadDirectory, reference.value());
- this.tmpDir = tmpDirectory;
+ this.tmpDir = downloadDirectory;
try {
- inprogressFile = Files.createTempFile(tmpDirectory.toPath(), fileName, ".inprogress").toFile();
+ inprogressFile = Files.createTempFile(tmpDir.toPath(), fileName, ".inprogress").toFile();
} catch (IOException e) {
- String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDirectory.toPath() + "': ";
+ String msg = "Failed creating temp file for inprogress file for " + fileName + " in '" + tmpDir.toPath() + "': ";
log.log(Level.SEVERE, msg + e.getMessage(), e);
throw new RuntimeException(msg, e);
}
@@ -149,11 +146,10 @@ public class FileReceiver {
}
}
- FileReceiver(Supervisor supervisor, FileReferenceDownloader downloader, File downloadDirectory, File tmpDirectory) {
+ FileReceiver(Supervisor supervisor, Downloads downloads, File downloadDirectory) {
this.supervisor = supervisor;
- this.downloader = downloader;
+ this.downloads = downloads;
this.downloadDirectory = downloadDirectory;
- this.tmpDirectory = tmpDirectory;
registerMethods();
}
@@ -231,7 +227,7 @@ public class FileReceiver {
log.severe("Session id " + sessionId + " already exist, impossible. Request from(" + req.target() + ")");
} else {
try {
- sessions.put(sessionId, new Session(downloadDirectory, tmpDirectory, sessionId, reference,
+ sessions.put(sessionId, new Session(downloadDirectory, sessionId, reference,
FileReferenceData.Type.valueOf(type),fileName, fileSize));
} catch (Exception e) {
retval = 1;
@@ -260,7 +256,7 @@ public class FileReceiver {
}
double completeness = (double) session.currentFileSize / (double) session.fileSize;
log.log(Level.FINEST, () -> String.format("%.1f percent of '%s' downloaded", completeness * 100, reference.value()));
- downloader.setDownloadStatus(reference, completeness);
+ downloads.setDownloadStatus(reference, completeness);
}
req.returnValues().add(new Int32Value(retval));
}
@@ -273,7 +269,7 @@ public class FileReceiver {
Session session = getSession(sessionId);
int retval = verifySession(session, sessionId, reference);
File file = session.close(xxhash);
- downloader.completedDownloading(reference, file);
+ downloads.completedDownloading(reference, file);
synchronized (sessions) {
sessions.remove(sessionId);
}
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
index 0ce69c182ce..01240357fbe 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceDownloader.java
@@ -12,23 +12,16 @@ import com.yahoo.vespa.config.ConnectionPool;
import java.io.File;
import java.time.Duration;
import java.time.Instant;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Map;
import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
/**
* Downloads file reference using rpc requests to config server and keeps track of files being downloaded
- * <p>
- * Some methods are synchronized to make sure access to downloads is atomic
*
* @author hmusum
*/
@@ -40,20 +33,19 @@ public class FileReferenceDownloader {
Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
new DaemonThreadFactory("filereference downloader"));
private final ConnectionPool connectionPool;
- /* Ongoing downloads */
- private final Downloads downloads = new Downloads();
- /* Status for ongoing and finished downloads */
- private final DownloadStatuses downloadStatuses = new DownloadStatuses();
+ private final Downloads downloads;
private final Duration downloadTimeout;
private final Duration sleepBetweenRetries;
private final Duration rpcTimeout;
- FileReferenceDownloader(File downloadDirectory, File tmpDirectory, ConnectionPool connectionPool, Duration timeout, Duration sleepBetweenRetries) {
+ FileReferenceDownloader(ConnectionPool connectionPool,
+ Downloads downloads,
+ Duration timeout,
+ Duration sleepBetweenRetries) {
this.connectionPool = connectionPool;
+ this.downloads = downloads;
this.downloadTimeout = timeout;
this.sleepBetweenRetries = sleepBetweenRetries;
- // Needed to receive RPC calls receiveFile* from server after asking for files
- new FileReceiver(connectionPool.getSupervisor(), this, downloadDirectory, tmpDirectory);
String timeoutString = System.getenv("VESPA_CONFIGPROXY_FILEDOWNLOAD_RPC_TIMEOUT");
this.rpcTimeout = Duration.ofSeconds(timeoutString == null ? 30 : Integer.parseInt(timeoutString));
}
@@ -90,24 +82,11 @@ public class FileReferenceDownloader {
log.log(Level.FINE, () -> "Will download file reference '" + fileReference.value() + "' with timeout " + downloadTimeout);
downloads.add(fileReferenceDownload);
- downloadStatuses.add(fileReference);
downloadExecutor.submit(() -> startDownload(fileReferenceDownload));
return fileReferenceDownload.future();
}
- void completedDownloading(FileReference fileReference, File file) {
- Optional<FileReferenceDownload> download = downloads.get(fileReference);
- if (download.isPresent()) {
- downloadStatuses.get(fileReference).ifPresent(DownloadStatus::finished);
- downloads.remove(fileReference);
- download.get().future().complete(Optional.of(file));
- } else {
- log.log(Level.FINE, () -> "Received '" + fileReference + "', which was not requested. Can be ignored if happening during upgrades/restarts");
- }
- }
-
void failedDownloading(FileReference fileReference) {
- downloadStatuses.get(fileReference).ifPresent(d -> d.setProgress(0.0));
downloads.remove(fileReference);
}
@@ -139,10 +118,6 @@ public class FileReferenceDownloader {
}
}
- boolean isDownloading(FileReference fileReference) {
- return downloads.get(fileReference).isPresent();
- }
-
private boolean validateResponse(Request request) {
if (request.isError()) {
return false;
@@ -155,31 +130,6 @@ public class FileReferenceDownloader {
return true;
}
- double downloadStatus(String file) {
- double status = 0.0;
- Optional<DownloadStatus> downloadStatus = downloadStatuses.get(new FileReference(file));
- if (downloadStatus.isPresent()) {
- status = downloadStatus.get().progress();
- }
- return status;
- }
-
- void setDownloadStatus(FileReference fileReference, double completeness) {
- Optional<DownloadStatus> downloadStatus = downloadStatuses.get(fileReference);
- if (downloadStatus.isPresent())
- downloadStatus.get().setProgress(completeness);
- else
- downloadStatuses.add(fileReference, completeness);
- }
-
- Map<FileReference, Double> downloadStatus() {
- return downloadStatuses.all().values().stream().collect(Collectors.toMap(DownloadStatus::fileReference, DownloadStatus::progress));
- }
-
- public ConnectionPool connectionPool() {
- return connectionPool;
- }
-
public void close() {
downloadExecutor.shutdown();
try {
@@ -189,84 +139,4 @@ public class FileReferenceDownloader {
}
}
- private static class Downloads {
- private final Map<FileReference, FileReferenceDownload> downloads = new ConcurrentHashMap<>();
-
- void add(FileReferenceDownload fileReferenceDownload) {
- downloads.put(fileReferenceDownload.fileReference(), fileReferenceDownload);
- }
-
- void remove(FileReference fileReference) {
- downloads.remove(fileReference);
- }
-
- Optional<FileReferenceDownload> get(FileReference fileReference) {
- return Optional.ofNullable(downloads.get(fileReference));
- }
- }
-
- private static class DownloadStatus {
- private final FileReference fileReference;
- private double progress; // between 0 and 1
- private final Instant created;
-
- DownloadStatus(FileReference fileReference) {
- this.fileReference = fileReference;
- this.progress = 0.0;
- this.created = Instant.now();
- }
-
- public FileReference fileReference() {
- return fileReference;
- }
-
- public double progress() {
- return progress;
- }
-
- public void setProgress(double progress) {
- this.progress = progress;
- }
-
- public void finished() {
- setProgress(1.0);
- }
-
- public Instant created() {
- return created;
- }
- }
-
- /* Status for ongoing and completed downloads, keeps at most status for 100 last downloads */
- private static class DownloadStatuses {
-
- private static final int maxEntries = 100;
-
- private final Map<FileReference, DownloadStatus> downloadStatus = new ConcurrentHashMap<>();
-
- void add(FileReference fileReference) {
- add(fileReference, 0.0);
- }
-
- void add(FileReference fileReference, double progress) {
- DownloadStatus ds = new DownloadStatus(fileReference);
- ds.setProgress(progress);
- downloadStatus.put(fileReference, ds);
- if (downloadStatus.size() > maxEntries) {
- Map.Entry<FileReference, DownloadStatus> oldest =
- Collections.min(downloadStatus.entrySet(), Comparator.comparing(e -> e.getValue().created));
- downloadStatus.remove(oldest.getKey());
- }
- }
-
- Optional<DownloadStatus> get(FileReference fileReference) {
- return Optional.ofNullable(downloadStatus.get(fileReference));
- }
-
- Map<FileReference, DownloadStatus> all() {
- return Map.copyOf(downloadStatus);
- }
-
- }
-
}
diff --git a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
index 61575b650ce..6169f6fbe55 100644
--- a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
+++ b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileDownloaderTest.java
@@ -1,5 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.filedistribution;
import com.yahoo.config.FileReference;
@@ -42,17 +41,17 @@ public class FileDownloaderTest {
private static final Duration sleepBetweenRetries = Duration.ofMillis(10);
private MockConnection connection;
+ private Downloads downloads;
private FileDownloader fileDownloader;
private File downloadDir;
- private File tempDir;
@Before
public void setup() {
try {
downloadDir = Files.createTempDirectory("filedistribution").toFile();
- tempDir = Files.createTempDirectory("download").toFile();
connection = new MockConnection();
- fileDownloader = new FileDownloader(connection, downloadDir, tempDir, Duration.ofSeconds(1), sleepBetweenRetries);
+ downloads = new Downloads();
+ fileDownloader = new FileDownloader(connection, downloadDir, downloads, Duration.ofSeconds(1), sleepBetweenRetries);
} catch (IOException e) {
e.printStackTrace();
fail(e.getMessage());
@@ -85,7 +84,7 @@ public class FileDownloaderTest {
assertEquals("content", IOUtils.readFile(pathToFile.get()));
// Verify download status when downloaded
- assertDownloadStatus(fileDownloader, fileReference, 1.0);
+ assertDownloadStatus(fileReference, 1.0);
}
{
@@ -98,22 +97,22 @@ public class FileDownloaderTest {
assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent());
// Verify download status when unable to download
- assertDownloadStatus(fileDownloader, fileReference, 0.0);
+ assertDownloadStatus(fileReference, 0.0);
}
{
// fileReference does not exist on disk, needs to be downloaded)
- FileReference fileReference = new FileReference("fileReference");
+ FileReference fileReference = new FileReference("baz");
File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference);
assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent());
// Verify download status
- assertDownloadStatus(fileDownloader, fileReference, 0.0);
+ assertDownloadStatus(fileReference, 0.0);
// Receives fileReference, should return and make it available to caller
String filename = "abc.jar";
- receiveFile(fileDownloader, fileReference, filename, FileReferenceData.Type.file, "some other content");
+ receiveFile(fileReference, filename, FileReferenceData.Type.file, "some other content");
Optional<File> downloadedFile = fileDownloader.getFile(fileReference);
assertTrue(downloadedFile.isPresent());
@@ -122,7 +121,8 @@ public class FileDownloaderTest {
assertEquals("some other content", IOUtils.readFile(downloadedFile.get()));
// Verify download status when downloaded
- assertDownloadStatus(fileDownloader, fileReference, 1.0);
+ System.out.println(downloads.downloadStatuses());
+ assertDownloadStatus(fileReference, 1.0);
}
{
@@ -133,7 +133,7 @@ public class FileDownloaderTest {
assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent());
// Verify download status
- assertDownloadStatus(fileDownloader, fileReference, 0.0);
+ assertDownloadStatus(fileReference, 0.0);
// Receives fileReference, should return and make it available to caller
String filename = "abc.tar.gz";
@@ -147,7 +147,7 @@ public class FileDownloaderTest {
File tarFile = CompressedFileReference.compress(tempPath.toFile(), Arrays.asList(fooFile, barFile), new File(tempPath.toFile(), filename));
byte[] tarredContent = IOUtils.readFileBytes(tarFile);
- receiveFile(fileDownloader, fileReference, filename, FileReferenceData.Type.compressed, tarredContent);
+ receiveFile(fileReference, filename, FileReferenceData.Type.compressed, tarredContent);
Optional<File> downloadedFile = fileDownloader.getFile(fileReference);
assertTrue(downloadedFile.isPresent());
@@ -157,13 +157,13 @@ public class FileDownloaderTest {
assertEquals("bar", IOUtils.readFile(downloadedBar));
// Verify download status when downloaded
- assertDownloadStatus(fileDownloader, fileReference, 1.0);
+ assertDownloadStatus(fileReference, 1.0);
}
}
@Test
public void getFileWhenConnectionError() throws IOException {
- fileDownloader = new FileDownloader(connection, downloadDir, tempDir, Duration.ofSeconds(2), sleepBetweenRetries);
+ fileDownloader = new FileDownloader(connection, downloadDir, downloads, Duration.ofSeconds(2), sleepBetweenRetries);
File downloadDir = fileDownloader.downloadDirectory();
int timesToFail = 2;
@@ -175,12 +175,12 @@ public class FileDownloaderTest {
assertFalse(fileReferenceFullPath.getAbsolutePath(), fileDownloader.getFile(fileReference).isPresent());
// Getting file failed, verify download status and since there was an error is not downloading ATM
- assertDownloadStatus(fileDownloader, fileReference, 0.0);
- assertFalse(fileDownloader.fileReferenceDownloader().isDownloading(fileReference));
+ assertDownloadStatus(fileReference, 0.0);
+ assertFalse(fileDownloader.isDownloading(fileReference));
// Receives fileReference, should return and make it available to caller
String filename = "abc.jar";
- receiveFile(fileDownloader, fileReference, filename, FileReferenceData.Type.file, "some other content");
+ receiveFile(fileReference, filename, FileReferenceData.Type.file, "some other content");
Optional<File> downloadedFile = fileDownloader.getFile(fileReference);
assertTrue(downloadedFile.isPresent());
File downloadedFileFullPath = new File(fileReferenceFullPath, filename);
@@ -188,7 +188,7 @@ public class FileDownloaderTest {
assertEquals("some other content", IOUtils.readFile(downloadedFile.get()));
// Verify download status when downloaded
- assertDownloadStatus(fileDownloader, fileReference, 1.0);
+ assertDownloadStatus(fileReference, 1.0);
assertEquals(timesToFail, responseHandler.failedTimes);
}
@@ -197,27 +197,27 @@ public class FileDownloaderTest {
public void getFileWhenDownloadInProgress() throws IOException, ExecutionException, InterruptedException {
ExecutorService executor = Executors.newFixedThreadPool(2);
String filename = "abc.jar";
- fileDownloader = new FileDownloader(connection, downloadDir, tempDir, Duration.ofSeconds(3), sleepBetweenRetries);
+ fileDownloader = new FileDownloader(connection, downloadDir, downloads, Duration.ofSeconds(3), sleepBetweenRetries);
File downloadDir = fileDownloader.downloadDirectory();
// Delay response so that we can make a second request while downloading the file from the first request
connection.setResponseHandler(new MockConnection.WaitResponseHandler(Duration.ofSeconds(1)));
- FileReference fileReference = new FileReference("fileReference");
+ FileReference fileReference = new FileReference("fileReference123");
File fileReferenceFullPath = fileReferenceFullPath(downloadDir, fileReference);
FileReferenceDownload fileReferenceDownload = new FileReferenceDownload(fileReference);
Future<Future<Optional<File>>> future1 = executor.submit(() -> fileDownloader.getFutureFile(fileReferenceDownload));
do {
Thread.sleep(10);
- } while (! fileDownloader.fileReferenceDownloader().isDownloading(fileReference));
- assertTrue(fileDownloader.fileReferenceDownloader().isDownloading(fileReference));
+ } while (! fileDownloader.isDownloading(fileReference));
+ assertTrue(fileDownloader.isDownloading(fileReference));
// Request file while download is in progress
Future<Future<Optional<File>>> future2 = executor.submit(() -> fileDownloader.getFutureFile(fileReferenceDownload));
// Receive file, will complete downloading and futures
- receiveFile(fileDownloader, fileReference, filename, FileReferenceData.Type.file, "some other content");
+ receiveFile(fileReference, filename, FileReferenceData.Type.file, "some other content");
// Check that we got file correctly with first request
Optional<File> downloadedFile = future1.get().get();
@@ -233,30 +233,30 @@ public class FileDownloaderTest {
}
@Test
- public void setFilesToDownload() throws IOException {
+ public void setFilesToDownload() {
Duration timeout = Duration.ofMillis(200);
MockConnection connectionPool = new MockConnection();
connectionPool.setResponseHandler(new MockConnection.WaitResponseHandler(timeout.plus(Duration.ofMillis(1000))));
- FileDownloader fileDownloader = new FileDownloader(connectionPool, downloadDir, tempDir, timeout, sleepBetweenRetries);
- FileReference foo = new FileReference("foo");
+ FileDownloader fileDownloader = new FileDownloader(connectionPool, downloadDir, downloads, timeout, sleepBetweenRetries);
+ FileReference xyzzy = new FileReference("xyzzy");
// Should download since we do not have the file on disk
- fileDownloader.downloadIfNeeded(new FileReferenceDownload(foo));
- assertTrue(fileDownloader.fileReferenceDownloader().isDownloading(foo));
- assertFalse(fileDownloader.getFile(foo).isPresent());
+ fileDownloader.downloadIfNeeded(new FileReferenceDownload(xyzzy));
+ assertTrue(fileDownloader.isDownloading(xyzzy));
+ assertFalse(fileDownloader.getFile(xyzzy).isPresent());
// Receive files to simulate download
- receiveFile();
+ receiveFile(xyzzy, "xyzzy.jar", FileReferenceData.Type.file, "content");
// Should not download, since file has already been downloaded
- fileDownloader.downloadIfNeeded(new FileReferenceDownload(foo));
+ fileDownloader.downloadIfNeeded(new FileReferenceDownload(xyzzy));
// and file should be available
- assertTrue(fileDownloader.getFile(foo).isPresent());
+ assertTrue(fileDownloader.getFile(xyzzy).isPresent());
}
@Test
public void receiveFile() throws IOException {
- FileReference foo = new FileReference("foo");
+ FileReference foobar = new FileReference("foobar");
String filename = "foo.jar";
- receiveFile(fileDownloader, foo, filename, FileReferenceData.Type.file, "content");
- File downloadedFile = new File(fileReferenceFullPath(downloadDir, foo), filename);
+ receiveFile(foobar, filename, FileReferenceData.Type.file, "content");
+ File downloadedFile = new File(fileReferenceFullPath(downloadDir, foobar), filename);
assertEquals("content", IOUtils.readFile(downloadedFile));
}
@@ -271,24 +271,26 @@ public class FileDownloaderTest {
return new File(dir, fileReference.value());
}
- private void assertDownloadStatus(FileDownloader fileDownloader, FileReference fileReference, double expectedDownloadStatus) {
- double downloadStatus = fileDownloader.downloadStatus(fileReference);
- assertEquals(expectedDownloadStatus, downloadStatus, 0.0001);
+ private void assertDownloadStatus(FileReference fileReference, double expectedDownloadStatus) {
+ double downloadStatus = downloads.downloadStatus(fileReference);
+ assertEquals("Download statuses: " + downloads.downloadStatuses().toString(),
+ expectedDownloadStatus,
+ downloadStatus,
+ 0.0001);
}
- private void receiveFile(FileDownloader fileDownloader, FileReference fileReference, String filename,
- FileReferenceData.Type type, String content) {
- receiveFile(fileDownloader, fileReference, filename, type, Utf8.toBytes(content));
+ private void receiveFile(FileReference fileReference, String filename, FileReferenceData.Type type, String content) {
+ receiveFile(fileReference, filename, type, Utf8.toBytes(content));
}
- private void receiveFile(FileDownloader fileDownloader, FileReference fileReference, String filename,
+ private void receiveFile(FileReference fileReference, String filename,
FileReferenceData.Type type, byte[] content) {
XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
FileReceiver.Session session =
- new FileReceiver.Session(downloadDir, tempDir, 1, fileReference, type, filename, content.length);
+ new FileReceiver.Session(downloadDir, 1, fileReference, type, filename, content.length);
session.addPart(0, content);
File file = session.close(hasher.hash(ByteBuffer.wrap(content), 0));
- fileDownloader.fileReferenceDownloader().completedDownloading(fileReference, file);
+ downloads.completedDownloading(fileReference, file);
}
private static class MockConnection implements ConnectionPool, com.yahoo.vespa.config.Connection {
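As an editorial aside: the FileDownloaderTest hunks above move download bookkeeping out of FileDownloader and into a shared status object the test calls downloads. A minimal sketch of the resulting call flow, using only methods visible in those hunks; the Downloads type name and the fetchAndReport helper are illustrative and not taken from this diff.

    // Illustrative sketch only; 'Downloads' is an assumed name for the shared status object.
    // Assumed imports: java.io.File, java.util.Optional, com.yahoo.config.FileReference,
    // plus the filedistribution classes changed above.
    static Optional<File> fetchAndReport(FileDownloader fileDownloader, Downloads downloads,
                                         FileReference ref) {
        fileDownloader.downloadIfNeeded(new FileReferenceDownload(ref));  // no-op if the file is already on disk
        double progress = downloads.downloadStatus(ref);                  // 0.0 .. 1.0, now tracked by 'downloads'
        boolean inFlight = fileDownloader.isDownloading(ref);             // in-progress state queried on the downloader itself
        return fileDownloader.getFile(ref);                               // present once completedDownloading() has run
    }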
diff --git a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java
index a9ddff655e3..69d4344d246 100644
--- a/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java
+++ b/filedistribution/src/test/java/com/yahoo/vespa/filedistribution/FileReceiverTest.java
@@ -21,7 +21,6 @@ import java.nio.file.Files;
public class FileReceiverTest {
private File root;
- private File tempDir;
private final XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
@Rule
@@ -30,7 +29,6 @@ public class FileReceiverTest {
@Before
public void setup() throws IOException {
root = temporaryFolder.newFolder("root");
- tempDir = temporaryFolder.newFolder("tmp");
}
@Test
@@ -70,7 +68,7 @@ public class FileReceiverTest {
private void transferPartsAndAssert(FileReference ref, String fileName, String all, int numParts) throws IOException {
byte [] allContent = Utf8.toBytes(all);
- FileReceiver.Session session = new FileReceiver.Session(root, tempDir, 1, ref,
+ FileReceiver.Session session = new FileReceiver.Session(root, 1, ref,
FileReferenceData.Type.file, fileName, allContent.length);
int partSize = (allContent.length+(numParts-1))/numParts;
ByteBuffer bb = ByteBuffer.wrap(allContent);
@@ -91,7 +89,7 @@ public class FileReceiverTest {
private void transferCompressedData(FileReference ref, String fileName, byte[] data) {
FileReceiver.Session session =
- new FileReceiver.Session(root, tempDir, 1, ref, FileReferenceData.Type.compressed, fileName, data.length);
+ new FileReceiver.Session(root, 1, ref, FileReferenceData.Type.compressed, fileName, data.length);
session.addPart(0, data);
session.close(hasher.hash(ByteBuffer.wrap(data), 0));
}
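For reference, a hedged sketch of the slimmed-down Session API after the tempDir parameter was removed; every call below appears in the test hunks above, and root stands in for the download directory used there.

    // Illustrative sketch only; parts are now written directly under the download directory.
    XXHash64 hasher = XXHashFactory.fastestInstance().hash64();
    byte[] content = Utf8.toBytes("hello");
    FileReceiver.Session session = new FileReceiver.Session(root, 1, new FileReference("ref"),
                                                            FileReferenceData.Type.file, "hello.txt",
                                                            content.length);
    session.addPart(0, content);                                          // add the single part
    File file = session.close(hasher.hash(ByteBuffer.wrap(content), 0));  // close() verifies the xxhash of the content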
diff --git a/flags/pom.xml b/flags/pom.xml
index 4f1bdcb61e3..3774ab3bf5f 100644
--- a/flags/pom.xml
+++ b/flags/pom.xml
@@ -93,6 +93,11 @@
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>org.junit.vintage</groupId>
+ <artifactId>junit-vintage-engine</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
<plugins>
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index c1750c73c2b..4885f5c9ae5 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -41,15 +41,8 @@ import static com.yahoo.vespa.flags.FetchVector.Dimension.ZONE_ID;
* @author hakonhall
*/
public class Flags {
- private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>();
- public static final UnboundStringFlag ALLOCATE_OS_REQUIREMENT = defineStringFlag(
- "allocate-os-requirement", "any",
- List.of("hakonhall"), "2021-01-26", "2021-07-26",
- "Allocations of new nodes are limited to the given host OS. Must be one of 'rhel7', " +
- "'rhel8', or 'any'",
- "Takes effect on next (re)deployment.",
- APPLICATION_ID);
+ private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>();
public static final UnboundDoubleFlag DEFAULT_TERM_WISE_LIMIT = defineDoubleFlag(
"default-term-wise-limit", 1.0,
@@ -146,14 +139,14 @@ public class Flags {
public static final UnboundBooleanFlag ENCRYPT_DISK = defineFeatureFlag(
"encrypt-disk", false,
- List.of("hakonhall"), "2021-05-05", "2021-06-05",
+ List.of("hakonhall"), "2021-05-05", "2021-08-05",
"Allow migrating an unencrypted data partition to being encrypted.",
"Takes effect on next host-admin tick.");
public static final UnboundBooleanFlag ENCRYPT_DIRTY_DISK = defineFeatureFlag(
"encrypt-dirty-disk", false,
- List.of("hakonhall"), "2021-05-14", "2021-06-05",
- "Allow migrating an unencrypted data partition to being encrypted when provisioned or dirty.",
+ List.of("hakonhall"), "2021-05-14", "2021-08-05",
+ "Allow migrating an unencrypted data partition to being encrypted when (de)provisioned.",
"Takes effect on next host-admin tick.");
public static final UnboundBooleanFlag ENABLE_FEED_BLOCK_IN_DISTRIBUTOR = defineFeatureFlag(
@@ -165,7 +158,7 @@ public class Flags {
public static final UnboundIntFlag METRICS_PROXY_MAX_HEAP_SIZE_IN_MB = defineIntFlag(
"metrics-proxy-max-heap-size-in-mb", 256,
- List.of("hmusum"), "2021-03-01", "2021-06-15",
+ List.of("hmusum"), "2021-03-01", "2021-07-01",
"JVM max heap size for metrics proxy in Mb",
"Takes effect when restarting metrics proxy",
CLUSTER_TYPE);
@@ -211,6 +204,20 @@ public class Flags {
"Takes effect after distributor restart",
ZONE_ID, APPLICATION_ID);
+ public static final UnboundIntFlag MAX_CONCURRENT_MERGES_PER_NODE = defineIntFlag(
+ "max-concurrent-merges-per-node", 16,
+ List.of("balder", "vekterli"), "2021-06-06", "2021-08-01",
+ "Specifies max concurrent merges per content node.",
+ "Takes effect at redeploy",
+ ZONE_ID, APPLICATION_ID);
+
+ public static final UnboundIntFlag MAX_MERGE_QUEUE_SIZE = defineIntFlag(
+ "max-merge-queue-size", 1024,
+ List.of("balder", "vekterli"), "2021-06-06", "2021-08-01",
+ "Specifies max size of merge queue.",
+ "Takes effect at redeploy",
+ ZONE_ID, APPLICATION_ID);
+
public static final UnboundBooleanFlag USE_EXTERNAL_RANK_EXPRESSION = defineFeatureFlag(
"use-external-rank-expression", false,
List.of("baldersheim"), "2021-05-24", "2021-07-01",
@@ -225,6 +232,13 @@ public class Flags {
"Takes effect on next internal redeployment",
APPLICATION_ID);
+ public static final UnboundIntFlag LARGE_RANK_EXPRESSION_LIMIT = defineIntFlag(
+ "large-rank-expression-limit", 0x10000,
+ List.of("baldersheim"), "2021-06-09", "2021-07-01",
+ "Limit for size of rank expressions distributed by filedistribution",
+ "Takes effect on next internal redeployment",
+ APPLICATION_ID);
+
public static final UnboundBooleanFlag ENABLE_ROUTING_CORE_DUMP = defineFeatureFlag(
"enable-routing-core-dumps", false,
List.of("tokle"), "2021-04-16", "2021-08-01",
@@ -239,19 +253,39 @@ public class Flags {
"Takes effect immediately",
APPLICATION_ID);
- public static final UnboundBooleanFlag VESPA_APP_DOMAIN_IN_CERTIFICATE = defineFeatureFlag(
- "new-domain-in-certificate", false,
- List.of("mpolden"), "2021-05-25", "2021-09-01",
- "Whether to include the vespa-app.cloud names in certificate requests",
- "Takes effect on next deployment through controller",
- APPLICATION_ID);
-
public static final UnboundIntFlag MAX_ENCRYPTING_HOSTS = defineIntFlag(
"max-encrypting-hosts", 0,
List.of("mpolden", "hakonhall"), "2021-05-27", "2021-10-01",
"The maximum number of hosts allowed to encrypt their disk concurrently",
"Takes effect on next run of HostEncrypter, but any currently encrypting hosts will not be cancelled when reducing the limit");
+ public static final UnboundBooleanFlag REQUIRE_CONNECTIVITY_CHECK = defineFeatureFlag(
+ "require-connectivity-check", false,
+ List.of("arnej"), "2021-06-03", "2021-09-01",
+ "Require that config-sentinel connectivity check passes with good quality before starting services",
+ "Takes effect on next restart",
+ ZONE_ID, APPLICATION_ID);
+
+ public static final UnboundBooleanFlag THROW_EXCEPTION_IF_RESOURCE_LIMITS_SPECIFIED = defineFeatureFlag(
+ "throw-exception-if-resource-limits-specified", false,
+ List.of("hmusum"), "2021-06-07", "2021-08-07",
+ "Whether to throw an exception in hosted Vespa if the application specifies resource limits in services.xml",
+ "Takes effect on next deployment through controller",
+ APPLICATION_ID);
+
+ public static final UnboundBooleanFlag MOVE_SEARCH_DEFINITIONS_TO_SCHEMAS_DIR = defineFeatureFlag(
+ "move-search-definitions-to-schemas-dir", false,
+ List.of("hmusum"), "2021-06-09", "2021-08-09",
+ "Whether to move files in searchdefinitions/ to schemas/ when deploying an application",
+ "Takes effect on next deployment",
+ ZONE_ID, APPLICATION_ID);
+
+ public static final UnboundBooleanFlag LOAD_LOCAL_SESSIONS_WHEN_BOOTSTRAPPING = defineFeatureFlag(
+ "load-local-sessions-when-bootstrapping", true,
+ List.of("hmusum"), "2021-06-15", "2021-07-15",
+ "Whether to load local sessions when bootstrapping config server",
+ "Takes effect on restart of config server");
+
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
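As an editorial aside, the merge-throttling flags added above would typically be read per application at deploy time. A minimal sketch, assuming the usual bindTo/with/value accessors on the bound flag, which are not shown in this diff.

    // Illustrative sketch only; bindTo/with/value are assumed accessors not shown in this diff.
    static int maxMergeQueueSize(FlagSource source, String applicationId) {
        return Flags.MAX_MERGE_QUEUE_SIZE.bindTo(source)
                    .with(FetchVector.Dimension.APPLICATION_ID, applicationId)
                    .value();
    }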
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java b/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java
index 92b7b3bc04d..ec49c1b0eff 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/JsonNodeRawFlag.java
@@ -6,6 +6,7 @@ import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Collection;
+import java.util.concurrent.atomic.AtomicReference;
import static com.yahoo.yolean.Exceptions.uncheck;
@@ -15,7 +16,8 @@ import static com.yahoo.yolean.Exceptions.uncheck;
* @author hakonhall
*/
public class JsonNodeRawFlag implements RawFlag {
- private static final ObjectMapper mapper = new ObjectMapper();
+
+ private static final AtomicReference<ObjectMapper> mapper = new AtomicReference<>();
private final JsonNode jsonNode;
@@ -24,7 +26,7 @@ public class JsonNodeRawFlag implements RawFlag {
}
public static JsonNodeRawFlag fromJson(String json) {
- return new JsonNodeRawFlag(uncheck(() -> mapper.readTree(json)));
+ return new JsonNodeRawFlag(uncheck(() -> objectMapper().readTree(json)));
}
public static JsonNodeRawFlag fromJsonNode(JsonNode jsonNode) {
@@ -32,20 +34,20 @@ public class JsonNodeRawFlag implements RawFlag {
}
public static <T> JsonNodeRawFlag fromJacksonClass(T value) {
- return new JsonNodeRawFlag(uncheck(() -> mapper.valueToTree(value)));
+ return new JsonNodeRawFlag(uncheck(() -> objectMapper().valueToTree(value)));
}
public <T> T toJacksonClass(Class<T> jacksonClass) {
- return uncheck(() -> mapper.treeToValue(jsonNode, jacksonClass));
+ return uncheck(() -> objectMapper().treeToValue(jsonNode, jacksonClass));
}
public <T> T toJacksonClass(JavaType jacksonClass) {
- return uncheck(() -> mapper.readValue(jsonNode.toString(), jacksonClass));
+ return uncheck(() -> objectMapper().readValue(jsonNode.toString(), jacksonClass));
}
@SuppressWarnings("rawtypes")
public static JavaType constructCollectionType(Class<? extends Collection> collectionClass, Class<?> elementClass) {
- return mapper.getTypeFactory().constructCollectionType(collectionClass, elementClass);
+ return objectMapper().getTypeFactory().constructCollectionType(collectionClass, elementClass);
}
@Override
@@ -57,4 +59,14 @@ public class JsonNodeRawFlag implements RawFlag {
public String asJson() {
return jsonNode.toString();
}
+
+ /** Initialize object mapper lazily */
+ private static ObjectMapper objectMapper() {
+ // ObjectMapper is a heavy-weight object so we construct it only when we need it
+ return mapper.updateAndGet((objectMapper) -> {
+ if (objectMapper != null) return objectMapper;
+ return new ObjectMapper();
+ });
+ }
+
}
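The JsonNodeRawFlag change above defers ObjectMapper construction to first use via an AtomicReference. As an editorial illustration (not part of the commit), the same lazy-initialization pattern in a self-contained form; Lazy and its factory parameter are invented names.

    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Supplier;

    final class Lazy<T> {

        private final AtomicReference<T> ref = new AtomicReference<>();
        private final Supplier<T> factory;

        Lazy(Supplier<T> factory) { this.factory = factory; }

        T get() {
            // Build the value only on first use; later calls return the already published instance.
            // Under contention the factory may run more than once, but only one result is kept.
            return ref.updateAndGet(current -> current != null ? current : factory.get());
        }
    }

Usage would mirror the flag class: new Lazy<>(ObjectMapper::new).get() hands out the single shared mapper.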
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index 52b09a281d5..1b7f0c034a3 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -163,6 +163,13 @@ public class PermanentFlags {
"Takes effect immediately, but any current excess rebuilds will not be cancelled"
);
+ public static final UnboundListFlag<String> EXTENDED_TRIAL_TENANTS = defineListFlag(
+ "extended-trial-tenants", List.of(), String.class,
+ "Tenants that will not be expired from their trial plan",
+ "Takes effect immediately, used by the CloudTrialExpirer maintainer",
+ TENANT_ID
+ );
+
private PermanentFlags() {}
private static UnboundBooleanFlag defineFeatureFlag(
diff --git a/fnet/src/vespa/fnet/connection.cpp b/fnet/src/vespa/fnet/connection.cpp
index 47d6a1e429a..4315e76f7ef 100644
--- a/fnet/src/vespa/fnet/connection.cpp
+++ b/fnet/src/vespa/fnet/connection.cpp
@@ -491,10 +491,10 @@ FNET_Connection::FNET_Connection(FNET_TransportThread *owner,
_packetCHID(0),
_writeWork(0),
_currentID(1), // <-- NB
- _input(FNET_READ_SIZE * 2),
+ _input(0),
_queue(256),
_myQueue(256),
- _output(FNET_WRITE_SIZE * 2),
+ _output(0),
_channels(),
_callbackTarget(nullptr),
_cleanup(nullptr)
@@ -525,10 +525,10 @@ FNET_Connection::FNET_Connection(FNET_TransportThread *owner,
_packetCHID(0),
_writeWork(0),
_currentID(0),
- _input(FNET_READ_SIZE * 2),
+ _input(0),
_queue(256),
_myQueue(256),
- _output(FNET_WRITE_SIZE * 2),
+ _output(0),
_channels(),
_callbackTarget(nullptr),
_cleanup(nullptr)
diff --git a/fnet/src/vespa/fnet/connection.h b/fnet/src/vespa/fnet/connection.h
index 3da9b58f928..532bd7c6638 100644
--- a/fnet/src/vespa/fnet/connection.h
+++ b/fnet/src/vespa/fnet/connection.h
@@ -11,6 +11,7 @@
#include <vespa/vespalib/net/socket_handle.h>
#include <vespa/vespalib/net/async_resolver.h>
#include <vespa/vespalib/net/crypto_socket.h>
+#include <vespa/vespalib/util/size_literals.h>
#include <atomic>
class FNET_IPacketStreamer;
@@ -60,9 +61,9 @@ public:
};
enum {
- FNET_READ_SIZE = 32768,
+ FNET_READ_SIZE = 16_Ki,
FNET_READ_REDO = 10,
- FNET_WRITE_SIZE = 32768,
+ FNET_WRITE_SIZE = 16_Ki,
FNET_WRITE_REDO = 10
};
diff --git a/fnet/src/vespa/fnet/frt/supervisor.cpp b/fnet/src/vespa/fnet/frt/supervisor.cpp
index 388d754ece4..d992567f776 100644
--- a/fnet/src/vespa/fnet/frt/supervisor.cpp
+++ b/fnet/src/vespa/fnet/frt/supervisor.cpp
@@ -430,4 +430,8 @@ StandaloneFRT::~StandaloneFRT()
_transport->ShutDown(true);
}
+void StandaloneFRT::shutdown() {
+ _transport->ShutDown(true);
+}
+
}
diff --git a/fnet/src/vespa/fnet/frt/supervisor.h b/fnet/src/vespa/fnet/frt/supervisor.h
index 1332bbe3ddb..2743cafae26 100644
--- a/fnet/src/vespa/fnet/frt/supervisor.h
+++ b/fnet/src/vespa/fnet/frt/supervisor.h
@@ -133,6 +133,7 @@ public:
explicit StandaloneFRT(std::shared_ptr<vespalib::CryptoEngine> crypto);
~StandaloneFRT();
FRT_Supervisor & supervisor() { return *_supervisor; }
+ void shutdown();
private:
std::unique_ptr<FastOS_ThreadPool> _threadPool;
std::unique_ptr<FNET_Transport> _transport;
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java
index 8b6ef83f05e..81a5305a778 100644
--- a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotator.java
@@ -115,9 +115,7 @@ public class LinguisticsAnnotator {
}
return;
}
- if ( ! token.isIndexable()) {
- return;
- }
+ if ( ! token.isIndexable()) return;
}
String orig = token.getOrig();
int pos = (int)token.getOffset();
@@ -138,9 +136,6 @@ public class LinguisticsAnnotator {
String lowercasedTerm = lowercasedOrig;
String term = token.getTokenString();
if (term != null) {
- term = tokenizer.getReplacementTerm(term);
- }
- if (term != null) {
lowercasedTerm = toLowerCase(term);
}
if (! lowercasedOrig.equals(lowercasedTerm)) {
@@ -155,12 +150,7 @@ public class LinguisticsAnnotator {
}
} else {
String term = token.getTokenString();
- if (term != null) {
- term = tokenizer.getReplacementTerm(term);
- }
- if (term == null || term.trim().isEmpty()) {
- return;
- }
+ if (term == null || term.trim().isEmpty()) return;
if (termOccurrences.termCountBelowLimit(term)) {
parent.span(pos, len).annotate(lowerCaseTermAnnotation(term, token.getOrig()));
}
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java
index afbcf597a46..5f436720990 100644
--- a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/linguistics/LinguisticsAnnotatorTestCase.java
@@ -19,6 +19,7 @@ import org.junit.Test;
import org.mockito.Mockito;
import java.util.*;
+import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -30,12 +31,6 @@ public class LinguisticsAnnotatorTestCase {
private static final AnnotatorConfig CONFIG = new AnnotatorConfig();
- // --------------------------------------------------------------------------------
- //
- // Tests
- //
- // --------------------------------------------------------------------------------
-
@Test
public void requireThatAnnotateFailsWithZeroTokens() {
assertAnnotations(null, "foo");
@@ -145,7 +140,7 @@ public class LinguisticsAnnotatorTestCase {
continue;
}
assertAnnotations(expected, "foo",
- newLinguistics(Arrays.asList(newToken("foo", "foo", type, specialToken)),
+ newLinguistics(List.of(newToken("foo", "foo", type, specialToken)),
Collections.singletonMap("foo", "bar")));
}
}
@@ -159,7 +154,7 @@ public class LinguisticsAnnotatorTestCase {
StringFieldValue val = new StringFieldValue("foo");
val.setSpanTree(spanTree);
- Linguistics linguistics = newLinguistics(Arrays.asList(newToken("foo", "bar", TokenType.ALPHABETIC, false)),
+ Linguistics linguistics = newLinguistics(List.of(newToken("foo", "bar", TokenType.ALPHABETIC, false)),
Collections.<String, String>emptyMap());
new LinguisticsAnnotator(linguistics, CONFIG).annotate(val);
@@ -253,11 +248,15 @@ public class LinguisticsAnnotatorTestCase {
private static class MyTokenizer implements Tokenizer {
final List<Token> tokens;
- final Map<String, String> replacementTerms;
public MyTokenizer(List<? extends Token> tokens, Map<String, String> replacementTerms) {
- this.tokens = new ArrayList<>(tokens);
- this.replacementTerms = replacementTerms;
+ this.tokens = tokens.stream().map(token -> replace(token, replacementTerms)).collect(Collectors.toList());
+ }
+
+ private Token replace(Token token, Map<String, String> replacementTerms) {
+ var simpleToken = (SimpleToken)token;
+ simpleToken.setTokenString(replacementTerms.getOrDefault(token.getTokenString(), token.getTokenString()));
+ return simpleToken;
}
@Override
@@ -265,10 +264,6 @@ public class LinguisticsAnnotatorTestCase {
return tokens;
}
- @Override
- public String getReplacementTerm(String term) {
- String replacement = replacementTerms.get(term);
- return replacement != null ? replacement : term;
- }
}
+
}
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java
index dd4b62ee494..6136bcdfd3a 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilter.java
@@ -3,12 +3,14 @@ package com.yahoo.jdisc.http.filter.security.athenz;
import com.google.inject.Inject;
import com.yahoo.jdisc.Metric;
+import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.jdisc.http.filter.DiscFilterRequest;
import com.yahoo.jdisc.http.filter.security.athenz.RequestResourceMapper.ResourceNameAndAction;
import com.yahoo.jdisc.http.filter.security.base.JsonSecurityRequestFilterBase;
import com.yahoo.vespa.athenz.api.AthenzAccessToken;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzPrincipal;
+import com.yahoo.vespa.athenz.api.AthenzRole;
import com.yahoo.vespa.athenz.api.ZToken;
import com.yahoo.vespa.athenz.tls.AthenzX509CertificateUtils;
import com.yahoo.vespa.athenz.utils.AthenzIdentities;
@@ -20,6 +22,7 @@ import java.security.cert.X509Certificate;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.logging.Level;
@@ -56,16 +59,20 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
private final RequestResourceMapper requestResourceMapper;
private final Metric metric;
private final Set<AthenzIdentity> allowedProxyIdentities;
+ private final Optional<AthenzRole> readRole;
+ private final Optional<AthenzRole> writeRole;
@Inject
public AthenzAuthorizationFilter(AthenzAuthorizationFilterConfig config, RequestResourceMapper resourceMapper, Metric metric) {
- this(config, resourceMapper, new DefaultZpe(), metric);
+ this(config, resourceMapper, new DefaultZpe(), metric, null, null);
}
public AthenzAuthorizationFilter(AthenzAuthorizationFilterConfig config,
RequestResourceMapper resourceMapper,
Zpe zpe,
- Metric metric) {
+ Metric metric,
+ AthenzRole readRole,
+ AthenzRole writeRole) {
this.roleTokenHeaderName = config.roleTokenHeaderName();
List<EnabledCredentials.Enum> enabledCredentials = config.enabledCredentials();
this.enabledCredentials = enabledCredentials.isEmpty()
@@ -77,6 +84,8 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
this.allowedProxyIdentities = config.allowedProxyIdentities().stream()
.map(AthenzIdentities::from)
.collect(Collectors.toSet());
+ this.readRole = Optional.ofNullable(readRole);
+ this.writeRole = Optional.ofNullable(writeRole);
}
@Override
@@ -86,7 +95,7 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
requestResourceMapper.getResourceNameAndAction(request);
log.log(Level.FINE, () -> String.format("Resource mapping for '%s': %s", request, resourceMapping));
if (resourceMapping.isEmpty()) {
- incrementAcceptedMetrics(request, false);
+ incrementAcceptedMetrics(request, false, Optional.empty());
return Optional.empty();
}
Result result = checkAccessAllowed(request, resourceMapping.get());
@@ -94,15 +103,15 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
setAttribute(request, RESULT_ATTRIBUTE, resultType.name());
if (resultType == AuthorizationResult.Type.ALLOW) {
populateRequestWithResult(request, result);
- incrementAcceptedMetrics(request, true);
+ incrementAcceptedMetrics(request, true, Optional.of(result));
return Optional.empty();
}
log.log(Level.FINE, () -> String.format("Forbidden (403) for '%s': %s", request, resultType.name()));
- incrementRejectedMetrics(request, FORBIDDEN, resultType.name());
+ incrementRejectedMetrics(request, FORBIDDEN, resultType.name(), Optional.of(result));
return Optional.of(new ErrorResponse(FORBIDDEN, "Access forbidden: " + resultType.getDescription()));
} catch (IllegalArgumentException e) {
log.log(Level.FINE, () -> String.format("Unauthorized (401) for '%s': %s", request, e.getMessage()));
- incrementRejectedMetrics(request, UNAUTHORIZED, "Unauthorized");
+ incrementRejectedMetrics(request, UNAUTHORIZED, "Unauthorized", Optional.empty());
return Optional.of(new ErrorResponse(UNAUTHORIZED, e.getMessage()));
}
}
@@ -130,33 +139,53 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
X509Certificate identityCertificate = getClientCertificate(request).get();
AthenzIdentity peerIdentity = AthenzIdentities.from(identityCertificate);
if (allowedProxyIdentities.contains(peerIdentity)) {
- return checkAccessWithProxiedAccessToken(resourceAndAction, accessToken, identityCertificate);
+ return checkAccessWithProxiedAccessToken(request, resourceAndAction, accessToken, identityCertificate);
} else {
var zpeResult = zpe.checkAccessAllowed(
accessToken, identityCertificate, resourceAndAction.resourceName(), resourceAndAction.action());
- return new Result(ACCESS_TOKEN, peerIdentity, zpeResult);
+ return getResult(ACCESS_TOKEN, peerIdentity, zpeResult, request, resourceAndAction, mapToRequestPrivileges(accessToken.roles()));
}
}
- private Result checkAccessWithProxiedAccessToken(ResourceNameAndAction resourceAndAction, AthenzAccessToken accessToken, X509Certificate identityCertificate) {
+ private Result getResult(EnabledCredentials.Enum credentialType, AthenzIdentity identity, AuthorizationResult zpeResult, DiscFilterRequest request, ResourceNameAndAction resourceAndAction, List<String> privileges) {
+ String currentAction = resourceAndAction.action();
+ String futureAction = resourceAndAction.futureAction();
+ return new Result(credentialType, identity, zpeResult, privileges, currentAction, futureAction);
+ }
+
+ private List<String> mapToRequestPrivileges(List<AthenzRole> roles) {
+ return roles.stream()
+ .map(this::rolePrivilege)
+ .filter(Objects::nonNull)
+ .collect(Collectors.toList());
+ }
+
+ private String rolePrivilege(AthenzRole role) {
+ if (readRole.stream().anyMatch(role::equals)) return "read";
+ if (writeRole.stream().anyMatch(role::equals)) return "write";
+ return null;
+ }
+
+ private Result checkAccessWithProxiedAccessToken(DiscFilterRequest request, ResourceNameAndAction resourceAndAction, AthenzAccessToken accessToken, X509Certificate identityCertificate) {
AthenzIdentity proxyIdentity = AthenzIdentities.from(identityCertificate);
log.log(Level.FINE,
() -> String.format("Checking proxied access token. Proxy identity: '%s'. Allowed identities: %s", proxyIdentity, allowedProxyIdentities));
var zpeResult = zpe.checkAccessAllowed(accessToken, resourceAndAction.resourceName(), resourceAndAction.action());
- return new Result(ACCESS_TOKEN, AthenzIdentities.from(identityCertificate), zpeResult);
+ return getResult(ACCESS_TOKEN, AthenzIdentities.from(identityCertificate), zpeResult, request, resourceAndAction, mapToRequestPrivileges(accessToken.roles()));
}
private Result checkAccessWithRoleCertificate(DiscFilterRequest request, ResourceNameAndAction resourceAndAction) {
X509Certificate roleCertificate = getClientCertificate(request).get();
var zpeResult = zpe.checkAccessAllowed(roleCertificate, resourceAndAction.resourceName(), resourceAndAction.action());
AthenzIdentity identity = AthenzX509CertificateUtils.getIdentityFromRoleCertificate(roleCertificate);
- return new Result(ROLE_CERTIFICATE, identity, zpeResult);
+ return getResult(ROLE_CERTIFICATE, identity, zpeResult, request, resourceAndAction, mapToRequestPrivileges(List.of(AthenzX509CertificateUtils.getRolesFromRoleCertificate(roleCertificate))));
}
private Result checkAccessWithRoleToken(DiscFilterRequest request, ResourceNameAndAction resourceAndAction) {
ZToken roleToken = getRoleToken(request);
var zpeResult = zpe.checkAccessAllowed(roleToken, resourceAndAction.resourceName(), resourceAndAction.action());
- return new Result(ROLE_TOKEN, roleToken.getIdentity(), zpeResult);
+ return getResult(ROLE_TOKEN, roleToken.getIdentity(), zpeResult, request, resourceAndAction, mapToRequestPrivileges(roleToken.getRoles()));
}
private static boolean isAccessTokenPresent(DiscFilterRequest request) {
@@ -246,20 +275,30 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
request.setAttribute(name, value);
}
- private void incrementAcceptedMetrics(DiscFilterRequest request, boolean authzRequired) {
+ private void incrementAcceptedMetrics(DiscFilterRequest request, boolean authzRequired, Optional<Result> result) {
String hostHeader = request.getHeader("Host");
Metric.Context context = metric.createContext(Map.of(
"endpoint", hostHeader != null ? hostHeader : "",
- "authz-required", Boolean.toString(authzRequired)));
+ "authz-required", Boolean.toString(authzRequired),
+ "httpMethod", HttpRequest.Method.valueOf(request.getMethod()).name(),
+ "requestPrivileges", result.map(r -> String.join(",", r.requestPrivileges)).orElse(""),
+ "currentRequestMapping", result.map(r -> r.currentAction).orElse(""),
+ "futureRequestMapping", result.map(r -> r.futureAction).orElse("")
+ ));
metric.add(ACCEPTED_METRIC_NAME, 1L, context);
}
- private void incrementRejectedMetrics(DiscFilterRequest request, int statusCode, String zpeCode) {
+ private void incrementRejectedMetrics(DiscFilterRequest request, int statusCode, String zpeCode, Optional<Result> result) {
String hostHeader = request.getHeader("Host");
Metric.Context context = metric.createContext(Map.of(
"endpoint", hostHeader != null ? hostHeader : "",
"status-code", Integer.toString(statusCode),
- "zpe-status", zpeCode));
+ "zpe-status", zpeCode,
+ "httpMethod", HttpRequest.Method.valueOf(request.getMethod()).name(),
+ "requestPrivileges", result.map(r -> String.join(",", r.requestPrivileges)).orElse(""),
+ "currentRequestMapping", result.map(r -> r.currentAction).orElse(""),
+ "futureRequestMapping", result.map(r -> r.futureAction).orElse("")
+ ));
metric.add(REJECTED_METRIC_NAME, 1L, context);
}
@@ -267,11 +306,17 @@ public class AthenzAuthorizationFilter extends JsonSecurityRequestFilterBase {
final EnabledCredentials.Enum credentialType;
final AthenzIdentity identity;
final AuthorizationResult zpeResult;
+ final List<String> requestPrivileges;
+ final String currentAction;
+ final String futureAction;
- Result(EnabledCredentials.Enum credentialType, AthenzIdentity identity, AuthorizationResult zpeResult) {
+ public Result(EnabledCredentials.Enum credentialType, AthenzIdentity identity, AuthorizationResult zpeResult, List<String> requestPrivileges, String currentAction, String futureAction) {
this.credentialType = credentialType;
this.identity = identity;
this.zpeResult = zpeResult;
+ this.requestPrivileges = requestPrivileges;
+ this.currentAction = currentAction;
+ this.futureAction = futureAction;
}
}
}
diff --git a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java
index 56c52bd71c4..c962e973959 100644
--- a/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java
+++ b/jdisc-security-filters/src/main/java/com/yahoo/jdisc/http/filter/security/athenz/RequestResourceMapper.java
@@ -28,10 +28,15 @@ public interface RequestResourceMapper {
class ResourceNameAndAction {
private final AthenzResourceName resourceName;
private final String action;
+ private final String futureAction;
public ResourceNameAndAction(AthenzResourceName resourceName, String action) {
+ this(resourceName, action, action);
+ }
+ public ResourceNameAndAction(AthenzResourceName resourceName, String action, String futureAction) {
this.resourceName = resourceName;
this.action = action;
+ this.futureAction = futureAction;
}
public AthenzResourceName resourceName() {
@@ -42,6 +47,14 @@ public interface RequestResourceMapper {
return action;
}
+ public ResourceNameAndAction withFutureAction(String futureAction) {
+ return new ResourceNameAndAction(resourceName, action, futureAction);
+ }
+
+ public String futureAction() {
+ return futureAction;
+ }
+
@Override
public String toString() {
return "ResourceNameAndAction{" +
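The futureAction field added above lets a resource mapper announce an upcoming action mapping without enforcing it yet; the filter only surfaces it in the metric context as futureRequestMapping. A minimal sketch, assuming an already-built AthenzResourceName.

    // Illustrative sketch only; 'resource' is assumed to be an existing AthenzResourceName.
    ResourceNameAndAction mapping(AthenzResourceName resource) {
        return new ResourceNameAndAction(resource, "read")  // action enforced by the zpe check today
                .withFutureAction("update");                // only reported in metrics for now
    }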
diff --git a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java
index bfe02d1f279..137e4653670 100644
--- a/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java
+++ b/jdisc-security-filters/src/test/java/com/yahoo/jdisc/http/filter/security/athenz/AthenzAuthorizationFilterTest.java
@@ -296,7 +296,9 @@ public class AthenzAuthorizationFilterTest {
.allowedProxyIdentities(allowedProxyIdentities)),
new StaticRequestResourceMapper(RESOURCE_NAME, ACTION),
zpe,
- metric);
+ metric,
+ new AthenzRole("domain","reader"),
+ new AthenzRole("domain", "writer"));
}
private static void assertAuthorizationResult(DiscFilterRequest request, Type expectedResult) {
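Mirroring the test change above, a hedged sketch of constructing the filter with explicit read and write roles so that request privileges can be attached to metrics; the config, mapper, zpe and metric arguments are assumed to be built elsewhere.

    // Illustrative sketch only; arguments other than the two roles are assumed to exist already.
    AthenzAuthorizationFilter newFilter(AthenzAuthorizationFilterConfig config,
                                        RequestResourceMapper resourceMapper,
                                        Zpe zpe, Metric metric) {
        return new AthenzAuthorizationFilter(config, resourceMapper, zpe, metric,
                                             new AthenzRole("domain", "reader"),  // reported as the "read" privilege
                                             new AthenzRole("domain", "writer")); // reported as the "write" privilege
    }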
diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/test/ServerProviderConformanceTest.java b/jdisc_core/src/main/java/com/yahoo/jdisc/test/ServerProviderConformanceTest.java
index c9ce0e0b2e5..79fe8ea11d4 100644
--- a/jdisc_core/src/main/java/com/yahoo/jdisc/test/ServerProviderConformanceTest.java
+++ b/jdisc_core/src/main/java/com/yahoo/jdisc/test/ServerProviderConformanceTest.java
@@ -45,7 +45,7 @@ public abstract class ServerProviderConformanceTest {
private static final Logger log = Logger.getLogger(ServerProviderConformanceTest.class.getName());
- private static final int NUM_RUNS_EACH_TEST = 10;
+ private static final int NUM_RUNS_EACH_TEST = 1;
/**
* <p>This interface declares the adapter between the general conformance test and an actual <code>ServerProvider</code>
diff --git a/jrt/src/com/yahoo/jrt/Buffer.java b/jrt/src/com/yahoo/jrt/Buffer.java
index 937666a28ae..06a658740e5 100644
--- a/jrt/src/com/yahoo/jrt/Buffer.java
+++ b/jrt/src/com/yahoo/jrt/Buffer.java
@@ -6,9 +6,6 @@ import java.nio.ByteBuffer;
class Buffer {
-
- static final int MAX_IO = 65000;
-
private ByteBuffer buf;
private int readPos;
private int writePos;
@@ -111,20 +108,4 @@ class Buffer {
ensureFree(minFree);
return buf;
}
-
- public ByteBuffer getChannelReadable() {
- ByteBuffer bb = getReadable();
- if (bb.remaining() > MAX_IO) {
- bb.limit(bb.position() + MAX_IO);
- }
- return bb;
- }
-
- public ByteBuffer getChannelWritable(int minFree) {
- ByteBuffer bb = getWritable(minFree);
- if (bb.remaining() > MAX_IO) {
- bb.limit(bb.position() + MAX_IO);
- }
- return bb;
- }
}
diff --git a/jrt/src/com/yahoo/jrt/Connection.java b/jrt/src/com/yahoo/jrt/Connection.java
index 891558684ed..6158576348a 100644
--- a/jrt/src/com/yahoo/jrt/Connection.java
+++ b/jrt/src/com/yahoo/jrt/Connection.java
@@ -19,9 +19,9 @@ class Connection extends Target {
private static final Logger log = Logger.getLogger(Connection.class.getName());
- private static final int READ_SIZE = 32768;
+ private static final int READ_SIZE = 16*1024;
private static final int READ_REDO = 10;
- private static final int WRITE_SIZE = 32768;
+ private static final int WRITE_SIZE = 16*1024;
private static final int WRITE_REDO = 10;
private static final int INITIAL = 0;
@@ -32,11 +32,11 @@ class Connection extends Target {
private int state = INITIAL;
private final Queue queue = new Queue();
private final Queue myQueue = new Queue();
- private final Buffer input = new Buffer(0x1000); // Start off with small buffer.
- private final Buffer output = new Buffer(0x1000); // Start off with small buffer.
- private int maxInputSize = 64*1024;
- private int maxOutputSize = 64*1024;
- private boolean dropEmptyBuffers = false;
+ private final Buffer input = new Buffer(0); // Start off with empty buffer.
+ private final Buffer output = new Buffer(0); // Start off with empty buffer.
+ private final int maxInputSize;
+ private final int maxOutputSize;
+ private final boolean dropEmptyBuffers;
private final boolean tcpNoDelay;
private final Map<Integer, ReplyHandler> replyMap = new HashMap<>();
private final Map<TargetWatcher, TargetWatcher> watchers = new IdentityHashMap<>();
@@ -98,6 +98,9 @@ class Connection extends Target {
this.socket = parent.transport().createServerCryptoSocket(channel);
this.spec = null;
this.tcpNoDelay = tcpNoDelay;
+ maxInputSize = owner.getMaxInputBufferSize();
+ maxOutputSize = owner.getMaxOutputBufferSize();
+ dropEmptyBuffers = owner.getDropEmptyBuffers();
server = true;
owner.sessionInit(this);
}
@@ -108,22 +111,13 @@ class Connection extends Target {
this.owner = owner;
this.spec = spec;
this.tcpNoDelay = tcpNoDelay;
+ maxInputSize = owner.getMaxInputBufferSize();
+ maxOutputSize = owner.getMaxOutputBufferSize();
+ dropEmptyBuffers = owner.getDropEmptyBuffers();
server = false;
owner.sessionInit(this);
}
- public void setMaxInputSize(int bytes) {
- maxInputSize = bytes;
- }
-
- public void setMaxOutputSize(int bytes) {
- maxOutputSize = bytes;
- }
-
- public void setDropEmptyBuffers(boolean value) {
- dropEmptyBuffers = value;
- }
-
public TransportThread transportThread() {
return parent;
}
@@ -235,7 +229,7 @@ class Connection extends Target {
readSize = socket.getMinimumReadBufferSize();
}
setState(CONNECTED);
- while (socket.drain(input.getChannelWritable(readSize)) > 0) {
+ while (socket.drain(input.getWritable(readSize)) > 0) {
handlePackets();
}
break;
@@ -302,14 +296,14 @@ class Connection extends Target {
private void read() throws IOException {
boolean doneRead = false;
for (int i = 0; !doneRead && i < READ_REDO; i++) {
- ByteBuffer wb = input.getChannelWritable(readSize);
+ ByteBuffer wb = input.getWritable(readSize);
if (socket.read(wb) == -1) {
throw new IOException("jrt: Connection closed by peer");
}
doneRead = (wb.remaining() > 0);
handlePackets();
}
- while (socket.drain(input.getChannelWritable(readSize)) > 0) {
+ while (socket.drain(input.getWritable(readSize)) > 0) {
handlePackets();
}
if (dropEmptyBuffers) {
@@ -346,7 +340,7 @@ class Connection extends Target {
owner.writePacket(info);
info.encodePacket(packet, wb);
}
- ByteBuffer rb = output.getChannelReadable();
+ ByteBuffer rb = output.getReadable();
if (rb.remaining() == 0) {
break;
}
diff --git a/jrt/src/com/yahoo/jrt/Supervisor.java b/jrt/src/com/yahoo/jrt/Supervisor.java
index d7c2c83ea69..b82664b2f56 100644
--- a/jrt/src/com/yahoo/jrt/Supervisor.java
+++ b/jrt/src/com/yahoo/jrt/Supervisor.java
@@ -21,8 +21,8 @@ public class Supervisor {
private SessionHandler sessionHandler = null;
private final Object methodMapLock = new Object();
private final AtomicReference<HashMap<String, Method>> methodMap = new AtomicReference<>(new HashMap<>());
- private int maxInputBufferSize = 0;
- private int maxOutputBufferSize = 0;
+ private int maxInputBufferSize = 64*1024;
+ private int maxOutputBufferSize = 64*1024;
private boolean dropEmptyBuffers = false;
/**
@@ -37,16 +37,6 @@ public class Supervisor {
}
/**
- * Will optimize buffers size for small memory footprint
- * Use this when you have many connections with very little traffic.
- **/
- public Supervisor useSmallBuffers() {
- setMaxInputBufferSize(SMALL_INPUT_BUFFER_SIZE);
- setMaxOutputBufferSize(SMALL_OUTPUT_BUFFER_SIZE);
- return this;
- }
-
- /**
* Drop empty buffers. This will reduce memory footprint for idle
* connections at the cost of extra allocations when buffer space
* is needed again.
@@ -57,6 +47,7 @@ public class Supervisor {
dropEmptyBuffers = value;
return this;
}
+ boolean getDropEmptyBuffers() { return dropEmptyBuffers; }
/**
* Set maximum input buffer size. This value will only affect
@@ -71,6 +62,7 @@ public class Supervisor {
public void setMaxInputBufferSize(int bytes) {
maxInputBufferSize = bytes;
}
+ int getMaxInputBufferSize() { return maxInputBufferSize; }
/**
* Set maximum output buffer size. This value will only affect
@@ -85,6 +77,7 @@ public class Supervisor {
public void setMaxOutputBufferSize(int bytes) {
maxOutputBufferSize = bytes;
}
+ int getMaxOutputBufferSize() { return maxOutputBufferSize; }
/**
* Obtain the method map for this Supervisor
@@ -202,12 +195,6 @@ public class Supervisor {
* @param target the target
**/
void sessionInit(Target target) {
- if (target instanceof Connection) {
- Connection conn = (Connection) target;
- conn.setMaxInputSize(maxInputBufferSize);
- conn.setMaxOutputSize(maxOutputBufferSize);
- conn.setDropEmptyBuffers(dropEmptyBuffers);
- }
SessionHandler handler = sessionHandler;
if (handler != null) {
handler.handleSessionInit(target);
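With useSmallBuffers() removed above, a hedged sketch of the equivalent configuration after this change: buffers start empty, the maximum sizes default to 64*1024, and callers that want a small footprint only need to drop empty buffers.

    // Illustrative sketch only, using the jrt Supervisor/Transport calls shown in this diff.
    Supervisor orb = new Supervisor(new Transport("sentinel-client")).setDropEmptyBuffers(true);
    orb.setMaxInputBufferSize(64 * 1024);   // optional; matches the new default
    orb.setMaxOutputBufferSize(64 * 1024);  // optional; matches the new default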
diff --git a/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java b/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java
index 7ba83d6718e..09bb584c983 100644
--- a/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java
+++ b/jrt/src/com/yahoo/jrt/TlsCryptoSocket.java
@@ -51,11 +51,13 @@ public class TlsCryptoSocket implements CryptoSocket {
public TlsCryptoSocket(SocketChannel channel, SSLEngine sslEngine) {
this.channel = channel;
this.sslEngine = sslEngine;
+ this.wrapBuffer = new Buffer(0);
+ this.unwrapBuffer = new Buffer(0);
SSLSession nullSession = sslEngine.getSession();
- this.wrapBuffer = new Buffer(Math.max(0x8000, nullSession.getPacketBufferSize()));
- this.unwrapBuffer = new Buffer(Math.max(0x8000, nullSession.getPacketBufferSize()));
+ sessionApplicationBufferSize = nullSession.getApplicationBufferSize();
+ sessionPacketBufferSize = nullSession.getPacketBufferSize();
// Note: Dummy buffer as unwrap requires a full size application buffer even though no application data is unwrapped
- this.handshakeDummyBuffer = ByteBuffer.allocate(nullSession.getApplicationBufferSize());
+ this.handshakeDummyBuffer = ByteBuffer.allocate(sessionApplicationBufferSize);
this.handshakeState = HandshakeState.NOT_STARTED;
log.fine(() -> "Initialized with " + sslEngine.toString());
}
diff --git a/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java b/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java
index 854cd973e4d..6d66a38406a 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java
@@ -39,7 +39,7 @@ public class Slobrok {
public Slobrok(int port) throws ListenFailedException {
// NB: rpc must be single-threaded
- orb = new Supervisor(new Transport("slobrok-" + port, 1)).useSmallBuffers();
+ orb = new Supervisor(new Transport("slobrok-" + port, 1)).setDropEmptyBuffers(true);
registerMethods();
try {
listener = orb.listen(new Spec(port));
diff --git a/jrt/tests/com/yahoo/jrt/BufferTest.java b/jrt/tests/com/yahoo/jrt/BufferTest.java
index 7f3145365d9..10f1fbc17d3 100644
--- a/jrt/tests/com/yahoo/jrt/BufferTest.java
+++ b/jrt/tests/com/yahoo/jrt/BufferTest.java
@@ -28,7 +28,7 @@ public class BufferTest {
@org.junit.Test
public void testBuffer() {
- int size = Buffer.MAX_IO + (Buffer.MAX_IO / 10);
+ int size = 70*1024;
Buffer buf = new Buffer(1024);
ByteBuffer b = null;
@@ -118,62 +118,6 @@ public class BufferTest {
}
@org.junit.Test
- public void testBufferMax() {
- int size = Buffer.MAX_IO + (Buffer.MAX_IO / 10);
- Buffer buf = new Buffer(1024);
- ByteBuffer b = null;
-
- byte[] x = new byte[size];
- byte[] y = new byte[size];
-
- Arrays.fill(x, (byte) 10);
- Arrays.fill(y, (byte) 55);
-
- assertEquals(buf.bytes(), 0);
- assertFalse(Arrays.equals(x, y));
-
- b = buf.getChannelWritable(size);
- assertEquals(b.remaining(), Buffer.MAX_IO);
- assertTrue(b.remaining() < size);
- assertEquals(buf.bytes(), 0);
- b.put(x, 0, Buffer.MAX_IO);
- assertEquals(buf.bytes(), Buffer.MAX_IO);
- assertEquals(b.remaining(), 0);
-
- b = buf.getChannelWritable(size - Buffer.MAX_IO);
- assertTrue(b.remaining() >= size - Buffer.MAX_IO);
- assertEquals(buf.bytes(), Buffer.MAX_IO);
- b.put(x, Buffer.MAX_IO, x.length - Buffer.MAX_IO);
- assertEquals(buf.bytes(), size);
-
- b = buf.getChannelReadable();
- assertEquals(buf.bytes(), size);
-
- b = buf.getChannelWritable(512);
- assertEquals(buf.bytes(), size);
- b.put((byte)42);
- assertEquals(buf.bytes(), size + 1);
-
- b = buf.getChannelReadable();
- assertEquals(buf.bytes(), size + 1);
- assertEquals(b.remaining(), Buffer.MAX_IO);
- b.get(y, 0, Buffer.MAX_IO);
- assertEquals(buf.bytes(), size - Buffer.MAX_IO + 1);
-
- b = buf.getChannelReadable();
- assertEquals(buf.bytes(), size - Buffer.MAX_IO + 1);
- assertEquals(b.remaining(), size - Buffer.MAX_IO + 1);
- b.get(y, Buffer.MAX_IO, y.length - Buffer.MAX_IO);
- assertEquals(buf.bytes(), 1);
- assertEquals(b.remaining(), 1);
- assertEquals(b.get(), 42);
- assertEquals(buf.bytes(), 0);
- assertEquals(b.remaining(), 0);
-
- assertTrue(Arrays.equals(x, y));
- }
-
- @org.junit.Test
public void testBufferShrink() {
Buffer buf = new Buffer(500);
ByteBuffer b = null;
diff --git a/linguistics/src/main/java/com/yahoo/language/process/Token.java b/linguistics/src/main/java/com/yahoo/language/process/Token.java
index 73c0ac857ab..70b78ef1a92 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/Token.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/Token.java
@@ -38,12 +38,12 @@ public interface Token {
TokenScript getScript();
/**
- * Returns token string in a form suitable for indexing: The
- * most lowercased variant of the most processed token form available.
+ * Returns the token string in a form suitable for indexing: The
+ * most lowercased variant of the most processed token form available,
* If called on a compound token this returns a lowercased form of the
* entire word.
- *
- * @return token string value
+ * If this is a special token with a configured replacement,
+ * this will return the replacement token.
*/
String getTokenString();
diff --git a/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java b/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java
index 7e61cd885a8..5be0a6fa635 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/Tokenizer.java
@@ -23,16 +23,11 @@ public interface Tokenizer {
Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents);
/**
- * Return a replacement for an input token string.
- * This accepts strings returned by Token.getTokenString
- * and returns a replacement which will be used as the index token.
- * The input token string is returned if there is no replacement.
- * <p>
- * This default implementation always returns the input token string.
+ * Not used.
*
- * @param tokenString the token string of the term to lookup a replacement for
- * @return the replacement, if any, or the argument token string if not
+ * @deprecated replacements are already applied in tokens returned by tokenize
*/
+ @Deprecated // Remove on Vespa 8
default String getReplacementTerm(String tokenString) { return tokenString; }
}
diff --git a/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java b/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java
index 122b9b6dff6..7b63650fa94 100644
--- a/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java
+++ b/linguistics/src/main/java/com/yahoo/language/simple/SimpleToken.java
@@ -25,6 +25,10 @@ public class SimpleToken implements Token {
this.orig = orig;
}
+ public SimpleToken(String orig, String tokenString) {
+ this.orig = orig;
+ this.tokenString = tokenString;
+ }
+
@Override
public String getOrig() {
return orig;
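With Tokenizer.getReplacementTerm deprecated above, replacement terms are expected to be applied by the tokenizer itself before tokens are returned, as the updated LinguisticsAnnotator test does. A minimal sketch using the SimpleToken accessors shown in this diff; the helper name and the replacementTerms map are illustrative.

    // Illustrative sketch only; replacementTerms stands in for tokenizer configuration (java.util.Map).
    static SimpleToken tokenWithReplacement(String orig, Map<String, String> replacementTerms) {
        SimpleToken token = new SimpleToken(orig, orig);                  // token string starts as the original text
        token.setTokenString(replacementTerms.getOrDefault(orig, orig));  // pre-apply any configured replacement
        return token;
    }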
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java b/messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java
index aa1bc1ce624..52b92737bb9 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/StaticThrottlePolicy.java
@@ -7,7 +7,7 @@ package com.yahoo.messagebus;
* way of {@link #setMaxPendingCount(int)}), the total size of pending messages (by way of {@link
* #setMaxPendingSize(long)}), or some combination thereof.
*
- * <b>NOTE:</b> By context, "pending" is refering to the number of sent messages that have not been replied to yet.
+ * <b>NOTE:</b> By context, "pending" refers to the number of sent messages that have not been replied to yet.
*
* @author Simon Thoresen Hult
*/
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java
index 9d93b440a1d..881ed19ce0c 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/rpc/RpcConnector.java
@@ -27,7 +27,7 @@ public class RpcConnector extends AbstractComponent {
private final Acceptor acceptor;
public RpcConnector(RpcConnectorConfig config) {
- supervisor = new Supervisor(new Transport("rpc-" + config.port())).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("rpc-" + config.port())).setDropEmptyBuffers(true);
Spec spec = new Spec(config.port());
try {
acceptor = supervisor.listen(spec);
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java
index ab05e778ea6..d07a52f42bd 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/service/ConfigSentinelClient.java
@@ -28,7 +28,7 @@ public class ConfigSentinelClient extends AbstractComponent {
@Inject
public ConfigSentinelClient() {
- supervisor = new Supervisor(new Transport("sentinel-client")).useSmallBuffers();
+ supervisor = new Supervisor(new Transport("sentinel-client")).setDropEmptyBuffers(true);
}
@Override
diff --git a/metrics/src/tests/summetrictest.cpp b/metrics/src/tests/summetrictest.cpp
index e3d58659daf..d0380a630f1 100644
--- a/metrics/src/tests/summetrictest.cpp
+++ b/metrics/src/tests/summetrictest.cpp
@@ -125,4 +125,45 @@ TEST(SumMetricTest, test_start_value)
EXPECT_EQ(int64_t(60), sum.getLongValue("value"));
}
+namespace {
+
+struct MetricSetWithSum : public MetricSet
+{
+ LongValueMetric _v1;
+ LongValueMetric _v2;
+ SumMetric<LongValueMetric> _sum;
+ MetricSetWithSum();
+ ~MetricSetWithSum() override;
+};
+
+MetricSetWithSum::MetricSetWithSum()
+ : MetricSet("MetricSetWithSum", {}, ""),
+ _v1("v1", {}, "", this),
+ _v2("v2", {}, "", this),
+ _sum("sum", {}, "", this)
+{
+ _sum.addMetricToSum(_v1);
+ _sum.addMetricToSum(_v2);
+}
+
+MetricSetWithSum::~MetricSetWithSum() = default;
+
+}
+
+TEST(SumMetricTest, test_nested_sum)
+{
+ MetricSetWithSum w1;
+ MetricSetWithSum w2;
+ MetricSetWithSum sum;
+ w1._v1.addValue(10);
+ w1._v2.addValue(13);
+ w2._v1.addValue(27);
+ w2._v2.addValue(29);
+ w1.addToPart(sum);
+ w2.addToPart(sum);
+ EXPECT_EQ(int64_t(37), sum._v1.getLongValue("value"));
+ EXPECT_EQ(int64_t(42), sum._v2.getLongValue("value"));
+ EXPECT_EQ(int64_t(79), sum._sum.getLongValue("value"));
+}
+
}
diff --git a/metrics/src/vespa/metrics/countmetric.h b/metrics/src/vespa/metrics/countmetric.h
index 02a6827d1ce..1701071104e 100644
--- a/metrics/src/vespa/metrics/countmetric.h
+++ b/metrics/src/vespa/metrics/countmetric.h
@@ -105,7 +105,7 @@ public:
void addToSnapshot(Metric&, std::vector<Metric::UP> &) const override;
};
-typedef CountMetric<uint64_t, true> LongCountMetric;
+using LongCountMetric = CountMetric<uint64_t, true>;
} // metrics
diff --git a/metrics/src/vespa/metrics/metric.cpp b/metrics/src/vespa/metrics/metric.cpp
index a8d8194b26d..50fc36c62cb 100644
--- a/metrics/src/vespa/metrics/metric.cpp
+++ b/metrics/src/vespa/metrics/metric.cpp
@@ -232,4 +232,11 @@ Metric::assignValues(const Metric& m) {
assert(ownerList.empty());
return this;
}
+
+bool
+Metric::is_sum_metric() const
+{
+ return false;
+}
+
} // metrics
diff --git a/metrics/src/vespa/metrics/metric.h b/metrics/src/vespa/metrics/metric.h
index 10b74a2da22..c8fb3031278 100644
--- a/metrics/src/vespa/metrics/metric.h
+++ b/metrics/src/vespa/metrics/metric.h
@@ -247,6 +247,8 @@ public:
virtual bool isMetricSet() const { return false; }
+ virtual bool is_sum_metric() const;
+
private:
/**
diff --git a/metrics/src/vespa/metrics/metricvalueset.h b/metrics/src/vespa/metrics/metricvalueset.h
index 2463990378e..c522876f5b1 100644
--- a/metrics/src/vespa/metrics/metricvalueset.h
+++ b/metrics/src/vespa/metrics/metricvalueset.h
@@ -76,12 +76,6 @@ public:
*/
bool setValues(const ValueClass& values);
- /**
- * Retrieve and reset in a single operation, to minimize chance of
- * alteration in the process.
- */
- ValueClass getValuesAndReset();
-
void reset() {
setFlag(RESET);
}
@@ -105,9 +99,6 @@ public:
_flags.store(_flags.load(std::memory_order_relaxed) & ~flags,
std::memory_order_relaxed);
}
- uint32_t getFlags() const {
- return _flags.load(std::memory_order_relaxed);
- }
};
} // metrics
diff --git a/metrics/src/vespa/metrics/metricvalueset.hpp b/metrics/src/vespa/metrics/metricvalueset.hpp
index 8c5b32afcf8..57b3e7f9901 100644
--- a/metrics/src/vespa/metrics/metricvalueset.hpp
+++ b/metrics/src/vespa/metrics/metricvalueset.hpp
@@ -70,14 +70,6 @@ MetricValueSet<ValueClass>::setValues(const ValueClass& values) {
}
template<typename ValueClass>
-ValueClass
-MetricValueSet<ValueClass>::getValuesAndReset() {
- ValueClass result(getValues());
- setFlag(RESET);
- return result;
-}
-
-template<typename ValueClass>
std::string
MetricValueSet<ValueClass>::toString() {
std::ostringstream ost;
diff --git a/metrics/src/vespa/metrics/summetric.h b/metrics/src/vespa/metrics/summetric.h
index f04c1696638..7b60c968e5b 100644
--- a/metrics/src/vespa/metrics/summetric.h
+++ b/metrics/src/vespa/metrics/summetric.h
@@ -69,6 +69,7 @@ public:
void printDebug(std::ostream&, const std::string& indent="") const override;
void addToPart(Metric&) const override;
void addToSnapshot(Metric&, std::vector<Metric::UP> &) const override;
+ bool is_sum_metric() const override;
private:
friend struct MetricManagerTest;
diff --git a/metrics/src/vespa/metrics/summetric.hpp b/metrics/src/vespa/metrics/summetric.hpp
index 9520456a974..e067b9643c2 100644
--- a/metrics/src/vespa/metrics/summetric.hpp
+++ b/metrics/src/vespa/metrics/summetric.hpp
@@ -142,8 +142,17 @@ template<typename AddendMetric>
void
SumMetric<AddendMetric>::addToPart(Metric& m) const
{
- std::pair<std::vector<Metric::UP>, Metric::UP> sum(generateSum());
- sum.second->addToPart(m);
+ if (!m.is_sum_metric()) {
+ std::pair<std::vector<Metric::UP>, Metric::UP> sum(generateSum());
+ sum.second->addToPart(m);
+ }
+}
+
+template<typename AddendMetric>
+bool
+SumMetric<AddendMetric>::is_sum_metric() const
+{
+ return true;
}
template<typename AddendMetric>
diff --git a/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java b/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
index 4b42e18d75e..a7186aae5fe 100644
--- a/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
+++ b/model-integration/src/test/java/ai/vespa/modelintegration/evaluator/OnnxEvaluatorTest.java
@@ -74,7 +74,7 @@ public class OnnxEvaluatorTest {
assertEvaluate("cast_int8_float.onnx", "tensor<float>(d0[1]):[-128]", "tensor<int8>(d0[1]):[128]");
assertEvaluate("cast_float_int8.onnx", "tensor<int8>(d0[1]):[-1]", "tensor<float>(d0[1]):[255]");
- // ONNX Runtime 1.7.0 does not support much of bfloat16 yet
+ // ONNX Runtime 1.8.0 does not support much of bfloat16 yet
// assertEvaluate("cast_bfloat16_float.onnx", "tensor<float>(d0[1]):[1]", "tensor<bfloat16>(d0[1]):[1]");
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java
index 70ce548916a..d3ab6464822 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeReports.java
@@ -42,6 +42,8 @@ public class NodeReports {
reports.put(reportId, jsonNode);
}
+ public boolean hasReport(String reportId) { return reports.containsKey(reportId); }
+
public <T> Optional<T> getReport(String reportId, Class<T> jacksonClass) {
return Optional.ofNullable(reports.get(reportId)).map(r -> uncheck(() -> mapper.treeToValue(r, jacksonClass)));
}
@@ -75,6 +77,17 @@ public class NodeReports {
return new TreeMap<>(reports);
}
+ /** Applies the given override to this. A null value means the report is removed. */
+ public void updateFromRawMap(Map<String, JsonNode> override) {
+ override.forEach((reportId, jsonNode) -> {
+ if (jsonNode == null) {
+ reports.remove(reportId);
+ } else {
+ reports.put(reportId, jsonNode);
+ }
+ });
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
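A usage sketch of the new report methods above, assuming a no-argument NodeReports constructor (not shown in this diff); a null value in the override removes a report, a non-null value adds or replaces it:

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.node.JsonNodeFactory;
    import java.util.HashMap;
    import java.util.Map;

    NodeReports reports = new NodeReports();                      // assumed no-arg constructor
    JsonNode payload = JsonNodeFactory.instance.objectNode().put("level", "warning");

    Map<String, JsonNode> override = new HashMap<>();             // HashMap, since Map.of() rejects null values
    override.put("exampleReport", payload);                       // non-null value adds or replaces the report
    reports.updateFromRawMap(override);
    // reports.hasReport("exampleReport") == true

    override.put("exampleReport", null);                          // null value removes the report
    reports.updateFromRawMap(override);
    // reports.hasReport("exampleReport") == false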
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
index 7408041462c..fa1f8528b31 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
@@ -604,7 +604,7 @@ public class NodeSpec {
attributes.getCurrentOsVersion().ifPresent(this::currentOsVersion);
attributes.getRebootGeneration().ifPresent(this::currentRebootGeneration);
attributes.getRestartGeneration().ifPresent(this::currentRestartGeneration);
- NodeReports.fromMap(attributes.getReports());
+ this.reports.updateFromRawMap(attributes.getReports());
return this;
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
index 4c384b09fad..ce8fed0aa70 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollector.java
@@ -29,7 +29,10 @@ public class CoreCollector {
private static final Pattern CORE_GENERATOR_PATH_PATTERN = Pattern.compile("^Core was generated by `(?<path>.*?)'.$");
private static final Pattern EXECFN_PATH_PATTERN = Pattern.compile("^.* execfn: '(?<path>.*?)'");
private static final Pattern FROM_PATH_PATTERN = Pattern.compile("^.* from '(?<path>.*?)'");
- static final String GDB_PATH = "/opt/rh/devtoolset-9/root/bin/gdb";
+ static final String GDB_PATH_RHEL7_DT9 = "/opt/rh/devtoolset-9/root/bin/gdb";
+ static final String GDB_PATH_RHEL7_DT10 = "/opt/rh/devtoolset-10/root/bin/gdb";
+ static final String GDB_PATH_RHEL8 = "/opt/rh/gcc-toolset-10/root/bin/gdb";
+
static final Map<String, Object> JAVA_HEAP_DUMP_METADATA =
Map.of("bin_path", "java", "backtrace", List.of("Heap dump, no backtrace available"));
@@ -39,8 +42,23 @@ public class CoreCollector {
this.docker = docker;
}
+ String getGdbPath(NodeAgentContext context) {
+ // TODO: Remove when we do not have any devtoolset-9 installs left
+ String[] command_rhel7_dt9 = {"stat", GDB_PATH_RHEL7_DT9};
+ if (docker.executeCommandInContainerAsRoot(context, command_rhel7_dt9).getExitStatus() == 0) {
+ return GDB_PATH_RHEL7_DT9;
+ }
+
+ String[] command_rhel7_dt10 = {"stat", GDB_PATH_RHEL7_DT10};
+ if (docker.executeCommandInContainerAsRoot(context, command_rhel7_dt10).getExitStatus() == 0) {
+ return GDB_PATH_RHEL7_DT10;
+ }
+
+ return GDB_PATH_RHEL8;
+ }
+
Path readBinPathFallback(NodeAgentContext context, Path coredumpPath) {
- String command = GDB_PATH + " -n -batch -core " + coredumpPath + " | grep \'^Core was generated by\'";
+ String command = getGdbPath(context) + " -n -batch -core " + coredumpPath + " | grep \'^Core was generated by\'";
String[] wrappedCommand = {"/bin/sh", "-c", command};
ProcessResult result = docker.executeCommandInContainerAsRoot(context, wrappedCommand);
@@ -79,7 +97,7 @@ public class CoreCollector {
List<String> readBacktrace(NodeAgentContext context, Path coredumpPath, Path binPath, boolean allThreads) {
String threads = allThreads ? "thread apply all bt" : "bt";
- String[] command = {GDB_PATH, "-n", "-ex", threads, "-batch", binPath.toString(), coredumpPath.toString()};
+ String[] command = {getGdbPath(context), "-n", "-ex", threads, "-batch", binPath.toString(), coredumpPath.toString()};
ProcessResult result = docker.executeCommandInContainerAsRoot(context, command);
if (result.getExitStatus() != 0)
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java
index 6d9eae5c4dc..e510618c5a4 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.hosted.node.admin.nodeadmin;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
import java.time.Duration;
-import java.util.List;
import java.util.Set;
/**
@@ -41,10 +40,9 @@ public interface NodeAdmin {
Duration subsystemFreezeDuration();
/**
- * Stop services on these nodes
- * @param nodes List of hostnames to suspend
+ * Stop all services on these nodes
*/
- void stopNodeAgentServices(List<String> nodes);
+ void stopNodeAgentServices();
/**
* Start node-admin schedulers.
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
index caffe5ef2f1..5d7ab48753f 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
@@ -21,6 +21,7 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
/**
* Administers a host (for now only docker hosts) and its nodes (docker container nodes).
@@ -131,7 +132,7 @@ public class NodeAdminImpl implements NodeAdmin {
}
// Use filter with count instead of allMatch() because allMatch() will short circuit on first non-match
- boolean allNodeAgentsConverged = nodeAgentWithSchedulerByHostname.values().parallelStream()
+ boolean allNodeAgentsConverged = parallelStreamOfNodeAgentWithScheduler()
.filter(nodeAgentScheduler -> !nodeAgentScheduler.setFrozen(wantFrozen, freezeTimeout))
.count() == 0;
@@ -157,12 +158,9 @@ public class NodeAdminImpl implements NodeAdmin {
}
@Override
- public void stopNodeAgentServices(List<String> hostnames) {
+ public void stopNodeAgentServices() {
// Each container may spend 1-1:30 minutes stopping
- hostnames.parallelStream()
- .filter(nodeAgentWithSchedulerByHostname::containsKey)
- .map(nodeAgentWithSchedulerByHostname::get)
- .forEach(NodeAgentWithScheduler::stopForHostSuspension);
+ parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForHostSuspension);
}
@Override
@@ -173,7 +171,18 @@ public class NodeAdminImpl implements NodeAdmin {
@Override
public void stop() {
// Stop all node-agents in parallel, will block until the last NodeAgent is stopped
- nodeAgentWithSchedulerByHostname.values().parallelStream().forEach(NodeAgentWithScheduler::stopForRemoval);
+ parallelStreamOfNodeAgentWithScheduler().forEach(NodeAgentWithScheduler::stopForRemoval);
+ }
+
+ /**
+ * Returns a parallel stream of NodeAgentWithScheduler.
+ *
+ * <p>Why not just call nodeAgentWithSchedulerByHostname.values().parallelStream()? Experiments
+ * with Java 11 have shown that with 10 nodes and forEach(), at most 3 threads run concurrently.
+ * With a HashMap it produces 5, and with a List all 10 run concurrently.</p>
+ */
+ private Stream<NodeAgentWithScheduler> parallelStreamOfNodeAgentWithScheduler() {
+ return List.copyOf(nodeAgentWithSchedulerByHostname.values()).parallelStream();
}
// Set-difference. Returns minuend minus subtrahend.
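The concurrency numbers in the javadoc above can be reproduced with a standalone probe like the one below; this is illustrative only (not Vespa code), and the observed peaks depend on the JVM version and the number of available cores:

    import java.util.*;
    import java.util.concurrent.atomic.AtomicInteger;

    public class ParallelismProbe {
        // Measures the peak number of threads observed inside forEach() for a parallel stream over the source.
        static int peakConcurrency(Collection<Integer> source) {
            AtomicInteger active = new AtomicInteger();
            AtomicInteger peak = new AtomicInteger();
            source.parallelStream().forEach(ignored -> {
                int now = active.incrementAndGet();
                peak.accumulateAndGet(now, Math::max);
                try { Thread.sleep(50); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
                active.decrementAndGet();
            });
            return peak.get();
        }

        public static void main(String[] args) {
            Map<String, Integer> byHostname = new HashMap<>();
            for (int i = 0; i < 10; i++) byHostname.put("host" + i, i);
            System.out.println("HashMap.values(): " + peakConcurrency(byHostname.values()));
            System.out.println("List.copyOf():    " + peakConcurrency(List.copyOf(byHostname.values())));
        }
    }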
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
index 58ca4ae3f41..c24b2261f42 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
@@ -150,7 +150,7 @@ public class NodeAdminStateUpdater {
// The node agent services are stopped by this thread, which is OK only
// because the node agents are frozen (see above).
- nodeAdmin.stopNodeAgentServices(nodesInActiveState);
+ nodeAdmin.stopNodeAgentServices();
break;
default:
throw new IllegalStateException("Unknown wanted state " + wantedState);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index df3f075e8d9..05c765c9d78 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.nodeagent;
import com.yahoo.config.provision.ApplicationId;
@@ -357,7 +357,7 @@ public class NodeAgentImpl implements NodeAgent {
}
try {
- if (context.node().state() != NodeState.dirty) {
+ if (context.node().state() == NodeState.active) {
suspend(context);
}
stopServices(context);
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
index 2827e99c697..d61ab9e53b8 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/coredump/CoreCollectorTest.java
@@ -12,7 +12,9 @@ import java.nio.file.Paths;
import java.util.List;
import java.util.Map;
-import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH;
+import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL7_DT9;
+import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL7_DT10;
+import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.GDB_PATH_RHEL8;
import static com.yahoo.vespa.hosted.node.admin.maintenance.coredump.CoreCollector.JAVA_HEAP_DUMP_METADATA;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@@ -60,9 +62,10 @@ public class CoreCollectorTest {
"execfn: '/usr/bin/program', platform: 'x86_64");
assertEquals(TEST_BIN_PATH, coreCollector.readBinPath(context, TEST_CORE_PATH));
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
Path fallbackResponse = Paths.get("/response/from/fallback");
- mockExec(new String[]{"/bin/sh", "-c", GDB_PATH + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
+ mockExec(new String[]{"/bin/sh", "-c", GDB_PATH_RHEL7_DT9 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
"Core was generated by `/response/from/fallback'.");
mockExec(cmd,
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style");
@@ -74,8 +77,11 @@ public class CoreCollectorTest {
@Test
public void extractsBinaryPathUsingGdbTest() {
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory");
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "The stat output");
+
final String[] cmd = new String[]{"/bin/sh", "-c",
- GDB_PATH + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"};
+ GDB_PATH_RHEL7_DT10 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"};
mockExec(cmd, "Core was generated by `/usr/bin/program-from-gdb --identity foo/search/cluster.content_'.");
assertEquals(Paths.get("/usr/bin/program-from-gdb"), coreCollector.readBinPathFallback(context, TEST_CORE_PATH));
@@ -86,30 +92,34 @@ public class CoreCollectorTest {
fail("Expected not to be able to get bin path");
} catch (RuntimeException e) {
assertEquals("Failed to extract binary path from GDB, result: ProcessResult { exitStatus=1 output= errors=Error 123 }, command: " +
- "[/bin/sh, -c, /opt/rh/devtoolset-9/root/bin/gdb -n -batch -core /tmp/core.1234 | grep '^Core was generated by']", e.getMessage());
+ "[/bin/sh, -c, /opt/rh/devtoolset-10/root/bin/gdb -n -batch -core /tmp/core.1234 | grep '^Core was generated by']", e.getMessage());
}
}
@Test
public void extractsBacktraceUsingGdb() {
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
+
+ mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false));
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
+ mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
"", "Failure");
try {
coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, false);
fail("Expected not to be able to read backtrace");
} catch (RuntimeException e) {
assertEquals("Failed to read backtrace ProcessResult { exitStatus=1 output= errors=Failure }, Command: " +
- "[/opt/rh/devtoolset-9/root/bin/gdb, -n, -ex, bt, -batch, /usr/bin/program, /tmp/core.1234]", e.getMessage());
+ "[" + GDB_PATH_RHEL7_DT9 + ", -n, -ex, bt, -batch, /usr/bin/program, /tmp/core.1234]", e.getMessage());
}
}
@Test
public void extractsBacktraceFromAllThreadsUsingGdb() {
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "thread apply all bt", "-batch",
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
+
+ mockExec(new String[]{GDB_PATH_RHEL7_DT9, "-n", "-ex", "thread apply all bt", "-batch",
"/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
assertEquals(GDB_BACKTRACE, coreCollector.readBacktrace(context, TEST_CORE_PATH, TEST_BIN_PATH, true));
@@ -120,9 +130,11 @@ public class CoreCollectorTest {
mockExec(new String[]{"file", TEST_CORE_PATH.toString()},
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " +
"'/usr/bin/program'");
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory");
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "", "stat: No such file or directory");
+ mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "bt", "-batch", "/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
- mockExec(new String[]{GDB_PATH, "-n", "-ex", "thread apply all bt", "-batch",
+ mockExec(new String[]{GDB_PATH_RHEL8, "-n", "-ex", "thread apply all bt", "-batch",
"/usr/bin/program", "/tmp/core.1234"},
String.join("\n", GDB_BACKTRACE));
@@ -138,7 +150,8 @@ public class CoreCollectorTest {
mockExec(new String[]{"file", TEST_CORE_PATH.toString()},
"/tmp/core.1234: ELF 64-bit LSB core file x86-64, version 1 (SYSV), SVR4-style, from " +
"'/usr/bin/program'");
- mockExec(new String[]{GDB_PATH + " -n -ex bt -batch /usr/bin/program /tmp/core.1234"},
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "The stat output");
+ mockExec(new String[]{GDB_PATH_RHEL7_DT9 + " -n -ex bt -batch /usr/bin/program /tmp/core.1234"},
"", "Failure");
Map<String, Object> expectedData = Map.of("bin_path", TEST_BIN_PATH.toString());
@@ -149,7 +162,11 @@ public class CoreCollectorTest {
public void reportsJstackInsteadOfGdbForJdkCores() {
mockExec(new String[]{"file", TEST_CORE_PATH.toString()},
"dump.core.5954: ELF 64-bit LSB core file x86-64, version 1 (SYSV), too many program header sections (33172)");
- mockExec(new String[]{"/bin/sh", "-c", GDB_PATH + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
+
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT9}, "", "stat: No such file or directory");
+ mockExec(new String[]{"stat", GDB_PATH_RHEL7_DT10}, "", "stat: No such file or directory");
+
+ mockExec(new String[]{"/bin/sh", "-c", GDB_PATH_RHEL8 + " -n -batch -core /tmp/core.1234 | grep '^Core was generated by'"},
"Core was generated by `" + JDK_PATH + " -Dconfig.id=default/container.11 -XX:+Pre'.");
String jstack = "jstack11";
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
index 4a678597e41..8ee3a95744b 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
@@ -102,7 +102,7 @@ public class NodeAdminStateUpdaterTest {
// At this point orchestrator will say its OK to suspend, but something goes wrong when we try to stop services
final String exceptionMessage = "Failed to stop services";
verify(orchestrator, times(0)).suspend(eq(hostHostname.value()), eq(suspendHostnames));
- doThrow(new RuntimeException(exceptionMessage)).doNothing().when(nodeAdmin).stopNodeAgentServices(eq(activeHostnames));
+ doThrow(new RuntimeException(exceptionMessage)).doNothing().when(nodeAdmin).stopNodeAgentServices();
assertConvergeError(SUSPENDED, exceptionMessage);
verify(orchestrator, times(1)).suspend(eq(hostHostname.value()), eq(suspendHostnames));
// Make sure we dont roll back if we fail to stop services - we will try to stop again next tick
diff --git a/node-repository/pom.xml b/node-repository/pom.xml
index d2deaf51afe..30aa76658fd 100644
--- a/node-repository/pom.xml
+++ b/node-repository/pom.xml
@@ -75,7 +75,7 @@
<dependency>
<groupId>org.questdb</groupId>
<artifactId>questdb</artifactId>
- <version>6.0.2</version>
+ <version>6.0.3</version>
<scope>compile</scope>
</dependency>
<dependency>
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index d113ca68d01..f084b83bf97 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -109,7 +109,7 @@ public class NodeRepository extends AbstractComponent {
"dynamicProvisioning property must be 1-to-1 with availability of HostProvisioner, was: dynamicProvisioning=%s, hostProvisioner=%s",
zone.getCloud().dynamicProvisioning(), provisionServiceProvider.getHostProvisioner().map(__ -> "present").orElse("empty")));
- this.db = new CuratorDatabaseClient(flavors, curator, clock, zone, useCuratorClientCache, nodeCacheSize);
+ this.db = new CuratorDatabaseClient(flavors, curator, clock, useCuratorClientCache, nodeCacheSize);
this.zone = zone;
this.clock = clock;
this.nodes = new Nodes(db, zone, clock);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java
index 5eb01b4fe72..c8d5e4361a5 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Application.java
@@ -62,7 +62,7 @@ public class Application {
public Application withCluster(ClusterSpec.Id id, boolean exclusive, ClusterResources min, ClusterResources max) {
Cluster cluster = clusters.get(id);
if (cluster == null)
- cluster = new Cluster(id, exclusive, min, max, Optional.empty(), Optional.empty(), List.of(), "");
+ cluster = new Cluster(id, exclusive, min, max, Optional.empty(), Optional.empty(), List.of(), AutoscalingStatus.empty());
else
cluster = cluster.withConfiguration(exclusive, min, max);
return with(cluster);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
index ccd5af1cb64..fe363bf3786 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Applications.java
@@ -2,8 +2,8 @@
package com.yahoo.vespa.hosted.provision.applications;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.ApplicationTransaction;
-import com.yahoo.config.provision.ProvisionLock;
import com.yahoo.transaction.Mutex;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.hosted.provision.persistence.CuratorDatabaseClient;
@@ -28,6 +28,8 @@ public class Applications {
for (ApplicationId id : ids()) {
try (Mutex lock = db.lock(id)) {
get(id).ifPresent(application -> put(application, lock));
+ } catch (ApplicationLockException e) {
+ throw new ApplicationLockException(e.getMessage()); // No need for stack trace here
}
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java
new file mode 100644
index 00000000000..c40408c9109
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/AutoscalingStatus.java
@@ -0,0 +1,69 @@
+package com.yahoo.vespa.hosted.provision.applications;
+
+import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
+
+import java.util.Objects;
+
+/**
+ * The current autoscaling status of a cluster.
+ * A value object.
+ *
+ * @author bratseth
+ */
+public class AutoscalingStatus {
+
+ public enum Status {
+
+ /** No status is available: Autoscaling is disabled, or this is a brand new application. */
+ unavailable,
+
+ /** Autoscaling is not taking any action at the moment due to recent changes or a lack of data */
+ waiting,
+
+ /** The cluster is ideally scaled to the current load */
+ ideal,
+
+ /** The cluster should be rescaled further, but no better configuration is allowed by the current limits */
+ insufficient,
+
+ /** Rescaling of this cluster has been scheduled */
+ rescaling
+
+ };
+
+ private final Status status;
+ private final String description;
+
+ public AutoscalingStatus(Status status, String description) {
+ this.status = status;
+ this.description = description;
+ }
+
+ public Status status() { return status; }
+ public String description() { return description; }
+
+ public static AutoscalingStatus empty() { return new AutoscalingStatus(Status.unavailable, ""); }
+
+ @Override
+ public boolean equals(Object o) {
+ if (o == this) return true;
+ if ( ! ( o instanceof AutoscalingStatus)) return false;
+
+ AutoscalingStatus other = (AutoscalingStatus)o;
+ if ( other.status != this.status ) return false;
+ if ( ! other.description.equals(this.description) ) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(status, description);
+ }
+
+ @Override
+ public String toString() {
+ return "autoscaling status: " + status +
+ ( description.isEmpty() ? "" : " (" + description + ")");
+ }
+
+}
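For reference, the value-object semantics defined above behave as follows; this is an illustrative snippet only, not part of the commit:

    AutoscalingStatus a = new AutoscalingStatus(AutoscalingStatus.Status.ideal, "Cluster is ideally scaled");
    AutoscalingStatus b = new AutoscalingStatus(AutoscalingStatus.Status.ideal, "Cluster is ideally scaled");
    a.equals(b);                     // true: equality is by status and description
    AutoscalingStatus.empty();       // status unavailable, empty description
    a.toString();                    // "autoscaling status: ideal (Cluster is ideally scaled)"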
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
index 59b70ff1ef0..d4bbe6adc1b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
@@ -5,7 +5,6 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
-import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
@@ -31,7 +30,7 @@ public class Cluster {
/** The maxScalingEvents last scaling events of this, sorted by increasing time (newest last) */
private final List<ScalingEvent> scalingEvents;
- private final String autoscalingStatus;
+ private final AutoscalingStatus autoscalingStatus;
public Cluster(ClusterSpec.Id id,
boolean exclusive,
@@ -40,7 +39,7 @@ public class Cluster {
Optional<Suggestion> suggestedResources,
Optional<ClusterResources> targetResources,
List<ScalingEvent> scalingEvents,
- String autoscalingStatus) {
+ AutoscalingStatus autoscalingStatus) {
this.id = Objects.requireNonNull(id);
this.exclusive = exclusive;
this.min = Objects.requireNonNull(minResources);
@@ -95,8 +94,8 @@ public class Cluster {
return Optional.of(scalingEvents.get(scalingEvents.size() - 1));
}
- /** The latest autoscaling status of this cluster, or empty (never null) if none */
- public String autoscalingStatus() { return autoscalingStatus; }
+ /** The latest autoscaling status of this cluster, or unknown (never null) if none */
+ public AutoscalingStatus autoscalingStatus() { return autoscalingStatus; }
public Cluster withConfiguration(boolean exclusive, ClusterResources min, ClusterResources max) {
return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus);
@@ -124,7 +123,7 @@ public class Cluster {
return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus);
}
- public Cluster withAutoscalingStatus(String autoscalingStatus) {
+ public Cluster with(AutoscalingStatus autoscalingStatus) {
if (autoscalingStatus.equals(this.autoscalingStatus)) return this;
return new Cluster(id, exclusive, min, max, suggested, target, scalingEvents, autoscalingStatus);
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index 0a1c6c5df6b..6eaae755708 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -6,7 +6,6 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.applications.Cluster;
import java.util.Optional;
@@ -48,7 +47,7 @@ public class AllocationOptimizer {
limits = Limits.of(new ClusterResources(minimumNodes, 1, NodeResources.unspecified()),
new ClusterResources(maximumNodes, maximumNodes, NodeResources.unspecified()));
else
- limits = atLeast(minimumNodes, limits);
+ limits = atLeast(minimumNodes, limits).fullySpecified(current.clusterSpec().type(), nodeRepository);
Optional<AllocatableClusterResources> bestAllocation = Optional.empty();
NodeList hosts = nodeRepository.nodes().list().hosts();
for (int groups = limits.min().groups(); groups <= limits.max().groups(); groups++) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 1d0ba3da6c5..ccaf23c49ed 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -7,6 +7,8 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
+import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
+import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus.Status;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import java.time.Duration;
@@ -52,7 +54,8 @@ public class Autoscaler {
* @return scaling advice for this cluster
*/
public Advice autoscale(Application application, Cluster cluster, NodeList clusterNodes) {
- if (cluster.minResources().equals(cluster.maxResources())) return Advice.none("Autoscaling is not enabled");
+ if (cluster.minResources().equals(cluster.maxResources()))
+ return Advice.none(Status.unavailable, "Autoscaling is not enabled");
return autoscale(application, cluster, clusterNodes, Limits.of(cluster));
}
@@ -65,17 +68,20 @@ public class Autoscaler {
nodeRepository.clock());
if ( ! clusterIsStable(clusterNodes, nodeRepository))
- return Advice.none("Cluster change in progress");
+ return Advice.none(Status.waiting, "Cluster change in progress");
if (scaledIn(clusterModel.scalingDuration(), cluster))
- return Advice.dontScale("Won't autoscale now: Less than " + clusterModel.scalingDuration() + " since last resource change");
+ return Advice.dontScale(Status.waiting,
+ "Won't autoscale now: Less than " + clusterModel.scalingDuration() + " since last resource change");
if (clusterModel.nodeTimeseries().measurementsPerNode() < minimumMeasurementsPerNode(clusterModel.scalingDuration()))
- return Advice.none("Collecting more data before making new scaling decisions: Need to measure for " +
+ return Advice.none(Status.waiting,
+ "Collecting more data before making new scaling decisions: Need to measure for " +
clusterModel.scalingDuration() + " since the last resource change completed");
if (clusterModel.nodeTimeseries().nodesMeasured() != clusterNodes.size())
- return Advice.none("Collecting more data before making new scaling decisions: " +
+ return Advice.none(Status.waiting,
+ "Collecting more data before making new scaling decisions: " +
"Have measurements from " + clusterModel.nodeTimeseries().nodesMeasured() +
" nodes, but require from " + clusterNodes.size());
@@ -85,13 +91,18 @@ public class Autoscaler {
Optional<AllocatableClusterResources> bestAllocation =
allocationOptimizer.findBestAllocation(target, currentAllocation, clusterModel, limits);
if (bestAllocation.isEmpty())
- return Advice.dontScale("No allocation improvements are possible within configured limits");
+ return Advice.dontScale(Status.insufficient, "No allocations are possible within configured limits");
- if (similar(bestAllocation.get().realResources(), currentAllocation.realResources()))
- return Advice.dontScale("Cluster is ideally scaled within configured limits");
+ if (similar(bestAllocation.get().realResources(), currentAllocation.realResources())) {
+ if (bestAllocation.get().fulfilment() < 1)
+ return Advice.dontScale(Status.insufficient, "Configured limits prevent better scaling of this cluster");
+ else
+ return Advice.dontScale(Status.ideal, "Cluster is ideally scaled");
+ }
if (isDownscaling(bestAllocation.get(), currentAllocation) && scaledIn(clusterModel.scalingDuration().multipliedBy(3), cluster))
- return Advice.dontScale("Waiting " + clusterModel.scalingDuration().multipliedBy(3) +
+ return Advice.dontScale(Status.waiting,
+ "Waiting " + clusterModel.scalingDuration().multipliedBy(3) +
" since the last change before reducing resources");
return Advice.scaleTo(bestAllocation.get().advertisedResources());
@@ -154,9 +165,9 @@ public class Autoscaler {
private final boolean present;
private final Optional<ClusterResources> target;
- private final String reason;
+ private final AutoscalingStatus reason;
- private Advice(Optional<ClusterResources> target, boolean present, String reason) {
+ private Advice(Optional<ClusterResources> target, boolean present, AutoscalingStatus reason) {
this.target = target;
this.present = present;
this.reason = Objects.requireNonNull(reason);
@@ -175,12 +186,20 @@ public class Autoscaler {
public boolean isPresent() { return present; }
/** The reason for this advice */
- public String reason() { return reason; }
+ public AutoscalingStatus reason() { return reason; }
+
+ private static Advice none(Status status, String description) {
+ return new Advice(Optional.empty(), false, new AutoscalingStatus(status, description));
+ }
+
+ private static Advice dontScale(Status status, String description) {
+ return new Advice(Optional.empty(), true, new AutoscalingStatus(status, description));
+ }
- private static Advice none(String reason) { return new Advice(Optional.empty(), false, reason); }
- private static Advice dontScale(String reason) { return new Advice(Optional.empty(), true, reason); }
private static Advice scaleTo(ClusterResources target) {
- return new Advice(Optional.of(target), true, "Scheduled scaling to " + target + " due to load changes");
+ return new Advice(Optional.of(target), true,
+ new AutoscalingStatus(AutoscalingStatus.Status.rescaling,
+ "Scheduled scaling to " + target + " due to load changes"));
}
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 19cc9f5714f..bbac3bf93da 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -32,10 +32,8 @@ public class ClusterModel {
static final double idealDiskLoad = 0.6;
private final Application application;
- private final Cluster cluster;
/** The current nodes of this cluster, or empty if this models a new cluster not yet deployed */
private final NodeList nodes;
- private final MetricsDb metricsDb;
private final Clock clock;
private final Duration scalingDuration;
private final ClusterTimeseries clusterTimeseries;
@@ -52,9 +50,7 @@ public class ClusterModel {
MetricsDb metricsDb,
Clock clock) {
this.application = application;
- this.cluster = cluster;
this.nodes = clusterNodes;
- this.metricsDb = metricsDb;
this.clock = clock;
this.scalingDuration = computeScalingDuration(cluster, clusterSpec);
this.clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id());
@@ -69,9 +65,7 @@ public class ClusterModel {
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
this.application = application;
- this.cluster = cluster;
this.nodes = null;
- this.metricsDb = null;
this.clock = clock;
this.scalingDuration = scalingDuration;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
index 80ad81f6cdf..cafea4b0eaf 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Limits.java
@@ -3,8 +3,11 @@ package com.yahoo.vespa.hosted.provision.autoscale;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
+import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
import java.util.Objects;
@@ -56,6 +59,15 @@ public class Limits {
return resources;
}
+ public Limits fullySpecified(ClusterSpec.Type type, NodeRepository nodeRepository) {
+ if (this.isEmpty()) throw new IllegalStateException("Unspecified limits can not be made fully specified");
+
+ var defaultResources = new CapacityPolicies(nodeRepository).defaultNodeResources(type);
+ var specifiedMin = min.nodeResources().isUnspecified() ? min.with(defaultResources) : min;
+ var specifiedMax = max.nodeResources().isUnspecified() ? max.with(defaultResources) : max;
+ return new Limits(specifiedMin, specifiedMax);
+ }
+
private double between(double min, double max, double value) {
value = Math.max(min, value);
value = Math.min(max, value);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
index fba0993f2f9..40f9b330634 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerService.java
@@ -29,10 +29,7 @@ public interface LoadBalancerService {
Protocol protocol();
/** Returns whether load balancers created by this service can forward traffic to given node and cluster type */
- default boolean canForwardTo(NodeType nodeType, ClusterSpec.Type clusterType) {
- return (nodeType == NodeType.tenant && clusterType.isContainer()) ||
- nodeType.isConfigServerLike();
- }
+ boolean supports(NodeType nodeType, ClusterSpec.Type clusterType);
/** Load balancer protocols */
enum Protocol {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java
index b912087da46..f752cbc4349 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/LoadBalancerServiceMock.java
@@ -5,6 +5,7 @@ import com.google.common.collect.ImmutableSet;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.NodeType;
import java.util.Collections;
import java.util.HashMap;
@@ -18,6 +19,7 @@ public class LoadBalancerServiceMock implements LoadBalancerService {
private final Map<LoadBalancerId, LoadBalancerInstance> instances = new HashMap<>();
private boolean throwOnCreate = false;
+ private boolean supportsProvisioning = true;
public Map<LoadBalancerId, LoadBalancerInstance> instances() {
return Collections.unmodifiableMap(instances);
@@ -28,6 +30,18 @@ public class LoadBalancerServiceMock implements LoadBalancerService {
return this;
}
+ public LoadBalancerServiceMock supportsProvisioning(boolean supportsProvisioning) {
+ this.supportsProvisioning = supportsProvisioning;
+ return this;
+ }
+
+ @Override
+ public boolean supports(NodeType nodeType, ClusterSpec.Type clusterType) {
+ if (!supportsProvisioning) return false;
+ return (nodeType == NodeType.tenant && clusterType.isContainer()) ||
+ nodeType.isConfigServerLike();
+ }
+
@Override
public Protocol protocol() {
return Protocol.ipv4;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java
index 7667672e470..9a6a65eca69 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/PassthroughLoadBalancerService.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.lb;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeType;
import java.util.Comparator;
import java.util.Optional;
@@ -35,4 +36,9 @@ public class PassthroughLoadBalancerService implements LoadBalancerService {
return Protocol.ipv4;
}
+ @Override
+ public boolean supports(NodeType nodeType, ClusterSpec.Type clusterType) {
+ return true;
+ }
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
index 33a3c138d70..e17e5a5a449 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/lb/SharedLoadBalancerService.java
@@ -66,7 +66,7 @@ public class SharedLoadBalancerService implements LoadBalancerService {
}
@Override
- public boolean canForwardTo(NodeType nodeType, ClusterSpec.Type clusterType) {
+ public boolean supports(NodeType nodeType, ClusterSpec.Type clusterType) {
// Shared routing layer only supports routing to tenant nodes
return nodeType == NodeType.tenant && clusterType.isContainer();
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
index 9ac1ca2b4c1..24160c19dfa 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainer.java
@@ -41,9 +41,9 @@ public abstract class ApplicationMaintainer extends NodeRepositoryMaintainer {
}
@Override
- protected final boolean maintain() {
+ protected final double maintain() {
applicationsNeedingMaintenance().forEach(this::deploy);
- return true;
+ return 1.0;
}
/** Returns the number of deployments that are pending execution */
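Across this diff, maintain() in the maintainers changes its return type from boolean to double; the bodies now return 1.0 or 0.0, and LoadBalancerExpirer further below averages two sub-results, which reads as a completion ratio in [0, 1]. A hypothetical sketch of that convention (not actual Vespa code):

    // Hypothetical maintainer: maintain() reports a degree of success in [0, 1] rather than a boolean,
    // so partially failed runs can still be expressed as a ratio.
    class ExampleMaintainer {
        protected double maintain() {
            double cleaned = cleanUpExpired() ? 1.0 : 0.0;   // hypothetical sub-task
            double pruned  = pruneOrphans()  ? 1.0 : 0.0;    // hypothetical sub-task
            return (cleaned + pruned) / 2;                   // average of the sub-task outcomes
        }
        private boolean cleanUpExpired() { return true; }    // placeholder
        private boolean pruneOrphans()   { return false; }   // placeholder
    }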
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
index 7da6e0d3ebe..3914a0c9f07 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
@@ -14,16 +14,13 @@ import com.yahoo.vespa.hosted.provision.applications.Applications;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.autoscale.AllocatableClusterResources;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
-import com.yahoo.vespa.hosted.provision.autoscale.MetricsDb;
import com.yahoo.vespa.hosted.provision.autoscale.NodeMetricSnapshot;
-import com.yahoo.vespa.hosted.provision.autoscale.NodeTimeseries;
import com.yahoo.vespa.hosted.provision.node.History;
import java.time.Duration;
import java.time.Instant;
import java.util.Map;
import java.util.Optional;
-import java.util.Set;
/**
* Maintainer making automatic scaling decisions
@@ -47,14 +44,13 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
- if ( ! nodeRepository().nodes().isWorking()) return false;
+ protected double maintain() {
+ if ( ! nodeRepository().nodes().isWorking()) return 0.0;
- boolean success = true;
- if ( ! nodeRepository().zone().environment().isAnyOf(Environment.dev, Environment.prod)) return success;
+ if ( ! nodeRepository().zone().environment().isAnyOf(Environment.dev, Environment.prod)) return 1.0;
activeNodesByApplication().forEach(this::autoscale);
- return success;
+ return 1.0;
}
private void autoscale(ApplicationId application, NodeList applicationNodes) {
@@ -81,7 +77,7 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
// 1. Update cluster info
updatedCluster = updateCompletion(cluster.get(), clusterNodes)
- .withAutoscalingStatus(advice.reason())
+ .with(advice.reason())
.withTarget(advice.target());
applications().put(application.get().with(updatedCluster), lock);
if (advice.isPresent() && advice.target().isPresent() && !cluster.get().targetResources().equals(advice.target())) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java
index 4224667a726..0eb2038e233 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DynamicProvisioningMaintainer.java
@@ -12,13 +12,10 @@ import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.jdisc.Metric;
import com.yahoo.lang.MutableInteger;
import com.yahoo.transaction.Mutex;
-import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.JacksonFlag;
import com.yahoo.vespa.flags.ListFlag;
import com.yahoo.vespa.flags.PermanentFlags;
-import com.yahoo.vespa.flags.StringFlag;
import com.yahoo.vespa.flags.custom.ClusterCapacity;
import com.yahoo.vespa.flags.custom.SharedHost;
import com.yahoo.vespa.hosted.provision.LockedNodeList;
@@ -64,7 +61,6 @@ public class DynamicProvisioningMaintainer extends NodeRepositoryMaintainer {
private final HostProvisioner hostProvisioner;
private final ListFlag<ClusterCapacity> preprovisionCapacityFlag;
private final JacksonFlag<SharedHost> sharedHostFlag;
- private final StringFlag allocateOsRequirement;
DynamicProvisioningMaintainer(NodeRepository nodeRepository,
Duration interval,
@@ -75,17 +71,16 @@ public class DynamicProvisioningMaintainer extends NodeRepositoryMaintainer {
this.hostProvisioner = hostProvisioner;
this.preprovisionCapacityFlag = PermanentFlags.PREPROVISION_CAPACITY.bindTo(flagSource);
this.sharedHostFlag = PermanentFlags.SHARED_HOST.bindTo(flagSource);
- this.allocateOsRequirement = Flags.ALLOCATE_OS_REQUIREMENT.bindTo(flagSource);
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
NodeList nodes = nodeRepository().nodes().list();
resumeProvisioning(nodes, lock);
convergeToCapacity(nodes);
}
- return true;
+ return 1.0;
}
/** Resume provisioning of already provisioned hosts and their children */
@@ -301,12 +296,9 @@ public class DynamicProvisioningMaintainer extends NodeRepositoryMaintainer {
NodeSpec nodeSpec = NodeSpec.from(clusterCapacity.count(), nodeResources, false, true);
int wantedGroups = 1;
- String allocateOsRequirement = this.allocateOsRequirement
- .with(FetchVector.Dimension.APPLICATION_ID, ApplicationId.defaultId().serializedForm())
- .value();
NodePrioritizer prioritizer = new NodePrioritizer(nodeList, applicationId, clusterSpec, nodeSpec, wantedGroups,
true, nodeRepository().nameResolver(), nodeRepository().resourcesCalculator(),
- nodeRepository().spareCount(), allocateOsRequirement);
+ nodeRepository().spareCount());
List<NodeCandidate> nodeCandidates = prioritizer.collect(List.of());
MutableInteger index = new MutableInteger(0);
return nodeCandidates
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java
index 47337518a65..39183688340 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainer.java
@@ -20,19 +20,22 @@ import java.util.Set;
import java.util.stream.Collectors;
/**
- * The application maintainer detects manual operator changes to nodes and redeploys affected applications.
- * The purpose of this is to redeploy affected applications faster than achieved by the regular application
- * maintenance to reduce the time period where the node repository and the application model is out of sync.
+ * This maintainer detects changes to nodes that must be expedited, and redeploys affected applications.
+ *
+ * The purpose of this is to redeploy affected applications faster than achieved by
+ * {@link PeriodicApplicationMaintainer}, to reduce the time period where the node repository and the application model
+ * are out of sync.
*
* Why can't the manual change trigger the application redeployment directly?
- * Because the redeployment must run at the right config server, while the node state change may be running
- * at any config server.
+ *
+ * Because we want to queue redeployments to avoid overloading config servers.
*
* @author bratseth
+ * @author mpolden
*/
-public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
+public class ExpeditedChangeApplicationMaintainer extends ApplicationMaintainer {
- OperatorChangeApplicationMaintainer(Deployer deployer, Metric metric, NodeRepository nodeRepository, Duration interval) {
+ ExpeditedChangeApplicationMaintainer(Deployer deployer, Metric metric, NodeRepository nodeRepository, Duration interval) {
super(deployer, metric, nodeRepository, interval);
}
@@ -57,7 +60,7 @@ public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
boolean deployed = deployWithLock(application);
if (deployed)
log.info("Redeployed application " + application.toShortString() +
- " as a manual change was made to its nodes");
+ " as an expedited change was made to its nodes");
}
private boolean hasNodesWithChanges(ApplicationId applicationId, NodeList nodes) {
@@ -66,7 +69,7 @@ public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
return nodes.stream()
.flatMap(node -> node.history().events().stream())
- .filter(event -> event.agent() == Agent.operator)
+ .filter(event -> expediteChangeBy(event.agent()))
.map(History.Event::at)
.anyMatch(e -> lastDeployTime.get().isBefore(e));
}
@@ -84,5 +87,14 @@ public class OperatorChangeApplicationMaintainer extends ApplicationMaintainer {
.groupingBy(node -> node.allocation().get().owner());
}
+ /** Returns whether to expedite changes performed by agent */
+ private boolean expediteChangeBy(Agent agent) {
+ switch (agent) {
+ case operator:
+ case RebuildingOsUpgrader:
+ case HostEncrypter: return true;
+ }
+ return false;
+ }
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
index 2443a12d198..25108425e6e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Expirer.java
@@ -40,7 +40,7 @@ public abstract class Expirer extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
NodeList expired = nodeRepository().nodes().list(fromState).matching(this::isExpired);
if ( ! expired.isEmpty()) {
@@ -49,7 +49,7 @@ public abstract class Expirer extends NodeRepositoryMaintainer {
}
metric.add("expired." + fromState, expired.size(), null);
- return true;
+ return 1.0;
}
protected boolean isExpired(Node node) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
index e98da35aa6a..7505ce42668 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
@@ -66,7 +66,7 @@ public class FailedExpirer extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
List<Node> remainingNodes = new ArrayList<>(nodeRepository.nodes().list(Node.State.failed)
.nodeType(NodeType.tenant, NodeType.host)
.asList());
@@ -78,7 +78,7 @@ public class FailedExpirer extends NodeRepositoryMaintainer {
recycleIf(remainingNodes, node ->
node.allocation().get().membership().cluster().isStateful() &&
node.history().hasEventBefore(History.Event.Type.failed, clock().instant().minus(statefulExpiry)));
- return true;
+ return 1.0;
}
/** Recycle the nodes matching condition, and remove those nodes from the nodes list. */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java
index caf20463f60..6d88e43630a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/HostEncrypter.java
@@ -43,15 +43,17 @@ public class HostEncrypter extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
Instant now = nodeRepository().clock().instant();
NodeList allNodes = nodeRepository().nodes().list();
for (var nodeType : NodeType.values()) {
if (!nodeType.isHost()) continue;
+ // TODO: Require a minimum number of proxies in Orchestrator. For now skip proxy hosts.
+ if (nodeType == NodeType.proxyhost) continue;
if (upgradingVespa(allNodes, nodeType)) continue;
unencryptedHosts(allNodes, nodeType).forEach(host -> encrypt(host, now));
}
- return true;
+ return 1.0;
}
/** Returns whether any node of given type is currently upgrading its Vespa version */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java
index e317333135c..d9f5ea6a7a9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/InfrastructureProvisioner.java
@@ -39,9 +39,9 @@ public class InfrastructureProvisioner extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
infraDeployer.activateAllSupportedInfraApplications(false);
- return true;
+ return 1.0;
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
index 10069fd1a18..c3d6f5c42b8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/LoadBalancerExpirer.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.provision.maintenance;
import com.google.common.collect.Sets;
import com.yahoo.jdisc.Metric;
+import com.yahoo.lang.MutableInteger;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
@@ -54,9 +55,9 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
expireReserved();
- return removeInactive() & pruneReals();
+ return ( removeInactive() + pruneReals() ) / 2;
}
/** Move reserved load balancer that have expired to inactive */
@@ -64,11 +65,12 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
Instant now = nodeRepository().clock().instant();
Instant expiry = now.minus(reservedExpiry);
patchLoadBalancers(lb -> lb.state() == State.reserved && lb.changedAt().isBefore(expiry),
- lb -> db.writeLoadBalancer(lb.with(State.inactive, now)));
+ lb -> db.writeLoadBalancer(lb.with(State.inactive, now), lb.state()));
}
/** Deprovision inactive load balancers that have expired */
- private boolean removeInactive() {
+ private double removeInactive() {
+ MutableInteger attempts = new MutableInteger(0);
var failed = new ArrayList<LoadBalancerId>();
var lastException = new AtomicReference<Exception>();
var expiry = nodeRepository().clock().instant().minus(inactiveExpiry);
@@ -76,6 +78,7 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
lb.changedAt().isBefore(expiry) &&
allocatedNodes(lb.id()).isEmpty(), lb -> {
try {
+ attempts.add(1);
log.log(Level.INFO, () -> "Removing expired inactive load balancer " + lb.id());
service.remove(lb.id().application(), lb.id().cluster());
db.removeLoadBalancer(lb.id());
@@ -92,11 +95,12 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
.collect(Collectors.joining(", ")),
interval()));
}
- return lastException.get() == null;
+ return asSuccessFactor(attempts.get(), failed.size());
}
/** Remove reals from inactive load balancers */
- private boolean pruneReals() {
+ private double pruneReals() {
+ var attempts = new MutableInteger(0);
var failed = new ArrayList<LoadBalancerId>();
var lastException = new AtomicReference<Exception>();
patchLoadBalancers(lb -> lb.state() == State.inactive, lb -> {
@@ -107,9 +111,10 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
reals.removeIf(real -> !allocatedNodes.contains(real.hostname().value()));
if (reals.equals(lb.instance().get().reals())) return; // Nothing to remove
try {
+ attempts.add(1);
LOG.log(Level.INFO, () -> "Removing reals from inactive load balancer " + lb.id() + ": " + Sets.difference(lb.instance().get().reals(), reals));
service.create(new LoadBalancerSpec(lb.id().application(), lb.id().cluster(), reals), true);
- db.writeLoadBalancer(lb.with(lb.instance().map(instance -> instance.withReals(reals))));
+ db.writeLoadBalancer(lb.with(lb.instance().map(instance -> instance.withReals(reals))), lb.state());
} catch (Exception e) {
failed.add(lb.id());
lastException.set(e);
@@ -124,7 +129,7 @@ public class LoadBalancerExpirer extends NodeRepositoryMaintainer {
interval()),
lastException.get());
}
- return lastException.get() == null;
+ return asSuccessFactor(attempts.get(), failed.size());
}
/** Patch load balancers matching given filter, while holding lock */
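Both removeInactive() and pruneReals() now report their result via asSuccessFactor(attempts, failures), a helper on the maintainer base class that is not shown in this diff. A plausible sketch of it, under the assumption that it maps the failure ratio onto [0, 1] and treats a run with no attempts as a full success:

    // Assumed definition of asSuccessFactor (not shown in this diff); the real helper may differ
    final class SuccessFactor {

        static double asSuccessFactor(int attempts, int failures) {
            return attempts == 0 ? 1.0 : 1.0 - (double) failures / attempts;
        }

        public static void main(String[] args) {
            System.out.println(asSuccessFactor(0, 0));   // 1.0: nothing to do counts as success
            System.out.println(asSuccessFactor(4, 0));   // 1.0: all removals succeeded
            System.out.println(asSuccessFactor(4, 1));   // 0.75: one load balancer failed to be removed
            System.out.println(asSuccessFactor(4, 4));   // 0.0: every attempt failed
        }
    }

LoadBalancerExpirer then averages the two sub-results, so a run where only pruning fails reports 0.5 rather than 0.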
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
index 3b16ecbcaa9..892372f27e7 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MaintenanceDeployment.java
@@ -102,11 +102,13 @@ class MaintenanceDeployment implements Closeable {
}
private Optional<Mutex> tryLock(ApplicationId application, NodeRepository nodeRepository) {
+ Duration timeout = Duration.ofSeconds(3);
try {
// Use a short lock to avoid interfering with change deployments
- return Optional.of(nodeRepository.nodes().lock(application, Duration.ofSeconds(1)));
+ return Optional.of(nodeRepository.nodes().lock(application, timeout));
}
catch (ApplicationLockException e) {
+ log.log(Level.WARNING, () -> "Could not lock " + application + " for maintenance deployment within " + timeout);
return Optional.empty();
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
index 85437b3e78a..d8bbf305b57 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporter.java
@@ -27,6 +27,7 @@ import com.yahoo.vespa.service.monitor.ServiceModel;
import com.yahoo.vespa.service.monitor.ServiceMonitor;
import java.time.Duration;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -65,8 +66,10 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
}
@Override
- public boolean maintain() {
- NodeList nodes = nodeRepository().nodes().list();
+ public double maintain() {
+ // Sort by hostname to get a deterministic metric reporting order (and hopefully avoid shifts in
+ // metric reporting time that cause double reporting or no reporting within a minute)
+ NodeList nodes = nodeRepository().nodes().list().sortedBy(Comparator.comparing(Node::hostname));
ServiceModel serviceModel = serviceMonitor.getServiceModelSnapshot();
updateZoneMetrics();
@@ -80,7 +83,7 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
updateRepairTicketMetrics(nodes);
updateAllocationMetrics(nodes);
updateExclusiveSwitchMetrics(nodes);
- return true;
+ return 1.0;
}
private void updateAllocationMetrics(NodeList nodes) {
@@ -200,6 +203,11 @@ public class MetricsReporter extends NodeRepositoryMaintainer {
metric.set("wantToDeprovision", node.status().wantToDeprovision() ? 1 : 0, context);
metric.set("failReport", NodeFailer.reasonsToFailParentHost(node).isEmpty() ? 0 : 1, context);
+ if (node.type().isHost()) {
+ metric.set("wantToEncrypt", node.reports().getReport("wantToEncrypt").isPresent() ? 1 : 0, context);
+ metric.set("diskEncrypted", node.reports().getReport("diskEncrypted").isPresent() ? 1 : 0, context);
+ }
+
HostName hostname = new HostName(node.hostname());
serviceModel.getApplication(hostname)
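The node list is now sorted by hostname before metrics are reported, so each run walks the nodes in the same order and reporting times stay stable from minute to minute. A small self-contained illustration of that ordering; the Node type here is a stand-in, only the Comparator.comparing pattern matches the patch:

    import java.util.Comparator;
    import java.util.List;
    import java.util.stream.Collectors;

    // Stand-in types: illustrates sorting by hostname for a deterministic iteration order
    public class DeterministicOrder {

        record Node(String hostname) { }

        public static void main(String[] args) {
            List<Node> nodes = List.of(new Node("host3.example.com"),
                                       new Node("host1.example.com"),
                                       new Node("host2.example.com"));
            List<Node> sorted = nodes.stream()
                                     .sorted(Comparator.comparing(Node::hostname))
                                     .collect(Collectors.toList());
            sorted.forEach(node -> System.out.println(node.hostname()));
            // host1.example.com, host2.example.com, host3.example.com -- same order on every run
        }
    }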
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
index effa41dc69f..f16459ee8b9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
@@ -72,17 +72,21 @@ public class NodeFailer extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
- if ( ! nodeRepository().nodes().isWorking()) return false;
+ protected double maintain() {
+ if ( ! nodeRepository().nodes().isWorking()) return 0.0;
+ int attempts = 0;
+ int failures = 0;
int throttledHostFailures = 0;
int throttledNodeFailures = 0;
// Ready nodes
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
+ attempts++;
Node node = entry.getKey();
if (throttle(node)) {
+ failures++;
if (node.type().isHost())
throttledHostFailures++;
else
@@ -96,10 +100,12 @@ public class NodeFailer extends NodeRepositoryMaintainer {
// Active nodes
for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason().entrySet()) {
+ attempts++;
Node node = entry.getKey();
if (!failAllowedFor(node.type())) continue;
if (throttle(node)) {
+ failures++;
if (node.type().isHost())
throttledHostFailures++;
else
@@ -116,11 +122,15 @@ public class NodeFailer extends NodeRepositoryMaintainer {
if ( ! activeNodes.childrenOf(host).isEmpty()) continue;
Optional<NodeMutex> locked = Optional.empty();
try {
+ attempts++;
locked = nodeRepository().nodes().lockAndGet(host);
if (locked.isEmpty()) continue;
nodeRepository().nodes().fail(List.of(locked.get().node()), Agent.NodeFailer,
"Host should be failed and have no tenant nodes");
}
+ catch (Exception e) {
+ failures++;
+ }
finally {
locked.ifPresent(NodeMutex::close);
}
@@ -130,7 +140,7 @@ public class NodeFailer extends NodeRepositoryMaintainer {
metric.set(throttlingActiveMetric, throttlingActive, null);
metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
- return throttlingActive == 0;
+ return asSuccessFactor(attempts, failures);
}
private Map<Node, String> getReadyNodesByFailureReason() {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
index fe2fb5229f9..37969a30b81 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeHealthTracker.java
@@ -5,6 +5,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.HostLivenessTracker;
import com.yahoo.jdisc.Metric;
+import com.yahoo.lang.MutableInteger;
import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.applicationmodel.ServiceInstance;
import com.yahoo.vespa.applicationmodel.ServiceStatus;
@@ -48,13 +49,11 @@ public class NodeHealthTracker extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
- updateReadyNodeLivenessEvents();
- updateActiveNodeDownState();
- return true;
+ protected double maintain() {
+ return ( updateReadyNodeLivenessEvents() + updateActiveNodeDownState() ) / 2;
}
- private void updateReadyNodeLivenessEvents() {
+ private double updateReadyNodeLivenessEvents() {
// Update node last request events through ZooKeeper to collect request to all config servers.
// We do this here ("lazily") to avoid writing to zk for each config request.
try (Mutex lock = nodeRepository().nodes().lockUnallocated()) {
@@ -69,13 +68,16 @@ public class NodeHealthTracker extends NodeRepositoryMaintainer {
}
}
}
+ return 1.0;
}
/**
* If the node is down (see {@link #allDown}), and there is no "down" history record, we add it.
* Otherwise we remove any "down" history record.
*/
- private void updateActiveNodeDownState() {
+ private double updateActiveNodeDownState() {
+ var attempts = new MutableInteger(0);
+ var failures = new MutableInteger(0);
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName().forEach((hostname, serviceInstances) -> {
Optional<Node> node = activeNodes.node(hostname.toString());
@@ -90,6 +92,7 @@ public class NodeHealthTracker extends NodeRepositoryMaintainer {
try (var lock = nodeRepository().nodes().lock(owner)) {
node = getNode(hostname.toString(), owner, lock); // Re-get inside lock
if (node.isEmpty()) return; // Node disappeared or changed allocation
+ attempts.add(1);
if (isDown) {
recordAsDown(node.get(), lock);
} else {
@@ -98,8 +101,10 @@ public class NodeHealthTracker extends NodeRepositoryMaintainer {
} catch (ApplicationLockException e) {
// Fine, carry on with other nodes. We'll try updating this one in the next run
log.log(Level.WARNING, "Could not lock " + owner + ": " + Exceptions.toMessageString(e));
+ failures.add(1);
}
});
+ return asSuccessFactor(attempts.get(), failures.get());
}
/**
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java
index 1ea4577f7fe..d671900d08c 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainer.java
@@ -33,19 +33,21 @@ public class NodeMetricsDbMaintainer extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
+ int attempts = 0;
+ var failures = new MutableInteger(0);
try {
- var warnings = new MutableInteger(0);
Set<ApplicationId> applications = activeNodesByApplication().keySet();
- if (applications.isEmpty()) return true;
+ if (applications.isEmpty()) return 1.0;
long pauseMs = interval().toMillis() / applications.size() - 1; // spread requests over interval
int done = 0;
for (ApplicationId application : applications) {
+ attempts++;
metricsFetcher.fetchMetrics(application)
.whenComplete((metricsResponse, exception) -> handleResponse(metricsResponse,
exception,
- warnings,
+ failures,
application));
if (++done < applications.size())
Thread.sleep(pauseMs);
@@ -56,23 +58,22 @@ public class NodeMetricsDbMaintainer extends NodeRepositoryMaintainer {
nodeRepository().metricsDb().gc();
- // Suppress failures for manual zones for now to avoid noise
- return nodeRepository().zone().environment().isManuallyDeployed() || warnings.get() == 0;
+ return asSuccessFactor(attempts, failures.get());
}
catch (InterruptedException e) {
- return false;
+ return asSuccessFactor(attempts, failures.get());
}
}
private void handleResponse(MetricsResponse response,
Throwable exception,
- MutableInteger warnings,
+ MutableInteger failures,
ApplicationId application) {
if (exception != null) {
- if (warnings.get() < maxWarningsPerInvocation)
+ if (failures.get() < maxWarningsPerInvocation)
log.log(Level.WARNING, "Could not update metrics for " + application + ": " +
Exceptions.toMessageString(exception));
- warnings.add(1);
+ failures.add(1);
}
else if (response != null) {
nodeRepository().metricsDb().addNodeMetrics(response.nodeMetrics());
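NodeMetricsDbMaintainer fetches metrics for each application asynchronously and sleeps between requests so the fetches are spread across the maintenance interval instead of arriving in a burst. A minimal sketch of that pacing pattern, with a hypothetical fetch method standing in for metricsFetcher.fetchMetrics and a short interval so the example runs quickly:

    import java.time.Duration;
    import java.util.List;

    // Sketch of spreading N requests over an interval (hypothetical fetch, not the real metricsFetcher)
    public class PacedFetch {

        static void fetch(String application) {
            System.out.println("fetching metrics for " + application);
        }

        public static void main(String[] args) throws InterruptedException {
            Duration interval = Duration.ofSeconds(6);   // stand-in for the maintainer interval
            List<String> applications = List.of("tenant1.app1", "tenant2.app2", "tenant3.app3");
            long pauseMs = interval.toMillis() / applications.size() - 1; // same spreading as in the patch
            int done = 0;
            for (String application : applications) {
                fetch(application);
                if (++done < applications.size())
                    Thread.sleep(pauseMs);               // no pause after the last request
            }
        }
    }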
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java
index 6ee657beadd..c282fcdb7fc 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRebooter.java
@@ -37,7 +37,7 @@ public class NodeRebooter extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
// Reboot candidates: Nodes in long-term states, where we know we can safely orchestrate a reboot
List<Node> nodesToReboot = nodeRepository().nodes().list(Node.State.active, Node.State.ready).stream()
.filter(node -> node.type().isHost())
@@ -46,7 +46,7 @@ public class NodeRebooter extends NodeRepositoryMaintainer {
if (!nodesToReboot.isEmpty())
nodeRepository().nodes().reboot(NodeListFilter.from(nodesToReboot));
- return true;
+ return 1.0;
}
private boolean shouldReboot(Node node) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java
index 0a1f6961f9f..0fade7b32f8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintainer.java
@@ -25,7 +25,7 @@ public abstract class NodeRepositoryMaintainer extends Maintainer {
public NodeRepositoryMaintainer(NodeRepository nodeRepository, Duration interval, Metric metric) {
super(null, interval, nodeRepository.clock().instant(), nodeRepository.jobControl(),
- jobMetrics(metric), nodeRepository.database().cluster(), true);
+ new NodeRepositoryJobMetrics(metric), nodeRepository.database().cluster(), true);
this.nodeRepository = nodeRepository;
}
@@ -48,10 +48,19 @@ public abstract class NodeRepositoryMaintainer extends Maintainer {
.groupingBy(node -> node.allocation().get().owner());
}
- private static JobMetrics jobMetrics(Metric metric) {
- return new JobMetrics((job, consecutiveFailures) -> {
- metric.set("maintenance.consecutiveFailures", consecutiveFailures, metric.createContext(Map.of("job", job)));
- });
+ private static class NodeRepositoryJobMetrics extends JobMetrics {
+
+ private final Metric metric;
+
+ public NodeRepositoryJobMetrics(Metric metric) {
+ this.metric = metric;
+ }
+
+ @Override
+ public void completed(String job, double successFactor) {
+ metric.set("maintenance.successFactor", successFactor, metric.createContext(Map.of("job", job)));
+ }
+
}
}
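With the new contract, the per-job metric changes from maintenance.consecutiveFailures to a maintenance.successFactor gauge, emitted through a JobMetrics subclass. Below is a toy sketch of how that callback could be exercised; Gauge is an invented stand-in for the metric API, only the reporting pattern mirrors the patch:

    import java.util.Map;

    // Toy illustration of the JobMetrics.completed(job, successFactor) callback added in this patch
    public class SuccessFactorMetrics {

        interface Gauge {
            void set(String name, double value, Map<String, String> dimensions);
        }

        abstract static class JobMetrics {
            /** Called when a maintenance job finishes a run */
            public abstract void completed(String job, double successFactor);
        }

        static class NodeRepositoryJobMetrics extends JobMetrics {
            private final Gauge gauge;
            NodeRepositoryJobMetrics(Gauge gauge) { this.gauge = gauge; }

            @Override
            public void completed(String job, double successFactor) {
                gauge.set("maintenance.successFactor", successFactor, Map.of("job", job));
            }
        }

        public static void main(String[] args) {
            Gauge printing = (name, value, dims) -> System.out.println(name + dims + " = " + value);
            JobMetrics metrics = new NodeRepositoryJobMetrics(printing);
            metrics.completed("NodeFailer", 0.9);   // maintenance.successFactor{job=NodeFailer} = 0.9
            metrics.completed("Expirer", 1.0);
        }
    }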
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
index 44ee8b5a8b3..79d6fbfbdcd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeRepositoryMaintenance.java
@@ -48,7 +48,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
maintainers.add(new NodeFailer(deployer, nodeRepository, defaults.failGrace, defaults.nodeFailerInterval, orchestrator, defaults.throttlePolicy, metric));
maintainers.add(new NodeHealthTracker(hostLivenessTracker, serviceMonitor, nodeRepository, defaults.nodeFailureStatusUpdateInterval, metric));
- maintainers.add(new OperatorChangeApplicationMaintainer(deployer, metric, nodeRepository, defaults.operatorChangeRedeployInterval));
+ maintainers.add(new ExpeditedChangeApplicationMaintainer(deployer, metric, nodeRepository, defaults.expeditedChangeRedeployInterval));
maintainers.add(new ReservationExpirer(nodeRepository, defaults.reservationExpiry, metric));
maintainers.add(new RetiredExpirer(nodeRepository, orchestrator, deployer, metric, defaults.retiredInterval, defaults.retiredExpiry));
maintainers.add(new InactiveExpirer(nodeRepository, defaults.inactiveExpiry, Map.of(NodeType.config, defaults.inactiveConfigServerExpiry,
@@ -91,7 +91,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
/** Time between each run of maintainer that does periodic redeployment */
private final Duration redeployMaintainerInterval;
/** Applications are redeployed after manual operator changes within this time period */
- private final Duration operatorChangeRedeployInterval;
+ private final Duration expeditedChangeRedeployInterval;
/** The time a node must be continuously unresponsive before it is failed */
private final Duration failGrace;
@@ -127,13 +127,13 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
dynamicProvisionerInterval = Duration.ofMinutes(5);
failedExpirerInterval = Duration.ofMinutes(10);
failGrace = Duration.ofMinutes(30);
- infrastructureProvisionInterval = Duration.ofMinutes(1);
+ infrastructureProvisionInterval = Duration.ofMinutes(3);
loadBalancerExpirerInterval = Duration.ofMinutes(5);
metricsInterval = Duration.ofMinutes(1);
nodeFailerInterval = Duration.ofMinutes(15);
nodeFailureStatusUpdateInterval = Duration.ofMinutes(2);
nodeMetricsCollectionInterval = Duration.ofMinutes(1);
- operatorChangeRedeployInterval = Duration.ofMinutes(3);
+ expeditedChangeRedeployInterval = Duration.ofMinutes(3);
// Vespa upgrade frequency is higher in CD so (de)activate OS upgrades more frequently as well
osUpgradeActivatorInterval = zone.system().isCd() ? Duration.ofSeconds(30) : Duration.ofMinutes(5);
periodicRedeployInterval = Duration.ofMinutes(60);
@@ -152,7 +152,7 @@ public class NodeRepositoryMaintenance extends AbstractComponent {
if (zone.environment().isProduction() && ! zone.system().isCd()) {
inactiveExpiry = Duration.ofHours(4); // enough time for the application owner to discover and redeploy
- retiredInterval = Duration.ofMinutes(30);
+ retiredInterval = Duration.ofMinutes(15);
dirtyExpiry = Duration.ofHours(2); // enough time to clean the node
retiredExpiry = Duration.ofDays(4); // give up migrating data after 4 days
} else {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivator.java
index 4eba15307cb..749603a373d 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/OsUpgradeActivator.java
@@ -23,13 +23,13 @@ public class OsUpgradeActivator extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
for (var nodeType : NodeType.values()) {
if (!nodeType.isHost()) continue;
boolean resume = canUpgradeOsOf(nodeType);
nodeRepository().osVersions().resumeUpgradeOf(nodeType, resume);
}
- return true;
+ return 1.0;
}
/** Returns whether to allow OS upgrade of nodes of given type */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
index 856d534bbd2..76c8210338e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirer.java
@@ -4,15 +4,18 @@ package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.config.provision.NodeType;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.History;
import java.time.Duration;
+import java.time.Instant;
import java.util.List;
/**
* This moves nodes of type {@link NodeType#host} from provisioned to parked if they have been in provisioned too long.
+ * Parked hosts are also deprovisioned when too many hosts have already been expired.
*
* Only {@link NodeType#host} is moved because any number of nodes of that type can exist. Other node types such as
* {@link NodeType#confighost} have a fixed number and thus cannot be replaced while the fixed number of nodes exist in
@@ -22,17 +25,40 @@ import java.util.List;
*/
public class ProvisionedExpirer extends Expirer {
+ private final NodeRepository nodeRepository;
+ private static final int MAXIMUM_ALLOWED_EXPIRED_HOSTS = 20;
+
ProvisionedExpirer(NodeRepository nodeRepository, Duration dirtyTimeout, Metric metric) {
super(Node.State.provisioned, History.Event.Type.provisioned, nodeRepository, dirtyTimeout, metric);
+ this.nodeRepository = nodeRepository;
}
@Override
protected void expire(List<Node> expired) {
+ int previouslyExpired = numberOfPreviouslyExpired();
for (Node expiredNode : expired) {
- if (expiredNode.type() == NodeType.host) {
- nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned");
+ if (expiredNode.type() != NodeType.host)
+ continue;
+ nodeRepository().nodes().parkRecursively(expiredNode.hostname(), Agent.ProvisionedExpirer, "Node is stuck in provisioned");
+ if (MAXIMUM_ALLOWED_EXPIRED_HOSTS < ++previouslyExpired) {
+ nodeRepository.nodes().deprovision(expiredNode.hostname(), Agent.ProvisionedExpirer, nodeRepository.clock().instant());
}
}
}
+ private int numberOfPreviouslyExpired() {
+ return nodeRepository.nodes()
+ .list(Node.State.parked)
+ .nodeType(NodeType.host)
+ .matching(this::parkedByProvisionedExpirer)
+ .not().deprovisioning()
+ .size();
+ }
+
+ private boolean parkedByProvisionedExpirer(Node node) {
+ return node.history().event(History.Event.Type.parked)
+ .map(History.Event::agent)
+ .map(Agent.ProvisionedExpirer::equals)
+ .orElse(false);
+ }
}
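ProvisionedExpirer still parks hosts stuck in provisioned, but now also deprovisions them once more than 20 hosts have already been parked by this expirer and are not yet being deprovisioned. A simplified model of that cap, with plain counters and strings instead of the node repository:

    import java.util.ArrayList;
    import java.util.List;

    // Simplified model of the new cap in ProvisionedExpirer: park every expired host,
    // deprovision the ones beyond MAXIMUM_ALLOWED_EXPIRED_HOSTS already-parked hosts.
    public class ExpiredHostCap {

        static final int MAXIMUM_ALLOWED_EXPIRED_HOSTS = 20;

        public static void main(String[] args) {
            int previouslyExpired = 18;              // hosts parked by this expirer, not yet deprovisioning
            List<String> expired = List.of("host1", "host2", "host3", "host4", "host5");

            List<String> parked = new ArrayList<>();
            List<String> deprovisioned = new ArrayList<>();
            for (String host : expired) {
                parked.add(host);                                    // always parked
                if (MAXIMUM_ALLOWED_EXPIRED_HOSTS < ++previouslyExpired)
                    deprovisioned.add(host);                         // only once the cap is exceeded
            }
            System.out.println("parked: " + parked);                 // all five hosts
            System.out.println("deprovisioned: " + deprovisioned);   // host3, host4, host5
        }
    }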
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java
index 1543506a78e..7bb748c92c9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/Rebalancer.java
@@ -33,19 +33,18 @@ public class Rebalancer extends NodeMover<Rebalancer.Move> {
}
@Override
- protected boolean maintain() {
- if ( ! nodeRepository().nodes().isWorking()) return false;
+ protected double maintain() {
+ if ( ! nodeRepository().nodes().isWorking()) return 0.0;
- boolean success = true;
- if (nodeRepository().zone().getCloud().dynamicProvisioning()) return success; // Rebalancing not necessary
- if (nodeRepository().zone().environment().isTest()) return success; // Short lived deployments; no need to rebalance
+ if (nodeRepository().zone().getCloud().dynamicProvisioning()) return 1.0; // Rebalancing not necessary
+ if (nodeRepository().zone().environment().isTest()) return 1.0; // Short lived deployments; no need to rebalance
// Work with an unlocked snapshot as this can take a long time and full consistency is not needed
NodeList allNodes = nodeRepository().nodes().list();
updateSkewMetric(allNodes);
- if ( ! zoneIsStable(allNodes)) return success;
+ if ( ! zoneIsStable(allNodes)) return 1.0;
findBestMove(allNodes).execute(true, Agent.Rebalancer, deployer, metric, nodeRepository());
- return success;
+ return 1.0;
}
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
index f72daf1bc2b..3f5893b368a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirer.java
@@ -48,7 +48,10 @@ public class RetiredExpirer extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
+ int attempts = 0;
+ int successes = 0;
+
NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);
Map<ApplicationId, NodeList> retiredNodesByApplication = activeNodes.retired().groupingBy(node -> node.allocation().get().owner());
for (Map.Entry<ApplicationId, NodeList> entry : retiredNodesByApplication.entrySet()) {
@@ -57,17 +60,19 @@ public class RetiredExpirer extends NodeRepositoryMaintainer {
List<Node> nodesToRemove = retiredNodes.stream().filter(n -> canRemove(n, activeNodes)).collect(Collectors.toList());
if (nodesToRemove.isEmpty()) continue;
+ attempts++;
try (MaintenanceDeployment deployment = new MaintenanceDeployment(application, deployer, metric, nodeRepository())) {
if ( ! deployment.isValid()) continue;
nodeRepository().nodes().setRemovable(application, nodesToRemove);
boolean success = deployment.activate().isPresent();
- if ( ! success) return success;
+ if ( ! success) continue;
String nodeList = nodesToRemove.stream().map(Node::hostname).collect(Collectors.joining(", "));
log.info("Redeployed " + application + " to deactivate retired nodes: " + nodeList);
+ successes++;
}
}
- return true;
+ return attempts == 0 ? 1.0 : ((double)successes / attempts);
}
/**
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
index c217580872b..b606e40ef42 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
@@ -36,20 +36,19 @@ public class ScalingSuggestionsMaintainer extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
- if ( ! nodeRepository().zone().environment().isProduction()) return true;
+ protected double maintain() {
+ if ( ! nodeRepository().zone().environment().isProduction()) return 1.0;
- int successes = 0;
- for (var application : activeNodesByApplication().entrySet())
- successes += suggest(application.getKey(), application.getValue());
- return successes > 0;
- }
-
- private int suggest(ApplicationId application, NodeList applicationNodes) {
- int successes = 0;
- for (var cluster : nodesByCluster(applicationNodes).entrySet())
- successes += suggest(application, cluster.getKey(), cluster.getValue()) ? 1 : 0;
- return successes;
+ int attempts = 0;
+ int failures = 0;
+ for (var application : activeNodesByApplication().entrySet()) {
+ for (var cluster : nodesByCluster(application.getValue()).entrySet()) {
+ attempts++;
+ if ( ! suggest(application.getKey(), cluster.getKey(), cluster.getValue()))
+ failures++;
+ }
+ }
+ return asSuccessFactor(attempts, failures);
}
private Applications applications() {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
index 0307ae13b24..0589571e9d8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainer.java
@@ -66,12 +66,11 @@ public class SpareCapacityMaintainer extends NodeRepositoryMaintainer {
}
@Override
- protected boolean maintain() {
- if ( ! nodeRepository().nodes().isWorking()) return false;
+ protected double maintain() {
+ if ( ! nodeRepository().nodes().isWorking()) return 0.0;
- boolean success = true;
// Don't need to maintain spare capacity in dynamically provisioned zones; can provision more on demand.
- if (nodeRepository().zone().getCloud().dynamicProvisioning()) return success;
+ if (nodeRepository().zone().getCloud().dynamicProvisioning()) return 1.0;
NodeList allNodes = nodeRepository().nodes().list();
CapacityChecker capacityChecker = new CapacityChecker(allNodes);
@@ -80,6 +79,7 @@ public class SpareCapacityMaintainer extends NodeRepositoryMaintainer {
metric.set("overcommittedHosts", overcommittedHosts.size(), null);
retireOvercommitedHosts(allNodes, overcommittedHosts);
+ boolean success = true;
Optional<CapacityChecker.HostFailurePath> failurePath = capacityChecker.worstCaseHostLossLeadingToFailure();
if (failurePath.isPresent()) {
int spareHostCapacity = failurePath.get().hostsCausingFailure.size() - 1;
@@ -96,7 +96,7 @@ public class SpareCapacityMaintainer extends NodeRepositoryMaintainer {
}
metric.set("spareHostCapacity", spareHostCapacity, null);
}
- return success;
+ return success ? 1.0 : 0.0;
}
private boolean execute(List<Move> mitigation, CapacityChecker.HostFailurePath failurePath) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java
index cfab980570d..44890f2f5af 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/SwitchRebalancer.java
@@ -33,14 +33,14 @@ public class SwitchRebalancer extends NodeMover<Move> {
}
@Override
- protected boolean maintain() {
- if (!nodeRepository().nodes().isWorking()) return false;
- if (!nodeRepository().zone().environment().isProduction()) return true;
+ protected double maintain() {
+ if (!nodeRepository().nodes().isWorking()) return 0.0;
+ if (!nodeRepository().zone().environment().isProduction()) return 1.0;
NodeList allNodes = nodeRepository().nodes().list(); // Lockless as strong consistency is not needed
- if (!zoneIsStable(allNodes)) return true;
+ if (!zoneIsStable(allNodes)) return 1.0;
findBestMove(allNodes).execute(false, Agent.SwitchRebalancer, deployer, metric, nodeRepository());
- return true;
+ return 1.0;
}
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java
index d1c3f00ddca..ed82470fa42 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Agent.java
@@ -21,6 +21,7 @@ public enum Agent {
InactiveExpirer,
ProvisionedExpirer,
ReservationExpirer,
+ ParkedExpirer,
DynamicProvisioningMaintainer,
RetiringUpgrader,
RebuildingOsUpgrader,
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index 3afe5824af5..4d67c83a179 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -161,6 +161,14 @@ public class Nodes {
node = node.with(node.status().withFailCount(existing.get().status().failCount()));
if (existing.get().status().firmwareVerifiedAt().isPresent())
node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
+ // Preserve wantToRebuild/wantToRetire when rebuilding, as these fields should not be cleared until the
+ // host is readied (i.e. we know it is up and the rebuild has completed)
+ boolean rebuilding = existing.get().status().wantToRebuild();
+ if (rebuilding) {
+ node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
+ false,
+ rebuilding));
+ }
nodesToRemove.add(existing.get());
}
@@ -508,6 +516,8 @@ public class Nodes {
public void forget(Node node) {
if (node.state() != Node.State.deprovisioned)
throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
+ if (node.status().wantToRebuild())
+ throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
NestedTransaction transaction = new NestedTransaction();
db.removeNodes(List.of(node), transaction);
transaction.commit();
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
index c8b928779b9..e3fb3379da1 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializer.java
@@ -10,6 +10,7 @@ import com.yahoo.slime.ObjectTraverser;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.provision.applications.Application;
+import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.applications.Status;
@@ -55,6 +56,8 @@ public class ApplicationSerializer {
private static final String nodeResourcesKey = "resources";
private static final String scalingEventsKey = "scalingEvents";
private static final String autoscalingStatusKey = "autoscalingStatus";
+ private static final String autoscalingStatusObjectKey = "autoscalingStatusObject";
+ private static final String descriptionKey = "description";
private static final String fromKey = "from";
private static final String toKey = "to";
private static final String generationKey = "generation";
@@ -118,7 +121,8 @@ public class ApplicationSerializer {
cluster.suggestedResources().ifPresent(suggested -> toSlime(suggested, clusterObject.setObject(suggestedKey)));
cluster.targetResources().ifPresent(target -> toSlime(target, clusterObject.setObject(targetResourcesKey)));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray(scalingEventsKey));
- clusterObject.setString(autoscalingStatusKey, cluster.autoscalingStatus());
+ clusterObject.setString(autoscalingStatusKey, cluster.autoscalingStatus().description()); // TODO: Remove after June 2021
+ toSlime(cluster.autoscalingStatus(), clusterObject.setObject(autoscalingStatusObjectKey));
}
private static Cluster clusterFromSlime(String id, Inspector clusterObject) {
@@ -129,7 +133,7 @@ public class ApplicationSerializer {
optionalSuggestionFromSlime(clusterObject.field(suggestedKey)),
optionalClusterResourcesFromSlime(clusterObject.field(targetResourcesKey)),
scalingEventsFromSlime(clusterObject.field(scalingEventsKey)),
- clusterObject.field(autoscalingStatusKey).asString());
+ autoscalingStatusFromSlime(clusterObject.field(autoscalingStatusObjectKey), clusterObject));
}
private static void toSlime(Cluster.Suggestion suggestion, Cursor suggestionObject) {
@@ -188,6 +192,42 @@ public class ApplicationSerializer {
optionalInstant(inspector.field(completionKey)));
}
+ private static void toSlime(AutoscalingStatus status, Cursor object) {
+ object.setString(statusKey, toAutoscalingStatusCode(status.status()));
+ object.setString(descriptionKey, status.description());
+ }
+
+ private static AutoscalingStatus autoscalingStatusFromSlime(Inspector object, Inspector parent) {
+ // TODO: Remove this clause after June 2021
+ if ( ! object.valid()) return new AutoscalingStatus(AutoscalingStatus.Status.unavailable,
+ parent.field(autoscalingStatusKey).asString());
+
+ return new AutoscalingStatus(fromAutoscalingStatusCode(object.field(statusKey).asString()),
+ object.field(descriptionKey).asString());
+ }
+
+ private static String toAutoscalingStatusCode(AutoscalingStatus.Status status) {
+ switch (status) {
+ case unavailable : return "unavailable";
+ case waiting : return "waiting";
+ case ideal : return "ideal";
+ case insufficient : return "insufficient";
+ case rescaling : return "rescaling";
+ default : throw new IllegalArgumentException("Unknown autoscaling status " + status);
+ }
+ }
+
+ private static AutoscalingStatus.Status fromAutoscalingStatusCode(String code) {
+ switch (code) {
+ case "unavailable" : return AutoscalingStatus.Status.unavailable;
+ case "waiting" : return AutoscalingStatus.Status.waiting;
+ case "ideal" : return AutoscalingStatus.Status.ideal;
+ case "insufficient" : return AutoscalingStatus.Status.insufficient;
+ case "rescaling" : return AutoscalingStatus.Status.rescaling;
+ default : throw new IllegalArgumentException("Unknown autoscaling status '" + code + "'");
+ }
+ }
+
private static Optional<Instant> optionalInstant(Inspector inspector) {
return inspector.valid() ? Optional.of(Instant.ofEpochMilli(inspector.asLong())) : Optional.empty();
}
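The autoscaling status is now persisted as a small object with a status code and a description, and the two switch methods above map the Status enum to and from its serialized string. A quick round-trip check of that mapping, using local copies of the enum values and the two methods as they appear in this diff:

    // Round-trip check of the status-code mapping in ApplicationSerializer (local copies for illustration)
    public class AutoscalingStatusCodes {

        enum Status { unavailable, waiting, ideal, insufficient, rescaling }

        static String toCode(Status status) {
            switch (status) {
                case unavailable : return "unavailable";
                case waiting : return "waiting";
                case ideal : return "ideal";
                case insufficient : return "insufficient";
                case rescaling : return "rescaling";
                default : throw new IllegalArgumentException("Unknown autoscaling status " + status);
            }
        }

        static Status fromCode(String code) {
            switch (code) {
                case "unavailable" : return Status.unavailable;
                case "waiting" : return Status.waiting;
                case "ideal" : return Status.ideal;
                case "insufficient" : return Status.insufficient;
                case "rescaling" : return Status.rescaling;
                default : throw new IllegalArgumentException("Unknown autoscaling status '" + code + "'");
            }
        }

        public static void main(String[] args) {
            for (Status status : Status.values()) {
                if (fromCode(toCode(status)) != status)
                    throw new AssertionError("Mapping is not symmetric for " + status);
            }
            System.out.println("All autoscaling status codes round-trip");
        }
    }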
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
index d1f881f8b7a..0205cc6c818 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClient.java
@@ -11,7 +11,6 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.Zone;
import com.yahoo.path.Path;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.transaction.Transaction;
@@ -72,18 +71,15 @@ public class CuratorDatabaseClient {
private static final Path firmwareCheckPath = root.append("firmwareCheck");
private static final Path archiveUrisPath = root.append("archiveUris");
- private static final Duration defaultLockTimeout = Duration.ofMinutes(6);
+ private static final Duration defaultLockTimeout = Duration.ofMinutes(10);
private final NodeSerializer nodeSerializer;
private final CuratorDatabase db;
private final Clock clock;
- private final Zone zone;
private final CuratorCounter provisionIndexCounter;
- public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, Zone zone, boolean useCache,
- long nodeCacheSize) {
+ public CuratorDatabaseClient(NodeFlavors flavors, Curator curator, Clock clock, boolean useCache, long nodeCacheSize) {
this.nodeSerializer = new NodeSerializer(flavors, nodeCacheSize);
- this.zone = zone;
this.db = new CuratorDatabase(curator, root, useCache);
this.clock = clock;
this.provisionIndexCounter = new CuratorCounter(curator, root.append("provisionIndexCounter").getAbsolute());
@@ -486,18 +482,29 @@ public class CuratorDatabaseClient {
return read(loadBalancerPath(id), LoadBalancerSerializer::fromJson);
}
- public void writeLoadBalancer(LoadBalancer loadBalancer) {
+ public void writeLoadBalancer(LoadBalancer loadBalancer, LoadBalancer.State fromState) {
NestedTransaction transaction = new NestedTransaction();
- writeLoadBalancers(List.of(loadBalancer), transaction);
+ writeLoadBalancers(List.of(loadBalancer), fromState, transaction);
transaction.commit();
}
- public void writeLoadBalancers(Collection<LoadBalancer> loadBalancers, NestedTransaction transaction) {
+ public void writeLoadBalancers(Collection<LoadBalancer> loadBalancers, LoadBalancer.State fromState, NestedTransaction transaction) {
CuratorTransaction curatorTransaction = db.newCuratorTransactionIn(transaction);
loadBalancers.forEach(loadBalancer -> {
curatorTransaction.add(createOrSet(loadBalancerPath(loadBalancer.id()),
LoadBalancerSerializer.toJson(loadBalancer)));
});
+ transaction.onCommitted(() -> {
+ for (var lb : loadBalancers) {
+ if (lb.state() == fromState) continue;
+ if (fromState == null) {
+ log.log(Level.INFO, () -> "Creating " + lb.id() + " in " + lb.state());
+ } else {
+ log.log(Level.INFO, () -> "Moving " + lb.id() + " from " + fromState +
+ " to " + lb.state());
+ }
+ }
+ });
}
public void removeLoadBalancer(LoadBalancerId loadBalancer) {
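writeLoadBalancer now takes the previous state so that state transitions can be logged once the transaction commits: a null previous state logs a creation, a different previous state logs a move, and an unchanged state logs nothing. Callers in this patch therefore pass the state captured before the change, as in db.writeLoadBalancer(lb.with(State.inactive, now), lb.state()). A toy model of just that logging decision, with plain strings standing in for the real types:

    // Toy model of the transition logging added to writeLoadBalancers (ids and states are plain strings here)
    public class TransitionLog {

        static void logTransition(String id, String fromState, String toState) {
            if (toState.equals(fromState)) return;          // unchanged: nothing to log
            if (fromState == null)
                System.out.println("Creating " + id + " in " + toState);
            else
                System.out.println("Moving " + id + " from " + fromState + " to " + toState);
        }

        public static void main(String[] args) {
            logTransition("tenant:app:default", null, "reserved");        // Creating ... in reserved
            logTransition("tenant:app:default", "reserved", "inactive");  // Moving ... from reserved to inactive
            logTransition("tenant:app:default", "inactive", "inactive");  // silent
        }
    }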
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
index d83f21e5fec..dff4a66bd42 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
@@ -482,6 +482,7 @@ public class NodeSerializer {
case "SpareCapacityMaintainer": return Agent.SpareCapacityMaintainer;
case "SwitchRebalancer": return Agent.SwitchRebalancer;
case "HostEncrypter": return Agent.HostEncrypter;
+ case "ParkedExpirer": return Agent.ParkedExpirer;
}
throw new IllegalArgumentException("Unknown node event agent '" + eventAgentField.asString() + "'");
}
@@ -504,6 +505,7 @@ public class NodeSerializer {
case SpareCapacityMaintainer: return "SpareCapacityMaintainer";
case SwitchRebalancer: return "SwitchRebalancer";
case HostEncrypter: return "HostEncrypter";
+ case ParkedExpirer: return "ParkedExpirer";
}
throw new IllegalArgumentException("Serialized form of '" + agent + "' not defined");
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
index 6d06dc31a42..cb965e87739 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Activator.java
@@ -13,6 +13,7 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
+import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.node.Allocation;
@@ -117,7 +118,8 @@ class Activator {
}
if (cluster.targetResources().isPresent()
&& cluster.targetResources().get().justNumbers().equals(currentResources.justNumbers())) {
- cluster = cluster.withAutoscalingStatus("Cluster is ideally scaled within configured limits");
+ cluster = cluster.with(new AutoscalingStatus(AutoscalingStatus.Status.ideal,
+ "Cluster is ideally scaled within configured limits"));
}
if (cluster != modified.cluster(clusterEntry.getKey()).get())
modified = modified.with(cluster);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 711fe39d056..abd910485ac 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -60,7 +60,7 @@ public class CapacityPolicies {
return target;
}
- private NodeResources defaultNodeResources(ClusterSpec.Type clusterType) {
+ public NodeResources defaultNodeResources(ClusterSpec.Type clusterType) {
if (clusterType == ClusterSpec.Type.admin) {
if (zone.system() == SystemName.dev) {
// Use small logserver in dev system
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index def992a264b..5d45bed19e8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -7,10 +7,6 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.transaction.Mutex;
-import com.yahoo.vespa.flags.FetchVector;
-import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
-import com.yahoo.vespa.flags.StringFlag;
import com.yahoo.vespa.hosted.provision.LockedNodeList;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
@@ -33,14 +29,10 @@ public class GroupPreparer {
private final NodeRepository nodeRepository;
private final Optional<HostProvisioner> hostProvisioner;
- private final StringFlag allocateOsRequirementFlag;
- public GroupPreparer(NodeRepository nodeRepository,
- Optional<HostProvisioner> hostProvisioner,
- FlagSource flagSource) {
+ public GroupPreparer(NodeRepository nodeRepository, Optional<HostProvisioner> hostProvisioner) {
this.nodeRepository = nodeRepository;
this.hostProvisioner = hostProvisioner;
- this.allocateOsRequirementFlag = Flags.ALLOCATE_OS_REQUIREMENT.bindTo(flagSource);
}
/**
@@ -60,17 +52,11 @@ public class GroupPreparer {
// active config model which is changed on activate
public List<Node> prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
List<Node> surplusActiveNodes, NodeIndices indices, int wantedGroups) {
-
- String allocateOsRequirement = allocateOsRequirementFlag
- .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
- .value();
-
// Try preparing in memory without global unallocated lock. Most of the time there should be no changes and we
// can return nodes previously allocated.
{
NodeAllocation probeAllocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes,
- indices::probeNext, wantedGroups, PROBE_LOCK,
- allocateOsRequirement);
+ indices::probeNext, wantedGroups, PROBE_LOCK);
if (probeAllocation.fulfilledAndNoChanges()) {
List<Node> acceptedNodes = probeAllocation.finalNodes();
surplusActiveNodes.removeAll(acceptedNodes);
@@ -85,21 +71,18 @@ public class GroupPreparer {
Mutex allocationLock = nodeRepository.nodes().lockUnallocated()) {
NodeAllocation allocation = prepareAllocation(application, cluster, requestedNodes, surplusActiveNodes,
- indices::next, wantedGroups, allocationLock,
- allocateOsRequirement);
+ indices::next, wantedGroups, allocationLock);
NodeType hostType = allocation.nodeType().hostType();
if (canProvisionDynamically(hostType)) {
HostSharing sharing = hostSharing(requestedNodes, hostType);
+ Version osVersion = nodeRepository.osVersions().targetFor(hostType).orElse(Version.emptyVersion);
List<ProvisionedHost> provisionedHosts = allocation.hostDeficit()
- .map(deficit ->
- hostProvisioner.get().provisionHosts(allocation.provisionIndices(deficit.count()),
- hostType,
- deficit.resources(),
- application,
- decideOsVersion(allocateOsRequirement, hostType),
- sharing)
- )
- .orElseGet(List::of);
+ .map(deficit -> hostProvisioner.get().provisionHosts(allocation.provisionIndices(deficit.count()),
+ hostType,
+ deficit.resources(),
+ application, osVersion,
+ sharing))
+ .orElseGet(List::of);
// At this point we have started provisioning of the hosts, the first priority is to make sure that
// the returned hosts are added to the node-repo so that they are tracked by the provision maintainers
@@ -131,7 +114,7 @@ public class GroupPreparer {
private NodeAllocation prepareAllocation(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes,
List<Node> surplusActiveNodes, Supplier<Integer> nextIndex, int wantedGroups,
- Mutex allocationLock, String allocateOsRequirement) {
+ Mutex allocationLock) {
LockedNodeList allNodes = nodeRepository.nodes().list(allocationLock);
NodeAllocation allocation = new NodeAllocation(allNodes, application, cluster, requestedNodes, nextIndex, nodeRepository);
NodePrioritizer prioritizer = new NodePrioritizer(allNodes,
@@ -142,8 +125,7 @@ public class GroupPreparer {
nodeRepository.zone().getCloud().dynamicProvisioning(),
nodeRepository.nameResolver(),
nodeRepository.resourcesCalculator(),
- nodeRepository.spareCount(),
- allocateOsRequirement);
+ nodeRepository.spareCount());
allocation.offer(prioritizer.collect(surplusActiveNodes));
return allocation;
}
@@ -161,13 +143,4 @@ public class GroupPreparer {
return sharing;
}
- private Version decideOsVersion(String allocateOsRequirement, NodeType hostType) {
- if (allocateOsRequirement.equals("rhel8"))
- return new Version(8, Integer.MAX_VALUE /* always use latest 8 version */, 0);
- else if (allocateOsRequirement.equals("rhel7"))
- return new Version(7, Integer.MAX_VALUE /* always use latest 7 version */, 0);
- else
- return nodeRepository.osVersions().targetFor(hostType).orElse(Version.emptyVersion);
- }
-
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index c114aa58a05..1a01fac247e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -7,7 +7,6 @@ import com.yahoo.config.provision.ApplicationTransaction;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.ProvisionLock;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.exception.LoadBalancerServiceException;
import com.yahoo.transaction.NestedTransaction;
@@ -61,7 +60,7 @@ public class LoadBalancerProvisioner {
for (var id : db.readLoadBalancerIds()) {
try (var lock = db.lock(id.application())) {
var loadBalancer = db.readLoadBalancer(id);
- loadBalancer.ifPresent(db::writeLoadBalancer);
+ loadBalancer.ifPresent(lb -> db.writeLoadBalancer(lb, lb.state()));
}
}
}
@@ -77,15 +76,12 @@ public class LoadBalancerProvisioner {
* Calling this for irrelevant node or cluster types is a no-op.
*/
public void prepare(ApplicationId application, ClusterSpec cluster, NodeSpec requestedNodes) {
- if (!service.canForwardTo(requestedNodes.type(), cluster.type())) return; // Nothing to provision for this node and cluster type
- if (application.instance().isTester()) return; // Do not provision for tester instances
+ if (!shouldProvision(application, requestedNodes.type(), cluster.type())) return;
try (var lock = db.lock(application)) {
ClusterSpec.Id clusterId = effectiveId(cluster);
- NodeList nodes = nodesOf(clusterId, application);
LoadBalancerId loadBalancerId = requireNonClashing(new LoadBalancerId(application, clusterId));
- ApplicationTransaction transaction = new ApplicationTransaction(new ProvisionLock(application, lock), new NestedTransaction());
- provision(transaction, loadBalancerId, nodes, false);
- transaction.nested().commit();
+ NodeList nodes = nodesOf(clusterId, application);
+ prepare(loadBalancerId, nodes);
}
}
@@ -100,14 +96,18 @@ public class LoadBalancerProvisioner {
* Calling this when no load balancer has been prepared for given cluster is a no-op.
*/
public void activate(Set<ClusterSpec> clusters, ApplicationTransaction transaction) {
+ Set<ClusterSpec.Id> activatingClusters = clusters.stream()
+ .map(LoadBalancerProvisioner::effectiveId)
+ .collect(Collectors.toSet());
for (var cluster : loadBalancedClustersOf(transaction.application()).entrySet()) {
- // Provision again to ensure that load balancer instance is re-configured with correct nodes
- provision(transaction, cluster.getKey(), cluster.getValue());
+ if (!activatingClusters.contains(cluster.getKey())) continue;
+
+ Node clusterNode = cluster.getValue().first().get();
+ if (!shouldProvision(transaction.application(), clusterNode.type(), clusterNode.allocation().get().membership().cluster().type())) continue;
+ activate(transaction, cluster.getKey(), cluster.getValue());
}
// Deactivate any surplus load balancers, i.e. load balancers for clusters that have been removed
- var surplusLoadBalancers = surplusLoadBalancersOf(transaction.application(), clusters.stream()
- .map(LoadBalancerProvisioner::effectiveId)
- .collect(Collectors.toSet()));
+ var surplusLoadBalancers = surplusLoadBalancersOf(transaction.application(), activatingClusters);
deactivate(surplusLoadBalancers, transaction.nested());
}
@@ -119,6 +119,13 @@ public class LoadBalancerProvisioner {
deactivate(nodeRepository.loadBalancers().list(transaction.application()).asList(), transaction.nested());
}
+ /** Returns whether to provision a load balancer for given application */
+ private boolean shouldProvision(ApplicationId application, NodeType nodeType, ClusterSpec.Type clusterType) {
+ if (application.instance().isTester()) return false; // Do not provision for tester instances
+ if (!service.supports(nodeType, clusterType)) return false; // Nothing to provision for this node and cluster type
+ return true;
+ }
+
/** Returns load balancers of given application that are no longer referenced by given clusters */
private List<LoadBalancer> surplusLoadBalancersOf(ApplicationId application, Set<ClusterSpec.Id> activeClusters) {
var activeLoadBalancersByCluster = nodeRepository.loadBalancers().list(application)
@@ -140,7 +147,7 @@ public class LoadBalancerProvisioner {
var deactivatedLoadBalancers = loadBalancers.stream()
.map(lb -> lb.with(LoadBalancer.State.inactive, now))
.collect(Collectors.toList());
- db.writeLoadBalancers(deactivatedLoadBalancers, transaction);
+ db.writeLoadBalancers(deactivatedLoadBalancers, LoadBalancer.State.active, transaction);
}
/** Find all load balancer IDs owned by given tenant and application */
@@ -165,52 +172,41 @@ public class LoadBalancerProvisioner {
return loadBalancerId;
}
- /** Idempotently provision a load balancer for given application and cluster */
- private void provision(ApplicationTransaction transaction, LoadBalancerId id, NodeList nodes, boolean activate) {
+ private void prepare(LoadBalancerId id, NodeList nodes) {
Instant now = nodeRepository.clock().instant();
Optional<LoadBalancer> loadBalancer = db.readLoadBalancer(id);
- if (loadBalancer.isEmpty() && activate) return; // Nothing to activate as this load balancer was never prepared
-
- Set<Real> reals = realsOf(nodes);
- Optional<LoadBalancerInstance> instance = provisionInstance(id, reals, loadBalancer);
+ Optional<LoadBalancerInstance> instance = provisionInstance(id, nodes, loadBalancer);
LoadBalancer newLoadBalancer;
+ LoadBalancer.State fromState = null;
if (loadBalancer.isEmpty()) {
newLoadBalancer = new LoadBalancer(id, instance, LoadBalancer.State.reserved, now);
} else {
- LoadBalancer.State state = activate && instance.isPresent()
- ? LoadBalancer.State.active
- : loadBalancer.get().state();
- newLoadBalancer = loadBalancer.get().with(instance).with(state, now);
- if (loadBalancer.get().state() != newLoadBalancer.state()) {
- log.log(Level.INFO, () -> "Moving " + newLoadBalancer.id() + " from " + loadBalancer.get().state() +
- " to " + newLoadBalancer.state());
- }
- }
-
- if (activate) {
- db.writeLoadBalancers(List.of(newLoadBalancer), transaction.nested());
- } else {
- // Always store load balancer so that LoadBalancerExpirer can expire partially provisioned load balancers
- db.writeLoadBalancer(newLoadBalancer);
- }
-
- // Signal that load balancer is not ready yet
- if (instance.isEmpty()) {
- throw new LoadBalancerServiceException("Could not (re)configure " + id + ", targeting: " +
- reals + ". The operation will be retried on next deployment",
- null);
+ newLoadBalancer = loadBalancer.get().with(instance);
+ fromState = newLoadBalancer.state();
}
+ // Always store load balancer so that LoadBalancerExpirer can expire partially provisioned load balancers
+ db.writeLoadBalancer(newLoadBalancer, fromState);
+ requireInstance(id, instance);
}
- private void provision(ApplicationTransaction transaction, ClusterSpec.Id clusterId, NodeList nodes) {
- provision(transaction, new LoadBalancerId(transaction.application(), clusterId), nodes, true);
+ private void activate(ApplicationTransaction transaction, ClusterSpec.Id cluster, NodeList nodes) {
+ Instant now = nodeRepository.clock().instant();
+ LoadBalancerId id = new LoadBalancerId(transaction.application(), cluster);
+ Optional<LoadBalancer> loadBalancer = db.readLoadBalancer(id);
+        if (loadBalancer.isEmpty()) throw new IllegalArgumentException("Could not activate load balancer that was never prepared: " + id);
+
+ Optional<LoadBalancerInstance> instance = provisionInstance(id, nodes, loadBalancer);
+ LoadBalancer.State state = instance.isPresent() ? LoadBalancer.State.active : loadBalancer.get().state();
+ LoadBalancer newLoadBalancer = loadBalancer.get().with(instance).with(state, now);
+ db.writeLoadBalancers(List.of(newLoadBalancer), loadBalancer.get().state(), transaction.nested());
+ requireInstance(id, instance);
}
/** Provision or reconfigure a load balancer instance, if necessary */
- private Optional<LoadBalancerInstance> provisionInstance(LoadBalancerId id, Set<Real> reals,
- Optional<LoadBalancer> currentLoadBalancer) {
+ private Optional<LoadBalancerInstance> provisionInstance(LoadBalancerId id, NodeList nodes, Optional<LoadBalancer> currentLoadBalancer) {
+ Set<Real> reals = realsOf(nodes);
if (hasReals(currentLoadBalancer, reals)) return currentLoadBalancer.get().instance();
- log.log(Level.INFO, () -> "Creating " + id + ", targeting: " + reals);
+ log.log(Level.INFO, () -> "Provisioning instance for " + id + ", targeting: " + reals);
try {
return Optional.of(service.create(new LoadBalancerSpec(id.application(), id.cluster(), reals),
allowEmptyReals(currentLoadBalancer)));
@@ -241,7 +237,7 @@ public class LoadBalancerProvisioner {
/** Returns real servers for given nodes */
private Set<Real> realsOf(NodeList nodes) {
- var reals = new LinkedHashSet<Real>();
+ Set<Real> reals = new LinkedHashSet<Real>();
for (var node : nodes) {
for (var ip : reachableIpAddresses(node)) {
reals.add(new Real(HostName.from(node.hostname()), ip));
@@ -289,6 +285,14 @@ public class LoadBalancerProvisioner {
return reachable;
}
+ private static void requireInstance(LoadBalancerId id, Optional<LoadBalancerInstance> instance) {
+ if (instance.isEmpty()) {
+ // Signal that load balancer is not ready yet
+ throw new LoadBalancerServiceException("Could not (re)configure " + id + ". The operation will be retried on next deployment",
+ null);
+ }
+ }
+
private static ClusterSpec.Id effectiveId(ClusterSpec cluster) {
return cluster.combinedId().orElse(cluster.id());
}
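
Note on the change above: the single idempotent provision(...) is split into prepare(...), which runs under the application lock but outside the transaction and always persists the load balancer so LoadBalancerExpirer can expire partially provisioned ones, and activate(...), which runs inside the ApplicationTransaction and now rejects a load balancer that was never prepared. A minimal, hypothetical sketch of that two-phase control flow follows; it is illustrative only, and none of the types below are Vespa classes.

// Toy model of the prepare/activate split: prepare persists a reserved record,
// activate flips it to active and requires a prior prepare.
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

class TwoPhaseLoadBalancers {

    enum State { reserved, active }

    // Stands in for the persistent load balancer store (CuratorDatabaseClient in the real code).
    private final Map<String, State> db = new HashMap<>();

    /** Outside the transaction: always store, so an expirer can clean up partial provisioning. */
    void prepare(String id) {
        db.putIfAbsent(id, State.reserved);
    }

    /** Inside the transaction: fails if prepare() never ran for this id. */
    void activate(String id) {
        State current = db.get(id);
        if (current == null)
            throw new IllegalArgumentException("Could not activate load balancer that was never prepared: " + id);
        db.put(id, State.active);
    }

    Optional<State> state(String id) { return Optional.ofNullable(db.get(id)); }

    public static void main(String[] args) {
        TwoPhaseLoadBalancers lbs = new TwoPhaseLoadBalancers();
        lbs.prepare("app1/cluster1");
        lbs.activate("app1/cluster1");
        System.out.println(lbs.state("app1/cluster1").get()); // prints: active
    }
}
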
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 8eca4ff2d95..695f0dd8659 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -1,7 +1,6 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.provisioning;
-import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeType;
@@ -32,7 +31,6 @@ public class NodePrioritizer {
private final List<NodeCandidate> nodes = new ArrayList<>();
private final LockedNodeList allNodes;
- private final String allocateOsRequirement;
private final HostCapacity capacity;
private final NodeSpec requestedNodes;
private final ApplicationId application;
@@ -48,9 +46,8 @@ public class NodePrioritizer {
public NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
int wantedGroups, boolean dynamicProvisioning, NameResolver nameResolver,
- HostResourcesCalculator hostResourcesCalculator, int spareCount, String allocateOsRequirement) {
+ HostResourcesCalculator hostResourcesCalculator, int spareCount) {
this.allNodes = allNodes;
- this.allocateOsRequirement = allocateOsRequirement;
this.capacity = new HostCapacity(allNodes, hostResourcesCalculator);
this.requestedNodes = nodeSpec;
this.clusterSpec = clusterSpec;
@@ -140,13 +137,6 @@ public class NodePrioritizer {
if (host.reservedTo().isPresent() && !host.reservedTo().get().equals(application.tenant())) continue;
if (host.reservedTo().isPresent() && application.instance().isTester()) continue;
if (host.exclusiveTo().isPresent()) continue; // Never allocate new nodes to exclusive hosts
-
- if (host.status().osVersion().isBefore(new Version(8))) {
- if (allocateOsRequirement.equals("rhel8")) continue;
- } else {
- if (allocateOsRequirement.equals("rhel7")) continue;
- }
-
if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
if ( ! capacity.hasCapacity(host, requestedNodes.resources().get())) continue;
if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 2cc7d7e2555..24d23f13bb5 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -71,7 +71,6 @@ public class NodeRepositoryProvisioner implements Provisioner {
.map(lbService -> new LoadBalancerProvisioner(nodeRepository, lbService));
this.nodeResourceLimits = new NodeResourceLimits(nodeRepository);
this.preparer = new Preparer(nodeRepository,
- flagSource,
provisionServiceProvider.getHostProvisioner(),
loadBalancerProvisioner);
this.activator = new Activator(nodeRepository, loadBalancerProvisioner);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
index 4e74104c25b..d23b3c782c8 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeResourceLimits.java
@@ -64,6 +64,7 @@ public class NodeResourceLimits {
private double minAdvertisedVcpu(ClusterSpec.Type clusterType) {
if (zone().environment() == Environment.dev && !zone().getCloud().dynamicProvisioning()) return 0.1;
+ if (clusterType.isContent() && zone().environment().isProduction()) return 1.0;
if (clusterType == ClusterSpec.Type.admin) return 0.1;
return 0.5;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 97f935d273b..3fa44d4c091 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -5,7 +5,6 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.OutOfCapacityException;
-import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
@@ -28,11 +27,11 @@ class Preparer {
private final GroupPreparer groupPreparer;
private final Optional<LoadBalancerProvisioner> loadBalancerProvisioner;
- public Preparer(NodeRepository nodeRepository, FlagSource flagSource, Optional<HostProvisioner> hostProvisioner,
+ public Preparer(NodeRepository nodeRepository, Optional<HostProvisioner> hostProvisioner,
Optional<LoadBalancerProvisioner> loadBalancerProvisioner) {
this.nodeRepository = nodeRepository;
this.loadBalancerProvisioner = loadBalancerProvisioner;
- this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner, flagSource);
+ this.groupPreparer = new GroupPreparer(nodeRepository, hostProvisioner);
}
/** Prepare all required resources for the given application and cluster */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
index 9c6efd9efe6..e800f6c9c84 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/ApplicationSerializer.java
@@ -73,7 +73,8 @@ public class ApplicationSerializer {
cluster.targetResources().ifPresent(target -> toSlime(target, clusterObject.setObject("target")));
clusterModel.ifPresent(model -> clusterUtilizationToSlime(model, clusterObject.setObject("utilization")));
scalingEventsToSlime(cluster.scalingEvents(), clusterObject.setArray("scalingEvents"));
- clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus());
+ clusterObject.setString("autoscalingStatusCode", cluster.autoscalingStatus().status().name());
+ clusterObject.setString("autoscalingStatus", cluster.autoscalingStatus().description());
clusterModel.ifPresent(model -> clusterObject.setLong("scalingDuration", model.scalingDuration().toMillis()));
clusterModel.ifPresent(model -> clusterObject.setDouble("maxQueryGrowthRate", model.maxQueryGrowthRate()));
clusterModel.ifPresent(model -> clusterObject.setDouble("currentQueryFractionOfMax", model.queryFractionOfMax()));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeSerializer.java
index b72d021e4f5..706c36b35ac 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/NodeSerializer.java
@@ -1,9 +1,7 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.restapi;
-import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.slime.Cursor;
import com.yahoo.vespa.hosted.provision.Node;
/**
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
index 351f9fe44ee..4bfe01375c1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTest.java
@@ -197,8 +197,16 @@ public class NodeRepositoryTest {
}
tester.nodeRepository().nodes().removeRecursively("host1");
+ // Set host 2 properties and deprovision it
+ try (var lock = tester.nodeRepository().nodes().lockAndGetRequired("host2")) {
+ Node host2 = lock.node().withWantToRetire(true, false, true, Agent.system, tester.nodeRepository().clock().instant());
+ tester.nodeRepository().nodes().write(host2, lock);
+ }
+ tester.nodeRepository().nodes().removeRecursively("host2");
+
// Host 1 is deprovisioned and unwanted properties are cleared
Node host1 = tester.nodeRepository().nodes().node("host1").get();
+ Node host2 = tester.nodeRepository().nodes().node("host2").get();
assertEquals(Node.State.deprovisioned, host1.state());
assertTrue(host1.history().hasEventAfter(History.Event.Type.deprovisioned, testStart));
@@ -214,6 +222,8 @@ public class NodeRepositoryTest {
assertTrue("Transferred from deprovisioned host", host1.status().firmwareVerifiedAt().isPresent());
assertEquals("Transferred from deprovisioned host", 1, host1.status().failCount());
assertEquals("Transferred from deprovisioned host", 1, host1.reports().getReports().size());
+ assertTrue("Transferred from rebuilt host", host2.status().wantToRetire());
+ assertTrue("Transferred from rebuilt host", host2.status().wantToRebuild());
}
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index 98bfa2170a5..41a399c5e2f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -15,6 +15,7 @@ import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.Nodelike;
+import com.yahoo.vespa.hosted.provision.provisioning.CapacityPolicies;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
import org.junit.Test;
@@ -219,6 +220,32 @@ public class AutoscalingTest {
}
@Test
+ public void autoscaling_with_unspecified_resources_use_defaults() {
+ NodeResources hostResources = new NodeResources(6, 100, 100, 1);
+ ClusterResources min = new ClusterResources( 2, 1, NodeResources.unspecified());
+ ClusterResources max = new ClusterResources( 6, 1, NodeResources.unspecified());
+ AutoscalingTester tester = new AutoscalingTester(hostResources);
+
+ ApplicationId application1 = tester.applicationId("application1");
+ ClusterSpec cluster1 = tester.clusterSpec(ClusterSpec.Type.container, "cluster1");
+
+ NodeResources defaultResources =
+ new CapacityPolicies(tester.nodeRepository()).defaultNodeResources(cluster1.type());
+
+ // deploy
+ tester.deploy(application1, cluster1, Capacity.from(min, max));
+ tester.assertResources("Min number of nodes and default resources",
+ 2, 1, defaultResources,
+ Optional.of(tester.nodeRepository().nodes().list().owner(application1).toResources()));
+ tester.addMeasurements(0.25f, 0.95f, 0.95f, 0, 120, application1);
+ tester.clock().advance(Duration.ofMinutes(-10 * 5));
+ tester.addQueryRateMeasurements(application1, cluster1.id(), 10, t -> t == 0 ? 20.0 : 10.0); // Query traffic only
+ tester.assertResources("Scaling up to limit since resource usage is too high",
+ 4, 1, defaultResources,
+ tester.autoscale(application1, cluster1.id(), min, max).target());
+ }
+
+ @Test
public void autoscaling_respects_group_limit() {
NodeResources hostResources = new NodeResources(30.0, 100, 100, 1);
ClusterResources min = new ClusterResources( 2, 2, new NodeResources(1, 1, 1, 1));
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
index 5fe6023e5af..f96679b7195 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTester.java
@@ -267,7 +267,7 @@ class AutoscalingTester {
}
public Autoscaler.Advice autoscale(ApplicationId applicationId, ClusterSpec.Id clusterId,
- ClusterResources min, ClusterResources max) {
+ ClusterResources min, ClusterResources max) {
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
.withCluster(clusterId, false, min, max);
try (Mutex lock = nodeRepository().nodes().lock(applicationId)) {
@@ -278,7 +278,7 @@ class AutoscalingTester {
}
public Autoscaler.Advice suggest(ApplicationId applicationId, ClusterSpec.Id clusterId,
- ClusterResources min, ClusterResources max) {
+ ClusterResources min, ClusterResources max) {
Application application = nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId))
.withCluster(clusterId, false, min, max);
try (Mutex lock = nodeRepository().nodes().lock(applicationId)) {
@@ -290,6 +290,15 @@ class AutoscalingTester {
public ClusterResources assertResources(String message,
int nodeCount, int groupCount,
+ NodeResources expectedResources,
+ Optional<ClusterResources> resources) {
+ return assertResources(message, nodeCount, groupCount,
+ expectedResources.vcpu(), expectedResources.memoryGb(), expectedResources.diskGb(),
+ resources);
+ }
+
+ public ClusterResources assertResources(String message,
+ int nodeCount, int groupCount,
double approxCpu, double approxMemory, double approxDisk,
Optional<ClusterResources> resources) {
double delta = 0.0000000001;
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index c6575931c6d..af1bd2aa231 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -7,6 +7,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.provision.applications.Application;
+import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.Status;
import org.junit.Test;
@@ -79,7 +80,7 @@ public class ClusterModelTest {
Optional.empty(),
Optional.empty(),
List.of(),
- "");
+ AutoscalingStatus.empty());
}
/** Creates the given number of measurements, spaced 5 minutes between, using the given function */
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java
index db6aebacddc..62d09c99f16 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/OperatorChangeApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java
@@ -26,7 +26,7 @@ import static org.junit.Assert.assertEquals;
/**
* @author bratseth
*/
-public class OperatorChangeApplicationMaintainerTest {
+public class ExpeditedChangeApplicationMaintainerTest {
@Test
public void test_application_maintenance() {
@@ -42,10 +42,10 @@ public class OperatorChangeApplicationMaintainerTest {
// Create applications
fixture.activate();
assertEquals("Initial applications are deployed", 3, fixture.deployer.redeployments);
- OperatorChangeApplicationMaintainer maintainer = new OperatorChangeApplicationMaintainer(fixture.deployer,
- new TestMetric(),
- nodeRepository,
- Duration.ofMinutes(1));
+ ExpeditedChangeApplicationMaintainer maintainer = new ExpeditedChangeApplicationMaintainer(fixture.deployer,
+ new TestMetric(),
+ nodeRepository,
+ Duration.ofMinutes(1));
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
index 968427f0781..7fa6810c1ba 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MetricsReporterTest.java
@@ -43,6 +43,7 @@ import java.util.TreeMap;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
@@ -112,6 +113,8 @@ public class MetricsReporterTest {
expectedMetrics.put("wantToRetire", 0);
expectedMetrics.put("wantToDeprovision", 0);
expectedMetrics.put("failReport", 0);
+
+
expectedMetrics.put("allowedToBeDown", 1);
expectedMetrics.put("suspended", 1);
expectedMetrics.put("suspendedSeconds", 123L);
@@ -146,6 +149,27 @@ public class MetricsReporterTest {
assertEquals(expectedMetrics, new TreeMap<>(metric.values));
}
+ @Test
+ public void test_registered_metrics_for_host() {
+ NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default");
+ Orchestrator orchestrator = mock(Orchestrator.class);
+ when(orchestrator.getHostInfo(eq(reference), any())).thenReturn(
+ HostInfo.createSuspended(HostStatus.ALLOWED_TO_BE_DOWN, Instant.ofEpochSecond(1)));
+ ProvisioningTester tester = new ProvisioningTester.Builder().flavors(nodeFlavors.getFlavors()).orchestrator(orchestrator).build();
+ tester.makeProvisionedNodes(1, "default", NodeType.host, 0);
+
+ tester.clock().setInstant(Instant.ofEpochSecond(124));
+
+ TestMetric metric = new TestMetric();
+ MetricsReporter metricsReporter = metricsReporter(metric, tester);
+ metricsReporter.maintain();
+
+ // Only verify metrics that are set for hosts
+ TreeMap<String, Number> metrics = new TreeMap<>(metric.values);
+ assertTrue(metrics.containsKey("wantToEncrypt"));
+ assertTrue(metrics.containsKey("diskEncrypted"));
+ }
+
private void verifyAndRemoveIntegerMetricSum(TestMetric metric, String key, int expected) {
assertEquals(expected, (int) metric.sumNumberValues(key));
metric.remove(key);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainerTest.java
index 5b2f7ce91e8..cfe6e4d348d 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeMetricsDbMaintainerTest.java
@@ -43,7 +43,7 @@ public class NodeMetricsDbMaintainerTest {
fetcher,
Duration.ofHours(1),
new TestMetric());
- assertTrue(maintainer.maintain());
+ assertEquals(maintainer.maintain(), 1.0, 0.0000001);
List<NodeTimeseries> timeseriesList = tester.nodeRepository().metricsDb().getNodeTimeseries(Duration.ofDays(1),
Set.of("host-1.yahoo.com", "host-2.yahoo.com"));
assertEquals(2, timeseriesList.size());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java
new file mode 100644
index 00000000000..786faae24b4
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ProvisionedExpirerTest.java
@@ -0,0 +1,50 @@
+package com.yahoo.vespa.hosted.provision.maintenance;
+
+import com.yahoo.config.provision.Cloud;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.Flavor;
+import com.yahoo.config.provision.NodeResources;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.node.Agent;
+import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
+import com.yahoo.vespa.hosted.provision.testutils.MockHostProvisioner;
+import org.junit.Test;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.junit.Assert.*;
+
+/**
+ * @author olaa
+ */
+public class ProvisionedExpirerTest {
+
+ private ProvisioningTester tester;
+
+ @Test
+ public void deprovisions_hosts_if_excessive_expiry() {
+ tester = new ProvisioningTester.Builder().build();
+ populateNodeRepo();
+
+ tester.clock().advance(Duration.ofMinutes(5));
+ new ProvisionedExpirer(tester.nodeRepository(), Duration.ofMinutes(4), new TestMetric()).maintain();
+
+ assertEquals(5, tester.nodeRepository().nodes().list().deprovisioning().size());
+ assertEquals(20, tester.nodeRepository().nodes().list().not().deprovisioning().size());
+ }
+
+ private void populateNodeRepo() {
+ var nodes = IntStream.range(0, 25)
+ .mapToObj(i -> Node.create("id-" + i, "host-" + i, new Flavor(NodeResources.unspecified()), Node.State.provisioned, NodeType.host).build())
+ .collect(Collectors.toList());
+ tester.nodeRepository().database().addNodesInState(nodes, Node.State.provisioned, Agent.system);
+ }
+
+}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
index 9cac6430d6e..61ff494679f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/ApplicationSerializerTest.java
@@ -6,6 +6,7 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.applications.Application;
+import com.yahoo.vespa.hosted.provision.applications.AutoscalingStatus;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.ScalingEvent;
import com.yahoo.vespa.hosted.provision.applications.Status;
@@ -35,7 +36,7 @@ public class ApplicationSerializerTest {
Optional.empty(),
Optional.empty(),
List.of(),
- ""));
+ AutoscalingStatus.empty()));
var minResources = new NodeResources(1, 2, 3, 4);
clusters.add(new Cluster(ClusterSpec.Id.from("c2"),
true,
@@ -50,7 +51,7 @@ public class ApplicationSerializerTest {
7L,
Instant.ofEpochMilli(12345L),
Optional.of(Instant.ofEpochMilli(67890L)))),
- "Autoscaling status"));
+ new AutoscalingStatus(AutoscalingStatus.Status.insufficient, "Autoscaling status")));
Application original = new Application(ApplicationId.from("myTenant", "myApplication", "myInstance"),
Status.initial().withCurrentReadShare(0.3).withMaxReadShare(0.5),
clusters);
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java
index f47fb7f23be..99f6ce4fb00 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDatabaseClientTest.java
@@ -1,4 +1,4 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.persistence;
import com.yahoo.config.provision.ApplicationId;
@@ -6,7 +6,6 @@ import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.hosted.provision.Node;
@@ -25,7 +24,7 @@ public class CuratorDatabaseClientTest {
private final Curator curator = new MockCurator();
private final CuratorDatabaseClient zkClient = new CuratorDatabaseClient(
- FlavorConfigBuilder.createDummies("default"), curator, Clock.systemUTC(), Zone.defaultZone(), true, 1000);
+ FlavorConfigBuilder.createDummies("default"), curator, Clock.systemUTC(), true, 1000);
@Test
public void can_read_stored_host_information() throws Exception {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
index 8b2febf37b1..afbd44a346f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningCompleteHostCalculatorTest.java
@@ -26,7 +26,7 @@ public class DockerProvisioningCompleteHostCalculatorTest {
@Test
public void changing_to_different_range_preserves_allocation() {
- Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 1000, 4));
+ Flavor hostFlavor = new Flavor(new NodeResources(40, 40, 1000, 4));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
.resourcesCalculator(new CompleteResourcesCalculator(hostFlavor))
.flavors(List.of(hostFlavor))
@@ -36,25 +36,25 @@ public class DockerProvisioningCompleteHostCalculatorTest {
ApplicationId app1 = ProvisioningTester.applicationId("app1");
ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
- var initialResources = new NodeResources(2, 16, 50, 1);
+ var initialResources = new NodeResources(20, 16, 50, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, initialResources),
new ClusterResources(2, 1, initialResources)));
tester.assertNodes("Initial allocation",
- 2, 1, 2, 16, 50, 1.0,
+ 2, 1, 20, 16, 50, 1.0,
app1, cluster1);
- var newMinResources = new NodeResources(0.5, 4, 11, 1);
- var newMaxResources = new NodeResources(2.0, 10, 30, 1);
+ var newMinResources = new NodeResources( 5, 4, 11, 1);
+ var newMaxResources = new NodeResources(20, 10, 30, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("New allocation preserves total resources",
- 7, 1, 0.7, 4.6, 14.3, 1.0,
+ 7, 1, 7, 4.6, 14.3, 1.0,
app1, cluster1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("Redeploying the same ranges does not cause changes",
- 7, 1, 0.7, 4.6, 14.3, 1.0,
+ 7, 1, 7, 4.6, 14.3, 1.0,
app1, cluster1);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
index f2ca993f4d7..fd8cf9ea00f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DockerProvisioningTest.java
@@ -295,7 +295,7 @@ public class DockerProvisioningTest {
@Test
public void changing_to_different_range_preserves_allocation() {
- Flavor hostFlavor = new Flavor(new NodeResources(20, 40, 100, 4));
+ Flavor hostFlavor = new Flavor(new NodeResources(40, 40, 100, 4));
ProvisioningTester tester = new ProvisioningTester.Builder().zone(new Zone(Environment.prod, RegionName.from("us-east")))
.resourcesCalculator(3, 0)
.flavors(List.of(hostFlavor))
@@ -305,25 +305,25 @@ public class DockerProvisioningTest {
ApplicationId app1 = ProvisioningTester.applicationId("app1");
ClusterSpec cluster1 = ClusterSpec.request(ClusterSpec.Type.content, new ClusterSpec.Id("cluster1")).vespaVersion("7").build();
- var initialResources = new NodeResources(2, 16, 50, 1);
+ var initialResources = new NodeResources(20, 16, 50, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(2, 1, initialResources),
new ClusterResources(2, 1, initialResources)));
tester.assertNodes("Initial allocation",
- 2, 1, 2, 16, 50, 1.0,
+ 2, 1, 20, 16, 50, 1.0,
app1, cluster1);
- var newMinResources = new NodeResources(0.5, 6, 11, 1);
- var newMaxResources = new NodeResources(2.0, 10, 30, 1);
+ var newMinResources = new NodeResources( 5, 6, 11, 1);
+ var newMaxResources = new NodeResources(20, 10, 30, 1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("New allocation preserves total resources",
- 7, 1, 0.7, 6.7, 14.3, 1.0,
+ 7, 1, 7, 6.7, 14.3, 1.0,
app1, cluster1);
tester.activate(app1, cluster1, Capacity.from(new ClusterResources(7, 1, newMinResources),
new ClusterResources(7, 1, newMaxResources)));
tester.assertNodes("Redeploying does not cause changes",
- 7, 1, 0.7, 6.7, 14.3, 1.0,
+ 7, 1, 7, 6.7, 14.3, 1.0,
app1, cluster1);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
index f5cf9dbb471..029c9ffa559 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/DynamicDockerProvisionTest.java
@@ -286,7 +286,7 @@ public class DynamicDockerProvisionTest {
}
// Initial deployment
- tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 0.5, 5, 20),
+ tester.activate(app1, cluster1, Capacity.from(resources(4, 2, 1, 5, 20),
resources(6, 3, 4, 20, 40)));
tester.assertNodes("Initial allocation at first actual flavor above min (except for disk)",
4, 2, 1, 10, 20,
@@ -316,7 +316,7 @@ public class DynamicDockerProvisionTest {
// Force 1 more groups: Reducing to 2 nodes per group to preserve node count is rejected
// since it will reduce total group memory from 60 to 40.
- tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 0.5, 5, 10),
+ tester.activate(app1, cluster1, Capacity.from(resources(6, 3, 1, 5, 10),
resources(9, 3, 5, 20, 15)));
tester.assertNodes("Group size is preserved",
9, 3, 2, 20, 15,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
index bdc3bdfd816..16fe5ef241a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisionerTest.java
@@ -300,6 +300,13 @@ public class LoadBalancerProvisionerTest {
assertTrue("Load balancer has instance", loadBalancers.get(0).instance().isPresent());
}
+ @Test
+ public void provisioning_load_balancer_for_unsupported_cluster_fails_gracefully() {
+ tester.loadBalancerService().supportsProvisioning(false);
+ tester.activate(app1, prepare(app1, clusterRequest(ClusterSpec.Type.container, ClusterSpec.Id.from("qrs"))));
+ assertTrue("No load balancer provisioned", tester.nodeRepository().loadBalancers().list(app1).asList().isEmpty());
+ }
+
private void dirtyNodesOf(ApplicationId application) {
tester.nodeRepository().nodes().deallocate(tester.nodeRepository().nodes().list().owner(application).asList(), Agent.system, this.getClass().getSimpleName());
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
index a28c11d009f..dd16d4674ad 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/NodesV2ApiTest.java
@@ -243,6 +243,13 @@ public class NodesV2ApiTest {
new byte[0], Request.Method.DELETE),
"{\"message\":\"Removed dockerhost1.yahoo.com\"}");
// ... and then forget it completely
+ tester.assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com",
+ new byte[0], Request.Method.DELETE),
+ 400,
+ "{\"error-code\":\"BAD_REQUEST\",\"message\":\"deprovisioned host dockerhost1.yahoo.com is rebuilding and cannot be forgotten\"}");
+ assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com",
+ Utf8.toBytes("{\"wantToRebuild\": false}"), Request.Method.PATCH),
+ "{\"message\":\"Updated dockerhost1.yahoo.com\"}");
assertResponse(new Request("http://localhost:8080/nodes/v2/node/dockerhost1.yahoo.com",
new byte[0], Request.Method.DELETE),
"{\"message\":\"Permanently removed dockerhost1.yahoo.com\"}");
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
index 1083930e294..ca0c548c1be 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application1.json
@@ -104,6 +104,7 @@
"at" : 123
}
],
+ "autoscalingStatusCode": "unavailable",
"autoscalingStatus": "",
"scalingDuration": 600000,
"maxQueryGrowthRate": 0.1,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json
index 61e0569d349..c7eaa4af974 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/application2.json
@@ -80,6 +80,7 @@
"at" : 123
}
],
+ "autoscalingStatusCode": "unavailable",
"autoscalingStatus" : "",
"scalingDuration": 43200000,
"maxQueryGrowthRate": 0.1,
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
index 2dcf2d0b838..72224ef3cba 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/restapi/responses/maintenance.json
@@ -7,6 +7,9 @@
"name": "DirtyExpirer"
},
{
+ "name": "ExpeditedChangeApplicationMaintainer"
+ },
+ {
"name": "FailedExpirer"
},
{
@@ -37,9 +40,6 @@
"name": "NodeRebooter"
},
{
- "name": "OperatorChangeApplicationMaintainer"
- },
- {
"name": "OsUpgradeActivator"
},
{
diff --git a/parent/pom.xml b/parent/pom.xml
index 6f1d6f23f51..4c100397e9d 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -605,11 +605,6 @@
</dependency>
<dependency>
<groupId>org.apache.httpcomponents</groupId>
- <artifactId>fluent-hc</artifactId>
- <version>4.3.6</version>
- </dependency>
- <dependency>
- <groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
<version>${apache.httpclient.version}</version>
</dependency>
@@ -631,7 +626,7 @@
<dependency>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpmime</artifactId>
- <version>4.3.6</version>
+ <version>${apache.httpclient.version}</version>
</dependency>
<dependency>
<groupId>org.apache.maven</groupId>
@@ -885,7 +880,7 @@
<maven-site-plugin.version>3.3</maven-site-plugin.version>
<maven-source-plugin.version>3.0.1</maven-source-plugin.version>
<prometheus.client.version>0.6.0</prometheus.client.version>
- <onnxruntime.version>1.7.0</onnxruntime.version>
+ <onnxruntime.version>1.8.0</onnxruntime.version>
<protobuf.version>3.11.4</protobuf.version>
<spifly.version>1.3.3</spifly.version>
<surefire.version>2.22.0</surefire.version>
diff --git a/pom.xml b/pom.xml
index 82d9efbb70e..fa7fa8f6ae3 100644
--- a/pom.xml
+++ b/pom.xml
@@ -53,6 +53,7 @@
<module>configserver-flags</module>
<module>config_test</module>
<module>container</module>
+ <module>container-apache-http-client-bundle</module>
<module>container-core</module>
<module>container-dependencies-enforcer</module>
<module>container-dependency-versions</module>
diff --git a/screwdriver/build-vespa.sh b/screwdriver/build-vespa.sh
index 4480b33e6f9..91375728ca9 100755
--- a/screwdriver/build-vespa.sh
+++ b/screwdriver/build-vespa.sh
@@ -6,7 +6,7 @@ set -e
readonly SOURCE_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd )"
readonly NUM_THREADS=$(( $(nproc) + 2 ))
-source /etc/profile.d/enable-devtoolset-9.sh
+source /etc/profile.d/enable-devtoolset-10.sh
source /etc/profile.d/enable-rh-maven35.sh
export MALLOC_ARENA_MAX=1
@@ -52,9 +52,9 @@ esac
if [[ $SHOULD_BUILD == systemtest ]]; then
yum -y --setopt=skip_missing_names_on_install=False install \
zstd \
- devtoolset-9-gcc-c++ \
- devtoolset-9-libatomic-devel \
- devtoolset-9-binutils \
+ devtoolset-10-gcc-c++ \
+ devtoolset-10-libatomic-devel \
+ devtoolset-10-binutils \
libxml2-devel \
rh-ruby27-rubygems-devel \
rh-ruby27-ruby-devel \
diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
index 463a7b164e1..3013e8f38d1 100644
--- a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
@@ -19,6 +19,8 @@
#include <vespa/searchcore/proton/server/document_db_explorer.h>
#include <vespa/searchcore/proton/server/documentdb.h>
#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
+#include <vespa/searchcore/proton/server/feedhandler.h>
+#include <vespa/searchcore/proton/server/fileconfigmanager.h>
#include <vespa/searchcore/proton/server/memoryconfigstore.h>
#include <vespa/persistence/dummyimpl/dummy_bucket_executor.h>
#include <vespa/searchcorespi/index/indexflushtarget.h>
@@ -28,7 +30,10 @@
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/util/size_literals.h>
#include <vespa/config-bucketspaces.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/testkit/test_kit.h>
+#include <iostream>
using namespace cloud::config::filedistribution;
using namespace proton;
@@ -39,6 +44,7 @@ using document::DocumentType;
using document::DocumentTypeRepo;
using document::DocumenttypesConfig;
using document::test::makeBucketSpace;
+using search::SerialNum;
using search::TuneFileDocumentDB;
using search::index::DummyFileHeaderContext;
using search::index::Schema;
@@ -51,6 +57,24 @@ using vespalib::Slime;
namespace {
+void
+cleanup_dirs(bool file_config)
+{
+ vespalib::rmdir("typea", true);
+ vespalib::rmdir("tmp", true);
+ if (file_config) {
+ vespalib::rmdir("config", true);
+ }
+}
+
+vespalib::string
+config_subdir(SerialNum serialNum)
+{
+ vespalib::asciistream os;
+ os << "config/config-" << serialNum;
+ return os.str();
+}
+
struct MyDBOwner : public DummyDBOwner
{
std::shared_ptr<DocumentDBReferenceRegistry> _registry;
@@ -67,7 +91,30 @@ MyDBOwner::MyDBOwner()
{}
MyDBOwner::~MyDBOwner() = default;
-struct Fixture {
+struct FixtureBase {
+ bool _cleanup;
+ bool _file_config;
+ FixtureBase(bool file_config);
+ ~FixtureBase();
+ void disable_cleanup() { _cleanup = false; }
+};
+
+FixtureBase::FixtureBase(bool file_config)
+ : _cleanup(true),
+ _file_config(file_config)
+{
+ vespalib::mkdir("typea");
+}
+
+
+FixtureBase::~FixtureBase()
+{
+ if (_cleanup) {
+ cleanup_dirs(_file_config);
+ }
+}
+
+struct Fixture : public FixtureBase {
DummyWireService _dummy;
MyDBOwner _myDBOwner;
vespalib::ThreadStackExecutor _summaryExecutor;
@@ -79,12 +126,20 @@ struct Fixture {
matching::QueryLimiter _queryLimiter;
vespalib::Clock _clock;
+ std::unique_ptr<ConfigStore> make_config_store();
Fixture();
+ Fixture(bool file_config);
~Fixture();
};
Fixture::Fixture()
- : _dummy(),
+ : Fixture(false)
+{
+}
+
+Fixture::Fixture(bool file_config)
+ : FixtureBase(file_config),
+ _dummy(),
_myDBOwner(),
_summaryExecutor(8, 128_Ki),
_hwInfo(),
@@ -111,13 +166,25 @@ Fixture::Fixture()
_db = DocumentDB::create(".", mgr.getConfig(), "tcp/localhost:9014", _queryLimiter, _clock, DocTypeName("typea"),
makeBucketSpace(),
*b->getProtonConfigSP(), _myDBOwner, _summaryExecutor, _summaryExecutor, _bucketExecutor, _tls, _dummy,
- _fileHeaderContext, std::make_unique<MemoryConfigStore>(),
+ _fileHeaderContext, make_config_store(),
std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), _hwInfo);
_db->start();
_db->waitForOnlineState();
}
-Fixture::~Fixture() = default;
+Fixture::~Fixture()
+{
+}
+
+std::unique_ptr<ConfigStore>
+Fixture::make_config_store()
+{
+ if (_file_config) {
+ return std::make_unique<FileConfigManager>("config", "", "typea");
+ } else {
+ return std::make_unique<MemoryConfigStore>();
+ }
+}
const IFlushTarget *
extractRealFlushTarget(const IFlushTarget *target)
@@ -249,11 +316,56 @@ TEST_F("require that document db registers reference", Fixture)
EXPECT_EQUAL(search::attribute::BasicType::INT32, attrReadGuard->attribute()->getBasicType());
}
+TEST("require that normal restart works")
+{
+ {
+ Fixture f(true);
+ f.disable_cleanup();
+ }
+ {
+ Fixture f(true);
+ }
+}
+
+TEST("require that resume after interrupted save config works")
+{
+ SerialNum serialNum = 0;
+ {
+ Fixture f(true);
+ f.disable_cleanup();
+ serialNum = f._db->getFeedHandler().getSerialNum();
+ }
+ {
+ /*
+ * Simulate interrupted save config by copying best config to
+ * serial number after end of transaction log
+ */
+ std::cout << "Replay end serial num is " << serialNum << std::endl;
+ search::IndexMetaInfo info("config");
+ ASSERT_TRUE(info.load());
+ auto best_config_snapshot = info.getBestSnapshot();
+ ASSERT_TRUE(best_config_snapshot.valid);
+ std::cout << "Best config serial is " << best_config_snapshot.syncToken << std::endl;
+ auto old_config_subdir = config_subdir(best_config_snapshot.syncToken);
+ auto new_config_subdir = config_subdir(serialNum + 1);
+ vespalib::mkdir(new_config_subdir);
+ auto config_files = vespalib::listDirectory(old_config_subdir);
+ for (auto &config_file : config_files) {
+ vespalib::copy(old_config_subdir + "/" + config_file, new_config_subdir + "/" + config_file, false, false);
+ }
+ info.addSnapshot({true, serialNum + 1, new_config_subdir.substr(new_config_subdir.rfind('/') + 1)});
+ info.save();
+ }
+ {
+ Fixture f(true);
+ }
+}
+
} // namespace
TEST_MAIN() {
+ cleanup_dirs(true);
DummyFileHeaderContext::setCreator("documentdb_test");
- FastOS_File::MakeDirectory("typea");
TEST_RUN_ALL();
- FastOS_FileInterface::EmptyAndRemoveDirectory("typea");
+ cleanup_dirs(true);
}
diff --git a/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp b/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp
index cff44631c6c..f918ebe9179 100644
--- a/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp
+++ b/searchcore/src/tests/proton/server/memory_flush_config_updater/memory_flush_config_updater_test.cpp
@@ -159,7 +159,7 @@ TEST_F("require that more disk bloat is allowed while node state is retired", Fi
f.notifyDiskMemUsage(ResourceUsageState(0.7, 0.3), belowLimit());
TEST_DO(f.assertStrategyDiskConfig(0.2, 0.2));
f.setNodeRetired(true);
- TEST_DO(f.assertStrategyDiskConfig((0.8 - 0.3 / 0.7) * 0.8, 1.0));
+ TEST_DO(f.assertStrategyDiskConfig((0.8 - ((0.3/0.7)*(1 - 0.2))) / 0.8, 1.0));
f.notifyDiskMemUsage(belowLimit(), belowLimit());
TEST_DO(f.assertStrategyDiskConfig(0.2, 0.2));
}
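
For reference, treating the constants in this test purely numerically (an assumption; the fixture's semantics are not restated here): the old expectation (0.8 - 0.3/0.7) * 0.8 evaluates to about 0.297, while the updated expression (0.8 - ((0.3/0.7) * (1 - 0.2))) / 0.8 evaluates to about 0.571, so a retired node is now expected to be allowed roughly twice as much disk bloat.
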
diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def
index 3b94bb9984e..dd411d7f30f 100644
--- a/searchcore/src/vespa/searchcore/config/proton.def
+++ b/searchcore/src/vespa/searchcore/config/proton.def
@@ -175,7 +175,7 @@ index.cache.size long default=0 restart
attribute.write.io enum {NORMAL, OSYNC, DIRECTIO} default=DIRECTIO restart
## Multiple optional options for use with mmap
-search.mmap.options[] enum {MLOCK, POPULATE, HUGETLB} restart
+search.mmap.options[] enum {POPULATE, HUGETLB} restart
## Advise to give to os when mapping memory.
search.mmap.advise enum {NORMAL, RANDOM, SEQUENTIAL} default=NORMAL restart
@@ -277,7 +277,7 @@ summary.write.io enum {NORMAL, OSYNC, DIRECTIO} default=DIRECTIO
summary.read.io enum {NORMAL, DIRECTIO, MMAP } default=MMAP restart
## Multiple optional options for use with mmap
-summary.read.mmap.options[] enum {MLOCK, POPULATE, HUGETLB} restart
+summary.read.mmap.options[] enum {POPULATE, HUGETLB} restart
## Advise to give to os when mapping memory.
summary.read.mmap.advise enum {NORMAL, RANDOM, SEQUENTIAL} default=NORMAL restart
diff --git a/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp b/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp
index 55e9ce16f70..01dd069b03c 100644
--- a/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp
+++ b/searchcore/src/vespa/searchcore/grouping/groupingcontext.cpp
@@ -4,17 +4,15 @@
#include <vespa/searchlib/aggregation/predicates.h>
#include <vespa/searchlib/aggregation/modifiers.h>
-namespace search {
+namespace search::grouping {
using aggregation::CountFS4Hits;
using aggregation::FS4HitSetDistributionKey;
-namespace grouping {
-
void
GroupingContext::deserialize(const char *groupSpec, uint32_t groupSpecLen)
{
- if ((groupSpec != NULL) && (groupSpecLen > 4)) {
+ if ((groupSpec != nullptr) && (groupSpecLen > 4)) {
vespalib::nbostream is(groupSpec, groupSpecLen);
vespalib::NBOSerializer nis(is);
uint32_t numGroupings = 0;
@@ -102,6 +100,4 @@ GroupingContext::needRanking() const
return true;
}
-
-} // namespace search::grouping
-} // namespace search
+}
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
index aa633536419..e53e817af8d 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
@@ -632,8 +632,9 @@ DocumentDB::saveInitialConfig(const DocumentDBConfig &configSnapshot)
// Only called from ctor
lock_guard guard(_configMutex);
- if (_config_store->getBestSerialNum() != 0)
+ if (_config_store->getBestSerialNum() != 0) {
return; // Initial config already present
+ }
SerialNum confSerial = _feedHandler->inc_replay_end_serial_num();
_feedHandler->setSerialNum(confSerial);
@@ -658,16 +659,17 @@ void
DocumentDB::resumeSaveConfig()
{
SerialNum bestSerial = _config_store->getBestSerialNum();
- if (bestSerial == 0)
- return;
- if (bestSerial != _feedHandler->get_replay_end_serial_num() + 1)
+ assert(bestSerial != 0);
+ if (bestSerial != _feedHandler->get_replay_end_serial_num() + 1) {
return;
+ }
+ LOG(warning, "DocumentDB(%s): resumeSaveConfig() resuming save config for serial %" PRIu64,
+ _docTypeName.toString().c_str(), bestSerial);
// proton was interrupted when saving later config.
SerialNum confSerial = _feedHandler->inc_replay_end_serial_num();
- _feedHandler->setSerialNum(confSerial);
+ assert(confSerial == bestSerial);
// resume operation, i.e. save config entry in transaction log
NewConfigOperation op(confSerial, *_config_store);
- op.setSerialNum(_feedHandler->inc_replay_end_serial_num());
(void) _feedHandler->storeOperationSync(op);
sync(op.getSerialNum());
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
index 4b862b40896..04aea64fbd4 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
@@ -4,8 +4,6 @@
#include "bootstrapconfig.h"
#include <vespa/searchcore/proton/common/hw_info_sampler.h>
#include <vespa/config/print/fileconfigwriter.h>
-#include <vespa/config/print/fileconfigsnapshotreader.h>
-#include <vespa/config/print/fileconfigsnapshotwriter.h>
#include <vespa/config-bucketspaces.h>
#include <vespa/document/repo/document_type_repo_factory.h>
#include <vespa/searchcommon/common/schemaconfigurer.h>
@@ -42,7 +40,8 @@ using vespa::config::search::summary::JuniperrcConfig;
using vespa::config::content::core::BucketspacesConfig;
using vespalib::nbostream;
-typedef IndexMetaInfo::SnapshotList SnapshotList;
+using SnapshotList = IndexMetaInfo::SnapshotList;
+using Snapshot = IndexMetaInfo::Snapshot;
using namespace std::chrono_literals;
namespace proton {
@@ -74,9 +73,7 @@ fsyncFile(const vespalib::string &fileName)
template <class Config>
void
-saveHelper(const vespalib::string &snapDir,
- const vespalib::string &name,
- const Config &config)
+saveHelper(const vespalib::string &snapDir, const vespalib::string &name, const Config &config)
{
vespalib::string fileName(snapDir + "/" + name + ".cfg");
config::FileConfigWriter writer(fileName);
@@ -105,8 +102,7 @@ public:
ConfigFile();
~ConfigFile();
- ConfigFile(const vespalib::string &name,
- const vespalib::string &fullName);
+ ConfigFile(const vespalib::string &name, const vespalib::string &fullName);
nbostream &serialize(nbostream &stream) const;
nbostream &deserialize(nbostream &stream);
@@ -122,8 +118,7 @@ ConfigFile::ConfigFile()
ConfigFile::~ConfigFile() = default;
-ConfigFile::ConfigFile(const vespalib::string &name,
- const vespalib::string &fullName)
+ConfigFile::ConfigFile(const vespalib::string &name, const vespalib::string &fullName)
: _name(name),
_modTime(0),
_content()
@@ -142,7 +137,7 @@ ConfigFile::ConfigFile(const vespalib::string &name,
nbostream &
ConfigFile::serialize(nbostream &stream) const
{
- assert(strchr(_name.c_str(), '/') == NULL);
+ assert(strchr(_name.c_str(), '/') == nullptr);
stream << _name;
stream << static_cast<int64_t>(_modTime);;
uint32_t sz = _content.size();
@@ -155,7 +150,7 @@ nbostream &
ConfigFile::deserialize(nbostream &stream)
{
stream >> _name;
- assert(strchr(_name.c_str(), '/') == NULL);
+ assert(strchr(_name.c_str(), '/') == nullptr);
int64_t modTime;
stream >> modTime;
_modTime = modTime;
@@ -255,8 +250,7 @@ FileConfigManager::getOldestSerialNum() const
}
void
-FileConfigManager::saveConfig(const DocumentDBConfig &snapshot,
- SerialNum serialNum)
+FileConfigManager::saveConfig(const DocumentDBConfig &snapshot, SerialNum serialNum)
{
if (getBestSerialNum() >= serialNum) {
LOG(warning, "Config for serial >= %" PRIu64 " already saved",
@@ -318,8 +312,7 @@ void addEmptyFile(vespalib::string snapDir, vespalib::string fileName)
}
void
-FileConfigManager::loadConfig(const DocumentDBConfig &currentSnapshot,
- search::SerialNum serialNum,
+FileConfigManager::loadConfig(const DocumentDBConfig &currentSnapshot, search::SerialNum serialNum,
DocumentDBConfig::SP &loadedSnapshot)
{
vespalib::string snapDirBaseName(makeSnapDirBaseName(serialNum));
@@ -333,13 +326,14 @@ FileConfigManager::loadConfig(const DocumentDBConfig &currentSnapshot,
DocumentDBConfigHelper dbc(spec, _docTypeName);
- typedef DocumenttypesConfig DTC;
- typedef DocumentDBConfig::DocumenttypesConfigSP DTCSP;
- DTCSP docTypesCfg(config::ConfigGetter<DTC>::getConfig("", spec).release());
+ using DTC = DocumenttypesConfig;
+ using DTCSP = DocumentDBConfig::DocumenttypesConfigSP;
+ DTCSP docTypesCfg = config::ConfigGetter<DTC>::getConfig("", spec);
std::shared_ptr<const DocumentTypeRepo> repo;
if (currentSnapshot.getDocumenttypesConfigSP() &&
currentSnapshot.getDocumentTypeRepoSP() &&
- currentSnapshot.getDocumenttypesConfig() == *docTypesCfg) {
+ (currentSnapshot.getDocumenttypesConfig() == *docTypesCfg))
+ {
docTypesCfg = currentSnapshot.getDocumenttypesConfigSP();
repo = currentSnapshot.getDocumentTypeRepoSP();
} else {
@@ -462,8 +456,7 @@ FileConfigManager::serializeConfig(SerialNum serialNum, nbostream &stream)
uint32_t numConfigs = configs.size();
stream << numConfigs;
for (const auto &config : configs) {
- ConfigFile file(config,
- snapDir + "/" + config);
+ ConfigFile file(config, snapDir + "/" + config);
stream << file;
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h
index 1c477ffd3c8..d58d7920c67 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h
+++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.h
@@ -10,17 +10,12 @@
namespace proton {
class FileConfigManager : public ConfigStore {
-public:
- typedef std::unique_ptr<FileConfigManager> UP;
- typedef std::shared_ptr<FileConfigManager> SP;
- typedef search::IndexMetaInfo::Snapshot Snapshot;
-
private:
- vespalib::string _baseDir;
- vespalib::string _configId;
- vespalib::string _docTypeName;
+ vespalib::string _baseDir;
+ vespalib::string _configId;
+ vespalib::string _docTypeName;
search::IndexMetaInfo _info;
- ProtonConfigSP _protonConfig;
+ ProtonConfigSP _protonConfig;
public:
/**
@@ -33,14 +28,12 @@ public:
const vespalib::string &configId,
const vespalib::string &docTypeName);
- virtual
- ~FileConfigManager();
+ ~FileConfigManager() override;
- virtual SerialNum getBestSerialNum() const override;
- virtual SerialNum getOldestSerialNum() const override;
+ SerialNum getBestSerialNum() const override;
+ SerialNum getOldestSerialNum() const override;
- virtual void saveConfig(const DocumentDBConfig &snapshot,
- SerialNum serialNum) override;
+ void saveConfig(const DocumentDBConfig &snapshot, SerialNum serialNum) override;
/**
* Load a config snapshot from disk corresponding to the given
@@ -53,23 +46,21 @@ public:
* @param loadedSnapshot the shared pointer in which to store the
* resulting config snapshot.
*/
- virtual void loadConfig(const DocumentDBConfig &currentSnapshot,
- SerialNum serialNum,
- DocumentDBConfig::SP &loadedSnapshot) override;
+ void loadConfig(const DocumentDBConfig &currentSnapshot, SerialNum serialNum,
+ DocumentDBConfig::SP &loadedSnapshot) override;
- virtual void removeInvalid() override;
- virtual void prune(SerialNum serialNum) override;
- virtual bool hasValidSerial(SerialNum serialNum) const override;
+ void removeInvalid() override;
+ void prune(SerialNum serialNum) override;
+ bool hasValidSerial(SerialNum serialNum) const override;
- virtual SerialNum getPrevValidSerial(SerialNum serialNum) const override;
+ SerialNum getPrevValidSerial(SerialNum serialNum) const override;
/**
* Serialize config files.
*
* Used for serializing config into transaction log.
*/
- virtual void
- serializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
+ void serializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
/**
@@ -80,10 +71,9 @@ public:
* takes precedence over the serialized config files in the
* transaction log.
*/
- virtual void
- deserializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
+ void deserializeConfig(SerialNum serialNum, vespalib::nbostream &stream) override;
- virtual void setProtonConfig(const ProtonConfigSP &protonConfig) override;
+ void setProtonConfig(const ProtonConfigSP &protonConfig) override;
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp
index 88e2096aa63..cf51c7be518 100644
--- a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.cpp
@@ -13,7 +13,8 @@ namespace {
bool
shouldUseConservativeMode(const ResourceUsageState &resourceState,
bool currentlyUseConservativeMode,
- double lowWatermarkFactor) {
+ double lowWatermarkFactor)
+{
return resourceState.aboveLimit() ||
(currentlyUseConservativeMode && resourceState.aboveLimit(lowWatermarkFactor));
}
@@ -21,8 +22,7 @@ shouldUseConservativeMode(const ResourceUsageState &resourceState,
}
void
-MemoryFlushConfigUpdater::considerUseConservativeDiskMode(const LockGuard &guard,
- MemoryFlush::Config &newConfig)
+MemoryFlushConfigUpdater::considerUseConservativeDiskMode(const LockGuard &guard, MemoryFlush::Config &newConfig)
{
if (shouldUseConservativeMode(_currState.diskState(), _useConservativeDiskMode,
_currConfig.conservative.lowwatermarkfactor))
@@ -38,8 +38,7 @@ MemoryFlushConfigUpdater::considerUseConservativeDiskMode(const LockGuard &guard
}
void
-MemoryFlushConfigUpdater::considerUseConservativeMemoryMode(const LockGuard &,
- MemoryFlush::Config &newConfig)
+MemoryFlushConfigUpdater::considerUseConservativeMemoryMode(const LockGuard &, MemoryFlush::Config &newConfig)
{
if (shouldUseConservativeMode(_currState.memoryState(), _useConservativeMemoryMode,
_currConfig.conservative.lowwatermarkfactor))
@@ -59,18 +58,29 @@ MemoryFlushConfigUpdater::considerUseRelaxedDiskMode(const LockGuard &, MemoryFl
double bloatMargin = _currConfig.conservative.lowwatermarkfactor - utilization;
if (bloatMargin > 0.0) {
        // Node retired and disk utilization is below the low watermark factor.
+        // Compute how much of the disk is occupied by live data, given that bloat is maxed,
+        // which is normally the case in a system that has been running for a while.
+        double spaceUtilization = utilization * (1 - _currConfig.diskbloatfactor);
+        // Then compute how much bloat can be allowed, given the current space usage, while still staying below the low watermark.
+        double targetBloat = (_currConfig.conservative.lowwatermarkfactor - spaceUtilization) / _currConfig.conservative.lowwatermarkfactor;
newConfig.diskBloatFactor = 1.0;
- newConfig.globalDiskBloatFactor = std::max(bloatMargin * 0.8, _currConfig.diskbloatfactor);
+ newConfig.globalDiskBloatFactor = std::max(targetBloat, _currConfig.diskbloatfactor);
}
}
void
-MemoryFlushConfigUpdater::updateFlushStrategy(const LockGuard &guard)
+MemoryFlushConfigUpdater::updateFlushStrategy(const LockGuard &guard, const char * why)
{
MemoryFlush::Config newConfig = convertConfig(_currConfig, _memory);
considerUseConservativeDiskMode(guard, newConfig);
considerUseConservativeMemoryMode(guard, newConfig);
_flushStrategy->setConfig(newConfig);
+ LOG(info, "Due to %s (conservative-disk=%d, conservative-memory=%d, retired=%d) flush config updated to "
+ "global-disk-bloat(%1.2f), max-tls-size(%" PRIu64 "),"
+ "max-global-memory(%" PRIu64 "), max-memory-gain(%" PRIu64 ")",
+ why, _useConservativeDiskMode, _useConservativeMemoryMode, _nodeRetired,
+ newConfig.globalDiskBloatFactor, newConfig.maxGlobalTlsSize,
+ newConfig.maxGlobalMemory, newConfig.maxMemoryGain);
}
MemoryFlushConfigUpdater::MemoryFlushConfigUpdater(const MemoryFlush::SP &flushStrategy,
@@ -92,7 +102,7 @@ MemoryFlushConfigUpdater::setConfig(const ProtonConfig::Flush::Memory &newConfig
{
LockGuard guard(_mutex);
_currConfig = newConfig;
- updateFlushStrategy(guard);
+ updateFlushStrategy(guard, "new config");
}
void
@@ -100,7 +110,7 @@ MemoryFlushConfigUpdater::notifyDiskMemUsage(DiskMemUsageState newState)
{
LockGuard guard(_mutex);
_currState = newState;
- updateFlushStrategy(guard);
+ updateFlushStrategy(guard, "disk-mem-usage update");
}
void
@@ -108,7 +118,7 @@ MemoryFlushConfigUpdater::setNodeRetired(bool nodeRetired)
{
LockGuard guard(_mutex);
_nodeRetired = nodeRetired;
- updateFlushStrategy(guard);
+ updateFlushStrategy(guard, nodeRetired ? "node retired" : "node unretired");
}
namespace {
@@ -122,8 +132,7 @@ getHardMemoryLimit(const HwInfo::Memory &memory)
}
MemoryFlush::Config
-MemoryFlushConfigUpdater::convertConfig(const ProtonConfig::Flush::Memory &config,
- const HwInfo::Memory &memory)
+MemoryFlushConfigUpdater::convertConfig(const ProtonConfig::Flush::Memory &config, const HwInfo::Memory &memory)
{
const size_t hardMemoryLimit = getHardMemoryLimit(memory);
size_t totalMaxMemory = config.maxmemory;
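
The relaxed-disk computation added above can be sanity-checked with a small standalone C++ example; the watermark, bloat factor and utilization numbers below are illustrative only:

#include <algorithm>
#include <cstdio>

int main() {
    double lowWatermarkFactor = 0.9;   // conservative.lowwatermarkfactor
    double diskBloatFactor    = 0.2;   // configured diskbloatfactor
    double utilization        = 0.5;   // current disk utilization on the retired node

    // Assume bloat is already maxed out, so live data is what remains after bloat.
    double spaceUtilization = utilization * (1 - diskBloatFactor);                      // 0.40
    // Bloat we can still allow while staying below the low watermark.
    double targetBloat = (lowWatermarkFactor - spaceUtilization) / lowWatermarkFactor;  // ~0.56
    double globalDiskBloatFactor = std::max(targetBloat, diskBloatFactor);

    std::printf("space=%.2f target=%.2f global=%.2f\n",
                spaceUtilization, targetBloat, globalDiskBloatFactor);
    return 0;
}

With these numbers the new formula allows roughly 0.56 global disk bloat before crossing the 0.9 watermark, where the removed 0.8 * bloatMargin heuristic would have allowed only about 0.32.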
diff --git a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h
index 28ee330689d..c19074c288f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h
+++ b/searchcore/src/vespa/searchcore/proton/server/memory_flush_config_updater.h
@@ -21,23 +21,20 @@ private:
using LockGuard = std::lock_guard<Mutex>;
using ProtonConfig = vespa::config::search::core::ProtonConfig;
- Mutex _mutex;
- MemoryFlush::SP _flushStrategy;
+ Mutex _mutex;
+ MemoryFlush::SP _flushStrategy;
ProtonConfig::Flush::Memory _currConfig;
- HwInfo::Memory _memory;
- DiskMemUsageState _currState;
- bool _useConservativeDiskMode;
- bool _useConservativeMemoryMode;
- bool _nodeRetired;
+ HwInfo::Memory _memory;
+ DiskMemUsageState _currState;
+ bool _useConservativeDiskMode;
+ bool _useConservativeMemoryMode;
+ bool _nodeRetired;
- void considerUseConservativeDiskMode(const LockGuard &guard,
- MemoryFlush::Config &newConfig);
- void considerUseConservativeMemoryMode(const LockGuard &guard,
- MemoryFlush::Config &newConfig);
- void considerUseRelaxedDiskMode(const LockGuard &guard,
- MemoryFlush::Config &newConfig);
- void updateFlushStrategy(const LockGuard &guard);
+ void considerUseConservativeDiskMode(const LockGuard &guard, MemoryFlush::Config &newConfig);
+ void considerUseConservativeMemoryMode(const LockGuard &guard, MemoryFlush::Config &newConfig);
+ void considerUseRelaxedDiskMode(const LockGuard &guard, MemoryFlush::Config &newConfig);
+ void updateFlushStrategy(const LockGuard &guard, const char * why);
public:
using UP = std::unique_ptr<MemoryFlushConfigUpdater>;
@@ -47,7 +44,7 @@ public:
const HwInfo::Memory &memory);
void setConfig(const ProtonConfig::Flush::Memory &newConfig);
void setNodeRetired(bool nodeRetired);
- virtual void notifyDiskMemUsage(DiskMemUsageState newState) override;
+ void notifyDiskMemUsage(DiskMemUsageState newState) override;
static MemoryFlush::Config convertConfig(const ProtonConfig::Flush::Memory &config,
const HwInfo::Memory &memory);
diff --git a/searchlib/abi-spec.json b/searchlib/abi-spec.json
index 2d006bbd973..65151dd6ff0 100644
--- a/searchlib/abi-spec.json
+++ b/searchlib/abi-spec.json
@@ -336,6 +336,8 @@
"public java.util.Map getRankProperties(com.yahoo.searchlib.rankingexpression.rule.SerializationContext)",
"public java.util.Map getRankProperties(java.util.List)",
"public static java.lang.String propertyName(java.lang.String)",
+ "public static java.lang.String propertyExpressionName(java.lang.String)",
+ "public static java.lang.String extractScriptName(java.lang.String)",
"public com.yahoo.tensor.TensorType type(com.yahoo.tensor.evaluation.TypeContext)",
"public com.yahoo.searchlib.rankingexpression.evaluation.Value evaluate(com.yahoo.searchlib.rankingexpression.evaluation.Context)",
"public static com.yahoo.searchlib.rankingexpression.RankingExpression from(java.lang.String)"
@@ -1031,6 +1033,7 @@
"public static final int FMOD",
"public static final int LDEXP",
"public static final int POW",
+ "public static final int BIT",
"public static final int MAP",
"public static final int REDUCE",
"public static final int JOIN",
@@ -1385,7 +1388,8 @@
"public static final enum com.yahoo.searchlib.rankingexpression.rule.Function ldexp",
"public static final enum com.yahoo.searchlib.rankingexpression.rule.Function max",
"public static final enum com.yahoo.searchlib.rankingexpression.rule.Function min",
- "public static final enum com.yahoo.searchlib.rankingexpression.rule.Function pow"
+ "public static final enum com.yahoo.searchlib.rankingexpression.rule.Function pow",
+ "public static final enum com.yahoo.searchlib.rankingexpression.rule.Function bit"
]
},
"com.yahoo.searchlib.rankingexpression.rule.FunctionNode": {
diff --git a/searchlib/src/apps/docstore/benchmarkdatastore.cpp b/searchlib/src/apps/docstore/benchmarkdatastore.cpp
index 20f06a087f5..5277c71cea2 100644
--- a/searchlib/src/apps/docstore/benchmarkdatastore.cpp
+++ b/searchlib/src/apps/docstore/benchmarkdatastore.cpp
@@ -29,7 +29,7 @@ class BenchmarkDataStoreApp : public FastOS_Application
void
BenchmarkDataStoreApp::usage()
{
- printf("Usage: %s <direcory> <numreads> <numthreads> <objects per read> <normal,directio,mmap,mlock>\n", _argv[0]);
+    printf("Usage: %s <directory> <numreads> <numthreads> <objects per read> <normal,directio,mmap>\n", _argv[0]);
fflush(stdout);
}
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java
index c508296d739..c7080ec28d8 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/aggregation/Group.java
@@ -4,9 +4,17 @@ package com.yahoo.searchlib.aggregation;
import com.yahoo.searchlib.expression.AggregationRefNode;
import com.yahoo.searchlib.expression.ExpressionNode;
import com.yahoo.searchlib.expression.ResultNode;
-import com.yahoo.vespa.objects.*;
-
-import java.util.*;
+import com.yahoo.vespa.objects.Deserializer;
+import com.yahoo.vespa.objects.Identifiable;
+import com.yahoo.vespa.objects.ObjectOperation;
+import com.yahoo.vespa.objects.ObjectPredicate;
+import com.yahoo.vespa.objects.ObjectVisitor;
+import com.yahoo.vespa.objects.Serializer;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
public class Group extends Identifiable {
@@ -132,11 +140,7 @@ public class Group extends Identifiable {
if (sortType == SortType.BYID) {
return;
}
- Collections.sort(children, new Comparator<Group>() {
- public int compare(Group lhs, Group rhs) {
- return lhs.compareId(rhs);
- }
- });
+ Collections.sort(children, (Group lhs, Group rhs) -> lhs.compareId(rhs));
sortType = SortType.BYID;
}
@@ -145,11 +149,8 @@ public class Group extends Identifiable {
if (sortType == SortType.BYRANK) {
return;
}
- Collections.sort(children, new Comparator<Group>() {
- public int compare(Group lhs, Group rhs) {
- return lhs.compareRank(rhs);
- }
- });
+        Collections.sort(children, (Group lhs, Group rhs) -> lhs.compareRank(rhs));
+
sortType = SortType.BYRANK;
}
@@ -403,22 +404,19 @@ public class Group extends Identifiable {
if (id != null) {
obj.id = (ResultNode)id.clone();
}
- obj.aggregationResults = new ArrayList<AggregationResult>();
+ obj.aggregationResults = new ArrayList<>();
for (AggregationResult result : aggregationResults) {
obj.aggregationResults.add(result.clone());
}
- obj.orderByIdx = new ArrayList<Integer>();
- for (Integer idx : orderByIdx) {
- obj.orderByIdx.add(idx);
- }
- obj.orderByExp = new ArrayList<ExpressionNode>();
+ obj.orderByIdx = new ArrayList<>(orderByIdx);
+ obj.orderByExp = new ArrayList<>();
RefResolver resolver = new RefResolver(obj);
for (ExpressionNode exp : orderByExp) {
exp = exp.clone();
exp.select(REF_LOCATOR, resolver);
obj.orderByExp.add(exp);
}
- obj.children = new ArrayList<Group>();
+ obj.children = new ArrayList<>();
for (Group child : children) {
obj.children.add(child.clone());
}
@@ -447,7 +445,7 @@ public class Group extends Identifiable {
}
}
- private static enum SortType {
+ private enum SortType {
UNSORTED,
BYRANK,
BYID
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java
index 3eb4f16a9dd..cdaaba34a44 100755
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java
@@ -81,6 +81,9 @@ public class RankingExpression implements Serializable {
private String name = "";
private ExpressionNode root;
+ private final static String RANKEXPRESSION = "rankingExpression(";
+ private final static String RANKINGSCRIPT = ").rankingScript";
+ private final static String EXPRESSION_NAME = ").expressionName";
/** Creates an anonymous ranking expression by consuming from the reader */
public RankingExpression(Reader reader) throws ParseException {
@@ -273,7 +276,16 @@ public class RankingExpression implements Serializable {
* @return the property name.
*/
public static String propertyName(String expressionName) {
- return "rankingExpression(" + expressionName + ").rankingScript";
+ return RANKEXPRESSION + expressionName + RANKINGSCRIPT;
+ }
+ public static String propertyExpressionName(String expressionName) {
+ return RANKEXPRESSION + expressionName + EXPRESSION_NAME;
+ }
+ public static String extractScriptName(String propertyName) {
+ if (propertyName.startsWith(RANKEXPRESSION) && propertyName.endsWith(RANKINGSCRIPT)) {
+ return propertyName.substring(RANKEXPRESSION.length(), propertyName.length() - RANKINGSCRIPT.length());
+ }
+ return null;
}
/**
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/TensorValue.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/TensorValue.java
index b109e6503e3..e41732f9d16 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/TensorValue.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/evaluation/TensorValue.java
@@ -156,6 +156,7 @@ public class TensorValue extends Value {
case pow: return value.pow(argument);
case fmod: return value.fmod(argument);
case ldexp: return value.ldexp(argument);
+ case bit: return value.bit(argument);
default: throw new UnsupportedOperationException("Cannot combine two tensors using " + function);
}
}
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/Function.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/Function.java
index 99afb3b38d0..16aa947986d 100644
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/Function.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/rule/Function.java
@@ -45,7 +45,8 @@ public enum Function implements Serializable {
ldexp(2) { public double evaluate(double x, double y) { return x*pow(2,(int)y); } },
max(2) { public double evaluate(double x, double y) { return max(x,y); } },
min(2) { public double evaluate(double x, double y) { return min(x,y); } },
- pow(2) { public double evaluate(double x, double y) { return pow(x,y); } };
+ pow(2) { public double evaluate(double x, double y) { return pow(x,y); } },
+ bit(2) { public double evaluate(double x, double y) { return ((int)y < 8 && (int)y >= 0 && ((int)x & (1 << (int)y)) != 0) ? 1.0 : 0.0; } };
private final int arity;
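
For reference, the semantics of the new bit function, mirrored as a small standalone C++ program (bit_fn is an illustrative name, not part of the codebase): bit(x, y) treats x as an integer and returns 1.0 when bit y (0..7) is set, 0.0 otherwise.

#include <cassert>

double bit_fn(double x, double y) {
    int xi = static_cast<int>(x);
    int yi = static_cast<int>(y);
    return (yi >= 0 && yi < 8 && (xi & (1 << yi)) != 0) ? 1.0 : 0.0;
}

int main() {
    // -43 as a signed 8-bit value is 0b11010101, matching the tests added later in this change.
    assert(bit_fn(-43, 7) == 1.0);
    assert(bit_fn(-43, 5) == 0.0);
    assert(bit_fn(-43, 0) == 1.0);
    assert(bit_fn(-43, 8) == 0.0);   // bit index outside 0..7 yields 0
    return 0;
}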
diff --git a/searchlib/src/main/javacc/RankingExpressionParser.jj b/searchlib/src/main/javacc/RankingExpressionParser.jj
index 7506fe250fc..99eff010628 100755
--- a/searchlib/src/main/javacc/RankingExpressionParser.jj
+++ b/searchlib/src/main/javacc/RankingExpressionParser.jj
@@ -123,6 +123,7 @@ TOKEN :
// MAX
// MIN
<POW: "pow"> |
+ <BIT: "bit"> |
<MAP: "map"> |
<REDUCE: "reduce"> |
@@ -733,7 +734,8 @@ Function binaryFunctionName() : { }
<LDEXP> { return Function.ldexp; } |
<MAX> { return Function.max; } |
<MIN> { return Function.min; } |
- <POW> { return Function.pow; }
+ <POW> { return Function.pow; } |
+ <BIT> { return Function.bit; }
}
List<ExpressionNode> expressionList() :
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java
index ec379e5f8af..e6143a17523 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/aggregation/GroupingTestCase.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.searchlib.aggregation;
+import com.yahoo.searchlib.expression.FloatResultNode;
import com.yahoo.searchlib.expression.NullResultNode;
import com.yahoo.searchlib.expression.StringBucketResultNode;
import com.yahoo.vespa.objects.BufferSerializer;
@@ -186,6 +187,7 @@ public class GroupingTestCase {
public void requireThatNeedDeepResultCollectionWorks() {
assertFalse(new Grouping().addLevel(new GroupingLevel().setGroupPrototype(new Group())).needDeepResultCollection());
assertTrue(new Grouping().addLevel(new GroupingLevel().setGroupPrototype(new Group().addOrderBy(new CountAggregationResult(9), true))).needDeepResultCollection());
+ assertTrue(new Grouping().addLevel(new GroupingLevel().setGroupPrototype(new Group().addOrderBy(new AverageAggregationResult(), true))).needDeepResultCollection());
}
@Test
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java
index 092faa1934e..4214727eb5f 100755
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java
@@ -14,6 +14,7 @@ import com.yahoo.searchlib.rankingexpression.rule.TensorFunctionNode;
import com.yahoo.tensor.functions.Reduce;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertFalse;
@@ -23,10 +24,13 @@ import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
-import java.util.Collections;
import java.util.List;
import java.util.Map;
-import java.util.concurrent.*;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
/**
* @author Simon Thoresen Hult
@@ -208,6 +212,16 @@ public class RankingExpressionTestCase {
}
@Test
+ public void testPropertyName() {
+ assertEquals("rankingExpression(m4).rankingScript", RankingExpression.propertyName("m4"));
+ assertEquals("m4", RankingExpression.extractScriptName("rankingExpression(m4).rankingScript"));
+ assertNull(RankingExpression.extractScriptName("rankingexpression(m4).rankingScript"));
+ assertNull(RankingExpression.extractScriptName("rankingExpression(m4).rankingscript"));
+
+ assertEquals("rankingExpression(m4).expressionName", RankingExpression.propertyExpressionName("m4"));
+ }
+
+ @Test
public void testBug3464208() throws ParseException {
List<ExpressionFunction> functions = new ArrayList<>();
functions.add(new ExpressionFunction("log10tweetage", null, new RankingExpression("69")));
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java
index d6302d7026e..4a3c4b248be 100644
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/evaluation/EvaluationTestCase.java
@@ -403,6 +403,24 @@ public class EvaluationTestCase {
}
@Test
+ public void testBitExtraction() {
+ EvaluationTester tester = new EvaluationTester();
+ tester.assertEvaluates(1.0, "bit(-43,7)");
+ tester.assertEvaluates(1.0, "bit(-43,6)");
+ tester.assertEvaluates(0.0, "bit(-43,5)");
+ tester.assertEvaluates(1.0, "bit(-43,4)");
+ tester.assertEvaluates(0.0, "bit(-43,3)");
+ tester.assertEvaluates(1.0, "bit(-43,2)");
+ tester.assertEvaluates(0.0, "bit(-43,1)");
+ tester.assertEvaluates(1.0, "bit(-43,0)");
+ tester.assertEvaluates(
+ "tensor<int8>(x[40]):[1,1,0,1,0,1,0,1, 0,0,0,0,0,0,0,0, 0,1,0,1,0,1,0,1, 0,1,1,1,1,1,1,1, 1,0,0,0,0,0,0,0]",
+ "tensor<int8>(x[40])(bit(tensor0{y:x / 8}, 7 - x % 8))",
+ "tensor<int8>(y[5]):[-43,0,85,127,-128]"
+ );
+ }
+
+ @Test
public void testCellTypeCasting() {
EvaluationTester tester = new EvaluationTester();
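
The tensor case in the new test unpacks each int8 value into 8 cells, most significant bit first: cell x reads byte x / 8 at bit position 7 - x % 8. A standalone C++ sketch of that mapping, using the same input values as the test:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    std::vector<int8_t> bytes = {-43, 0, 85, 127, -128};     // tensor0 from the test
    std::vector<int> bits;
    for (int x = 0; x < 8 * static_cast<int>(bytes.size()); ++x) {
        int y   = x / 8;        // which byte
        int pos = 7 - x % 8;    // most significant bit first within the byte
        bits.push_back((static_cast<uint8_t>(bytes[y]) >> pos) & 1);
    }
    for (int b : bits) std::printf("%d", b);
    std::printf("\n");          // 1101010100000000010101010111111110000000
    return 0;
}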
diff --git a/searchlib/src/tests/attribute/attribute_test.cpp b/searchlib/src/tests/attribute/attribute_test.cpp
index af1fcea2e21..79e120d0683 100644
--- a/searchlib/src/tests/attribute/attribute_test.cpp
+++ b/searchlib/src/tests/attribute/attribute_test.cpp
@@ -1670,8 +1670,8 @@ AttributeTest::testStatus()
AttributePtr ptr = createAttribute("as", cfg);
addDocs(ptr, numDocs);
auto & sa = *(static_cast<StringAttribute *>(ptr.get()));
- const size_t numUniq(16);
- const size_t numValuesPerDoc(16);
+ const size_t numValuesPerDoc(values.size());
+ const size_t numUniq(numValuesPerDoc);
for (uint32_t i = 0; i < numDocs; ++i) {
EXPECT_TRUE(appendToVector(sa, i, numValuesPerDoc, values));
}
@@ -1680,11 +1680,11 @@ AttributeTest::testStatus()
EXPECT_EQUAL(ptr->getStatus().getNumValues(), numDocs*numValuesPerDoc);
EXPECT_EQUAL(ptr->getStatus().getNumUniqueValues(), numUniq);
size_t expUsed = 0;
- expUsed += 1 * InternalNodeSize + 1 * LeafNodeSize; // enum store tree
- expUsed += numUniq * 32; // enum store (16 unique values, 32 bytes per entry)
+ expUsed += 1 * InternalNodeSize + 1 * LeafNodeSize; // Approximate enum store tree
+ expUsed += 272; // TODO Approximate... enum store (16 unique values, 17 bytes per entry)
// multi value mapping (numdocs * sizeof(MappingIndex) + numvalues * sizeof(EnumIndex) +
- // numdocs * sizeof(Array<EnumIndex>) (due to vector vector))
- expUsed += numDocs * sizeof(vespalib::datastore::EntryRef) + numDocs * numValuesPerDoc * sizeof(IEnumStore::Index) + ((numValuesPerDoc > 1024) ? numDocs * NestedVectorSize : 0);
+ // 32 + numdocs * sizeof(Array<EnumIndex>) (due to vector vector))
+ expUsed += 32 + numDocs * sizeof(vespalib::datastore::EntryRef) + numDocs * numValuesPerDoc * sizeof(IEnumStore::Index) + ((numValuesPerDoc > 1024) ? numDocs * NestedVectorSize : 0);
EXPECT_GREATER_EQUAL(ptr->getStatus().getUsed(), expUsed);
EXPECT_GREATER_EQUAL(ptr->getStatus().getAllocated(), expUsed);
}
diff --git a/searchlib/src/tests/attribute/changevector/changevector_test.cpp b/searchlib/src/tests/attribute/changevector/changevector_test.cpp
index ad33774e904..93a23630407 100644
--- a/searchlib/src/tests/attribute/changevector/changevector_test.cpp
+++ b/searchlib/src/tests/attribute/changevector/changevector_test.cpp
@@ -2,17 +2,27 @@
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/searchlib/attribute/changevector.hpp>
-
+#include <vespa/vespalib/stllike/hash_set.h>
using namespace search;
+using Change = ChangeTemplate<NumericChangeData<long>>;
+using CV = ChangeVectorT<Change>;
+
template <typename T>
void verifyStrictOrdering(const T & v) {
- long count(0);
- for (const auto & c : v) {
- count++;
- EXPECT_EQUAL(count, c._data.get());
+ vespalib::hash_set<uint32_t> complete;
+ uint32_t prev_doc(0);
+ uint32_t prev_value(0);
+ for (const auto & c : v.getDocIdInsertOrder()) {
+ if (prev_doc != c._doc) {
+ complete.insert(prev_doc);
+ EXPECT_FALSE(complete.contains(c._doc));
+ prev_doc = c._doc;
+ } else {
+ EXPECT_GREATER(c._data, prev_value);
+ }
+ prev_value = c._data;
}
- EXPECT_EQUAL(v.size(), size_t(count));
}
class Accessor {
@@ -30,8 +40,6 @@ private:
TEST("require insert ordering is preserved for same doc")
{
- typedef ChangeTemplate<NumericChangeData<long>> Change;
- typedef ChangeVectorT<Change> CV;
CV a;
a.push_back(Change(Change::NOOP, 7, 1));
EXPECT_EQUAL(1u, a.size());
@@ -42,8 +50,6 @@ TEST("require insert ordering is preserved for same doc")
TEST("require insert ordering is preserved ")
{
- typedef ChangeTemplate<NumericChangeData<long>> Change;
- typedef ChangeVectorT<Change> CV;
CV a;
a.push_back(Change(Change::NOOP, 7, 1));
EXPECT_EQUAL(1u, a.size());
@@ -56,8 +62,6 @@ TEST("require insert ordering is preserved ")
TEST("require insert ordering is preserved with mix")
{
- typedef ChangeTemplate<NumericChangeData<long>> Change;
- typedef ChangeVectorT<Change> CV;
CV a;
a.push_back(Change(Change::NOOP, 7, 1));
EXPECT_EQUAL(1u, a.size());
@@ -77,8 +81,6 @@ TEST("require insert ordering is preserved with mix")
}
TEST("require that inserting empty vector does not affect the vector.") {
- typedef ChangeTemplate<NumericChangeData<long>> Change;
- typedef ChangeVectorT<Change> CV;
CV a;
std::vector<long> v;
Accessor ac(v);
@@ -86,4 +88,42 @@ TEST("require that inserting empty vector does not affect the vector.") {
EXPECT_EQUAL(0u, a.size());
}
+TEST("require that we have control over buffer construction size") {
+ CV a;
+ EXPECT_EQUAL(0u, a.size());
+ EXPECT_EQUAL(256u, a.capacity());
+ a.clear();
+ EXPECT_EQUAL(0u, a.size());
+ EXPECT_EQUAL(256u, a.capacity());
+}
+
+TEST("require that buffer can grow some") {
+ CV a;
+ for (size_t i(0); i < 1024; i++) {
+ a.push_back(Change(Change::NOOP, i, i));
+ }
+ EXPECT_EQUAL(1024u, a.size());
+ EXPECT_EQUAL(1024u, a.capacity());
+ a.clear();
+ EXPECT_EQUAL(0u, a.size());
+ EXPECT_EQUAL(1024u, a.capacity());
+}
+
+TEST("require that buffer can grow some, but not unbound") {
+ CV a;
+ for (size_t i(0); i < 1025; i++) {
+ a.push_back(Change(Change::NOOP, i, i));
+ }
+ EXPECT_EQUAL(1025u, a.size());
+ EXPECT_EQUAL(2048u, a.capacity());
+ a.clear();
+ EXPECT_EQUAL(0u, a.size());
+ EXPECT_EQUAL(256u, a.capacity());
+}
+
+TEST("Control Change size") {
+ EXPECT_EQUAL(32u, sizeof(ChangeTemplate<NumericChangeData<long>>));
+ EXPECT_EQUAL(88u, sizeof(ChangeTemplate<StringChangeData>));
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
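
The three new capacity tests pin down a clear() policy: the buffer starts at 256 entries, may keep a moderately grown allocation across clear(), but releases it once it has grown past 1024 so a single large batch does not pin memory. A minimal sketch of such a policy (BoundedBuffer and its constants are illustrative, not the ChangeVectorT implementation):

#include <cassert>
#include <cstddef>
#include <vector>

template <typename T>
class BoundedBuffer {
public:
    static constexpr size_t DEFAULT_CAPACITY = 256;
    static constexpr size_t MAX_RETAINED_CAPACITY = 1024;

    BoundedBuffer() { _v.reserve(DEFAULT_CAPACITY); }
    void push_back(const T &t) { _v.push_back(t); }
    size_t size() const { return _v.size(); }
    size_t capacity() const { return _v.capacity(); }
    void clear() {
        if (_v.capacity() > MAX_RETAINED_CAPACITY) {
            std::vector<T>().swap(_v);     // drop the oversized allocation
            _v.reserve(DEFAULT_CAPACITY);
        } else {
            _v.clear();                    // keep the (still small) allocation
        }
    }
private:
    std::vector<T> _v;
};

int main() {
    BoundedBuffer<long> a;
    for (long i = 0; i < 1025; ++i) a.push_back(i);
    assert(a.size() == 1025);
    a.clear();
    assert(a.size() == 0);
    assert(a.capacity() <= BoundedBuffer<long>::MAX_RETAINED_CAPACITY);
    return 0;
}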
diff --git a/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp b/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp
index 2e83d2acbf2..8d202100699 100644
--- a/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp
+++ b/searchlib/src/tests/features/tensor_from_labels/tensor_from_labels_test.cpp
@@ -95,6 +95,7 @@ struct ExecFixture
attrs.push_back(AttributeFactory::createAttribute("astr", AVC(AVBT::STRING, AVCT::ARRAY)));
attrs.push_back(AttributeFactory::createAttribute("aint", AVC(AVBT::INT32, AVCT::ARRAY)));
attrs.push_back(AttributeFactory::createAttribute("wsstr", AVC(AVBT::STRING, AVCT::WSET)));
+ attrs.push_back(AttributeFactory::createAttribute("sint", AVC(AVBT::INT32, AVCT::SINGLE)));
for (const auto &attr : attrs) {
attr->addReservedDoc();
@@ -112,6 +113,9 @@ struct ExecFixture
aint->append(1, 3, 0);
aint->append(1, 5, 0);
aint->append(1, 7, 0);
+
+ IntegerAttribute *sint = static_cast<IntegerAttribute *>(attrs[3].get());
+ sint->update(1, 5);
for (const auto &attr : attrs) {
attr->commit();
@@ -167,6 +171,20 @@ TEST_F("require that array attribute can be converted to tensor (explicit dimens
.add({{"dim", "5"}}, 1)), f.execute());
}
+TEST_F("require that single-value integer attribute can be converted to tensor (default dimension)",
+ ExecFixture("tensorFromLabels(attribute(sint))"))
+{
+ EXPECT_EQUAL(*make_tensor(TensorSpec("tensor(sint{})")
+ .add({{"sint", "5"}}, 1)), f.execute());
+}
+
+TEST_F("require that single-value integer attribute can be converted to tensor (explicit dimension)",
+ ExecFixture("tensorFromLabels(attribute(sint),foobar)"))
+{
+ EXPECT_EQUAL(*make_tensor(TensorSpec("tensor(foobar{})")
+ .add({{"foobar", "5"}}, 1)), f.execute());
+}
+
TEST_F("require that empty tensor is created if attribute does not exists",
ExecFixture("tensorFromLabels(attribute(null))"))
{
diff --git a/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp
index 191c7495271..31aebf95ea2 100644
--- a/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_bounds_posting_list_test.cpp
@@ -29,7 +29,7 @@ SimpleIndexConfig config;
const uint64_t hash = 0x123;
TEST("require that empty bounds posting list starts at 0.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
vespalib::datastore::EntryRef ref;
PredicateBoundsPostingList<PredicateIndex::BTreeIterator>
posting_list(index.getIntervalStore(),
@@ -54,7 +54,7 @@ void checkNext(PredicateBoundsPostingList<PredicateIndex::BTreeIterator> &postin
}
TEST("require that bounds posting list checks bounds.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
const auto &bounds_index = index.getBoundsIndex();
for (uint32_t id = 1; id < 100; ++id) {
PredicateTreeAnnotations annotations(id);
diff --git a/searchlib/src/tests/predicate/predicate_index_test.cpp b/searchlib/src/tests/predicate/predicate_index_test.cpp
index 669f70dd544..19ad0301b5c 100644
--- a/searchlib/src/tests/predicate/predicate_index_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_index_test.cpp
@@ -33,7 +33,7 @@ DummyDocIdLimitProvider dummy_provider;
SimpleIndexConfig simple_index_config;
TEST("require that PredicateIndex can index empty documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
index.indexEmptyDocument(2);
index.commit();
@@ -41,7 +41,7 @@ TEST("require that PredicateIndex can index empty documents") {
}
TEST("require that indexDocument don't index empty documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
PredicateTreeAnnotations annotations;
index.indexDocument(3, annotations);
@@ -50,7 +50,7 @@ TEST("require that indexDocument don't index empty documents") {
}
TEST("require that PredicateIndex can remove empty documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
index.indexEmptyDocument(2);
index.commit();
@@ -61,7 +61,7 @@ TEST("require that PredicateIndex can remove empty documents") {
}
TEST("require that indexing the same empty document multiple times is ok") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_EQUAL(0u, index.getZeroConstraintDocs().size());
index.indexEmptyDocument(2);
index.commit();
@@ -109,11 +109,10 @@ const IntervalWithBounds bounds = {0x0001ffff, 0x03};
Interval single_buf;
TEST("require that PredicateIndex can index document") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
indexFeature(index, doc_id, min_feature, {{hash, interval}}, {});
index.commit();
-
auto posting_it = lookupPosting(index, hash);
EXPECT_EQUAL(doc_id, posting_it.getKey());
uint32_t size;
@@ -123,8 +122,27 @@ TEST("require that PredicateIndex can index document") {
EXPECT_EQUAL(interval, interval_list[0]);
}
+TEST("require that bit vector cache is initialized correctly") {
+ BitVectorCache::KeyAndCountSet keySet;
+ keySet.emplace_back(hash, dummy_provider.getDocIdLimit()/2);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
+ EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
+ indexFeature(index, doc_id, min_feature, {{hash, interval}}, {});
+ index.requireCachePopulation();
+ index.populateIfNeeded(dummy_provider.getDocIdLimit());
+ EXPECT_TRUE(index.lookupCachedSet(keySet).empty());
+ index.commit();
+ EXPECT_TRUE(index.getIntervalIndex().lookup(hash).valid());
+ EXPECT_TRUE(index.lookupCachedSet(keySet).empty());
+
+ index.requireCachePopulation();
+ index.populateIfNeeded(dummy_provider.getDocIdLimit());
+ EXPECT_FALSE(index.lookupCachedSet(keySet).empty());
+}
+
+
TEST("require that PredicateIndex can index document with bounds") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
indexFeature(index, doc_id, min_feature, {}, {{hash, bounds}});
index.commit();
@@ -149,7 +167,7 @@ TEST("require that PredicateIndex can index document with bounds") {
TEST("require that PredicateIndex can index multiple documents "
"with the same feature") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
for (uint32_t id = 1; id < 100; ++id) {
indexFeature(index, id, min_feature, {{hash, interval}}, {});
@@ -171,7 +189,7 @@ TEST("require that PredicateIndex can index multiple documents "
}
TEST("require that PredicateIndex can remove indexed documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
indexFeature(index, doc_id, min_feature,
{{hash, interval}}, {{hash2, bounds}});
@@ -187,7 +205,7 @@ TEST("require that PredicateIndex can remove indexed documents") {
}
TEST("require that PredicateIndex can remove multiple documents") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
const auto &interval_index = index.getIntervalIndex();
EXPECT_FALSE(interval_index.lookup(hash).valid());
for (uint32_t id = 1; id < 100; ++id) {
@@ -214,7 +232,7 @@ TEST("require that PredicateIndex can remove multiple documents with "
intervals.push_back(make_pair(hash + i, interval));
bounds_intervals.push_back(make_pair(hash2 + i, bounds));
}
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
const auto &interval_index = index.getIntervalIndex();
EXPECT_FALSE(interval_index.lookup(hash).valid());
for (uint32_t id = 1; id < 100; ++id) {
@@ -272,7 +290,7 @@ TEST("require that PredicateIndex can be (de)serialized") {
intervals.push_back(make_pair(hash + i, interval));
bounds_intervals.push_back(make_pair(hash2 + i, bounds));
}
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 8);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 8);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
for (uint32_t id = 1; id < 100; ++id) {
indexFeature(index, id, id, intervals, bounds_intervals);
@@ -284,7 +302,7 @@ TEST("require that PredicateIndex can be (de)serialized") {
index.serialize(buffer);
uint32_t doc_id_limit;
DocIdLimitFinder finder(doc_id_limit);
- PredicateIndex index2(generation_handler, generation_holder, dummy_provider, simple_index_config,
+ PredicateIndex index2(generation_holder, dummy_provider, simple_index_config,
buffer, finder, PredicateAttribute::PREDICATE_ATTRIBUTE_VERSION);
const PredicateIntervalStore &interval_store = index2.getIntervalStore();
EXPECT_EQUAL(199u, doc_id_limit);
@@ -322,7 +340,7 @@ TEST("require that PredicateIndex can be (de)serialized") {
}
TEST("require that DocumentFeaturesStore is restored on deserialization") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
EXPECT_FALSE(index.getIntervalIndex().lookup(hash).valid());
indexFeature(index, doc_id, min_feature,
{{hash, interval}}, {{hash2, bounds}});
@@ -330,7 +348,7 @@ TEST("require that DocumentFeaturesStore is restored on deserialization") {
index.serialize(buffer);
uint32_t doc_id_limit;
DocIdLimitFinder finder(doc_id_limit);
- PredicateIndex index2(generation_handler, generation_holder, dummy_provider, simple_index_config,
+ PredicateIndex index2(generation_holder, dummy_provider, simple_index_config,
buffer, finder, PredicateAttribute::PREDICATE_ATTRIBUTE_VERSION);
const auto &interval_index = index2.getIntervalIndex();
const auto &bounds_index = index2.getBoundsIndex();
@@ -351,7 +369,7 @@ TEST("require that DocumentFeaturesStore is restored on deserialization") {
}
TEST("require that hold lists are attempted emptied on destruction") {
- PredicateIndex index(generation_handler, generation_holder, dummy_provider, simple_index_config, 10);
+ PredicateIndex index(generation_holder, dummy_provider, simple_index_config, 10);
indexFeature(index, doc_id, min_feature,
{{hash, interval}}, {{hash2, bounds}});
{
diff --git a/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp
index a77542f364e..660d8556b5c 100644
--- a/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_interval_posting_list_test.cpp
@@ -28,7 +28,7 @@ SimpleIndexConfig config;
const uint64_t hash = 0x123;
TEST("require that empty posting list starts at 0.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
vespalib::datastore::EntryRef ref;
PredicateIntervalPostingList<PredicateIndex::BTreeIterator>
posting_list(index.getIntervalStore(), index.getIntervalIndex().getBTreePostingList(ref));
@@ -38,7 +38,7 @@ TEST("require that empty posting list starts at 0.") {
}
TEST("require that posting list can iterate.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
const auto &interval_index = index.getIntervalIndex();
for (uint32_t id = 1; id < 100; ++id) {
PredicateTreeAnnotations annotations(id);
diff --git a/searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp
index e427c99c007..12de48b5d31 100644
--- a/searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_zero_constraint_posting_list_test.cpp
@@ -25,7 +25,7 @@ DummyDocIdLimitProvider limit_provider;
SimpleIndexConfig config;
TEST("require that empty posting list starts at 0.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
PredicateZeroConstraintPostingList posting_list(index.getZeroConstraintDocs().begin());
EXPECT_EQUAL(0u, posting_list.getDocId());
EXPECT_EQUAL(0x00010001u, posting_list.getInterval());
@@ -33,7 +33,7 @@ TEST("require that empty posting list starts at 0.") {
}
TEST("require that posting list can iterate.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
for (uint32_t id = 1; id < 100; ++id) {
index.indexEmptyDocument(id);
}
diff --git a/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp b/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp
index 4e86e996704..6d00b45a283 100644
--- a/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp
+++ b/searchlib/src/tests/predicate/predicate_zstar_compressed_posting_list_test.cpp
@@ -29,7 +29,7 @@ SimpleIndexConfig config;
const uint64_t hash = 0x123;
TEST("require that empty posting list starts at 0.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
vespalib::datastore::EntryRef ref;
PredicateZstarCompressedPostingList<PredicateIndex::BTreeIterator>
posting_list(index.getIntervalStore(), index.getIntervalIndex().getBTreePostingList(ref));
@@ -39,7 +39,7 @@ TEST("require that empty posting list starts at 0.") {
}
TEST("require that posting list can iterate.") {
- PredicateIndex index(generation_handler, generation_holder, limit_provider, config, 8);
+ PredicateIndex index(generation_holder, limit_provider, config, 8);
const auto &interval_index = index.getIntervalIndex();
vector<vector<Interval>> intervals =
{{{0x00010000}},
diff --git a/searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp b/searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp
index 3dd2ec26dea..5b8d5f5b9ce 100644
--- a/searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp
+++ b/searchlib/src/tests/queryeval/predicate/predicate_blueprint_test.cpp
@@ -86,8 +86,7 @@ TEST_F("require that blueprint with empty index estimates empty.", Fixture) {
EXPECT_EQUAL(0u, blueprint.getState().estimate().estHits);
}
-TEST_F("require that blueprint with zero-constraint doc estimates non-empty.",
- Fixture) {
+TEST_F("require that blueprint with zero-constraint doc estimates non-empty.", Fixture) {
f.indexEmptyDocument(42);
PredicateBlueprint blueprint(f.field, f.guard(), f.query);
EXPECT_FALSE(blueprint.getState().estimate().empty);
@@ -98,11 +97,9 @@ const int min_feature = 1;
const uint32_t doc_id = 2;
const uint32_t interval = 0x0001ffff;
-TEST_F("require that blueprint with posting list entry estimates non-empty.",
- Fixture) {
+TEST_F("require that blueprint with posting list entry estimates non-empty.", Fixture) {
PredicateTreeAnnotations annotations(min_feature);
- annotations.interval_map[PredicateHash::hash64("key=value")] =
- std::vector<Interval>{{interval}};
+ annotations.interval_map[PredicateHash::hash64("key=value")] = std::vector<Interval>{{interval}};
f.indexDocument(doc_id, annotations);
PredicateBlueprint blueprint(f.field, f.guard(), f.query);
@@ -110,8 +107,7 @@ TEST_F("require that blueprint with posting list entry estimates non-empty.",
EXPECT_EQUAL(0u, blueprint.getState().estimate().estHits);
}
-TEST_F("require that blueprint with 'bounds' posting list entry estimates "
- "non-empty.", Fixture) {
+TEST_F("require that blueprint with 'bounds' posting list entry estimates non-empty.", Fixture) {
PredicateTreeAnnotations annotations(min_feature);
annotations.bounds_map[PredicateHash::hash64("range_key=40")] =
std::vector<IntervalWithBounds>{{interval, 0x80000003}};
@@ -122,34 +118,50 @@ TEST_F("require that blueprint with 'bounds' posting list entry estimates "
EXPECT_EQUAL(0u, blueprint.getState().estimate().estHits);
}
-TEST_F("require that blueprint with zstar-compressed estimates non-empty.",
- Fixture) {
+TEST_F("require that blueprint with zstar-compressed estimates non-empty.", Fixture) {
PredicateTreeAnnotations annotations(1);
- annotations.interval_map[Constants::z_star_compressed_hash] =std::vector<Interval>{{0xfffe0000}};
+ annotations.interval_map[Constants::z_star_compressed_hash] = std::vector<Interval>{{0xfffe0000}};
f.indexDocument(doc_id, annotations);
PredicateBlueprint blueprint(f.field, f.guard(), f.query);
EXPECT_FALSE(blueprint.getState().estimate().empty);
EXPECT_EQUAL(0u, blueprint.getState().estimate().estHits);
}
-TEST_F("require that blueprint can create search", Fixture) {
- PredicateTreeAnnotations annotations(1);
- annotations.interval_map[PredicateHash::hash64("key=value")] =std::vector<Interval>{{interval}};
- f.indexDocument(doc_id, annotations);
-
+void
+runQuery(Fixture & f, std::vector<uint32_t> expected, bool expectCachedSize, uint32_t expectedKV) {
PredicateBlueprint blueprint(f.field, f.guard(), f.query);
blueprint.fetchPostings(ExecuteInfo::TRUE);
+ EXPECT_EQUAL(expectCachedSize, blueprint.getCachedFeatures().size());
+ for (uint32_t docId : expected) {
+ EXPECT_EQUAL(expectedKV, uint32_t(blueprint.getKV()[docId]));
+ }
TermFieldMatchDataArray tfmda;
SearchIterator::UP it = blueprint.createLeafSearch(tfmda, true);
ASSERT_TRUE(it.get());
it->initFullRange();
EXPECT_EQUAL(SearchIterator::beginId(), it->getDocId());
- EXPECT_FALSE(it->seek(doc_id - 1));
- EXPECT_EQUAL(doc_id, it->getDocId());
- EXPECT_TRUE(it->seek(doc_id));
- EXPECT_EQUAL(doc_id, it->getDocId());
- EXPECT_FALSE(it->seek(doc_id + 1));
- EXPECT_TRUE(it->isAtEnd());
+ std::vector<uint32_t> actual;
+ for (it->seek(1); ! it->isAtEnd(); it->seek(it->getDocId()+1)) {
+ actual.push_back(it->getDocId());
+ }
+ EXPECT_EQUAL(expected.size(), actual.size());
+ for (size_t i(0); i < expected.size(); i++) {
+ EXPECT_EQUAL(expected[i], actual[i]);
+ }
+}
+
+TEST_F("require that blueprint can create search", Fixture) {
+ PredicateTreeAnnotations annotations(1);
+ annotations.interval_map[PredicateHash::hash64("key=value")] = std::vector<Interval>{{interval}};
+ for (size_t i(0); i < 9; i++) {
+ f.indexDocument(doc_id + i, annotations);
+ }
+ runQuery(f, {2,3,4,5,6,7,8,9,10}, 0, 1);
+ f.indexDocument(doc_id+9, annotations);
+ runQuery(f, {2, 3,4,5,6,7,8,9,10,11}, 0, 1);
+ f.index().requireCachePopulation();
+ f.indexDocument(doc_id+10, annotations);
+ runQuery(f, {2,3,4,5,6,7,8,9,10,11,12}, 1, 1);
}
TEST_F("require that blueprint can create more advanced search", Fixture) {
diff --git a/searchlib/src/vespa/searchlib/aggregation/group.h b/searchlib/src/vespa/searchlib/aggregation/group.h
index 5b425de24e6..681cda43afa 100644
--- a/searchlib/src/vespa/searchlib/aggregation/group.h
+++ b/searchlib/src/vespa/searchlib/aggregation/group.h
@@ -232,7 +232,7 @@ public:
/**
* Recursively checks if any itself or any children needs a full resort.
- * Then all hits must be processed and should be doen before any hit sorting.
+ * Then all hits must be processed and should be done before any hit sorting.
*/
bool needResort() const { return _aggr.needResort(); }
diff --git a/searchlib/src/vespa/searchlib/attribute/attributevector.hpp b/searchlib/src/vespa/searchlib/attribute/attributevector.hpp
index efc96bc57c2..616096e9091 100644
--- a/searchlib/src/vespa/searchlib/attribute/attributevector.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/attributevector.hpp
@@ -89,40 +89,36 @@ AttributeVector::adjustWeight(ChangeVectorT< ChangeTemplate<T> >& changes, DocId
template<typename T>
bool
-AttributeVector::applyArithmetic(ChangeVectorT< ChangeTemplate<T> > & changes, DocId doc, const T & v,
+AttributeVector::applyArithmetic(ChangeVectorT< ChangeTemplate<T> > & changes, DocId doc, const T &,
const ArithmeticValueUpdate & arithm)
{
- (void) v;
- bool retval(!hasMultiValue() && (doc < getNumDocs()));
- if (retval) {
- size_t oldSz(changes.size());
- ArithmeticValueUpdate::Operator op(arithm.getOperator());
- double aop = arithm.getOperand();
- if (op == ArithmeticValueUpdate::Add) {
- changes.push_back(ChangeTemplate<T>(ChangeBase::ADD, doc, 0, 0));
- } else if (op == ArithmeticValueUpdate::Sub) {
- changes.push_back(ChangeTemplate<T>(ChangeBase::SUB, doc, 0, 0));
- } else if (op == ArithmeticValueUpdate::Mul) {
- changes.push_back(ChangeTemplate<T>(ChangeBase::MUL, doc, 0, 0));
- } else if (op == ArithmeticValueUpdate::Div) {
- if (this->getClass().inherits(IntegerAttribute::classId) && aop == 0) {
- divideByZeroWarning();
- } else {
- changes.push_back(ChangeTemplate<T>(ChangeBase::DIV, doc, 0, 0));
- }
+ if (hasMultiValue() || (doc >= getNumDocs())) return false;
+
+ size_t oldSz(changes.size());
+ ArithmeticValueUpdate::Operator op(arithm.getOperator());
+ double aop = arithm.getOperand();
+ if (op == ArithmeticValueUpdate::Add) {
+ changes.push_back(ChangeTemplate<T>(ChangeBase::ADD, doc, 0, 0));
+ } else if (op == ArithmeticValueUpdate::Sub) {
+ changes.push_back(ChangeTemplate<T>(ChangeBase::SUB, doc, 0, 0));
+ } else if (op == ArithmeticValueUpdate::Mul) {
+ changes.push_back(ChangeTemplate<T>(ChangeBase::MUL, doc, 0, 0));
+ } else if (op == ArithmeticValueUpdate::Div) {
+ if (this->getClass().inherits(IntegerAttribute::classId) && aop == 0) {
+ divideByZeroWarning();
} else {
- retval = false;
- }
- if (retval) {
- const size_t diff = changes.size() - oldSz;
- _status.incNonIdempotentUpdates(diff);
- _status.incUpdates(diff);
- if (diff > 0) {
- changes.back()._arithOperand = aop;
- }
+ changes.push_back(ChangeTemplate<T>(ChangeBase::DIV, doc, 0, 0));
}
+ } else {
+ return false;
}
- return retval;
+ const size_t diff = changes.size() - oldSz;
+ _status.incNonIdempotentUpdates(diff);
+ _status.incUpdates(diff);
+ if (diff > 0) {
+ changes.back()._arithOperand = aop;
+ }
+ return true;
}
template<typename T>
diff --git a/searchlib/src/vespa/searchlib/attribute/changevector.h b/searchlib/src/vespa/searchlib/attribute/changevector.h
index c3b4d0ce3b0..d63ef2e2b35 100644
--- a/searchlib/src/vespa/searchlib/attribute/changevector.h
+++ b/searchlib/src/vespa/searchlib/attribute/changevector.h
@@ -2,8 +2,8 @@
#pragma once
-#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/searchcommon/common/undefinedvalues.h>
+#include <vespa/vespalib/stllike/allocator.h>
#include <vector>
namespace vespalib { class MemoryUsage; }
@@ -26,11 +26,10 @@ struct ChangeBase {
DIV,
CLEARDOC
};
- enum {TAIL=0, UNSET_ENUM = 0xffffffffu};
+ enum {UNSET_ENUM = 0xffffffffu};
ChangeBase() :
_type(NOOP),
- _next(TAIL),
_doc(0),
_weight(1),
_enumScratchPad(UNSET_ENUM),
@@ -39,7 +38,6 @@ struct ChangeBase {
ChangeBase(Type type, uint32_t d, int32_t w = 1) :
_type(type),
- _next(TAIL),
_doc(d),
_weight(w),
_enumScratchPad(UNSET_ENUM),
@@ -48,18 +46,11 @@ struct ChangeBase {
int cmp(const ChangeBase &b) const { int diff(_doc - b._doc); return diff; }
bool operator <(const ChangeBase & b) const { return cmp(b) < 0; }
- bool isAtEnd() const { return _next == TAIL; }
- uint32_t getNext() const { return _next; }
- void setNext(uint32_t next) { _next = next; }
uint32_t getEnum() const { return _enumScratchPad; }
void setEnum(uint32_t value) const { _enumScratchPad = value; }
bool isEnumValid() const { return _enumScratchPad != UNSET_ENUM; }
- void invalidateEnum() const { _enumScratchPad = UNSET_ENUM; }
Type _type;
-private:
- uint32_t _next;
-public:
uint32_t _doc;
int32_t _weight;
mutable uint32_t _enumScratchPad;
@@ -108,7 +99,7 @@ struct ChangeTemplate : public ChangeBase {
ChangeBase(type, d, w), _data(v)
{ }
- T _data;
+ T _data;
};
template <>
@@ -131,54 +122,66 @@ NumericChangeData<double>::operator<(const NumericChangeData<double> &rhs) const
return _v < rhs._v;
}
-class ChangeVectorBase {
-protected:
-};
-
/**
- * Maintains a list of changes where changes to the same docid are adjacent, but ordered by insertion order.
- * Apart from that no ordering by docid.
+ * Maintains a list of changes.
+ * You can view the changes in insert order, or in a docid-grouped order where
+ * changes to the same docid are adjacent and kept in insertion order.
*/
template <typename T>
-class ChangeVectorT : public ChangeVectorBase {
+class ChangeVectorT {
private:
- using Map = vespalib::hash_map<uint32_t, uint32_t>;
- using Vector = std::vector<T>;
+ using Vector = std::vector<T, vespalib::allocator_large<T>>;
public:
+ using const_iterator = typename Vector::const_iterator;
ChangeVectorT();
~ChangeVectorT();
- class const_iterator {
- public:
- const_iterator(const Vector & vector, uint32_t next) : _v(&vector), _next(next) { }
- bool operator == (const const_iterator & rhs) const { return _v == rhs._v && _next == rhs._next; }
- bool operator != (const const_iterator & rhs) const { return _v != rhs._v || _next != rhs._next; }
- const_iterator& operator++() { advance(); return *this; }
- const_iterator operator++(int) { const_iterator other(*this); advance(); return other; }
- const T & operator * () const { return v()[_next]; }
- const T * operator -> () const { return &v()[_next]; }
- private:
- void advance() { _next = v()[_next].getNext(); }
- const Vector & v() const { return *_v; }
- const Vector * _v;
- uint32_t _next;
- };
-
void push_back(const T & c);
template <typename Accessor>
void push_back(uint32_t doc, Accessor & ac);
- const T & back() const { return _v.back(); }
T & back() { return _v.back(); }
size_t size() const { return _v.size(); }
+ size_t capacity() const { return _v.capacity(); }
bool empty() const { return _v.empty(); }
void clear();
- const_iterator begin() const { return const_iterator(_v, 0); }
- const_iterator end() const { return const_iterator(_v, size()); }
+ class InsertOrder {
+ public:
+ InsertOrder(const Vector & v) : _v(v) { }
+ const_iterator begin() const { return _v.begin(); }
+ const_iterator end() const { return _v.end(); }
+ private:
+ const Vector &_v;
+ };
+ class DocIdInsertOrder {
+ using AdjacentDocIds = std::vector<uint64_t, vespalib::allocator_large<uint64_t>>;
+ public:
+ class const_iterator {
+ public:
+ const_iterator(const Vector & vector, const AdjacentDocIds & order, uint32_t cur)
+ : _v(&vector), _o(&order), _cur(cur) { }
+ bool operator == (const const_iterator & rhs) const { return _v == rhs._v && _cur == rhs._cur; }
+ bool operator != (const const_iterator & rhs) const { return _v != rhs._v || _cur != rhs._cur; }
+ const_iterator& operator++() { _cur++; return *this; }
+ const_iterator operator++(int) { const_iterator other(*this); _cur++; return other; }
+ const T & operator * () const { return v(); }
+ const T * operator -> () const { return &v(); }
+ private:
+ const T & v() const { return (*_v)[(*_o)[_cur] & 0xffffffff]; }
+ const Vector * _v;
+ const AdjacentDocIds * _o;
+ uint32_t _cur;
+ };
+ DocIdInsertOrder(const Vector & v);
+ const_iterator begin() const { return const_iterator(_v, _adjacent, 0); }
+ const_iterator end() const { return const_iterator(_v, _adjacent, _v.size()); }
+ private:
+ const Vector &_v;
+ AdjacentDocIds _adjacent;
+ };
+ InsertOrder getInsertOrder() const { return InsertOrder(_v); }
+ DocIdInsertOrder getDocIdInsertOrder() const { return DocIdInsertOrder(_v); }
vespalib::MemoryUsage getMemoryUsage() const;
private:
- void linkIn(uint32_t doc, size_t index, size_t last);
- Vector _v;
- Map _docs;
- uint32_t _tail;
+ Vector _v;
};
} // namespace search
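
For context on the new docid-grouped view: DocIdInsertOrder packs each change's docid into the high 32 bits and its insertion index into the low 32 bits of a uint64_t, so a single std::sort groups entries by docid while keeping insertion order within each docid. A minimal standalone sketch of the packing trick, using a simplified Change type rather than the real ChangeTemplate:

    // Sketch only, not part of the patch; assumes a simplified Change type.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Change { uint32_t doc; int value; };

    std::vector<uint64_t> docid_insert_order(const std::vector<Change> &v) {
        std::vector<uint64_t> adjacent;
        adjacent.reserve(v.size());
        uint32_t index = 0;
        for (const auto &c : v) {
            // docid is the primary sort key, insertion index the tie-breaker
            adjacent.push_back((uint64_t(c.doc) << 32) | index++);
        }
        std::sort(adjacent.begin(), adjacent.end());
        return adjacent;   // adjacent[i] & 0xffffffff indexes back into v
    }

Iterating the sorted vector visits every change for the lowest docid first, in the order the changes were pushed, then the next docid, and so on; DocIdInsertOrder::const_iterator above performs exactly this indirection.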
diff --git a/searchlib/src/vespa/searchlib/attribute/changevector.hpp b/searchlib/src/vespa/searchlib/attribute/changevector.hpp
index dcb31ebae73..5052f4b9a10 100644
--- a/searchlib/src/vespa/searchlib/attribute/changevector.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/changevector.hpp
@@ -4,9 +4,12 @@
#include "changevector.h"
#include <vespa/vespalib/util/memoryusage.h>
+#include <vespa/vespalib/util/alloc.h>
namespace search {
+using vespalib::roundUp2inN;
+
namespace {
// This number is selected to be large enough to hold bursts between commits
@@ -16,11 +19,9 @@ constexpr size_t NUM_ELEMS_TO_RESERVE = 200;
template <typename T>
ChangeVectorT<T>::ChangeVectorT()
- : _v(),
- _docs(NUM_ELEMS_TO_RESERVE*2),
- _tail(0)
+ : _v()
{
- _v.reserve(vespalib::roundUp2inN(NUM_ELEMS_TO_RESERVE, sizeof(T)));
+ _v.reserve(roundUp2inN<T>(NUM_ELEMS_TO_RESERVE));
}
template <typename T>
@@ -29,17 +30,21 @@ ChangeVectorT<T>::~ChangeVectorT() = default;
template <typename T>
void
ChangeVectorT<T>::clear() {
- _v.clear();
- _docs.clear();
+ if (_v.capacity() > roundUp2inN<T>(NUM_ELEMS_TO_RESERVE * 5)) {
+ // Ensure we do not keep insanely large buffers over time, due to abnormal peaks
+ // caused by hiccups elsewhere.
+ _v = Vector();
+ _v.reserve(roundUp2inN<T>(NUM_ELEMS_TO_RESERVE));
+ } else {
+ _v.clear();
+ }
}
template <typename T>
void
ChangeVectorT<T>::push_back(const T & c)
{
- size_t index(size());
_v.push_back(c);
- linkIn(c._doc, index, index);
}
template <typename T>
@@ -49,48 +54,32 @@ ChangeVectorT<T>::push_back(uint32_t doc, Accessor & ac)
{
if (ac.size() <= 0) { return; }
- size_t index(size());
- _v.reserve(vespalib::roundUp2inN(index + ac.size(), sizeof(T)));
+ _v.reserve(roundUp2inN<T>(size() + ac.size()));
for (size_t i(0), m(ac.size()); i < m; i++, ac.next()) {
_v.push_back(T(ChangeBase::APPEND, doc, typename T::DataType(ac.value()), ac.weight()));
- _v.back().setNext(index + i + 1);
}
- linkIn(doc, index, size() - 1);
}
template <typename T>
-void
-ChangeVectorT<T>::linkIn(uint32_t doc, size_t first, size_t last)
+vespalib::MemoryUsage
+ChangeVectorT<T>::getMemoryUsage() const
{
- if (first != 0 && (_v[_tail]._doc == doc)) {
- _v[_tail].setNext(first);
- _tail = last;
- } else {
- Map::iterator found(_docs.find(doc));
- if (found == _docs.end()) {
- _docs[doc] = last;
- if (_tail != first) {
- _v[_tail].setNext(first);
- }
- _tail = last;
- } else {
- uint32_t prev(found->second);
- for (; _v[_v[prev].getNext()]._doc == doc; prev = _v[prev].getNext());
- _v[last].setNext(_v[prev].getNext());
- _v[prev].setNext(first);
- found->second = last;
- }
- }
- _v[_tail].setNext(size());
+ size_t usedBytes = _v.size() * sizeof(T);
+ size_t allocBytes = _v.capacity() * sizeof(T);
+ return vespalib::MemoryUsage(allocBytes, usedBytes, 0, 0);
}
template <typename T>
-vespalib::MemoryUsage
-ChangeVectorT<T>::getMemoryUsage() const
+ChangeVectorT<T>::DocIdInsertOrder::DocIdInsertOrder(const Vector & v)
+ : _v(v),
+ _adjacent()
{
- size_t usedBytes = _v.size() * sizeof(T) + _docs.getMemoryUsed();
- size_t allocBytes = _v.capacity() * sizeof(T) + _docs.getMemoryConsumption();
- return vespalib::MemoryUsage(allocBytes, usedBytes, 0, 0);
+ _adjacent.reserve(v.size());
+ uint32_t index(0);
+ for (const auto & c : _v) {
+ _adjacent.push_back((uint64_t(c._doc) << 32) | index++);
+ }
+ std::sort(_adjacent.begin(), _adjacent.end());
}
} // namespace search
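
The rewritten clear() keeps the reserved buffer between commits, but drops it once an abnormal burst has grown the capacity past five times the normal reserve. A minimal sketch of that hysteresis against a plain std::vector (the patch additionally rounds the reserve up to a full allocation with roundUp2inN<T>, which this sketch omits):

    // Sketch only: the capacity hysteresis used by ChangeVectorT<T>::clear() above.
    #include <cstddef>
    #include <vector>

    template <typename T>
    void clear_with_hysteresis(std::vector<T> &v, std::size_t normal_reserve = 200) {
        if (v.capacity() > normal_reserve * 5) {
            // Abnormal peak: release the oversized buffer instead of keeping it.
            v = std::vector<T>();
            v.reserve(normal_reserve);
        } else {
            v.clear();   // keep the buffer for the next burst between commits
        }
    }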
diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
index d867ae9f211..3b3fdd9bc5c 100644
--- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.cpp
@@ -1,13 +1,8 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "enum_store_dictionary.h"
-#include "enumstore.h"
#include <vespa/vespalib/btree/btree.hpp>
-#include <vespa/vespalib/btree/btreeiterator.hpp>
#include <vespa/vespalib/btree/btreenode.hpp>
-#include <vespa/vespalib/btree/btreenodeallocator.hpp>
-#include <vespa/vespalib/btree/btreeroot.hpp>
-#include <vespa/vespalib/datastore/datastore.hpp>
#include <vespa/vespalib/datastore/sharded_hash_map.h>
#include <vespa/vespalib/datastore/unique_store_dictionary.hpp>
#include <vespa/searchlib/util/bufferwriter.h>
@@ -15,7 +10,6 @@
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.attribute.enum_store_dictionary");
-using vespalib::datastore::EntryComparator;
using vespalib::datastore::EntryRef;
using vespalib::datastore::UniqueStoreAddResult;
@@ -25,12 +19,8 @@ using vespalib::btree::BTreeNode;
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::remove_unused_values(const IndexSet& unused,
- const vespalib::datastore::EntryComparator& cmp)
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::remove_unused_values(const IndexList & unused,const EntryComparator& cmp)
{
- if (unused.empty()) {
- return;
- }
for (const auto& ref : unused) {
this->remove(cmp, ref);
}
@@ -48,9 +38,9 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::~EnumStoreDictionary() =
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const vespalib::datastore::EntryComparator& cmp)
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const EntryComparator& cmp)
{
- IndexSet unused;
+ IndexList unused;
// find unused enums
if constexpr (has_btree_dictionary) {
@@ -58,19 +48,26 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const
_enumStore.free_value_if_unused(iter.getKey(), unused);
}
} else {
- this->_hash_dict.foreach_key([this, &unused](EntryRef ref) { _enumStore.free_value_if_unused(ref, unused); });
+ this->_hash_dict.foreach_key([this, &unused](EntryRef ref) {
+ _enumStore.free_value_if_unused(ref, unused);
+ });
}
remove_unused_values(unused, cmp);
}
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const IndexSet& to_remove,
- const vespalib::datastore::EntryComparator& cmp)
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::free_unused_values(const IndexList& to_remove, const EntryComparator& cmp)
{
- IndexSet unused;
+ IndexList unused;
+
+ EntryRef prev;
for (const auto& index : to_remove) {
- _enumStore.free_value_if_unused(index, unused);
+ assert(prev <= index);
+ if (index != prev) {
+ _enumStore.free_value_if_unused(index, unused);
+ prev = index;
+ }
}
remove_unused_values(unused, cmp);
}
@@ -96,8 +93,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::remove(const EntryCompar
template <typename BTreeDictionaryT, typename HashDictionaryT>
bool
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_index(const vespalib::datastore::EntryComparator& cmp,
- Index& idx) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_index(const EntryComparator& cmp, Index& idx) const
{
if constexpr (has_hash_dictionary) {
auto find_result = this->_hash_dict.find(cmp, EntryRef());
@@ -118,8 +114,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_index(const vespali
template <typename BTreeDictionaryT, typename HashDictionaryT>
bool
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_frozen_index(const vespalib::datastore::EntryComparator& cmp,
- Index& idx) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_frozen_index(const EntryComparator& cmp, Index& idx) const
{
if constexpr (has_hash_dictionary) {
auto find_result = this->_hash_dict.find(cmp, EntryRef());
@@ -140,7 +135,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_frozen_index(const
template <typename BTreeDictionaryT, typename HashDictionaryT>
std::vector<IEnumStore::EnumHandle>
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_matching_enums(const vespalib::datastore::EntryComparator& cmp) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_matching_enums(const EntryComparator& cmp) const
{
std::vector<IEnumStore::EnumHandle> result;
if constexpr (has_btree_dictionary) {
@@ -171,14 +166,14 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::get_frozen_root() const
template <>
std::pair<IEnumStore::Index, EntryRef>
-EnumStoreDictionary<EnumTree>::find_posting_list(const vespalib::datastore::EntryComparator&, EntryRef) const
+EnumStoreDictionary<EnumTree>::find_posting_list(const EntryComparator&, EntryRef) const
{
LOG_ABORT("should not be reached");
}
template <typename BTreeDictionaryT, typename HashDictionaryT>
std::pair<IEnumStore::Index, EntryRef>
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_posting_list(const vespalib::datastore::EntryComparator& cmp, EntryRef root) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_posting_list(const EntryComparator& cmp, EntryRef root) const
{
if constexpr (has_hash_dictionary) {
(void) root;
@@ -199,7 +194,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::find_posting_list(const
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::collect_folded(Index idx, EntryRef, const std::function<void(vespalib::datastore::EntryRef)>& callback) const
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::collect_folded(Index idx, EntryRef, const std::function<void(EntryRef)>& callback) const
{
callback(idx);
}
@@ -244,14 +239,14 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::clear_all_posting_lists(
template <>
void
-EnumStoreDictionary<EnumTree>::update_posting_list(Index, const vespalib::datastore::EntryComparator&, std::function<EntryRef(EntryRef)>)
+EnumStoreDictionary<EnumTree>::update_posting_list(Index, const EntryComparator&, std::function<EntryRef(EntryRef)>)
{
LOG_ABORT("should not be reached");
}
template <typename BTreeDictionaryT, typename HashDictionaryT>
void
-EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::update_posting_list(Index idx, const vespalib::datastore::EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater)
+EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater)
{
if constexpr (has_btree_dictionary) {
auto& dict = this->_btree_dict;
@@ -336,7 +331,7 @@ EnumStoreDictionary<BTreeDictionaryT, HashDictionaryT>::get_posting_dictionary()
return this->_btree_dict;
}
-EnumStoreFoldedDictionary::EnumStoreFoldedDictionary(IEnumStore& enumStore, std::unique_ptr<vespalib::datastore::EntryComparator> compare, std::unique_ptr<EntryComparator> folded_compare)
+EnumStoreFoldedDictionary::EnumStoreFoldedDictionary(IEnumStore& enumStore, std::unique_ptr<EntryComparator> compare, std::unique_ptr<EntryComparator> folded_compare)
: EnumStoreDictionary<EnumPostingTree>(enumStore, std::move(compare)),
_folded_compare(std::move(folded_compare))
{
@@ -389,7 +384,7 @@ EnumStoreFoldedDictionary::remove(const EntryComparator& comp, EntryRef ref)
}
void
-EnumStoreFoldedDictionary::collect_folded(Index idx, EntryRef root, const std::function<void(vespalib::datastore::EntryRef)>& callback) const
+EnumStoreFoldedDictionary::collect_folded(Index idx, EntryRef root, const std::function<void(EntryRef)>& callback) const
{
BTreeDictionaryType::ConstIterator itr(vespalib::btree::BTreeNode::Ref(), _btree_dict.getAllocator());
itr.lower_bound(root, idx, *_folded_compare);
@@ -421,6 +416,7 @@ namespace vespalib::btree {
using search::IEnumStore;
using search::EnumTreeTraits;
+using datastore::EntryComparatorWrapper;
template
class BTreeNodeT<IEnumStore::Index, EnumTreeTraits::INTERNAL_SLOTS>;
@@ -456,19 +452,19 @@ class BTreeNodeStore<IEnumStore::Index, uint32_t, NoAggregated,
template
class BTreeRoot<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeRoot<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeRootT<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeRootT<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeRootBase<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
@@ -494,23 +490,23 @@ class BTreeIteratorBase<IEnumStore::Index, uint32_t, NoAggregated,
EnumTreeTraits::INTERNAL_SLOTS, EnumTreeTraits::LEAF_SLOTS, EnumTreeTraits::PATH_SIZE>;
template class BTreeConstIterator<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template class BTreeConstIterator<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeIterator<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTreeIterator<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTree<IEnumStore::Index, BTreeNoLeafData, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
template
class BTree<IEnumStore::Index, uint32_t, NoAggregated,
- const vespalib::datastore::EntryComparatorWrapper, EnumTreeTraits>;
+ const EntryComparatorWrapper, EnumTreeTraits>;
}
diff --git a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
index a39ff524618..3626fb098d2 100644
--- a/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
+++ b/searchlib/src/vespa/searchlib/attribute/enum_store_dictionary.h
@@ -18,9 +18,10 @@ protected:
using EntryRef = IEnumStoreDictionary::EntryRef;
using Index = IEnumStoreDictionary::Index;
using BTreeDictionaryType = BTreeDictionaryT;
+ using EntryComparator = IEnumStoreDictionary::EntryComparator;
private:
using EnumVector = IEnumStoreDictionary::EnumVector;
- using IndexSet = IEnumStoreDictionary::IndexSet;
+ using IndexList = IEnumStoreDictionary::IndexList;
using IndexVector = IEnumStoreDictionary::IndexVector;
using ParentUniqueStoreDictionary = vespalib::datastore::UniqueStoreDictionary<BTreeDictionaryT, IEnumStoreDictionary, HashDictionaryT>;
using generation_t = IEnumStoreDictionary::generation_t;
@@ -30,31 +31,28 @@ protected:
private:
IEnumStore& _enumStore;
- void remove_unused_values(const IndexSet& unused,
- const vespalib::datastore::EntryComparator& cmp);
+ void remove_unused_values(const IndexList& unused, const EntryComparator& cmp);
public:
- EnumStoreDictionary(IEnumStore& enumStore, std::unique_ptr<vespalib::datastore::EntryComparator> compare);
+ EnumStoreDictionary(IEnumStore& enumStore, std::unique_ptr<EntryComparator> compare);
~EnumStoreDictionary() override;
- void free_unused_values(const vespalib::datastore::EntryComparator& cmp) override;
+ void free_unused_values(const EntryComparator& cmp) override;
+ void free_unused_values(const IndexList& to_remove, const EntryComparator& cmp) override;
- void free_unused_values(const IndexSet& to_remove,
- const vespalib::datastore::EntryComparator& cmp) override;
-
- void remove(const vespalib::datastore::EntryComparator& comp, vespalib::datastore::EntryRef ref) override;
- bool find_index(const vespalib::datastore::EntryComparator& cmp, Index& idx) const override;
- bool find_frozen_index(const vespalib::datastore::EntryComparator& cmp, Index& idx) const override;
+ void remove(const EntryComparator& comp, EntryRef ref) override;
+ bool find_index(const EntryComparator& cmp, Index& idx) const override;
+ bool find_frozen_index(const EntryComparator& cmp, Index& idx) const override;
std::vector<attribute::IAttributeVector::EnumHandle>
- find_matching_enums(const vespalib::datastore::EntryComparator& cmp) const override;
+ find_matching_enums(const EntryComparator& cmp) const override;
EntryRef get_frozen_root() const override;
- std::pair<Index, EntryRef> find_posting_list(const vespalib::datastore::EntryComparator& cmp, EntryRef root) const override;
- void collect_folded(Index idx, EntryRef root, const std::function<void(vespalib::datastore::EntryRef)>& callback) const override;
+ std::pair<Index, EntryRef> find_posting_list(const EntryComparator& cmp, EntryRef root) const override;
+ void collect_folded(Index idx, EntryRef root, const std::function<void(EntryRef)>& callback) const override;
Index remap_index(Index idx) override;
void clear_all_posting_lists(std::function<void(EntryRef)> clearer) override;
- void update_posting_list(Index idx, const vespalib::datastore::EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) override;
+ void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) override;
bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) override;
const EnumPostingTree& get_posting_dictionary() const override;
};
@@ -71,14 +69,14 @@ public:
class EnumStoreFoldedDictionary : public EnumStoreDictionary<EnumPostingTree>
{
private:
- std::unique_ptr<vespalib::datastore::EntryComparator> _folded_compare;
+ std::unique_ptr<EntryComparator> _folded_compare;
public:
- EnumStoreFoldedDictionary(IEnumStore& enumStore, std::unique_ptr<vespalib::datastore::EntryComparator> compare, std::unique_ptr<vespalib::datastore::EntryComparator> folded_compare);
+ EnumStoreFoldedDictionary(IEnumStore& enumStore, std::unique_ptr<EntryComparator> compare, std::unique_ptr<EntryComparator> folded_compare);
~EnumStoreFoldedDictionary() override;
- vespalib::datastore::UniqueStoreAddResult add(const vespalib::datastore::EntryComparator& comp, std::function<vespalib::datastore::EntryRef(void)> insertEntry) override;
- void remove(const vespalib::datastore::EntryComparator& comp, vespalib::datastore::EntryRef ref) override;
- void collect_folded(Index idx, EntryRef root, const std::function<void(vespalib::datastore::EntryRef)>& callback) const override;
+ vespalib::datastore::UniqueStoreAddResult add(const EntryComparator& comp, std::function<EntryRef(void)> insertEntry) override;
+ void remove(const EntryComparator& comp, EntryRef ref) override;
+ void collect_folded(Index idx, EntryRef root, const std::function<void(EntryRef)>& callback) const override;
Index remap_index(Index idx) override;
};
diff --git a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
index fd576b3a9ba..164bb411061 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumattribute.hpp
@@ -63,7 +63,7 @@ void
EnumAttribute<B>::insertNewUniqueValues(EnumStoreBatchUpdater& updater)
{
// find and insert new unique strings
- for (const auto & data : this->_changes) {
+ for (const auto & data : this->_changes.getInsertOrder()) {
considerAttributeChange(data, updater);
}
}
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.h b/searchlib/src/vespa/searchlib/attribute/enumstore.h
index 326e0916039..59d77ea0558 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.h
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.h
@@ -63,7 +63,7 @@ private:
EnumStoreT(const EnumStoreT & rhs) = delete;
EnumStoreT & operator=(const EnumStoreT & rhs) = delete;
- void free_value_if_unused(Index idx, IndexSet &unused) override;
+ void free_value_if_unused(Index idx, IndexList &unused) override;
const vespalib::datastore::UniqueStoreEntryBase& get_entry_base(Index idx) const {
return _store.get_allocator().get_wrapped(idx);
@@ -153,7 +153,7 @@ public:
class BatchUpdater {
private:
EnumStoreType& _store;
- IndexSet _possibly_unused;
+ IndexList _possibly_unused;
public:
BatchUpdater(EnumStoreType& store)
@@ -168,11 +168,11 @@ public:
auto& entry = _store.get_entry_base(idx);
entry.dec_ref_count();
if (entry.get_ref_count() == 0) {
- _possibly_unused.insert(idx);
+ _possibly_unused.push_back(idx);
}
}
void commit() {
- _store.free_unused_values(_possibly_unused);
+ _store.free_unused_values(std::move(_possibly_unused));
}
};
@@ -198,7 +198,7 @@ public:
Index insert(EntryType value);
bool find_index(EntryType value, Index& idx) const;
void free_unused_values() override;
- void free_unused_values(const IndexSet& to_remove);
+ void free_unused_values(IndexList to_remove);
vespalib::MemoryUsage update_stat() override;
std::unique_ptr<EnumIndexRemapper> consider_compact_values(const CompactionStrategy& compaction_strategy) override;
std::unique_ptr<EnumIndexRemapper> compact_worst_values(bool compact_memory, bool compact_address_space) override;
diff --git a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
index 90bcf92a103..771da8ffa01 100644
--- a/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/enumstore.hpp
@@ -30,11 +30,11 @@ make_enum_store_dictionary(IEnumStore &store, bool has_postings, const search::D
std::unique_ptr<EntryComparator> folded_compare);
template <typename EntryT>
-void EnumStoreT<EntryT>::free_value_if_unused(Index idx, IndexSet& unused)
+void EnumStoreT<EntryT>::free_value_if_unused(Index idx, IndexList& unused)
{
const auto& entry = get_entry_base(idx);
if (entry.get_ref_count() == 0) {
- unused.insert(idx);
+ unused.push_back(idx);
_store.get_allocator().hold(idx);
}
}
@@ -140,7 +140,7 @@ EnumStoreT<EntryT>::BatchUpdater::insert(EntryType value)
auto cmp = _store.make_comparator(value);
auto result = _store._dict->add(cmp, [this, &value]() -> EntryRef { return _store._store.get_allocator().allocate(value); });
if (result.inserted()) {
- _possibly_unused.insert(result.ref());
+ _possibly_unused.push_back(result.ref());
}
return result.ref();
}
@@ -191,8 +191,16 @@ EnumStoreT<EntryT>::free_unused_values()
template <typename EntryT>
void
-EnumStoreT<EntryT>::free_unused_values(const IndexSet& to_remove)
+EnumStoreT<EntryT>::free_unused_values(IndexList to_remove)
{
+ struct CompareEnumIndex {
+ using Index = IEnumStore::Index;
+
+ bool operator()(const Index &lhs, const Index &rhs) const {
+ return lhs.ref() < rhs.ref();
+ }
+ };
+ std::sort(to_remove.begin(), to_remove.end(), CompareEnumIndex());
_dict->free_unused_values(to_remove, get_comparator());
}
@@ -207,7 +215,7 @@ template <typename EntryT>
vespalib::MemoryUsage
EnumStoreT<EntryT>::update_stat()
{
- auto &store = _store.get_allocator().get_data_store();
+ auto &store = _store.get_data_store();
_cached_values_memory_usage = store.getMemoryUsage();
_cached_values_address_space_usage = store.getAddressSpaceUsage();
_cached_dictionary_btree_usage = _dict->get_btree_memory_usage();
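
With IndexSet (a std::set ordered by EntryRef) replaced by IndexList (a plain std::vector), ordering and uniqueness are now established once at free time: EnumStoreT<EntryT>::free_unused_values() sorts the list by ref(), and the dictionary overload above skips adjacent duplicates. A minimal sketch of that sort-then-dedup pattern, using raw uint32_t refs instead of IEnumStore::Index:

    // Sketch only: ordered-set semantics recovered from a plain vector, which is
    // what the sort in the enum store plus the duplicate check in the dictionary
    // achieve between them.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    template <typename Func>
    void for_each_unique_ref(std::vector<uint32_t> refs, Func &&visit) {
        std::sort(refs.begin(), refs.end());
        const uint32_t *prev = nullptr;
        for (const uint32_t &ref : refs) {
            if (prev == nullptr || ref != *prev) {
                visit(ref);    // each distinct ref is visited exactly once
            }
            prev = &ref;
        }
    }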
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
index 6d714ec25ba..716609764f4 100644
--- a/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
+++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store.h
@@ -40,22 +40,14 @@ public:
using EnumIndexRemapper = vespalib::datastore::UniqueStoreRemapper<InternalIndex>;
using Enumerator = vespalib::datastore::UniqueStoreEnumerator<IEnumStore::InternalIndex>;
- struct CompareEnumIndex {
- using Index = IEnumStore::Index;
-
- bool operator()(const Index &lhs, const Index &rhs) const {
- return lhs.ref() < rhs.ref();
- }
- };
-
- using IndexSet = std::set<Index, CompareEnumIndex>;
+ using IndexList = std::vector<Index>;
virtual ~IEnumStore() = default;
virtual void write_value(BufferWriter& writer, Index idx) const = 0;
virtual ssize_t load_unique_values(const void* src, size_t available, IndexVector& idx) = 0;
virtual void set_ref_count(Index idx, uint32_t ref_count) = 0;
- virtual void free_value_if_unused(Index idx, IndexSet& unused) = 0;
+ virtual void free_value_if_unused(Index idx, IndexList& unused) = 0;
virtual void free_unused_values() = 0;
virtual bool is_folded_change(Index idx1, Index idx2) const = 0;
virtual IEnumStoreDictionary& get_dictionary() = 0;
diff --git a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h
index f816177b06c..bef7384b0b7 100644
--- a/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h
+++ b/searchlib/src/vespa/searchlib/attribute/i_enum_store_dictionary.h
@@ -29,29 +29,29 @@ using EnumPostingTree = vespalib::btree::BTree<IEnumStore::Index, uint32_t,
class IEnumStoreDictionary : public vespalib::datastore::IUniqueStoreDictionary {
public:
using EntryRef = vespalib::datastore::EntryRef;
+ using EntryComparator = vespalib::datastore::EntryComparator;
using EnumVector = IEnumStore::EnumVector;
using Index = IEnumStore::Index;
- using IndexSet = IEnumStore::IndexSet;
+ using IndexList = IEnumStore::IndexList;
using IndexVector = IEnumStore::IndexVector;
using generation_t = vespalib::GenerationHandler::generation_t;
public:
virtual ~IEnumStoreDictionary() = default;
- virtual void free_unused_values(const vespalib::datastore::EntryComparator& cmp) = 0;
- virtual void free_unused_values(const IndexSet& to_remove,
- const vespalib::datastore::EntryComparator& cmp) = 0;
- virtual bool find_index(const vespalib::datastore::EntryComparator& cmp, Index& idx) const = 0;
- virtual bool find_frozen_index(const vespalib::datastore::EntryComparator& cmp, Index& idx) const = 0;
+ virtual void free_unused_values(const EntryComparator& cmp) = 0;
+ virtual void free_unused_values(const IndexList& to_remove, const EntryComparator& cmp) = 0;
+ virtual bool find_index(const EntryComparator& cmp, Index& idx) const = 0;
+ virtual bool find_frozen_index(const EntryComparator& cmp, Index& idx) const = 0;
virtual std::vector<attribute::IAttributeVector::EnumHandle>
- find_matching_enums(const vespalib::datastore::EntryComparator& cmp) const = 0;
+ find_matching_enums(const EntryComparator& cmp) const = 0;
virtual EntryRef get_frozen_root() const = 0;
- virtual std::pair<Index, EntryRef> find_posting_list(const vespalib::datastore::EntryComparator& cmp, EntryRef root) const = 0;
- virtual void collect_folded(Index idx, EntryRef root, const std::function<void(vespalib::datastore::EntryRef)>& callback) const = 0;
+ virtual std::pair<Index, EntryRef> find_posting_list(const EntryComparator& cmp, EntryRef root) const = 0;
+ virtual void collect_folded(Index idx, EntryRef root, const std::function<void(EntryRef)>& callback) const = 0;
virtual Index remap_index(Index idx) = 0;
virtual void clear_all_posting_lists(std::function<void(EntryRef)> clearer) = 0;
- virtual void update_posting_list(Index idx, const vespalib::datastore::EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) = 0;
+ virtual void update_posting_list(Index idx, const EntryComparator& cmp, std::function<EntryRef(EntryRef)> updater) = 0;
virtual bool normalize_posting_lists(std::function<EntryRef(EntryRef)> normalize) = 0;
virtual const EnumPostingTree& get_posting_dictionary() const = 0;
};
diff --git a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp
index 8475451ba60..072398abcf9 100644
--- a/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multienumattribute.hpp
@@ -25,7 +25,7 @@ template <typename B, typename M>
bool
MultiValueEnumAttribute<B, M>::extractChangeData(const Change & c, EnumIndex & idx)
{
- if (c._enumScratchPad == Change::UNSET_ENUM) {
+ if ( ! c.isEnumValid() ) {
return this->_enumStore.find_index(c._data.raw(), idx);
}
idx = EnumIndex(vespalib::datastore::EntryRef(c._enumScratchPad));
diff --git a/searchlib/src/vespa/searchlib/attribute/multivalueattribute.h b/searchlib/src/vespa/searchlib/attribute/multivalueattribute.h
index 66ca6bd2eac..d36777a25a9 100644
--- a/searchlib/src/vespa/searchlib/attribute/multivalueattribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/multivalueattribute.h
@@ -20,7 +20,6 @@ protected:
typedef typename B::DocId DocId;
typedef typename B::Change Change;
typedef typename B::ChangeVector ChangeVector;
- typedef typename B::ChangeVector::const_iterator ChangeVectorIterator;
using MultiValueType = M;
using MultiValueMapping = attribute::MultiValueMapping<MultiValueType>;
diff --git a/searchlib/src/vespa/searchlib/attribute/multivalueattribute.hpp b/searchlib/src/vespa/searchlib/attribute/multivalueattribute.hpp
index 0cd2e0bbc27..2e73909ea1e 100644
--- a/searchlib/src/vespa/searchlib/attribute/multivalueattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multivalueattribute.hpp
@@ -78,11 +78,12 @@ void
MultiValueAttribute<B, M>::apply_attribute_changes_to_array(DocumentValues& docValues)
{
// compute new values for each document with changes
- for (ChangeVectorIterator current(this->_changes.begin()), end(this->_changes.end()); (current != end); ) {
+ auto iterable = this->_changes.getDocIdInsertOrder();
+ for (auto current(iterable.begin()), end(iterable.end()); (current != end); ) {
DocId doc = current->_doc;
// find last clear doc
- ChangeVectorIterator last_clear_doc = end;
- for (ChangeVectorIterator iter = current; (iter != end) && (iter->_doc == doc); ++iter) {
+ auto last_clear_doc = end;
+ for (auto iter = current; (iter != end) && (iter->_doc == doc); ++iter) {
if (iter->_type == ChangeBase::CLEARDOC) {
last_clear_doc = iter;
}
@@ -137,12 +138,13 @@ void
MultiValueAttribute<B, M>::apply_attribute_changes_to_wset(DocumentValues& docValues)
{
// compute new values for each document with changes
- for (ChangeVectorIterator current(this->_changes.begin()), end(this->_changes.end()); (current != end); ) {
+ auto iterable = this->_changes.getDocIdInsertOrder();
+ for (auto current(iterable.begin()), end(iterable.end()); (current != end); ) {
const DocId doc = current->_doc;
// find last clear doc
- ChangeVectorIterator last_clear_doc = end;
+ auto last_clear_doc = end;
size_t max_elems_inserted = 0;
- for (ChangeVectorIterator iter = current; (iter != end) && (iter->_doc == doc); ++iter) {
+ for (auto iter = current; (iter != end) && (iter->_doc == doc); ++iter) {
if (iter->_type == ChangeBase::CLEARDOC) {
last_clear_doc = iter;
}
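
Both apply_attribute_changes_to_array() and apply_attribute_changes_to_wset() now walk the docid-grouped view instead of the removed linked-list iterator; the per-document grouping logic itself is unchanged. The access pattern, sketched with a hypothetical `changes` ChangeVectorT instance:

    // Sketch only: per-document grouping over the docid-ordered view.
    auto view = changes.getDocIdInsertOrder();
    for (auto current = view.begin(), end = view.end(); current != end; ) {
        uint32_t doc = current->_doc;
        // all changes for 'doc' are adjacent here, in insertion order
        for (; (current != end) && (current->_doc == doc); ++current) {
            // ... inspect or apply *current ...
        }
    }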
diff --git a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
index 6c62e650345..477917debf0 100644
--- a/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/postingstore.cpp
@@ -696,7 +696,10 @@ PostingStore<DataT>::move(EntryRef ref)
if (!_store.getCompacting(ref)) {
return ref;
}
- return allocBitVectorCopy(*bve).ref;
+ auto new_ref = allocBitVectorCopy(*bve).ref;
+ _bvs.erase(ref.ref());
+ _bvs.insert(new_ref.ref());
+ return new_ref;
} else {
if (!_store.getCompacting(ref)) {
return ref;
diff --git a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp
index 7913e617d70..555117126a9 100644
--- a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.cpp
@@ -26,7 +26,8 @@ constexpr uint8_t MAX_MIN_FEATURE = 255;
constexpr uint16_t MAX_INTERVAL_RANGE = static_cast<uint16_t>(predicate::MAX_INTERVAL);
-int64_t adjustBound(int32_t arity, int64_t bound) {
+int64_t
+adjustBound(int32_t arity, int64_t bound) {
int64_t adjusted = arity;
int64_t value = bound;
int64_t max = LLONG_MAX / arity;
@@ -39,7 +40,8 @@ int64_t adjustBound(int32_t arity, int64_t bound) {
return adjusted - 1;
}
-int64_t adjustLowerBound(int32_t arity, int64_t lower_bound) {
+int64_t
+adjustLowerBound(int32_t arity, int64_t lower_bound) {
if (lower_bound == LLONG_MIN) {
return lower_bound;
} else if (lower_bound > 0) {
@@ -49,7 +51,8 @@ int64_t adjustLowerBound(int32_t arity, int64_t lower_bound) {
}
}
-int64_t adjustUpperBound(int32_t arity, int64_t upper_bound) {
+int64_t
+adjustUpperBound(int32_t arity, int64_t upper_bound) {
if (upper_bound == LLONG_MAX) {
return upper_bound;
} else if (upper_bound < 0) {
@@ -66,13 +69,11 @@ SimpleIndexConfig createSimpleIndexConfig(const search::attribute::Config &confi
} // namespace
-PredicateAttribute::PredicateAttribute(const vespalib::string &base_file_name,
- const Config &config)
+PredicateAttribute::PredicateAttribute(const vespalib::string &base_file_name, const Config &config)
: NotImplementedAttribute(base_file_name, config),
- _base_file_name(base_file_name),
_limit_provider(*this),
- _index(new PredicateIndex(getGenerationHandler(), getGenerationHolder(),
- _limit_provider, createSimpleIndexConfig(config), config.predicateParams().arity())),
+ _index(std::make_unique<PredicateIndex>(getGenerationHolder(), _limit_provider,
+ createSimpleIndexConfig(config), config.predicateParams().arity())),
_lower_bound(adjustLowerBound(config.predicateParams().arity(), config.predicateParams().lower_bound())),
_upper_bound(adjustUpperBound(config.predicateParams().arity(), config.predicateParams().upper_bound())),
_min_feature(config.getGrowStrategy().to_generic_strategy(), getGenerationHolder()),
@@ -99,8 +100,8 @@ PredicateAttribute::getValueCount(DocId) const
void
PredicateAttribute::onCommit()
{
- populateIfNeeded();
_index->commit();
+ populateIfNeeded();
incGeneration();
}
@@ -183,7 +184,8 @@ struct DummyObserver : SimpleIndexDeserializeObserver<> {
}
-bool PredicateAttribute::onLoad()
+bool
+PredicateAttribute::onLoad()
{
auto loaded_buffer = attribute::LoadUtils::loadDAT(*this);
char *rawBuffer = const_cast<char *>(static_cast<const char *>(loaded_buffer->buffer()));
@@ -202,12 +204,12 @@ bool PredicateAttribute::onLoad()
DocId highest_doc_id;
if (version == 0) {
DocIdLimitFinderAndMinFeatureFiller<MinFeatureVector> observer(_min_feature, *_index);
- _index = std::make_unique<PredicateIndex>(getGenerationHandler(), getGenerationHolder(), _limit_provider,
+ _index = std::make_unique<PredicateIndex>(getGenerationHolder(), _limit_provider,
createSimpleIndexConfig(getConfig()), buffer, observer, 0);
highest_doc_id = observer._highest_doc_id;
} else {
DummyObserver observer;
- _index = std::make_unique<PredicateIndex>(getGenerationHandler(), getGenerationHolder(), _limit_provider,
+ _index = std::make_unique<PredicateIndex>(getGenerationHolder(), _limit_provider,
createSimpleIndexConfig(getConfig()), buffer, observer, version);
highest_doc_id = buffer.readInt32();
// Deserialize min feature vector
@@ -240,6 +242,7 @@ PredicateAttribute::addDoc(DocId &doc_id)
_min_feature.ensure_size(doc_id + 1);
return true;
}
+
uint32_t
PredicateAttribute::clearDoc(DocId doc_id)
{
diff --git a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.h b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.h
index 6e3f0c4399f..4d7fd3c235b 100644
--- a/searchlib/src/vespa/searchlib/attribute/predicate_attribute.h
+++ b/searchlib/src/vespa/searchlib/attribute/predicate_attribute.h
@@ -80,7 +80,6 @@ public:
void populateIfNeeded();
private:
- vespalib::string _base_file_name;
const AttributeVectorDocIdLimitProvider _limit_provider;
std::unique_ptr<predicate::PredicateIndex> _index;
int64_t _lower_bound;
diff --git a/searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp b/searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp
index 2b40150f87b..6c8edea13cf 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/singleboolattribute.cpp
@@ -59,7 +59,7 @@ SingleBoolAttribute::onCommit() {
if ( ! _changes.empty()) {
// apply updates
ValueModifier valueGuard(getValueModifier());
- for (const auto & change : _changes) {
+ for (const auto & change : _changes.getInsertOrder()) {
if (change._type == ChangeBase::UPDATE) {
std::atomic_thread_fence(std::memory_order_release);
setBit(change._doc, change._data != 0);
diff --git a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
index b39bdeb3b00..a9a94afb763 100644
--- a/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singleenumattribute.hpp
@@ -175,7 +175,7 @@ void
SingleValueEnumAttribute<B>::applyValueChanges(EnumStoreBatchUpdater& updater)
{
ValueModifier valueGuard(this->getValueModifier());
- for (const auto& change : this->_changes) {
+ for (const auto& change : this->_changes.getInsertOrder()) {
if (change._type == ChangeBase::UPDATE) {
applyUpdateValueChange(change, updater);
} else if (change._type >= ChangeBase::ADD && change._type <= ChangeBase::DIV) {
@@ -312,9 +312,9 @@ SingleValueEnumAttribute<B>::onShrinkLidSpace()
uint32_t default_value_ref_count = this->_enumStore.get_ref_count(default_value_ref);
assert(default_value_ref_count >= shrink_docs);
this->_enumStore.set_ref_count(default_value_ref, default_value_ref_count - shrink_docs);
- IEnumStore::IndexSet possibly_unused;
- possibly_unused.insert(default_value_ref);
- this->_enumStore.free_unused_values(possibly_unused);
+ IEnumStore::IndexList possibly_unused;
+ possibly_unused.push_back(default_value_ref);
+ this->_enumStore.free_unused_values(std::move(possibly_unused));
}
_enumIndices.shrink(committedDocIdLimit);
this->setNumDocs(committedDocIdLimit);
diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlenumericattribute.hpp
index fd913f34c3a..671bdc44e22 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlenumericattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlenumericattribute.hpp
@@ -37,7 +37,7 @@ SingleValueNumericAttribute<B>::onCommit()
{
// apply updates
typename B::ValueModifier valueGuard(this->getValueModifier());
- for (const auto & change : this->_changes) {
+ for (const auto & change : this->_changes.getInsertOrder()) {
if (change._type == ChangeBase::UPDATE) {
std::atomic_thread_fence(std::memory_order_release);
_data[change._doc] = change._data;
diff --git a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp
index f5ab855565c..e1c2a817af7 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlenumericpostattribute.hpp
@@ -84,7 +84,7 @@ SingleValueNumericPostingAttribute<B>::applyValueChanges(EnumStoreBatchUpdater&
// used to make sure several arithmetic operations on the same document in a single commit work
std::map<DocId, EnumIndex> currEnumIndices;
- for (const auto& change : this->_changes) {
+ for (const auto& change : this->_changes.getInsertOrder()) {
auto enumIter = currEnumIndices.find(change._doc);
EnumIndex oldIdx;
if (enumIter != currEnumIndices.end()) {
diff --git a/searchlib/src/vespa/searchlib/attribute/singlesmallnumericattribute.cpp b/searchlib/src/vespa/searchlib/attribute/singlesmallnumericattribute.cpp
index f1d0da42165..8d460b5c661 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlesmallnumericattribute.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlesmallnumericattribute.cpp
@@ -53,7 +53,7 @@ SingleValueSmallNumericAttribute::onCommit()
{
// apply updates
B::ValueModifier valueGuard(getValueModifier());
- for (const auto & change : _changes) {
+ for (const auto & change : _changes.getInsertOrder()) {
if (change._type == ChangeBase::UPDATE) {
std::atomic_thread_fence(std::memory_order_release);
set(change._doc, change._data);
diff --git a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp
index 39ad8d71021..4432acf2c55 100644
--- a/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/singlestringpostattribute.hpp
@@ -85,7 +85,7 @@ SingleValueStringPostingAttributeT<B>::applyValueChanges(EnumStoreBatchUpdater&
// used to make sure several arithmetic operations on the same document in a single commit work
std::map<DocId, EnumIndex> currEnumIndices;
- for (const auto& change : this->_changes) {
+ for (const auto& change : this->_changes.getInsertOrder()) {
auto enumIter = currEnumIndices.find(change._doc);
EnumIndex oldIdx;
if (enumIter != currEnumIndices.end()) {
diff --git a/searchlib/src/vespa/searchlib/common/bitvector.cpp b/searchlib/src/vespa/searchlib/common/bitvector.cpp
index 8953710e8c7..fa63b846e17 100644
--- a/searchlib/src/vespa/searchlib/common/bitvector.cpp
+++ b/searchlib/src/vespa/searchlib/common/bitvector.cpp
@@ -6,6 +6,7 @@
#include "partialbitvector.h"
#include <vespa/vespalib/hwaccelrated/iaccelrated.h>
#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/util/size_literals.h>
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/fastos/file.h>
#include <cassert>
@@ -32,6 +33,9 @@ void verifyInclusiveStart(const search::BitVector & a, const search::BitVector &
}
}
+constexpr size_t MMAP_LIMIT = 32_Mi;
+constexpr size_t DIRECTIO_ALIGNMENT = 4_Ki;
+
}
/////////////////////////////////
@@ -49,7 +53,7 @@ BitVector::allocatePaddedAndAligned(Index start, Index end, Index capacity)
uint32_t words = numActiveWords(start, capacity);
words += (-words & 15); // Pad to 64 byte alignment
const size_t sz(words * sizeof(Word));
- Alloc alloc = Alloc::alloc(sz);
+ Alloc alloc = Alloc::alloc(sz, MMAP_LIMIT);
assert(alloc.size()/sizeof(Word) >= words);
// Clear padding
size_t usedBytes = numBytes(end - start);
@@ -337,7 +341,7 @@ BitVector::create(Index numberOfElements, FastOS_FileInterface &file,
size_t vectorsize = getFileBytes(numberOfElements);
file.DirectIOPadding(offset, vectorsize, padbefore, padafter);
assert((padbefore & (getAlignment() - 1)) == 0);
- AllocatedBitVector::Alloc alloc = Alloc::alloc(padbefore + vectorsize + padafter, 0x1000000, 0x1000);
+ AllocatedBitVector::Alloc alloc = Alloc::alloc(padbefore + vectorsize + padafter, MMAP_LIMIT, DIRECTIO_ALIGNMENT);
void * alignedBuffer = alloc.get();
file.ReadBuf(alignedBuffer, alloc.size(), offset - padbefore);
bv = std::make_unique<AllocatedBitVector>(numberOfElements, std::move(alloc), padbefore);
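
For reference, assuming vespalib's size literals denote binary units: 32_Mi = 32 * 1024 * 1024 = 33,554,432 bytes and 4_Ki = 4,096 bytes. allocatePaddedAndAligned() thus gets an explicit 32 MiB mmap threshold where it previously passed no limit, and the DirectIO load path swaps the magic 0x1000000 (16 MiB) and 0x1000 (4 KiB) literals for the same named constants, doubling its mmap threshold while keeping 4 KiB alignment.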
diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
index 81f01de0c33..8cfab1f64cf 100644
--- a/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
+++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.cpp
@@ -87,10 +87,10 @@ BitVectorCache::lookupCachedSet(const KeyAndCountSet & keys)
BitVectorCache::SortedKeyMeta
BitVectorCache::getSorted(Key2Index & keys)
{
- std::vector<std::pair<Key, KeyMeta *>> sorted;
+ SortedKeyMeta sorted;
sorted.reserve(keys.size());
for (auto & e : keys) {
- sorted.push_back({e.first, &e.second});
+ sorted.emplace_back(e.first, &e.second);
}
std::sort(sorted.begin(), sorted.end(),
[&] (const auto & a, const auto & b) {
diff --git a/searchlib/src/vespa/searchlib/common/bitvectorcache.h b/searchlib/src/vespa/searchlib/common/bitvectorcache.h
index c1415d9130f..a642d66f42f 100644
--- a/searchlib/src/vespa/searchlib/common/bitvectorcache.h
+++ b/searchlib/src/vespa/searchlib/common/bitvectorcache.h
@@ -3,6 +3,7 @@
#include "condensedbitvectors.h"
#include <vespa/vespalib/stllike/hash_set.h>
+#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/fastos/dynamiclibrary.h>
#include <mutex>
@@ -40,6 +41,7 @@ public:
void adjustDocIdLimit(uint32_t docId);
void populate(uint32_t count, const PopulateInterface &);
bool needPopulation() const { return _needPopulation; }
+ void requirePopulation() { _needPopulation = true; }
private:
class KeyMeta {
public:
@@ -76,12 +78,12 @@ private:
VESPA_DLL_LOCAL static void populate(Key2Index & newKeys, CondensedBitVector & chunk, const PopulateInterface & lookup);
VESPA_DLL_LOCAL bool hasCostChanged(const std::lock_guard<std::mutex> &);
- uint64_t _lookupCount;
- bool _needPopulation;
+ uint64_t _lookupCount;
+ bool _needPopulation;
mutable std::mutex _lock;
- Key2Index _keys;
- ChunkV _chunks;
- GenerationHolder &_genHolder;
+ Key2Index _keys;
+ ChunkV _chunks;
+ GenerationHolder &_genHolder;
};
}
diff --git a/searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp b/searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp
index d6efc4fddc2..50b971f499f 100644
--- a/searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp
+++ b/searchlib/src/vespa/searchlib/common/condensedbitvectors.cpp
@@ -129,9 +129,7 @@ void throwIllegalKey(size_t numKeys, size_t key)
}
-CondensedBitVector::~CondensedBitVector()
-{
-}
+CondensedBitVector::~CondensedBitVector() = default;
void
CondensedBitVector::addKey(Key key) const
@@ -144,7 +142,7 @@ CondensedBitVector::addKey(Key key) const
CondensedBitVector::UP
CondensedBitVector::create(size_t size, GenerationHolder &genHolder)
{
- return UP(new CondensedBitVectorT<uint32_t>(size, genHolder));
+ return std::make_unique<CondensedBitVectorT<uint32_t>>(size, genHolder);
}
}
diff --git a/searchlib/src/vespa/searchlib/common/condensedbitvectors.h b/searchlib/src/vespa/searchlib/common/condensedbitvectors.h
index 4bda29894cc..02355a61e40 100644
--- a/searchlib/src/vespa/searchlib/common/condensedbitvectors.h
+++ b/searchlib/src/vespa/searchlib/common/condensedbitvectors.h
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/vespalib/util/generationholder.h>
#include <vespa/vespalib/util/arrayref.h>
#include <set>
@@ -31,9 +30,6 @@ public:
bool hasKey(Key key) const { return key < getKeyCapacity(); }
void addKey(Key key) const;
static CondensedBitVector::UP create(size_t size, vespalib::GenerationHolder &genHolder);
-private:
- typedef vespalib::hash_map<Key, uint32_t> Key2Index;
- Key2Index _keys;
};
}
diff --git a/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp b/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp
index 837c38eb340..25bc754a86f 100644
--- a/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp
+++ b/searchlib/src/vespa/searchlib/common/indexmetainfo.cpp
@@ -5,7 +5,6 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/guard.h>
#include <cassert>
-#include <algorithm>
#include <vespa/log/log.h>
LOG_SETUP(".indexmetainfo");
@@ -14,13 +13,13 @@ namespace {
class Parser {
private:
- vespalib::string _name;
+ vespalib::string _name;
vespalib::FilePointer _file;
uint32_t _line;
char _buf[2048];
bool _error;
- vespalib::string _lastKey;
- vespalib::string _lastValue;
+ vespalib::string _lastKey;
+ vespalib::string _lastValue;
uint32_t _lastIdx;
bool _matched;
@@ -44,8 +43,7 @@ public:
return false;
}
bool illegalLine() {
- LOG(warning, "%s:%d: illegal line: %s",
- _name.c_str(), _line, _buf);
+ LOG(warning, "%s:%d: illegal line: %s", _name.c_str(), _line, _buf);
_error = true;
return false;
}
@@ -57,8 +55,7 @@ public:
}
bool illegalValue() {
LOG(warning, "%s:%d: illegal value for '%s': %s",
- _name.c_str(), _line, _lastKey.c_str(),
- _lastValue.c_str());
+ _name.c_str(), _line, _lastKey.c_str(), _lastValue.c_str());
_error = true;
return false;
}
@@ -79,7 +76,7 @@ public:
if (!_file.valid()) {
return openFailed();
}
- if (fgets(_buf, sizeof(_buf), _file) == NULL) {
+ if (fgets(_buf, sizeof(_buf), _file) == nullptr) {
return false; // EOF
}
++_line;
@@ -88,7 +85,7 @@ public:
_buf[--len] = '\0';
}
char *split = strchr(_buf, '=');
- if (split == NULL || (split - _buf) == 0) {
+ if (split == nullptr || (split - _buf) == 0) {
return illegalLine();
}
_lastKey = vespalib::string(_buf, split - _buf);
@@ -119,9 +116,9 @@ public:
void parseInt64(const vespalib::string &k, uint64_t &v) {
if (!_matched && !_error && _lastKey == k) {
_matched = true;
- char *end = NULL;
+ char *end = nullptr;
uint64_t val = strtoull(_lastValue.c_str(), &end, 10);
- if (end == NULL || *end != '\0' ||
+ if (end == nullptr || *end != '\0' ||
val == static_cast<uint64_t>(-1)) {
illegalValue();
return;
@@ -141,10 +138,10 @@ public:
if (dot2 == vespalib::string::npos) {
return illegalArrayKey();
}
- char *end = NULL;
+ char *end = nullptr;
const char *pt = _lastKey.c_str() + name.length() + 1;
uint32_t val = strtoul(pt, &end, 10);
- if (end == NULL || end == pt || *end != '.'
+ if (end == nullptr || end == pt || *end != '.'
|| val > size || size > val + 1)
{
return illegalArrayKey();
@@ -200,7 +197,7 @@ IndexMetaInfo::IndexMetaInfo(const vespalib::string &path)
{
}
-IndexMetaInfo::~IndexMetaInfo() {}
+IndexMetaInfo::~IndexMetaInfo() = default;
IndexMetaInfo::Snapshot
IndexMetaInfo::getBestSnapshot() const
@@ -209,11 +206,7 @@ IndexMetaInfo::getBestSnapshot() const
while (idx >= 0 && !_snapshots[idx].valid) {
--idx;
}
- if (idx >= 0) {
- return _snapshots[idx];
- } else {
- return Snapshot();
- }
+ return (idx >= 0) ? _snapshots[idx] : Snapshot();
}
@@ -233,7 +226,7 @@ bool
IndexMetaInfo::addSnapshot(const Snapshot &snap)
{
if (snap.dirName.empty()
- || findSnapshot(snap.syncToken) != _snapshots.end())
+ || (findSnapshot(snap.syncToken) != _snapshots.end()))
{
return false;
}
@@ -324,32 +317,23 @@ IndexMetaInfo::save(const vespalib::string &baseName)
fprintf(f, "snapshot.%d.dirName=%s\n", i, snap.dirName.c_str());
}
if (ferror(f) != 0) {
- LOG(error,
- "Could not write to file %s",
- newName.c_str());
+ LOG(error, "Could not write to file %s", newName.c_str());
return false;
}
if (fflush(f) != 0) {
- LOG(error,
- "Could not flush file %s",
- newName.c_str());
+ LOG(error, "Could not flush file %s", newName.c_str());
return false;
}
if (fsync(fileno(f)) != 0) {
- LOG(error,
- "Could not fsync file %s",
- newName.c_str());
+ LOG(error, "Could not fsync file %s", newName.c_str());
return false;
}
if (fclose(f.release()) != 0) {
- LOG(error,
- "Could not close file %s",
- newName.c_str());
+ LOG(error, "Could not close file %s", newName.c_str());
return false;
}
if (rename(newName.c_str(), fileName.c_str()) != 0) {
- LOG(warning, "could not rename: %s->%s",
- newName.c_str(), fileName.c_str());
+ LOG(warning, "could not rename: %s->%s", newName.c_str(), fileName.c_str());
return false;
}
vespalib::File::sync(vespalib::dirname(fileName));
diff --git a/searchlib/src/vespa/searchlib/common/partialbitvector.h b/searchlib/src/vespa/searchlib/common/partialbitvector.h
index f1d0716ed60..0c44378e78a 100644
--- a/searchlib/src/vespa/searchlib/common/partialbitvector.h
+++ b/searchlib/src/vespa/searchlib/common/partialbitvector.h
@@ -25,7 +25,7 @@ public:
PartialBitVector(Index start, Index end);
PartialBitVector(const BitVector & org, Index start, Index end);
- virtual ~PartialBitVector();
+ ~PartialBitVector() override;
private:
vespalib::alloc::Alloc _alloc;
diff --git a/searchlib/src/vespa/searchlib/common/tunefileinfo.hpp b/searchlib/src/vespa/searchlib/common/tunefileinfo.hpp
index 7cba56d5d72..a99bf8fcfd0 100644
--- a/searchlib/src/vespa/searchlib/common/tunefileinfo.hpp
+++ b/searchlib/src/vespa/searchlib/common/tunefileinfo.hpp
@@ -26,7 +26,6 @@ TuneFileRandRead::setFromMmapConfig(const MMapConfig & mmapFlags) {
for (size_t i(0), m(mmapFlags.options.size()); i < m; i++) {
#ifdef __linux__
switch (mmapFlags.options[i]) {
- case MMapConfig::Options::MLOCK: _mmapFlags |= MAP_LOCKED; break;
case MMapConfig::Options::POPULATE: _mmapFlags |= MAP_POPULATE; break;
case MMapConfig::Options::HUGETLB: _mmapFlags |= MAP_HUGETLB; break;
}
diff --git a/searchlib/src/vespa/searchlib/features/onnx_feature.cpp b/searchlib/src/vespa/searchlib/features/onnx_feature.cpp
index 87e5ef2a5c2..e9fecb3578e 100644
--- a/searchlib/src/vespa/searchlib/features/onnx_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/onnx_feature.cpp
@@ -69,6 +69,8 @@ public:
OnnxBlueprint::OnnxBlueprint()
: Blueprint("onnxModel"),
+ _cache_token(),
+ _debug_model(),
_model(nullptr),
_wire_info()
{
@@ -80,15 +82,18 @@ bool
OnnxBlueprint::setup(const IIndexEnvironment &env,
const ParameterList &params)
{
- auto optimize = (env.getFeatureMotivation() == env.FeatureMotivation::VERIFY_SETUP)
- ? Onnx::Optimize::DISABLE
- : Onnx::Optimize::ENABLE;
auto model_cfg = env.getOnnxModel(params[0].getValue());
if (!model_cfg) {
return fail("no model with name '%s' found", params[0].getValue().c_str());
}
try {
- _model = std::make_unique<Onnx>(model_cfg->file_path(), optimize);
+ if (env.getFeatureMotivation() == env.FeatureMotivation::VERIFY_SETUP) {
+ _debug_model = std::make_unique<Onnx>(model_cfg->file_path(), Optimize::DISABLE);
+ _model = _debug_model.get();
+ } else {
+ _cache_token = OnnxModelCache::load(model_cfg->file_path());
+ _model = &(_cache_token->get());
+ }
} catch (std::exception &ex) {
return fail("model setup failed: %s", ex.what());
}
@@ -132,7 +137,7 @@ OnnxBlueprint::setup(const IIndexEnvironment &env,
FeatureExecutor &
OnnxBlueprint::createExecutor(const IQueryEnvironment &, Stash &stash) const
{
- assert(_model);
+ assert(_model != nullptr);
return stash.create<OnnxFeatureExecutor>(*_model, _wire_info);
}
diff --git a/searchlib/src/vespa/searchlib/features/onnx_feature.h b/searchlib/src/vespa/searchlib/features/onnx_feature.h
index 6a63e7276c2..ed0fbc502f0 100644
--- a/searchlib/src/vespa/searchlib/features/onnx_feature.h
+++ b/searchlib/src/vespa/searchlib/features/onnx_feature.h
@@ -3,7 +3,7 @@
#pragma once
#include <vespa/searchlib/fef/blueprint.h>
-#include <vespa/eval/onnx/onnx_wrapper.h>
+#include <vespa/eval/onnx/onnx_model_cache.h>
namespace search::features {
@@ -13,7 +13,11 @@ namespace search::features {
class OnnxBlueprint : public fef::Blueprint {
private:
using Onnx = vespalib::eval::Onnx;
- std::unique_ptr<Onnx> _model;
+ using Optimize = vespalib::eval::Onnx::Optimize;
+ using OnnxModelCache = vespalib::eval::OnnxModelCache;
+ OnnxModelCache::Token::UP _cache_token;
+ std::unique_ptr<Onnx> _debug_model;
+ const Onnx *_model;
Onnx::WireInfo _wire_info;
public:
OnnxBlueprint();
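The header now keeps a cache token plus a non-owning pointer instead of owning the Onnx model directly, so identical models can be shared. A small, hypothetical sketch of the token-based cache idea (simplified; not the actual vespalib::eval::OnnxModelCache API, and without the locking a real cache needs):

#include <map>
#include <memory>
#include <string>

// Stand-in for a loaded model.
struct Model {
    explicit Model(std::string p) : path(std::move(p)) {}
    std::string path;
};

// Hands out tokens; a model stays loaded while at least one token for
// its path is alive, and equal paths share a single instance.
class ModelCache {
    static std::map<std::string, std::weak_ptr<Model>> _models;
public:
    class Token {
        std::shared_ptr<Model> _model;
    public:
        explicit Token(std::shared_ptr<Model> m) : _model(std::move(m)) {}
        const Model &get() const { return *_model; }
    };
    static std::unique_ptr<Token> load(const std::string &path) {
        auto &slot = _models[path];          // stale weak_ptrs are simply overwritten
        auto model = slot.lock();
        if (!model) {
            model = std::make_shared<Model>(path);
            slot = model;
        }
        return std::make_unique<Token>(std::move(model));
    }
};
std::map<std::string, std::weak_ptr<Model>> ModelCache::_models;

A blueprint following this pattern keeps the token plus a const pointer into the token's object, which is what the _cache_token/_model pair above mirrors.
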
diff --git a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
index 76a6e908fcb..24e06cfe639 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
@@ -45,7 +45,7 @@ TensorFromLabelsBlueprint::setup(const search::fef::IIndexEnvironment &env,
_dimension = _sourceParam;
}
describeOutput("tensor",
- "The tensor created from the given array source (attribute field or query parameter)",
+ "The tensor created from the given source (attribute field or query parameter)",
FeatureType::object(ValueType::make_type(CellType::DOUBLE, {{_dimension}})));
return validSource;
}
@@ -63,10 +63,14 @@ createAttributeExecutor(const search::fef::IQueryEnvironment &env,
" Returning empty tensor.", attrName.c_str());
return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
}
- if (attribute->getCollectionType() != search::attribute::CollectionType::ARRAY ||
- attribute->isFloatingPointType()) {
- LOG(warning, "The attribute vector '%s' is NOT of type array of string or integer."
- " Returning empty tensor.", attrName.c_str());
+ if (attribute->isFloatingPointType()) {
+ LOG(warning, "The attribute vector '%s' must have basic type string or integer."
+ " Returning empty tensor.", attrName.c_str());
+ return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
+ }
+ if (attribute->getCollectionType() == search::attribute::CollectionType::WSET) {
+ LOG(warning, "The attribute vector '%s' is a weighted set - use tensorFromWeightedSet instead."
+ " Returning empty tensor.", attrName.c_str());
return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
}
// Note that for array attribute vectors the default weight is 1.0 for all values.
diff --git a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp
index dcda13cac54..ad7d6fe3456 100644
--- a/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/document_features_store.cpp
@@ -7,8 +7,6 @@
#include <vespa/vespalib/btree/btreeroot.hpp>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
-//#include "predicate_index.h"
-
using vespalib::btree::BTreeNoLeafData;
using vespalib::datastore::EntryRef;
using vespalib::DataBuffer;
@@ -38,10 +36,8 @@ DocumentFeaturesStore::DocumentFeaturesStore(uint32_t arity)
namespace {
template <typename KeyComp, typename WordIndex>
-void deserializeWords(DataBuffer &buffer,
- memoryindex::WordStore &word_store,
- WordIndex &word_index,
- vector<EntryRef> &word_refs) {
+void
+deserializeWords(DataBuffer &buffer, memoryindex::WordStore &word_store, WordIndex &word_index, vector<EntryRef> &word_refs) {
uint32_t word_list_size = buffer.readInt32();
word_refs.reserve(word_list_size);
vector<char> word;
@@ -57,8 +53,8 @@ void deserializeWords(DataBuffer &buffer,
}
template <typename RangeFeaturesMap>
-void deserializeRanges(DataBuffer &buffer, vector<EntryRef> &word_refs,
- RangeFeaturesMap &ranges, size_t &num_ranges) {
+void
+deserializeRanges(DataBuffer &buffer, vector<EntryRef> &word_refs, RangeFeaturesMap &ranges, size_t &num_ranges) {
typedef typename RangeFeaturesMap::mapped_type::value_type Range;
uint32_t ranges_size = buffer.readInt32();
for (uint32_t i = 0; i < ranges_size; ++i) {
@@ -78,8 +74,8 @@ void deserializeRanges(DataBuffer &buffer, vector<EntryRef> &word_refs,
}
template <typename DocumentFeaturesMap>
-void deserializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs,
- size_t &num_features) {
+void
+deserializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs, size_t &num_features) {
uint32_t docs_size = buffer.readInt32();
for (uint32_t i = 0; i < docs_size; ++i) {
uint32_t doc_id = buffer.readInt32();
@@ -111,7 +107,8 @@ DocumentFeaturesStore::~DocumentFeaturesStore() {
_word_index.clear();
}
-void DocumentFeaturesStore::insert(uint64_t featureId, uint32_t docId) {
+void
+DocumentFeaturesStore::insert(uint64_t featureId, uint32_t docId) {
assert(docId != 0);
if (_currDocId != docId) {
auto docsItr = _docs.find(docId);
@@ -125,8 +122,8 @@ void DocumentFeaturesStore::insert(uint64_t featureId, uint32_t docId) {
++_numFeatures;
}
-void DocumentFeaturesStore::insert(const PredicateTreeAnnotations &annotations,
- uint32_t doc_id) {
+void
+DocumentFeaturesStore::insert(const PredicateTreeAnnotations &annotations, uint32_t doc_id) {
assert(doc_id != 0);
if (!annotations.features.empty()) {
auto it = _docs.find(doc_id);
@@ -172,15 +169,15 @@ DocumentFeaturesStore::get(uint32_t docId) const {
if (rangeItr != _ranges.end()) {
for (auto range : rangeItr->second) {
const char *label = _word_store.getWord(range.label_ref);
- PredicateRangeExpander::expandRange(
- label, range.from, range.to, _arity,
- std::inserter(features, features.end()));
+ PredicateRangeExpander::expandRange(label, range.from, range.to, _arity,
+ std::inserter(features, features.end()));
}
}
return features;
}
-void DocumentFeaturesStore::remove(uint32_t doc_id) {
+void
+DocumentFeaturesStore::remove(uint32_t doc_id) {
auto itr = _docs.find(doc_id);
if (itr != _docs.end()) {
_numFeatures = _numFeatures >= itr->second.size() ?
@@ -198,7 +195,8 @@ void DocumentFeaturesStore::remove(uint32_t doc_id) {
}
}
-vespalib::MemoryUsage DocumentFeaturesStore::getMemoryUsage() const {
+vespalib::MemoryUsage
+DocumentFeaturesStore::getMemoryUsage() const {
vespalib::MemoryUsage usage;
usage.incAllocatedBytes(_docs.getMemoryConsumption());
usage.incUsedBytes(_docs.getMemoryUsed());
@@ -219,9 +217,11 @@ vespalib::MemoryUsage DocumentFeaturesStore::getMemoryUsage() const {
namespace {
template <typename RangeFeaturesMap>
-void findUsedWords(const RangeFeaturesMap &ranges,
- unordered_map<uint32_t, uint32_t> &word_map,
- vector<EntryRef> &word_list) {
+void
+findUsedWords(const RangeFeaturesMap &ranges,
+ unordered_map<uint32_t, uint32_t> &word_map,
+ vector<EntryRef> &word_list)
+{
for (const auto &range_features_entry : ranges) {
for (const auto &range : range_features_entry.second) {
if (!word_map.count(range.label_ref.ref())) {
@@ -232,8 +232,10 @@ void findUsedWords(const RangeFeaturesMap &ranges,
}
}
-void serializeWords(DataBuffer &buffer, const vector<EntryRef> &word_list,
- const memoryindex::WordStore &word_store) {
+void
+serializeWords(DataBuffer &buffer, const vector<EntryRef> &word_list,
+ const memoryindex::WordStore &word_store)
+{
buffer.writeInt32(word_list.size());
for (const auto &word_ref : word_list) {
const char *word = word_store.getWord(word_ref);
@@ -244,8 +246,10 @@ void serializeWords(DataBuffer &buffer, const vector<EntryRef> &word_list,
}
template <typename RangeFeaturesMap>
-void serializeRanges(DataBuffer &buffer, RangeFeaturesMap &ranges,
- unordered_map<uint32_t, uint32_t> &word_map) {
+void
+serializeRanges(DataBuffer &buffer, RangeFeaturesMap &ranges,
+ unordered_map<uint32_t, uint32_t> &word_map)
+{
buffer.writeInt32(ranges.size());
for (const auto &range_features_entry : ranges) {
buffer.writeInt32(range_features_entry.first); // doc id
@@ -259,7 +263,8 @@ void serializeRanges(DataBuffer &buffer, RangeFeaturesMap &ranges,
}
template <typename DocumentFeaturesMap>
-void serializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs) {
+void
+serializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs) {
buffer.writeInt32(docs.size());
for (const auto &doc_features_entry : docs) {
buffer.writeInt32(doc_features_entry.first); // doc id
@@ -271,7 +276,8 @@ void serializeDocs(DataBuffer &buffer, DocumentFeaturesMap &docs) {
}
} // namespace
-void DocumentFeaturesStore::serialize(DataBuffer &buffer) const {
+void
+DocumentFeaturesStore::serialize(DataBuffer &buffer) const {
vector<EntryRef> word_list;
unordered_map<uint32_t, uint32_t> word_map;
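The serialize and deserialize helpers in this file all use the same length-prefixed layout: a 32-bit count followed by the entries, each entry itself length-prefixed where needed. A minimal sketch of that layout with a toy buffer (hypothetical Buffer type, host endianness, not vespalib::DataBuffer):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

struct Buffer {                       // toy append/read buffer
    std::vector<char> bytes;
    std::size_t pos = 0;
    void writeInt32(uint32_t v) { bytes.insert(bytes.end(), reinterpret_cast<char *>(&v), reinterpret_cast<char *>(&v) + 4); }
    uint32_t readInt32() { uint32_t v; std::memcpy(&v, bytes.data() + pos, 4); pos += 4; return v; }
    void writeBytes(const char *p, std::size_t n) { bytes.insert(bytes.end(), p, p + n); }
    void readBytes(char *p, std::size_t n) { std::memcpy(p, bytes.data() + pos, n); pos += n; }
};

void serializeWords(Buffer &buf, const std::vector<std::string> &words) {
    buf.writeInt32(static_cast<uint32_t>(words.size()));   // count first
    for (const auto &w : words) {
        buf.writeInt32(static_cast<uint32_t>(w.size()));
        buf.writeBytes(w.data(), w.size());
    }
}

std::vector<std::string> deserializeWords(Buffer &buf) {
    std::vector<std::string> words(buf.readInt32());
    for (auto &w : words) {
        w.resize(buf.readInt32());
        buf.readBytes(w.data(), w.size());                 // std::string::data() is writable since C++17
    }
    return words;
}
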
diff --git a/searchlib/src/vespa/searchlib/predicate/document_features_store.h b/searchlib/src/vespa/searchlib/predicate/document_features_store.h
index a45c7ba043a..442249d619a 100644
--- a/searchlib/src/vespa/searchlib/predicate/document_features_store.h
+++ b/searchlib/src/vespa/searchlib/predicate/document_features_store.h
@@ -54,14 +54,14 @@ class DocumentFeaturesStore {
vespalib::btree::NoAggregated, const KeyComp &> WordIndex;
DocumentFeaturesMap _docs;
- RangeFeaturesMap _ranges;
- WordStore _word_store;
- WordIndex _word_index;
- uint32_t _currDocId;
- FeatureVector *_currFeatures;
- size_t _numFeatures;
- size_t _numRanges;
- uint32_t _arity;
+ RangeFeaturesMap _ranges;
+ WordStore _word_store;
+ WordIndex _word_index;
+ uint32_t _currDocId;
+ FeatureVector *_currFeatures;
+ size_t _numFeatures;
+ size_t _numRanges;
+ uint32_t _arity;
void setCurrent(uint32_t docId, FeatureVector *features);
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h b/searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h
index 0ef2d81f094..9d2e90af7a5 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_bounds_posting_list.h
@@ -53,7 +53,8 @@ namespace {
} // namespace
template<typename Iterator>
-bool PredicateBoundsPostingList<Iterator>::next(uint32_t doc_id) {
+bool
+PredicateBoundsPostingList<Iterator>::next(uint32_t doc_id) {
if (_iterator.valid() && _iterator.getKey() <= doc_id) {
_iterator.linearSeek(doc_id + 1);
}
@@ -74,7 +75,8 @@ bool PredicateBoundsPostingList<Iterator>::next(uint32_t doc_id) {
}
template<typename Iterator>
-bool PredicateBoundsPostingList<Iterator>::nextInterval() {
+bool
+PredicateBoundsPostingList<Iterator>::nextInterval() {
uint32_t next_bounds;
do {
if (__builtin_expect(_interval_count == 1, true)) {
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp
index e9b1a6bd685..6cbe11e2240 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_index.cpp
@@ -17,16 +17,19 @@ using std::vector;
namespace search::predicate {
template <>
-void PredicateIndex::addPosting<Interval>(uint64_t feature, uint32_t doc_id, EntryRef ref) {
+void
+PredicateIndex::addPosting<Interval>(uint64_t feature, uint32_t doc_id, EntryRef ref) {
_interval_index.addPosting(feature, doc_id, ref);
}
template <>
-void PredicateIndex::addPosting<IntervalWithBounds>(uint64_t feature, uint32_t doc_id, EntryRef ref) {
+void
+PredicateIndex::addPosting<IntervalWithBounds>(uint64_t feature, uint32_t doc_id, EntryRef ref) {
_bounds_index.addPosting(feature, doc_id, ref);
}
template <typename IntervalT>
-void PredicateIndex::indexDocumentFeatures(uint32_t doc_id, const PredicateIndex::FeatureMap<IntervalT> &interval_map) {
+void
+PredicateIndex::indexDocumentFeatures(uint32_t doc_id, const PredicateIndex::FeatureMap<IntervalT> &interval_map) {
if (interval_map.empty()) {
return;
}
@@ -80,11 +83,10 @@ public:
} // namespace
-PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder,
+PredicateIndex::PredicateIndex(GenerationHolder &genHolder,
const DocIdLimitProvider &limit_provider,
const SimpleIndexConfig &simple_index_config, uint32_t arity)
: _arity(arity),
- _generation_handler(generation_handler),
_limit_provider(limit_provider),
_interval_index(genHolder, limit_provider, simple_index_config),
_bounds_index(genHolder, limit_provider, simple_index_config),
@@ -95,12 +97,11 @@ PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, Generation
{
}
-PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder,
+PredicateIndex::PredicateIndex(GenerationHolder &genHolder,
const DocIdLimitProvider &limit_provider,
const SimpleIndexConfig &simple_index_config, DataBuffer &buffer,
SimpleIndexDeserializeObserver<> & observer, uint32_t version)
: _arity(0),
- _generation_handler(generation_handler),
_limit_provider(limit_provider),
_interval_index(genHolder, limit_provider, simple_index_config),
_bounds_index(genHolder, limit_provider, simple_index_config),
@@ -121,15 +122,15 @@ PredicateIndex::PredicateIndex(GenerationHandler &generation_handler, Generation
_zero_constraint_docs.assign(builder);
IntervalDeserializer<Interval> interval_deserializer(_interval_store);
_interval_index.deserialize(buffer, interval_deserializer, observer, version);
- IntervalDeserializer<IntervalWithBounds>
- bounds_deserializer(_interval_store);
+ IntervalDeserializer<IntervalWithBounds> bounds_deserializer(_interval_store);
_bounds_index.deserialize(buffer, bounds_deserializer, observer, version);
commit();
}
PredicateIndex::~PredicateIndex() = default;
-void PredicateIndex::serialize(DataBuffer &buffer) const {
+void
+PredicateIndex::serialize(DataBuffer &buffer) const {
_features_store.serialize(buffer);
buffer.writeInt16(_arity);
buffer.writeInt32(_zero_constraint_docs.size());
@@ -142,25 +143,29 @@ void PredicateIndex::serialize(DataBuffer &buffer) const {
_bounds_index.serialize(buffer, bounds_serializer);
}
-void PredicateIndex::onDeserializationCompleted() {
+void
+PredicateIndex::onDeserializationCompleted() {
_interval_index.promoteOverThresholdVectors();
_bounds_index.promoteOverThresholdVectors();
}
-void PredicateIndex::indexDocument(uint32_t doc_id, const PredicateTreeAnnotations &annotations) {
+void
+PredicateIndex::indexDocument(uint32_t doc_id, const PredicateTreeAnnotations &annotations) {
indexDocumentFeatures(doc_id, annotations.interval_map);
indexDocumentFeatures(doc_id, annotations.bounds_map);
_features_store.insert(annotations, doc_id);
}
-void PredicateIndex::indexEmptyDocument(uint32_t doc_id)
+void
+PredicateIndex::indexEmptyDocument(uint32_t doc_id)
{
_zero_constraint_docs.insert(doc_id, vespalib::btree::BTreeNoLeafData::_instance);
}
namespace {
-void removeFromIndex(
- uint64_t feature, uint32_t doc_id, SimpleIndex<vespalib::datastore::EntryRef> &index, PredicateIntervalStore &interval_store)
+void
+removeFromIndex(uint64_t feature, uint32_t doc_id, SimpleIndex<vespalib::datastore::EntryRef> &index,
+ PredicateIntervalStore &interval_store)
{
auto result = index.removeFromPostingList(feature, doc_id);
if (result.second) { // Posting was removed
@@ -189,7 +194,8 @@ private:
} // namespace
-void PredicateIndex::removeDocument(uint32_t doc_id) {
+void
+PredicateIndex::removeDocument(uint32_t doc_id) {
_zero_constraint_docs.remove(doc_id);
auto features = _features_store.get(doc_id);
@@ -203,27 +209,31 @@ void PredicateIndex::removeDocument(uint32_t doc_id) {
_features_store.remove(doc_id);
}
-void PredicateIndex::commit() {
+void
+PredicateIndex::commit() {
_interval_index.commit();
_bounds_index.commit();
_zero_constraint_docs.getAllocator().freeze();
}
-void PredicateIndex::trimHoldLists(generation_t used_generation) {
+void
+PredicateIndex::trimHoldLists(generation_t used_generation) {
_interval_index.trimHoldLists(used_generation);
_bounds_index.trimHoldLists(used_generation);
_interval_store.trimHoldLists(used_generation);
_zero_constraint_docs.getAllocator().trimHoldLists(used_generation);
}
-void PredicateIndex::transferHoldLists(generation_t generation) {
+void
+PredicateIndex::transferHoldLists(generation_t generation) {
_interval_index.transferHoldLists(generation);
_bounds_index.transferHoldLists(generation);
_interval_store.transferHoldLists(generation);
_zero_constraint_docs.getAllocator().transferHoldLists(generation);
}
-vespalib::MemoryUsage PredicateIndex::getMemoryUsage() const {
+vespalib::MemoryUsage
+PredicateIndex::getMemoryUsage() const {
// TODO Include bit vector cache memory usage
vespalib::MemoryUsage combined;
combined.merge(_interval_index.getMemoryUsage());
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_index.h b/searchlib/src/vespa/searchlib/predicate/predicate_index.h
index d2ed70694a2..49bf77f2fcc 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_index.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_index.h
@@ -38,16 +38,15 @@ public:
using BTreeIterator = SimpleIndex<vespalib::datastore::EntryRef>::BTreeIterator;
using VectorIterator = SimpleIndex<vespalib::datastore::EntryRef>::VectorIterator;
private:
- uint32_t _arity;
- GenerationHandler &_generation_handler;
+ uint32_t _arity;
const DocIdLimitProvider &_limit_provider;
- IntervalIndex _interval_index;
- BoundsIndex _bounds_index;
- PredicateIntervalStore _interval_store;
- BTreeSet _zero_constraint_docs;
+ IntervalIndex _interval_index;
+ BoundsIndex _bounds_index;
+ PredicateIntervalStore _interval_store;
+ BTreeSet _zero_constraint_docs;
- DocumentFeaturesStore _features_store;
- mutable BitVectorCache _cache;
+ DocumentFeaturesStore _features_store;
+ mutable BitVectorCache _cache;
template <typename IntervalT>
void addPosting(uint64_t feature, uint32_t doc_id, vespalib::datastore::EntryRef ref);
@@ -55,15 +54,13 @@ private:
template <typename IntervalT>
void indexDocumentFeatures(uint32_t doc_id, const FeatureMap<IntervalT> &interval_map);
- PopulateInterface::Iterator::UP lookup(uint64_t key) const override;
-
public:
- PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder,
+ PredicateIndex(GenerationHolder &genHolder,
const DocIdLimitProvider &limit_provider,
const SimpleIndexConfig &simple_index_config, uint32_t arity);
// Deserializes PredicateIndex from buffer.
// The observer can be used to gain some insight into what has been added to the index.
- PredicateIndex(GenerationHandler &generation_handler, GenerationHolder &genHolder,
+ PredicateIndex(GenerationHolder &genHolder,
const DocIdLimitProvider &limit_provider,
const SimpleIndexConfig &simple_index_config, vespalib::DataBuffer &buffer,
SimpleIndexDeserializeObserver<> & observer, uint32_t version);
@@ -106,6 +103,9 @@ public:
* Adjust size of structures to have space for docId.
*/
void adjustDocIdLimit(uint32_t docId);
+ PopulateInterface::Iterator::UP lookup(uint64_t key) const override;
+ // Exposed for testing
+ void requireCachePopulation() const { _cache.requirePopulation(); }
};
extern template class SimpleIndex<vespalib::datastore::EntryRef>;
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp
index a92c16de462..d98e8a151dc 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval.cpp
@@ -5,14 +5,16 @@
namespace search::predicate {
-std::ostream &operator<<(std::ostream &out, const Interval &i) {
+std::ostream &
+operator<<(std::ostream &out, const Interval &i) {
std::ios_base::fmtflags flags = out.flags();
out << "0x" << std::hex << i.interval;
out.flags(flags);
return out;
}
-std::ostream &operator<<(std::ostream &out, const IntervalWithBounds &i) {
+std::ostream &
+operator<<(std::ostream &out, const IntervalWithBounds &i) {
std::ios_base::fmtflags flags = out.flags();
out << "0x" << std::hex << i.interval << ", 0x" << i.bounds;
out.flags(flags);
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h b/searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h
index f93d99b550b..33e15b2be33 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_posting_list.h
@@ -14,10 +14,10 @@ namespace search::predicate {
template<typename Iterator>
class PredicateIntervalPostingList : public PredicatePostingList {
const PredicateIntervalStore &_interval_store;
- Iterator _iterator;
- const Interval *_current_interval;
- uint32_t _interval_count;
- Interval _single_buf;
+ Iterator _iterator;
+ const Interval *_current_interval;
+ uint32_t _interval_count;
+ Interval _single_buf;
public:
PredicateIntervalPostingList(const PredicateIntervalStore &interval_store, Iterator it);
@@ -46,7 +46,8 @@ PredicateIntervalPostingList<Iterator>::PredicateIntervalPostingList(
}
template<typename Iterator>
-bool PredicateIntervalPostingList<Iterator>::next(uint32_t doc_id) {
+bool
+PredicateIntervalPostingList<Iterator>::next(uint32_t doc_id) {
if (!_iterator.valid()) {
return false;
}
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
index 28c82cb7a97..13be0f0127b 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.cpp
@@ -21,7 +21,8 @@ PredicateIntervalStore::PredicateIntervalStore()
: _store(),
_size1Type(1, 1024u, RefType::offsetSize()),
_store_adapter(_store),
- _ref_cache(_store_adapter) {
+ _ref_cache(_store_adapter)
+{
// This order determines type ids.
_store.addType(&_size1Type);
@@ -46,7 +47,8 @@ PredicateIntervalStore::~PredicateIntervalStore() {
// anyway.
//
template <typename IntervalT>
-EntryRef PredicateIntervalStore::insert(const vector<IntervalT> &intervals) {
+EntryRef
+PredicateIntervalStore::insert(const vector<IntervalT> &intervals) {
const uint32_t size = entrySize<IntervalT>() * intervals.size();
if (size == 0) {
return EntryRef();
@@ -81,7 +83,8 @@ EntryRef PredicateIntervalStore::insert(const vector<Interval> &);
template
EntryRef PredicateIntervalStore::insert(const vector<IntervalWithBounds> &);
-void PredicateIntervalStore::remove(EntryRef ref) {
+void
+PredicateIntervalStore::remove(EntryRef ref) {
if (ref.valid()) {
uint32_t buffer_id = RefType(ref).bufferId();
if (buffer_id == 0) { // single interval optimization.
@@ -96,11 +99,13 @@ void PredicateIntervalStore::remove(EntryRef ref) {
}
}
-void PredicateIntervalStore::trimHoldLists(generation_t used_generation) {
+void
+PredicateIntervalStore::trimHoldLists(generation_t used_generation) {
_store.trimHoldLists(used_generation);
}
-void PredicateIntervalStore::transferHoldLists(generation_t generation) {
+void
+PredicateIntervalStore::transferHoldLists(generation_t generation) {
_store.transferHoldLists(generation);
}
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h
index e4573866eb8..5f55a2d3d5f 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_interval_store.h
@@ -34,7 +34,7 @@ class PredicateIntervalStore {
}
};
DataStoreAdapter _store_adapter;
- RefCacheType _ref_cache;
+ RefCacheType _ref_cache;
// Return type for private allocation functions
template <typename T>
@@ -89,7 +89,8 @@ public:
* single interval optimization.
*/
template <typename IntervalT>
- const IntervalT *get(vespalib::datastore::EntryRef btree_ref,
+ const IntervalT
+ *get(vespalib::datastore::EntryRef btree_ref,
uint32_t &size_out,
IntervalT *single_buf) const
{
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h b/searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h
index 93e671f603f..50024913dcb 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_posting_list.h
@@ -16,9 +16,9 @@ class PredicatePostingList {
protected:
PredicatePostingList()
- : _docId(0),
- _subquery(UINT64_MAX) {
- }
+ : _docId(0),
+ _subquery(UINT64_MAX)
+ { }
void setDocId(uint32_t docId) { _docId = docId; }
diff --git a/searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h b/searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h
index 965c4ad3042..0268d2bdb0c 100644
--- a/searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h
+++ b/searchlib/src/vespa/searchlib/predicate/predicate_zstar_compressed_posting_list.h
@@ -41,7 +41,8 @@ PredicateZstarCompressedPostingList<Iterator>::PredicateZstarCompressedPostingLi
}
template<typename Iterator>
-bool PredicateZstarCompressedPostingList<Iterator>::next(uint32_t doc_id) {
+bool
+PredicateZstarCompressedPostingList<Iterator>::next(uint32_t doc_id) {
if (_iterator.valid() && _iterator.getKey() <= doc_id) {
_iterator.linearSeek(doc_id + 1);
}
@@ -57,7 +58,8 @@ bool PredicateZstarCompressedPostingList<Iterator>::next(uint32_t doc_id) {
}
template<typename Iterator>
-bool PredicateZstarCompressedPostingList<Iterator>::nextInterval() {
+bool
+PredicateZstarCompressedPostingList<Iterator>::nextInterval() {
uint32_t next_interval = UINT32_MAX;
if (_interval_count > 1) {
next_interval = _current_interval[1].interval;
diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.cpp b/searchlib/src/vespa/searchlib/predicate/simple_index.cpp
index 1b0db8f52d4..b0ef11e1c25 100644
--- a/searchlib/src/vespa/searchlib/predicate/simple_index.cpp
+++ b/searchlib/src/vespa/searchlib/predicate/simple_index.cpp
@@ -6,14 +6,12 @@
#include <vespa/vespalib/btree/btreeiterator.hpp>
#include <vespa/vespalib/btree/btreestore.hpp>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
-#include <vespa/vespalib/util/array.hpp>
#include <vespa/vespalib/datastore/buffer_type.hpp>
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.predicate.simple_index");
-namespace search::predicate {
- namespace simpleindex {
+namespace search::predicate::simpleindex {
bool log_enabled() {
return LOG_WOULD_LOG(debug);
@@ -25,6 +23,8 @@ void log_debug(vespalib::string &str) {
} // namespace simpleindex
+namespace search::predicate {
+
template class SimpleIndex<vespalib::datastore::EntryRef>;
}
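The hunk above folds the two nested namespace blocks into one C++17 nested namespace definition; a tiny standalone illustration of the equivalence:

// Pre-C++17: each level opened separately.
namespace search { namespace predicate { namespace simpleindex {
bool log_enabled_old();
}}}

// C++17: one definition opens the whole chain; both forms refer to the
// same namespace, so they can coexist in one code base.
namespace search::predicate::simpleindex {
bool log_enabled_new();
}
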
diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.h b/searchlib/src/vespa/searchlib/predicate/simple_index.h
index cfc288770c8..75dc540f787 100644
--- a/searchlib/src/vespa/searchlib/predicate/simple_index.h
+++ b/searchlib/src/vespa/searchlib/predicate/simple_index.h
@@ -141,12 +141,12 @@ private:
template <typename T>
using optional = std::optional<T>;
- Dictionary _dictionary;
- BTreeStore _btree_posting_lists;
- VectorStore _vector_posting_lists;
- GenerationHolder &_generation_holder;
- uint32_t _insert_remove_counter = 0;
- const SimpleIndexConfig _config;
+ Dictionary _dictionary;
+ BTreeStore _btree_posting_lists;
+ VectorStore _vector_posting_lists;
+ GenerationHolder &_generation_holder;
+ uint32_t _insert_remove_counter = 0;
+ const SimpleIndexConfig _config;
const DocIdLimitProvider &_limit_provider;
void insertIntoPosting(vespalib::datastore::EntryRef &ref, Key key, DocId doc_id, const Posting &posting);
@@ -164,12 +164,10 @@ private:
bool shouldRemoveVectorPosting(size_t size, double ratio) const;
size_t getVectorPostingSize(const PostingVector &vector) const {
return std::min(vector.size(),
- static_cast<size_t>(_limit_provider.getCommittedDocIdLimit()));
+ static_cast<size_t>(_limit_provider.getCommittedDocIdLimit()));
}
public:
- SimpleIndex(GenerationHolder &generation_holder, const DocIdLimitProvider &provider) :
- SimpleIndex(generation_holder, provider, SimpleIndexConfig()) {}
SimpleIndex(GenerationHolder &generation_holder,
const DocIdLimitProvider &provider, const SimpleIndexConfig &config)
: _generation_holder(generation_holder), _config(config), _limit_provider(provider) {}
@@ -219,8 +217,8 @@ public:
template<typename Posting, typename Key, typename DocId>
template<typename FunctionType>
-void SimpleIndex<Posting, Key, DocId>::foreach_frozen_key(
- vespalib::datastore::EntryRef ref, Key key, FunctionType func) const {
+void
+SimpleIndex<Posting, Key, DocId>::foreach_frozen_key(vespalib::datastore::EntryRef ref, Key key, FunctionType func) const {
auto it = _vector_posting_lists.getFrozenView().find(key);
double ratio = getDocumentRatio(getDocumentCount(ref), _limit_provider.getDocIdLimit());
if (it.valid() && ratio > _config.foreach_vector_threshold) {
diff --git a/searchlib/src/vespa/searchlib/predicate/simple_index.hpp b/searchlib/src/vespa/searchlib/predicate/simple_index.hpp
index b49218f1ba6..ada77b9fe38 100644
--- a/searchlib/src/vespa/searchlib/predicate/simple_index.hpp
+++ b/searchlib/src/vespa/searchlib/predicate/simple_index.hpp
@@ -13,8 +13,8 @@ namespace simpleindex {
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::insertIntoPosting(
- vespalib::datastore::EntryRef &ref, Key key, DocId doc_id, const Posting &posting) {
+void
+SimpleIndex<Posting, Key, DocId>::insertIntoPosting(vespalib::datastore::EntryRef &ref, Key key, DocId doc_id, const Posting &posting) {
bool ok = _btree_posting_lists.insert(ref, doc_id, posting);
if (!ok) {
_btree_posting_lists.remove(ref, doc_id);
@@ -26,8 +26,8 @@ void SimpleIndex<Posting, Key, DocId>::insertIntoPosting(
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::insertIntoVectorPosting(
- vespalib::datastore::EntryRef ref, Key key, DocId doc_id, const Posting &posting) {
+void
+SimpleIndex<Posting, Key, DocId>::insertIntoVectorPosting(vespalib::datastore::EntryRef ref, Key key, DocId doc_id, const Posting &posting) {
assert(doc_id < _limit_provider.getDocIdLimit());
auto it = _vector_posting_lists.find(key);
if (it.valid()) {
@@ -69,9 +69,8 @@ SimpleIndex<Posting, Key, DocId>::~SimpleIndex() {
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::serialize(
- vespalib::DataBuffer &buffer,
- const PostingSerializer<Posting> &serializer) const {
+void
+SimpleIndex<Posting, Key, DocId>::serialize(vespalib::DataBuffer &buffer, const PostingSerializer<Posting> &serializer) const {
assert(sizeof(Key) <= sizeof(uint64_t));
assert(sizeof(DocId) <= sizeof(uint32_t));
buffer.writeInt32(_dictionary.size());
@@ -90,10 +89,10 @@ void SimpleIndex<Posting, Key, DocId>::serialize(
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::deserialize(
- vespalib::DataBuffer &buffer,
- PostingDeserializer<Posting> &deserializer,
- SimpleIndexDeserializeObserver<Key, DocId> &observer, uint32_t version) {
+void
+SimpleIndex<Posting, Key, DocId>::deserialize(vespalib::DataBuffer &buffer, PostingDeserializer<Posting> &deserializer,
+ SimpleIndexDeserializeObserver<Key, DocId> &observer, uint32_t version)
+{
typename Dictionary::Builder builder(_dictionary.getAllocator());
uint32_t size = buffer.readInt32();
std::vector<vespalib::btree::BTreeKeyData<DocId, Posting>> postings;
@@ -128,8 +127,8 @@ void SimpleIndex<Posting, Key, DocId>::deserialize(
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::addPosting(Key key, DocId doc_id,
- const Posting &posting) {
+void
+SimpleIndex<Posting, Key, DocId>::addPosting(Key key, DocId doc_id, const Posting &posting) {
auto iter = _dictionary.find(key);
vespalib::datastore::EntryRef ref;
if (iter.valid()) {
@@ -178,8 +177,8 @@ SimpleIndex<Posting, Key, DocId>::removeFromPostingList(Key key, DocId doc_id) {
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::removeFromVectorPostingList(
- vespalib::datastore::EntryRef ref, Key key, DocId doc_id) {
+void
+SimpleIndex<Posting, Key, DocId>::removeFromVectorPostingList(vespalib::datastore::EntryRef ref, Key key, DocId doc_id) {
auto it = _vector_posting_lists.find(key);
if (it.valid()) {
if (!removeVectorIfBelowThreshold(ref, it)) {
@@ -189,7 +188,8 @@ void SimpleIndex<Posting, Key, DocId>::removeFromVectorPostingList(
};
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::pruneBelowThresholdVectors() {
+void
+SimpleIndex<Posting, Key, DocId>::pruneBelowThresholdVectors() {
// Check if it is time to prune any vector postings
if (++_insert_remove_counter % _config.vector_prune_frequency > 0) return;
@@ -204,7 +204,8 @@ void SimpleIndex<Posting, Key, DocId>::pruneBelowThresholdVectors() {
};
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::promoteOverThresholdVectors() {
+void
+SimpleIndex<Posting, Key, DocId>::promoteOverThresholdVectors() {
for (auto it = _dictionary.begin(); it.valid(); ++it) {
Key key = it.getKey();
if (!_vector_posting_lists.find(key).valid()) {
@@ -214,8 +215,8 @@ void SimpleIndex<Posting, Key, DocId>::promoteOverThresholdVectors() {
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::logVector(
- const char *action, Key key, size_t document_count, double ratio, size_t vector_length) const {
+void
+SimpleIndex<Posting, Key, DocId>::logVector(const char *action, Key key, size_t document_count, double ratio, size_t vector_length) const {
if (!simpleindex::log_enabled()) return;
auto msg = vespalib::make_string(
"%s vector for key '%016" PRIx64 "' with length %zu. Contains %zu documents "
@@ -227,7 +228,8 @@ void SimpleIndex<Posting, Key, DocId>::logVector(
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::createVectorIfOverThreshold(vespalib::datastore::EntryRef ref, Key key) {
+void
+SimpleIndex<Posting, Key, DocId>::createVectorIfOverThreshold(vespalib::datastore::EntryRef ref, Key key) {
uint32_t doc_id_limit = _limit_provider.getDocIdLimit();
size_t size = getDocumentCount(ref);
double ratio = getDocumentRatio(size, doc_id_limit);
@@ -242,8 +244,8 @@ void SimpleIndex<Posting, Key, DocId>::createVectorIfOverThreshold(vespalib::dat
}
template <typename Posting, typename Key, typename DocId>
-bool SimpleIndex<Posting, Key, DocId>::removeVectorIfBelowThreshold(
- vespalib::datastore::EntryRef ref, typename VectorStore::Iterator &it) {
+bool
+SimpleIndex<Posting, Key, DocId>::removeVectorIfBelowThreshold(vespalib::datastore::EntryRef ref, typename VectorStore::Iterator &it) {
size_t size = getDocumentCount(ref);
double ratio = getDocumentRatio(size, _limit_provider.getDocIdLimit());
if (shouldRemoveVectorPosting(size, ratio)) {
@@ -257,36 +259,41 @@ bool SimpleIndex<Posting, Key, DocId>::removeVectorIfBelowThreshold(
}
template <typename Posting, typename Key, typename DocId>
-double SimpleIndex<Posting, Key, DocId>::getDocumentRatio(size_t document_count,
- uint32_t doc_id_limit) const {
+double
+SimpleIndex<Posting, Key, DocId>::getDocumentRatio(size_t document_count, uint32_t doc_id_limit) const {
assert(doc_id_limit > 1);
return document_count / static_cast<double>(doc_id_limit - 1);
};
template <typename Posting, typename Key, typename DocId>
-size_t SimpleIndex<Posting, Key, DocId>::getDocumentCount(vespalib::datastore::EntryRef ref) const {
+size_t
+SimpleIndex<Posting, Key, DocId>::getDocumentCount(vespalib::datastore::EntryRef ref) const {
return _btree_posting_lists.size(ref);
};
template <typename Posting, typename Key, typename DocId>
-bool SimpleIndex<Posting, Key, DocId>::shouldRemoveVectorPosting(size_t size, double ratio) const {
+bool
+SimpleIndex<Posting, Key, DocId>::shouldRemoveVectorPosting(size_t size, double ratio) const {
return size < _config.lower_vector_size_threshold || ratio < _config.lower_docid_freq_threshold;
};
template <typename Posting, typename Key, typename DocId>
-bool SimpleIndex<Posting, Key, DocId>::shouldCreateVectorPosting(size_t size, double ratio) const {
+bool
+SimpleIndex<Posting, Key, DocId>::shouldCreateVectorPosting(size_t size, double ratio) const {
return size >= _config.upper_vector_size_threshold && ratio >= _config.upper_docid_freq_threshold;
};
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::commit() {
+void
+SimpleIndex<Posting, Key, DocId>::commit() {
_dictionary.getAllocator().freeze();
_btree_posting_lists.freeze();
_vector_posting_lists.getAllocator().freeze();
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::trimHoldLists(generation_t used_generation) {
+void
+SimpleIndex<Posting, Key, DocId>::trimHoldLists(generation_t used_generation) {
_btree_posting_lists.trimHoldLists(used_generation);
_dictionary.getAllocator().trimHoldLists(used_generation);
_vector_posting_lists.getAllocator().trimHoldLists(used_generation);
@@ -294,14 +301,16 @@ void SimpleIndex<Posting, Key, DocId>::trimHoldLists(generation_t used_generatio
}
template <typename Posting, typename Key, typename DocId>
-void SimpleIndex<Posting, Key, DocId>::transferHoldLists(generation_t generation) {
+void
+SimpleIndex<Posting, Key, DocId>::transferHoldLists(generation_t generation) {
_dictionary.getAllocator().transferHoldLists(generation);
_btree_posting_lists.transferHoldLists(generation);
_vector_posting_lists.getAllocator().transferHoldLists(generation);
}
template <typename Posting, typename Key, typename DocId>
-vespalib::MemoryUsage SimpleIndex<Posting, Key, DocId>::getMemoryUsage() const {
+vespalib::MemoryUsage
+SimpleIndex<Posting, Key, DocId>::getMemoryUsage() const {
vespalib::MemoryUsage combined;
combined.merge(_dictionary.getMemoryUsage());
combined.merge(_btree_posting_lists.getMemoryUsage());
diff --git a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp
index 66acc2f0836..24d731156b3 100644
--- a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.cpp
@@ -8,13 +8,13 @@
#include <vespa/searchlib/predicate/predicate_hash.h>
#include <vespa/searchlib/predicate/predicate_index.h>
#include <vespa/searchlib/query/tree/termnodes.h>
-#include <vespa/vespalib/btree/btree.hpp>
#include <vespa/vespalib/btree/btreeroot.hpp>
#include <vespa/vespalib/btree/btreeiterator.hpp>
#include <vespa/vespalib/btree/btreestore.hpp>
#include <vespa/vespalib/btree/btreenodeallocator.hpp>
#include <vespa/vespalib/util/memory_allocator.h>
#include <algorithm>
+
#include <vespa/log/log.h>
LOG_SETUP(".searchlib.predicate.predicate_blueprint");
#include <vespa/searchlib/predicate/predicate_range_term_expander.h>
@@ -54,7 +54,8 @@ struct MyRangeHandler {
vector<BoundsEntry> &bounds_entries;
uint64_t subquery_bitmap;
- void handleRange(const string &label) {
+ void
+ handleRange(const string &label) {
uint64_t feature = PredicateHash::hash64(label);
auto iterator = interval_index.lookup(feature);
if (iterator.valid()) {
@@ -62,7 +63,8 @@ struct MyRangeHandler {
interval_entries.push_back({iterator.getData(), subquery_bitmap, sz, feature});
}
}
- void handleEdge(const string &label, uint32_t value) {
+ void
+ handleEdge(const string &label, uint32_t value) {
uint64_t feature = PredicateHash::hash64(label);
auto iterator = bounds_index.lookup(feature);
if (iterator.valid()) {
@@ -73,18 +75,19 @@ struct MyRangeHandler {
};
template <typename Entry>
-void pushRangeDictionaryEntries(
- const Entry &entry,
- const PredicateIndex &index,
- vector<IntervalEntry> &interval_entries,
- vector<BoundsEntry> &bounds_entries) {
+void
+pushRangeDictionaryEntries(const Entry &entry, const PredicateIndex &index,
+ vector<IntervalEntry> &interval_entries,
+ vector<BoundsEntry> &bounds_entries)
+{
PredicateRangeTermExpander expander(index.getArity());
MyRangeHandler handler{index.getIntervalIndex(), index.getBoundsIndex(), interval_entries,
bounds_entries, entry.getSubQueryBitmap()};
expander.expand(entry.getKey(), entry.getValue(), handler);
}
-void pushZStarPostingList(const SimpleIndex<vespalib::datastore::EntryRef> &interval_index,
+void
+pushZStarPostingList(const SimpleIndex<vespalib::datastore::EntryRef> &interval_index,
vector<IntervalEntry> &interval_entries) {
uint64_t feature = Constants::z_star_hash;
auto iterator = interval_index.lookup(feature);
@@ -96,7 +99,8 @@ void pushZStarPostingList(const SimpleIndex<vespalib::datastore::EntryRef> &inte
} // namespace
-void PredicateBlueprint::addPostingToK(uint64_t feature)
+void
+PredicateBlueprint::addPostingToK(uint64_t feature)
{
const auto &interval_index = _index.getIntervalIndex();
auto tmp = interval_index.lookup(feature);
@@ -115,7 +119,8 @@ void PredicateBlueprint::addPostingToK(uint64_t feature)
}
}
-void PredicateBlueprint::addBoundsPostingToK(uint64_t feature)
+void
+PredicateBlueprint::addBoundsPostingToK(uint64_t feature)
{
const auto &bounds_index = _index.getBoundsIndex();
auto tmp = bounds_index.lookup(feature);
@@ -134,7 +139,8 @@ void PredicateBlueprint::addBoundsPostingToK(uint64_t feature)
}
}
-void PredicateBlueprint::addZeroConstraintToK()
+void
+PredicateBlueprint::addZeroConstraintToK()
{
uint8_t *kVBase = &_kV[0];
size_t kVSize = _kV.size();
@@ -174,15 +180,14 @@ PredicateBlueprint::PredicateBlueprint(const FieldSpecBase &field,
pushValueDictionaryEntry(entry, interval_index, _interval_dict_entries);
}
for (const auto &entry : term.getRangeFeatures()) {
- pushRangeDictionaryEntries(entry, _index, _interval_dict_entries,
- _bounds_dict_entries);
+        pushRangeDictionaryEntries(entry, _index, _interval_dict_entries, _bounds_dict_entries);
}
pushZStarPostingList(interval_index, _interval_dict_entries);
BitVectorCache::KeyAndCountSet keys;
keys.reserve(_interval_dict_entries.size());
for (const auto & e : _interval_dict_entries) {
- keys.push_back({e.feature, e.size});
+ keys.emplace_back(e.feature, e.size);
}
_cachedFeatures = _index.lookupCachedSet(keys);
@@ -202,40 +207,43 @@ PredicateBlueprint::PredicateBlueprint(const FieldSpecBase &field,
});
- if (zero_constraints_docs.size() == 0 &&
+ if ((zero_constraints_docs.size() == 0) &&
_interval_dict_entries.empty() && _bounds_dict_entries.empty() &&
- !_zstar_dict_entry.valid()) {
+ !_zstar_dict_entry.valid())
+ {
setEstimate(HitEstimate(0, true));
} else {
setEstimate(HitEstimate(static_cast<uint32_t>(zero_constraints_docs.size()), false));
}
}
-PredicateBlueprint::~PredicateBlueprint() {}
+PredicateBlueprint::~PredicateBlueprint() = default;
namespace {
- template<typename DictEntry, typename VectorIteratorEntry, typename BTreeIteratorEntry>
- void lookupPostingLists(const std::vector<DictEntry> &dict_entries,
- std::vector<VectorIteratorEntry> &vector_iterators,
- std::vector<BTreeIteratorEntry> &btree_iterators,
- const SimpleIndex<vespalib::datastore::EntryRef> &index)
- {
- for (const auto &entry : dict_entries) {
- auto vector_iterator = index.getVectorPostingList(entry.feature);
- if (vector_iterator) {
- vector_iterators.push_back(VectorIteratorEntry{*vector_iterator, entry});
- } else {
- auto btree_iterator = index.getBTreePostingList(entry.entry_ref);
- btree_iterators.push_back(BTreeIteratorEntry{btree_iterator, entry});
- }
+template<typename DictEntry, typename VectorIteratorEntry, typename BTreeIteratorEntry>
+void
+lookupPostingLists(const std::vector<DictEntry> &dict_entries,
+ std::vector<VectorIteratorEntry> &vector_iterators,
+ std::vector<BTreeIteratorEntry> &btree_iterators,
+ const SimpleIndex<vespalib::datastore::EntryRef> &index)
+{
+ for (const auto &entry : dict_entries) {
+ auto vector_iterator = index.getVectorPostingList(entry.feature);
+ if (vector_iterator) {
+ vector_iterators.push_back(VectorIteratorEntry{*vector_iterator, entry});
+ } else {
+ auto btree_iterator = index.getBTreePostingList(entry.entry_ref);
+ btree_iterators.push_back(BTreeIteratorEntry{btree_iterator, entry});
}
+ }
- };
+}
}
-void PredicateBlueprint::fetchPostings(const ExecuteInfo &) {
+void
+PredicateBlueprint::fetchPostings(const ExecuteInfo &) {
if (!_fetch_postings_done) {
const auto &interval_index = _index.getIntervalIndex();
const auto &bounds_index = _index.getBoundsIndex();
@@ -277,29 +285,31 @@ PredicateBlueprint::createLeafSearch(const fef::TermFieldMatchDataArray &tfmda,
PredicateAttribute::MinFeatureHandle mfh = attribute.getMinFeatureVector();
auto interval_range_vector = attribute.getIntervalRangeVector();
auto max_interval_range = attribute.getMaxIntervalRange();
- return SearchIterator::UP(new PredicateSearch(mfh.first, interval_range_vector, max_interval_range, _kV,
- createPostingLists(), tfmda));
+ return std::make_unique<PredicateSearch>(mfh.first, interval_range_vector, max_interval_range, _kV,
+ createPostingLists(), tfmda);
}
namespace {
- template<typename IteratorEntry, typename PostingListFactory>
- void createPredicatePostingLists(const std::vector<IteratorEntry> &iterator_entries,
- std::vector<PredicatePostingList::UP> &posting_lists,
- PostingListFactory posting_list_factory)
- {
- for (const auto &entry : iterator_entries) {
- if (entry.iterator.valid()) {
- auto posting_list = posting_list_factory(entry);
- posting_list->setSubquery(entry.entry.subquery);
- posting_lists.emplace_back(PredicatePostingList::UP(posting_list));
- }
+template<typename IteratorEntry, typename PostingListFactory>
+void
+createPredicatePostingLists(const std::vector<IteratorEntry> &iterator_entries,
+ std::vector<PredicatePostingList::UP> &posting_lists,
+ PostingListFactory posting_list_factory)
+{
+ for (const auto &entry : iterator_entries) {
+ if (entry.iterator.valid()) {
+ auto posting_list = posting_list_factory(entry);
+ posting_list->setSubquery(entry.entry.subquery);
+ posting_lists.emplace_back(PredicatePostingList::UP(posting_list));
}
}
+}
}
-std::vector<PredicatePostingList::UP> PredicateBlueprint::createPostingLists() const {
+std::vector<PredicatePostingList::UP>
+PredicateBlueprint::createPostingLists() const {
size_t total_size = _interval_btree_iterators.size() + _interval_vector_iterators.size() +
_bounds_btree_iterators.size() + _bounds_vector_iterators.size() + 2;
std::vector<PredicatePostingList::UP> posting_lists;
@@ -333,17 +343,15 @@ std::vector<PredicatePostingList::UP> PredicateBlueprint::createPostingLists() c
});
if (_zstar_vector_iterator && _zstar_vector_iterator->valid()) {
- auto posting_list = PredicatePostingList::UP(
- new PredicateZstarCompressedPostingList<VectorIterator>(interval_store, *_zstar_vector_iterator));
+ auto posting_list = std::make_unique<PredicateZstarCompressedPostingList<VectorIterator>>(interval_store, *_zstar_vector_iterator);
posting_lists.emplace_back(std::move(posting_list));
} else if (_zstar_btree_iterator && _zstar_btree_iterator->valid()) {
- auto posting_list = PredicatePostingList::UP(
- new PredicateZstarCompressedPostingList<BTreeIterator>(interval_store, *_zstar_btree_iterator));
+ auto posting_list = std::make_unique<PredicateZstarCompressedPostingList<BTreeIterator>>(interval_store, *_zstar_btree_iterator);
posting_lists.emplace_back(std::move(posting_list));
}
auto iterator = _index.getZeroConstraintDocs().begin();
if (iterator.valid()) {
- auto posting_list = PredicatePostingList::UP(new PredicateZeroConstraintPostingList(iterator));
+ auto posting_list = std::make_unique<PredicateZeroConstraintPostingList>(iterator);
posting_lists.emplace_back(std::move(posting_list));
}
return posting_lists;
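Several of the changes above swap an explicit new wrapped in a unique_ptr for std::make_unique; a minimal illustration with a hypothetical type:

#include <memory>

struct PostingListStub {
    explicit PostingListStub(int id) : id(id) {}
    int id;
};
using UP = std::unique_ptr<PostingListStub>;

UP make_old_style(int id) {
    return UP(new PostingListStub(id));            // bare new handed to unique_ptr
}

UP make_new_style(int id) {
    return std::make_unique<PostingListStub>(id);  // same ownership, no bare new
}
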
diff --git a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h
index 9609cd4f6c9..ef225e86c50 100644
--- a/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/predicate_blueprint.h
@@ -50,8 +50,11 @@ public:
void fetchPostings(const ExecuteInfo &execInfo) override;
SearchIterator::UP
- createLeafSearch(const fef::TermFieldMatchDataArray &tfmda,
- bool strict) const override;
+ createLeafSearch(const fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
+
+ // Exposed for testing
+ const BitVectorCache::CountVector & getKV() const { return _kV; }
+ const BitVectorCache::KeySet & getCachedFeatures() const { return _cachedFeatures; }
private:
using BTreeIterator = predicate::SimpleIndex<vespalib::datastore::EntryRef>::BTreeIterator;
using VectorIterator = predicate::SimpleIndex<vespalib::datastore::EntryRef>::VectorIterator;
@@ -70,24 +73,24 @@ private:
void addZeroConstraintToK();
std::vector<predicate::PredicatePostingList::UP> createPostingLists() const;
- const PredicateAttribute & _attribute;
+ const PredicateAttribute & _attribute;
const predicate::PredicateIndex &_index;
- Alloc _kVBacking;
- BitVectorCache::CountVector _kV;
- BitVectorCache::KeySet _cachedFeatures;
+ Alloc _kVBacking;
+ BitVectorCache::CountVector _kV;
+ BitVectorCache::KeySet _cachedFeatures;
- std::vector<IntervalEntry> _interval_dict_entries;
- std::vector<BoundsEntry> _bounds_dict_entries;
- vespalib::datastore::EntryRef _zstar_dict_entry;
+ std::vector<IntervalEntry> _interval_dict_entries;
+ std::vector<BoundsEntry> _bounds_dict_entries;
+ vespalib::datastore::EntryRef _zstar_dict_entry;
- std::vector<IntervalIteratorEntry<BTreeIterator>> _interval_btree_iterators;
+ std::vector<IntervalIteratorEntry<BTreeIterator>> _interval_btree_iterators;
std::vector<IntervalIteratorEntry<VectorIterator>> _interval_vector_iterators;
- std::vector<BoundsIteratorEntry<BTreeIterator>> _bounds_btree_iterators;
- std::vector<BoundsIteratorEntry<VectorIterator>> _bounds_vector_iterators;
+ std::vector<BoundsIteratorEntry<BTreeIterator>> _bounds_btree_iterators;
+ std::vector<BoundsIteratorEntry<VectorIterator>> _bounds_vector_iterators;
// The zstar iterator is either a vector or a btree iterator.
- optional<BTreeIterator> _zstar_btree_iterator;
+ optional<BTreeIterator> _zstar_btree_iterator;
optional<VectorIterator> _zstar_vector_iterator;
- bool _fetch_postings_done;
+ bool _fetch_postings_done;
};
}
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
index 31ff3845d74..545ee7cfa96 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
@@ -25,8 +25,8 @@ namespace {
// TODO: Move this to MemoryAllocator, with name PAGE_SIZE.
constexpr size_t small_page_size = 4_Ki;
-constexpr size_t min_num_arrays_for_new_buffer = 8_Ki;
-constexpr float alloc_grow_factor = 0.2;
+constexpr size_t min_num_arrays_for_new_buffer = 512_Ki;
+constexpr float alloc_grow_factor = 0.3;
// TODO: Adjust these numbers to what we accept as max in config.
constexpr size_t max_level_array_size = 16;
constexpr size_t max_link_array_size = 64;
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java b/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java
index 0c9148ad834..34e5fe49f69 100644
--- a/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/slobrok/SlobrokMonitorManagerImpl.java
@@ -50,7 +50,7 @@ public class SlobrokMonitorManagerImpl extends AbstractComponent implements Slob
private SlobrokMonitorManagerImpl(Transport transport, Supervisor orb, DuperModelManager duperModel) {
this(() -> new SlobrokMonitor(orb), transport, duperModel);
- orb.useSmallBuffers();
+ orb.setDropEmptyBuffers(true);
}
SlobrokMonitorManagerImpl(Supplier<SlobrokMonitor> slobrokMonitorFactory, Transport transport, DuperModelManager duperModel) {
diff --git a/slobrok/src/tests/registerapi/registerapi.cpp b/slobrok/src/tests/registerapi/registerapi.cpp
index 59bc4690985..696812e2a3d 100644
--- a/slobrok/src/tests/registerapi/registerapi.cpp
+++ b/slobrok/src/tests/registerapi/registerapi.cpp
@@ -6,6 +6,7 @@
#include <vespa/slobrok/sbregister.h>
#include <vespa/slobrok/server/slobrokserver.h>
#include <vespa/fnet/frt/supervisor.h>
+#include <vespa/fnet/transport.h>
#include <sstream>
#include <algorithm>
#include <thread>
@@ -217,5 +218,6 @@ Test::Main()
.add("F/y/w", myspec.c_str())));
mock.stop();
+ server.shutdown();
TEST_DONE();
}
diff --git a/slobrok/src/vespa/slobrok/sbmirror.cpp b/slobrok/src/vespa/slobrok/sbmirror.cpp
index 5f6a54504e5..8102e1fecbf 100644
--- a/slobrok/src/vespa/slobrok/sbmirror.cpp
+++ b/slobrok/src/vespa/slobrok/sbmirror.cpp
@@ -26,7 +26,6 @@ MirrorAPI::MirrorAPI(FRT_Supervisor &orb, const ConfiguratorFactory & config)
_configurator(config.create(_slobrokSpecs)),
_currSlobrok(""),
_rpc_ms(100),
- _idx(0),
_backOff(),
_target(0),
_req(0)
diff --git a/slobrok/src/vespa/slobrok/sbmirror.h b/slobrok/src/vespa/slobrok/sbmirror.h
index ad86daa56bb..ec1ce22194b 100644
--- a/slobrok/src/vespa/slobrok/sbmirror.h
+++ b/slobrok/src/vespa/slobrok/sbmirror.h
@@ -101,7 +101,6 @@ private:
Configurator::UP _configurator;
std::string _currSlobrok;
int _rpc_ms;
- uint32_t _idx;
BackOff _backOff;
FRT_Target *_target;
FRT_RPCRequest *_req;
diff --git a/staging_vespalib/src/vespa/vespalib/util/programoptions.cpp b/staging_vespalib/src/vespa/vespalib/util/programoptions.cpp
index e38c54aaba0..8da7780f203 100644
--- a/staging_vespalib/src/vespa/vespalib/util/programoptions.cpp
+++ b/staging_vespalib/src/vespa/vespalib/util/programoptions.cpp
@@ -3,6 +3,7 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/exceptions.h>
#include <boost/lexical_cast.hpp>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".programoptions");
diff --git a/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java b/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java
index 4d968914dfa..d21211bdffe 100644
--- a/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java
+++ b/standalone-container/src/main/java/com/yahoo/container/standalone/LocalFileDb.java
@@ -9,11 +9,11 @@ import com.yahoo.net.HostName;
import java.io.File;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
+import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@@ -78,12 +78,13 @@ public class LocalFileDb implements FileAcquirer, FileRegistry {
throw new RuntimeException("addUri(String uri) is not implemented here.");
}
- public String fileSourceHost() {
- return HostName.getLocalhost();
+ @Override
+ public FileReference addBlob(ByteBuffer blob) {
+ throw new RuntimeException("addBlob(ByteBuffer blob) is not implemented here.");
}
- public Set<String> allRelativePaths() {
- return fileReferenceToFile.values().stream().map(File::getPath).collect(Collectors.toSet());
+ public String fileSourceHost() {
+ return HostName.getLocalhost();
}
private static Constructor<FileReference> createFileReferenceConstructor() {
diff --git a/storage/src/tests/distributor/CMakeLists.txt b/storage/src/tests/distributor/CMakeLists.txt
index f43280a5b44..fad8ca0bb25 100644
--- a/storage/src/tests/distributor/CMakeLists.txt
+++ b/storage/src/tests/distributor/CMakeLists.txt
@@ -7,14 +7,12 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST
bucket_db_prune_elision_test.cpp
bucketdatabasetest.cpp
bucketdbmetricupdatertest.cpp
- bucketdbupdatertest.cpp
bucketgctimecalculatortest.cpp
bucketstateoperationtest.cpp
distributor_bucket_space_test.cpp
distributor_host_info_reporter_test.cpp
distributor_message_sender_stub.cpp
distributor_stripe_pool_test.cpp
- distributortest.cpp
distributortestutil.cpp
externaloperationhandlertest.cpp
garbagecollectiontest.cpp
@@ -22,6 +20,8 @@ vespa_add_executable(storage_distributor_gtest_runner_app TEST
gtest_runner.cpp
idealstatemanagertest.cpp
joinbuckettest.cpp
+ legacy_bucket_db_updater_test.cpp
+ legacy_distributor_test.cpp
maintenanceschedulertest.cpp
mergelimitertest.cpp
mergeoperationtest.cpp
diff --git a/storage/src/tests/distributor/blockingoperationstartertest.cpp b/storage/src/tests/distributor/blockingoperationstartertest.cpp
index 5203fec2462..861f8e72832 100644
--- a/storage/src/tests/distributor/blockingoperationstartertest.cpp
+++ b/storage/src/tests/distributor/blockingoperationstartertest.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/storage/frameworkimpl/component/storagecomponentregisterimpl.h>
#include <vespa/storage/distributor/blockingoperationstarter.h>
+#include <vespa/storage/distributor/distributor_stripe_operation_context.h>
#include <vespa/storage/distributor/pendingmessagetracker.h>
#include <vespa/storage/distributor/operation_sequencer.h>
#include <tests/distributor/maintenancemocks.h>
@@ -13,6 +14,86 @@ using namespace ::testing;
namespace storage::distributor {
+struct FakeDistributorStripeOperationContext : public DistributorStripeOperationContext {
+
+ PendingMessageTracker& _message_tracker;
+
+ explicit FakeDistributorStripeOperationContext(PendingMessageTracker& message_tracker)
+ : _message_tracker(message_tracker)
+ {}
+
+ ~FakeDistributorStripeOperationContext() override = default;
+
+ // From DistributorOperationContext:
+ api::Timestamp generate_unique_timestamp() override {
+ abort();
+ }
+ const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept override {
+ abort();
+ }
+ DistributorBucketSpaceRepo& bucket_space_repo() noexcept override {
+ abort();
+ }
+ const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept override {
+ abort();
+ }
+ DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept override {
+ abort();
+ }
+ const DistributorConfiguration& distributor_config() const noexcept override {
+ abort();
+ }
+ // From DistributorStripeOperationContext:
+ void update_bucket_database(const document::Bucket&, const BucketCopy&, uint32_t) override {
+ abort();
+ }
+ void update_bucket_database(const document::Bucket&, const std::vector<BucketCopy>&, uint32_t) override {
+ abort();
+ }
+ void remove_node_from_bucket_database(const document::Bucket&, uint16_t) override {
+ abort();
+ }
+ void remove_nodes_from_bucket_database(const document::Bucket&, const std::vector<uint16_t>&) override {
+ abort();
+ }
+ document::BucketId make_split_bit_constrained_bucket_id(const document::DocumentId&) const override {
+ abort();
+ }
+ void recheck_bucket_info(uint16_t, const document::Bucket&) override {
+ abort();
+ }
+ document::BucketId get_sibling(const document::BucketId&) const override {
+ abort();
+ }
+ void send_inline_split_if_bucket_too_large(document::BucketSpace, const BucketDatabase::Entry&, uint8_t) override {
+ abort();
+ }
+ OperationRoutingSnapshot read_snapshot_for_bucket(const document::Bucket&) const override {
+ abort();
+ }
+ PendingMessageTracker& pending_message_tracker() noexcept override {
+ return _message_tracker;
+ }
+ const PendingMessageTracker& pending_message_tracker() const noexcept override {
+ return _message_tracker;
+ }
+ bool has_pending_message(uint16_t, const document::Bucket&, uint32_t) const override {
+ abort();
+ }
+ const lib::ClusterState* pending_cluster_state_or_null(const document::BucketSpace&) const override {
+ abort();
+ }
+ const lib::ClusterStateBundle& cluster_state_bundle() const override {
+ abort();
+ }
+ bool storage_node_is_up(document::BucketSpace, uint32_t) const override {
+ abort();
+ }
+ const BucketGcTimeCalculator::BucketIdHasher& bucket_id_hasher() const override {
+ abort();
+ }
+};
+
struct BlockingOperationStarterTest : Test {
std::shared_ptr<Operation> createMockOperation() {
return std::make_shared<MockOperation>(makeDocumentBucket(BucketId(16, 1)));
@@ -27,6 +108,7 @@ struct BlockingOperationStarterTest : Test {
std::unique_ptr<MockOperationStarter> _starterImpl;
std::unique_ptr<StorageComponentRegisterImpl> _compReg;
std::unique_ptr<PendingMessageTracker> _messageTracker;
+ std::unique_ptr<FakeDistributorStripeOperationContext> _fake_ctx;
std::unique_ptr<OperationSequencer> _operation_sequencer;
std::unique_ptr<BlockingOperationStarter> _operationStarter;
@@ -41,8 +123,9 @@ BlockingOperationStarterTest::SetUp()
_compReg->setClock(_clock);
_clock.setAbsoluteTimeInSeconds(1);
_messageTracker = std::make_unique<PendingMessageTracker>(*_compReg);
+ _fake_ctx = std::make_unique<FakeDistributorStripeOperationContext>(*_messageTracker);
_operation_sequencer = std::make_unique<OperationSequencer>();
- _operationStarter = std::make_unique<BlockingOperationStarter>(*_messageTracker, *_operation_sequencer, *_starterImpl);
+ _operationStarter = std::make_unique<BlockingOperationStarter>(*_fake_ctx, *_operation_sequencer, *_starterImpl);
}
TEST_F(BlockingOperationStarterTest, operation_not_blocked_when_no_messages_pending) {
diff --git a/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
index e1010285dba..934ecc7456b 100644
--- a/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
+++ b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
@@ -11,10 +11,12 @@
namespace storage::distributor {
-using PerNodeBucketSpacesStats = BucketSpacesStatsProvider::PerNodeBucketSpacesStats;
using End = vespalib::JsonStream::End;
using File = vespalib::File;
+using MinReplicaStats = std::unordered_map<uint16_t, uint32_t>;
using Object = vespalib::JsonStream::Object;
+using PerNodeBucketSpacesStats = BucketSpacesStatsProvider::PerNodeBucketSpacesStats;
+using BucketSpacesStats = BucketSpacesStatsProvider::BucketSpacesStats;
using namespace ::testing;
struct DistributorHostInfoReporterTest : Test {
@@ -35,7 +37,7 @@ namespace {
// My kingdom for GoogleMock!
struct MockedMinReplicaProvider : MinReplicaProvider
{
- std::unordered_map<uint16_t, uint32_t> minReplica;
+ MinReplicaStats minReplica;
std::unordered_map<uint16_t, uint32_t> getMinReplica() const override {
return minReplica;
@@ -121,7 +123,7 @@ struct Fixture {
TEST_F(DistributorHostInfoReporterTest, min_replica_stats_are_reported) {
Fixture f;
- std::unordered_map<uint16_t, uint32_t> minReplica;
+ MinReplicaStats minReplica;
minReplica[0] = 2;
minReplica[5] = 9;
f.minReplicaProvider.minReplica = minReplica;
@@ -133,10 +135,30 @@ TEST_F(DistributorHostInfoReporterTest, min_replica_stats_are_reported) {
EXPECT_EQ(9, getMinReplica(root, 5));
}
+TEST_F(DistributorHostInfoReporterTest, merge_min_replica_stats) {
+
+ MinReplicaStats min_replica_a;
+ min_replica_a[3] = 2;
+ min_replica_a[5] = 4;
+
+ MinReplicaStats min_replica_b;
+ min_replica_b[5] = 6;
+ min_replica_b[7] = 8;
+
+ MinReplicaStats result;
+ merge_min_replica_stats(result, min_replica_a);
+ merge_min_replica_stats(result, min_replica_b);
+
+ EXPECT_EQ(3, result.size());
+ EXPECT_EQ(2, result[3]);
+ EXPECT_EQ(4, result[5]);
+ EXPECT_EQ(8, result[7]);
+}
+
TEST_F(DistributorHostInfoReporterTest, generate_example_json) {
Fixture f;
- std::unordered_map<uint16_t, uint32_t> minReplica;
+ MinReplicaStats minReplica;
minReplica[0] = 2;
minReplica[5] = 9;
f.minReplicaProvider.minReplica = minReplica;
@@ -175,7 +197,7 @@ TEST_F(DistributorHostInfoReporterTest, no_report_generated_if_disabled) {
Fixture f;
f.reporter.enableReporting(false);
- std::unordered_map<uint16_t, uint32_t> minReplica;
+ MinReplicaStats minReplica;
minReplica[0] = 2;
minReplica[5] = 9;
f.minReplicaProvider.minReplica = minReplica;
@@ -210,5 +232,41 @@ TEST_F(DistributorHostInfoReporterTest, bucket_spaces_stats_are_reported) {
}
}
+TEST_F(DistributorHostInfoReporterTest, merge_per_node_bucket_spaces_stats) {
+
+ PerNodeBucketSpacesStats stats_a;
+ stats_a[3]["default"] = BucketSpaceStats(3, 2);
+ stats_a[3]["global"] = BucketSpaceStats(5, 4);
+ stats_a[5]["default"] = BucketSpaceStats(7, 6);
+ stats_a[5]["global"] = BucketSpaceStats(9, 8);
+
+ PerNodeBucketSpacesStats stats_b;
+ stats_b[5]["default"] = BucketSpaceStats(11, 10);
+ stats_b[5]["global"] = BucketSpaceStats(13, 12);
+ stats_b[7]["default"] = BucketSpaceStats(15, 14);
+
+ PerNodeBucketSpacesStats result;
+ merge_per_node_bucket_spaces_stats(result, stats_a);
+ merge_per_node_bucket_spaces_stats(result, stats_b);
+
+ PerNodeBucketSpacesStats exp;
+ exp[3]["default"] = BucketSpaceStats(3, 2);
+ exp[3]["global"] = BucketSpaceStats(5, 4);
+ exp[5]["default"] = BucketSpaceStats(7+11, 6+10);
+ exp[5]["global"] = BucketSpaceStats(9+13, 8+12);
+ exp[7]["default"] = BucketSpaceStats(15, 14);
+
+ EXPECT_EQ(exp, result);
}
+TEST_F(DistributorHostInfoReporterTest, merge_bucket_space_stats_maintains_valid_flag) {
+ BucketSpaceStats stats_a(5, 3);
+ BucketSpaceStats stats_b;
+
+ stats_a.merge(stats_b);
+ EXPECT_FALSE(stats_a.valid());
+ EXPECT_EQ(5, stats_a.bucketsTotal());
+ EXPECT_EQ(3, stats_a.bucketsPending());
+}
+
+}
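The two new merge tests above pin the semantics down fairly tightly: min-replica maps keep one value per node (4 wins over 6 for node 5, which matches taking the per-node minimum, though "first value wins" would satisfy the shown expectations too), while per-node bucket-space stats are summed and merging an invalid entry leaves the counts intact but clears the valid flag. A minimal, self-contained sketch of those semantics follows; the stand-in types and the _sketch suffix are mine and this is not the Vespa implementation, only what the tests above require.

#include <algorithm>
#include <cstdint>
#include <map>
#include <string>
#include <unordered_map>

using MinReplicaStatsSketch = std::unordered_map<uint16_t, uint32_t>;

// Per node, keep the smallest min-replica value seen across the merged sources
// (assumption consistent with the expectations in merge_min_replica_stats).
inline void merge_min_replica_stats_sketch(MinReplicaStatsSketch& dest, const MinReplicaStatsSketch& src) {
    for (const auto& [node, min_replica] : src) {
        auto it = dest.find(node);
        if (it == dest.end()) {
            dest.emplace(node, min_replica);
        } else {
            it->second = std::min(it->second, min_replica);
        }
    }
}

// Simplified stand-in for BucketSpaceStats: counts are summed and the result
// stays valid only if both operands were valid.
struct BucketSpaceStatsSketch {
    uint64_t buckets_total = 0;
    uint64_t buckets_pending = 0;
    bool valid = false;

    BucketSpaceStatsSketch() = default;                      // invalid, zero counts
    BucketSpaceStatsSketch(uint64_t total, uint64_t pending) // valid, like BucketSpaceStats(t, p) in the tests
        : buckets_total(total), buckets_pending(pending), valid(true) {}

    void merge(const BucketSpaceStatsSketch& rhs) {
        buckets_total   += rhs.buckets_total;
        buckets_pending += rhs.buckets_pending;
        valid = valid && rhs.valid;
    }
};

using BucketSpacesStatsSketch        = std::map<std::string, BucketSpaceStatsSketch>;
using PerNodeBucketSpacesStatsSketch = std::map<uint16_t, BucketSpacesStatsSketch>;

inline void merge_per_node_bucket_spaces_stats_sketch(PerNodeBucketSpacesStatsSketch& dest,
                                                      const PerNodeBucketSpacesStatsSketch& src) {
    for (const auto& [node, spaces] : src) {
        for (const auto& [space, stats] : spaces) {
            auto& dest_spaces = dest[node];
            auto it = dest_spaces.find(space);
            if (it == dest_spaces.end()) {
                dest_spaces.emplace(space, stats); // first occurrence is copied as-is
            } else {
                it->second.merge(stats);           // later occurrences are summed in
            }
        }
    }
}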
diff --git a/storage/src/tests/distributor/distributor_message_sender_stub.h b/storage/src/tests/distributor/distributor_message_sender_stub.h
index 59a5a82b7df..18662fbce8f 100644
--- a/storage/src/tests/distributor/distributor_message_sender_stub.h
+++ b/storage/src/tests/distributor/distributor_message_sender_stub.h
@@ -87,6 +87,11 @@ public:
return dummy_cluster_context;
}
+ distributor::PendingMessageTracker& getPendingMessageTracker() override {
+ assert(_pending_message_tracker);
+ return *_pending_message_tracker;
+ }
+
const distributor::PendingMessageTracker& getPendingMessageTracker() const override {
assert(_pending_message_tracker);
return *_pending_message_tracker;
diff --git a/storage/src/tests/distributor/distributortestutil.cpp b/storage/src/tests/distributor/distributortestutil.cpp
index a2f32d8faa2..3ec1c95b206 100644
--- a/storage/src/tests/distributor/distributortestutil.cpp
+++ b/storage/src/tests/distributor/distributortestutil.cpp
@@ -101,6 +101,19 @@ DistributorTestUtil::triggerDistributionChange(lib::Distribution::SP distr)
}
void
+DistributorTestUtil::receive_set_system_state_command(const vespalib::string& state_str)
+{
+ auto state_cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterState(state_str));
+ _distributor->handleMessage(state_cmd); // TODO move semantics
+}
+
+void
+DistributorTestUtil::handle_top_level_message(const std::shared_ptr<api::StorageMessage>& msg)
+{
+ _distributor->handleMessage(msg);
+}
+
+void
DistributorTestUtil::setTypeRepo(const std::shared_ptr<const document::DocumentTypeRepo> &repo)
{
_node->getComponentRegister().setDocumentTypeRepo(repo);
diff --git a/storage/src/tests/distributor/distributortestutil.h b/storage/src/tests/distributor/distributortestutil.h
index 6664b8d823d..533fd49811f 100644
--- a/storage/src/tests/distributor/distributortestutil.h
+++ b/storage/src/tests/distributor/distributortestutil.h
@@ -202,6 +202,12 @@ public:
void setSystemState(const lib::ClusterState& systemState);
+ // Invokes the full cluster state transition pipeline rather than directly applying
+ // the state and just pretending everything has completed.
+ void receive_set_system_state_command(const vespalib::string& state_str);
+
+ void handle_top_level_message(const std::shared_ptr<api::StorageMessage>& msg);
+
// Must be called prior to createLinks() to have any effect
void set_num_distributor_stripes(uint32_t n_stripes) noexcept {
_num_distributor_stripes = n_stripes;
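The intended use of the two helpers declared above can be seen further down in this diff, in idealstatemanagertest.cpp: receive_set_system_state_command() pushes the state through the real transition pipeline so a pending cluster state actually exists, and handle_top_level_message() feeds the resulting RequestBucketInfo replies back in to let it complete. A condensed illustration of that pattern follows; the fixture name is hypothetical, and _sender and _bucketSpaces are the usual DistributorTestUtil members.

// Illustrative only, condensed from the ideal-state test later in this diff.
TEST_F(SomeDistributorTestFixture, completes_pending_cluster_state) {
    // Run the new state through the real transition pipeline so that a
    // pending cluster state (and its bucket info fetches) actually exists.
    receive_set_system_state_command("version:2 distributor:1 storage:1");

    // Answer the outstanding RequestBucketInfo commands to let the pending
    // state complete and become active.
    ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
    for (uint32_t i = 0; i < _sender.commands().size(); ++i) {
        auto& req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.command(i));
        handle_top_level_message(req.makeReply());
    }
}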
diff --git a/storage/src/tests/distributor/idealstatemanagertest.cpp b/storage/src/tests/distributor/idealstatemanagertest.cpp
index 0a36e5cd0e5..e38e4b5b668 100644
--- a/storage/src/tests/distributor/idealstatemanagertest.cpp
+++ b/storage/src/tests/distributor/idealstatemanagertest.cpp
@@ -40,18 +40,18 @@ struct IdealStateManagerTest : Test, DistributorTestUtil {
bool checkBlock(const IdealStateOperation& op,
const document::Bucket& bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer& op_seq) const
{
- return op.checkBlock(bucket, tracker, op_seq);
+ return op.checkBlock(bucket, ctx, op_seq);
}
bool checkBlockForAllNodes(const IdealStateOperation& op,
const document::Bucket& bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer& op_seq) const
{
- return op.checkBlockForAllNodes(bucket, tracker, op_seq);
+ return op.checkBlockForAllNodes(bucket, ctx, op_seq);
}
std::vector<document::BucketSpace> _bucketSpaces;
@@ -170,92 +170,86 @@ TEST_F(IdealStateManagerTest, recheck_when_active) {
active_ideal_state_operations());
}
-TEST_F(IdealStateManagerTest, block_ideal_state_ops_on_full_request_bucket_info) {
+/**
+ * Don't schedule ideal state operations when there's a pending cluster state.
+ * This subsumes the legacy behavior of blocking ideal state ops when there is a
+ * zero-bucket RequestBucketInfoCommand pending towards a node (i.e. full bucket
+ * info fetch).
+ *
+ * This is for two reasons:
+ * - Avoids race conditions where we change the bucket set concurrently with
+ * requesting bucket info.
+ * - Once we get updated bucket info it's likely that the set of ideal state ops
+ * to execute will change anyway, so it makes sense to wait until it's ready.
+ */
+TEST_F(IdealStateManagerTest, block_ideal_state_ops_when_pending_cluster_state_is_present) {
+
+ setupDistributor(2, 10, "version:1 distributor:1 storage:1 .0.s:d");
+
+ // Trigger a pending cluster state with bucket info requests towards 1 node
+ receive_set_system_state_command("version:2 distributor:1 storage:1");
- setupDistributor(2, 10, "distributor:1 storage:2");
-
- framework::defaultimplementation::FakeClock clock;
- PendingMessageTracker tracker(_node->getComponentRegister());
OperationSequencer op_seq;
-
document::BucketId bid(16, 1234);
- std::vector<document::BucketId> buckets;
-
- // RequestBucketInfoCommand does not have a specific bucketid since it's
- // sent to the entire node. It will then use a null bucketid.
- {
- auto msg = std::make_shared<api::RequestBucketInfoCommand>(makeBucketSpace(), buckets);
- msg->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 4));
- tracker.insert(msg);
- }
{
RemoveBucketOperation op(dummy_cluster_context,
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(3, 4)));
- EXPECT_TRUE(op.isBlocked(tracker, op_seq));
- }
-
- {
- // Don't trigger on requests to other nodes.
- RemoveBucketOperation op(dummy_cluster_context,
- BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(3, 5)));
- EXPECT_FALSE(op.isBlocked(tracker, op_seq));
+ EXPECT_TRUE(op.isBlocked(operation_context(), op_seq));
}
- // Don't block on null-bucket messages that aren't RequestBucketInfo.
- {
- auto msg = std::make_shared<api::CreateVisitorCommand>(makeBucketSpace(), "foo", "bar", "baz");
- msg->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 7));
- tracker.insert(msg);
+ // Clear pending by replying with zero buckets for all bucket spaces
+ ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
+ for (uint32_t i = 0; i < _sender.commands().size(); ++i) {
+ auto& bucket_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.command(i));
+ handle_top_level_message(bucket_req.makeReply());
}
{
RemoveBucketOperation op(dummy_cluster_context,
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(7)));
- EXPECT_FALSE(op.isBlocked(tracker, op_seq));
+ EXPECT_FALSE(op.isBlocked(operation_context(), op_seq));
}
}
TEST_F(IdealStateManagerTest, block_check_for_all_operations_to_specific_bucket) {
setupDistributor(2, 10, "distributor:1 storage:2");
framework::defaultimplementation::FakeClock clock;
- PendingMessageTracker tracker(_node->getComponentRegister());
OperationSequencer op_seq;
document::BucketId bid(16, 1234);
{
auto msg = std::make_shared<api::JoinBucketsCommand>(makeDocumentBucket(bid));
msg->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 4));
- tracker.insert(msg);
+ pending_message_tracker().insert(msg);
}
{
RemoveBucketOperation op(dummy_cluster_context,
BucketAndNodes(makeDocumentBucket(bid), toVector<uint16_t>(7)));
// Not blocked for exact node match.
- EXPECT_FALSE(checkBlock(op, makeDocumentBucket(bid), tracker, op_seq));
+ EXPECT_FALSE(checkBlock(op, makeDocumentBucket(bid), operation_context(), op_seq));
// But blocked for bucket match!
- EXPECT_TRUE(checkBlockForAllNodes(op, makeDocumentBucket(bid), tracker, op_seq));
+ EXPECT_TRUE(checkBlockForAllNodes(op, makeDocumentBucket(bid), operation_context(), op_seq));
}
}
TEST_F(IdealStateManagerTest, block_operations_with_locked_buckets) {
setupDistributor(2, 10, "distributor:1 storage:2");
framework::defaultimplementation::FakeClock clock;
- PendingMessageTracker tracker(_node->getComponentRegister());
OperationSequencer op_seq;
const auto bucket = makeDocumentBucket(document::BucketId(16, 1234));
{
auto msg = std::make_shared<api::JoinBucketsCommand>(bucket);
msg->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(), lib::NodeType::STORAGE, 1));
- tracker.insert(msg);
+ pending_message_tracker().insert(msg);
}
auto token = op_seq.try_acquire(bucket, "foo");
EXPECT_TRUE(token.valid());
{
RemoveBucketOperation op(dummy_cluster_context, BucketAndNodes(bucket, toVector<uint16_t>(0)));
- EXPECT_TRUE(checkBlock(op, bucket, tracker, op_seq));
- EXPECT_TRUE(checkBlockForAllNodes(op, bucket, tracker, op_seq));
+ EXPECT_TRUE(checkBlock(op, bucket, operation_context(), op_seq));
+ EXPECT_TRUE(checkBlockForAllNodes(op, bucket, operation_context(), op_seq));
}
}
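The rewritten blocking test above only asserts behaviour through the DistributorStripeOperationContext interface: isBlocked() must report true while a cluster state is pending and false once the bucket info replies have cleared it. Purely as a hypothetical sketch of the shape such a rule can take (this is not the actual IdealStateOperation code), using the pending_cluster_state_or_null() accessor that the fake context earlier in this diff also implements:

#include <vespa/storage/distributor/distributor_stripe_operation_context.h>

namespace storage::distributor {

// Hypothetical helper, not part of this change: "blocked" here simply means the
// operation context still reports a pending cluster state for the bucket space.
bool blocked_by_pending_cluster_state_sketch(const DistributorStripeOperationContext& ctx,
                                             const document::BucketSpace& space) {
    return ctx.pending_cluster_state_or_null(space) != nullptr;
}

} // namespace storage::distributor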
diff --git a/storage/src/tests/distributor/bucketdbupdatertest.cpp b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp
index 7e8fec3b83a..e353e976081 100644
--- a/storage/src/tests/distributor/bucketdbupdatertest.cpp
+++ b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp
@@ -57,12 +57,14 @@ getRequestBucketInfoStrings(uint32_t count)
}
-class BucketDBUpdaterTest : public Test,
- public DistributorTestUtil
+// TODO STRIPE: Add variant of this test for the new stripe mode.
+// TODO STRIPE: Remove this test when legacy mode is gone.
+class LegacyBucketDBUpdaterTest : public Test,
+ public DistributorTestUtil
{
public:
- BucketDBUpdaterTest();
- ~BucketDBUpdaterTest() override;
+ LegacyBucketDBUpdaterTest();
+ ~LegacyBucketDBUpdaterTest() override;
auto &defaultDistributorBucketSpace() { return getBucketSpaceRepo().get(makeBucketSpace()); }
@@ -501,7 +503,7 @@ public:
std::unique_ptr<PendingClusterState> state;
PendingClusterStateFixture(
- BucketDBUpdaterTest& owner,
+ LegacyBucketDBUpdaterTest& owner,
const std::string& oldClusterState,
const std::string& newClusterState)
{
@@ -520,7 +522,7 @@ public:
}
PendingClusterStateFixture(
- BucketDBUpdaterTest& owner,
+ LegacyBucketDBUpdaterTest& owner,
const std::string& oldClusterState)
{
ClusterInformation::CSP clusterInfo(
@@ -551,15 +553,15 @@ public:
}
};
-BucketDBUpdaterTest::BucketDBUpdaterTest()
+LegacyBucketDBUpdaterTest::LegacyBucketDBUpdaterTest()
: DistributorTestUtil(),
_bucketSpaces()
{
}
-BucketDBUpdaterTest::~BucketDBUpdaterTest() = default;
+LegacyBucketDBUpdaterTest::~LegacyBucketDBUpdaterTest() = default;
-TEST_F(BucketDBUpdaterTest, normal_usage) {
+TEST_F(LegacyBucketDBUpdaterTest, normal_usage) {
setSystemState(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
ASSERT_EQ(messageCount(3), _sender.commands().size());
@@ -590,7 +592,7 @@ TEST_F(BucketDBUpdaterTest, normal_usage) {
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(10, "distributor:2 storage:3"));
}
-TEST_F(BucketDBUpdaterTest, distributor_change) {
+TEST_F(LegacyBucketDBUpdaterTest, distributor_change) {
int numBuckets = 100;
// First sends request
@@ -620,7 +622,7 @@ TEST_F(BucketDBUpdaterTest, distributor_change) {
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(numBuckets, "distributor:2 storage:3"));
}
-TEST_F(BucketDBUpdaterTest, distributor_change_with_grouping) {
+TEST_F(LegacyBucketDBUpdaterTest, distributor_change_with_grouping) {
std::string distConfig(getDistConfig6Nodes2Groups());
setDistribution(distConfig);
int numBuckets = 100;
@@ -651,7 +653,7 @@ TEST_F(BucketDBUpdaterTest, distributor_change_with_grouping) {
ASSERT_EQ(messageCount(6), _sender.commands().size());
}
-TEST_F(BucketDBUpdaterTest, normal_usage_initializing) {
+TEST_F(LegacyBucketDBUpdaterTest, normal_usage_initializing) {
setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1 .0.s:i"));
ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
@@ -688,7 +690,7 @@ TEST_F(BucketDBUpdaterTest, normal_usage_initializing) {
ASSERT_NO_FATAL_FAILURE(assertCorrectBuckets(20, "distributor:1 storage:1"));
}
-TEST_F(BucketDBUpdaterTest, failed_request_bucket_info) {
+TEST_F(LegacyBucketDBUpdaterTest, failed_request_bucket_info) {
setSystemState(lib::ClusterState("distributor:1 .0.s:i storage:1"));
// 2 messages sent up: 1 to the nodes, and one reply to the setsystemstate.
@@ -730,7 +732,7 @@ TEST_F(BucketDBUpdaterTest, failed_request_bucket_info) {
EXPECT_EQ(std::string("Set system state"), _senderDown.getCommands());
}
-TEST_F(BucketDBUpdaterTest, down_while_init) {
+TEST_F(LegacyBucketDBUpdaterTest, down_while_init) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
ASSERT_NO_FATAL_FAILURE(fakeBucketReply(lib::ClusterState("distributor:1 storage:3"),
@@ -746,7 +748,7 @@ TEST_F(BucketDBUpdaterTest, down_while_init) {
}
bool
-BucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) const
+LegacyBucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) const
{
for (int i=1; i<bucketCount; i++) {
if (bucketHasNode(document::BucketId(16, i), node)) {
@@ -758,7 +760,7 @@ BucketDBUpdaterTest::bucketExistsThatHasNode(int bucketCount, uint16_t node) con
}
std::string
-BucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count)
+LegacyBucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count)
{
std::ostringstream ost;
bool first = true;
@@ -775,13 +777,13 @@ BucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes, size_t count)
}
std::string
-BucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes)
+LegacyBucketDBUpdaterTest::getNodeList(std::vector<uint16_t> nodes)
{
return getNodeList(std::move(nodes), _bucketSpaces.size());
}
std::vector<uint16_t>
-BucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes)
+LegacyBucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes)
{
std::vector<uint16_t> res;
size_t count = _bucketSpaces.size();
@@ -793,7 +795,7 @@ BucketDBUpdaterTest::expandNodeVec(const std::vector<uint16_t> &nodes)
return res;
}
-TEST_F(BucketDBUpdaterTest, node_down) {
+TEST_F(LegacyBucketDBUpdaterTest, node_down) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
enableDistributorClusterState("distributor:1 storage:3");
@@ -808,7 +810,7 @@ TEST_F(BucketDBUpdaterTest, node_down) {
EXPECT_FALSE(bucketExistsThatHasNode(100, 1));
}
-TEST_F(BucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) {
+TEST_F(LegacyBucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
enableDistributorClusterState("distributor:1 storage:3");
@@ -823,7 +825,7 @@ TEST_F(BucketDBUpdaterTest, storage_node_in_maintenance_clears_buckets_for_node)
EXPECT_FALSE(bucketExistsThatHasNode(100, 1));
}
-TEST_F(BucketDBUpdaterTest, node_down_copies_get_in_sync) {
+TEST_F(LegacyBucketDBUpdaterTest, node_down_copies_get_in_sync) {
ASSERT_NO_FATAL_FAILURE(setStorageNodes(3));
lib::ClusterState systemState("distributor:1 storage:3");
@@ -840,7 +842,7 @@ TEST_F(BucketDBUpdaterTest, node_down_copies_get_in_sync) {
dumpBucket(bid));
}
-TEST_F(BucketDBUpdaterTest, initializing_while_recheck) {
+TEST_F(LegacyBucketDBUpdaterTest, initializing_while_recheck) {
lib::ClusterState systemState("distributor:1 storage:2 .0.s:i .0.i:0.1");
setSystemState(systemState);
@@ -858,7 +860,7 @@ TEST_F(BucketDBUpdaterTest, initializing_while_recheck) {
EXPECT_EQ(MessageType::SETSYSTEMSTATE, _senderDown.command(0)->getType());
}
-TEST_F(BucketDBUpdaterTest, bit_change) {
+TEST_F(LegacyBucketDBUpdaterTest, bit_change) {
std::vector<document::BucketId> bucketlist;
{
@@ -957,7 +959,7 @@ TEST_F(BucketDBUpdaterTest, bit_change) {
}
};
-TEST_F(BucketDBUpdaterTest, recheck_node_with_failure) {
+TEST_F(LegacyBucketDBUpdaterTest, recheck_node_with_failure) {
ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5));
_sender.clear();
@@ -1000,7 +1002,7 @@ TEST_F(BucketDBUpdaterTest, recheck_node_with_failure) {
EXPECT_EQ(size_t(2), _sender.commands().size());
}
-TEST_F(BucketDBUpdaterTest, recheck_node) {
+TEST_F(LegacyBucketDBUpdaterTest, recheck_node) {
ASSERT_NO_FATAL_FAILURE(initializeNodesAndBuckets(3, 5));
_sender.clear();
@@ -1038,7 +1040,7 @@ TEST_F(BucketDBUpdaterTest, recheck_node) {
EXPECT_EQ(api::BucketInfo(20,10,12, 50, 60, true, true), copy->getBucketInfo());
}
-TEST_F(BucketDBUpdaterTest, notify_bucket_change) {
+TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change) {
enableDistributorClusterState("distributor:1 storage:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1234");
@@ -1101,7 +1103,7 @@ TEST_F(BucketDBUpdaterTest, notify_bucket_change) {
dumpBucket(document::BucketId(16, 2)));
}
-TEST_F(BucketDBUpdaterTest, notify_bucket_change_from_node_down) {
+TEST_F(LegacyBucketDBUpdaterTest, notify_bucket_change_from_node_down) {
enableDistributorClusterState("distributor:1 storage:2");
addNodesToBucketDB(document::BucketId(16, 1), "1=1234");
@@ -1155,7 +1157,7 @@ TEST_F(BucketDBUpdaterTest, notify_bucket_change_from_node_down) {
* distributor in the pending state but not by the current state would be
* discarded when attempted inserted into the bucket database.
*/
-TEST_F(BucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_requests) {
+TEST_F(LegacyBucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_requests) {
setSystemState(lib::ClusterState("distributor:1 storage:1"));
ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
@@ -1194,7 +1196,7 @@ TEST_F(BucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket_info_
}
}
-TEST_F(BucketDBUpdaterTest, merge_reply) {
+TEST_F(LegacyBucketDBUpdaterTest, merge_reply) {
enableDistributorClusterState("distributor:1 storage:3");
addNodesToBucketDB(document::BucketId(16, 1234),
@@ -1236,7 +1238,7 @@ TEST_F(BucketDBUpdaterTest, merge_reply) {
dumpBucket(document::BucketId(16, 1234)));
};
-TEST_F(BucketDBUpdaterTest, merge_reply_node_down) {
+TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down) {
enableDistributorClusterState("distributor:1 storage:3");
std::vector<api::MergeBucketCommand::Node> nodes;
@@ -1278,7 +1280,7 @@ TEST_F(BucketDBUpdaterTest, merge_reply_node_down) {
dumpBucket(document::BucketId(16, 1234)));
};
-TEST_F(BucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
+TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
enableDistributorClusterState("distributor:1 storage:3");
std::vector<api::MergeBucketCommand::Node> nodes;
@@ -1321,7 +1323,7 @@ TEST_F(BucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
};
-TEST_F(BucketDBUpdaterTest, flush) {
+TEST_F(LegacyBucketDBUpdaterTest, flush) {
enableDistributorClusterState("distributor:1 storage:3");
_sender.clear();
@@ -1348,7 +1350,7 @@ TEST_F(BucketDBUpdaterTest, flush) {
}
std::string
-BucketDBUpdaterTest::getSentNodes(
+LegacyBucketDBUpdaterTest::getSentNodes(
const std::string& oldClusterState,
const std::string& newClusterState)
{
@@ -1372,7 +1374,7 @@ BucketDBUpdaterTest::getSentNodes(
}
std::string
-BucketDBUpdaterTest::getSentNodesDistributionChanged(
+LegacyBucketDBUpdaterTest::getSentNodesDistributionChanged(
const std::string& oldClusterState)
{
DistributorMessageSenderStub sender;
@@ -1399,7 +1401,7 @@ BucketDBUpdaterTest::getSentNodesDistributionChanged(
return ost.str();
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_send_messages) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_send_messages) {
EXPECT_EQ(getNodeList({0, 1, 2}),
getSentNodes("cluster:d",
"distributor:1 storage:3"));
@@ -1496,7 +1498,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_send_messages) {
"distributor:3 storage:3 .1.s:m"));
};
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_receive) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_receive) {
DistributorMessageSenderStub sender;
auto cmd(std::make_shared<api::SetSystemStateCommand>(
@@ -1534,7 +1536,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_receive) {
EXPECT_EQ(3, (int)pendingTransition.results().size());
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down) {
std::string config(getDistConfig6Nodes4Groups());
config += "distributor_auto_ownership_transfer_on_whole_group_down true\n";
setDistribution(config);
@@ -1553,7 +1555,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down) {
"distributor:6 .2.s:d storage:6"));
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) {
std::string config(getDistConfig6Nodes4Groups());
config += "distributor_auto_ownership_transfer_on_whole_group_down false\n";
setDistribution(config);
@@ -1639,7 +1641,7 @@ struct BucketDumper : public BucketDatabase::EntryProcessor
};
std::string
-BucketDBUpdaterTest::mergeBucketLists(
+LegacyBucketDBUpdaterTest::mergeBucketLists(
const lib::ClusterState& oldState,
const std::string& existingData,
const lib::ClusterState& newState,
@@ -1694,7 +1696,7 @@ BucketDBUpdaterTest::mergeBucketLists(
}
std::string
-BucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
+LegacyBucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
const std::string& newData,
bool includeBucketInfo)
{
@@ -1706,7 +1708,7 @@ BucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
includeBucketInfo);
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge) {
// Simple initializing case - ask all nodes for info
EXPECT_EQ(
// Result is on the form: [bucket w/o count bits]:[node indexes]|..
@@ -1745,7 +1747,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge) {
mergeBucketLists("", "0:5/0/0/0|1:5/2/3/4", true));
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
// Node went from initializing to up and non-invalid bucket changed.
EXPECT_EQ(
std::string("2:0/2/3/4/t|3:0/2/4/6/t|"),
@@ -1757,7 +1759,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
true));
}
-TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) {
+TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) {
document::BucketId bucket(16, 3);
lib::ClusterState stateBefore("distributor:1 storage:1");
{
@@ -1786,7 +1788,7 @@ TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_s
EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket));
}
-TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
+TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
document::BucketId bucket(16, 3);
lib::ClusterState stateBefore("distributor:1 storage:1");
{
@@ -1820,7 +1822,7 @@ TEST_F(BucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_s
* will with a high likelihood end up not getting the complete view of the buckets in
* the cluster.
*/
-TEST_F(BucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) {
+TEST_F(LegacyBucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) {
lib::ClusterState stateBefore("distributor:6 storage:6");
{
uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 1;
@@ -1862,7 +1864,7 @@ TEST_F(BucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribut
EXPECT_EQ(size_t(0), _sender.commands().size());
}
-TEST_F(BucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) {
+TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) {
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), messageCount(6), 20));
_sender.clear();
EXPECT_TRUE(distributor_is_in_recovery_mode());
@@ -1911,7 +1913,7 @@ std::unique_ptr<BucketDatabase::EntryProcessor> func_processor(Func&& f) {
}
-TEST_F(BucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) {
+TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) {
setDistribution(getDistConfig3Nodes1Group());
constexpr uint32_t n_buckets = 100;
@@ -1930,7 +1932,7 @@ TEST_F(BucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db
}));
}
-TEST_F(BucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) {
+TEST_F(LegacyBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) {
getClock().setAbsoluteTimeInSeconds(101234);
lib::ClusterState stateBefore("distributor:1 storage:1");
{
@@ -1945,7 +1947,7 @@ TEST_F(BucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestam
EXPECT_EQ(uint32_t(101234), e->getLastGarbageCollectionTime());
}
-TEST_F(BucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) {
+TEST_F(LegacyBucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) {
{
lib::ClusterState stateBefore("distributor:1 storage:1 .0.s:i");
uint32_t expectedMsgs = _bucketSpaces.size(), dummyBucketsToReturn = 0;
@@ -1992,7 +1994,7 @@ TEST_F(BucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fe
}
std::vector<uint16_t>
-BucketDBUpdaterTest::getSendSet() const
+LegacyBucketDBUpdaterTest::getSendSet() const
{
std::vector<uint16_t> nodes;
std::transform(_sender.commands().begin(),
@@ -2007,7 +2009,7 @@ BucketDBUpdaterTest::getSendSet() const
}
std::vector<uint16_t>
-BucketDBUpdaterTest::getSentNodesWithPreemption(
+LegacyBucketDBUpdaterTest::getSentNodesWithPreemption(
const std::string& oldClusterState,
uint32_t expectedOldStateMessages,
const std::string& preemptedClusterState,
@@ -2040,7 +2042,7 @@ using nodeVec = std::vector<uint16_t>;
* database modifications caused by intermediate states will not be
* accounted for (basically the ABA problem in a distributed setting).
*/
-TEST_F(BucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) {
+TEST_F(LegacyBucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) {
EXPECT_EQ(
expandNodeVec({0, 1, 2, 3, 4, 5}),
getSentNodesWithPreemption("version:1 distributor:6 storage:6",
@@ -2049,7 +2051,7 @@ TEST_F(BucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_t
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) {
+TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) {
EXPECT_EQ(
expandNodeVec({2, 3}),
getSentNodesWithPreemption(
@@ -2059,7 +2061,7 @@ TEST_F(BucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_ne
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
+TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
EXPECT_EQ(
expandNodeVec({2}),
getSentNodesWithPreemption(
@@ -2069,7 +2071,7 @@ TEST_F(BucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
+TEST_F(LegacyBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
EXPECT_EQ(
nodeVec{},
getSentNodesWithPreemption(
@@ -2079,7 +2081,7 @@ TEST_F(BucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
"version:3 distributor:6 storage:6 .2.s:d")); // 2 down again.
}
-TEST_F(BucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
+TEST_F(LegacyBucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
// Even though 100 nodes are preempted, not all of these should be part
// of the request afterwards when only 6 are part of the state.
EXPECT_EQ(
@@ -2091,7 +2093,7 @@ TEST_F(BucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
"version:3 distributor:6 storage:6"));
}
-TEST_F(BucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) {
+TEST_F(LegacyBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) {
lib::ClusterState stateBefore(
"version:1 distributor:6 storage:6 .1.t:1234");
uint32_t expectedMsgs = messageCount(6), dummyBucketsToReturn = 10;
@@ -2111,7 +2113,7 @@ TEST_F(BucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_com
// distribution config will follow very shortly after the config has been
// applied to the node. The new cluster state will then send out requests to
// the correct node set.
-TEST_F(BucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) {
uint32_t expectedMsgs = 6, dummyBucketsToReturn = 20;
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"),
expectedMsgs, dummyBucketsToReturn));
@@ -2134,7 +2136,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_avail
*
* See VESPA-790 for details.
*/
-TEST_F(BucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) {
+TEST_F(LegacyBucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) {
uint32_t expectedMsgs = messageCount(3), dummyBucketsToReturn = 1;
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:3 storage:3"),
expectedMsgs, dummyBucketsToReturn));
@@ -2170,7 +2172,7 @@ TEST_F(BucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_owner
EXPECT_EQ(expandNodeVec({0, 1}), getSendSet());
}
-TEST_F(BucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) {
+TEST_F(LegacyBucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) {
auto fixture = createPendingStateFixtureForStateChange(
"distributor:2 storage:2", "distributor:1 storage:2");
EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
@@ -2180,7 +2182,7 @@ TEST_F(BucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer)
EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
}
-TEST_F(BucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) {
+TEST_F(LegacyBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) {
auto fixture = createPendingStateFixtureForStateChange(
"distributor:2 storage:2", "distributor:2 storage:1");
EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer());
@@ -2190,26 +2192,26 @@ TEST_F(BucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_trans
EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer());
}
-TEST_F(BucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) {
+TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) {
auto fixture = createPendingStateFixtureForDistributionChange(
"distributor:2 storage:2");
EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
}
-TEST_F(BucketDBUpdaterTest, transition_time_tracked_for_single_state_change) {
+TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_single_state_change) {
ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2)));
EXPECT_EQ(uint64_t(5000), lastTransitionTimeInMillis());
}
-TEST_F(BucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) {
+TEST_F(LegacyBucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) {
ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2)));
ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:3", 3, messageCount(1)));
EXPECT_EQ(uint64_t(3000), lastTransitionTimeInMillis());
}
-TEST_F(BucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) {
+TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) {
lib::ClusterState state("distributor:2 storage:2");
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(state, messageCount(2), 1));
@@ -2221,7 +2223,7 @@ TEST_F(BucketDBUpdaterTest, transition_time_tracked_for_distribution_config_chan
EXPECT_EQ(uint64_t(4000), lastTransitionTimeInMillis());
}
-TEST_F(BucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) {
+TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) {
_sender.clear();
lib::ClusterState state("distributor:2 storage:2");
setSystemState(state);
@@ -2245,7 +2247,7 @@ TEST_F(BucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions
* Yes, the order of node<->bucket id is reversed between the two, perhaps to make sure you're awake.
*/
-TEST_F(BucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted) {
// Replacing bucket information for content node 0 should not mark existing
// untrusted replica as trusted as a side effect.
EXPECT_EQ(
@@ -2257,32 +2259,32 @@ TEST_F(BucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not
"0:5/1/2/3|1:5/7/8/9", true));
}
-TEST_F(BucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) {
EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"),
mergeBucketLists("", "0:5/1/2/3|1:5/7/8/9", true));
}
-TEST_F(BucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) {
EXPECT_EQ(std::string("5:0/1/2/3/t|"),
mergeBucketLists("", "0:5/1/2/3", true));
}
-TEST_F(BucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) {
EXPECT_EQ(std::string("5:0/1/2/3/t|"),
mergeBucketLists("0:5/1/2/3", "0:5/1/2/3", true));
}
-TEST_F(BucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) {
EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"),
mergeBucketLists("0:5/1/2/3|1:5/7/8/9", "0:5/1/2/3|1:5/7/8/9", true));
}
-TEST_F(BucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) {
EXPECT_EQ(std::string("5:1/2/3/4/u,0/1/2/3/t|"),
mergeBucketLists("0:5/1/2/3", "0:5/1/2/3|1:5/2/3/4", true));
}
-TEST_F(BucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) {
+TEST_F(LegacyBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) {
// This differs from batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted
// in that _all_ content nodes are considered outdated when distributor changes take place,
// and therefore a slightly different code path is taken. In particular, bucket info for
@@ -2298,7 +2300,7 @@ TEST_F(BucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_d
}
// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475
-TEST_F(BucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) {
+TEST_F(LegacyBucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) {
std::string distConfig(getDistConfig6Nodes2Groups());
setDistribution(distConfig);
@@ -2366,7 +2368,7 @@ void for_each_bucket(const DistributorBucketSpaceRepo& repo, Func&& f) {
}
-TEST_F(BucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) {
+TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) {
getBucketDBUpdater().set_stale_reads_enabled(true);
lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
@@ -2407,7 +2409,7 @@ TEST_F(BucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership
});
}
-TEST_F(BucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) {
+TEST_F(LegacyBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) {
constexpr uint32_t n_buckets = 10;
// No ownership change, just node down. Test redundancy is 2, so removing 2 nodes will
// cause some buckets to be entirely unavailable.
@@ -2418,7 +2420,7 @@ TEST_F(BucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_on
EXPECT_EQ(size_t(0), read_only_global_db().size());
}
-TEST_F(BucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) {
+TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) {
getBucketDBUpdater().set_stale_reads_enabled(false);
lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
@@ -2440,7 +2442,7 @@ TEST_F(BucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_c
EXPECT_EQ(size_t(0), read_only_global_db().size());
}
-void BucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
+void LegacyBucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
vespalib::stringref initial_state_str,
uint32_t initial_buckets,
uint32_t initial_expected_msgs,
@@ -2463,7 +2465,7 @@ void BucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
_sender.clear();
}
-TEST_F(BucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) {
+TEST_F(LegacyBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2483,7 +2485,7 @@ TEST_F(BucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until
EXPECT_EQ(uint64_t(n_buckets), mutable_global_db().size());
}
-TEST_F(BucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) {
+TEST_F(LegacyBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2495,7 +2497,7 @@ TEST_F(BucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated
EXPECT_EQ(uint64_t(0), read_only_global_db().size());
}
-TEST_F(BucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) {
+TEST_F(LegacyBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2509,7 +2511,7 @@ TEST_F(BucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_d
EXPECT_EQ(uint64_t(n_buckets), read_only_global_db().size());
}
-TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) {
+TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2523,7 +2525,7 @@ TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_vers
ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5));
}
-TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) {
+TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
@@ -2539,7 +2541,7 @@ TEST_F(BucketDBUpdaterTest, activate_cluster_state_request_without_pending_trans
EXPECT_EQ(size_t(0), _sender.replies().size());
}
-TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
// Need to trigger an initial edge to complete first bucket scan
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"),
messageCount(1), 0));
@@ -2586,7 +2588,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
EXPECT_EQ(size_t(0), mutable_global_db().size());
}
-uint32_t BucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_benchmarking() {
+uint32_t LegacyBucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_benchmarking() {
// Need to trigger an initial edge to complete first bucket scan
setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"), messageCount(1), 0);
_sender.clear();
@@ -2622,7 +2624,7 @@ uint32_t BucketDBUpdaterTest::populate_bucket_db_via_request_bucket_info_for_ben
return n_buckets;
}
-TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_storage_nodes) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_storage_nodes) {
const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via ownership
@@ -2633,7 +2635,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_removing_buckets_for_unavailable_
fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets);
}
-TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_remover_db_pass) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_remover_db_pass) {
const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
// TODO this benchmark is void if we further restrict the pruning elision logic to allow
@@ -2646,7 +2648,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_no_buckets_removed_during_node_re
fprintf(stderr, "Took %g seconds to scan %u buckets with no-op action\n", timer.min_time(), n_buckets);
}
-TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_remover_db_pass) {
+TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_remover_db_pass) {
const uint32_t n_buckets = populate_bucket_db_via_request_bucket_info_for_benchmarking();
lib::ClusterState no_op_state("distributor:1 storage:1 .0.s:m"); // Removing all buckets via all replicas gone
@@ -2657,7 +2659,7 @@ TEST_F(BucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_node_r
fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets);
}
-TEST_F(BucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) {
+TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) {
auto initial_baseline = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d");
auto initial_default = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m");
@@ -2682,7 +2684,7 @@ TEST_F(BucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_s
EXPECT_TRUE(state == nullptr);
}
-struct BucketDBUpdaterSnapshotTest : BucketDBUpdaterTest {
+struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest {
lib::ClusterState empty_state;
std::shared_ptr<lib::ClusterState> initial_baseline;
std::shared_ptr<lib::ClusterState> initial_default;
@@ -2691,7 +2693,7 @@ struct BucketDBUpdaterSnapshotTest : BucketDBUpdaterTest {
Bucket global_bucket;
BucketDBUpdaterSnapshotTest()
- : BucketDBUpdaterTest(),
+ : LegacyBucketDBUpdaterTest(),
empty_state(),
initial_baseline(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d")),
initial_default(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m")),
@@ -2704,7 +2706,7 @@ struct BucketDBUpdaterSnapshotTest : BucketDBUpdaterTest {
~BucketDBUpdaterSnapshotTest() override;
void SetUp() override {
- BucketDBUpdaterTest::SetUp();
+ LegacyBucketDBUpdaterTest::SetUp();
getBucketDBUpdater().set_stale_reads_enabled(true);
};
diff --git a/storage/src/tests/distributor/distributortest.cpp b/storage/src/tests/distributor/legacy_distributor_test.cpp
index 3d1c6165946..3123b7fc91c 100644
--- a/storage/src/tests/distributor/distributortest.cpp
+++ b/storage/src/tests/distributor/legacy_distributor_test.cpp
@@ -33,9 +33,11 @@ using namespace ::testing;
namespace storage::distributor {
-struct DistributorTest : Test, DistributorTestUtil {
- DistributorTest();
- ~DistributorTest() override;
+// TODO STRIPE: Add variant of this test for the new stripe mode.
+// TODO STRIPE: Remove this test when legacy mode is gone.
+struct LegacyDistributorTest : Test, DistributorTestUtil {
+ LegacyDistributorTest();
+ ~LegacyDistributorTest() override;
// TODO handle edge case for window between getnodestate reply already
// sent and new request not yet received
@@ -222,7 +224,6 @@ struct DistributorTest : Test, DistributorTestUtil {
}
void configureMaxClusterClockSkew(int seconds);
- void sendDownClusterStateCommand();
void replyToSingleRequestBucketInfoCommandWith1Bucket();
void sendDownDummyRemoveCommand();
void assertSingleBouncedRemoveReplyPresent();
@@ -234,17 +235,17 @@ struct DistributorTest : Test, DistributorTestUtil {
void set_up_and_start_get_op_with_stale_reads_enabled(bool enabled);
};
-DistributorTest::DistributorTest()
+LegacyDistributorTest::LegacyDistributorTest()
: Test(),
DistributorTestUtil(),
_bucketSpaces()
{
}
-DistributorTest::~DistributorTest() = default;
+LegacyDistributorTest::~LegacyDistributorTest() = default;
// TODO -> stripe test
-TEST_F(DistributorTest, operation_generation) {
+TEST_F(LegacyDistributorTest, operation_generation) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
document::BucketId bid;
@@ -263,7 +264,7 @@ TEST_F(DistributorTest, operation_generation) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, operations_generated_and_started_without_duplicates) {
+TEST_F(LegacyDistributorTest, operations_generated_and_started_without_duplicates) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
for (uint32_t i = 0; i < 6; ++i) {
@@ -279,7 +280,7 @@ TEST_F(DistributorTest, operations_generated_and_started_without_duplicates) {
// TODO -> stripe test
// TODO also need to impl/test cross-stripe cluster state changes
-TEST_F(DistributorTest, recovery_mode_on_cluster_state_change) {
+TEST_F(LegacyDistributorTest, recovery_mode_on_cluster_state_change) {
setupDistributor(Redundancy(1), NodeCount(2),
"storage:1 .0.s:d distributor:1");
enableDistributorClusterState("storage:1 distributor:1");
@@ -301,7 +302,7 @@ TEST_F(DistributorTest, recovery_mode_on_cluster_state_change) {
// TODO -> stripe test
// TODO how to throttle across stripes?
-TEST_F(DistributorTest, operations_are_throttled) {
+TEST_F(LegacyDistributorTest, operations_are_throttled) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
getConfig().setMinPendingMaintenanceOps(1);
getConfig().setMaxPendingMaintenanceOps(1);
@@ -314,7 +315,7 @@ TEST_F(DistributorTest, operations_are_throttled) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, handle_unknown_maintenance_reply) {
+TEST_F(LegacyDistributorTest, handle_unknown_maintenance_reply) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
{
@@ -334,7 +335,7 @@ TEST_F(DistributorTest, handle_unknown_maintenance_reply) {
}
// TODO -> generic, non distr/stripe test
-TEST_F(DistributorTest, contains_time_statement) {
+TEST_F(LegacyDistributorTest, contains_time_statement) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
EXPECT_FALSE(getConfig().containsTimeStatement(""));
@@ -346,7 +347,7 @@ TEST_F(DistributorTest, contains_time_statement) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, update_bucket_database) {
+TEST_F(LegacyDistributorTest, update_bucket_database) {
enableDistributorClusterState("distributor:1 storage:3");
EXPECT_EQ("BucketId(0x4000000000000001) : "
@@ -417,7 +418,7 @@ public:
// TODO -> stripe test
// TODO need to impl/test cross-stripe status requests
-TEST_F(DistributorTest, tick_processes_status_requests) {
+TEST_F(LegacyDistributorTest, tick_processes_status_requests) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
@@ -447,7 +448,7 @@ TEST_F(DistributorTest, tick_processes_status_requests) {
// TODO -> distributor test since it owns metric hook
// TODO need to impl/test cross-stripe metrics aggregation
-TEST_F(DistributorTest, metric_update_hook_updates_pending_maintenance_metrics) {
+TEST_F(LegacyDistributorTest, metric_update_hook_updates_pending_maintenance_metrics) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// To ensure we count all operations, not just those fitting within the
// pending window.
@@ -494,7 +495,7 @@ TEST_F(DistributorTest, metric_update_hook_updates_pending_maintenance_metrics)
}
// TODO -> stripe test
-TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_time_intervals) {
+TEST_F(LegacyDistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_time_intervals) {
getClock().setAbsoluteTimeInSeconds(1000);
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -533,7 +534,7 @@ TEST_F(DistributorTest, bucket_db_memory_usage_metrics_only_updated_at_fixed_tim
// TODO -> stripe test
// TODO need to impl/test cross-stripe config propagation
-TEST_F(DistributorTest, priority_config_is_propagated_to_distributor_configuration) {
+TEST_F(LegacyDistributorTest, priority_config_is_propagated_to_distributor_configuration) {
using namespace vespa::config::content::core;
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -570,7 +571,7 @@ TEST_F(DistributorTest, priority_config_is_propagated_to_distributor_configurati
}
// TODO -> stripe test
-TEST_F(DistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
+TEST_F(LegacyDistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
lib::ClusterState newState("storage:10 distributor:10");
auto stateCmd = std::make_shared<api::SetSystemStateCommand>(newState);
@@ -592,7 +593,7 @@ TEST_F(DistributorTest, no_db_resurrection_for_bucket_not_owned_in_pending_state
}
// TODO -> stripe test
-TEST_F(DistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_current_time) {
+TEST_F(LegacyDistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_current_time) {
setupDistributor(Redundancy(1), NodeCount(10), "storage:2 distributor:2");
getClock().setAbsoluteTimeInSeconds(101234);
document::BucketId bucket(16, 7654);
@@ -606,7 +607,7 @@ TEST_F(DistributorTest, added_db_buckets_without_gc_timestamp_implicitly_get_cur
}
// TODO -> stripe test
-TEST_F(DistributorTest, merge_stats_are_accumulated_during_database_iteration) {
+TEST_F(LegacyDistributorTest, merge_stats_are_accumulated_during_database_iteration) {
setupDistributor(Redundancy(2), NodeCount(3), "storage:3 distributor:1");
// Copies out of sync. Not possible for distributor to _reliably_ tell
// which direction(s) data will flow, so for simplicity assume that we
@@ -657,9 +658,9 @@ TEST_F(DistributorTest, merge_stats_are_accumulated_during_database_iteration) {
}
void
-DistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node,
- const vespalib::string& bucketSpace,
- const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats)
+LegacyDistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucketTotal, uint16_t node,
+ const vespalib::string& bucketSpace,
+ const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& stats)
{
auto nodeItr = stats.find(node);
ASSERT_TRUE(nodeItr != stats.end());
@@ -678,7 +679,7 @@ DistributorTest::assertBucketSpaceStats(size_t expBucketPending, size_t expBucke
* operations for the bucket.
*/
// TODO -> stripe test
-TEST_F(DistributorTest, stats_generated_for_preempted_operations) {
+TEST_F(LegacyDistributorTest, stats_generated_for_preempted_operations) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// For this test it suffices to have a single bucket with multiple aspects
// wrong about it. In this case, let a bucket be both out of sync _and_
@@ -703,7 +704,7 @@ TEST_F(DistributorTest, stats_generated_for_preempted_operations) {
}
// TODO -> distributor test
-TEST_F(DistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
+TEST_F(LegacyDistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// Default is enabled=true.
@@ -717,13 +718,13 @@ TEST_F(DistributorTest, host_info_reporter_config_is_propagated_to_reporter) {
}
// TODO -> stripe test (though config is a bit of a special case...)
-TEST_F(DistributorTest, replica_counting_mode_is_configured_to_trusted_by_default) {
+TEST_F(LegacyDistributorTest, replica_counting_mode_is_configured_to_trusted_by_default) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
EXPECT_EQ(ConfigBuilder::MinimumReplicaCountingMode::TRUSTED, currentReplicaCountingMode());
}
// TODO -> stripe test
-TEST_F(DistributorTest, replica_counting_mode_config_is_propagated_to_metric_updater) {
+TEST_F(LegacyDistributorTest, replica_counting_mode_config_is_propagated_to_metric_updater) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
ConfigBuilder builder;
builder.minimumReplicaCountingMode = ConfigBuilder::MinimumReplicaCountingMode::ANY;
@@ -732,7 +733,7 @@ TEST_F(DistributorTest, replica_counting_mode_config_is_propagated_to_metric_upd
}
// TODO -> stripe test
-TEST_F(DistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_propagated_to_internal_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
ConfigBuilder builder;
builder.maxConsecutivelyInhibitedMaintenanceTicks = 123;
@@ -741,13 +742,13 @@ TEST_F(DistributorTest, max_consecutively_inhibited_maintenance_ticks_config_is_
}
// TODO -> stripe test
-TEST_F(DistributorTest, bucket_activation_is_enabled_by_default) {
+TEST_F(LegacyDistributorTest, bucket_activation_is_enabled_by_default) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
EXPECT_FALSE(getConfig().isBucketActivationDisabled());
}
// TODO -> stripe test
-TEST_F(DistributorTest, bucket_activation_config_is_propagated_to_distributor_configuration) {
+TEST_F(LegacyDistributorTest, bucket_activation_config_is_propagated_to_distributor_configuration) {
using namespace vespa::config::content::core;
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
@@ -760,7 +761,7 @@ TEST_F(DistributorTest, bucket_activation_config_is_propagated_to_distributor_co
}
void
-DistributorTest::configureMaxClusterClockSkew(int seconds) {
+LegacyDistributorTest::configureMaxClusterClockSkew(int seconds) {
using namespace vespa::config::content::core;
ConfigBuilder builder;
@@ -770,7 +771,7 @@ DistributorTest::configureMaxClusterClockSkew(int seconds) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, max_clock_skew_config_is_propagated_to_distributor_config) {
+TEST_F(LegacyDistributorTest, max_clock_skew_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
configureMaxClusterClockSkew(5);
@@ -795,13 +796,7 @@ auto make_dummy_get_command_for_bucket_1() {
}
-void DistributorTest::sendDownClusterStateCommand() {
- lib::ClusterState newState("bits:1 storage:1 distributor:1");
- auto stateCmd = std::make_shared<api::SetSystemStateCommand>(newState);
- _distributor->handleMessage(stateCmd);
-}
-
-void DistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() {
+void LegacyDistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() {
ASSERT_EQ(_bucketSpaces.size(), _sender.commands().size());
for (uint32_t i = 0; i < _sender.commands().size(); ++i) {
ASSERT_EQ(api::MessageType::REQUESTBUCKETINFO, _sender.command(i)->getType());
@@ -821,11 +816,11 @@ void DistributorTest::replyToSingleRequestBucketInfoCommandWith1Bucket() {
_sender.commands().clear();
}
-void DistributorTest::sendDownDummyRemoveCommand() {
+void LegacyDistributorTest::sendDownDummyRemoveCommand() {
_distributor->handleMessage(makeDummyRemoveCommand());
}
-void DistributorTest::assertSingleBouncedRemoveReplyPresent() {
+void LegacyDistributorTest::assertSingleBouncedRemoveReplyPresent() {
ASSERT_EQ(1, _sender.replies().size()); // Rejected remove
ASSERT_EQ(api::MessageType::REMOVE_REPLY, _sender.reply(0)->getType());
auto& reply(static_cast<api::RemoveReply&>(*_sender.reply(0)));
@@ -833,7 +828,7 @@ void DistributorTest::assertSingleBouncedRemoveReplyPresent() {
_sender.replies().clear();
}
-void DistributorTest::assertNoMessageBounced() {
+void LegacyDistributorTest::assertNoMessageBounced() {
ASSERT_EQ(0, _sender.replies().size());
}
@@ -841,13 +836,13 @@ void DistributorTest::assertNoMessageBounced() {
// reply once we have the "highest timestamp across all owned buckets" feature
// in place.
// TODO where does this truly belong?
-TEST_F(DistributorTest, configured_safe_time_point_rejection_works_end_to_end) {
+TEST_F(LegacyDistributorTest, configured_safe_time_point_rejection_works_end_to_end) {
setupDistributor(Redundancy(2), NodeCount(2),
"bits:1 storage:1 distributor:2");
getClock().setAbsoluteTimeInSeconds(1000);
configureMaxClusterClockSkew(10);
- sendDownClusterStateCommand();
+ receive_set_system_state_command("bits:1 storage:1 distributor:1");
ASSERT_NO_FATAL_FAILURE(replyToSingleRequestBucketInfoCommandWith1Bucket());
// SetSystemStateCommand sent down chain at this point.
sendDownDummyRemoveCommand();
@@ -861,7 +856,7 @@ TEST_F(DistributorTest, configured_safe_time_point_rejection_works_end_to_end) {
ASSERT_NO_FATAL_FAILURE(assertNoMessageBounced());
}
-void DistributorTest::configure_mutation_sequencing(bool enabled) {
+void LegacyDistributorTest::configure_mutation_sequencing(bool enabled) {
using namespace vespa::config::content::core;
ConfigBuilder builder;
@@ -871,7 +866,7 @@ void DistributorTest::configure_mutation_sequencing(bool enabled) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, sequencing_config_is_propagated_to_distributor_config) {
+TEST_F(LegacyDistributorTest, sequencing_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
// Should be enabled by default
@@ -887,7 +882,7 @@ TEST_F(DistributorTest, sequencing_config_is_propagated_to_distributor_config) {
}
void
-DistributorTest::configure_merge_busy_inhibit_duration(int seconds) {
+LegacyDistributorTest::configure_merge_busy_inhibit_duration(int seconds) {
using namespace vespa::config::content::core;
ConfigBuilder builder;
@@ -897,7 +892,7 @@ DistributorTest::configure_merge_busy_inhibit_duration(int seconds) {
}
// TODO -> stripe test
-TEST_F(DistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_distributor_config) {
+TEST_F(LegacyDistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_distributor_config) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:2 distributor:1");
configure_merge_busy_inhibit_duration(7);
@@ -905,7 +900,7 @@ TEST_F(DistributorTest, merge_busy_inhibit_duration_config_is_propagated_to_dist
}
// TODO -> stripe test
-TEST_F(DistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker) {
+TEST_F(LegacyDistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker) {
setupDistributor(Redundancy(2), NodeCount(2), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t");
@@ -931,7 +926,7 @@ TEST_F(DistributorTest, merge_busy_inhibit_duration_is_propagated_to_pending_mes
}
// TODO -> stripe test
-TEST_F(DistributorTest, external_client_requests_are_handled_individually_in_priority_order) {
+TEST_F(LegacyDistributorTest, external_client_requests_are_handled_individually_in_priority_order) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
@@ -960,7 +955,7 @@ TEST_F(DistributorTest, external_client_requests_are_handled_individually_in_pri
}
// TODO -> stripe test
-TEST_F(DistributorTest, internal_messages_are_started_in_fifo_order_batch) {
+TEST_F(LegacyDistributorTest, internal_messages_are_started_in_fifo_order_batch) {
// To test internal request ordering, we use NotifyBucketChangeCommand
// for the reason that it explicitly updates the bucket database for
// each individual invocation.
@@ -990,7 +985,7 @@ TEST_F(DistributorTest, internal_messages_are_started_in_fifo_order_batch) {
// TODO -> stripe test
// TODO also test that closing distributor closes stripes
-TEST_F(DistributorTest, closing_aborts_priority_queued_client_requests) {
+TEST_F(LegacyDistributorTest, closing_aborts_priority_queued_client_requests) {
setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
document::BucketId bucket(16, 1);
addNodesToBucketDB(bucket, "0=1/1/1/t");
@@ -1031,7 +1026,7 @@ void assert_invalid_stats_for_all_spaces(
// TODO -> stripe test
// TODO must impl/test cross-stripe bucket space stats
// TODO cross-stripe recovery mode handling how?
-TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
+TEST_F(LegacyDistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
// Set up a cluster state + DB contents which implies merge maintenance ops
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
@@ -1053,7 +1048,7 @@ TEST_F(DistributorTest, entering_recovery_mode_resets_bucket_space_stats) {
}
// TODO figure out interaction between stripes and distributors on this one
-TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_replies) {
+TEST_F(LegacyDistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_replies) {
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
// Should not send explicit replies during init stage
ASSERT_EQ(0, explicit_node_state_reply_send_invocations());
@@ -1074,7 +1069,7 @@ TEST_F(DistributorTest, leaving_recovery_mode_immediately_sends_getnodestate_rep
EXPECT_EQ(1, explicit_node_state_reply_send_invocations());
}
-void DistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) {
+void LegacyDistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace space) {
setupDistributor(Redundancy(2), NodeCount(2), "version:1 distributor:1 storage:2");
EXPECT_TRUE(distributor_is_in_recovery_mode());
// 2 buckets with missing replicas triggering merge pending stats
@@ -1110,15 +1105,15 @@ void DistributorTest::do_test_pending_merge_getnodestate_reply_edge(BucketSpace
EXPECT_EQ(2, explicit_node_state_reply_send_invocations());
}
-TEST_F(DistributorTest, pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies) {
+TEST_F(LegacyDistributorTest, pending_to_no_pending_default_merges_edge_immediately_sends_getnodestate_replies) {
do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::default_space());
}
-TEST_F(DistributorTest, pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies) {
+TEST_F(LegacyDistributorTest, pending_to_no_pending_global_merges_edge_immediately_sends_getnodestate_replies) {
do_test_pending_merge_getnodestate_reply_edge(FixedBucketSpaces::global_space());
}
-TEST_F(DistributorTest, stale_reads_config_is_propagated_to_external_operation_handler) {
+TEST_F(LegacyDistributorTest, stale_reads_config_is_propagated_to_external_operation_handler) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1129,7 +1124,7 @@ TEST_F(DistributorTest, stale_reads_config_is_propagated_to_external_operation_h
EXPECT_FALSE(getExternalOperationHandler().concurrent_gets_enabled());
}
-TEST_F(DistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1140,7 +1135,7 @@ TEST_F(DistributorTest, fast_path_on_consistent_gets_config_is_propagated_to_int
EXPECT_FALSE(getConfig().update_fast_path_restart_enabled());
}
-TEST_F(DistributorTest, merge_disabling_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, merge_disabling_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1151,7 +1146,7 @@ TEST_F(DistributorTest, merge_disabling_config_is_propagated_to_internal_config)
EXPECT_FALSE(getConfig().merge_operations_disabled());
}
-TEST_F(DistributorTest, metadata_update_phase_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, metadata_update_phase_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1162,7 +1157,7 @@ TEST_F(DistributorTest, metadata_update_phase_config_is_propagated_to_internal_c
EXPECT_FALSE(getConfig().enable_metadata_only_fetch_phase_for_inconsistent_updates());
}
-TEST_F(DistributorTest, weak_internal_read_consistency_config_is_propagated_to_internal_configs) {
+TEST_F(LegacyDistributorTest, weak_internal_read_consistency_config_is_propagated_to_internal_configs) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1175,7 +1170,7 @@ TEST_F(DistributorTest, weak_internal_read_consistency_config_is_propagated_to_i
EXPECT_FALSE(getExternalOperationHandler().use_weak_internal_read_consistency_for_gets());
}
-void DistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enabled) {
+void LegacyDistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enabled) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
configure_stale_reads_enabled(enabled);
@@ -1185,7 +1180,7 @@ void DistributorTest::set_up_and_start_get_op_with_stale_reads_enabled(bool enab
_distributor->onDown(make_dummy_get_command_for_bucket_1());
}
-TEST_F(DistributorTest, gets_are_started_outside_main_distributor_logic_if_stale_reads_enabled) {
+TEST_F(LegacyDistributorTest, gets_are_started_outside_main_distributor_logic_if_stale_reads_enabled) {
set_up_and_start_get_op_with_stale_reads_enabled(true);
ASSERT_THAT(_sender.commands(), SizeIs(1));
EXPECT_THAT(_sender.replies(), SizeIs(0));
@@ -1197,7 +1192,7 @@ TEST_F(DistributorTest, gets_are_started_outside_main_distributor_logic_if_stale
EXPECT_THAT(_sender.replies(), SizeIs(1));
}
-TEST_F(DistributorTest, gets_are_not_started_outside_main_distributor_logic_if_stale_reads_disabled) {
+TEST_F(LegacyDistributorTest, gets_are_not_started_outside_main_distributor_logic_if_stale_reads_disabled) {
set_up_and_start_get_op_with_stale_reads_enabled(false);
// Get has been placed into distributor queue, so no external messages are produced.
EXPECT_THAT(_sender.commands(), SizeIs(0));
@@ -1207,21 +1202,21 @@ TEST_F(DistributorTest, gets_are_not_started_outside_main_distributor_logic_if_s
// There's no need or desire to track "lockfree" Gets in the main pending message tracker,
// as we only have to track mutations to inhibit maintenance ops safely. Furthermore,
// the message tracker is a multi-index and therefore has some runtime cost.
-TEST_F(DistributorTest, gets_started_outside_main_thread_are_not_tracked_by_main_pending_message_tracker) {
+TEST_F(LegacyDistributorTest, gets_started_outside_main_thread_are_not_tracked_by_main_pending_message_tracker) {
set_up_and_start_get_op_with_stale_reads_enabled(true);
Bucket bucket(FixedBucketSpaces::default_space(), BucketId(16, 1));
EXPECT_FALSE(pending_message_tracker().hasPendingMessage(
0, bucket, api::MessageType::GET_ID));
}
-TEST_F(DistributorTest, closing_aborts_gets_started_outside_main_distributor_thread) {
+TEST_F(LegacyDistributorTest, closing_aborts_gets_started_outside_main_distributor_thread) {
set_up_and_start_get_op_with_stale_reads_enabled(true);
_distributor->close();
ASSERT_EQ(1, _sender.replies().size());
EXPECT_EQ(api::ReturnCode::ABORTED, _sender.reply(0)->getResult().getResult());
}
-TEST_F(DistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1232,7 +1227,7 @@ TEST_F(DistributorTest, prioritize_global_bucket_merges_config_is_propagated_to_
EXPECT_FALSE(getConfig().prioritize_global_bucket_merges());
}
-TEST_F(DistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_propagated_to_internal_config) {
+TEST_F(LegacyDistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_propagated_to_internal_config) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1243,7 +1238,7 @@ TEST_F(DistributorTest, max_activation_inhibited_out_of_sync_groups_config_is_pr
EXPECT_EQ(getConfig().max_activation_inhibited_out_of_sync_groups(), 0);
}
-TEST_F(DistributorTest, wanted_split_bit_count_is_lower_bounded) {
+TEST_F(LegacyDistributorTest, wanted_split_bit_count_is_lower_bounded) {
createLinks();
setupDistributor(Redundancy(1), NodeCount(1), "distributor:1 storage:1");
@@ -1254,7 +1249,7 @@ TEST_F(DistributorTest, wanted_split_bit_count_is_lower_bounded) {
EXPECT_EQ(getConfig().getMinimalBucketSplit(), 8);
}
-TEST_F(DistributorTest, host_info_sent_immediately_once_all_stripes_first_reported) {
+TEST_F(LegacyDistributorTest, host_info_sent_immediately_once_all_stripes_first_reported) {
set_num_distributor_stripes(4);
createLinks();
getClock().setAbsoluteTimeInSeconds(1000);
@@ -1283,7 +1278,7 @@ TEST_F(DistributorTest, host_info_sent_immediately_once_all_stripes_first_report
}
// TODO STRIPE make delay configurable instead of hardcoded
-TEST_F(DistributorTest, non_bootstrap_host_info_send_request_delays_sending) {
+TEST_F(LegacyDistributorTest, non_bootstrap_host_info_send_request_delays_sending) {
set_num_distributor_stripes(4);
createLinks();
getClock().setAbsoluteTimeInSeconds(1000);
diff --git a/storage/src/tests/distributor/maintenancemocks.h b/storage/src/tests/distributor/maintenancemocks.h
index fff798d4413..1245c9bb15d 100644
--- a/storage/src/tests/distributor/maintenancemocks.h
+++ b/storage/src/tests/distributor/maintenancemocks.h
@@ -51,7 +51,7 @@ public:
}
void onStart(DistributorStripeMessageSender&) override {}
void onReceive(DistributorStripeMessageSender&, const std::shared_ptr<api::StorageReply>&) override {}
- bool isBlocked(const PendingMessageTracker&, const OperationSequencer&) const override {
+ bool isBlocked(const DistributorStripeOperationContext&, const OperationSequencer&) const override {
return _shouldBlock;
}
void setShouldBlock(bool shouldBlock) {
diff --git a/storage/src/tests/distributor/mergeoperationtest.cpp b/storage/src/tests/distributor/mergeoperationtest.cpp
index 1026ea2855e..52a8bfc41b6 100644
--- a/storage/src/tests/distributor/mergeoperationtest.cpp
+++ b/storage/src/tests/distributor/mergeoperationtest.cpp
@@ -18,13 +18,11 @@ using namespace ::testing;
namespace storage::distributor {
struct MergeOperationTest : Test, DistributorTestUtil {
- std::unique_ptr<PendingMessageTracker> _pendingTracker;
OperationSequencer _operation_sequencer;
void SetUp() override {
createLinks();
- _pendingTracker = std::make_unique<PendingMessageTracker>(getComponentRegister());
- _sender.setPendingMessageTracker(*_pendingTracker);
+ _sender.setPendingMessageTracker(pending_message_tracker());
_sender.set_operation_sequencer(_operation_sequencer);
}
@@ -256,7 +254,7 @@ TEST_F(MergeOperationTest, do_not_remove_copies_with_pending_messages) {
makeDocumentBucket(bucket), api::SetBucketStateCommand::ACTIVE);
vespalib::string storage("storage");
msg->setAddress(api::StorageMessageAddress::create(&storage, lib::NodeType::STORAGE, 1));
- _pendingTracker->insert(msg);
+ pending_message_tracker().insert(msg);
sendReply(op);
// Should not be a remove here!
@@ -400,19 +398,19 @@ TEST_F(MergeOperationTest, merge_operation_is_blocked_by_any_busy_target_node) {
op.setIdealStateManager(&getIdealStateManager());
// Should not block on nodes _not_ included in operation node set
- _pendingTracker->getNodeInfo().setBusy(3, std::chrono::seconds(10));
- EXPECT_FALSE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ pending_message_tracker().getNodeInfo().setBusy(3, std::chrono::seconds(10));
+ EXPECT_FALSE(op.isBlocked(operation_context(), _operation_sequencer));
// Node 1 is included in operation node set and should cause a block
- _pendingTracker->getNodeInfo().setBusy(0, std::chrono::seconds(10));
- EXPECT_TRUE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ pending_message_tracker().getNodeInfo().setBusy(0, std::chrono::seconds(10));
+ EXPECT_TRUE(op.isBlocked(operation_context(), _operation_sequencer));
getClock().addSecondsToTime(11);
- EXPECT_FALSE(op.isBlocked(*_pendingTracker, _operation_sequencer)); // No longer busy
+ EXPECT_FALSE(op.isBlocked(operation_context(), _operation_sequencer)); // No longer busy
// Should block on other operation nodes than the first listed as well
- _pendingTracker->getNodeInfo().setBusy(1, std::chrono::seconds(10));
- EXPECT_TRUE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ pending_message_tracker().getNodeInfo().setBusy(1, std::chrono::seconds(10));
+ EXPECT_TRUE(op.isBlocked(operation_context(), _operation_sequencer));
}
@@ -426,8 +424,8 @@ TEST_F(MergeOperationTest, global_bucket_merges_are_not_blocked_by_busy_nodes) {
op.setIdealStateManager(&getIdealStateManager());
// Node 1 is included in operation node set but should not cause a block of global bucket merge
- _pendingTracker->getNodeInfo().setBusy(0, std::chrono::seconds(10));
- EXPECT_FALSE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ pending_message_tracker().getNodeInfo().setBusy(0, std::chrono::seconds(10));
+ EXPECT_FALSE(op.isBlocked(operation_context(), _operation_sequencer));
}
TEST_F(MergeOperationTest, merge_operation_is_blocked_by_locked_bucket) {
@@ -437,10 +435,10 @@ TEST_F(MergeOperationTest, merge_operation_is_blocked_by_locked_bucket) {
MergeOperation op(BucketAndNodes(makeDocumentBucket(document::BucketId(16, 1)), toVector<uint16_t>(0, 1, 2)));
op.setIdealStateManager(&getIdealStateManager());
- EXPECT_FALSE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ EXPECT_FALSE(op.isBlocked(operation_context(), _operation_sequencer));
auto token = _operation_sequencer.try_acquire(makeDocumentBucket(document::BucketId(16, 1)), "foo");
EXPECT_TRUE(token.valid());
- EXPECT_TRUE(op.isBlocked(*_pendingTracker, _operation_sequencer));
+ EXPECT_TRUE(op.isBlocked(operation_context(), _operation_sequencer));
}
TEST_F(MergeOperationTest, missing_replica_is_included_in_limited_node_list) {
diff --git a/storage/src/tests/distributor/simplemaintenancescannertest.cpp b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
index 58dc2430041..1bf3809b135 100644
--- a/storage/src/tests/distributor/simplemaintenancescannertest.cpp
+++ b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <tests/distributor/maintenancemocks.h>
+#include <vespa/document/bucket/fixed_bucket_spaces.h>
#include <vespa/document/test/make_bucket_space.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/storage/distributor/distributor_bucket_space_repo.h>
@@ -209,4 +210,88 @@ TEST_F(SimpleMaintenanceScannerTest, per_node_maintenance_stats_are_tracked) {
}
}
+TEST_F(SimpleMaintenanceScannerTest, merge_node_maintenance_stats) {
+
+ NodeMaintenanceStats stats_a;
+ stats_a.movingOut = 1;
+ stats_a.syncing = 2;
+ stats_a.copyingIn = 3;
+ stats_a.copyingOut = 4;
+ stats_a.total = 5;
+
+ NodeMaintenanceStats stats_b;
+ stats_b.movingOut = 10;
+ stats_b.syncing = 20;
+ stats_b.copyingIn = 30;
+ stats_b.copyingOut = 40;
+ stats_b.total = 50;
+
+ NodeMaintenanceStats result;
+ result.merge(stats_a);
+ result.merge(stats_b);
+
+ NodeMaintenanceStats exp;
+ exp.movingOut = 11;
+ exp.syncing = 22;
+ exp.copyingIn = 33;
+ exp.copyingOut = 44;
+ exp.total = 55;
+ EXPECT_EQ(exp, result);
+}
+
+TEST_F(SimpleMaintenanceScannerTest, merge_pending_maintenance_stats) {
+ auto default_space = document::FixedBucketSpaces::default_space();
+ auto global_space = document::FixedBucketSpaces::global_space();
+
+ PendingStats stats_a;
+ stats_a.global.pending[MaintenanceOperation::DELETE_BUCKET] = 1;
+ stats_a.global.pending[MaintenanceOperation::MERGE_BUCKET] = 2;
+ stats_a.global.pending[MaintenanceOperation::SPLIT_BUCKET] = 3;
+ stats_a.global.pending[MaintenanceOperation::JOIN_BUCKET] = 4;
+ stats_a.global.pending[MaintenanceOperation::SET_BUCKET_STATE] = 5;
+ stats_a.global.pending[MaintenanceOperation::GARBAGE_COLLECTION] = 6;
+ stats_a.perNodeStats.incMovingOut(3, default_space);
+ stats_a.perNodeStats.incSyncing(3, global_space);
+ stats_a.perNodeStats.incCopyingIn(5, default_space);
+ stats_a.perNodeStats.incCopyingOut(5, global_space);
+ stats_a.perNodeStats.incTotal(5, default_space);
+
+ PendingStats stats_b;
+ stats_b.global.pending[MaintenanceOperation::DELETE_BUCKET] = 10;
+ stats_b.global.pending[MaintenanceOperation::MERGE_BUCKET] = 20;
+ stats_b.global.pending[MaintenanceOperation::SPLIT_BUCKET] = 30;
+ stats_b.global.pending[MaintenanceOperation::JOIN_BUCKET] = 40;
+ stats_b.global.pending[MaintenanceOperation::SET_BUCKET_STATE] = 50;
+ stats_b.global.pending[MaintenanceOperation::GARBAGE_COLLECTION] = 60;
+ stats_b.perNodeStats.incMovingOut(7, default_space);
+ stats_b.perNodeStats.incSyncing(7, global_space);
+ stats_b.perNodeStats.incCopyingIn(5, default_space);
+ stats_b.perNodeStats.incCopyingOut(5, global_space);
+ stats_b.perNodeStats.incTotal(5, default_space);
+
+ PendingStats result;
+ result.merge(stats_a);
+ result.merge(stats_b);
+
+ PendingStats exp;
+ exp.global.pending[MaintenanceOperation::DELETE_BUCKET] = 11;
+ exp.global.pending[MaintenanceOperation::MERGE_BUCKET] = 22;
+ exp.global.pending[MaintenanceOperation::SPLIT_BUCKET] = 33;
+ exp.global.pending[MaintenanceOperation::JOIN_BUCKET] = 44;
+ exp.global.pending[MaintenanceOperation::SET_BUCKET_STATE] = 55;
+ exp.global.pending[MaintenanceOperation::GARBAGE_COLLECTION] = 66;
+ exp.perNodeStats.incMovingOut(3, default_space);
+ exp.perNodeStats.incSyncing(3, global_space);
+ exp.perNodeStats.incCopyingIn(5, default_space);
+ exp.perNodeStats.incCopyingIn(5, default_space);
+ exp.perNodeStats.incCopyingOut(5, global_space);
+ exp.perNodeStats.incCopyingOut(5, global_space);
+ exp.perNodeStats.incTotal(5, default_space);
+ exp.perNodeStats.incTotal(5, default_space);
+ exp.perNodeStats.incMovingOut(7, default_space);
+ exp.perNodeStats.incSyncing(7, global_space);
+ EXPECT_EQ(exp.global, result.global);
+ EXPECT_EQ(exp.perNodeStats, result.perNodeStats);
+}
+
}
diff --git a/storage/src/tests/distributor/splitbuckettest.cpp b/storage/src/tests/distributor/splitbuckettest.cpp
index 8c8da1bb197..ec58992ed3e 100644
--- a/storage/src/tests/distributor/splitbuckettest.cpp
+++ b/storage/src/tests/distributor/splitbuckettest.cpp
@@ -261,7 +261,6 @@ TEST_F(SplitOperationTest, operation_blocked_by_pending_join) {
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
clock.setAbsoluteTimeInSeconds(1);
- PendingMessageTracker tracker(compReg);
OperationSequencer op_seq;
enableDistributorClusterState("distributor:1 storage:2");
@@ -274,7 +273,7 @@ TEST_F(SplitOperationTest, operation_blocked_by_pending_join) {
joinCmd->getSourceBuckets() = joinSources;
joinCmd->setAddress(_Storage0Address);
- tracker.insert(joinCmd);
+ pending_message_tracker().insert(joinCmd);
insertBucketInfo(joinTarget, 0, 0xabc, 1000, 1234, true);
@@ -284,18 +283,18 @@ TEST_F(SplitOperationTest, operation_blocked_by_pending_join) {
splitCount,
splitByteSize);
- EXPECT_TRUE(op.isBlocked(tracker, op_seq));
+ EXPECT_TRUE(op.isBlocked(operation_context(), op_seq));
// Now, pretend there's a join for another node in the same bucket. This
// will happen when a join is partially completed.
- tracker.clearMessagesForNode(0);
- EXPECT_FALSE(op.isBlocked(tracker, op_seq));
+ pending_message_tracker().clearMessagesForNode(0);
+ EXPECT_FALSE(op.isBlocked(operation_context(), op_seq));
joinCmd->setAddress(api::StorageMessageAddress::create(dummy_cluster_context.cluster_name_ptr(),
lib::NodeType::STORAGE, 1));
- tracker.insert(joinCmd);
+ pending_message_tracker().insert(joinCmd);
- EXPECT_TRUE(op.isBlocked(tracker, op_seq));
+ EXPECT_TRUE(op.isBlocked(operation_context(), op_seq));
}
TEST_F(SplitOperationTest, split_is_blocked_by_locked_bucket) {
@@ -303,7 +302,6 @@ TEST_F(SplitOperationTest, split_is_blocked_by_locked_bucket) {
framework::defaultimplementation::FakeClock clock;
compReg.setClock(clock);
clock.setAbsoluteTimeInSeconds(1);
- PendingMessageTracker tracker(compReg);
OperationSequencer op_seq;
enableDistributorClusterState("distributor:1 storage:2");
@@ -314,10 +312,10 @@ TEST_F(SplitOperationTest, split_is_blocked_by_locked_bucket) {
SplitOperation op(dummy_cluster_context, BucketAndNodes(makeDocumentBucket(source_bucket), toVector<uint16_t>(0)),
maxSplitBits, splitCount, splitByteSize);
- EXPECT_FALSE(op.isBlocked(tracker, op_seq));
+ EXPECT_FALSE(op.isBlocked(operation_context(), op_seq));
auto token = op_seq.try_acquire(makeDocumentBucket(source_bucket), "foo");
EXPECT_TRUE(token.valid());
- EXPECT_TRUE(op.isBlocked(tracker, op_seq));
+ EXPECT_TRUE(op.isBlocked(operation_context(), op_seq));
}
} // storage::distributor
diff --git a/storage/src/tests/storageserver/mergethrottlertest.cpp b/storage/src/tests/storageserver/mergethrottlertest.cpp
index 12ed9ead1b6..dfeaee031ba 100644
--- a/storage/src/tests/storageserver/mergethrottlertest.cpp
+++ b/storage/src/tests/storageserver/mergethrottlertest.cpp
@@ -1220,6 +1220,7 @@ TEST_F(MergeThrottlerTest, unknown_merge_with_self_in_chain) {
TEST_F(MergeThrottlerTest, busy_returned_on_full_queue) {
size_t maxPending = _throttlers[0]->getThrottlePolicy().getMaxPendingCount();
size_t maxQueue = _throttlers[0]->getMaxQueueSize();
+ ASSERT_EQ(20, maxQueue);
ASSERT_LT(maxPending, 100);
for (std::size_t i = 0; i < maxPending + maxQueue; ++i) {
std::vector<MergeBucketCommand::Node> nodes;
@@ -1234,6 +1235,7 @@ TEST_F(MergeThrottlerTest, busy_returned_on_full_queue) {
// Wait till we have maxPending replies and maxQueue queued
_topLinks[0]->waitForMessages(maxPending, _messageWaitTime);
waitUntilMergeQueueIs(*_throttlers[0], maxQueue, _messageWaitTime);
+ EXPECT_EQ(maxQueue, _throttlers[0]->getMetrics().queueSize.getMaximum());
// Clear all forwarded merges
_topLinks[0]->getRepliesOnce();
diff --git a/storage/src/vespa/storage/config/distributorconfiguration.h b/storage/src/vespa/storage/config/distributorconfiguration.h
index 479298ff082..7aa10893b80 100644
--- a/storage/src/vespa/storage/config/distributorconfiguration.h
+++ b/storage/src/vespa/storage/config/distributorconfiguration.h
@@ -9,7 +9,7 @@
namespace storage {
-namespace distributor { struct DistributorTest; }
+namespace distributor { struct LegacyDistributorTest; }
class DistributorConfiguration {
public:
@@ -323,7 +323,7 @@ private:
DistrConfig::MinimumReplicaCountingMode _minimumReplicaCountingMode;
- friend struct distributor::DistributorTest;
+ friend struct distributor::LegacyDistributorTest;
void configureMaintenancePriorities(
const vespa::config::content::core::StorDistributormanagerConfig&);
};
diff --git a/storage/src/vespa/storage/distributor/CMakeLists.txt b/storage/src/vespa/storage/distributor/CMakeLists.txt
index 7b048e9f109..eba76c91af0 100644
--- a/storage/src/vespa/storage/distributor/CMakeLists.txt
+++ b/storage/src/vespa/storage/distributor/CMakeLists.txt
@@ -7,6 +7,7 @@ vespa_add_library(storage_distributor
bucket_space_distribution_configs.cpp
bucket_space_distribution_context.cpp
bucket_space_state_map.cpp
+ bucket_spaces_stats_provider.cpp
bucketdbupdater.cpp
bucketgctimecalculator.cpp
bucketlistmerger.cpp
@@ -22,13 +23,16 @@ vespa_add_library(storage_distributor
distributor_stripe_component.cpp
distributor_stripe_pool.cpp
distributor_stripe_thread.cpp
+ distributor_total_metrics.cpp
distributormessagesender.cpp
distributormetricsset.cpp
externaloperationhandler.cpp
ideal_service_layer_nodes_bundle.cpp
+ ideal_state_total_metrics.cpp
idealstatemanager.cpp
idealstatemetricsset.cpp
messagetracker.cpp
+ min_replica_provider.cpp
multi_threaded_stripe_access_guard.cpp
nodeinfo.cpp
operation_routing_snapshot.cpp
diff --git a/storage/src/vespa/storage/distributor/blockingoperationstarter.cpp b/storage/src/vespa/storage/distributor/blockingoperationstarter.cpp
index 25c38888098..e9b53e35b61 100644
--- a/storage/src/vespa/storage/distributor/blockingoperationstarter.cpp
+++ b/storage/src/vespa/storage/distributor/blockingoperationstarter.cpp
@@ -7,7 +7,7 @@ namespace storage::distributor {
bool
BlockingOperationStarter::start(const std::shared_ptr<Operation>& operation, Priority priority)
{
- if (operation->isBlocked(_messageTracker, _operation_sequencer)) {
+ if (operation->isBlocked(_operation_context, _operation_sequencer)) {
return true;
}
return _starterImpl.start(operation, priority);
diff --git a/storage/src/vespa/storage/distributor/blockingoperationstarter.h b/storage/src/vespa/storage/distributor/blockingoperationstarter.h
index e79ae6b4a79..180e617d08d 100644
--- a/storage/src/vespa/storage/distributor/blockingoperationstarter.h
+++ b/storage/src/vespa/storage/distributor/blockingoperationstarter.h
@@ -6,16 +6,16 @@
namespace storage::distributor {
-class PendingMessageTracker;
+class DistributorStripeOperationContext;
class OperationSequencer;
class BlockingOperationStarter : public OperationStarter
{
public:
- BlockingOperationStarter(PendingMessageTracker& messageTracker,
+ BlockingOperationStarter(DistributorStripeOperationContext& ctx,
OperationSequencer& operation_sequencer,
OperationStarter& starterImpl)
- : _messageTracker(messageTracker),
+ : _operation_context(ctx),
_operation_sequencer(operation_sequencer),
_starterImpl(starterImpl)
{}
@@ -24,9 +24,9 @@ public:
bool start(const std::shared_ptr<Operation>& operation, Priority priority) override;
private:
- PendingMessageTracker& _messageTracker;
- OperationSequencer& _operation_sequencer;
- OperationStarter& _starterImpl;
+ DistributorStripeOperationContext& _operation_context;
+ OperationSequencer& _operation_sequencer;
+ OperationStarter& _starterImpl;
};
}
diff --git a/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.cpp b/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.cpp
new file mode 100644
index 00000000000..2b12d437aaa
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.cpp
@@ -0,0 +1,40 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "bucket_spaces_stats_provider.h"
+
+namespace storage::distributor {
+
+std::ostream&
+operator<<(std::ostream& out, const BucketSpaceStats& stats)
+{
+ out << "{valid=" << stats.valid() << ", bucketsTotal=" << stats.bucketsTotal() << ", bucketsPending=" << stats.bucketsPending() << "}";
+ return out;
+}
+
+void
+merge_bucket_spaces_stats(BucketSpacesStatsProvider::BucketSpacesStats& dest,
+ const BucketSpacesStatsProvider::BucketSpacesStats& src)
+{
+ for (const auto& entry : src) {
+ const auto& bucket_space_name = entry.first;
+ auto itr = dest.find(bucket_space_name);
+ if (itr != dest.end()) {
+ itr->second.merge(entry.second);
+ } else {
+ // We need to explicitly handle this case to avoid creating an empty BucketSpaceStats that is not valid.
+ dest[bucket_space_name] = entry.second;
+ }
+ }
+}
+
+void
+merge_per_node_bucket_spaces_stats(BucketSpacesStatsProvider::PerNodeBucketSpacesStats& dest,
+ const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& src)
+{
+ for (const auto& entry : src) {
+ auto node_index = entry.first;
+ merge_bucket_spaces_stats(dest[node_index], entry.second);
+ }
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h b/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h
index 3d7b60f4471..c8ba04ed1ab 100644
--- a/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h
+++ b/storage/src/vespa/storage/distributor/bucket_spaces_stats_provider.h
@@ -3,6 +3,7 @@
#include <vespa/vespalib/stllike/string.h>
#include <map>
+#include <ostream>
#include <unordered_map>
namespace storage::distributor {
@@ -32,8 +33,22 @@ public:
bool valid() const noexcept { return _valid; }
size_t bucketsTotal() const noexcept { return _bucketsTotal; }
size_t bucketsPending() const noexcept { return _bucketsPending; }
+
+ bool operator==(const BucketSpaceStats& rhs) const {
+ return (_valid == rhs._valid) &&
+ (_bucketsTotal == rhs._bucketsTotal) &&
+ (_bucketsPending == rhs._bucketsPending);
+ }
+
+ void merge(const BucketSpaceStats& rhs) {
+ _valid = _valid && rhs._valid;
+ _bucketsTotal += rhs._bucketsTotal;
+ _bucketsPending += rhs._bucketsPending;
+ }
};
+std::ostream& operator<<(std::ostream& out, const BucketSpaceStats& stats);
+
/**
* Interface that provides snapshots of bucket spaces statistics per content node.
*/
@@ -48,4 +63,10 @@ public:
virtual PerNodeBucketSpacesStats getBucketSpacesStats() const = 0;
};
+void merge_bucket_spaces_stats(BucketSpacesStatsProvider::BucketSpacesStats& dest,
+ const BucketSpacesStatsProvider::BucketSpacesStats& src);
+
+void merge_per_node_bucket_spaces_stats(BucketSpacesStatsProvider::PerNodeBucketSpacesStats& dest,
+ const BucketSpacesStatsProvider::PerNodeBucketSpacesStats& src);
+
}
diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp
index 368a25315fb..6f9cbf3b0f2 100644
--- a/storage/src/vespa/storage/distributor/distributor.cpp
+++ b/storage/src/vespa/storage/distributor/distributor.cpp
@@ -9,7 +9,7 @@
#include "distributor_stripe.h"
#include "distributor_stripe_pool.h"
#include "distributor_stripe_thread.h"
-#include "distributormetricsset.h"
+#include "distributor_total_metrics.h"
#include "idealstatemetricsset.h"
#include "multi_threaded_stripe_access_guard.h"
#include "operation_sequencer.h"
@@ -24,6 +24,7 @@
#include <vespa/storage/config/distributorconfiguration.h>
#include <vespa/storage/distributor/maintenance/simplebucketprioritydatabase.h>
#include <vespa/storageapi/message/persistence.h>
+#include <vespa/storageapi/message/visitor.h>
#include <vespa/storageframework/generic/status/xmlstatusreporter.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/util/memoryusage.h>
@@ -58,18 +59,29 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
: StorageLink("distributor"),
framework::StatusReporter("distributor", "Distributor"),
_comp_reg(compReg),
+ _use_legacy_mode(num_distributor_stripes == 0),
_metrics(std::make_shared<DistributorMetricSet>()),
+ _total_metrics(_use_legacy_mode ? std::shared_ptr<DistributorTotalMetrics>() :
+ std::make_shared<DistributorTotalMetrics>(num_distributor_stripes)),
+ _ideal_state_metrics(_use_legacy_mode ? std::make_shared<IdealStateMetricSet>() : std::shared_ptr<IdealStateMetricSet>()),
+ _ideal_state_total_metrics(_use_legacy_mode ? std::shared_ptr<IdealStateTotalMetrics>() :
+ std::make_shared<IdealStateTotalMetrics>(num_distributor_stripes)),
_messageSender(messageSender),
- _use_legacy_mode(num_distributor_stripes == 0),
_n_stripe_bits(0),
- _stripe(std::make_unique<DistributorStripe>(compReg, *_metrics, node_identity, threadPool,
+ _stripe(std::make_unique<DistributorStripe>(compReg,
+ _use_legacy_mode ? *_metrics : _total_metrics->stripe(0),
+ _use_legacy_mode ? *_ideal_state_metrics : _ideal_state_total_metrics->stripe(0),
+ node_identity, threadPool,
doneInitHandler, *this, *this, _use_legacy_mode)),
_stripe_pool(stripe_pool),
_stripes(),
_stripe_accessor(),
+ _random_stripe_gen(),
+ _random_stripe_gen_mutex(),
_message_queue(),
_fetched_messages(),
_component(*this, compReg, "distributor"),
+ _ideal_state_component(compReg, "Ideal state manager"),
_total_config(_component.total_distributor_config_sp()),
_bucket_db_updater(),
_distributorStatusDelegate(compReg, *this, *this),
@@ -88,7 +100,9 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
_next_distribution(),
_current_internal_config_generation(_component.internal_config_generation())
{
- _component.registerMetric(*_metrics);
+ _component.registerMetric(_use_legacy_mode ? *_metrics : *_total_metrics);
+ _ideal_state_component.registerMetric(_use_legacy_mode ? *_ideal_state_metrics :
+ *_ideal_state_total_metrics);
_component.registerMetricUpdateHook(_metricUpdateHook, framework::SecondTime(0));
if (!_use_legacy_mode) {
assert(num_distributor_stripes == adjusted_num_stripes(num_distributor_stripes));
@@ -102,7 +116,10 @@ Distributor::Distributor(DistributorComponentRegister& compReg,
*_stripe_accessor);
_stripes.emplace_back(std::move(_stripe));
for (size_t i = 1; i < num_distributor_stripes; ++i) {
- _stripes.emplace_back(std::make_unique<DistributorStripe>(compReg, *_metrics, node_identity, threadPool,
+ _stripes.emplace_back(std::make_unique<DistributorStripe>(compReg,
+ _total_metrics->stripe(i),
+ _ideal_state_total_metrics->stripe(i),
+ node_identity, threadPool,
doneInitHandler, *this, *this, _use_legacy_mode, i));
}
_stripe_scan_stats.resize(num_distributor_stripes);
@@ -121,16 +138,10 @@ Distributor::~Distributor()
closeNextLink();
}
-// TODO STRIPE remove
-DistributorStripe&
-Distributor::first_stripe() noexcept {
- return *_stripes[0];
-}
-
-// TODO STRIPE remove
-const DistributorStripe&
-Distributor::first_stripe() const noexcept {
- return *_stripes[0];
+DistributorMetricSet&
+Distributor::getMetrics()
+{
+ return _use_legacy_mode ? *_metrics : _total_metrics->bucket_db_updater_metrics();
}
// TODO STRIPE figure out how to handle inspection functions used by tests when legacy mode no longer exists.
@@ -319,6 +330,7 @@ namespace {
bool should_be_handled_by_top_level_bucket_db_updater(const api::StorageMessage& msg) noexcept {
switch (msg.getType().getId()) {
case api::MessageType::SETSYSTEMSTATE_ID:
+ case api::MessageType::GETNODESTATE_ID:
case api::MessageType::ACTIVATE_CLUSTER_STATE_VERSION_ID:
return true;
case api::MessageType::REQUESTBUCKETINFO_REPLY_ID:
@@ -341,15 +353,13 @@ get_bucket_id_for_striping(const api::StorageMessage& msg, const DistributorNode
case api::MessageType::REMOVE_ID:
return node_ctx.bucket_id_factory().getBucketId(dynamic_cast<const api::TestAndSetCommand&>(msg).getDocumentId());
case api::MessageType::REQUESTBUCKETINFO_REPLY_ID:
- {
- const auto& reply = dynamic_cast<const api::RequestBucketInfoReply&>(msg);
- if (!reply.getBucketInfo().empty()) {
- // Note: All bucket ids in this reply belong to the same distributor stripe, so we just use the first entry.
- return reply.getBucketInfo()[0]._bucketId;
- } else {
- return reply.getBucketId();
- }
- }
+ return dynamic_cast<const api::RequestBucketInfoReply&>(msg).super_bucket_id();
+ case api::MessageType::GET_ID:
+ return node_ctx.bucket_id_factory().getBucketId(dynamic_cast<const api::GetCommand&>(msg).getDocumentId());
+ case api::MessageType::VISITOR_CREATE_ID:
+ return dynamic_cast<const api::CreateVisitorCommand&>(msg).super_bucket_id();
+ case api::MessageType::VISITOR_CREATE_REPLY_ID:
+ return dynamic_cast<const api::CreateVisitorReply&>(msg).super_bucket_id();
default:
return msg.getBucketId();
}
@@ -357,16 +367,31 @@ get_bucket_id_for_striping(const api::StorageMessage& msg, const DistributorNode
return msg.getBucketId();
}
+}
+
uint32_t
-stripe_of_bucket_id(const document::BucketId& bucketd_id, uint8_t n_stripe_bits)
+Distributor::random_stripe_idx()
{
- if (!bucketd_id.isSet()) {
- // TODO STRIPE: Messages with a non-set bucket id should be handled by the top-level distributor instead.
- return 0;
- }
- return storage::stripe_of_bucket_key(bucketd_id.toKey(), n_stripe_bits);
+ std::lock_guard lock(_random_stripe_gen_mutex);
+ return _random_stripe_gen.nextUint32() % _stripes.size();
}
+uint32_t
+Distributor::stripe_of_bucket_id(const document::BucketId& bucket_id, const api::StorageMessage& msg)
+{
+ if (!bucket_id.isSet()) {
+ LOG(error, "Message (%s) has a bucket id (%s) that is not set. Cannot route to stripe",
+ msg.toString(true).c_str(), bucket_id.toString().c_str());
+ }
+ assert(bucket_id.isSet());
+ if (bucket_id.getUsedBits() < spi::BucketLimits::MinUsedBits) {
+ if (msg.getType().getId() == api::MessageType::VISITOR_CREATE_ID) {
+ // This message will eventually be bounced with api::ReturnCode::WRONG_DISTRIBUTION,
+ // so we can just route it to a random distributor stripe.
+ return random_stripe_idx();
+ }
+ }
+ return storage::stripe_of_bucket_key(bucket_id.toKey(), _n_stripe_bits);
}
bool
@@ -382,7 +407,7 @@ Distributor::onDown(const std::shared_ptr<api::StorageMessage>& msg)
return true;
}
auto bucket_id = get_bucket_id_for_striping(*msg, _component);
- uint32_t stripe_idx = stripe_of_bucket_id(bucket_id, _n_stripe_bits);
+ uint32_t stripe_idx = stripe_of_bucket_id(bucket_id, *msg);
MBUS_TRACE(msg->getTrace(), 9,
vespalib::make_string("Distributor::onDown(): Dispatch message to stripe %u", stripe_idx));
bool handled = _stripes[stripe_idx]->handle_or_enqueue_message(msg);
@@ -499,44 +524,55 @@ Distributor::propagateDefaultDistribution(
std::unordered_map<uint16_t, uint32_t>
Distributor::getMinReplica() const
{
- // TODO STRIPE merged snapshot from all stripes
if (_use_legacy_mode) {
return _stripe->getMinReplica();
} else {
- return first_stripe().getMinReplica();
+ std::unordered_map<uint16_t, uint32_t> result;
+ for (const auto& stripe : _stripes) {
+ merge_min_replica_stats(result, stripe->getMinReplica());
+ }
+ return result;
}
}
BucketSpacesStatsProvider::PerNodeBucketSpacesStats
Distributor::getBucketSpacesStats() const
{
- // TODO STRIPE merged snapshot from all stripes
if (_use_legacy_mode) {
return _stripe->getBucketSpacesStats();
} else {
- return first_stripe().getBucketSpacesStats();
+ BucketSpacesStatsProvider::PerNodeBucketSpacesStats result;
+ for (const auto& stripe : _stripes) {
+ merge_per_node_bucket_spaces_stats(result, stripe->getBucketSpacesStats());
+ }
+ return result;
}
}
SimpleMaintenanceScanner::PendingMaintenanceStats
Distributor::pending_maintenance_stats() const {
- // TODO STRIPE merged snapshot from all stripes
if (_use_legacy_mode) {
return _stripe->pending_maintenance_stats();
} else {
- return first_stripe().pending_maintenance_stats();
+ SimpleMaintenanceScanner::PendingMaintenanceStats result;
+ for (const auto& stripe : _stripes) {
+ result.merge(stripe->pending_maintenance_stats());
+ }
+ return result;
}
}
void
Distributor::propagateInternalScanMetricsToExternal()
{
- // TODO STRIPE propagate to all stripes
- // TODO STRIPE reconsider metric wiring...
if (_use_legacy_mode) {
_stripe->propagateInternalScanMetricsToExternal();
} else {
- first_stripe().propagateInternalScanMetricsToExternal();
+ for (auto &stripe : _stripes) {
+ stripe->propagateInternalScanMetricsToExternal();
+ }
+ _total_metrics->aggregate();
+ _ideal_state_total_metrics->aggregate();
}
}
@@ -732,13 +768,8 @@ Distributor::getReportContentType(const framework::HttpUrlPath& path) const
std::string
Distributor::getActiveIdealStateOperations() const
{
- // TODO STRIPE need to aggregate status responses _across_ stripes..!
- if (_use_legacy_mode) {
- return _stripe->getActiveIdealStateOperations();
- } else {
- auto guard = _stripe_accessor->rendezvous_and_hold_all();
- return first_stripe().getActiveIdealStateOperations();
- }
+ assert(_use_legacy_mode);
+ return _stripe->getActiveIdealStateOperations();
}
bool
diff --git a/storage/src/vespa/storage/distributor/distributor.h b/storage/src/vespa/storage/distributor/distributor.h
index 61a1f06309d..41d88f5dba1 100644
--- a/storage/src/vespa/storage/distributor/distributor.h
+++ b/storage/src/vespa/storage/distributor/distributor.h
@@ -9,6 +9,7 @@
#include "distributor_interface.h"
#include "distributor_stripe_interface.h"
#include "externaloperationhandler.h"
+#include "ideal_state_total_metrics.h"
#include "idealstatemanager.h"
#include "min_replica_provider.h"
#include "pendingmessagetracker.h"
@@ -24,6 +25,7 @@
#include <vespa/storageapi/message/state.h>
#include <vespa/storageframework/generic/metric/metricupdatehook.h>
#include <vespa/storageframework/generic/thread/tickingthread.h>
+#include <vespa/vdslib/state/random.h>
#include <chrono>
#include <queue>
#include <unordered_map>
@@ -43,6 +45,7 @@ class DistributorBucketSpaceRepo;
class DistributorStatus;
class DistributorStripe;
class DistributorStripePool;
+class DistributorTotalMetrics;
class StripeAccessor;
class OperationSequencer;
class OwnershipTransferSafeTimePointCalculator;
@@ -77,7 +80,7 @@ public:
void sendUp(const std::shared_ptr<api::StorageMessage>&) override;
void sendDown(const std::shared_ptr<api::StorageMessage>&) override;
- DistributorMetricSet& getMetrics() { return *_metrics; }
+ DistributorMetricSet& getMetrics();
// Implements DistributorInterface and DistributorMessageSender.
DistributorMetricSet& metrics() override { return getMetrics(); }
@@ -122,14 +125,10 @@ public:
};
private:
- friend struct DistributorTest;
- friend class BucketDBUpdaterTest;
friend class DistributorTestUtil;
+ friend class LegacyBucketDBUpdaterTest;
friend class MetricUpdateHook;
-
- // TODO STRIPE remove
- DistributorStripe& first_stripe() noexcept;
- const DistributorStripe& first_stripe() const noexcept;
+ friend struct LegacyDistributorTest;
void setNodeStateUp();
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
@@ -189,6 +188,9 @@ private:
// Precondition: _stripe_scan_notify_mutex is held
[[nodiscard]] bool may_send_host_info_on_behalf_of_stripes(std::lock_guard<std::mutex>& held_lock) noexcept;
+ uint32_t random_stripe_idx();
+ uint32_t stripe_of_bucket_id(const document::BucketId& bucket_id, const api::StorageMessage& msg);
+
struct StripeScanStats {
bool wants_to_send_host_info = false;
bool has_reported_in_at_least_once = false;
@@ -197,18 +199,24 @@ private:
using MessageQueue = std::vector<std::shared_ptr<api::StorageMessage>>;
DistributorComponentRegister& _comp_reg;
+ const bool _use_legacy_mode;
std::shared_ptr<DistributorMetricSet> _metrics;
+ std::shared_ptr<DistributorTotalMetrics> _total_metrics;
+ std::shared_ptr<IdealStateMetricSet> _ideal_state_metrics;
+ std::shared_ptr<IdealStateTotalMetrics> _ideal_state_total_metrics;
ChainedMessageSender* _messageSender;
- const bool _use_legacy_mode;
// TODO STRIPE multiple stripes...! This is for proof of concept of wiring.
uint8_t _n_stripe_bits;
std::unique_ptr<DistributorStripe> _stripe;
DistributorStripePool& _stripe_pool;
std::vector<std::unique_ptr<DistributorStripe>> _stripes;
std::unique_ptr<StripeAccessor> _stripe_accessor;
+ storage::lib::RandomGen _random_stripe_gen;
+ std::mutex _random_stripe_gen_mutex;
MessageQueue _message_queue; // Queue for top-level ops
MessageQueue _fetched_messages;
distributor::DistributorComponent _component;
+ storage::DistributorComponent _ideal_state_component;
std::shared_ptr<const DistributorConfiguration> _total_config;
std::unique_ptr<BucketDBUpdater> _bucket_db_updater;
StatusReporterDelegate _distributorStatusDelegate;
diff --git a/storage/src/vespa/storage/distributor/distributor_operation_context.h b/storage/src/vespa/storage/distributor/distributor_operation_context.h
index aa598835cdb..e0d481a322a 100644
--- a/storage/src/vespa/storage/distributor/distributor_operation_context.h
+++ b/storage/src/vespa/storage/distributor/distributor_operation_context.h
@@ -20,7 +20,7 @@ public:
virtual ~DistributorOperationContext() {}
virtual api::Timestamp generate_unique_timestamp() = 0;
// TODO STRIPE: Access to bucket space repos is only temporary at this level.
- virtual const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept= 0;
+ virtual const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept = 0;
virtual DistributorBucketSpaceRepo& bucket_space_repo() noexcept = 0;
virtual const DistributorBucketSpaceRepo& read_only_bucket_space_repo() const noexcept = 0;
virtual DistributorBucketSpaceRepo& read_only_bucket_space_repo() noexcept = 0;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index bf78707cfd9..837193a1e7c 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -36,6 +36,7 @@ namespace storage::distributor {
*/
DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
DistributorMetricSet& metrics,
+ IdealStateMetricSet& ideal_state_metrics,
const NodeIdentity& node_identity,
framework::TickingThreadPool& threadPool,
DoneInitializeHandler& doneInitHandler,
@@ -58,7 +59,7 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_bucketDBUpdater(_component, _component, *this, *this, use_legacy_mode),
_distributorStatusDelegate(compReg, *this, *this),
_bucketDBStatusDelegate(compReg, *this, _bucketDBUpdater),
- _idealStateManager(*this, *_bucketSpaceRepo, *_readOnlyBucketSpaceRepo, compReg, stripe_index),
+ _idealStateManager(_component, _component, ideal_state_metrics),
_messageSender(messageSender),
_stripe_host_info_notifier(stripe_host_info_notifier),
_externalOperationHandler(_component, _component, getMetrics(), getMessageSender(),
@@ -71,7 +72,7 @@ DistributorStripe::DistributorStripe(DistributorComponentRegister& compReg,
_bucketPriorityDb(std::make_unique<SimpleBucketPriorityDatabase>()),
_scanner(std::make_unique<SimpleMaintenanceScanner>(*_bucketPriorityDb, _idealStateManager, *_bucketSpaceRepo)),
_throttlingStarter(std::make_unique<ThrottlingOperationStarter>(_maintenanceOperationOwner)),
- _blockingStarter(std::make_unique<BlockingOperationStarter>(_pendingMessageTracker, *_operation_sequencer,
+ _blockingStarter(std::make_unique<BlockingOperationStarter>(_component, *_operation_sequencer,
*_throttlingStarter)),
_scheduler(std::make_unique<MaintenanceScheduler>(_idealStateManager, *_bucketPriorityDb, *_blockingStarter)),
_schedulingMode(MaintenanceScheduler::NORMAL_SCHEDULING_MODE),
@@ -748,7 +749,7 @@ void DistributorStripe::send_updated_host_info_if_required() {
if (_use_legacy_mode) {
_component.getStateUpdater().immediately_send_get_node_state_replies();
} else {
- _stripe_host_info_notifier.notify_stripe_wants_to_send_host_info(0); // TODO STRIPE correct stripe index!
+ _stripe_host_info_notifier.notify_stripe_wants_to_send_host_info(_stripe_index);
}
_must_send_updated_host_info = false;
}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h
index 347863b6d77..8f3de38aec7 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.h
@@ -59,6 +59,7 @@ class DistributorStripe final
public:
DistributorStripe(DistributorComponentRegister&,
DistributorMetricSet& metrics,
+ IdealStateMetricSet& ideal_state_metrics,
const NodeIdentity& node_identity,
framework::TickingThreadPool&,
DoneInitializeHandler&,
@@ -193,13 +194,13 @@ public:
bool tick() override;
private:
- // TODO reduce number of friends. DistributorStripe too popular for its own good.
- friend struct DistributorTest;
- friend class BucketDBUpdaterTest;
+ // TODO STRIPE: reduce number of friends. DistributorStripe too popular for its own good.
+ friend class Distributor;
friend class DistributorTestUtil;
+ friend class LegacyBucketDBUpdaterTest;
friend class MetricUpdateHook;
- friend class Distributor;
friend class MultiThreadedStripeAccessGuard;
+ friend struct LegacyDistributorTest;
bool handleMessage(const std::shared_ptr<api::StorageMessage>& msg);
bool isMaintenanceReply(const api::StorageReply& reply) const;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.h b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
index 31ee9ca88d2..e47d73cc4df 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
@@ -140,6 +140,9 @@ public:
PendingMessageTracker& pending_message_tracker() noexcept override {
return getDistributor().getPendingMessageTracker();
}
+ const PendingMessageTracker& pending_message_tracker() const noexcept override {
+ return getDistributor().getPendingMessageTracker();
+ }
bool has_pending_message(uint16_t node_index,
const document::Bucket& bucket,
uint32_t message_type) const override;
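
The stripe component adds a const overload of pending_message_tracker(), so read-only callers (such as the blocking checks further down in this change) can inspect pending messages without being handed a mutable tracker. The overload pattern in isolation, with Tracker as a stand-in for PendingMessageTracker:

#include <vector>

// Illustrative only; Tracker stands in for PendingMessageTracker.
struct Tracker { std::vector<int> pending; };

class Component {
    Tracker _tracker;
public:
    Tracker&       pending_message_tracker()       noexcept { return _tracker; }
    const Tracker& pending_message_tracker() const noexcept { return _tracker; }
};
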
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
index bd9a4e1de57..24db212c120 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_interface.h
@@ -24,7 +24,6 @@ class PendingMessageTracker;
class DistributorStripeInterface : public DistributorStripeMessageSender
{
public:
- virtual PendingMessageTracker& getPendingMessageTracker() = 0;
virtual DistributorMetricSet& getMetrics() = 0;
virtual void enableClusterStateBundle(const lib::ClusterStateBundle& state) = 0;
virtual const lib::ClusterState* pendingClusterStateOrNull(const document::BucketSpace&) const = 0;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
index 8419abeadaa..518c83d7ffa 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_operation_context.h
@@ -22,7 +22,7 @@ class PendingMessageTracker;
*/
class DistributorStripeOperationContext : public DistributorOperationContext {
public:
- virtual ~DistributorStripeOperationContext() {}
+ virtual ~DistributorStripeOperationContext() = default;
virtual void update_bucket_database(const document::Bucket& bucket,
const BucketCopy& changed_node,
uint32_t update_flags = 0) = 0;
@@ -41,6 +41,7 @@ public:
uint8_t pri) = 0;
virtual OperationRoutingSnapshot read_snapshot_for_bucket(const document::Bucket& bucket) const = 0;
virtual PendingMessageTracker& pending_message_tracker() noexcept = 0;
+ virtual const PendingMessageTracker& pending_message_tracker() const noexcept = 0;
virtual bool has_pending_message(uint16_t node_index,
const document::Bucket& bucket,
uint32_t message_type) const = 0;
diff --git a/storage/src/vespa/storage/distributor/distributor_total_metrics.cpp b/storage/src/vespa/storage/distributor/distributor_total_metrics.cpp
new file mode 100644
index 00000000000..510b1df2ff3
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_total_metrics.cpp
@@ -0,0 +1,54 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "distributor_total_metrics.h"
+
+namespace storage::distributor {
+
+DistributorTotalMetrics::DistributorTotalMetrics(uint32_t num_distributor_stripes)
+ : DistributorMetricSet(),
+ _stripes_metrics(),
+ _bucket_db_updater_metrics()
+{
+ _stripes_metrics.reserve(num_distributor_stripes);
+ for (uint32_t i = 0; i < num_distributor_stripes; ++i) {
+ _stripes_metrics.emplace_back(std::make_shared<DistributorMetricSet>());
+ }
+}
+
+DistributorTotalMetrics::~DistributorTotalMetrics() = default;
+
+void
+DistributorTotalMetrics::aggregate_helper(DistributorMetricSet &total) const
+{
+ _bucket_db_updater_metrics.addToPart(total);
+ for (auto &stripe_metrics : _stripes_metrics) {
+ stripe_metrics->addToPart(total);
+ }
+}
+
+void
+DistributorTotalMetrics::aggregate()
+{
+ DistributorMetricSet::reset();
+ aggregate_helper(*this);
+}
+
+void
+DistributorTotalMetrics::addToSnapshot(Metric& m, std::vector<Metric::UP> &ownerList) const
+{
+ DistributorMetricSet total;
+ aggregate_helper(total);
+ total.addToSnapshot(m, ownerList);
+}
+
+void
+DistributorTotalMetrics::reset()
+{
+ DistributorMetricSet::reset();
+ _bucket_db_updater_metrics.reset();
+ for (auto &stripe_metrics : _stripes_metrics) {
+ stripe_metrics->reset();
+ }
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_total_metrics.h b/storage/src/vespa/storage/distributor/distributor_total_metrics.h
new file mode 100644
index 00000000000..f0457fe64c3
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/distributor_total_metrics.h
@@ -0,0 +1,29 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "distributormetricsset.h"
+
+namespace storage::distributor {
+
+/*
+ * Class presenting total metrics (as a DistributorMetricSet) to the
+ * metric framework, while managing a DistributorMetricSet for each
+ * stripe and an extra one for the top level bucket db updater.
+ */
+class DistributorTotalMetrics : public DistributorMetricSet
+{
+ std::vector<std::shared_ptr<DistributorMetricSet>> _stripes_metrics;
+ DistributorMetricSet _bucket_db_updater_metrics;
+ void aggregate_helper(DistributorMetricSet &total) const;
+public:
+ explicit DistributorTotalMetrics(uint32_t num_distributor_stripes);
+ ~DistributorTotalMetrics() override;
+ void aggregate();
+ void addToSnapshot(Metric& m, std::vector<Metric::UP> &ownerList) const override;
+ void reset() override;
+ DistributorMetricSet& stripe(uint32_t stripe_index) { return *_stripes_metrics[stripe_index]; }
+ DistributorMetricSet& bucket_db_updater_metrics() { return _bucket_db_updater_metrics; }
+};
+
+}
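
The new DistributorTotalMetrics presents a single DistributorMetricSet to the metric framework while internally keeping one set per stripe plus one for the top-level bucket DB updater; aggregation resets the total and folds every part into it. A framework-free sketch of the same aggregate/parts pattern, using plain counters instead of the real Metric classes:

#include <cstdint>
#include <vector>

// Stand-in for a per-stripe metric set; the real code uses DistributorMetricSet.
struct Counters {
    uint64_t puts = 0;
    uint64_t gets = 0;
    void add_to(Counters& total) const { total.puts += puts; total.gets += gets; }
    void reset() { puts = 0; gets = 0; }
};

struct TotalCounters : Counters {
    std::vector<Counters> stripes;
    explicit TotalCounters(uint32_t n) : stripes(n) {}
    void aggregate() {
        Counters::reset();                      // clear the previous total
        for (const auto& s : stripes) {
            s.add_to(*this);                    // fold each stripe's counters into *this
        }
    }
};
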
diff --git a/storage/src/vespa/storage/distributor/distributormessagesender.h b/storage/src/vespa/storage/distributor/distributormessagesender.h
index c39e3e8fe8a..c5a164ed036 100644
--- a/storage/src/vespa/storage/distributor/distributormessagesender.h
+++ b/storage/src/vespa/storage/distributor/distributormessagesender.h
@@ -26,6 +26,7 @@ public:
class DistributorStripeMessageSender : public DistributorMessageSender {
public:
+ virtual PendingMessageTracker& getPendingMessageTracker() = 0;
virtual const PendingMessageTracker& getPendingMessageTracker() const = 0;
virtual const OperationSequencer& operation_sequencer() const noexcept = 0;
};
diff --git a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
index e703c5bfdb8..2bc779aa47e 100644
--- a/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
+++ b/storage/src/vespa/storage/distributor/externaloperationhandler.cpp
@@ -54,6 +54,9 @@ public:
const ClusterContext & cluster_context() const override {
return _node_ctx;
}
+ PendingMessageTracker& getPendingMessageTracker() override {
+ abort(); // Never called by the messages using this component.
+ }
const PendingMessageTracker& getPendingMessageTracker() const override {
abort(); // Never called by the messages using this component.
}
diff --git a/storage/src/vespa/storage/distributor/ideal_state_total_metrics.cpp b/storage/src/vespa/storage/distributor/ideal_state_total_metrics.cpp
new file mode 100644
index 00000000000..65dcad468fc
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/ideal_state_total_metrics.cpp
@@ -0,0 +1,51 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "ideal_state_total_metrics.h"
+
+namespace storage::distributor {
+
+void
+IdealStateTotalMetrics::aggregate_helper(IdealStateMetricSet& total) const
+{
+ for (auto& stripe_metrics : _stripes_metrics) {
+ stripe_metrics->addToPart(total);
+ }
+}
+
+IdealStateTotalMetrics::IdealStateTotalMetrics(uint32_t num_distributor_stripes)
+ : IdealStateMetricSet(),
+ _stripes_metrics()
+{
+ _stripes_metrics.reserve(num_distributor_stripes);
+ for (uint32_t i = 0; i < num_distributor_stripes; ++i) {
+ _stripes_metrics.emplace_back(std::make_shared<IdealStateMetricSet>());
+ }
+}
+
+IdealStateTotalMetrics::~IdealStateTotalMetrics() = default;
+
+void
+IdealStateTotalMetrics::aggregate()
+{
+ IdealStateMetricSet::reset();
+ aggregate_helper(*this);
+}
+
+void
+IdealStateTotalMetrics::addToSnapshot(Metric& m, std::vector<Metric::UP>& owner_list) const
+{
+ IdealStateMetricSet total;
+ aggregate_helper(total);
+ total.addToSnapshot(m, owner_list);
+}
+
+void
+IdealStateTotalMetrics::reset()
+{
+ IdealStateMetricSet::reset();
+ for (auto& stripe_metrics : _stripes_metrics) {
+ stripe_metrics->reset();
+ }
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/ideal_state_total_metrics.h b/storage/src/vespa/storage/distributor/ideal_state_total_metrics.h
new file mode 100644
index 00000000000..c3207baa2f0
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/ideal_state_total_metrics.h
@@ -0,0 +1,28 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "idealstatemetricsset.h"
+
+namespace storage::distributor {
+
+/*
+ * Class presenting total metrics (as an IdealStateMetricSet) to the metric framework,
+ * while managing an IdealStateMetricSet for each distributor stripe.
+ */
+class IdealStateTotalMetrics : public IdealStateMetricSet {
+private:
+ std::vector<std::shared_ptr<IdealStateMetricSet>> _stripes_metrics;
+
+ void aggregate_helper(IdealStateMetricSet& total) const;
+
+public:
+ explicit IdealStateTotalMetrics(uint32_t num_distributor_stripes);
+ ~IdealStateTotalMetrics() override;
+ void aggregate();
+ void addToSnapshot(Metric& m, std::vector<Metric::UP>& owner_list) const override;
+ void reset() override;
+ IdealStateMetricSet& stripe(uint32_t stripe_index) { return *_stripes_metrics[stripe_index]; }
+};
+
+}
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.cpp b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
index 013551b8505..65e018765fe 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
@@ -25,21 +25,14 @@ namespace storage {
namespace distributor {
IdealStateManager::IdealStateManager(
- DistributorStripeInterface& owner,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
- DistributorComponentRegister& compReg,
- uint32_t stripe_index)
- : _metrics(new IdealStateMetricSet),
- _distributorComponent(owner, bucketSpaceRepo, readOnlyBucketSpaceRepo, compReg, "Ideal state manager"),
- _bucketSpaceRepo(bucketSpaceRepo),
+ const DistributorNodeContext& node_ctx,
+ DistributorStripeOperationContext& op_ctx,
+ IdealStateMetricSet& metrics)
+ : _metrics(metrics),
+ _node_ctx(node_ctx),
+ _op_ctx(op_ctx),
_has_logged_phantom_replica_warning(false)
{
- if (stripe_index == 0) {
- // TODO STRIPE: Add proper handling of metrics across distributor stripes
- _distributorComponent.registerMetric(*_metrics);
- }
-
LOG(debug, "Adding BucketStateStateChecker to state checkers");
_stateCheckers.push_back(StateChecker::SP(new BucketStateStateChecker()));
@@ -167,7 +160,7 @@ IdealStateManager::generateHighestPriority(
const document::Bucket &bucket,
NodeMaintenanceStatsTracker& statsTracker) const
{
- auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
+ auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
fillParentAndChildBuckets(c);
fillSiblingBucket(c);
@@ -204,7 +197,7 @@ IdealStateManager::generateInterceptingSplit(BucketSpace bucketSpace,
{
NodeMaintenanceStatsTracker statsTracker;
document::Bucket bucket(bucketSpace, e.getBucketId());
- auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
+ auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
if (e.valid()) {
c.entry = e;
@@ -239,7 +232,7 @@ std::vector<MaintenanceOperation::SP>
IdealStateManager::generateAll(const document::Bucket &bucket,
NodeMaintenanceStatsTracker& statsTracker) const
{
- auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
+ auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
fillParentAndChildBuckets(c);
fillSiblingBucket(c);
@@ -291,7 +284,7 @@ IdealStateManager::getBucketStatus(
void IdealStateManager::dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const {
StatusBucketVisitor proc(*this, bucket_space, out);
- auto &distributorBucketSpace(_bucketSpaceRepo.get(bucket_space));
+ auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket_space);
distributorBucketSpace.getBucketDatabase().forEach(proc);
}
@@ -299,7 +292,7 @@ void IdealStateManager::getBucketStatus(std::ostream& out) const {
LOG(debug, "Dumping bucket database valid at cluster state version %u",
operation_context().cluster_state_bundle().getVersion());
- for (auto& space : _bucketSpaceRepo) {
+ for (auto& space : _op_ctx.bucket_space_repo()) {
out << "<h2>" << document::FixedBucketSpaces::to_string(space.first) << " - " << space.first << "</h2>\n";
dump_bucket_space_db_status(space.first, out);
}
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.h b/storage/src/vespa/storage/distributor/idealstatemanager.h
index 041e009ee9f..c0fa7dd70ab 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.h
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.h
@@ -33,11 +33,9 @@ class IdealStateManager : public MaintenancePriorityGenerator,
{
public:
- IdealStateManager(DistributorStripeInterface& owner,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
- DistributorComponentRegister& compReg,
- uint32_t stripe_index = 0);
+ IdealStateManager(const DistributorNodeContext& node_ctx,
+ DistributorStripeOperationContext& op_ctx,
+ IdealStateMetricSet& metrics);
~IdealStateManager() override;
@@ -66,18 +64,18 @@ public:
const BucketDatabase::Entry& e,
api::StorageMessage::Priority pri);
- IdealStateMetricSet& getMetrics() { return *_metrics; }
+ IdealStateMetricSet& getMetrics() { return _metrics; }
void dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const;
void getBucketStatus(std::ostream& out) const;
- const DistributorNodeContext& node_context() const { return _distributorComponent; }
- DistributorStripeOperationContext& operation_context() { return _distributorComponent; }
- const DistributorStripeOperationContext& operation_context() const { return _distributorComponent; }
- DistributorBucketSpaceRepo &getBucketSpaceRepo() { return _bucketSpaceRepo; }
- const DistributorBucketSpaceRepo &getBucketSpaceRepo() const { return _bucketSpaceRepo; }
+ const DistributorNodeContext& node_context() const { return _node_ctx; }
+ DistributorStripeOperationContext& operation_context() { return _op_ctx; }
+ const DistributorStripeOperationContext& operation_context() const { return _op_ctx; }
+ DistributorBucketSpaceRepo &getBucketSpaceRepo() { return _op_ctx.bucket_space_repo(); }
+ const DistributorBucketSpaceRepo &getBucketSpaceRepo() const { return _op_ctx.bucket_space_repo(); }
private:
void verify_only_live_nodes_in_context(const StateChecker::Context& c) const;
@@ -90,7 +88,7 @@ private:
BucketDatabase::Entry* getEntryForPrimaryBucket(StateChecker::Context& c) const;
- std::shared_ptr<IdealStateMetricSet> _metrics;
+ IdealStateMetricSet& _metrics;
document::BucketId _lastPrioritizedBucket;
// Prioritized list of state checkers that generate operations
@@ -98,8 +96,8 @@ private:
std::vector<StateChecker::SP> _stateCheckers;
SplitBucketStateChecker* _splitBucketStateChecker;
- DistributorStripeComponent _distributorComponent;
- DistributorBucketSpaceRepo& _bucketSpaceRepo;
+ const DistributorNodeContext& _node_ctx;
+ DistributorStripeOperationContext& _op_ctx;
mutable bool _has_logged_phantom_replica_warning;
bool iAmUp() const;
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
index b954ef93c76..4e7f7d9d89d 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
@@ -7,6 +7,39 @@ namespace storage::distributor {
const NodeMaintenanceStats NodeMaintenanceStatsTracker::_emptyNodeMaintenanceStats;
+void
+NodeMaintenanceStats::merge(const NodeMaintenanceStats& rhs)
+{
+ movingOut += rhs.movingOut;
+ syncing += rhs.syncing;
+ copyingIn += rhs.copyingIn;
+ copyingOut += rhs.copyingOut;
+ total += rhs.total;
+}
+
+namespace {
+
+void
+merge_bucket_spaces_stats(NodeMaintenanceStatsTracker::BucketSpacesStats& dest,
+ const NodeMaintenanceStatsTracker::BucketSpacesStats& src)
+{
+ for (const auto& entry : src) {
+ auto bucket_space = entry.first;
+ dest[bucket_space].merge(entry.second);
+ }
+}
+
+}
+
+void
+NodeMaintenanceStatsTracker::merge(const NodeMaintenanceStatsTracker& rhs)
+{
+ for (const auto& entry : rhs._stats) {
+ auto node_index = entry.first;
+ merge_bucket_spaces_stats(_stats[node_index], entry.second);
+ }
+}
+
std::ostream&
operator<<(std::ostream& os, const NodeMaintenanceStats& stats)
{
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
index faf253fc84c..6399e53089b 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
@@ -37,6 +37,8 @@ struct NodeMaintenanceStats
bool operator!=(const NodeMaintenanceStats& other) const noexcept {
return !(*this == other);
}
+
+ void merge(const NodeMaintenanceStats& rhs);
};
std::ostream& operator<<(std::ostream&, const NodeMaintenanceStats&);
@@ -93,6 +95,11 @@ public:
const PerNodeStats& perNodeStats() const {
return _stats;
}
+
+ bool operator==(const NodeMaintenanceStatsTracker& rhs) const {
+ return _stats == rhs._stats;
+ }
+ void merge(const NodeMaintenanceStatsTracker& rhs);
};
} // storage::distributor
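
NodeMaintenanceStatsTracker::merge folds another tracker's per-node, per-bucket-space statistics into this one, creating missing entries on demand via operator[]. The same map-of-maps merge in a self-contained form (simplified key and stats types):

#include <cstdint>
#include <unordered_map>

struct Stats {                     // stand-in for NodeMaintenanceStats
    uint64_t syncing = 0;
    uint64_t total = 0;
    void merge(const Stats& rhs) { syncing += rhs.syncing; total += rhs.total; }
};

using PerSpace = std::unordered_map<uint64_t, Stats>;    // bucket space -> stats
using PerNode  = std::unordered_map<uint16_t, PerSpace>; // node index -> spaces

void merge(PerNode& dest, const PerNode& src) {
    for (const auto& [node, spaces] : src) {
        for (const auto& [space, stats] : spaces) {
            dest[node][space].merge(stats); // operator[] default-constructs missing entries
        }
    }
}
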
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
index 15a57c1e7ee..2bfce9569cc 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
@@ -19,6 +19,28 @@ SimpleMaintenanceScanner::SimpleMaintenanceScanner(BucketPriorityDatabase& bucke
SimpleMaintenanceScanner::~SimpleMaintenanceScanner() = default;
+bool
+SimpleMaintenanceScanner::GlobalMaintenanceStats::operator==(const GlobalMaintenanceStats& rhs) const
+{
+ return pending == rhs.pending;
+}
+
+void
+SimpleMaintenanceScanner::GlobalMaintenanceStats::merge(const GlobalMaintenanceStats& rhs)
+{
+ assert(pending.size() == rhs.pending.size());
+ for (size_t i = 0; i < pending.size(); ++i) {
+ pending[i] += rhs.pending[i];
+ }
+}
+
+void
+SimpleMaintenanceScanner::PendingMaintenanceStats::merge(const PendingMaintenanceStats& rhs)
+{
+ global.merge(rhs.global);
+ perNodeStats.merge(rhs.perNodeStats);
+}
+
SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats() = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::~PendingMaintenanceStats() = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats(const PendingMaintenanceStats &) = default;
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
index 254b3244171..69e63fd4c65 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
@@ -18,6 +18,9 @@ public:
GlobalMaintenanceStats()
: pending(MaintenanceOperation::OPERATION_COUNT)
{ }
+
+ bool operator==(const GlobalMaintenanceStats& rhs) const;
+ void merge(const GlobalMaintenanceStats& rhs);
};
struct PendingMaintenanceStats {
PendingMaintenanceStats();
@@ -26,6 +29,8 @@ public:
~PendingMaintenanceStats();
GlobalMaintenanceStats global;
NodeMaintenanceStatsTracker perNodeStats;
+
+ void merge(const PendingMaintenanceStats& rhs);
};
private:
BucketPriorityDatabase& _bucketPriorityDb;
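
GlobalMaintenanceStats::merge assumes both sides track the same fixed number of operation types and sums the pending counts element-wise, which is how per-stripe scanner statistics can be combined into one total. The accumulation in isolation:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

void merge_pending(std::vector<uint64_t>& dest, const std::vector<uint64_t>& src) {
    assert(dest.size() == src.size()); // both sized to OPERATION_COUNT in the real code
    for (std::size_t i = 0; i < dest.size(); ++i) {
        dest[i] += src[i];
    }
}
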
diff --git a/storage/src/vespa/storage/distributor/min_replica_provider.cpp b/storage/src/vespa/storage/distributor/min_replica_provider.cpp
new file mode 100644
index 00000000000..c9929940560
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/min_replica_provider.cpp
@@ -0,0 +1,19 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "min_replica_provider.h"
+
+namespace storage::distributor {
+
+void
+merge_min_replica_stats(std::unordered_map<uint16_t, uint32_t>& dest,
+ const std::unordered_map<uint16_t, uint32_t>& src)
+{
+ for (const auto& entry : src) {
+ auto node_index = entry.first;
+ auto itr = dest.find(node_index);
+ auto new_min_replica = (itr != dest.end()) ? std::min(itr->second, entry.second) : entry.second;
+ dest[node_index] = new_min_replica;
+ }
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/min_replica_provider.h b/storage/src/vespa/storage/distributor/min_replica_provider.h
index 6d644f4e9d4..ba946cd5a7f 100644
--- a/storage/src/vespa/storage/distributor/min_replica_provider.h
+++ b/storage/src/vespa/storage/distributor/min_replica_provider.h
@@ -4,8 +4,7 @@
#include <stdint.h>
#include <unordered_map>
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
class MinReplicaProvider
{
@@ -21,5 +20,8 @@ public:
virtual std::unordered_map<uint16_t, uint32_t> getMinReplica() const = 0;
};
-} // distributor
-} // storage
+void merge_min_replica_stats(std::unordered_map<uint16_t, uint32_t>& dest,
+ const std::unordered_map<uint16_t, uint32_t>& src);
+
+}
+
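
merge_min_replica_stats keeps, per node, the smallest min-replica value seen across the merged maps. A tiny usage example with made-up values:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <unordered_map>

// Same semantics as merge_min_replica_stats() above, shown with sample data.
int main() {
    std::unordered_map<uint16_t, uint32_t> total  = {{0, 5}, {1, 7}};
    std::unordered_map<uint16_t, uint32_t> stripe = {{1, 3}, {2, 9}};
    for (const auto& [node, min_replica] : stripe) {
        auto it = total.find(node);
        total[node] = (it != total.end()) ? std::min(it->second, min_replica) : min_replica;
    }
    assert(total[0] == 5 && total[1] == 3 && total[2] == 9);
}
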
diff --git a/storage/src/vespa/storage/distributor/operationowner.h b/storage/src/vespa/storage/distributor/operationowner.h
index d3f46343ebc..c469b35a8dc 100644
--- a/storage/src/vespa/storage/distributor/operationowner.h
+++ b/storage/src/vespa/storage/distributor/operationowner.h
@@ -43,6 +43,10 @@ public:
return _sender.cluster_context();
}
+ PendingMessageTracker& getPendingMessageTracker() override {
+ return _sender.getPendingMessageTracker();
+ }
+
const PendingMessageTracker& getPendingMessageTracker() const override {
return _sender.getPendingMessageTracker();
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 9077f3dc288..db30fcc7196 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -86,6 +86,10 @@ struct IntermediateMessageSender : DistributorStripeMessageSender {
return forward.cluster_context();
}
+ PendingMessageTracker& getPendingMessageTracker() override {
+ return forward.getPendingMessageTracker();
+ }
+
const PendingMessageTracker& getPendingMessageTracker() const override {
return forward.getPendingMessageTracker();
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp
index 1a48df0fd7c..f11d1c26da2 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.cpp
@@ -156,41 +156,26 @@ public:
}
};
-// TODO STRIPE replace with check for pending cluster state transition.
-// Null-bucket messages are not intercepted nor observeable by stripes,
-// only by the top-level distributor.
-bool
-checkNullBucketRequestBucketInfoMessage(uint16_t node,
- document::BucketSpace bucketSpace,
- const PendingMessageTracker& tracker)
-{
- RequestBucketInfoChecker rchk;
- // Check messages sent to null-bucket (i.e. any bucket) for the node.
- document::Bucket nullBucket(bucketSpace, document::BucketId());
- tracker.checkPendingMessages(node, nullBucket, rchk);
- return rchk.blocked;
-}
-
}
bool
IdealStateOperation::checkBlock(const document::Bucket &bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer& seq) const
{
if (seq.is_blocked(bucket)) {
return true;
}
+ if (ctx.pending_cluster_state_or_null(bucket.getBucketSpace())) {
+ return true;
+ }
IdealStateOpChecker ichk(*this);
const std::vector<uint16_t>& nodes(getNodes());
for (auto node : nodes) {
- tracker.checkPendingMessages(node, bucket, ichk);
+ ctx.pending_message_tracker().checkPendingMessages(node, bucket, ichk);
if (ichk.blocked) {
return true;
}
- if (checkNullBucketRequestBucketInfoMessage(node, bucket.getBucketSpace(), tracker)) {
- return true;
- }
}
return false;
}
@@ -198,32 +183,25 @@ IdealStateOperation::checkBlock(const document::Bucket &bucket,
bool
IdealStateOperation::checkBlockForAllNodes(
const document::Bucket &bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer& seq) const
{
if (seq.is_blocked(bucket)) {
return true;
}
- IdealStateOpChecker ichk(*this);
- // Check messages sent to _any node_ for _this_ particular bucket.
- tracker.checkPendingMessages(bucket, ichk);
- if (ichk.blocked) {
+ if (ctx.pending_cluster_state_or_null(bucket.getBucketSpace())) {
return true;
}
- const std::vector<uint16_t>& nodes(getNodes());
- for (auto node : nodes) {
- if (checkNullBucketRequestBucketInfoMessage(node, bucket.getBucketSpace(), tracker)) {
- return true;
- }
- }
- return false;
+ IdealStateOpChecker ichk(*this);
+ // Check messages sent to _any node_ for _this_ particular bucket.
+ ctx.pending_message_tracker().checkPendingMessages(bucket, ichk);
+ return ichk.blocked;
}
-
bool
-IdealStateOperation::isBlocked(const PendingMessageTracker& tracker, const OperationSequencer& op_seq) const
+IdealStateOperation::isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer& op_seq) const
{
- return checkBlock(getBucket(), tracker, op_seq);
+ return checkBlock(getBucket(), ctx, op_seq);
}
std::string
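
After this change an ideal-state operation counts as blocked if the operation sequencer blocks the bucket, if the bucket space has a pending cluster state transition, or if any target node already has a conflicting pending message for the bucket; the old per-node null-bucket RequestBucketInfo check is subsumed by the pending-cluster-state test. The decision order, as a framework-free sketch with hypothetical predicate callbacks standing in for the sequencer, the operation context and the pending message tracker:

#include <cstdint>
#include <functional>
#include <vector>

bool is_blocked(bool sequencer_blocks,
                bool pending_cluster_state,
                const std::vector<uint16_t>& nodes,
                const std::function<bool(uint16_t)>& node_has_conflicting_message) {
    if (sequencer_blocks)      return true;
    if (pending_cluster_state) return true;
    for (uint16_t node : nodes) {
        if (node_has_conflicting_message(node)) return true;
    }
    return false;
}
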
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
index 0e45d7f3b3a..d41640b468e 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/idealstateoperation.h
@@ -182,7 +182,7 @@ public:
* Returns true if we are blocked to start this operation given
* the pending messages.
*/
- bool isBlocked(const PendingMessageTracker& pendingMessages, const OperationSequencer&) const override;
+ bool isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer&) const override;
/**
Returns the priority we should send messages with.
@@ -234,10 +234,10 @@ protected:
* the set of messages checked.
*/
bool checkBlock(const document::Bucket& bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer&) const;
bool checkBlockForAllNodes(const document::Bucket& bucket,
- const PendingMessageTracker& tracker,
+ const DistributorStripeOperationContext& ctx,
const OperationSequencer&) const;
};
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
index d9e411bc44e..15d3129b309 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
@@ -161,10 +161,10 @@ JoinOperation::getJoinBucket(size_t idx) const
}
bool
-JoinOperation::isBlocked(const PendingMessageTracker& tracker, const OperationSequencer& op_seq) const
+JoinOperation::isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer& op_seq) const
{
- return (checkBlock(getBucket(), tracker, op_seq) ||
- checkBlock(getJoinBucket(0), tracker, op_seq) ||
- (_bucketsToJoin.size() > 1 && checkBlock(getJoinBucket(1), tracker, op_seq)));
+ return (checkBlock(getBucket(), ctx, op_seq) ||
+ checkBlock(getJoinBucket(0), ctx, op_seq) ||
+ (_bucketsToJoin.size() > 1 && checkBlock(getJoinBucket(1), ctx, op_seq)));
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h
index 5796b8d3fa1..4515092cfef 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.h
@@ -35,7 +35,7 @@ public:
return JOIN_BUCKET;
}
- bool isBlocked(const PendingMessageTracker& pendingMessages,
+ bool isBlocked(const DistributorStripeOperationContext& ctx,
const OperationSequencer& op_seq) const override;
protected:
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
index 27e203a9060..749787c51b9 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.cpp
@@ -235,7 +235,7 @@ MergeOperation::deleteSourceOnlyNodes(
BucketAndNodes(getBucket(), sourceOnlyNodes));
// Must not send removes to source only copies if something has caused
// pending load to the copy after the merge was sent!
- if (_removeOperation->isBlocked(sender.getPendingMessageTracker(), sender.operation_sequencer())) {
+ if (_removeOperation->isBlocked(_manager->operation_context(), sender.operation_sequencer())) {
LOG(debug, "Source only removal for %s was blocked by a pending operation",
getBucketId().toString().c_str());
_ok = false;
@@ -324,7 +324,7 @@ bool MergeOperation::shouldBlockThisOperation(uint32_t messageType, uint8_t pri)
return IdealStateOperation::shouldBlockThisOperation(messageType, pri);
}
-bool MergeOperation::isBlocked(const PendingMessageTracker& pending_tracker,
+bool MergeOperation::isBlocked(const DistributorStripeOperationContext& ctx,
const OperationSequencer& op_seq) const {
// To avoid starvation of high priority global bucket merges, we do not consider
// these for blocking due to a node being "busy" (usually caused by a full merge
@@ -338,14 +338,14 @@ bool MergeOperation::isBlocked(const PendingMessageTracker& pending_tracker,
// 2. Global bucket merges have high priority and will most likely be allowed
// to enter the merge throttler queues, displacing lower priority merges.
if (!is_global_bucket_merge()) {
- const auto& node_info = pending_tracker.getNodeInfo();
+ const auto& node_info = ctx.pending_message_tracker().getNodeInfo();
for (auto node : getNodes()) {
if (node_info.isBusy(node)) {
return true;
}
}
}
- return IdealStateOperation::isBlocked(pending_tracker, op_seq);
+ return IdealStateOperation::isBlocked(ctx, op_seq);
}
bool MergeOperation::is_global_bucket_merge() const noexcept {
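
With this change a non-global merge is additionally blocked whenever any of its target nodes is reported busy by the pending message tracker's node info, while global bucket merges skip that check to avoid starvation. A simplified sketch of that gate, with node_is_busy as a hypothetical stand-in for the real node-info lookup:

#include <cstdint>
#include <functional>
#include <vector>

bool merge_blocked_by_busy_nodes(bool is_global_bucket_merge,
                                 const std::vector<uint16_t>& nodes,
                                 const std::function<bool(uint16_t)>& node_is_busy) {
    if (is_global_bucket_merge) {
        return false; // high-priority global merges bypass the busy-node check
    }
    for (uint16_t node : nodes) {
        if (node_is_busy(node)) return true;
    }
    return false;
}
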
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
index 11b5494fd9b..945b9318482 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/mergeoperation.h
@@ -47,7 +47,7 @@ public:
std::vector<MergeMetaData>&);
bool shouldBlockThisOperation(uint32_t messageType, uint8_t pri) const override;
- bool isBlocked(const PendingMessageTracker& pendingMessages, const OperationSequencer&) const override;
+ bool isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer&) const override;
private:
static void addIdealNodes(
const std::vector<uint16_t>& idealNodes,
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
index 437c4ed6033..d7f03740e4d 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
@@ -143,9 +143,9 @@ SplitOperation::onReceive(DistributorStripeMessageSender&, const api::StorageRep
}
bool
-SplitOperation::isBlocked(const PendingMessageTracker& tracker, const OperationSequencer& op_seq) const
+SplitOperation::isBlocked(const DistributorStripeOperationContext& ctx, const OperationSequencer& op_seq) const
{
- return checkBlockForAllNodes(getBucket(), tracker, op_seq);
+ return checkBlockForAllNodes(getBucket(), ctx, op_seq);
}
bool
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h
index eccbdc69869..5581edf41bd 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.h
@@ -20,7 +20,7 @@ public:
void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
const char* getName() const override { return "split"; };
Type getType() const override { return SPLIT_BUCKET; }
- bool isBlocked(const PendingMessageTracker&, const OperationSequencer&) const override;
+ bool isBlocked(const DistributorStripeOperationContext&, const OperationSequencer&) const override;
bool shouldBlockThisOperation(uint32_t, uint8_t) const override;
protected:
MessageTracker _tracker;
diff --git a/storage/src/vespa/storage/distributor/operations/operation.h b/storage/src/vespa/storage/distributor/operations/operation.h
index 5099762fd6a..18f7214c498 100644
--- a/storage/src/vespa/storage/distributor/operations/operation.h
+++ b/storage/src/vespa/storage/distributor/operations/operation.h
@@ -16,6 +16,7 @@ class StorageComponent;
namespace distributor {
+class DistributorStripeOperationContext;
class PendingMessageTracker;
class OperationSequencer;
@@ -61,7 +62,7 @@ public:
* Returns true if we are blocked to start this operation given
* the pending messages.
*/
- virtual bool isBlocked(const PendingMessageTracker&, const OperationSequencer&) const {
+ virtual bool isBlocked(const DistributorStripeOperationContext&, const OperationSequencer&) const {
return false;
}
diff --git a/storage/src/vespa/storage/storageserver/mergethrottler.cpp b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
index dc8457769a2..1761abfc097 100644
--- a/storage/src/vespa/storage/storageserver/mergethrottler.cpp
+++ b/storage/src/vespa/storage/storageserver/mergethrottler.cpp
@@ -71,6 +71,7 @@ MergeThrottler::ChainedMergeState::~ChainedMergeState() = default;
MergeThrottler::Metrics::Metrics(metrics::MetricSet* owner)
: metrics::MetricSet("mergethrottler", {}, "", owner),
averageQueueWaitingTime("averagequeuewaitingtime", {}, "Average time a merge spends in the throttler queue", this),
+ queueSize("queuesize", {}, "Length of merge queue", this),
bounced_due_to_back_pressure("bounced_due_to_back_pressure", {}, "Number of merges bounced due to resource exhaustion back-pressure", this),
chaining("mergechains", this),
local("locallyexecutedmerges", this)
@@ -416,6 +417,7 @@ MergeThrottler::enqueueMerge(
return;
}
_queue.insert(MergePriorityQueue::value_type(msg, _queueSequence++));
+ _metrics->queueSize.set(_queue.size());
}
bool
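
The merge throttler now publishes its queue length as a long value metric, set right after a merge is inserted into the priority queue. A minimal sketch of keeping such a gauge in sync with a queue; the gauge type only mimics the set() behaviour of metrics::LongValueMetric:

#include <queue>

struct LongValueGauge {
    long value = 0;
    void set(long v) { value = v; }
};

struct ThrottledQueue {
    std::queue<int> queue;
    LongValueGauge  queue_size;
    void enqueue(int merge) {
        queue.push(merge);
        queue_size.set(static_cast<long>(queue.size())); // mirror queue length into the metric
    }
};
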
diff --git a/storage/src/vespa/storage/storageserver/mergethrottler.h b/storage/src/vespa/storage/storageserver/mergethrottler.h
index e8815eee680..0c608f29196 100644
--- a/storage/src/vespa/storage/storageserver/mergethrottler.h
+++ b/storage/src/vespa/storage/storageserver/mergethrottler.h
@@ -57,12 +57,13 @@ public:
MergeFailureMetrics failures;
MergeOperationMetrics(const std::string& name, metrics::MetricSet* owner);
- ~MergeOperationMetrics();
+ ~MergeOperationMetrics() override;
};
class Metrics : public metrics::MetricSet {
public:
metrics::DoubleAverageMetric averageQueueWaitingTime;
+ metrics::LongValueMetric queueSize;
metrics::LongCountMetric bounced_due_to_back_pressure;
MergeOperationMetrics chaining;
MergeOperationMetrics local;
diff --git a/storageapi/src/vespa/storageapi/message/bucket.cpp b/storageapi/src/vespa/storageapi/message/bucket.cpp
index 2e2ca82079d..2323a1ab0a4 100644
--- a/storageapi/src/vespa/storageapi/message/bucket.cpp
+++ b/storageapi/src/vespa/storageapi/message/bucket.cpp
@@ -476,6 +476,12 @@ RequestBucketInfoCommand::getBucket() const
return document::Bucket(_bucketSpace, document::BucketId());
}
+document::BucketId
+RequestBucketInfoCommand::super_bucket_id() const
+{
+ return _buckets.empty() ? document::BucketId() : _buckets[0];
+}
+
void
RequestBucketInfoCommand::print(std::ostream& out, bool verbose,
const std::string& indent) const
@@ -510,7 +516,8 @@ std::ostream& operator<<(std::ostream& out, const RequestBucketInfoReply::Entry&
RequestBucketInfoReply::RequestBucketInfoReply(const RequestBucketInfoCommand& cmd)
: StorageReply(cmd),
_buckets(),
- _full_bucket_fetch(cmd.hasSystemState())
+ _full_bucket_fetch(cmd.hasSystemState()),
+ _super_bucket_id(cmd.super_bucket_id())
{ }
RequestBucketInfoReply::~RequestBucketInfoReply() = default;
diff --git a/storageapi/src/vespa/storageapi/message/bucket.h b/storageapi/src/vespa/storageapi/message/bucket.h
index 61766fb1f11..98445745753 100644
--- a/storageapi/src/vespa/storageapi/message/bucket.h
+++ b/storageapi/src/vespa/storageapi/message/bucket.h
@@ -358,6 +358,7 @@ public:
const vespalib::string& getDistributionHash() const { return _distributionHash; }
document::BucketSpace getBucketSpace() const { return _bucketSpace; }
document::Bucket getBucket() const override;
+ document::BucketId super_bucket_id() const;
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
@@ -388,6 +389,7 @@ public:
private:
EntryVector _buckets;
bool _full_bucket_fetch;
+ document::BucketId _super_bucket_id;
public:
@@ -396,6 +398,7 @@ public:
const EntryVector & getBucketInfo() const { return _buckets; }
EntryVector & getBucketInfo() { return _buckets; }
[[nodiscard]] bool full_bucket_fetch() const noexcept { return _full_bucket_fetch; }
+ const document::BucketId& super_bucket_id() const { return _super_bucket_id; }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
DECLARE_STORAGEREPLY(RequestBucketInfoReply, onRequestBucketInfoReply)
};
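
RequestBucketInfoCommand::super_bucket_id() returns the first requested bucket (or a null bucket id when none were listed), and the reply now records that id at construction time. A minimal standalone illustration of carrying a request-derived id into the reply, with simplified types rather than the real storageapi classes:

#include <cassert>
#include <cstdint>
#include <vector>

struct Command {
    std::vector<uint64_t> buckets;
    uint64_t super_bucket_id() const { return buckets.empty() ? 0 : buckets.front(); }
};

struct Reply {
    uint64_t super_bucket_id;
    explicit Reply(const Command& cmd) : super_bucket_id(cmd.super_bucket_id()) {}
};

int main() {
    Command cmd{{0x8000000000000001ull, 0x8000000000000002ull}};
    Reply reply(cmd);
    assert(reply.super_bucket_id == cmd.buckets.front());
}
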
diff --git a/storageapi/src/vespa/storageapi/message/visitor.cpp b/storageapi/src/vespa/storageapi/message/visitor.cpp
index d87f65a72cf..ec7fb1dc2d4 100644
--- a/storageapi/src/vespa/storageapi/message/visitor.cpp
+++ b/storageapi/src/vespa/storageapi/message/visitor.cpp
@@ -74,6 +74,16 @@ CreateVisitorCommand::getBucket() const
return document::Bucket(_bucketSpace, document::BucketId());
}
+document::BucketId
+CreateVisitorCommand::super_bucket_id() const
+{
+ if (_buckets.empty()) {
+ // TODO STRIPE: Is this actually an error situation? Should be fixed elsewhere.
+ return document::BucketId();
+ }
+ return _buckets[0];
+}
+
void
CreateVisitorCommand::print(std::ostream& out, bool verbose,
const std::string& indent) const
@@ -120,6 +130,7 @@ CreateVisitorCommand::print(std::ostream& out, bool verbose,
CreateVisitorReply::CreateVisitorReply(const CreateVisitorCommand& cmd)
: StorageReply(cmd),
+ _super_bucket_id(cmd.super_bucket_id()),
_lastBucket(document::BucketId(INT_MAX))
{
}
diff --git a/storageapi/src/vespa/storageapi/message/visitor.h b/storageapi/src/vespa/storageapi/message/visitor.h
index 8440591ecde..1313b275e95 100644
--- a/storageapi/src/vespa/storageapi/message/visitor.h
+++ b/storageapi/src/vespa/storageapi/message/visitor.h
@@ -79,6 +79,7 @@ public:
uint32_t getVisitorCmdId() const { return _visitorCmdId; }
document::BucketSpace getBucketSpace() const { return _bucketSpace; }
document::Bucket getBucket() const override;
+ document::BucketId super_bucket_id() const;
const vespalib::string & getLibraryName() const { return _libName; }
const vespalib::string & getInstanceId() const { return _instanceId; }
const vespalib::string & getControlDestination() const { return _controlDestination; }
@@ -114,6 +115,7 @@ public:
*/
class CreateVisitorReply : public StorageReply {
private:
+ document::BucketId _super_bucket_id;
document::BucketId _lastBucket;
vdslib::VisitorStatistics _visitorStatistics;
@@ -124,6 +126,7 @@ public:
void setLastBucket(const document::BucketId& lastBucket) { _lastBucket = lastBucket; }
+ const document::BucketId& super_bucket_id() const { return _super_bucket_id; }
const document::BucketId& getLastBucket() const { return _lastBucket; }
void setVisitorStatistics(const vdslib::VisitorStatistics& stats) { _visitorStatistics = stats; }
diff --git a/vdslib/src/vespa/vdslib/distribution/redundancygroupdistribution.cpp b/vdslib/src/vespa/vdslib/distribution/redundancygroupdistribution.cpp
index 6ac521a0f01..863ac45baa8 100644
--- a/vdslib/src/vespa/vdslib/distribution/redundancygroupdistribution.cpp
+++ b/vdslib/src/vespa/vdslib/distribution/redundancygroupdistribution.cpp
@@ -5,6 +5,7 @@
#include <vespa/vespalib/text/stringtokenizer.h>
#include <boost/lexical_cast.hpp>
#include <algorithm>
+#include <cassert>
namespace storage::lib {
diff --git a/vdstestlib/src/vespa/vdstestlib/config/dirconfig.cpp b/vdstestlib/src/vespa/vdstestlib/config/dirconfig.cpp
index 3a26ed9dec8..6cd9a132d5e 100644
--- a/vdstestlib/src/vespa/vdstestlib/config/dirconfig.cpp
+++ b/vdstestlib/src/vespa/vdstestlib/config/dirconfig.cpp
@@ -7,6 +7,7 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <fstream>
#include <atomic>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".dirconfig");
diff --git a/vespa-athenz/pom.xml b/vespa-athenz/pom.xml
index 653eb58d76d..90ab2a81e0c 100644
--- a/vespa-athenz/pom.xml
+++ b/vespa-athenz/pom.xml
@@ -166,6 +166,13 @@
</exclusion>
</exclusions>
</dependency>
+ <dependency>
+ <!-- required by java-jwt -->
+ <groupId>commons-codec</groupId>
+ <artifactId>commons-codec</artifactId>
+ <version>1.15</version>
+ <scope>compile</scope>
+ </dependency>
</dependencies>
<build>
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java
index c085be7c205..561b20a9c8a 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzAccessToken.java
@@ -6,7 +6,10 @@ import com.auth0.jwt.interfaces.DecodedJWT;
import com.yahoo.vespa.athenz.utils.AthenzIdentities;
import java.time.Instant;
+import java.util.List;
import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
/**
* Represents an Athenz Access Token
@@ -18,6 +21,8 @@ public class AthenzAccessToken {
public static final String HTTP_HEADER_NAME = "Authorization";
private static final String BEARER_TOKEN_PREFIX = "Bearer ";
+ private static final String SCOPE_CLAIM = "scp";
+ private static final String AUDIENCE_CLAIM = "aud";
private final String value;
private volatile DecodedJWT jwt;
@@ -43,6 +48,12 @@ public class AthenzAccessToken {
return jwt().getExpiresAt().toInstant();
}
public AthenzIdentity getAthenzIdentity() { return AthenzIdentities.from(jwt().getClaim("client_id").asString()); }
+ public List<AthenzRole> roles() {
+ String domain = Optional.ofNullable(jwt().getClaim(AUDIENCE_CLAIM).asString()).orElse("");
+ return Optional.ofNullable(jwt().getClaim(SCOPE_CLAIM).asList(String.class)).orElse(List.of()).stream()
+ .map(role -> new AthenzRole(domain, role))
+ .collect(Collectors.toList());
+ }
private DecodedJWT jwt() {
if (jwt == null) {
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzGroup.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzGroup.java
new file mode 100644
index 00000000000..2608af381a2
--- /dev/null
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/api/AthenzGroup.java
@@ -0,0 +1,41 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.athenz.api;
+
+import java.util.Objects;
+
+public class AthenzGroup {
+ private final AthenzDomain domain;
+ private final String groupName;
+
+ public AthenzGroup(AthenzDomain domain, String groupName) {
+ this.domain = domain;
+ this.groupName = groupName;
+ }
+
+ public AthenzGroup(String domain, String groupName) {
+ this.domain = new AthenzDomain(domain);
+ this.groupName = groupName;
+ }
+
+ public AthenzDomain domain() {
+ return domain;
+ }
+
+ public String groupName() {
+ return groupName;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AthenzGroup that = (AthenzGroup) o;
+ return Objects.equals(domain, that.domain) && Objects.equals(groupName, that.groupName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(domain, groupName);
+ }
+}
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
index f9fd1c5e7e9..7503b5a39ed 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
@@ -2,9 +2,11 @@
package com.yahoo.vespa.athenz.client.zms;
import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.api.AthenzGroup;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzResourceName;
import com.yahoo.vespa.athenz.api.AthenzRole;
+import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.athenz.api.OktaAccessToken;
import com.yahoo.vespa.athenz.api.OktaIdentityToken;
import com.yahoo.vespa.athenz.client.ErrorHandler;
@@ -12,23 +14,31 @@ import com.yahoo.vespa.athenz.client.common.ClientBase;
import com.yahoo.vespa.athenz.client.zms.bindings.AccessResponseEntity;
import com.yahoo.vespa.athenz.client.zms.bindings.AssertionEntity;
import com.yahoo.vespa.athenz.client.zms.bindings.DomainListResponseEntity;
-import com.yahoo.vespa.athenz.client.zms.bindings.MembershipResponseEntity;
+import com.yahoo.vespa.athenz.client.zms.bindings.MembershipEntity;
import com.yahoo.vespa.athenz.client.zms.bindings.PolicyEntity;
import com.yahoo.vespa.athenz.client.zms.bindings.ProviderResourceGroupRolesRequestEntity;
+import com.yahoo.vespa.athenz.client.zms.bindings.RoleEntity;
import com.yahoo.vespa.athenz.client.zms.bindings.TenancyRequestEntity;
import com.yahoo.vespa.athenz.identity.ServiceIdentityProvider;
+import com.yahoo.vespa.athenz.utils.AthenzIdentities;
import org.apache.http.Header;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.methods.RequestBuilder;
+import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import javax.net.ssl.SSLContext;
import java.net.URI;
+import java.time.Instant;
import java.util.Collections;
import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
import java.util.OptionalInt;
import java.util.Set;
+import java.util.function.Function;
import java.util.function.Supplier;
+import java.util.stream.Collectors;
import static java.util.stream.Collectors.toList;
@@ -103,10 +113,17 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient {
}
@Override
- public void addRoleMember(AthenzRole role, AthenzIdentity member) {
+ public void addRoleMember(AthenzRole role, AthenzIdentity member, Optional<String> reason) {
URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s/member/%s", role.domain().getName(), role.roleName(), member.getFullName()));
- HttpUriRequest request = RequestBuilder.put(uri).build();
- execute(request, response -> readEntity(response, Void.class));
+ MembershipEntity membership = new MembershipEntity.RoleMembershipEntity(member.getFullName(), true, role.roleName(), null);
+
+
+ RequestBuilder requestBuilder = RequestBuilder.put(uri)
+ .setEntity(toJsonStringEntity(membership));
+ if (reason.filter(s -> !s.isBlank()).isPresent()) {
+ requestBuilder.addHeader("Y-Audit-Ref", reason.get());
+ }
+ execute(requestBuilder.build(), response -> readEntity(response, Void.class));
}
@Override
@@ -123,7 +140,19 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient {
.setUri(uri)
.build();
return execute(request, response -> {
- MembershipResponseEntity membership = readEntity(response, MembershipResponseEntity.class);
+ MembershipEntity membership = readEntity(response, MembershipEntity.GroupMembershipEntity.class);
+ return membership.isMember;
+ });
+ }
+
+ @Override
+ public boolean getGroupMembership(AthenzGroup group, AthenzIdentity identity) {
+ URI uri = zmsUrl.resolve(String.format("domain/%s/group/%s/member/%s", group.domain().getName(), group.groupName(), identity.getFullName()));
+ HttpUriRequest request = RequestBuilder.get()
+ .setUri(uri)
+ .build();
+ return execute(request, response -> {
+ MembershipEntity membership = readEntity(response, MembershipEntity.class);
return membership.isMember;
});
}
@@ -194,6 +223,44 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient {
return true;
}
+ @Override
+ public List<AthenzUser> listPendingRoleApprovals(AthenzRole athenzRole) {
+ URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s?pending=true", athenzRole.domain().getName(), athenzRole.roleName()));
+ HttpUriRequest request = RequestBuilder.get()
+ .setUri(uri)
+ .build();
+ RoleEntity roleEntity = execute(request, response -> readEntity(response, RoleEntity.class));
+ return roleEntity.roleMembers().stream()
+ .filter(RoleEntity.Member::pendingApproval)
+ .map(RoleEntity.Member::memberName)
+ .map(AthenzIdentities::from)
+ .filter(identity -> AthenzIdentities.USER_PRINCIPAL_DOMAIN.equals(identity.getDomain()))
+ .map(AthenzUser.class::cast)
+ .collect(Collectors.toList());
+ }
+
+ @Override
+ public void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry) {
+ URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s/member/%s/decision", athenzRole.domain().getName(), athenzRole.roleName(), athenzUser.getFullName()));
+ MembershipEntity membership = new MembershipEntity.RoleMembershipEntity(athenzUser.getFullName(), true, athenzRole.roleName(), Long.toString(expiry.getEpochSecond()));
+ HttpUriRequest request = RequestBuilder.put()
+ .setUri(uri)
+ .setEntity(toJsonStringEntity(membership))
+ .build();
+ execute(request, response -> readEntity(response, Void.class));
+ }
+
+ @Override
+ public List<AthenzIdentity> listMembers(AthenzRole athenzRole) {
+ URI uri = zmsUrl.resolve(String.format("domain/%s/role/%s", athenzRole.domain().getName(), athenzRole.roleName()));
+ RoleEntity execute = execute(RequestBuilder.get(uri).build(), response -> readEntity(response, RoleEntity.class));
+ return execute.roleMembers().stream()
+ .filter(member -> ! member.pendingApproval())
+ .map(RoleEntity.Member::memberName)
+ .map(AthenzIdentities::from)
+ .collect(Collectors.toList());
+ }
+
private static Header createCookieHeaderWithOktaTokens(OktaIdentityToken identityToken, OktaAccessToken accessToken) {
return new BasicHeader("Cookie", String.format("okta_at=%s; okta_it=%s", accessToken.token(), identityToken.token()));
}
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java
index c7f865a58bb..03afc9278cc 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/ZmsClient.java
@@ -2,13 +2,17 @@
package com.yahoo.vespa.athenz.client.zms;
import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.api.AthenzGroup;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
import com.yahoo.vespa.athenz.api.AthenzResourceName;
import com.yahoo.vespa.athenz.api.AthenzRole;
+import com.yahoo.vespa.athenz.api.AthenzUser;
import com.yahoo.vespa.athenz.api.OktaAccessToken;
import com.yahoo.vespa.athenz.api.OktaIdentityToken;
+import java.time.Instant;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
/**
@@ -28,12 +32,14 @@ public interface ZmsClient extends AutoCloseable {
void deleteProviderResourceGroup(AthenzDomain tenantDomain, AthenzIdentity providerService, String resourceGroup,
OktaIdentityToken identityToken, OktaAccessToken accessToken);
- void addRoleMember(AthenzRole role, AthenzIdentity member);
+ void addRoleMember(AthenzRole role, AthenzIdentity member, Optional<String> reason);
void deleteRoleMember(AthenzRole role, AthenzIdentity member);
boolean getMembership(AthenzRole role, AthenzIdentity identity);
+ boolean getGroupMembership(AthenzGroup group, AthenzIdentity identity);
+
List<AthenzDomain> getDomainList(String prefix);
boolean hasAccess(AthenzResourceName resource, String action, AthenzIdentity identity);
@@ -42,5 +48,11 @@ public interface ZmsClient extends AutoCloseable {
boolean deletePolicyRule(AthenzDomain athenzDomain, String athenzPolicy, String action, AthenzResourceName resourceName, AthenzRole athenzRole);
+ List<AthenzUser> listPendingRoleApprovals(AthenzRole athenzRole);
+
+ void approvePendingRoleMembership(AthenzRole athenzRole, AthenzUser athenzUser, Instant expiry);
+
+ List<AthenzIdentity> listMembers(AthenzRole athenzRole);
+
void close();
}
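
For context, a minimal usage sketch of the ZmsClient additions above (the domain, role and audit reference are made up, and the client instance is assumed to come from a DefaultZmsClient constructed elsewhere):

    AthenzRole role = new AthenzRole(new AthenzDomain("vespa.example"), "tenancy-admin"); // hypothetical names
    AthenzUser user = AthenzUser.fromUserId("alice");

    // addRoleMember now carries an optional audit reference, sent as the Y-Audit-Ref header when non-blank.
    zmsClient.addRoleMember(role, user, Optional.of("JIRA-1234"));

    // Approve whoever is still pending, with a one-week expiry.
    for (AthenzUser pending : zmsClient.listPendingRoleApprovals(role))
        zmsClient.approvePendingRoleMembership(role, pending, Instant.now().plus(Duration.ofDays(7)));

    List<AthenzIdentity> members = zmsClient.listMembers(role); // pending members are filtered out
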
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipEntity.java
new file mode 100644
index 00000000000..33acf0e1c90
--- /dev/null
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipEntity.java
@@ -0,0 +1,81 @@
+// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.athenz.client.zms.bindings;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonGetter;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * @author bjorncs
+ * @author mortent
+ */
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class MembershipEntity {
+ public final String memberName;
+ public final boolean isMember;
+ public final String expiration;
+
+ @JsonCreator
+ public MembershipEntity(@JsonProperty("memberName") String memberName,
+ @JsonProperty("isMember") boolean isMember,
+ @JsonProperty("expiration") String expiration) {
+ this.memberName = memberName;
+ this.isMember = isMember;
+ this.expiration = expiration;
+ }
+
+ @JsonGetter("memberName")
+ public String memberName() {
+ return memberName;
+ }
+
+ @JsonGetter("isMember")
+ public boolean isMember() {
+ return isMember;
+ }
+
+ @JsonGetter("expiration")
+ public String expiration() {
+ return expiration;
+ }
+
+ public static class RoleMembershipEntity extends MembershipEntity {
+ public final String roleName;
+
+ @JsonCreator
+ public RoleMembershipEntity(@JsonProperty("memberName") String memberName,
+ @JsonProperty("isMember") boolean isMember,
+ @JsonProperty("roleName") String roleName,
+ @JsonProperty("expiration") String expiration) {
+ super(memberName, isMember, expiration);
+ this.roleName = roleName;
+ }
+
+ @JsonGetter("roleName")
+ public String roleName() {
+ return roleName;
+ }
+
+ }
+
+ public static class GroupMembershipEntity extends MembershipEntity {
+ public final String groupName;
+
+ @JsonCreator
+ public GroupMembershipEntity(@JsonProperty("memberName") String memberName,
+ @JsonProperty("isMember") boolean isMember,
+ @JsonProperty("groupName") String groupName,
+ @JsonProperty("expiration") String expiration) {
+ super(memberName, isMember, expiration);
+ this.groupName = groupName;
+ }
+
+ @JsonGetter("groupName")
+        public String groupName() {
+ return groupName;
+ }
+ }
+} \ No newline at end of file
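
As a rough illustration of the wire format these bindings imply (values are made up, exception handling omitted), serializing a RoleMembershipEntity with Jackson drops null fields because of @JsonInclude(NON_NULL):

    MembershipEntity.RoleMembershipEntity entity =
            new MembershipEntity.RoleMembershipEntity("user.alice", true, "tenancy-admin", null);
    String json = new com.fasterxml.jackson.databind.ObjectMapper().writeValueAsString(entity);
    // json is approximately {"memberName":"user.alice","isMember":true,"roleName":"tenancy-admin"};
    // "expiration" is omitted because it is null.
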
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipResponseEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipResponseEntity.java
deleted file mode 100644
index 499afb48f25..00000000000
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/MembershipResponseEntity.java
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.athenz.client.zms.bindings;
-
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-/**
- * @author bjorncs
- */
-@JsonIgnoreProperties(ignoreUnknown = true)
-public class MembershipResponseEntity {
- public final String memberName;
- public final boolean isMember;
- public final String roleName;
- public final String expiration;
-
- @JsonCreator
- public MembershipResponseEntity(@JsonProperty("memberName") String memberName,
- @JsonProperty("isMember") boolean isMember,
- @JsonProperty("roleName") String roleName,
- @JsonProperty("expiration") String expiration) {
- this.memberName = memberName;
- this.isMember = isMember;
- this.roleName = roleName;
- this.expiration = expiration;
- }
-}
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/RoleEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/RoleEntity.java
new file mode 100644
index 00000000000..5babe292138
--- /dev/null
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/bindings/RoleEntity.java
@@ -0,0 +1,54 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.athenz.client.zms.bindings;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.List;
+
+/**
+ * @author mortent
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class RoleEntity {
+ private final String roleName;
+ private final List<Member> roleMembers;
+
+ @JsonCreator
+ public RoleEntity(@JsonProperty("roleName") String roleName, @JsonProperty("roleMembers") List<Member> roleMembers) {
+ this.roleName = roleName;
+ this.roleMembers = roleMembers;
+ }
+
+ public String roleName() {
+ return roleName;
+ }
+
+ public List<Member> roleMembers() {
+ return roleMembers;
+ }
+
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ public static final class Member {
+ private final String memberName;
+ private final boolean active;
+ private final boolean approved;
+
+ @JsonCreator
+ public Member(@JsonProperty("memberName") String memberName, @JsonProperty("active") boolean active, @JsonProperty("approved") boolean approved) {
+ this.memberName = memberName;
+ this.active = active;
+ this.approved = approved;
+ }
+
+ public String memberName() {
+ return memberName;
+ }
+
+ public boolean pendingApproval() {
+ return !approved;
+ }
+ }
+}
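
Likewise, an illustrative (made-up) role payload that RoleEntity is meant to read; properties not declared above are simply dropped by @JsonIgnoreProperties(ignoreUnknown = true):

    String json = "{\"roleName\":\"tenancy-admin\",\"roleMembers\":["
                + "{\"memberName\":\"user.alice\",\"active\":true,\"approved\":true},"
                + "{\"memberName\":\"user.bob\",\"active\":true,\"approved\":false}]}";
    RoleEntity role = new com.fasterxml.jackson.databind.ObjectMapper().readValue(json, RoleEntity.class);
    boolean pending = role.roleMembers().get(1).pendingApproval(); // true, so listPendingRoleApprovals would include user.bob
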
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/DefaultZtsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/DefaultZtsClient.java
index cdbf0755059..24234757590 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/DefaultZtsClient.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/DefaultZtsClient.java
@@ -165,7 +165,7 @@ public class DefaultZtsClient extends ClientBase implements ZtsClient {
@Override
public X509Certificate getRoleCertificate(AthenzRole role, Pkcs10Csr csr, Duration expiry) {
RoleCertificateRequestEntity requestEntity = new RoleCertificateRequestEntity(csr, expiry);
- URI uri = ztsUrl.resolve(String.format("domain/%s/role/%s/token", role.domain().getName(), role.roleName()));
+ URI uri = ztsUrl.resolve("rolecert");
HttpUriRequest request = RequestBuilder.post(uri)
.setEntity(toJsonStringEntity(requestEntity))
.build();
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateRequestEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateRequestEntity.java
index 89bfce91154..16dd1d914ef 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateRequestEntity.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateRequestEntity.java
@@ -38,7 +38,7 @@ public class RoleCertificateRequestEntity {
public void serialize(Duration duration,
JsonGenerator jsonGenerator,
SerializerProvider serializerProvider) throws IOException {
- jsonGenerator.writeNumber(duration.getSeconds());
+ jsonGenerator.writeNumber(duration.toMinutes());
}
}
}
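
As a quick sanity check of the unit change above, assuming this serializer is what writes the request's expiry field: for Duration.ofHours(2), getSeconds() wrote 7200, while toMinutes() now writes 120, i.e. the expiry is expressed in minutes rather than seconds.
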
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateResponseEntity.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateResponseEntity.java
index 857bfad9143..cd9a12c0074 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateResponseEntity.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zts/bindings/RoleCertificateResponseEntity.java
@@ -16,12 +16,9 @@ import java.time.Instant;
@JsonIgnoreProperties(ignoreUnknown = true)
public class RoleCertificateResponseEntity {
public final X509Certificate certificate;
- public final Instant expiry;
@JsonCreator
- public RoleCertificateResponseEntity(@JsonProperty("token") @JsonDeserialize(using = X509CertificateDeserializer.class) X509Certificate certificate,
- @JsonProperty("expiryTime") Instant expiry) {
+ public RoleCertificateResponseEntity(@JsonProperty("x509Certificate") @JsonDeserialize(using = X509CertificateDeserializer.class) X509Certificate certificate) {
this.certificate = certificate;
- this.expiry = expiry;
}
}
diff --git a/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java b/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java
index c23b68ad515..3b9babf1782 100644
--- a/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java
+++ b/vespa-documentgen-plugin/src/main/java/com/yahoo/vespa/DocumentGenMojo.java
@@ -962,7 +962,7 @@ public class DocumentGenMojo extends AbstractMojo {
if (DataType.BYTE.equals(dt)) return "com.yahoo.document.DataType.BYTE";
if (DataType.BOOL.equals(dt)) return "com.yahoo.document.DataType.BOOL";
if (DataType.TAG.equals(dt)) return "com.yahoo.document.DataType.TAG";
- if (dt instanceof StructDataType) return "new com.yahoo.document.StructDataType(\""+dt.getName()+"\")";
+ if (dt instanceof StructDataType) return className(dt.getName()) +".type";
if (dt instanceof WeightedSetDataType) return "new com.yahoo.document.WeightedSetDataType("+toJavaReference(((WeightedSetDataType)dt).getNestedType())+", "+
((WeightedSetDataType)dt).createIfNonExistent()+", "+ ((WeightedSetDataType)dt).removeIfZero()+","+dt.getId()+")";
if (dt instanceof ArrayDataType) return "new com.yahoo.document.ArrayDataType("+toJavaReference(((ArrayDataType)dt).getNestedType())+")";
diff --git a/vespa-feed-client-cli/CMakeLists.txt b/vespa-feed-client-cli/CMakeLists.txt
index 3967c135d1c..1d4966ac4a2 100644
--- a/vespa-feed-client-cli/CMakeLists.txt
+++ b/vespa-feed-client-cli/CMakeLists.txt
@@ -1,5 +1,5 @@
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-install_java_artifact(vespa-feed-client-cli)
+install_fat_java_artifact(vespa-feed-client-cli)
vespa_install_script(src/main/sh/vespa-feed-client.sh vespa-feed-client bin)
install(FILES src/main/resources/logging.properties DESTINATION conf/vespa-feed-client)
diff --git a/vespa-feed-client-cli/pom.xml b/vespa-feed-client-cli/pom.xml
index 9fd59f1cfa4..ebbea35f4a4 100644
--- a/vespa-feed-client-cli/pom.xml
+++ b/vespa-feed-client-cli/pom.xml
@@ -71,25 +71,38 @@
</configuration>
</plugin>
<plugin>
+ <groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
- <configuration>
- <archive>
- <manifest>
- <mainClass>ai.vespa.feed.client.CliClient</mainClass>
- </manifest>
- </archive>
- <descriptorRefs>
- <descriptorRef>jar-with-dependencies</descriptorRef>
- </descriptorRefs>
- <appendAssemblyId>false</appendAssemblyId>
- </configuration>
<executions>
<execution>
- <id>make-assembly</id>
+ <id>make-fatjar</id>
+ <phase>prepare-package</phase>
+ <goals>
+ <goal>single</goal>
+ </goals>
+ <configuration>
+ <attach>false</attach>
+ <archive>
+ <manifest>
+ <mainClass>ai.vespa.feed.client.CliClient</mainClass>
+ </manifest>
+ </archive>
+ <descriptorRefs>
+ <descriptorRef>jar-with-dependencies</descriptorRef>
+ </descriptorRefs>
+ </configuration>
+ </execution>
+ <execution>
+ <id>make-zip</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
+ <configuration>
+ <descriptors>
+ <descriptor>src/maven/create-zip.xml</descriptor>
+ </descriptors>
+ </configuration>
</execution>
</executions>
</plugin>
diff --git a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java
index 2f15f468588..83abe0bb872 100644
--- a/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java
+++ b/vespa-feed-client-cli/src/main/java/ai/vespa/feed/client/CliClient.java
@@ -8,11 +8,14 @@ import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLSession;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.io.PrintStream;
import java.nio.file.Files;
+import java.time.Duration;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
+import java.util.concurrent.atomic.AtomicInteger;
/**
* Main method for CLI interface
@@ -55,11 +58,12 @@ public class CliClient {
return 0;
}
try (InputStream in = createFeedInputStream(cliArgs);
- JsonStreamFeeder feeder = createJsonFeeder(cliArgs)) {
+ FeedClient feedClient = createFeedClient(cliArgs);
+ JsonFeeder feeder = createJsonFeeder(feedClient, cliArgs)) {
+ long startNanos = System.nanoTime();
+ feeder.feedMany(in).join();
if (cliArgs.benchmarkModeEnabled()) {
- printBenchmarkResult(feeder.benchmark(in));
- } else {
- feeder.feed(in);
+ printBenchmarkResult(System.nanoTime() - startNanos, feedClient.stats(), systemOut);
}
}
return 0;
@@ -74,20 +78,19 @@ public class CliClient {
private static FeedClient createFeedClient(CliArguments cliArgs) throws CliArguments.CliArgumentsException {
FeedClientBuilder builder = FeedClientBuilder.create(cliArgs.endpoint());
- cliArgs.connections().ifPresent(builder::setMaxConnections);
+ cliArgs.connections().ifPresent(builder::setConnectionsPerEndpoint);
cliArgs.maxStreamsPerConnection().ifPresent(builder::setMaxStreamPerConnection);
if (cliArgs.sslHostnameVerificationDisabled()) {
builder.setHostnameVerifier(AcceptAllHostnameVerifier.INSTANCE);
}
cliArgs.certificateAndKey().ifPresent(c -> builder.setCertificate(c.certificateFile, c.privateKeyFile));
- cliArgs.caCertificates().ifPresent(builder::setCaCertificates);
+ cliArgs.caCertificates().ifPresent(builder::setCaCertificatesFile);
cliArgs.headers().forEach(builder::addRequestHeader);
return builder.build();
}
- private static JsonStreamFeeder createJsonFeeder(CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
- FeedClient feedClient = createFeedClient(cliArgs);
- JsonStreamFeeder.Builder builder = JsonStreamFeeder.builder(feedClient);
+ private static JsonFeeder createJsonFeeder(FeedClient feedClient, CliArguments cliArgs) throws CliArguments.CliArgumentsException, IOException {
+ JsonFeeder.Builder builder = JsonFeeder.builder(feedClient);
cliArgs.timeout().ifPresent(builder::withTimeout);
cliArgs.route().ifPresent(builder::withRoute);
cliArgs.traceLevel().ifPresent(builder::withTracelevel);
@@ -98,18 +101,6 @@ public class CliClient {
return cliArgs.readFeedFromStandardInput() ? systemIn : Files.newInputStream(cliArgs.inputFile().get());
}
- private void printBenchmarkResult(JsonStreamFeeder.BenchmarkResult result) throws IOException {
- JsonFactory factory = new JsonFactory();
- try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
- generator.writeStartObject();
- generator.writeNumberField("feeder.runtime", result.duration.toMillis());
- generator.writeNumberField("feeder.okcount", result.okCount);
- generator.writeNumberField("feeder.errorcount", result.errorCount);
- generator.writeNumberField("feeder.throughput", result.throughput);
- generator.writeEndObject();
- }
- }
-
private int handleException(boolean verbose, Exception e) { return handleException(verbose, e.getMessage(), e); }
private int handleException(boolean verbose, String message, Exception exception) {
@@ -131,4 +122,31 @@ public class CliClient {
static final AcceptAllHostnameVerifier INSTANCE = new AcceptAllHostnameVerifier();
@Override public boolean verify(String hostname, SSLSession session) { return true; }
}
+
+ static void printBenchmarkResult(long durationNanos, OperationStats stats, OutputStream systemOut) throws IOException {
+ JsonFactory factory = new JsonFactory();
+ long okCount = stats.successes();
+ long errorCount = stats.requests() - okCount;
+ double throughput = okCount * 1e9 / Math.max(1, durationNanos);
+ try (JsonGenerator generator = factory.createGenerator(systemOut).useDefaultPrettyPrinter()) {
+ generator.writeStartObject();
+ generator.writeNumberField("feeder.runtime", durationNanos / 1_000_000);
+ generator.writeNumberField("feeder.okcount", okCount);
+ generator.writeNumberField("feeder.errorcount", errorCount);
+ generator.writeNumberField("feeder.throughput", throughput);
+ generator.writeNumberField("feeder.minlatency", stats.minLatencyMillis());
+ generator.writeNumberField("feeder.avglatency", stats.averageLatencyMillis());
+ generator.writeNumberField("feeder.maxlatency", stats.maxLatencyMillis());
+ generator.writeNumberField("feeder.bytessent", stats.bytesSent());
+ generator.writeNumberField("feeder.bytesreceived", stats.bytesReceived());
+
+ generator.writeObjectFieldStart("feeder.responsecodes");
+ for (Map.Entry<Integer, Long> entry : stats.responsesByCode().entrySet())
+ generator.writeNumberField(Integer.toString(entry.getKey()), entry.getValue());
+ generator.writeEndObject();
+
+ generator.writeEndObject();
+ }
+ }
+
}
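
For reference, the benchmark summary printed by printBenchmarkResult above comes out roughly as follows (all values invented; the field names mirror the generator calls in the code):

    {
      "feeder.runtime" : 42188,
      "feeder.okcount" : 100000,
      "feeder.errorcount" : 3,
      "feeder.throughput" : 2370.3,
      "feeder.minlatency" : 6,
      "feeder.avglatency" : 31,
      "feeder.maxlatency" : 812,
      "feeder.bytessent" : 53477376,
      "feeder.bytesreceived" : 9210000,
      "feeder.responsecodes" : {
        "200" : 100000,
        "503" : 3
      }
    }
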
diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh
new file mode 100755
index 00000000000..57077205d18
--- /dev/null
+++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client-standalone.sh
@@ -0,0 +1,9 @@
+#!/usr/bin/env sh
+# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+exec java \
+-Djava.awt.headless=true \
+-Xms128m -Xmx2048m \
+--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
+-Djava.util.logging.config.file=logging.properties \
+-cp vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@"
diff --git a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh
index ab43fca2f67..43cde0894b9 100755
--- a/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh
+++ b/vespa-feed-client-cli/src/main/sh/vespa-feed-client.sh
@@ -81,4 +81,4 @@ exec java \
-Xms128m -Xmx2048m $(getJavaOptionsIPV46) \
--add-opens=java.base/sun.security.ssl=ALL-UNNAMED \
-Djava.util.logging.config.file=${VESPA_HOME}/conf/vespa-feed-client/logging.properties \
--cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli.jar ai.vespa.feed.client.CliClient "$@"
+-cp ${VESPA_HOME}/lib/jars/vespa-feed-client-cli-jar-with-dependencies.jar ai.vespa.feed.client.CliClient "$@"
diff --git a/vespa-feed-client-cli/src/maven/create-zip.xml b/vespa-feed-client-cli/src/maven/create-zip.xml
new file mode 100644
index 00000000000..45bbbea9f2d
--- /dev/null
+++ b/vespa-feed-client-cli/src/maven/create-zip.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+ <id>zip</id>
+ <includeBaseDirectory>true</includeBaseDirectory>
+
+ <formats>
+ <format>zip</format>
+ </formats>
+ <files>
+ <file>
+ <source>${project.build.directory}/${project.artifactId}-jar-with-dependencies.jar</source>
+ </file>
+ <file>
+ <source>${project.basedir}/src/main/sh/vespa-feed-client-standalone.sh</source>
+ <destName>vespa-feed-client</destName>
+ </file>
+ <file>
+ <source>${project.basedir}/src/main/resources/logging.properties</source>
+ </file>
+ </files>
+</assembly>
diff --git a/vespa-feed-client/abi-spec.json b/vespa-feed-client/abi-spec.json
new file mode 100644
index 00000000000..70cb4c3f09f
--- /dev/null
+++ b/vespa-feed-client/abi-spec.json
@@ -0,0 +1,386 @@
+{
+ "ai.vespa.feed.client.BenchmarkingCluster": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "ai.vespa.feed.client.Cluster"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(ai.vespa.feed.client.Cluster)",
+ "public void dispatch(ai.vespa.feed.client.HttpRequest, java.util.concurrent.CompletableFuture)",
+ "public ai.vespa.feed.client.OperationStats stats()",
+ "public void close()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.DocumentId": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.DocumentId of(java.lang.String, java.lang.String, java.lang.String)",
+ "public static ai.vespa.feed.client.DocumentId of(java.lang.String, java.lang.String, long, java.lang.String)",
+ "public static ai.vespa.feed.client.DocumentId of(java.lang.String, java.lang.String, java.lang.String, java.lang.String)",
+ "public static ai.vespa.feed.client.DocumentId of(java.lang.String)",
+ "public java.lang.String documentType()",
+ "public java.lang.String namespace()",
+ "public java.util.OptionalLong number()",
+ "public java.util.Optional group()",
+ "public java.lang.String userSpecific()",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.DynamicThrottler": {
+ "superClass": "ai.vespa.feed.client.StaticThrottler",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(ai.vespa.feed.client.FeedClientBuilder)",
+ "public void sent(long, java.util.concurrent.CompletableFuture)",
+ "public void success()",
+ "public void throttled(long)",
+ "public long targetInflight()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedClient$CircuitBreaker$State": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.FeedClient$CircuitBreaker$State[] values()",
+ "public static ai.vespa.feed.client.FeedClient$CircuitBreaker$State valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum ai.vespa.feed.client.FeedClient$CircuitBreaker$State CLOSED",
+ "public static final enum ai.vespa.feed.client.FeedClient$CircuitBreaker$State HALF_OPEN",
+ "public static final enum ai.vespa.feed.client.FeedClient$CircuitBreaker$State OPEN"
+ ]
+ },
+ "ai.vespa.feed.client.FeedClient$CircuitBreaker": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void success()",
+ "public abstract void failure()",
+ "public abstract ai.vespa.feed.client.FeedClient$CircuitBreaker$State state()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedClient$OperationType": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.FeedClient$OperationType[] values()",
+ "public static ai.vespa.feed.client.FeedClient$OperationType valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum ai.vespa.feed.client.FeedClient$OperationType PUT",
+ "public static final enum ai.vespa.feed.client.FeedClient$OperationType UPDATE",
+ "public static final enum ai.vespa.feed.client.FeedClient$OperationType REMOVE"
+ ]
+ },
+ "ai.vespa.feed.client.FeedClient$RetryStrategy": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public boolean retry(ai.vespa.feed.client.FeedClient$OperationType)",
+ "public int retries()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedClient$Throttler": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract void sent(long, java.util.concurrent.CompletableFuture)",
+ "public abstract void success()",
+ "public abstract void throttled(long)",
+ "public abstract long targetInflight()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedClient": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.io.Closeable"
+ ],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public abstract java.util.concurrent.CompletableFuture put(ai.vespa.feed.client.DocumentId, java.lang.String, ai.vespa.feed.client.OperationParameters)",
+ "public abstract java.util.concurrent.CompletableFuture update(ai.vespa.feed.client.DocumentId, java.lang.String, ai.vespa.feed.client.OperationParameters)",
+ "public abstract java.util.concurrent.CompletableFuture remove(ai.vespa.feed.client.DocumentId, ai.vespa.feed.client.OperationParameters)",
+ "public abstract ai.vespa.feed.client.OperationStats stats()",
+ "public ai.vespa.feed.client.FeedClient$CircuitBreaker$State circuitBreakerState()",
+ "public abstract void close(boolean)",
+ "public void close()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedClientBuilder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.FeedClientBuilder create(java.net.URI)",
+ "public static ai.vespa.feed.client.FeedClientBuilder create(java.util.List)",
+ "public ai.vespa.feed.client.FeedClientBuilder setConnectionsPerEndpoint(int)",
+ "public ai.vespa.feed.client.FeedClientBuilder setMaxStreamPerConnection(int)",
+ "public ai.vespa.feed.client.FeedClientBuilder setSslContext(javax.net.ssl.SSLContext)",
+ "public ai.vespa.feed.client.FeedClientBuilder setHostnameVerifier(javax.net.ssl.HostnameVerifier)",
+ "public ai.vespa.feed.client.FeedClientBuilder setBenchmarkOn(boolean)",
+ "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.lang.String)",
+ "public ai.vespa.feed.client.FeedClientBuilder addRequestHeader(java.lang.String, java.util.function.Supplier)",
+ "public ai.vespa.feed.client.FeedClientBuilder setRetryStrategy(ai.vespa.feed.client.FeedClient$RetryStrategy)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCircuitBreaker(ai.vespa.feed.client.FeedClient$CircuitBreaker)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.nio.file.Path, java.nio.file.Path)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.util.Collection, java.security.PrivateKey)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCertificate(java.security.cert.X509Certificate, java.security.PrivateKey)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCaCertificatesFile(java.nio.file.Path)",
+ "public ai.vespa.feed.client.FeedClientBuilder setCaCertificates(java.util.Collection)",
+ "public ai.vespa.feed.client.FeedClient build()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.FeedException": {
+ "superClass": "java.lang.RuntimeException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.String)",
+ "public void <init>(java.lang.String, java.lang.Throwable)",
+ "public void <init>(java.lang.Throwable)",
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.Throwable)",
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.String, java.lang.Throwable)",
+ "public java.util.Optional documentId()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.GracePeriodCircuitBreaker": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "ai.vespa.feed.client.FeedClient$CircuitBreaker"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.time.Duration, java.time.Duration)",
+ "public void success()",
+ "public void failure()",
+ "public ai.vespa.feed.client.FeedClient$CircuitBreaker$State state()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.JsonFeeder$Builder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public ai.vespa.feed.client.JsonFeeder$Builder withTimeout(java.time.Duration)",
+ "public ai.vespa.feed.client.JsonFeeder$Builder withRoute(java.lang.String)",
+ "public ai.vespa.feed.client.JsonFeeder$Builder withTracelevel(int)",
+ "public ai.vespa.feed.client.JsonFeeder build()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.JsonFeeder$ResultCallback": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "interface",
+ "abstract"
+ ],
+ "methods": [
+ "public void onNextResult(ai.vespa.feed.client.Result, ai.vespa.feed.client.FeedException)",
+ "public void onError(ai.vespa.feed.client.FeedException)",
+ "public void onComplete()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.JsonFeeder": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "java.io.Closeable"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.JsonFeeder$Builder builder(ai.vespa.feed.client.FeedClient)",
+ "public java.util.concurrent.CompletableFuture feedSingle(java.lang.String)",
+ "public java.util.concurrent.CompletableFuture feedMany(java.io.InputStream, ai.vespa.feed.client.JsonFeeder$ResultCallback)",
+ "public java.util.concurrent.CompletableFuture feedMany(java.io.InputStream)",
+ "public void close()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.OperationParameters": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.OperationParameters empty()",
+ "public ai.vespa.feed.client.OperationParameters createIfNonExistent(boolean)",
+ "public ai.vespa.feed.client.OperationParameters testAndSetCondition(java.lang.String)",
+ "public ai.vespa.feed.client.OperationParameters timeout(java.time.Duration)",
+ "public ai.vespa.feed.client.OperationParameters route(java.lang.String)",
+ "public ai.vespa.feed.client.OperationParameters tracelevel(int)",
+ "public boolean createIfNonExistent()",
+ "public java.util.Optional testAndSetCondition()",
+ "public java.util.Optional timeout()",
+ "public java.util.Optional route()",
+ "public java.util.OptionalInt tracelevel()",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.OperationParseException": {
+ "superClass": "ai.vespa.feed.client.FeedException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(java.lang.String)",
+ "public void <init>(java.lang.String, java.lang.Throwable)"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.OperationStats": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(long, java.util.Map, long, long, long, long, long, long, long)",
+ "public long requests()",
+ "public long responses()",
+ "public long successes()",
+ "public java.util.Map responsesByCode()",
+ "public long exceptions()",
+ "public long inflight()",
+ "public long averageLatencyMillis()",
+ "public long minLatencyMillis()",
+ "public long maxLatencyMillis()",
+ "public long bytesSent()",
+ "public long bytesReceived()",
+ "public java.lang.String toString()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.Result$Type": {
+ "superClass": "java.lang.Enum",
+ "interfaces": [],
+ "attributes": [
+ "public",
+ "final",
+ "enum"
+ ],
+ "methods": [
+ "public static ai.vespa.feed.client.Result$Type[] values()",
+ "public static ai.vespa.feed.client.Result$Type valueOf(java.lang.String)"
+ ],
+ "fields": [
+ "public static final enum ai.vespa.feed.client.Result$Type success",
+ "public static final enum ai.vespa.feed.client.Result$Type conditionNotMet",
+ "public static final enum ai.vespa.feed.client.Result$Type failure"
+ ]
+ },
+ "ai.vespa.feed.client.Result": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public ai.vespa.feed.client.Result$Type type()",
+ "public ai.vespa.feed.client.DocumentId documentId()",
+ "public java.util.Optional resultMessage()",
+ "public java.util.Optional traceMessage()"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.ResultParseException": {
+ "superClass": "ai.vespa.feed.client.FeedException",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.String)",
+ "public void <init>(ai.vespa.feed.client.DocumentId, java.lang.Throwable)"
+ ],
+ "fields": []
+ },
+ "ai.vespa.feed.client.StaticThrottler": {
+ "superClass": "java.lang.Object",
+ "interfaces": [
+ "ai.vespa.feed.client.FeedClient$Throttler"
+ ],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(ai.vespa.feed.client.FeedClientBuilder)",
+ "public void sent(long, java.util.concurrent.CompletableFuture)",
+ "public void success()",
+ "public void throttled(long)",
+ "public long targetInflight()"
+ ],
+ "fields": [
+ "protected final long maxInflight",
+ "protected final long minInflight"
+ ]
+ }
+} \ No newline at end of file
diff --git a/vespa-feed-client/pom.xml b/vespa-feed-client/pom.xml
index 7759e9d2308..7d4938c6fb0 100644
--- a/vespa-feed-client/pom.xml
+++ b/vespa-feed-client/pom.xml
@@ -20,6 +20,11 @@
<dependencies>
<!-- compile scope -->
<dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>annotations</artifactId>
+ <version>${project.version}</version>
+ </dependency>
+ <dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcpkix-jdk15on</artifactId>
<scope>compile</scope>
@@ -83,6 +88,10 @@
</execution>
</executions>
</plugin>
+ <plugin>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>abi-check-plugin</artifactId>
+ </plugin>
</plugins>
</build>
</project>
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java
new file mode 100644
index 00000000000..e5d45a2f211
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ApacheCluster.java
@@ -0,0 +1,165 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
+import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
+import org.apache.hc.client5.http.config.RequestConfig;
+import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
+import org.apache.hc.client5.http.impl.async.H2AsyncClientBuilder;
+import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
+import org.apache.hc.core5.concurrent.FutureCallback;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.message.BasicHeader;
+import org.apache.hc.core5.http2.config.H2Config;
+import org.apache.hc.core5.net.URIAuthority;
+import org.apache.hc.core5.reactor.IOReactorConfig;
+import org.apache.hc.core5.util.Timeout;
+
+import javax.net.ssl.SSLContext;
+import java.io.IOException;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.apache.hc.core5.http.ssl.TlsCiphers.excludeH2Blacklisted;
+import static org.apache.hc.core5.http.ssl.TlsCiphers.excludeWeak;
+
+/**
+ * @author jonmv
+ */
+class ApacheCluster implements Cluster {
+
+ private final List<Endpoint> endpoints = new ArrayList<>();
+
+ ApacheCluster(FeedClientBuilder builder) throws IOException {
+ for (URI endpoint : builder.endpoints)
+ for (int i = 0; i < builder.connectionsPerEndpoint; i++)
+ endpoints.add(new Endpoint(createHttpClient(builder), endpoint));
+ }
+
+ @Override
+ public void dispatch(HttpRequest wrapped, CompletableFuture<HttpResponse> vessel) {
+ SimpleHttpRequest request = new SimpleHttpRequest(wrapped.method(), wrapped.path());
+ wrapped.headers().forEach((name, value) -> request.setHeader(name, value.get()));
+ if (wrapped.body() != null)
+ request.setBody(wrapped.body(), ContentType.APPLICATION_JSON);
+
+ int index = 0;
+ int min = Integer.MAX_VALUE;
+ for (int i = 0; i < endpoints.size(); i++)
+ if (endpoints.get(i).inflight.get() < min) {
+ index = i;
+ min = endpoints.get(i).inflight.get();
+ }
+
+ Endpoint endpoint = endpoints.get(index);
+ endpoint.inflight.incrementAndGet();
+ try {
+ request.setScheme(endpoint.url.getScheme());
+ request.setAuthority(new URIAuthority(endpoint.url.getHost(), endpoint.url.getPort()));
+ endpoint.client.execute(request,
+ new FutureCallback<SimpleHttpResponse>() {
+ @Override public void completed(SimpleHttpResponse response) { vessel.complete(new ApacheHttpResponse(response)); }
+ @Override public void failed(Exception ex) { vessel.completeExceptionally(ex); }
+ @Override public void cancelled() { vessel.cancel(false); }
+ });
+ }
+ catch (Throwable thrown) {
+ vessel.completeExceptionally(thrown);
+ }
+ vessel.whenComplete((__, ___) -> endpoint.inflight.decrementAndGet());
+ }
+
+ @Override
+ public void close() {
+ Throwable thrown = null;
+ for (Endpoint endpoint : endpoints)
+ try {
+ endpoint.client.close();
+ }
+ catch (Throwable t) {
+ if (thrown == null) thrown = t;
+ else thrown.addSuppressed(t);
+ }
+ if (thrown != null) throw new RuntimeException(thrown);
+ }
+
+
+ private static class Endpoint {
+
+ private final CloseableHttpAsyncClient client;
+ private final AtomicInteger inflight = new AtomicInteger(0);
+ private final URI url;
+
+ private Endpoint(CloseableHttpAsyncClient client, URI url) {
+ this.client = client;
+ this.url = url;
+
+ this.client.start();
+ }
+
+ }
+
+ private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilder builder) throws IOException {
+ H2AsyncClientBuilder httpClientBuilder = H2AsyncClientBuilder.create()
+ .setUserAgent(String.format("vespa-feed-client/%s", Vespa.VERSION))
+ .setDefaultHeaders(Collections.singletonList(new BasicHeader("Vespa-Client-Version", Vespa.VERSION)))
+ .disableCookieManagement()
+ .disableRedirectHandling()
+ .disableAutomaticRetries()
+ .setIOReactorConfig(IOReactorConfig.custom()
+ .setIoThreadCount(2)
+ .setTcpNoDelay(true)
+ .setSoTimeout(Timeout.ofSeconds(10))
+ .build())
+ .setDefaultRequestConfig(RequestConfig.custom()
+ .setConnectTimeout(Timeout.ofSeconds(10))
+ .setConnectionRequestTimeout(Timeout.DISABLED)
+ .setResponseTimeout(Timeout.ofMinutes(5))
+ .build())
+ .setH2Config(H2Config.custom()
+ .setMaxConcurrentStreams(builder.maxStreamsPerConnection)
+ .setCompressionEnabled(true)
+ .setPushEnabled(false)
+ .setInitialWindowSize(Integer.MAX_VALUE)
+ .build());
+
+ SSLContext sslContext = builder.constructSslContext();
+ String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites()));
+ if (allowedCiphers.length == 0)
+ throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM");
+
+ ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create()
+ .setCiphers(allowedCiphers)
+ .setSslContext(sslContext);
+ if (builder.hostnameVerifier != null) {
+ tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier);
+ }
+ return httpClientBuilder.setTlsStrategy(tlsStrategyBuilder.build())
+ .build();
+ }
+
+ private static class ApacheHttpResponse implements HttpResponse {
+
+ private final SimpleHttpResponse wrapped;
+
+ private ApacheHttpResponse(SimpleHttpResponse wrapped) {
+ this.wrapped = wrapped;
+ }
+
+ @Override
+ public int code() {
+ return wrapped.getCode();
+ }
+
+ @Override
+ public byte[] body() {
+ return wrapped.getBodyBytes();
+ }
+
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java
new file mode 100644
index 00000000000..840219a6bf1
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/BenchmarkingCluster.java
@@ -0,0 +1,102 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.util.Objects.requireNonNull;
+
+public class BenchmarkingCluster implements Cluster {
+
+ private final Cluster delegate;
+ private final ExecutorService executor = Executors.newSingleThreadExecutor(runnable -> {
+ Thread thread = new Thread(runnable, "cluster-stats-collector");
+ thread.setDaemon(true);
+ return thread;
+ });
+
+ private final AtomicLong requests = new AtomicLong();
+ private long results = 0;
+ private long responses = 0;
+ private final long[] responsesByCode = new long[600];
+ private long exceptions = 0;
+ private long totalLatencyMillis = 0;
+ private long minLatencyMillis = Long.MAX_VALUE;
+ private long maxLatencyMillis = 0;
+ private long bytesSent = 0;
+ private long bytesReceived = 0;
+
+ public BenchmarkingCluster(Cluster delegate) {
+ this.delegate = requireNonNull(delegate);
+ }
+
+ @Override
+ public void dispatch(HttpRequest request, CompletableFuture<HttpResponse> vessel) {
+ requests.incrementAndGet();
+ long startNanos = System.nanoTime();
+ delegate.dispatch(request, vessel);
+ vessel.whenCompleteAsync((response, thrown) -> {
+ results++;
+ if (thrown == null) {
+ responses++;
+ responsesByCode[response.code()]++;
+ long latency = (System.nanoTime() - startNanos) / 1_000_000;
+ totalLatencyMillis += latency;
+ minLatencyMillis = Math.min(minLatencyMillis, latency);
+ maxLatencyMillis = Math.max(maxLatencyMillis, latency);
+ bytesSent += request.body() == null ? 0 : request.body().length;
+ bytesReceived += response.body() == null ? 0 : response.body().length;
+ }
+ else
+ exceptions++;
+ },
+ executor);
+ }
+
+ @Override
+ public OperationStats stats() {
+ try {
+ try {
+ return executor.submit(this::getStats).get();
+ }
+ catch (RejectedExecutionException ignored) {
+ executor.awaitTermination(10, TimeUnit.SECONDS);
+ return getStats();
+ }
+ }
+        catch (InterruptedException | ExecutionException e) {
+            throw new RuntimeException(e);
+ }
+ }
+
+ private OperationStats getStats() {
+ Map<Integer, Long> responses = new HashMap<>();
+ for (int code = 0; code < responsesByCode.length; code++)
+ if (responsesByCode[code] > 0)
+ responses.put(code, responsesByCode[code]);
+
+ return new OperationStats(requests.get(),
+ responses,
+ exceptions,
+ requests.get() - results,
+ this.responses == 0 ? 0 : totalLatencyMillis / this.responses,
+ minLatencyMillis,
+ maxLatencyMillis,
+ bytesSent,
+ bytesReceived);
+ }
+
+ @Override
+ public void close() {
+ delegate.close();
+ executor.shutdown();
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java
new file mode 100644
index 00000000000..f428fb567e6
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Cluster.java
@@ -0,0 +1,21 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.io.Closeable;
+import java.util.Collections;
+import java.util.concurrent.CompletableFuture;
+
+/**
+ * Allows dispatch to a Vespa cluster.
+ */
+interface Cluster extends Closeable {
+
+    /** Dispatch the request to the cluster, causing the response vessel to complete at a later time. Must not throw. */
+ void dispatch(HttpRequest request, CompletableFuture<HttpResponse> vessel);
+
+ @Override
+ default void close() { }
+
+ default OperationStats stats() { return new OperationStats(0, Collections.emptyMap(), 0, 0, 0, 0, 0, 0, 0); }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java
index 21513a5dac2..39fc9fb28e0 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/DocumentId.java
@@ -8,6 +8,8 @@ import java.util.OptionalLong;
import static java.util.Objects.requireNonNull;
/**
+ * Represents a Vespa document id
+ *
* @author jonmv
*/
public class DocumentId {
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java
new file mode 100644
index 00000000000..6f4e4e752f0
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/DynamicThrottler.java
@@ -0,0 +1,86 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static java.lang.Math.log;
+import static java.lang.Math.max;
+import static java.lang.Math.min;
+import static java.lang.Math.pow;
+import static java.lang.Math.random;
+
+/**
+ * Samples latency as a function of inflight requests, and regularly adjusts to the optimal value.
+ *
+ * @author jonmv
+ */
+public class DynamicThrottler extends StaticThrottler {
+
+ private final AtomicLong targetInflight;
+ private long updateNanos = 0;
+ private final List<AtomicLong> latencies = new ArrayList<>();
+ private final double weight = 0.9; // Higher weight favours higher (own) throughput, at the cost of (shared) latency.
+
+ public DynamicThrottler(FeedClientBuilder builder) {
+ super(builder);
+ this.targetInflight = new AtomicLong(128L * builder.connectionsPerEndpoint * builder.endpoints.size());
+ for (int i = 0; i < 128; i++)
+ latencies.add(new AtomicLong(-1));
+ }
+
+ @Override
+ public void sent(long inflight, CompletableFuture<HttpResponse> vessel) {
+ long startNanos = System.nanoTime();
+ if (updateNanos == 0) updateNanos = System.nanoTime();
+ boolean update = startNanos - updateNanos >= 1e8; // Ship ten updates per second.
+ if (update) updateNanos = startNanos;
+
+ vessel.whenComplete((response, thrown) -> {
+ // Use buckets for latency measurements, with inflight along a log scale,
+ // and with minInflight and maxInflight at the ends.
+ int index = (int) (latencies.size() * log(max(1, (double) inflight / minInflight))
+ / log(256)); // 4096 (server max streams per connection) / 16 (our min per connection)
+ long nowNanos = System.nanoTime();
+ long latencyNanos = nowNanos - startNanos;
+ latencies.get(index).set(latencyNanos);
+ if ( ! update)
+ return;
+
+ // Loop over latency measurements and pick the one which optimises throughput and latency.
+ double choice = -1;
+ double max = -1;
+ for (int i = latencies.size(); i-- > 0; ) {
+ double latency = latencies.get(i).get();
+ if (latency < 0) continue; // Skip unknown values.
+ double target = minInflight * pow(256, (i + 0.5) / latencies.size());
+ double objective = pow(target, weight) / latency; // Optimise throughput (weight), but also latency (1 - weight).
+ if (objective > max) {
+ max = objective;
+ choice = target;
+ }
+ }
+ long target = (long) ((random() * 0.25 + 0.90) * choice); // Random walk, skewed towards increase.
+ targetInflight.set(max(minInflight, min(maxInflight, target)));
+ });
+ }
+
+ @Override
+ public void success() {
+ super.success();
+ }
+
+ @Override
+ public void throttled(long inflight) {
+ super.throttled(inflight);
+ }
+
+ @Override
+ public long targetInflight() {
+ return min(super.targetInflight(), targetInflight.get());
+ }
+
+}
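
A quick worked example of the bucket index computed in sent() above, with the 128 buckets allocated in the constructor: at inflight == minInflight the index is (int) (128 * log(1) / log(256)) = 0, and at inflight == 16 * minInflight it is (int) (128 * log(16) / log(256)) = 64, i.e. halfway up a log scale that spans minInflight to 256 * minInflight.
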
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
index 455a79060ee..f39b56ad50f 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClient.java
@@ -5,15 +5,43 @@ import java.io.Closeable;
import java.util.concurrent.CompletableFuture;
/**
+ * Asynchronous feed client accepting document operations as JSON
+ *
* @author bjorncs
* @author jonmv
*/
public interface FeedClient extends Closeable {
+ /**
+ * Send a document put with the given parameters, returning a future with the result of the operation.
+     * Exceptional completion will be an instance of {@link FeedException} or one of its sub-classes.
+     */
CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params);
+
+ /**
+ * Send a document update with the given parameters, returning a future with the result of the operation.
+     * Exceptional completion will be an instance of {@link FeedException} or one of its sub-classes.
+     */
CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params);
+
+ /** Send a document remove with the given parameters, returning a future with the result of the operation.
+     * Exceptional completion will be an instance of {@link FeedException} or one of its sub-classes.
+     */
CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params);
+ /** Returns a snapshot of the stats for this feed client, such as requests made, and responses by status. */
+ OperationStats stats();
+
+ /** Current state of the circuit breaker. */
+ default CircuitBreaker.State circuitBreakerState() { return CircuitBreaker.State.CLOSED; }
+
+ /** Shut down, and reject new operations. Operations in flight are allowed to complete normally if graceful. */
+ void close(boolean graceful);
+
+ /** Initiates graceful shutdown. See {@link #close(boolean)}. */
+ default void close() { close(true); }
+
+ /** Controls what to retry, and how many times. */
interface RetryStrategy {
/** Whether to retry operations of the given type. */
@@ -24,10 +52,62 @@ public interface FeedClient extends Closeable {
}
+    /** Allows slowing down, or halting completely, operations against the configured endpoint on high failure rates. */
+ interface CircuitBreaker {
+
+ /** Called by the client whenever a successful response is obtained. */
+ void success();
+
+ /** Called by the client whenever a transient or fatal error occurs. */
+ void failure();
+
+ /** The current state of the circuit breaker. */
+ State state();
+
+ enum State {
+
+ /** Circuit is closed: business as usual. */
+ CLOSED,
+
+ /** Circuit is half-open: something is wrong, perhaps it recovers? */
+            /** Circuit is half-open: something is wrong, but it may recover. */
+
+ /** Circuit is open: we have given up. */
+ OPEN;
+
+ }
+
+ }
+
enum OperationType {
- put,
- update,
- remove;
+
+ /** A document put operation. This is idempotent. */
+ PUT,
+
+ /** A document update operation. This is idempotent if all its contained updates are. */
+ UPDATE,
+
+ /** A document remove operation. This is idempotent. */
+ REMOVE;
+
+ }
+
+
+ /** Determines the number of requests to have inflight at any point. */
+ interface Throttler {
+
+ /** A request was just sent with {@code vessel}, with {@code inflight} total in flight. */
+ void sent(long inflight, CompletableFuture<HttpResponse> vessel);
+
+ /** A successful response was obtained. */
+ void success();
+
+ /** A throttle signal was obtained from the server. */
+ void throttled(long inflight);
+
+ /** The target inflight operations right now. */
+ long targetInflight();
+
}
}
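
A minimal usage sketch of this interface, assuming a client built via FeedClientBuilder (endpoint, document id and JSON payload are made up):

    FeedClient client = FeedClientBuilder.create(URI.create("https://container.example:4443/"))
            .setRetryStrategy(new FeedClient.RetryStrategy() {
                @Override public boolean retry(FeedClient.OperationType type) { return type != FeedClient.OperationType.REMOVE; }
                @Override public int retries() { return 5; }
            })
            .build();

    DocumentId id = DocumentId.of("music", "song", "got-to-be-there"); // namespace, document type, user-specific id
    CompletableFuture<Result> result = client.put(id, "{\"fields\":{\"title\":\"Got to Be There\"}}", OperationParameters.empty());
    result.whenComplete((ok, error) -> client.close()); // graceful close once the operation completes
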
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
index eaf84c67ac4..0f685ec5b7f 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedClientBuilder.java
@@ -7,8 +7,14 @@ import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URI;
import java.nio.file.Path;
-import java.time.Clock;
+import java.security.PrivateKey;
+import java.security.cert.X509Certificate;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
import java.util.function.Supplier;
@@ -22,38 +28,51 @@ import static java.util.Objects.requireNonNull;
*/
public class FeedClientBuilder {
- FeedClient.RetryStrategy defaultRetryStrategy = new FeedClient.RetryStrategy() { };
+ static final FeedClient.RetryStrategy defaultRetryStrategy = new FeedClient.RetryStrategy() { };
- final URI endpoint;
+ final List<URI> endpoints;
final Map<String, Supplier<String>> requestHeaders = new HashMap<>();
SSLContext sslContext;
HostnameVerifier hostnameVerifier;
- int maxConnections = 4;
- int maxStreamsPerConnection = 1024;
+ int connectionsPerEndpoint = 4;
+ int maxStreamsPerConnection = 4096;
FeedClient.RetryStrategy retryStrategy = defaultRetryStrategy;
- Path certificate;
- Path privateKey;
- Path caCertificates;
- Clock clock;
+ FeedClient.CircuitBreaker circuitBreaker = new GracePeriodCircuitBreaker(Duration.ofSeconds(1), Duration.ofMinutes(10));
+ Path certificateFile;
+ Path privateKeyFile;
+ Path caCertificatesFile;
+ Collection<X509Certificate> certificate;
+ PrivateKey privateKey;
+ Collection<X509Certificate> caCertificates;
+ boolean benchmark;
- public static FeedClientBuilder create(URI endpoint) { return new FeedClientBuilder(endpoint); }
+    /** Creates a builder for a single container endpoint. */
+ public static FeedClientBuilder create(URI endpoint) { return new FeedClientBuilder(Collections.singletonList(endpoint)); }
- private FeedClientBuilder(URI endpoint) {
- requireNonNull(endpoint.getHost());
- this.endpoint = endpoint;
+    /** Creates a builder for multiple container endpoints. */
+ public static FeedClientBuilder create(List<URI> endpoints) { return new FeedClientBuilder(endpoints); }
+
+ private FeedClientBuilder(List<URI> endpoints) {
+ if (endpoints.isEmpty())
+ throw new IllegalArgumentException("At least one endpoint must be provided");
+
+ for (URI endpoint : endpoints)
+ requireNonNull(endpoint.getHost());
+
+ this.endpoints = new ArrayList<>(endpoints);
}
/**
- * Sets the maximum number of connections this client will use.
+ * Sets the number of connections this client will use per endpoint.
*
* A reasonable value here is a small multiple of the numbers of containers in the
* cluster to feed, so load can be balanced across these.
* In general, this value should be kept as low as possible, but poor connectivity
* between feeder and cluster may also warrant a higher number of connections.
*/
- public FeedClientBuilder setMaxConnections(int max) {
+ public FeedClientBuilder setConnectionsPerEndpoint(int max) {
if (max < 1) throw new IllegalArgumentException("Max connections must be at least 1, but was " + max);
- this.maxConnections = max;
+ this.connectionsPerEndpoint = max;
return this;
}
@@ -70,52 +89,137 @@ public class FeedClientBuilder {
return this;
}
+ /** Sets {@link SSLContext} instance. */
public FeedClientBuilder setSslContext(SSLContext context) {
- if (certificate != null || caCertificates != null || privateKey != null) {
- throw new IllegalArgumentException("Cannot set both SSLContext and certificate / CA certificates");
- }
this.sslContext = requireNonNull(context);
return this;
}
+ /** Sets a {@link HostnameVerifier} instance (e.g. for disabling the default SSL hostname verification). */
public FeedClientBuilder setHostnameVerifier(HostnameVerifier verifier) {
this.hostnameVerifier = requireNonNull(verifier);
return this;
}
+ /** Turns benchmarking on or off; statistics are aggregated in {@link FeedClient#stats()}. */
+ public FeedClientBuilder setBenchmarkOn(boolean on) {
+ this.benchmark = on;
+ return this;
+ }
+
+ /** Adds an HTTP request header to all client requests. */
public FeedClientBuilder addRequestHeader(String name, String value) {
return addRequestHeader(name, () -> requireNonNull(value));
}
+ /**
+ * Adds an HTTP request header to all client requests. The value {@link Supplier} is invoked for each HTTP request,
+ * i.e. the value can be dynamically updated during a feed.
+ */
public FeedClientBuilder addRequestHeader(String name, Supplier<String> valueSupplier) {
this.requestHeaders.put(requireNonNull(name), requireNonNull(valueSupplier));
return this;
}
+ /**
+ * Overrides default retry strategy.
+ * @see FeedClient.RetryStrategy
+ */
public FeedClientBuilder setRetryStrategy(FeedClient.RetryStrategy strategy) {
this.retryStrategy = requireNonNull(strategy);
return this;
}
+ /**
+ * Overrides default circuit breaker.
+ * @see FeedClient.CircuitBreaker
+ */
+ public FeedClientBuilder setCircuitBreaker(FeedClient.CircuitBreaker breaker) {
+ this.circuitBreaker = requireNonNull(breaker);
+ return this;
+ }
+
+ /** Sets the paths to the client SSL certificate and private key PEM files. */
public FeedClientBuilder setCertificate(Path certificatePemFile, Path privateKeyPemFile) {
- if (sslContext != null) throw new IllegalArgumentException("Cannot set both SSLContext and certificate");
- this.certificate = certificatePemFile;
- this.privateKey = privateKeyPemFile;
+ this.certificateFile = certificatePemFile;
+ this.privateKeyFile = privateKeyPemFile;
+ return this;
+ }
+
+ /** Sets the client SSL certificate chain and private key. */
+ public FeedClientBuilder setCertificate(Collection<X509Certificate> certificate, PrivateKey privateKey) {
+ this.certificate = certificate;
+ this.privateKey = privateKey;
return this;
}
- public FeedClientBuilder setCaCertificates(Path caCertificatesFile) {
- if (sslContext != null) throw new IllegalArgumentException("Cannot set both SSLContext and CA certificate");
- this.caCertificates = caCertificatesFile;
+ /** Sets the client SSL certificate and private key. */
+ public FeedClientBuilder setCertificate(X509Certificate certificate, PrivateKey privateKey) {
+ return setCertificate(Collections.singletonList(certificate), privateKey);
+ }
+
+ /**
+ * Overrides the JVM default SSL truststore.
+ * @param caCertificatesFile path to a PEM-encoded file containing the trusted certificates
+ */
+ public FeedClientBuilder setCaCertificatesFile(Path caCertificatesFile) {
+ this.caCertificatesFile = caCertificatesFile;
+ return this;
+ }
+
+ /** Overrides the JVM default SSL truststore. */
+ public FeedClientBuilder setCaCertificates(Collection<X509Certificate> caCertificates) {
+ this.caCertificates = caCertificates;
return this;
}
+ /** Constructs an instance of {@link ai.vespa.feed.client.FeedClient} from the builder configuration. */
public FeedClient build() {
try {
+ validateConfiguration();
return new HttpFeedClient(this);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
+ SSLContext constructSslContext() throws IOException {
+ if (sslContext != null) return sslContext;
+ SslContextBuilder sslContextBuilder = new SslContextBuilder();
+ if (certificateFile != null && privateKeyFile != null) {
+ sslContextBuilder.withCertificateAndKey(certificateFile, privateKeyFile);
+ } else if (certificate != null && privateKey != null) {
+ sslContextBuilder.withCertificateAndKey(certificate, privateKey);
+ }
+ if (caCertificatesFile != null) {
+ sslContextBuilder.withCaCertificates(caCertificatesFile);
+ } else if (caCertificates != null) {
+ sslContextBuilder.withCaCertificates(caCertificates);
+ }
+ return sslContextBuilder.build();
+ }
+
+ private void validateConfiguration() {
+ if (sslContext != null && (
+ certificateFile != null || caCertificatesFile != null || privateKeyFile != null ||
+ certificate != null || caCertificates != null || privateKey != null)) {
+ throw new IllegalArgumentException("Cannot set both SSLContext and certificate / CA certificates");
+ }
+ if (certificate != null && certificateFile != null) {
+ throw new IllegalArgumentException("Cannot set both certificate directly and as file");
+ }
+ if (privateKey != null && privateKeyFile != null) {
+ throw new IllegalArgumentException("Cannot set both private key directly and as file");
+ }
+ if (caCertificates != null && caCertificatesFile != null) {
+ throw new IllegalArgumentException("Cannot set both CA certificates directly and as file");
+ }
+ if (certificate != null && certificate.isEmpty()) {
+ throw new IllegalArgumentException("Certificate cannot be empty");
+ }
+ if (caCertificates != null && caCertificates.isEmpty()) {
+ throw new IllegalArgumentException("CA certificates cannot be empty");
+ }
+ }
+
}
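
A minimal sketch of how the reworked builder is meant to be used; the endpoint URL and PEM file names are hypothetical, and only methods shown in this patch are called.

    import ai.vespa.feed.client.FeedClient;
    import ai.vespa.feed.client.FeedClientBuilder;
    import ai.vespa.feed.client.GracePeriodCircuitBreaker;

    import java.net.URI;
    import java.nio.file.Paths;
    import java.time.Duration;

    public class FeedClientBuilderSketch {

        public static void main(String[] args) throws Exception {
            FeedClient client = FeedClientBuilder.create(URI.create("https://feed-endpoint.example.com:4443/")) // hypothetical endpoint
                    .setConnectionsPerEndpoint(4)                                // connections are now per endpoint
                    .setCertificate(Paths.get("data-plane-public-cert.pem"),     // hypothetical file names
                                    Paths.get("data-plane-private-key.pem"))
                    .setCircuitBreaker(new GracePeriodCircuitBreaker(Duration.ofSeconds(10), Duration.ofMinutes(10)))
                    .addRequestHeader("X-Client-Tag", () -> "feed-example")      // supplier is re-evaluated per request
                    .setBenchmarkOn(true)
                    .build();

            client.close();
        }

    }
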
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
index eb31d1aa808..54e11d3a185 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/FeedException.java
@@ -1,8 +1,47 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
+import java.util.Optional;
+
/**
+ * Signals that an error occurred during feeding.
+ *
* @author bjorncs
*/
public class FeedException extends RuntimeException {
+
+ private final DocumentId documentId;
+
+ public FeedException(String message) {
+ super(message);
+ this.documentId = null;
+ }
+
+ public FeedException(DocumentId documentId, String message) {
+ super(message);
+ this.documentId = documentId;
+ }
+
+ public FeedException(String message, Throwable cause) {
+ super(message, cause);
+ this.documentId = null;
+ }
+
+ public FeedException(Throwable cause) {
+ super(cause);
+ this.documentId = null;
+ }
+
+ public FeedException(DocumentId documentId, Throwable cause) {
+ super(cause);
+ this.documentId = documentId;
+ }
+
+ public FeedException(DocumentId documentId, String message, Throwable cause) {
+ super(message, cause);
+ this.documentId = documentId;
+ }
+
+ public Optional<DocumentId> documentId() { return Optional.ofNullable(documentId); }
+
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java
new file mode 100644
index 00000000000..2c5c2dccf19
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/GracePeriodCircuitBreaker.java
@@ -0,0 +1,71 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.time.Duration;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.LongSupplier;
+import java.util.logging.Logger;
+
+import static java.util.Objects.requireNonNull;
+import static java.util.logging.Level.INFO;
+import static java.util.logging.Level.WARNING;
+
+/**
+ * Breaks the circuit when no successes have been recorded for a specified time.
+ *
+ * @author jonmv
+ */
+public class GracePeriodCircuitBreaker implements FeedClient.CircuitBreaker {
+
+ private static final Logger log = Logger.getLogger(GracePeriodCircuitBreaker.class.getName());
+ private static final long NEVER = 1L << 60;
+
+ private final AtomicLong failingSinceMillis = new AtomicLong(NEVER);
+ private final AtomicBoolean halfOpen = new AtomicBoolean(false);
+ private final AtomicBoolean open = new AtomicBoolean(false);
+ private final LongSupplier clock;
+ private final long graceMillis;
+ private final long doomMillis;
+
+ public GracePeriodCircuitBreaker(Duration grace, Duration doom) {
+ this(System::currentTimeMillis, grace, doom);
+ }
+
+ GracePeriodCircuitBreaker(LongSupplier clock, Duration grace, Duration doom) {
+ if (grace.isNegative())
+ throw new IllegalArgumentException("Grace delay must be non-negative");
+
+ if (doom.isNegative())
+ throw new IllegalArgumentException("Doom delay must be non-negative");
+
+ this.clock = requireNonNull(clock);
+ this.graceMillis = grace.toMillis();
+ this.doomMillis = doom.toMillis();
+ }
+
+ @Override
+ public void success() {
+ failingSinceMillis.set(NEVER);
+ if ( ! open.get() && halfOpen.compareAndSet(true, false))
+ log.log(INFO, "Circuit breaker is now closed");
+ }
+
+ @Override
+ public void failure() {
+ failingSinceMillis.compareAndSet(NEVER, clock.getAsLong());
+ }
+
+ @Override
+ public State state() {
+ long failingMillis = clock.getAsLong() - failingSinceMillis.get();
+ if (failingMillis > graceMillis && halfOpen.compareAndSet(false, true))
+ log.log(INFO, "Circuit breaker is now half-open");
+
+ if (failingMillis > doomMillis && open.compareAndSet(false, true))
+ log.log(WARNING, "Circuit breaker is now open");
+
+ return open.get() ? State.OPEN : halfOpen.get() ? State.HALF_OPEN : State.CLOSED;
+ }
+
+}
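
The breaker is polled by the client's dispatch loop, but callers can also inspect it through FeedClient#circuitBreakerState(). A sketch of a simple watchdog, using only the states introduced here (HALF_OPEN once the grace period has passed without successes, OPEN once the doom period has passed):

    import ai.vespa.feed.client.FeedClient;
    import ai.vespa.feed.client.FeedClient.CircuitBreaker.State;

    public class CircuitBreakerWatchdog {

        /** Polls the breaker state until it opens, warning once the grace period has passed without successes. */
        static void watch(FeedClient client) throws InterruptedException {
            while (client.circuitBreakerState() != State.OPEN) {
                if (client.circuitBreakerState() == State.HALF_OPEN)
                    System.err.println("No successful responses within the grace period");
                Thread.sleep(1000);
            }
            System.err.println("No successful responses within the doom period; the client will shut down");
        }

    }
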
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
index 8a38e859ca4..2269c56cde4 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpFeedClient.java
@@ -1,40 +1,22 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
-import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
-import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
-import org.apache.hc.client5.http.config.RequestConfig;
-import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
-import org.apache.hc.client5.http.impl.async.H2AsyncClientBuilder;
-import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
-import org.apache.hc.core5.concurrent.FutureCallback;
-import org.apache.hc.core5.http.ContentType;
-import org.apache.hc.core5.http.message.BasicHeader;
-import org.apache.hc.core5.http2.config.H2Config;
-import org.apache.hc.core5.net.URIBuilder;
-import org.apache.hc.core5.reactor.IOReactorConfig;
-import org.apache.hc.core5.util.Timeout;
-
-import javax.net.ssl.SSLContext;
-import java.io.ByteArrayOutputStream;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+
import java.io.IOException;
-import java.io.PrintStream;
-import java.net.URI;
-import java.net.URISyntaxException;
-import java.time.Clock;
-import java.util.ArrayList;
-import java.util.Collections;
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;
+import java.util.StringJoiner;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Objects.requireNonNull;
-import static org.apache.hc.core5.http.ssl.TlsCiphers.excludeH2Blacklisted;
-import static org.apache.hc.core5.http.ssl.TlsCiphers.excludeWeak;
/**
* HTTP implementation of {@link FeedClient}
@@ -44,73 +26,19 @@ import static org.apache.hc.core5.http.ssl.TlsCiphers.excludeWeak;
*/
class HttpFeedClient implements FeedClient {
- private final URI endpoint;
+ private static final JsonFactory factory = new JsonFactory();
+
private final Map<String, Supplier<String>> requestHeaders;
private final RequestStrategy requestStrategy;
- private final List<CloseableHttpAsyncClient> httpClients = new ArrayList<>();
- private final List<AtomicInteger> inflight = new ArrayList<>();
private final AtomicBoolean closed = new AtomicBoolean();
HttpFeedClient(FeedClientBuilder builder) throws IOException {
- this.endpoint = builder.endpoint;
- this.requestHeaders = new HashMap<>(builder.requestHeaders);
- this.requestStrategy = new HttpRequestStrategy(builder, Clock.systemUTC());
-
- for (int i = 0; i < builder.maxConnections; i++) {
- CloseableHttpAsyncClient client = createHttpClient(builder);
- client.start();
- httpClients.add(client);
- inflight.add(new AtomicInteger());
- }
+ this(builder, new HttpRequestStrategy(builder));
}
- private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilder builder) throws IOException {
- H2AsyncClientBuilder httpClientBuilder = H2AsyncClientBuilder.create()
- .setUserAgent(String.format("vespa-feed-client/%s", Vespa.VERSION))
- .setDefaultHeaders(Collections.singletonList(new BasicHeader("Vespa-Client-Version", Vespa.VERSION)))
- .disableCookieManagement()
- .disableRedirectHandling()
- .disableAutomaticRetries()
- .setIOReactorConfig(IOReactorConfig.custom()
- .setSoTimeout(Timeout.ofSeconds(10))
- .build())
- .setDefaultRequestConfig(
- RequestConfig.custom()
- .setConnectTimeout(Timeout.ofSeconds(10))
- .setConnectionRequestTimeout(Timeout.DISABLED)
- .setResponseTimeout(Timeout.ofMinutes(5))
- .build())
- .setH2Config(H2Config.initial()
- .setMaxConcurrentStreams(builder.maxStreamsPerConnection)
- .setCompressionEnabled(true)
- .setPushEnabled(false)
- .build());
-
- SSLContext sslContext = constructSslContext(builder);
- String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites()));
- if (allowedCiphers.length == 0)
- throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM");
-
- ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create()
- .setCiphers(allowedCiphers)
- .setSslContext(sslContext);
- if (builder.hostnameVerifier != null) {
- tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier);
- }
- return httpClientBuilder.setTlsStrategy(tlsStrategyBuilder.build())
- .build();
- }
-
- private static SSLContext constructSslContext(FeedClientBuilder builder) throws IOException {
- if (builder.sslContext != null) return builder.sslContext;
- SslContextBuilder sslContextBuilder = new SslContextBuilder();
- if (builder.certificate != null && builder.privateKey != null) {
- sslContextBuilder.withCertificateAndKey(builder.certificate, builder.privateKey);
- }
- if (builder.caCertificates != null) {
- sslContextBuilder.withCaCertificates(builder.caCertificates);
- }
- return sslContextBuilder.build();
+ HttpFeedClient(FeedClientBuilder builder, RequestStrategy requestStrategy) {
+ this.requestHeaders = new HashMap<>(builder.requestHeaders);
+ this.requestStrategy = requestStrategy;
}
@Override
@@ -129,107 +57,122 @@ class HttpFeedClient implements FeedClient {
}
@Override
- public void close() throws IOException {
- if ( ! closed.getAndSet(true))
- for (CloseableHttpAsyncClient hc : httpClients)
- hc.close();
+ public OperationStats stats() {
+ return requestStrategy.stats();
}
- private CompletableFuture<Result> send(String method, DocumentId documentId, String operationJson, OperationParameters params) {
- SimpleHttpRequest request = new SimpleHttpRequest(method, operationUrl(endpoint, documentId, params));
- requestHeaders.forEach((name, value) -> request.setHeader(name, value.get()));
- if (operationJson != null)
- request.setBody(operationJson, ContentType.APPLICATION_JSON);
-
- return requestStrategy.enqueue(documentId, request, this::send)
- .handle((response, thrown) -> {
- if (thrown != null) {
- if (requestStrategy.hasFailed()) {
- try { close(); }
- catch (IOException exception) { thrown.addSuppressed(exception); }
- }
- ByteArrayOutputStream buffer = new ByteArrayOutputStream();
- thrown.printStackTrace(new PrintStream(buffer));
- return new Result(Result.Type.failure, documentId, buffer.toString(), null);
- }
- return toResult(response, documentId);
- });
+ @Override
+ public CircuitBreaker.State circuitBreakerState() {
+ return requestStrategy.circuitBreakerState();
}
- /** Sends the given request to the client with the least current inflight requests, completing the given vessel when done. */
- private void send(SimpleHttpRequest request, CompletableFuture<SimpleHttpResponse> vessel) {
- int index = 0;
- int min = Integer.MAX_VALUE;
- for (int i = 0; i < httpClients.size(); i++)
- if (inflight.get(i).get() < min) {
- min = inflight.get(i).get();
- index = i;
- }
+ @Override
+ public void close(boolean graceful) {
+ closed.set(true);
+ if (graceful)
+ requestStrategy.await();
- inflight.get(index).incrementAndGet();
- try {
- httpClients.get(index).execute(request,
- new FutureCallback<SimpleHttpResponse>() {
- @Override public void completed(SimpleHttpResponse response) { vessel.complete(response); }
- @Override public void failed(Exception ex) { vessel.completeExceptionally(ex); }
- @Override public void cancelled() { vessel.cancel(false); }
- });
- }
- catch (Throwable thrown) {
- vessel.completeExceptionally(thrown);
- }
- vessel.thenRun(inflight.get(index)::decrementAndGet);
+ requestStrategy.destroy();
+ }
+
+ private CompletableFuture<Result> send(String method, DocumentId documentId, String operationJson, OperationParameters params) {
+ HttpRequest request = new HttpRequest(method,
+ getPath(documentId) + getQuery(params),
+ requestHeaders,
+ operationJson.getBytes(UTF_8)); // TODO: make it bytes all the way?
+
+ return requestStrategy.enqueue(documentId, request)
+ .thenApply(response -> toResult(request, response, documentId));
}
- static Result toResult(SimpleHttpResponse response, DocumentId documentId) {
+ static Result toResult(HttpRequest request, HttpResponse response, DocumentId documentId) {
Result.Type type;
- switch (response.getCode()) {
+ switch (response.code()) {
case 200: type = Result.Type.success; break;
case 412: type = Result.Type.conditionNotMet; break;
- default: type = Result.Type.failure;
+ case 502:
+ case 504:
+ case 507: type = Result.Type.failure; break;
+ default: type = null;
}
- Map<String, String> responseJson = null; // TODO: parse JSON on error.
- return new Result(type, documentId, response.getBodyText(), "trace");
+
+ String message = null;
+ String trace = null;
+ try {
+ JsonParser parser = factory.createParser(response.body());
+ if (parser.nextToken() != JsonToken.START_OBJECT)
+ throw new ResultParseException(
+ documentId,
+ "Expected '" + JsonToken.START_OBJECT + "', but found '" + parser.currentToken() + "' in: "
+ + new String(response.body(), UTF_8));
+
+ String name;
+ while ((name = parser.nextFieldName()) != null) {
+ switch (name) {
+ case "message": message = parser.nextTextValue(); break;
+ case "trace": trace = parser.nextTextValue(); break;
+ default: parser.nextToken();
+ }
+ }
+
+ if (parser.currentToken() != JsonToken.END_OBJECT)
+ throw new ResultParseException(
+ documentId,
+ "Expected '" + JsonToken.END_OBJECT + "', but found '" + parser.currentToken() + "' in: "
+ + new String(response.body(), UTF_8));
+ }
+ catch (IOException e) {
+ throw new ResultParseException(documentId, e);
+ }
+
+ if (type == null) // Not a Vespa response, but a failure in the HTTP layer.
+ throw new ResultParseException(
+ documentId,
+ "Status " + response.code() + " executing '" + request + "': "
+ + (message == null ? new String(response.body(), UTF_8) : message));
+
+ return new Result(type, documentId, message, trace);
}
- static List<String> toPath(DocumentId documentId) {
- List<String> path = new ArrayList<>();
+ static String getPath(DocumentId documentId) {
+ StringJoiner path = new StringJoiner("/", "/", "");
path.add("document");
path.add("v1");
- path.add(documentId.namespace());
- path.add(documentId.documentType());
+ path.add(encode(documentId.namespace()));
+ path.add(encode(documentId.documentType()));
if (documentId.number().isPresent()) {
path.add("number");
path.add(Long.toUnsignedString(documentId.number().getAsLong()));
}
else if (documentId.group().isPresent()) {
path.add("group");
- path.add(documentId.group().get());
+ path.add(encode(documentId.group().get()));
}
else {
path.add("docid");
}
- path.add(documentId.userSpecific());
+ path.add(encode(documentId.userSpecific()));
- return path;
+ return path.toString();
}
- static URI operationUrl(URI endpoint, DocumentId documentId, OperationParameters params) {
- URIBuilder url = new URIBuilder(endpoint);
- url.setPathSegments(toPath(documentId));
-
- if (params.createIfNonExistent()) url.addParameter("create", "true");
- params.testAndSetCondition().ifPresent(condition -> url.addParameter("condition", condition));
- params.timeout().ifPresent(timeout -> url.addParameter("timeout", timeout.toMillis() + "ms"));
- params.route().ifPresent(route -> url.addParameter("route", route));
- params.tracelevel().ifPresent(tracelevel -> url.addParameter("tracelevel", Integer.toString(tracelevel)));
-
+ static String encode(String raw) {
try {
- return url.build();
+ return URLEncoder.encode(raw, UTF_8.name());
}
- catch (URISyntaxException e) {
+ catch (UnsupportedEncodingException e) {
throw new IllegalStateException(e);
}
}
+ static String getQuery(OperationParameters params) {
+ StringJoiner query = new StringJoiner("&", "?", "").setEmptyValue("");
+ if (params.createIfNonExistent()) query.add("create=true");
+ params.testAndSetCondition().ifPresent(condition -> query.add("condition=" + encode(condition)));
+ params.timeout().ifPresent(timeout -> query.add("timeout=" + timeout.toMillis() + "ms"));
+ params.route().ifPresent(route -> query.add("route=" + encode(route)));
+ params.tracelevel().ifPresent(tracelevel -> query.add("tracelevel=" + tracelevel));
+ return query.toString();
+ }
+
}
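
The document/v1 path and query construction above can be illustrated with a couple of ids. This sketch assumes it runs in the ai.vespa.feed.client package, since HttpFeedClient and its helpers are package-private; the ids follow the forms shown in the JsonFeeder javadoc.

    package ai.vespa.feed.client;

    import java.time.Duration;

    public class DocumentV1PathSketch {

        public static void main(String[] args) {
            // Plain "docid" ids become /document/v1/<namespace>/<doctype>/docid/<user-specific>.
            System.out.println(HttpFeedClient.getPath(DocumentId.of("id:music:song::track-1")));
            // -> /document/v1/music/song/docid/track-1

            // Grouped ids become /document/v1/<namespace>/<doctype>/group/<group>/<user-specific>.
            System.out.println(HttpFeedClient.getPath(DocumentId.of("id:music:song:g=beatles:yesterday")));
            // -> /document/v1/music/song/group/beatles/yesterday

            // Operation parameters become the query string; empty parameters yield an empty string, not "?".
            System.out.println(HttpFeedClient.getQuery(OperationParameters.empty()
                                                                          .createIfNonExistent(true)
                                                                          .timeout(Duration.ofSeconds(5))));
            // -> ?create=true&timeout=5000ms
        }

    }
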
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java
new file mode 100644
index 00000000000..8da2f46def2
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequest.java
@@ -0,0 +1,42 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.Map;
+import java.util.function.Supplier;
+
+class HttpRequest {
+
+ private final String method;
+ private final String path;
+ private final Map<String, Supplier<String>> headers;
+ private final byte[] body;
+
+ public HttpRequest(String method, String path, Map<String, Supplier<String>> headers, byte[] body) {
+ this.method = method;
+ this.path = path;
+ this.headers = headers;
+ this.body = body;
+ }
+
+ public String method() {
+ return method;
+ }
+
+ public String path() {
+ return path;
+ }
+
+ public Map<String, Supplier<String>> headers() {
+ return headers;
+ }
+
+ public byte[] body() {
+ return body;
+ }
+
+ @Override
+ public String toString() {
+ return method + " " + path;
+ }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
index d0d67d65446..e9cd0baba5b 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpRequestStrategy.java
@@ -1,26 +1,31 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
+import ai.vespa.feed.client.FeedClient.CircuitBreaker;
import ai.vespa.feed.client.FeedClient.RetryStrategy;
-import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
-import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
import java.io.IOException;
-import java.time.Clock;
-import java.time.Instant;
-import java.util.HashMap;
+import java.nio.channels.CancelledKeyException;
import java.util.Map;
-import java.util.concurrent.BlockingQueue;
+import java.util.Queue;
+import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.function.BiConsumer;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.Logger;
-import static java.lang.Math.max;
-import static java.lang.Math.min;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.CLOSED;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.HALF_OPEN;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.OPEN;
+import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.logging.Level.FINE;
-import static java.util.logging.Level.INFO;
+import static java.util.logging.Level.WARNING;
+// TODO: update doc
/**
* Controls request execution and retries:
* <ul>
@@ -31,58 +36,94 @@ import static java.util.logging.Level.INFO;
*
* @author jonmv
*/
-class HttpRequestStrategy implements RequestStrategy, AutoCloseable {
+class HttpRequestStrategy implements RequestStrategy {
private static final Logger log = Logger.getLogger(HttpRequestStrategy.class.getName());
- private final Map<DocumentId, CompletableFuture<Void>> inflightById = new HashMap<>();
- private final Object monitor = new Object();
- private final Clock clock;
- private final RetryStrategy wrapped;
- private final Thread delayer = new Thread(this::drainDelayed, "feed-client-retry-delayer");
- private final BlockingQueue<CompletableFuture<Void>> delayed = new LinkedBlockingQueue<>();
- private final long maxInflight;
- private final long minInflight;
- private double targetInflight;
- private long inflight = 0;
- private long consecutiveSuccesses = 0;
- private Instant lastSuccess;
- private boolean failed = false;
- private boolean closed = false;
-
- HttpRequestStrategy(FeedClientBuilder builder, Clock clock) {
- this.wrapped = builder.retryStrategy;
- this.maxInflight = builder.maxConnections * (long) builder.maxStreamsPerConnection;
- this.minInflight = builder.maxConnections * (long) min(16, builder.maxStreamsPerConnection);
- this.targetInflight = Math.sqrt(maxInflight) * (Math.sqrt(minInflight));
- this.clock = clock;
- this.lastSuccess = clock.instant();
- this.delayer.start();
- }
-
- private void drainDelayed() {
- try {
- while (true) {
- do delayed.take().complete(null);
- while ( ! hasFailed());
+ private final Cluster cluster;
+ private final Map<DocumentId, CompletableFuture<?>> inflightById = new ConcurrentHashMap<>();
+ private final RetryStrategy strategy;
+ private final CircuitBreaker breaker;
+ final FeedClient.Throttler throttler;
+ private final Queue<Runnable> queue = new ConcurrentLinkedQueue<>();
+ private final AtomicLong inflight = new AtomicLong(0);
+ private final AtomicBoolean destroyed = new AtomicBoolean(false);
+ private final AtomicLong delayedCount = new AtomicLong(0);
+ private final ExecutorService resultExecutor = Executors.newSingleThreadExecutor(runnable -> {
+ Thread thread = new Thread(runnable, "feed-client-result-executor");
+ thread.setDaemon(true);
+ return thread;
+ });
+
+ HttpRequestStrategy(FeedClientBuilder builder) throws IOException {
+ this(builder, new ApacheCluster(builder));
+ }
+
+ HttpRequestStrategy(FeedClientBuilder builder, Cluster cluster) {
+ this.cluster = builder.benchmark ? new BenchmarkingCluster(cluster) : cluster;
+ this.strategy = builder.retryStrategy;
+ this.breaker = builder.circuitBreaker;
+ this.throttler = new DynamicThrottler(builder);
+
+ Thread dispatcher = new Thread(this::dispatch, "feed-client-dispatcher");
+ dispatcher.setDaemon(true);
+ dispatcher.start();
+ }
+
+ @Override
+ public OperationStats stats() {
+ return cluster.stats();
+ }
+
+ @Override
+ public CircuitBreaker.State circuitBreakerState() {
+ return breaker.state();
+ }
- Thread.sleep(1000);
+ private void dispatch() {
+ try {
+ while (breaker.state() != OPEN && ! destroyed.get()) {
+ while ( ! isInExcess() && poll() && breaker.state() == CLOSED);
+ // Sleep while the circuit is half-open; nap when the queue is empty or we are throttled.
+ Thread.sleep(breaker.state() == HALF_OPEN ? 1000 : 10); // TODO: Reduce throughput when turning half-open?
}
}
catch (InterruptedException e) {
- delayed.forEach(action -> action.cancel(true));
+ Thread.currentThread().interrupt();
+ log.log(WARNING, "Dispatch thread interrupted; shutting down");
}
+ destroy();
+ }
+
+ private void offer(HttpRequest request, CompletableFuture<HttpResponse> vessel) {
+ delayedCount.incrementAndGet();
+ queue.offer(() -> {
+ cluster.dispatch(request, vessel);
+ });
+ }
+
+ private boolean poll() {
+ Runnable task = queue.poll();
+ if (task == null) return false;
+ delayedCount.decrementAndGet();
+ task.run();
+ return true;
}
- private boolean retry(SimpleHttpRequest request, int attempt) {
- if (attempt >= wrapped.retries())
+
+ private boolean isInExcess() {
+ return inflight.get() - delayedCount.get() > throttler.targetInflight();
+ }
+
+ private boolean retry(HttpRequest request, int attempt) {
+ if (attempt > strategy.retries())
return false;
- switch (request.getMethod().toUpperCase()) {
- case "POST": return wrapped.retry(FeedClient.OperationType.put);
- case "PUT": return wrapped.retry(FeedClient.OperationType.update);
- case "DELETE": return wrapped.retry(FeedClient.OperationType.remove);
- default: throw new IllegalStateException("Unexpected HTTP method: " + request.getMethod());
+ switch (request.method().toUpperCase()) {
+ case "POST": return strategy.retry(FeedClient.OperationType.PUT);
+ case "PUT": return strategy.retry(FeedClient.OperationType.UPDATE);
+ case "DELETE": return strategy.retry(FeedClient.OperationType.REMOVE);
+ default: throw new IllegalStateException("Unexpected HTTP method: " + request.method());
}
}
@@ -90,158 +131,126 @@ class HttpRequestStrategy implements RequestStrategy, AutoCloseable {
* Retries all IOExceptions, unless error rate has converged to a value higher than the threshold,
* or the user has turned off retries for this type of operation.
*/
- private boolean retry(SimpleHttpRequest request, Throwable thrown, int attempt) {
- failure();
- log.log(INFO, thrown, () -> "Failed attempt " + attempt + " at " + request + ", " + consecutiveSuccesses + " successes since last error");
-
- if ( ! (thrown instanceof IOException))
- return false;
+ private boolean retry(HttpRequest request, Throwable thrown, int attempt) {
+ breaker.failure();
+ log.log(FINE, thrown, () -> "Failed attempt " + attempt + " at " + request);
- return retry(request, attempt);
- }
-
- void success() {
- Instant now = clock.instant();
- synchronized (monitor) {
- ++consecutiveSuccesses;
- lastSuccess = now;
- targetInflight = min(targetInflight + 0.1, maxInflight);
- }
- }
+ if ( (thrown instanceof IOException) // General IO problems.
+ || (thrown instanceof CancellationException) // TLS session disconnect.
+ || (thrown instanceof CancelledKeyException)) // Selection cancelled.
+ return retry(request, attempt);
- void failure() {
- Instant threshold = clock.instant().minusSeconds(300);
- synchronized (monitor) {
- consecutiveSuccesses = 0;
- if (lastSuccess.isBefore(threshold))
- failed = true;
- }
+ return false;
}
/** Retries throttled requests (429, 503), adjusting the target inflight count, and transient server errors (500, 502, 504). */
- private boolean retry(SimpleHttpRequest request, SimpleHttpResponse response, int attempt) {
- if (response.getCode() / 100 == 2) {
- success();
+ private boolean retry(HttpRequest request, HttpResponse response, int attempt) {
+ if (response.code() / 100 == 2) {
+ breaker.success();
+ throttler.success();
return false;
}
- if (response.getCode() == 429 || response.getCode() == 503) { // Throttling; reduce target inflight.
- synchronized (monitor) {
- targetInflight = max(inflight * 0.9, minInflight);
- }
- log.log(FINE, () -> "Status code " + response.getCode() + " (" + response.getBodyText() + ") on attempt " + attempt +
- " at " + request + ", " + consecutiveSuccesses + " successes since last error");
+ log.log(FINE, () -> "Status code " + response.code() + " (" + new String(response.body(), UTF_8) +
+ ") on attempt " + attempt + " at " + request);
+ if (response.code() == 429 || response.code() == 503) { // Throttling; reduce target inflight.
+ throttler.throttled((inflight.get() - delayedCount.get()));
return true;
}
- log.log(INFO, () -> "Status code " + response.getCode() + " (" + response.getBodyText() + ") on attempt " + attempt +
- " at " + request + ", " + consecutiveSuccesses + " successes since last error");
+ breaker.failure();
+ if (response.code() == 500 || response.code() == 502 || response.code() == 504) // Hopefully temporary errors.
+ return retry(request, attempt);
- failure();
- if (response.getCode() != 500 && response.getCode() != 502)
- return false;
-
- return retry(request, attempt); // Hopefully temporary errors.
+ return false;
}
- // Must hold lock.
private void acquireSlot() {
try {
- while (inflight >= targetInflight)
- monitor.wait();
+ while (inflight.get() >= throttler.targetInflight())
+ Thread.sleep(1);
- ++inflight;
+ inflight.incrementAndGet();
}
catch (InterruptedException e) {
throw new RuntimeException(e);
}
}
- // Must hold lock.
private void releaseSlot() {
- for (long i = --inflight; i < targetInflight; i++)
- monitor.notify();
+ inflight.decrementAndGet();
}
- @Override
- public boolean hasFailed() {
- synchronized (monitor) {
- return failed;
+ public void await() {
+ try {
+ while (inflight.get() > 0)
+ Thread.sleep(10);
+ }
+ catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
}
}
@Override
- public CompletableFuture<SimpleHttpResponse> enqueue(DocumentId documentId, SimpleHttpRequest request,
- BiConsumer<SimpleHttpRequest, CompletableFuture<SimpleHttpResponse>> dispatch) {
- CompletableFuture<SimpleHttpResponse> result = new CompletableFuture<>(); // Carries the aggregate result of the operation, including retries.
- CompletableFuture<SimpleHttpResponse> vessel = new CompletableFuture<>(); // Holds the computation of a single dispatch to the HTTP client.
- CompletableFuture<Void> blocker = new CompletableFuture<>(); // Blocks the next operation with same doc-id, then triggers it when complete.
-
- // Get the previous inflight operation for this doc-id, or acquire a send slot.
- CompletableFuture<Void> previous;
- synchronized (monitor) {
- previous = inflightById.put(documentId, blocker);
- if (previous == null)
- acquireSlot();
+ public CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request) {
+ CompletableFuture<HttpResponse> result = new CompletableFuture<>(); // Carries the aggregate result of the operation, including retries.
+ CompletableFuture<HttpResponse> vessel = new CompletableFuture<>(); // Holds the computation of a single dispatch to the HTTP client.
+ CompletableFuture<?> previous = inflightById.put(documentId, result);
+ if (destroyed.get()) {
+ result.cancel(true);
+ return result;
+ }
+
+ if (previous == null) {
+ acquireSlot();
+ offer(request, vessel);
+ throttler.sent(inflight.get(), result);
}
- if (previous == null) // Send immediately if none inflight ...
- dispatch.accept(request, vessel);
- else // ... or send when the previous inflight is done.
- previous.thenRun(() -> dispatch.accept(request, vessel));
-
- handleAttempt(vessel, dispatch, request, result, 1);
-
- result.thenRun(() -> {
- CompletableFuture<Void> current;
- synchronized (monitor) {
- current = inflightById.get(documentId);
- if (current == blocker) { // Release slot and clear map if no other operations enqueued for this doc-id ...
- releaseSlot();
- inflightById.put(documentId, null);
- }
+ else
+ previous.whenComplete((__, ___) -> offer(request, vessel));
+
+ handleAttempt(vessel, request, result, 1);
+
+ return result.handle((response, error) -> {
+ if (inflightById.compute(documentId, (____, current) -> current == result ? null : current) == null)
+ releaseSlot();
+
+ if (error != null) {
+ if (error instanceof FeedException) throw (FeedException) error;
+ throw new FeedException(documentId, error);
}
- if (current != blocker) // ... or trigger sending the next enqueued operation.
- blocker.complete(null);
+ return response;
});
-
- return result;
}
/** Handles the result of one attempt at the given operation, retrying if necessary. */
- private void handleAttempt(CompletableFuture<SimpleHttpResponse> vessel, BiConsumer<SimpleHttpRequest, CompletableFuture<SimpleHttpResponse>> dispatch,
- SimpleHttpRequest request, CompletableFuture<SimpleHttpResponse> result, int attempt) {
- vessel.whenComplete((response, thrown) -> {
- // Retry the operation if it failed with a transient error ...
- if (thrown != null ? retry(request, thrown, attempt)
- : retry(request, response, attempt)) {
- CompletableFuture<SimpleHttpResponse> retry = new CompletableFuture<>();
- boolean hasFailed = hasFailed();
- if (hasFailed)
- delayed.add(new CompletableFuture<>().thenRun(() -> dispatch.accept(request, retry)));
- else
- dispatch.accept(request, retry);
- handleAttempt(retry, dispatch, request, result, attempt + (hasFailed ? 0 : 1));
- return;
- }
-
- // ... or accept the outcome and mark the operation as complete.
- if (thrown == null) result.complete(response);
- else result.completeExceptionally(thrown);
- });
+ private void handleAttempt(CompletableFuture<HttpResponse> vessel, HttpRequest request, CompletableFuture<HttpResponse> result, int attempt) {
+ vessel.whenCompleteAsync((response, thrown) -> {
+ // Retry the operation if it failed with a transient error ...
+ if (thrown != null ? retry(request, thrown, attempt)
+ : retry(request, response, attempt)) {
+ CircuitBreaker.State state = breaker.state();
+ CompletableFuture<HttpResponse> retry = new CompletableFuture<>();
+ offer(request, retry);
+ handleAttempt(retry, request, result, attempt + (state == HALF_OPEN ? 0 : 1));
+ }
+ // ... or accept the outcome and mark the operation as complete.
+ else {
+ if (thrown == null) result.complete(response);
+ else result.completeExceptionally(thrown);
+ }
+ },
+ resultExecutor);
}
@Override
- public void close() {
- synchronized (monitor) {
- if (closed)
- return;
-
- closed = true;
+ public void destroy() {
+ if ( ! destroyed.getAndSet(true)) {
+ inflightById.values().forEach(result -> result.cancel(true));
+ cluster.close();
+ resultExecutor.shutdown();
}
- delayer.interrupt();
- try { delayer.join(); }
- catch (InterruptedException e) { Thread.currentThread().interrupt(); }
}
}
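
The retry mapping above (POST to PUT, PUT to UPDATE, DELETE to REMOVE) consults the user-supplied RetryStrategy. A sketch of a custom strategy, assuming the interface exposes the retry(OperationType) and retries() methods used here, with retries() returning an int; it would be installed with FeedClientBuilder#setRetryStrategy.

    import ai.vespa.feed.client.FeedClient;
    import ai.vespa.feed.client.FeedClient.OperationType;

    /** Sketch: retries only idempotent operation types, and at most five times per operation. */
    class IdempotentOnlyRetryStrategy implements FeedClient.RetryStrategy {

        @Override public boolean retry(OperationType type) {
            return type == OperationType.PUT || type == OperationType.REMOVE;
        }

        @Override public int retries() {
            return 5;
        }

    }
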
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java
new file mode 100644
index 00000000000..b1dd54240eb
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/HttpResponse.java
@@ -0,0 +1,16 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+interface HttpResponse {
+
+ int code();
+ byte[] body();
+
+ static HttpResponse of(int code, byte[] body) {
+ return new HttpResponse() {
+ @Override public int code() { return code; }
+ @Override public byte[] body() { return body; }
+ };
+ }
+
+}
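
The factory above makes it straightforward to exercise the response-to-Result translation without a real server. A sketch, assuming same-package access since these types are package-private:

    package ai.vespa.feed.client;

    import java.util.Collections;

    import static java.nio.charset.StandardCharsets.UTF_8;

    public class ToResultSketch {

        public static void main(String[] args) {
            HttpRequest request = new HttpRequest("POST",
                                                  "/document/v1/music/song/docid/track-1",
                                                  Collections.emptyMap(),
                                                  null);
            HttpResponse response = HttpResponse.of(200, "{\"message\":\"stored\",\"trace\":\"...\"}".getBytes(UTF_8));
            Result result = HttpFeedClient.toResult(request, response, DocumentId.of("id:music:song::track-1"));
            System.out.println(result); // a success Result carrying the parsed message and trace
        }

    }
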
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java
new file mode 100644
index 00000000000..0ba373eef18
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonFeeder.java
@@ -0,0 +1,484 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import ai.vespa.feed.client.FeedClient.OperationType;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InterruptedIOException;
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static ai.vespa.feed.client.FeedClient.OperationType.PUT;
+import static ai.vespa.feed.client.FeedClient.OperationType.REMOVE;
+import static ai.vespa.feed.client.FeedClient.OperationType.UPDATE;
+import static com.fasterxml.jackson.core.JsonToken.END_ARRAY;
+import static com.fasterxml.jackson.core.JsonToken.START_ARRAY;
+import static com.fasterxml.jackson.core.JsonToken.START_OBJECT;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_FALSE;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_STRING;
+import static com.fasterxml.jackson.core.JsonToken.VALUE_TRUE;
+import static java.lang.Math.min;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.Objects.requireNonNull;
+
+/**
+ * @author jonmv
+ * @author bjorncs
+ */
+public class JsonFeeder implements Closeable {
+
+ private final ExecutorService resultExecutor = Executors.newSingleThreadExecutor(r -> {
+ Thread t = new Thread(r, "json-feeder-result-executor");
+ t.setDaemon(true);
+ return t;
+ });
+ private final FeedClient client;
+ private final OperationParameters protoParameters;
+
+ private JsonFeeder(FeedClient client, OperationParameters protoParameters) {
+ this.client = client;
+ this.protoParameters = protoParameters;
+ }
+
+ public interface ResultCallback {
+ /**
+ * Invoked after each operation has either completed successfully or failed
+ *
+ * @param result Non-null if operation completed successfully
+ * @param error Non-null if operation failed
+ */
+ default void onNextResult(Result result, FeedException error) { }
+
+ /**
+ * Invoked if an unrecoverable error occurred during feed processing,
+ * after which no other {@link ResultCallback} methods are invoked.
+ */
+ default void onError(FeedException error) { }
+
+ /**
+ * Invoked when all feed operations are either completed successfully or failed.
+ */
+ default void onComplete() { }
+ }
+
+ public static Builder builder(FeedClient client) { return new Builder(client); }
+
+ /** Feeds a single JSON feed operation of the form
+ * <pre>
+ * {
+ * "id": "id:ns:type::boo",
+ * "fields": { ... document fields ... }
+ * }
+ * </pre>
+ * Exceptional completion will use an instance of {@link FeedException} or one of its sub-classes.
+ */
+ public CompletableFuture<Result> feedSingle(String json) {
+ CompletableFuture<Result> result = new CompletableFuture<>();
+ try {
+ SingleOperationParserAndExecutor parser = new SingleOperationParserAndExecutor(json.getBytes(UTF_8));
+ parser.next().whenCompleteAsync((operationResult, error) -> {
+ if (error != null) {
+ result.completeExceptionally(error);
+ } else {
+ result.complete(operationResult);
+ }
+ }, resultExecutor);
+ } catch (Exception e) {
+ resultExecutor.execute(() -> result.completeExceptionally(wrapException(e)));
+ }
+ return result;
+ }
+
+ /** Feeds a stream containing a JSON array of feed operations of the form
+ * <pre>
+ * [
+ * {
+ * "id": "id:ns:type::boo",
+ * "fields": { ... document fields ... }
+ * },
+ * {
+ * "put": "id:ns:type::foo",
+ * "fields": { ... document fields ... }
+ * },
+ * {
+ * "update": "id:ns:type:n=4:bar",
+ * "create": true,
+ * "fields": { ... partial update fields ... }
+ * },
+ * {
+ * "remove": "id:ns:type:g=foo:bar",
+ * "condition": "type.baz = \"bax\""
+ * },
+ * ...
+ * ]
+ * </pre>
+ * Note that {@code "id"} is an alias for the document put operation.
+ * Exceptional completion will use an instance of {@link FeedException} or one of its sub-classes.
+ */
+ public CompletableFuture<Void> feedMany(InputStream jsonStream, ResultCallback resultCallback) {
+ return feedMany(jsonStream, 1 << 26, resultCallback);
+ }
+
+ /**
+ * Same as {@link #feedMany(InputStream, ResultCallback)}, but without a provided {@link ResultCallback} instance.
+ * @see JsonFeeder#feedMany(InputStream, ResultCallback) for details.
+ */
+ public CompletableFuture<Void> feedMany(InputStream jsonStream) {
+ return feedMany(jsonStream, new ResultCallback() { });
+ }
+
+ CompletableFuture<Void> feedMany(InputStream jsonStream, int size, ResultCallback resultCallback) {
+ CompletableFuture<Void> overallResult = new CompletableFuture<>();
+ CompletableFuture<Result> result;
+ AtomicInteger pending = new AtomicInteger(1); // The below dispatch loop itself is counted as a single pending operation
+ AtomicBoolean finalCallbackInvoked = new AtomicBoolean();
+ try {
+ RingBufferStream buffer = new RingBufferStream(jsonStream, size);
+ while ((result = buffer.next()) != null) {
+ pending.incrementAndGet();
+ result.whenCompleteAsync((r, t) -> {
+ if (!finalCallbackInvoked.get()) {
+ resultCallback.onNextResult(r, (FeedException) t);
+ }
+ if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) {
+ resultCallback.onComplete();
+ overallResult.complete(null);
+ }
+ }, resultExecutor);
+ }
+ if (pending.decrementAndGet() == 0 && finalCallbackInvoked.compareAndSet(false, true)) {
+ resultExecutor.execute(() -> {
+ resultCallback.onComplete();
+ overallResult.complete(null);
+ });
+ }
+ } catch (Exception e) {
+ if (finalCallbackInvoked.compareAndSet(false, true)) {
+ resultExecutor.execute(() -> {
+ FeedException wrapped = wrapException(e);
+ resultCallback.onError(wrapped);
+ overallResult.completeExceptionally(wrapped);
+ });
+ }
+ }
+ return overallResult;
+ }
+
+ private static final JsonFactory factory = new JsonFactory();
+
+ @Override public void close() throws IOException {
+ client.close();
+ resultExecutor.shutdown();
+ try {
+ if (!resultExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
+ throw new IOException("Failed to close client in time");
+ }
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ private FeedException wrapException(Exception e) {
+ if (e instanceof FeedException) return (FeedException) e;
+ if (e instanceof IOException) {
+ return new OperationParseException("Failed to parse document JSON: " + e.getMessage(), e);
+ }
+ return new FeedException(e);
+ }
+
+ private class RingBufferStream extends InputStream {
+
+ private final byte[] b = new byte[1];
+ private final InputStream in;
+ private final byte[] data;
+ private final int size;
+ private final Object lock = new Object();
+ private IOException thrown = null;
+ private long tail = 0;
+ private long pos = 0;
+ private long head = 0;
+ private boolean done = false;
+ private final OperationParserAndExecutor parserAndExecutor;
+
+ RingBufferStream(InputStream in, int size) throws IOException {
+ this.in = in;
+ this.data = new byte[size];
+ this.size = size;
+
+ new Thread(this::fill, "feed-reader").start();
+
+ this.parserAndExecutor = new RingBufferBackedOperationParserAndExecutor(factory.createParser(this));
+ }
+
+ @Override
+ public int read() throws IOException {
+ return read(b, 0, 1) == -1 ? -1 : b[0];
+ }
+
+ @Override
+ public int read(byte[] buffer, int off, int len) throws IOException {
+ try {
+ int ready;
+ synchronized (lock) {
+ while ((ready = (int) (head - pos)) == 0 && ! done)
+ lock.wait();
+ }
+ if (thrown != null) throw thrown;
+ if (ready == 0) return -1;
+
+ ready = min(ready, len);
+ int offset = (int) (pos % size);
+ int length = min(ready, size - offset);
+ System.arraycopy(data, offset, buffer, off, length);
+ if (length < ready)
+ System.arraycopy(data, 0, buffer, off + length, ready - length);
+
+ pos += ready;
+ return ready;
+ }
+ catch (InterruptedException e) {
+ throw new InterruptedIOException("Interrupted waiting for data: " + e.getMessage());
+ }
+ }
+
+ public CompletableFuture<Result> next() throws IOException {
+ return parserAndExecutor.next();
+ }
+
+ private final byte[] prefix = "{\"fields\":".getBytes(UTF_8);
+ private byte[] copy(long start, long end) {
+ int length = (int) (end - start);
+ byte[] buffer = new byte[prefix.length + length + 1];
+ System.arraycopy(prefix, 0, buffer, 0, prefix.length);
+
+ int offset = (int) (start % size);
+ int toWrite = min(length, size - offset);
+ System.arraycopy(data, offset, buffer, prefix.length, toWrite);
+ if (toWrite < length)
+ System.arraycopy(data, 0, buffer, prefix.length + toWrite, length - toWrite);
+
+ buffer[buffer.length - 1] = '}';
+ return buffer;
+ }
+
+
+ @Override
+ public void close() throws IOException {
+ synchronized (lock) {
+ done = true;
+ lock.notifyAll();
+ }
+ in.close();
+ }
+
+ private void fill() {
+ try {
+ while (true) {
+ int free;
+ synchronized (lock) {
+ while ((free = (int) (tail + size - head)) <= 0 && !done)
+ lock.wait();
+ }
+ if (done) break;
+
+ int off = (int) (head % size);
+ int len = min(min(free, size - off), 1 << 13);
+ int read = in.read(data, off, len);
+
+ synchronized (lock) {
+ if (read < 0) done = true;
+ else head += read;
+ lock.notify();
+ }
+ }
+ } catch (InterruptedException e) {
+ synchronized (lock) {
+ done = true;
+ thrown = new InterruptedIOException("Interrupted reading data: " + e.getMessage());
+ }
+ } catch (IOException e) {
+ synchronized (lock) {
+ done = true;
+ thrown = e;
+ }
+ }
+ }
+
+ private class RingBufferBackedOperationParserAndExecutor extends OperationParserAndExecutor {
+
+ RingBufferBackedOperationParserAndExecutor(JsonParser parser) { super(parser, true); }
+
+ @Override
+ String getDocumentJson(long start, long end) {
+ String payload = new String(copy(start, end), UTF_8);
+ synchronized (lock) {
+ tail = end;
+ lock.notify();
+ }
+ return payload;
+ }
+ }
+ }
+
+ private class SingleOperationParserAndExecutor extends OperationParserAndExecutor {
+
+ private final byte[] json;
+
+ SingleOperationParserAndExecutor(byte[] json) throws IOException {
+ super(factory.createParser(json), false);
+ this.json = json;
+ }
+
+ @Override
+ String getDocumentJson(long start, long end) {
+ return new String(json, (int) start, (int) (end - start), UTF_8);
+ }
+ }
+
+ private abstract class OperationParserAndExecutor {
+
+ private final JsonParser parser;
+ private final boolean multipleOperations;
+ private boolean arrayPrefixParsed;
+
+ protected OperationParserAndExecutor(JsonParser parser, boolean multipleOperations) {
+ this.parser = parser;
+ this.multipleOperations = multipleOperations;
+ }
+
+ abstract String getDocumentJson(long start, long end);
+
+ CompletableFuture<Result> next() throws IOException {
+ if (multipleOperations && !arrayPrefixParsed){
+ expect(START_ARRAY);
+ arrayPrefixParsed = true;
+ }
+
+ JsonToken token = parser.nextToken();
+ if (token == END_ARRAY && multipleOperations) return null;
+ else if (token == null && !multipleOperations) return null;
+ else if (token == START_OBJECT);
+ else throw new OperationParseException("Unexpected token '" + parser.currentToken() + "' at offset " + parser.getTokenLocation().getByteOffset());
+ long start = 0, end = -1;
+ OperationType type = null;
+ DocumentId id = null;
+ OperationParameters parameters = protoParameters;
+ loop: while (true) {
+ switch (parser.nextToken()) {
+ case FIELD_NAME:
+ switch (parser.getText()) {
+ case "id":
+ case "put": type = PUT; id = readId(); break;
+ case "update": type = UPDATE; id = readId(); break;
+ case "remove": type = REMOVE; id = readId(); break;
+ case "condition": parameters = parameters.testAndSetCondition(readString()); break;
+ case "create": parameters = parameters.createIfNonExistent(readBoolean()); break;
+ case "fields": {
+ expect(START_OBJECT);
+ start = parser.getTokenLocation().getByteOffset();
+ int depth = 1;
+ while (depth > 0) switch (parser.nextToken()) {
+ case START_OBJECT: ++depth; break;
+ case END_OBJECT: --depth; break;
+ }
+ end = parser.getTokenLocation().getByteOffset() + 1;
+ break;
+ }
+ default: throw new OperationParseException("Unexpected field name '" + parser.getText() + "' at offset " +
+ parser.getTokenLocation().getByteOffset());
+ }
+ break;
+
+ case END_OBJECT:
+ break loop;
+
+ default:
+ throw new OperationParseException("Unexpected token '" + parser.currentToken() + "' at offset " +
+ parser.getTokenLocation().getByteOffset());
+ }
+ }
+ if (id == null)
+ throw new OperationParseException("No document id for document at offset " + start);
+
+ if (end < start)
+ throw new OperationParseException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset());
+ String payload = getDocumentJson(start, end);
+ switch (type) {
+ case PUT: return client.put (id, payload, parameters);
+ case UPDATE: return client.update(id, payload, parameters);
+ case REMOVE: return client.remove(id, parameters);
+ default: throw new OperationParseException("Unexpected operation type '" + type + "'");
+ }
+ }
+
+ private void expect(JsonToken token) throws IOException {
+ if (parser.nextToken() != token)
+ throw new OperationParseException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
+ }
+
+ private String readString() throws IOException {
+ String value = parser.nextTextValue();
+ if (value == null)
+ throw new OperationParseException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
+
+ return value;
+ }
+
+ private boolean readBoolean() throws IOException {
+ Boolean value = parser.nextBooleanValue();
+ if (value == null)
+ throw new OperationParseException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
+ ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
+
+ return value;
+
+ }
+
+ private DocumentId readId() throws IOException {
+ return DocumentId.of(readString());
+ }
+
+ }
+
+ public static class Builder {
+
+ final FeedClient client;
+ OperationParameters parameters = OperationParameters.empty();
+
+ private Builder(FeedClient client) {
+ this.client = requireNonNull(client);
+ }
+
+ public Builder withTimeout(Duration timeout) {
+ parameters = parameters.timeout(timeout);
+ return this;
+ }
+
+ public Builder withRoute(String route) {
+ parameters = parameters.route(route);
+ return this;
+ }
+
+ public Builder withTracelevel(int tracelevel) {
+ parameters = parameters.tracelevel(tracelevel);
+ return this;
+ }
+
+ public JsonFeeder build() {
+ return new JsonFeeder(client, parameters);
+ }
+
+ }
+}
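
A sketch of feeding a JSON array of operations with the new JsonFeeder; the endpoint URL and input file name are hypothetical, and only the builder methods and callback hooks introduced in this patch are used.

    import ai.vespa.feed.client.FeedClient;
    import ai.vespa.feed.client.FeedClientBuilder;
    import ai.vespa.feed.client.FeedException;
    import ai.vespa.feed.client.JsonFeeder;
    import ai.vespa.feed.client.Result;

    import java.io.InputStream;
    import java.net.URI;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.time.Duration;

    public class JsonFeederSketch {

        public static void main(String[] args) throws Exception {
            FeedClient client = FeedClientBuilder.create(URI.create("http://localhost:8080/")).build(); // hypothetical endpoint
            try (JsonFeeder feeder = JsonFeeder.builder(client).withTimeout(Duration.ofSeconds(30)).build();
                 InputStream json = Files.newInputStream(Paths.get("documents.json"))) {                // hypothetical file
                feeder.feedMany(json, new JsonFeeder.ResultCallback() {
                    @Override public void onNextResult(Result result, FeedException error) {
                        if (error != null) System.err.println("Operation failed: " + error.getMessage());
                    }
                    @Override public void onError(FeedException error) {
                        System.err.println("Feed aborted: " + error.getMessage());
                    }
                }).join(); // completes when every operation has either succeeded or failed
            }
        }

    }
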
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java
deleted file mode 100644
index 17162f19d3f..00000000000
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/JsonStreamFeeder.java
+++ /dev/null
@@ -1,364 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
-
-import ai.vespa.feed.client.FeedClient.OperationType;
-import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.core.JsonParser;
-import com.fasterxml.jackson.core.JsonToken;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.UncheckedIOException;
-import java.time.Duration;
-import java.util.Optional;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
-
-import static ai.vespa.feed.client.FeedClient.OperationType.put;
-import static ai.vespa.feed.client.FeedClient.OperationType.remove;
-import static ai.vespa.feed.client.FeedClient.OperationType.update;
-import static com.fasterxml.jackson.core.JsonToken.START_OBJECT;
-import static com.fasterxml.jackson.core.JsonToken.VALUE_FALSE;
-import static com.fasterxml.jackson.core.JsonToken.VALUE_STRING;
-import static com.fasterxml.jackson.core.JsonToken.VALUE_TRUE;
-import static java.lang.Math.min;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.Objects.requireNonNull;
-
-/**
- * @author jonmv
- */
-public class JsonStreamFeeder implements Closeable {
-
- private final FeedClient client;
- private final OperationParameters protoParameters;
-
- private JsonStreamFeeder(FeedClient client, OperationParameters protoParameters) {
- this.client = client;
- this.protoParameters = protoParameters;
- }
-
- public static Builder builder(FeedClient client) { return new Builder(client); }
-
- /** Feeds a stream containing a JSON array of feed operations on the form
- * <pre>
- * [
- * {
- * "id": "id:ns:type::boo",
- * "fields": { ... document fields ... }
- * },
- * {
- * "put": "id:ns:type::foo",
- * "fields": { ... document fields ... }
- * },
- * {
- * "update": "id:ns:type:n=4:bar",
- * "create": true,
- * "fields": { ... partial update fields ... }
- * },
- * {
- * "remove": "id:ns:type:g=foo:bar",
- * "condition": "type.baz = \"bax\""
- * },
- * ...
- * ]
- * </pre>
- * Note that {@code "id"} is an alias for the document put operation.
- */
- public void feed(InputStream jsonStream) throws IOException {
- feed(jsonStream, 1 << 26, false);
- }
-
- BenchmarkResult benchmark(InputStream jsonStream) throws IOException {
- return feed(jsonStream, 1 << 26, true).get();
- }
-
- Optional<BenchmarkResult> feed(InputStream jsonStream, int size, boolean benchmark) throws IOException {
- RingBufferStream buffer = new RingBufferStream(jsonStream, size);
- buffer.expect(JsonToken.START_ARRAY);
- AtomicInteger okCount = new AtomicInteger();
- AtomicInteger failedCount = new AtomicInteger();
- long startTime = System.nanoTime();
- CompletableFuture<Result> result;
- AtomicReference<Throwable> thrown = new AtomicReference<>();
- while ((result = buffer.next()) != null) {
- result.whenComplete((r, t) -> {
- if (t != null) {
- failedCount.incrementAndGet();
- if (!benchmark) thrown.set(t);
- } else
- okCount.incrementAndGet();
- });
- if (thrown.get() != null)
- sneakyThrow(thrown.get());
- }
- if (!benchmark) return Optional.empty();
- Duration duration = Duration.ofNanos(System.nanoTime() - startTime);
- double throughPut = (double)okCount.get() / duration.toMillis() * 1000D;
- return Optional.of(new BenchmarkResult(okCount.get(), failedCount.get(), duration, throughPut));
- }
-
- @SuppressWarnings("unchecked")
- static <T extends Throwable> void sneakyThrow(Throwable thrown) throws T { throw (T) thrown; }
-
- private static final JsonFactory factory = new JsonFactory();
-
- @Override public void close() throws IOException { client.close(); }
-
- private class RingBufferStream extends InputStream {
-
- private final byte[] b = new byte[1];
- private final InputStream in;
- private final byte[] data;
- private final int size;
- private final Object lock = new Object();
- private final JsonParser parser;
- private Throwable thrown = null;
- private long tail = 0;
- private long pos = 0;
- private long head = 0;
- private boolean done = false;
-
- RingBufferStream(InputStream in, int size) {
- this.in = in;
- this.data = new byte[size];
- this.size = size;
-
- new Thread(this::fill, "feed-reader").start();
-
- try { this.parser = factory.createParser(this); }
- catch (IOException e) { throw new UncheckedIOException(e); }
- }
-
- @Override
- public int read() throws IOException {
- return read(b, 0, 1) == -1 ? -1 : b[0];
- }
-
- @Override
- public int read(byte[] buffer, int off, int len) throws IOException {
- try {
- int ready;
- synchronized (lock) {
- while ((ready = (int) (head - pos)) == 0 && ! done)
- lock.wait();
- }
- if (thrown != null) throw new RuntimeException("Error reading input", thrown);
- if (ready == 0) return -1;
-
- ready = min(ready, len);
- int offset = (int) (pos % size);
- int length = min(ready, size - offset);
- System.arraycopy(data, offset, buffer, off, length);
- if (length < ready)
- System.arraycopy(data, 0, buffer, off + length, ready - length);
-
- pos += ready;
- return ready;
- }
- catch (InterruptedException e) {
- throw new InterruptedIOException("Interrupted waiting for data: " + e.getMessage());
- }
- }
-
- void expect(JsonToken token) throws IOException {
- if (parser.nextToken() != token)
- throw new IllegalArgumentException("Expected '" + token + "' at offset " + parser.getTokenLocation().getByteOffset() +
- ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
- }
-
- public CompletableFuture<Result> next() throws IOException {
- long start = 0, end = -1;
- OperationType type = null;
- DocumentId id = null;
- OperationParameters parameters = protoParameters;
- switch (parser.nextToken()) {
- case END_ARRAY: return null;
- case START_OBJECT: break;
- default: throw new IllegalArgumentException("Unexpected token '" + parser.currentToken() + "' at offset " +
- parser.getTokenLocation().getByteOffset());
- }
-
- loop: while (true) {
- switch (parser.nextToken()) {
- case FIELD_NAME:
- switch (parser.getText()) {
- case "id":
- case "put": type = put; id = readId(); break;
- case "update": type = update; id = readId(); break;
- case "remove": type = remove; id = readId(); break;
- case "condition": parameters = parameters.testAndSetCondition(readString()); break;
- case "create": parameters = parameters.createIfNonExistent(readBoolean()); break;
- case "fields": {
- expect(START_OBJECT);
- start = parser.getTokenLocation().getByteOffset();
- int depth = 1;
- while (depth > 0) switch (parser.nextToken()) {
- case START_OBJECT: ++depth; break;
- case END_OBJECT: --depth; break;
- }
- end = parser.getTokenLocation().getByteOffset() + 1;
- break;
- }
- default: throw new IllegalArgumentException("Unexpected field name '" + parser.getText() + "' at offset " +
- parser.getTokenLocation().getByteOffset());
- }
- break;
-
- case END_OBJECT:
- break loop;
-
- default:
- throw new IllegalArgumentException("Unexpected token '" + parser.currentToken() + "' at offset " +
- parser.getTokenLocation().getByteOffset());
- }
- }
-
- if (id == null)
- throw new IllegalArgumentException("No document id for document at offset " + start);
-
- if (end < start)
- throw new IllegalArgumentException("No 'fields' object for document at offset " + parser.getTokenLocation().getByteOffset());
-
- String payload = new String(copy(start, end), UTF_8);
- synchronized (lock) {
- tail = end;
- lock.notify();
- }
-
- switch (type) {
- case put: return client.put (id, payload, parameters);
- case update: return client.update(id, payload, parameters);
- case remove: return client.remove(id, parameters);
- default: throw new IllegalStateException("Unexpected operation type '" + type + "'");
- }
- }
-
- private final byte[] prefix = "{\"fields\":".getBytes(UTF_8);
- private byte[] copy(long start, long end) {
- int length = (int) (end - start);
- byte[] buffer = new byte[prefix.length + length + 1];
- System.arraycopy(prefix, 0, buffer, 0, prefix.length);
-
- int offset = (int) (start % size);
- int toWrite = min(length, size - offset);
- System.arraycopy(data, offset, buffer, prefix.length, toWrite);
- if (toWrite < length)
- System.arraycopy(data, 0, buffer, prefix.length + toWrite, length - toWrite);
-
- buffer[buffer.length - 1] = '}';
- return buffer;
- }
-
- private String readString() throws IOException {
- String value = parser.nextTextValue();
- if (value == null)
- throw new IllegalArgumentException("Expected '" + VALUE_STRING + "' at offset " + parser.getTokenLocation().getByteOffset() +
- ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
-
- return value;
- }
-
- private boolean readBoolean() throws IOException {
- Boolean value = parser.nextBooleanValue();
- if (value == null)
- throw new IllegalArgumentException("Expected '" + VALUE_FALSE + "' or '" + VALUE_TRUE + "' at offset " + parser.getTokenLocation().getByteOffset() +
- ", but found '" + parser.currentToken() + "' (" + parser.getText() + ")");
-
- return value;
-
- }
-
- private DocumentId readId() throws IOException {
- return DocumentId.of(readString());
- }
-
- @Override
- public void close() throws IOException {
- synchronized (lock) {
- done = true;
- lock.notifyAll();
- }
- in.close();
- }
-
- private void fill() {
- try {
- while (true) {
- int free;
- synchronized (lock) {
- while ((free = (int) (tail + size - head)) <= 0 && ! done)
- lock.wait();
- }
- if (done) break;
-
- int off = (int) (head % size);
- int len = min(min(free, size - off), 1 << 13);
- int read = in.read(data, off, len);
-
- synchronized (lock) {
- if (read < 0) done = true;
- else head += read;
- lock.notify();
- }
- }
- }
- catch (Throwable t) {
- synchronized (lock) {
- done = true;
- thrown = t;
- }
- }
- }
-
- }
-
-
- public static class Builder {
-
- final FeedClient client;
- OperationParameters parameters = OperationParameters.empty();
-
- private Builder(FeedClient client) {
- this.client = requireNonNull(client);
- }
-
- public Builder withTimeout(Duration timeout) {
- parameters = parameters.timeout(timeout);
- return this;
- }
-
- public Builder withRoute(String route) {
- parameters = parameters.route(route);
- return this;
- }
-
- public Builder withTracelevel(int tracelevel) {
- parameters = parameters.tracelevel(tracelevel);
- return this;
- }
-
- public JsonStreamFeeder build() {
- return new JsonStreamFeeder(client, parameters);
- }
-
- }
-
- static class BenchmarkResult {
- final int okCount;
- final int errorCount;
- final Duration duration;
- final double throughput;
-
- BenchmarkResult(int okCount, int errorCount, Duration duration, double throughput) {
- this.okCount = okCount;
- this.errorCount = errorCount;
- this.duration = duration;
- this.throughput = throughput;
- }
- }
-
-}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java
index 22546f89ccb..8c20a37d224 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParameters.java
@@ -7,6 +7,8 @@ import java.util.Optional;
import java.util.OptionalInt;
/**
+ * Per-operation feed parameters
+ *
* @author bjorncs
* @author jonmv
*/
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java
new file mode 100644
index 00000000000..15ba024bb4e
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationParseException.java
@@ -0,0 +1,15 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+/**
+ * Signals that the supplied JSON for a document/operation is invalid
+ *
+ * @author bjorncs
+ */
+public class OperationParseException extends FeedException {
+
+ public OperationParseException(String message) { super(message); }
+
+ public OperationParseException(String message, Throwable cause) { super(message, cause); }
+
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java
new file mode 100644
index 00000000000..d36475a51fb
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/OperationStats.java
@@ -0,0 +1,96 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.Map;
+
+/**
+ * Statistics for feed operations over HTTP against a Vespa cluster.
+ *
+ * @author jonmv
+ */
+public class OperationStats {
+
+ private final long requests;
+ private final Map<Integer, Long> responsesByCode;
+ private final long inflight;
+ private final long exceptions;
+ private final long averageLatencyMillis;
+ private final long minLatencyMillis;
+ private final long maxLatencyMillis;
+ private final long bytesSent;
+ private final long bytesReceived;
+
+ public OperationStats(long requests, Map<Integer, Long> responsesByCode, long exceptions, long inflight,
+ long averageLatencyMillis, long minLatencyMillis, long maxLatencyMillis,
+ long bytesSent, long bytesReceived) {
+ this.requests = requests;
+ this.responsesByCode = responsesByCode;
+ this.exceptions = exceptions;
+ this.inflight = inflight;
+ this.averageLatencyMillis = averageLatencyMillis;
+ this.minLatencyMillis = minLatencyMillis;
+ this.maxLatencyMillis = maxLatencyMillis;
+ this.bytesSent = bytesSent;
+ this.bytesReceived = bytesReceived;
+ }
+
+ public long requests() {
+ return requests;
+ }
+
+ public long responses() {
+ return requests - inflight;
+ }
+
+ public long successes() {
+ return responsesByCode.getOrDefault(200, 0L);
+ }
+
+ public Map<Integer, Long> responsesByCode() {
+ return responsesByCode;
+ }
+
+ public long exceptions() {
+ return exceptions;
+ }
+
+ public long inflight() {
+ return inflight;
+ }
+
+ public long averageLatencyMillis() {
+ return averageLatencyMillis;
+ }
+
+ public long minLatencyMillis() {
+ return minLatencyMillis;
+ }
+
+ public long maxLatencyMillis() {
+ return maxLatencyMillis;
+ }
+
+ public long bytesSent() {
+ return bytesSent;
+ }
+
+ public long bytesReceived() {
+ return bytesReceived;
+ }
+
+ @Override
+ public String toString() {
+ return "Stats{" +
+ "requests=" + requests +
+ ", responsesByCode=" + responsesByCode +
+ ", exceptions=" + exceptions +
+ ", inflight=" + inflight +
+ ", averageLatencyMillis=" + averageLatencyMillis +
+ ", minLatencyMillis=" + minLatencyMillis +
+ ", maxLatencyMillis=" + maxLatencyMillis +
+ ", bytesSent=" + bytesSent +
+ ", bytesReceived=" + bytesReceived +
+ '}';
+ }
+
+}
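
A short sketch of reading these counters from a feed client; the derived numbers only combine the accessors defined above, and the helper class is hypothetical:

    import ai.vespa.feed.client.FeedClient;
    import ai.vespa.feed.client.OperationStats;

    class FeedStatsSketch {
        // Prints a summary derived purely from the OperationStats accessors.
        static void printSummary(FeedClient client) {
            OperationStats stats = client.stats();
            long failures = stats.responses() - stats.successes();            // responses with a non-200 status
            double successRatio = stats.requests() == 0
                    ? 1.0
                    : (double) stats.successes() / stats.requests();
            System.out.println(stats + "  failures=" + failures + "  successRatio=" + successRatio);
        }
    }
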
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java
index 1787d8d65c6..a1101eb0ebb 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/RequestStrategy.java
@@ -1,24 +1,30 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.feed.client;
-import org.apache.hc.client5.http.async.methods.SimpleHttpRequest;
-import org.apache.hc.client5.http.async.methods.SimpleHttpResponse;
+import ai.vespa.feed.client.FeedClient.CircuitBreaker.State;
import java.util.concurrent.CompletableFuture;
-import java.util.function.BiConsumer;
/**
* Controls execution of feed operations.
*
* @author jonmv
*/
-public interface RequestStrategy {
+interface RequestStrategy {
- /** Whether this has failed fatally, and we should cease sending further operations. */
- boolean hasFailed();
+ /** Stats for operations sent through this. */
+ OperationStats stats();
+
+ /** State of the circuit breaker. */
+ State circuitBreakerState();
+
+ /** Forcibly terminates this, causing all inflight operations to complete immediately. */
+ void destroy();
+
+ /** Wait for all inflight requests to complete. */
+ void await();
/** Enqueue the given operation, returning its future result. This may block if the client send queue is full. */
- CompletableFuture<SimpleHttpResponse> enqueue(DocumentId documentId, SimpleHttpRequest request,
- BiConsumer<SimpleHttpRequest, CompletableFuture<SimpleHttpResponse>> dispatch);
+ CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request);
}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java
index 31a6cf6e893..b29d65e193b 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/Result.java
@@ -4,6 +4,8 @@ package ai.vespa.feed.client;
import java.util.Optional;
/**
+ * Result for a document operation
+ *
* @author bjorncs
* @author jonmv
*/
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java
new file mode 100644
index 00000000000..3fd5143e2f4
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/ResultParseException.java
@@ -0,0 +1,14 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+/**
+ * Signals that the client was unable to parse the result/response from the container
+ *
+ * @author bjorncs
+ */
+public class ResultParseException extends FeedException {
+
+ public ResultParseException(DocumentId documentId, String message) { super(documentId, message); }
+
+ public ResultParseException(DocumentId documentId, Throwable cause) { super(documentId, cause); }
+}
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java
index 7200d5fd943..9114e22f4a6 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/SslContextBuilder.java
@@ -20,11 +20,14 @@ import java.nio.file.Path;
import java.security.GeneralSecurityException;
import java.security.KeyFactory;
import java.security.KeyStore;
+import java.security.KeyStoreException;
import java.security.PrivateKey;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
import java.security.spec.PKCS8EncodedKeySpec;
import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
import java.util.List;
/**
@@ -39,6 +42,9 @@ class SslContextBuilder {
private Path certificateFile;
private Path privateKeyFile;
private Path caCertificatesFile;
+ private Collection<X509Certificate> certificate;
+ private PrivateKey privateKey;
+ private Collection<X509Certificate> caCertificates;
SslContextBuilder withCertificateAndKey(Path certificate, Path privateKey) {
this.certificateFile = certificate;
@@ -46,20 +52,35 @@ class SslContextBuilder {
return this;
}
+ SslContextBuilder withCertificateAndKey(Collection<X509Certificate> certificate, PrivateKey privateKey) {
+ this.certificate = certificate;
+ this.privateKey = privateKey;
+ return this;
+ }
+
SslContextBuilder withCaCertificates(Path caCertificates) {
this.caCertificatesFile = caCertificates;
return this;
}
+ SslContextBuilder withCaCertificates(Collection<X509Certificate> caCertificates) {
+ this.caCertificates = caCertificates;
+ return this;
+ }
+
SSLContext build() throws IOException {
try {
KeyStore keystore = KeyStore.getInstance("PKCS12");
keystore.load(null);
if (certificateFile != null && privateKeyFile != null) {
keystore.setKeyEntry("cert", privateKey(privateKeyFile), new char[0], certificates(certificateFile));
+ } else if (certificate != null && privateKey != null) {
+ keystore.setKeyEntry("cert", privateKey, new char[0], certificate.toArray(new Certificate[0]));
}
if (caCertificatesFile != null) {
- keystore.setCertificateEntry("ca-cert", certificates(caCertificatesFile)[0]);
+ addCaCertificates(keystore, Arrays.asList(certificates(caCertificatesFile)));
+ } else if (caCertificates != null) {
+ addCaCertificates(keystore, caCertificates);
}
KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm());
kmf.init(keystore, new char[0]);
@@ -73,6 +94,13 @@ class SslContextBuilder {
}
}
+ private static void addCaCertificates(KeyStore keystore, Collection<? extends Certificate> certificates) throws KeyStoreException {
+ int i = 0;
+ for (Certificate cert : certificates) {
+ keystore.setCertificateEntry("ca-cert-" + ++i, cert);
+ }
+ }
+
private static Certificate[] certificates(Path file) throws IOException, GeneralSecurityException {
try (PEMParser parser = new PEMParser(Files.newBufferedReader(file))) {
List<X509Certificate> result = new ArrayList<>();
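
A rough sketch of how the new in-memory variants compose. SslContextBuilder is package-private, so this is internal usage only; the no-argument constructor and the certificateChain, privateKey and caCertificates variables are assumptions for illustration:

    SSLContext sslContext = new SslContextBuilder()
            .withCertificateAndKey(certificateChain, privateKey)   // Collection<X509Certificate> + PrivateKey overload added above
            .withCaCertificates(caCertificates)                    // each CA certificate gets its own "ca-cert-N" alias
            .build();                                              // may throw IOException
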
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java
new file mode 100644
index 00000000000..4e0c4fe90f0
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/StaticThrottler.java
@@ -0,0 +1,45 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.lang.Math.max;
+import static java.lang.Math.min;
+
+/**
+ * Reduces max throughput whenever throttled; increases it slowly whenever successful responses are obtained.
+ *
+ * @author jonmv
+ */
+public class StaticThrottler implements FeedClient.Throttler {
+
+ protected final long maxInflight;
+ protected final long minInflight;
+ private final AtomicLong targetX10;
+
+ public StaticThrottler(FeedClientBuilder builder) {
+ this.maxInflight = builder.connectionsPerEndpoint * (long) builder.maxStreamsPerConnection;
+ this.minInflight = builder.connectionsPerEndpoint * (long) min(16, builder.maxStreamsPerConnection);
+ this.targetX10 = new AtomicLong(10 * maxInflight); // 10x the actual value to allow for smaller updates.
+ }
+
+ @Override
+ public void sent(long inflight, CompletableFuture<HttpResponse> vessel) { }
+
+ @Override
+ public void success() {
+ targetX10.incrementAndGet();
+ }
+
+ @Override
+ public void throttled(long inflight) {
+ targetX10.set(max(inflight * 5, minInflight * 10));
+ }
+
+ @Override
+ public long targetInflight() {
+ return min(maxInflight, targetX10.get() / 10);
+ }
+
+}
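
A worked illustration of the fixed-point arithmetic above, using hypothetical builder values (1 connection, 4096 streams per connection):

    // maxInflight = 1 * 4096 = 4096, minInflight = 1 * min(16, 4096) = 16, targetX10 starts at 40960.
    // throttled(inflight = 1000)  ->  targetX10 = max(1000 * 5, 16 * 10) = 5000, so targetInflight() = 500.
    // each success()              ->  targetX10 += 1, i.e. the target grows by a tenth of an operation.
    // after 100 successes         ->  targetX10 = 5100, targetInflight() = min(4096, 510) = 510.
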
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java
new file mode 100644
index 00000000000..e058b9b921e
--- /dev/null
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/package-info.java
@@ -0,0 +1,9 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+/**
+ * @author bjorncs
+ */
+
+@PublicApi
+package ai.vespa.feed.client;
+
+import com.yahoo.api.annotations.PublicApi;
\ No newline at end of file
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java
new file mode 100644
index 00000000000..9b30ebfd0aa
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/GracePeriodCircuitBreakerTest.java
@@ -0,0 +1,60 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import ai.vespa.feed.client.FeedClient.CircuitBreaker;
+import org.junit.jupiter.api.Test;
+
+import java.time.Duration;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.CLOSED;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.HALF_OPEN;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.OPEN;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * @author jonmv
+ */
+class GracePeriodCircuitBreakerTest {
+
+ @Test
+ void testCircuitBreaker() {
+ AtomicLong now = new AtomicLong(0);
+ long SECOND = 1000;
+ CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, Duration.ofSeconds(1), Duration.ofMinutes(1));
+
+ assertEquals(CLOSED, breaker.state(), "Initial state is closed");
+
+ now.addAndGet(100 * SECOND);
+ assertEquals(CLOSED, breaker.state(), "State is closed after some time without activity");
+
+ breaker.success();
+ assertEquals(CLOSED, breaker.state(), "State is closed after a success");
+
+ now.addAndGet(100 * SECOND);
+ assertEquals(CLOSED, breaker.state(), "State is closed some time after a success");
+
+ breaker.failure();
+ assertEquals(CLOSED, breaker.state(), "State is closed right after a failure");
+
+ now.addAndGet(SECOND);
+ assertEquals(CLOSED, breaker.state(), "State is closed until grace period has passed");
+
+ now.addAndGet(1);
+ assertEquals(HALF_OPEN, breaker.state(), "State is half-open when grace period has passed");
+
+ breaker.success();
+ assertEquals(CLOSED, breaker.state(), "State is closed after a new success");
+
+ breaker.failure();
+ now.addAndGet(60 * SECOND);
+ assertEquals(HALF_OPEN, breaker.state(), "State is half-open until doom period has passed");
+
+ now.addAndGet(1);
+ assertEquals(OPEN, breaker.state(), "State is open when doom period has passed");
+
+ breaker.success();
+ assertEquals(OPEN, breaker.state(), "State remains open in spite of new successes");
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java
new file mode 100644
index 00000000000..d8090549420
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpFeedClientTest.java
@@ -0,0 +1,101 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import org.junit.jupiter.api.Test;
+
+import java.net.URI;
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiFunction;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+/**
+ * @author jonmv
+ */
+class HttpFeedClientTest {
+
+ @Test
+ void testFeeding() throws ExecutionException, InterruptedException {
+ DocumentId id = DocumentId.of("ns", "type", "0");
+ AtomicReference<BiFunction<DocumentId, HttpRequest, CompletableFuture<HttpResponse>>> dispatch = new AtomicReference<>();
+ class MockRequestStrategy implements RequestStrategy {
+ @Override public OperationStats stats() { throw new UnsupportedOperationException(); }
+ @Override public FeedClient.CircuitBreaker.State circuitBreakerState() { return FeedClient.CircuitBreaker.State.CLOSED; }
+ @Override public void destroy() { throw new UnsupportedOperationException(); }
+ @Override public void await() { throw new UnsupportedOperationException(); }
+ @Override public CompletableFuture<HttpResponse> enqueue(DocumentId documentId, HttpRequest request) { return dispatch.get().apply(documentId, request); }
+ }
+ FeedClient client = new HttpFeedClient(FeedClientBuilder.create(URI.create("https://dummy:123")), new MockRequestStrategy());
+
+ // Vespa error is an error result.
+ dispatch.set((documentId, request) -> {
+ try {
+ assertEquals(id, documentId);
+ assertEquals("/document/v1/ns/type/docid/0?create=true&condition=false&timeout=5000ms&route=route",
+ request.path());
+ assertEquals("json", new String(request.body(), UTF_8));
+
+ HttpResponse response = HttpResponse.of(502,
+ ("{\n" +
+ " \"pathId\": \"/document/v1/ns/type/docid/0\",\n" +
+ " \"id\": \"id:ns:type::0\",\n" +
+ " \"message\": \"Ooops! ... I did it again.\",\n" +
+ " \"trace\": \"I played with your heart. Got lost in the game.\"\n" +
+ "}").getBytes(UTF_8));
+ return CompletableFuture.completedFuture(response);
+ }
+ catch (Throwable thrown) {
+ CompletableFuture<HttpResponse> failed = new CompletableFuture<>();
+ failed.completeExceptionally(thrown);
+ return failed;
+ }
+ });
+ Result result = client.put(id,
+ "json",
+ OperationParameters.empty()
+ .createIfNonExistent(true)
+ .testAndSetCondition("false")
+ .route("route")
+ .timeout(Duration.ofSeconds(5)))
+ .get();
+ assertEquals("Ooops! ... I did it again.", result.resultMessage().get());
+ assertEquals("I played with your heart. Got lost in the game.", result.traceMessage().get());
+
+
+ // Handler error is a FeedException.
+ dispatch.set((documentId, request) -> {
+ try {
+ assertEquals(id, documentId);
+ assertEquals("/document/v1/ns/type/docid/0",
+ request.path());
+ assertEquals("json", new String(request.body(), UTF_8));
+
+ HttpResponse response = HttpResponse.of(500,
+ ("{\n" +
+ " \"pathId\": \"/document/v1/ns/type/docid/0\",\n" +
+ " \"id\": \"id:ns:type::0\",\n" +
+ " \"message\": \"Alla ska i jorden.\",\n" +
+ " \"trace\": \"Din tid den kom, och senn så for den. \"\n" +
+ "}").getBytes(UTF_8));
+ return CompletableFuture.completedFuture(response);
+ }
+ catch (Throwable thrown) {
+ CompletableFuture<HttpResponse> failed = new CompletableFuture<>();
+ failed.completeExceptionally(thrown);
+ return failed;
+ }
+ });
+ ExecutionException expected = assertThrows(ExecutionException.class,
+ () -> client.put(id,
+ "json",
+ OperationParameters.empty())
+ .get());
+ assertEquals("Status 500 executing 'POST /document/v1/ns/type/docid/0': Alla ska i jorden.", expected.getCause().getMessage());
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java
new file mode 100644
index 00000000000..21ab6889e6e
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/HttpRequestStrategyTest.java
@@ -0,0 +1,203 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import ai.vespa.feed.client.FeedClient.CircuitBreaker;
+import org.apache.hc.core5.http.ContentType;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.URI;
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.BiConsumer;
+
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.CLOSED;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.HALF_OPEN;
+import static ai.vespa.feed.client.FeedClient.CircuitBreaker.State.OPEN;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+class HttpRequestStrategyTest {
+
+ @Test
+ void testConcurrency() {
+ int documents = 1 << 16;
+ HttpRequest request = new HttpRequest("PUT", "/", null, null);
+ HttpResponse response = HttpResponse.of(200, "{}".getBytes(UTF_8));
+ ScheduledExecutorService executor = Executors.newScheduledThreadPool(1);
+ Cluster cluster = new BenchmarkingCluster((__, vessel) -> executor.schedule(() -> vessel.complete(response), (int) (Math.random() * 2 * 10), TimeUnit.MILLISECONDS));
+
+ HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123"))
+ .setConnectionsPerEndpoint(1 << 10)
+ .setMaxStreamPerConnection(1 << 12),
+ cluster);
+ CountDownLatch latch = new CountDownLatch(1);
+ new Thread(() -> {
+ try {
+ while ( ! latch.await(1, TimeUnit.SECONDS)) {
+ System.err.println(cluster.stats().inflight());
+ System.err.println(strategy.throttler.targetInflight());
+ System.err.println();
+ }
+ }
+ catch (InterruptedException ignored) { }
+ }).start();
+ long startNanos = System.nanoTime();
+ for (int i = 0; i < documents; i++)
+ strategy.enqueue(DocumentId.of("ns", "type", Integer.toString(i)), request);
+
+ strategy.await();
+ latch.countDown();
+ executor.shutdown();
+ cluster.close();
+ OperationStats stats = cluster.stats();
+ long successes = stats.responsesByCode().get(200);
+ System.err.println(successes + " successes in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
+ System.err.println(stats);
+
+ assertEquals(documents, stats.requests());
+ assertEquals(documents, stats.responses());
+ assertEquals(documents, stats.responsesByCode().get(200));
+ assertEquals(0, stats.inflight());
+ assertEquals(0, stats.exceptions());
+ assertEquals(0, stats.bytesSent());
+ assertEquals(2 * documents, stats.bytesReceived());
+ }
+
+ @Test
+ void testLogic() throws ExecutionException, InterruptedException {
+ int minStreams = 16; // Hard limit for minimum number of streams per connection.
+ MockCluster cluster = new MockCluster();
+ AtomicLong now = new AtomicLong(0);
+ CircuitBreaker breaker = new GracePeriodCircuitBreaker(now::get, Duration.ofSeconds(1), Duration.ofMinutes(10));
+ HttpRequestStrategy strategy = new HttpRequestStrategy(FeedClientBuilder.create(URI.create("https://dummy.com:123"))
+ .setRetryStrategy(new FeedClient.RetryStrategy() {
+ @Override public boolean retry(FeedClient.OperationType type) { return type == FeedClient.OperationType.PUT; }
+ @Override public int retries() { return 1; }
+ })
+ .setCircuitBreaker(breaker)
+ .setConnectionsPerEndpoint(1)
+ .setMaxStreamPerConnection(minStreams),
+ new BenchmarkingCluster(cluster));
+
+ DocumentId id1 = DocumentId.of("ns", "type", "1");
+ DocumentId id2 = DocumentId.of("ns", "type", "2");
+ HttpRequest request = new HttpRequest("POST", "/", null, null);
+
+ // Runtime exception is not retried.
+ cluster.expect((__, vessel) -> vessel.completeExceptionally(new FeedException("boom")));
+ ExecutionException expected = assertThrows(ExecutionException.class,
+ () -> strategy.enqueue(id1, request).get());
+ assertEquals("boom", expected.getCause().getMessage());
+ assertEquals(1, strategy.stats().requests());
+
+ // IOException is retried.
+ cluster.expect((__, vessel) -> vessel.completeExceptionally(new IOException("retry me")));
+ expected = assertThrows(ExecutionException.class,
+ () -> strategy.enqueue(id1, request).get());
+ assertEquals("retry me", expected.getCause().getCause().getMessage());
+ assertEquals(3, strategy.stats().requests());
+
+ // Successful response is returned
+ HttpResponse success = HttpResponse.of(200, null);
+ cluster.expect((__, vessel) -> vessel.complete(success));
+ assertEquals(success, strategy.enqueue(id1, request).get());
+ assertEquals(4, strategy.stats().requests());
+
+ // Throttled requests are retried. Concurrent operations to same ID (only) are serialised.
+ now.set(2000);
+ HttpResponse throttled = HttpResponse.of(429, null);
+ AtomicInteger count = new AtomicInteger(3);
+ CountDownLatch latch = new CountDownLatch(1);
+ AtomicReference<CompletableFuture<HttpResponse>> completion = new AtomicReference<>();
+ cluster.expect((req, vessel) -> {
+ if (req == request) {
+ if (count.decrementAndGet() > 0)
+ vessel.complete(throttled);
+ else {
+ completion.set(vessel);
+ latch.countDown();
+ }
+ }
+ else vessel.complete(success);
+ });
+ CompletableFuture<HttpResponse> delayed = strategy.enqueue(id1, request);
+ CompletableFuture<HttpResponse> serialised = strategy.enqueue(id1, new HttpRequest("PUT", "/", null, null));
+ assertEquals(success, strategy.enqueue(id2, new HttpRequest("DELETE", "/", null, null)).get());
+ latch.await();
+ assertEquals(8, strategy.stats().requests()); // 3 attempts at throttled and one at id2.
+ now.set(4000);
+ assertEquals(CLOSED, breaker.state()); // Circuit not broken due to throttled requests.
+ completion.get().complete(success);
+ assertEquals(success, delayed.get());
+ assertEquals(success, serialised.get());
+
+ // Some error responses are retried.
+ HttpResponse serverError = HttpResponse.of(500, null);
+ cluster.expect((__, vessel) -> vessel.complete(serverError));
+ assertEquals(serverError, strategy.enqueue(id1, request).get());
+ assertEquals(11, strategy.stats().requests());
+        assertEquals(CLOSED, breaker.state()); // Circuit not yet broken; grace period since these failures has not passed.
+
+ // Error responses are not retried when not of appropriate type.
+ cluster.expect((__, vessel) -> vessel.complete(serverError));
+ assertEquals(serverError, strategy.enqueue(id1, new HttpRequest("PUT", "/", null, null)).get());
+ assertEquals(12, strategy.stats().requests());
+
+ // Some error responses are not retried.
+ HttpResponse badRequest = HttpResponse.of(400, null);
+ cluster.expect((__, vessel) -> vessel.complete(badRequest));
+ assertEquals(badRequest, strategy.enqueue(id1, request).get());
+ assertEquals(13, strategy.stats().requests());
+
+ // Circuit breaker opens some time after starting to fail.
+ now.set(6000);
+ assertEquals(HALF_OPEN, breaker.state()); // Circuit broken due to failed requests.
+ now.set(605000);
+ assertEquals(OPEN, breaker.state()); // Circuit broken due to failed requests.
+
+ Map<Integer, Long> codes = new HashMap<>();
+ codes.put(200, 4L);
+ codes.put(400, 1L);
+ codes.put(429, 2L);
+ codes.put(500, 3L);
+ assertEquals(codes, strategy.stats().responsesByCode());
+ assertEquals(3, strategy.stats().exceptions());
+ }
+
+ static class MockCluster implements Cluster {
+
+ final AtomicReference<BiConsumer<HttpRequest, CompletableFuture<HttpResponse>>> dispatch = new AtomicReference<>();
+
+ void expect(BiConsumer<HttpRequest, CompletableFuture<HttpResponse>> expected) {
+ dispatch.set(expected);
+ }
+
+ @Override
+ public void dispatch(HttpRequest request, CompletableFuture<HttpResponse> vessel) {
+ dispatch.get().accept(request, vessel);
+ }
+
+ @Override
+ public void close() { }
+
+ @Override
+ public OperationStats stats() {
+ return null;
+ }
+
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
new file mode 100644
index 00000000000..3e0f886a40a
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonFeederTest.java
@@ -0,0 +1,124 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.stream.IntStream;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.stream.Collectors.joining;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+class JsonFeederTest {
+
+ @Test
+ void test() throws IOException {
+ int docs = 1 << 14;
+ String json = "[\n" +
+
+ IntStream.range(0, docs).mapToObj(i ->
+ " {\n" +
+ " \"id\": \"id:ns:type::abc" + i + "\",\n" +
+ " \"fields\": {\n" +
+ " \"lul\":\"lal\"\n" +
+ " }\n" +
+ " },\n"
+ ).collect(joining()) +
+
+ " {\n" +
+ " \"id\": \"id:ns:type::abc" + docs + "\",\n" +
+ " \"fields\": {\n" +
+ " \"lul\":\"lal\"\n" +
+ " }\n" +
+ " }\n" +
+ "]";
+ AtomicReference<FeedException> exceptionThrow = new AtomicReference<>();
+ Path tmpFile = Files.createTempFile(null, null);
+ Files.write(tmpFile, json.getBytes(UTF_8));
+ try (InputStream in = Files.newInputStream(tmpFile, StandardOpenOption.READ, StandardOpenOption.DELETE_ON_CLOSE)) {
+ AtomicInteger resultsReceived = new AtomicInteger();
+ AtomicBoolean completedSuccessfully = new AtomicBoolean();
+ long startNanos = System.nanoTime();
+ SimpleClient feedClient = new SimpleClient();
+ JsonFeeder.builder(feedClient).build()
+ .feedMany(in, 1 << 7,
+ new JsonFeeder.ResultCallback() { // TODO: hangs when buffer is smaller than largest document
+ @Override
+ public void onNextResult(Result result, FeedException error) { resultsReceived.incrementAndGet(); }
+
+ @Override
+ public void onError(FeedException error) { exceptionThrow.set(error); }
+
+ @Override
+ public void onComplete() { completedSuccessfully.set(true); }
+ })
+ .join();
+
+ System.err.println((json.length() / 1048576.0) + " MB in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
+ assertEquals(docs + 1, feedClient.ids.size());
+ assertEquals(docs + 1, resultsReceived.get());
+ assertTrue(completedSuccessfully.get());
+ assertNull(exceptionThrow.get());
+ }
+ }
+
+ @Test
+ public void singleJsonOperationIsDispatchedToFeedClient() throws IOException, ExecutionException, InterruptedException {
+ try (JsonFeeder feeder = JsonFeeder.builder(new SimpleClient()).build()) {
+ String json = "{\"put\": \"id:ns:type::abc1\",\n" +
+ " \"fields\": {\n" +
+ " \"lul\":\"lal\"\n" +
+ " }\n" +
+ " }\n";
+ Result result = feeder.feedSingle(json).get();
+ assertEquals(DocumentId.of("id:ns:type::abc1"), result.documentId());
+ assertEquals(Result.Type.success, result.type());
+ assertEquals("success", result.resultMessage().get());
+ }
+ }
+
+ private static class SimpleClient implements FeedClient {
+ final Set<String> ids = new HashSet<>();
+
+ @Override
+ public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
+ ids.add(documentId.userSpecific());
+ return createSuccessResult(documentId);
+ }
+
+ @Override
+ public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
+ return createSuccessResult(documentId);
+ }
+
+ @Override
+ public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
+ return createSuccessResult(documentId);
+ }
+
+ @Override
+ public OperationStats stats() { return null; }
+
+ @Override
+ public void close(boolean graceful) { }
+
+ private CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
+ return CompletableFuture.completedFuture(new Result(Result.Type.success, documentId, "success", null));
+ }
+ }
+
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonStreamFeederTest.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonStreamFeederTest.java
deleted file mode 100644
index 8ef8ae57f5e..00000000000
--- a/vespa-feed-client/src/test/java/ai/vespa/feed/client/JsonStreamFeederTest.java
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.feed.client;
-
-import org.junit.jupiter.api.Test;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentSkipListSet;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
-class JsonStreamFeederTest {
-
- @Test
- void test() throws IOException {
- int docs = 1 << 10;
- String json = "[\n" +
-
- IntStream.range(0, docs).mapToObj(i ->
- " {\n" +
- " \"id\": \"id:ns:type::abc" + i + "\",\n" +
- " \"fields\": {\n" +
- " \"lul\":\"lal\"\n" +
- " }\n" +
- " },\n"
- ).collect(Collectors.joining()) +
-
- " {\n" +
- " \"id\": \"id:ns:type::abc" + docs + "\",\n" +
- " \"fields\": {\n" +
- " \"lul\":\"lal\"\n" +
- " }\n" +
- " }\n" +
- "]";
- ByteArrayInputStream in = new ByteArrayInputStream(json.getBytes(UTF_8));
- Set<String> ids = new ConcurrentSkipListSet<>();
- JsonStreamFeeder.builder(new FeedClient() {
- @Override
- public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
- ids.add(documentId.userSpecific());
- return new CompletableFuture<>();
- }
-
- @Override
- public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
- return new CompletableFuture<>();
- }
-
- @Override
- public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
- return new CompletableFuture<>();
- }
-
- @Override
- public void close() throws IOException {
-
- }
- }).build().feed(in, 1 << 7, false); // TODO: hangs on 1 << 6.
- assertEquals(docs + 1, ids.size());
- }
-
-}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
new file mode 100644
index 00000000000..1e616f2625a
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonFileFeederExample.java
@@ -0,0 +1,92 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client.examples;
+
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.FeedException;
+import ai.vespa.feed.client.JsonFeeder;
+import ai.vespa.feed.client.Result;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Logger;
+
+/**
+ * Sample feeder demonstrating how to programmatically feed to a Vespa cluster.
+ */
+class JsonFileFeederExample implements Closeable {
+
+ private final static Logger log = Logger.getLogger(JsonFileFeederExample.class.getName());
+
+ private final JsonFeeder jsonFeeder;
+ private final URI endpoint;
+
+ static class ResultCallBack implements JsonFeeder.ResultCallback {
+
+ final AtomicInteger resultsReceived = new AtomicInteger(0);
+ final AtomicInteger errorsReceived = new AtomicInteger(0);
+        final long startTimeMillis = System.currentTimeMillis();
+
+ @Override
+ public void onNextResult(Result result, FeedException error) {
+ resultsReceived.incrementAndGet();
+ if (error != null) {
+ log.warning("Problems with feeding document "
+ + error.documentId().map(DocumentId::toString).orElse("<unknown>"));
+ errorsReceived.incrementAndGet();
+ } else if (result.type() == Result.Type.failure) {
+                log.warning("Problems with docID " + result.documentId() + ": " + result.resultMessage().orElse("<no message>"));
+ errorsReceived.incrementAndGet();
+ }
+ }
+
+ @Override
+ public void onError(FeedException error) {
+            log.severe("Feeding failed: " + error.getMessage());
+ }
+
+ @Override
+ public void onComplete() {
+ log.info("Feeding completed");
+ }
+
+ void dumpStatsToLog() {
+ log.info("Received in total " + resultsReceived.get() + ", " + errorsReceived.get() + " errors.");
+ log.info("Time spent receiving is " + (System.currentTimeMillis() - startTimeMillis) + " ms.");
+ }
+
+ }
+
+ JsonFileFeederExample(URI endpoint) {
+ this.endpoint = endpoint;
+ FeedClient feedClient = FeedClientBuilder.create(endpoint)
+ .build();
+ this.jsonFeeder = JsonFeeder.builder(feedClient)
+ .withTimeout(Duration.ofSeconds(30))
+ .build();
+ }
+
+ /**
+ * Feed all operations from a stream.
+ *
+ * @param stream The input stream to read operations from (JSON array containing one or more document operations).
+ */
+ void batchFeed(InputStream stream, String batchId) {
+ ResultCallBack callback = new ResultCallBack();
+ log.info("Starting feed to " + endpoint + " for batch '" + batchId + "'");
+ CompletableFuture<Void> promise = jsonFeeder.feedMany(stream, callback);
+ promise.join(); // wait for feeding to complete
+ callback.dumpStatsToLog();
+ }
+
+ @Override
+ public void close() throws IOException {
+ jsonFeeder.close();
+ }
+}
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java
new file mode 100644
index 00000000000..5cee776b244
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/JsonStreamFeederExample.java
@@ -0,0 +1,117 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client.examples;
+
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.OperationParameters;
+import ai.vespa.feed.client.Result;
+
+import java.net.URI;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Simple streaming feeder implementation which sends operations to a Vespa endpoint.
+ * Other threads communicate with the feeder by adding new operations to the blocking queue.
+ */
+
+class JsonStreamFeederExample extends Thread implements AutoCloseable {
+
+ static class Operation {
+ final String type;
+ final String documentId;
+ final String documentFieldsJson;
+
+ Operation(String type, String id, String fields) {
+ this.type = type;
+ this.documentId = id;
+ this.documentFieldsJson = fields;
+ }
+ }
+
+ private final static Logger log = Logger.getLogger(JsonStreamFeederExample.class.getName());
+
+ private final BlockingQueue<Operation> operations;
+ private final FeedClient feedClient;
+ private final AtomicBoolean drain = new AtomicBoolean(false);
+ private final CountDownLatch finishedDraining = new CountDownLatch(1);
+ private final AtomicInteger resultCounter = new AtomicInteger();
+
+ /**
+     * Constructor
+     * @param operations The shared blocking queue onto which other threads put document operations.
+     * @param endpoint The endpoint to feed to
+     */
+ JsonStreamFeederExample(BlockingQueue<JsonStreamFeederExample.Operation> operations, URI endpoint) {
+ this.operations = operations;
+ this.feedClient = FeedClientBuilder.create(endpoint).build();
+ }
+
+    /**
+     * Shuts down this feeder, waiting until the operations queue is drained.
+     */
+ @Override
+ public void close() {
+        log.info("Shutdown initiated, waiting for the operations queue to be drained. Queue size is " + operations.size());
+ drain.set(true);
+ try {
+ finishedDraining.await();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ @Override
+ public void run() {
+ while (!drain.get() || !operations.isEmpty()) {
+ try {
+ JsonStreamFeederExample.Operation op = operations.poll(1, TimeUnit.SECONDS);
+                if (op == null) // no operations available
+ continue;
+ log.info("Put document " + op.documentId);
+ CompletableFuture<Result> promise;
+ DocumentId docId = DocumentId.of(op.documentId);
+ OperationParameters params = OperationParameters.empty();
+ String json = op.documentFieldsJson;
+ switch (op.type) {
+ case "put":
+ promise = feedClient.put(docId, json, params);
+ break;
+ case "remove":
+ promise = feedClient.remove(docId, params);
+ break;
+ case "update":
+ promise = feedClient.update(docId, json, params);
+ break;
+ default:
+ throw new IllegalArgumentException("Invalid operation: " + op.type);
+ }
+ promise.whenComplete((result, throwable) -> {
+ if (resultCounter.getAndIncrement() % 10 == 0) {
+ System.err.println(feedClient.stats());
+ }
+ if (throwable != null) {
+                        System.err.printf("Failure for '%s': %s%n", docId, throwable);
+ throwable.printStackTrace();
+ } else if (result.type() == Result.Type.failure) {
+                        System.err.printf("Failure for '%s': %s%n", docId, result.resultMessage().orElse("<no message>"));
+ }
+ });
+ } catch (InterruptedException e) {
+ log.log(Level.SEVERE, "Got interrupt exception.", e);
+ break;
+ }
+ }
+ log.info("Shutting down feeding thread");
+ this.feedClient.close();
+ finishedDraining.countDown();
+ }
+
+}
\ No newline at end of file
diff --git a/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java
new file mode 100644
index 00000000000..5ece9051e41
--- /dev/null
+++ b/vespa-feed-client/src/test/java/ai/vespa/feed/client/examples/SimpleExample.java
@@ -0,0 +1,34 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client.examples;
+
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.OperationParameters;
+import ai.vespa.feed.client.Result;
+
+import java.net.URI;
+import java.time.Duration;
+import java.util.concurrent.CompletableFuture;
+
+class SimpleExample {
+
+ public static void main(String[] args) {
+ try (FeedClient client = FeedClientBuilder.create(URI.create("https://my-container-endpoint-with-http2:8080/")).build()) {
+ DocumentId id = DocumentId.of("namespace", "documenttype", "1");
+ String json = "{\"fields\": {\"title\": \"hello world\"}}";
+ OperationParameters params = OperationParameters.empty()
+ .timeout(Duration.ofSeconds(5))
+ .route("myvesparoute");
+ CompletableFuture<Result> promise = client.put(id, json, params);
+ promise.whenComplete(((result, throwable) -> {
+ if (throwable != null) {
+ throwable.printStackTrace();
+ } else {
+ System.out.printf("'%s' for document '%s': %s%n", result.type(), result.documentId(), result.resultMessage());
+ }
+ }));
+ }
+ }
+
+}
diff --git a/vespa-hadoop/pom.xml b/vespa-hadoop/pom.xml
index 382c28dc884..39f10d84f9b 100644
--- a/vespa-hadoop/pom.xml
+++ b/vespa-hadoop/pom.xml
@@ -101,17 +101,22 @@
<scope>test</scope>
</dependency>
<dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
-
<!-- Vespa feeding dependencies -->
<dependency>
<groupId>com.yahoo.vespa</groupId>
<artifactId>vespa-http-client</artifactId>
<version>${project.version}</version>
</dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>vespa-feed-client</artifactId>
+ <version>${project.version}</version>
+ <scope>compile</scope>
+ </dependency>
</dependencies>
diff --git a/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java b/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java
new file mode 100644
index 00000000000..5974a8df271
--- /dev/null
+++ b/vespa-hadoop/src/main/java/ai/vespa/feed/client/DryrunResult.java
@@ -0,0 +1,18 @@
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package ai.vespa.feed.client;
+
+import ai.vespa.feed.client.Result.Type;
+
+/**
+ * Workaround for package-private {@link Result} constructor.
+ *
+ * @author bjorncs
+ */
+public class DryrunResult {
+
+ private DryrunResult() {}
+
+ public static Result create(Type type, DocumentId documentId, String resultMessage, String traceMessage) {
+ return new Result(type, documentId, resultMessage, traceMessage);
+ }
+}
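
A one-line usage sketch of this workaround; the document id and messages are illustrative only:

    Result dryrunOk = DryrunResult.create(Result.Type.success, DocumentId.of("ns", "type", "1"), "ok (dryrun)", null);
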
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/LegacyVespaRecordWriter.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/LegacyVespaRecordWriter.java
new file mode 100644
index 00000000000..b716c55beb5
--- /dev/null
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/LegacyVespaRecordWriter.java
@@ -0,0 +1,235 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hadoop.mapreduce;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParseException;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
+import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
+import com.yahoo.vespa.hadoop.pig.VespaDocumentOperation;
+import com.yahoo.vespa.http.client.FeedClient;
+import com.yahoo.vespa.http.client.FeedClientFactory;
+import com.yahoo.vespa.http.client.Result;
+import com.yahoo.vespa.http.client.config.Cluster;
+import com.yahoo.vespa.http.client.config.ConnectionParams;
+import com.yahoo.vespa.http.client.config.Endpoint;
+import com.yahoo.vespa.http.client.config.FeedParams;
+import com.yahoo.vespa.http.client.config.FeedParams.DataFormat;
+import com.yahoo.vespa.http.client.config.SessionParams;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+
+import javax.xml.namespace.QName;
+import javax.xml.stream.FactoryConfigurationError;
+import javax.xml.stream.XMLEventReader;
+import javax.xml.stream.XMLInputFactory;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.stream.events.StartElement;
+import javax.xml.stream.events.XMLEvent;
+import java.io.IOException;
+import java.io.StringReader;
+import java.time.Duration;
+import java.util.List;
+import java.util.StringTokenizer;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.logging.Logger;
+
+/**
+ * {@link LegacyVespaRecordWriter} sends the output &lt;key, value&gt; to one or more Vespa endpoints using vespa-http-client.
+ *
+ * @author lesters
+ */
+@SuppressWarnings("rawtypes")
+public class LegacyVespaRecordWriter extends RecordWriter {
+
+ private final static Logger log = Logger.getLogger(LegacyVespaRecordWriter.class.getCanonicalName());
+
+ private boolean initialized = false;
+ private FeedClient feedClient;
+ private final VespaCounters counters;
+ private final int progressInterval;
+
+ final VespaConfiguration configuration;
+
+ LegacyVespaRecordWriter(VespaConfiguration configuration, VespaCounters counters) {
+ this.counters = counters;
+ this.configuration = configuration;
+ this.progressInterval = configuration.progressInterval();
+ }
+
+
+ @Override
+ public void write(Object key, Object data) throws IOException, InterruptedException {
+ if (!initialized) {
+ initialize();
+ }
+
+ String doc = data.toString().trim();
+
+ // Parse data to find document id - if none found, skip this write
+ String docId = DataFormat.JSON_UTF8.equals(configuration.dataFormat()) ? findDocId(doc)
+ : findDocIdFromXml(doc);
+ if (docId != null && docId.length() > 0) {
+ feedClient.stream(docId, doc);
+ counters.incrementDocumentsSent(1);
+ } else {
+ counters.incrementDocumentsSkipped(1);
+ }
+
+ if (counters.getDocumentsSent() % progressInterval == 0) {
+ String progress = String.format("Feed progress: %d / %d / %d / %d (sent, ok, failed, skipped)",
+ counters.getDocumentsSent(),
+ counters.getDocumentsOk(),
+ counters.getDocumentsFailed(),
+ counters.getDocumentsSkipped());
+ log.info(progress);
+ }
+
+ }
+
+
+ @Override
+ public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
+ if (feedClient != null) {
+ feedClient.close();
+ }
+ }
+
+ protected ConnectionParams.Builder configureConnectionParams() {
+ ConnectionParams.Builder connParamsBuilder = new ConnectionParams.Builder();
+ connParamsBuilder.setDryRun(configuration.dryrun());
+ connParamsBuilder.setUseCompression(configuration.useCompression());
+ connParamsBuilder.setNumPersistentConnectionsPerEndpoint(configuration.numConnections());
+ connParamsBuilder.setMaxRetries(configuration.numRetries());
+ if (configuration.proxyHost() != null) {
+ connParamsBuilder.setProxyHost(configuration.proxyHost());
+ }
+ if (configuration.proxyPort() >= 0) {
+ connParamsBuilder.setProxyPort(configuration.proxyPort());
+ }
+ return connParamsBuilder;
+ }
+
+ protected FeedParams.Builder configureFeedParams() {
+ FeedParams.Builder feedParamsBuilder = new FeedParams.Builder();
+ feedParamsBuilder.setDataFormat(configuration.dataFormat());
+ feedParamsBuilder.setRoute(configuration.route());
+ feedParamsBuilder.setMaxSleepTimeMs(configuration.maxSleepTimeMs());
+ feedParamsBuilder.setMaxInFlightRequests(configuration.maxInFlightRequests());
+ feedParamsBuilder.setLocalQueueTimeOut(Duration.ofMinutes(10).toMillis());
+ return feedParamsBuilder;
+ }
+
+ protected SessionParams.Builder configureSessionParams() {
+ SessionParams.Builder sessionParamsBuilder = new SessionParams.Builder();
+ sessionParamsBuilder.setThrottlerMinSize(configuration.throttlerMinSize());
+ sessionParamsBuilder.setClientQueueSize(configuration.maxInFlightRequests()*2);
+ return sessionParamsBuilder;
+ }
+
+ private void initialize() {
+ if (!configuration.dryrun() && configuration.randomStartupSleepMs() > 0) {
+ int delay = ThreadLocalRandom.current().nextInt(configuration.randomStartupSleepMs());
+ log.info("VespaStorage: Delaying startup by " + delay + " ms");
+ try {
+ Thread.sleep(delay);
+ } catch (Exception e) {}
+ }
+
+ ConnectionParams.Builder connParamsBuilder = configureConnectionParams();
+ FeedParams.Builder feedParamsBuilder = configureFeedParams();
+ SessionParams.Builder sessionParams = configureSessionParams();
+
+ sessionParams.setConnectionParams(connParamsBuilder.build());
+ sessionParams.setFeedParams(feedParamsBuilder.build());
+
+ String endpoints = configuration.endpoint();
+ StringTokenizer tokenizer = new StringTokenizer(endpoints, ",");
+ while (tokenizer.hasMoreTokens()) {
+ String endpoint = tokenizer.nextToken().trim();
+ sessionParams.addCluster(new Cluster.Builder().addEndpoint(
+ Endpoint.create(endpoint, configuration.defaultPort(), configuration.useSSL())
+ ).build());
+ }
+
+ ResultCallback resultCallback = new ResultCallback(counters);
+ feedClient = FeedClientFactory.create(sessionParams.build(), resultCallback);
+
+ initialized = true;
+ log.info("VespaStorage configuration:\n" + configuration.toString());
+ log.info(feedClient.getStatsAsJson());
+ }
+
+ private String findDocIdFromXml(String xml) {
+ try {
+ XMLEventReader eventReader = XMLInputFactory.newInstance().createXMLEventReader(new StringReader(xml));
+ while (eventReader.hasNext()) {
+ XMLEvent event = eventReader.nextEvent();
+ if (event.getEventType() == XMLEvent.START_ELEMENT) {
+ StartElement element = event.asStartElement();
+ String elementName = element.getName().getLocalPart();
+ if (VespaDocumentOperation.Operation.valid(elementName)) {
+ return element.getAttributeByName(QName.valueOf("documentid")).getValue();
+ }
+ }
+ }
+ } catch (XMLStreamException | FactoryConfigurationError e) {
+ // Swallow and return null, matching the behavior of the JSON parsing path
+ return null;
+ }
+ return null;
+ }
+
+ private String findDocId(String json) throws IOException {
+ JsonFactory factory = new JsonFactory();
+ try(JsonParser parser = factory.createParser(json)) {
+ if (parser.nextToken() != JsonToken.START_OBJECT) {
+ return null;
+ }
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ String fieldName = parser.getCurrentName();
+ parser.nextToken();
+ if (VespaDocumentOperation.Operation.valid(fieldName)) {
+ String docId = parser.getText();
+ return docId;
+ } else {
+ parser.skipChildren();
+ }
+ }
+ } catch (JsonParseException ex) {
+ return null;
+ }
+ return null;
+ }
+
+
+ static class ResultCallback implements FeedClient.ResultCallback {
+ final VespaCounters counters;
+
+ public ResultCallback(VespaCounters counters) {
+ this.counters = counters;
+ }
+
+ @Override
+ public void onCompletion(String docId, Result documentResult) {
+ if (!documentResult.isSuccess()) {
+ counters.incrementDocumentsFailed(1);
+ StringBuilder sb = new StringBuilder();
+ sb.append("Problems with docid ");
+ sb.append(docId);
+ sb.append(": ");
+ List<Result.Detail> details = documentResult.getDetails();
+ for (Result.Detail detail : details) {
+ sb.append(detail.toString());
+ sb.append(" ");
+ }
+ log.warning(sb.toString());
+ return;
+ }
+ counters.incrementDocumentsOk(1);
+ }
+
+ }
+
+}
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java
index bef51e9ae08..97bc7dc838e 100644
--- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaOutputFormat.java
@@ -10,7 +10,7 @@ import java.util.Properties;
/**
* An output specification for writing to Vespa instances in a Map-Reduce job.
- * Mainly returns an instance of a {@link VespaRecordWriter} that does the
+ * Mainly returns an instance of a {@link LegacyVespaRecordWriter} that does the
* actual feeding to Vespa.
*
* @author lesters
@@ -35,7 +35,9 @@ public class VespaOutputFormat extends OutputFormat {
public RecordWriter getRecordWriter(TaskAttemptContext context) throws IOException, InterruptedException {
VespaCounters counters = VespaCounters.get(context);
VespaConfiguration configuration = VespaConfiguration.get(context.getConfiguration(), configOverride);
- return new VespaRecordWriter(configuration, counters);
+ return configuration.useLegacyClient()
+ ? new LegacyVespaRecordWriter(configuration, counters)
+ : new VespaRecordWriter(configuration, counters);
}
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
index 4cc93bfd538..1c370b14b82 100644
--- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
@@ -1,83 +1,75 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hadoop.mapreduce;
-import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.core.JsonParseException;
-import com.fasterxml.jackson.core.JsonParser;
-import com.fasterxml.jackson.core.JsonToken;
+import ai.vespa.feed.client.DocumentId;
+import ai.vespa.feed.client.DryrunResult;
+import ai.vespa.feed.client.FeedClient;
+import ai.vespa.feed.client.FeedClientBuilder;
+import ai.vespa.feed.client.JsonFeeder;
+import ai.vespa.feed.client.OperationParseException;
+import ai.vespa.feed.client.OperationParameters;
+import ai.vespa.feed.client.OperationStats;
+import ai.vespa.feed.client.Result;
import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
-import com.yahoo.vespa.hadoop.pig.VespaDocumentOperation;
-import com.yahoo.vespa.http.client.FeedClient;
-import com.yahoo.vespa.http.client.FeedClientFactory;
-import com.yahoo.vespa.http.client.Result;
-import com.yahoo.vespa.http.client.config.Cluster;
-import com.yahoo.vespa.http.client.config.ConnectionParams;
-import com.yahoo.vespa.http.client.config.Endpoint;
import com.yahoo.vespa.http.client.config.FeedParams;
-import com.yahoo.vespa.http.client.config.FeedParams.DataFormat;
-import com.yahoo.vespa.http.client.config.SessionParams;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import javax.xml.namespace.QName;
-import javax.xml.stream.FactoryConfigurationError;
-import javax.xml.stream.XMLEventReader;
-import javax.xml.stream.XMLInputFactory;
-import javax.xml.stream.XMLStreamException;
-import javax.xml.stream.events.StartElement;
-import javax.xml.stream.events.XMLEvent;
import java.io.IOException;
-import java.io.StringReader;
+import java.net.URI;
import java.time.Duration;
+import java.util.Arrays;
import java.util.List;
-import java.util.StringTokenizer;
+import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadLocalRandom;
+import java.util.logging.Level;
import java.util.logging.Logger;
+import static java.util.stream.Collectors.toList;
+
/**
- * VespaRecordWriter sends the output &lt;key, value&gt; to one or more Vespa endpoints.
+ * {@link VespaRecordWriter} sends the output &lt;key, value&gt; to one or more Vespa endpoints using vespa-feed-client.
*
- * @author lesters
+ * @author bjorncs
*/
-@SuppressWarnings("rawtypes")
-public class VespaRecordWriter extends RecordWriter {
+public class VespaRecordWriter extends RecordWriter<Object, Object> {
private final static Logger log = Logger.getLogger(VespaRecordWriter.class.getCanonicalName());
- private boolean initialized = false;
- private FeedClient feedClient;
private final VespaCounters counters;
- private final int progressInterval;
+ private final VespaConfiguration config;
- final VespaConfiguration configuration;
+ private boolean initialized = false;
+ private JsonFeeder feeder;
- VespaRecordWriter(VespaConfiguration configuration, VespaCounters counters) {
+ protected VespaRecordWriter(VespaConfiguration config, VespaCounters counters) {
this.counters = counters;
- this.configuration = configuration;
- this.progressInterval = configuration.progressInterval();
+ this.config = config;
}
-
@Override
- public void write(Object key, Object data) throws IOException, InterruptedException {
- if (!initialized) {
- initialize();
- }
-
- String doc = data.toString().trim();
-
- // Parse data to find document id - if none found, skip this write
- String docId = DataFormat.JSON_UTF8.equals(configuration.dataFormat()) ? findDocId(doc)
- : findDocIdFromXml(doc);
- if (docId != null && docId.length() >= 0) {
- feedClient.stream(docId, doc);
- counters.incrementDocumentsSent(1);
- } else {
- counters.incrementDocumentsSkipped(1);
- }
-
- if (counters.getDocumentsSent() % progressInterval == 0) {
+ public void write(Object key, Object data) throws IOException {
+ initializeOnFirstWrite();
+ String json = data.toString().trim();
+ feeder.feedSingle(json)
+ .whenComplete((result, error) -> {
+ if (error != null) {
+ if (error instanceof OperationParseException) {
+ counters.incrementDocumentsSkipped(1);
+ } else {
+ String msg = "Failed to feed single document: " + error;
+ System.out.println(msg);
+ System.err.println(msg);
+ log.log(Level.WARNING, msg, error);
+ counters.incrementDocumentsFailed(1);
+ }
+ } else {
+ counters.incrementDocumentsOk(1);
+ }
+ });
+ counters.incrementDocumentsSent(1);
+ if (counters.getDocumentsSent() % config.progressInterval() == 0) {
String progress = String.format("Feed progress: %d / %d / %d / %d (sent, ok, failed, skipped)",
counters.getDocumentsSent(),
counters.getDocumentsOk(),
@@ -85,151 +77,115 @@ public class VespaRecordWriter extends RecordWriter {
counters.getDocumentsSkipped());
log.info(progress);
}
-
}
-
@Override
- public void close(TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
- if (feedClient != null) {
- feedClient.close();
+ public void close(TaskAttemptContext context) throws IOException {
+ if (feeder != null) {
+ feeder.close();
+ feeder = null;
+ initialized = false;
}
}
- protected ConnectionParams.Builder configureConnectionParams() {
- ConnectionParams.Builder connParamsBuilder = new ConnectionParams.Builder();
- connParamsBuilder.setDryRun(configuration.dryrun());
- connParamsBuilder.setUseCompression(configuration.useCompression());
- connParamsBuilder.setNumPersistentConnectionsPerEndpoint(configuration.numConnections());
- connParamsBuilder.setMaxRetries(configuration.numRetries());
- if (configuration.proxyHost() != null) {
- connParamsBuilder.setProxyHost(configuration.proxyHost());
- }
- if (configuration.proxyPort() >= 0) {
- connParamsBuilder.setProxyPort(configuration.proxyPort());
- }
- return connParamsBuilder;
- }
+ /** Override method to alter {@link FeedClient} configuration */
+ protected void onFeedClientInitialization(FeedClientBuilder builder) {}
- protected FeedParams.Builder configureFeedParams() {
- FeedParams.Builder feedParamsBuilder = new FeedParams.Builder();
- feedParamsBuilder.setDataFormat(configuration.dataFormat());
- feedParamsBuilder.setRoute(configuration.route());
- feedParamsBuilder.setMaxSleepTimeMs(configuration.maxSleepTimeMs());
- feedParamsBuilder.setMaxInFlightRequests(configuration.maxInFlightRequests());
- feedParamsBuilder.setLocalQueueTimeOut(Duration.ofMinutes(10).toMillis());
- return feedParamsBuilder;
+ private void initializeOnFirstWrite() {
+ if (initialized) return;
+ validateConfig();
+ useRandomizedStartupDelayIfEnabled();
+ feeder = createJsonStreamFeeder();
+ initialized = true;
}
- protected SessionParams.Builder configureSessionParams() {
- SessionParams.Builder sessionParamsBuilder = new SessionParams.Builder();
- sessionParamsBuilder.setThrottlerMinSize(configuration.throttlerMinSize());
- sessionParamsBuilder.setClientQueueSize(configuration.maxInFlightRequests()*2);
- return sessionParamsBuilder;
+ private void validateConfig() {
+ if (!config.useSSL()) {
+ throw new IllegalArgumentException("SSL is required for this feed client implementation");
+ }
+ if (config.dataFormat() != FeedParams.DataFormat.JSON_UTF8) {
+ throw new IllegalArgumentException("Only JSON is support by this feed client implementation");
+ }
+ if (config.proxyHost() != null) {
+ log.warning(String.format("Ignoring proxy config (host='%s', port=%d)", config.proxyHost(), config.proxyPort()));
+ }
}
-
- private void initialize() {
- if (!configuration.dryrun() && configuration.randomStartupSleepMs() > 0) {
- int delay = ThreadLocalRandom.current().nextInt(configuration.randomStartupSleepMs());
- log.info("VespaStorage: Delaying startup by " + delay + " ms");
+
+ private void useRandomizedStartupDelayIfEnabled() {
+ if (!config.dryrun() && config.randomStartupSleepMs() > 0) {
+ int delay = ThreadLocalRandom.current().nextInt(config.randomStartupSleepMs());
+ log.info("Delaying startup by " + delay + " ms");
try {
Thread.sleep(delay);
} catch (Exception e) {}
}
+ }
- ConnectionParams.Builder connParamsBuilder = configureConnectionParams();
- FeedParams.Builder feedParamsBuilder = configureFeedParams();
- SessionParams.Builder sessionParams = configureSessionParams();
-
- sessionParams.setConnectionParams(connParamsBuilder.build());
- sessionParams.setFeedParams(feedParamsBuilder.build());
- String endpoints = configuration.endpoint();
- StringTokenizer tokenizer = new StringTokenizer(endpoints, ",");
- while (tokenizer.hasMoreTokens()) {
- String endpoint = tokenizer.nextToken().trim();
- sessionParams.addCluster(new Cluster.Builder().addEndpoint(
- Endpoint.create(endpoint, configuration.defaultPort(), configuration.useSSL())
- ).build());
+ private JsonFeeder createJsonStreamFeeder() {
+ FeedClient feedClient = createFeedClient();
+ JsonFeeder.Builder builder = JsonFeeder.builder(feedClient)
+ .withTimeout(Duration.ofMinutes(10));
+ if (config.route() != null) {
+ builder.withRoute(config.route());
}
+ return builder.build();
- ResultCallback resultCallback = new ResultCallback(counters);
- feedClient = FeedClientFactory.create(sessionParams.build(), resultCallback);
-
- initialized = true;
- log.info("VespaStorage configuration:\n" + configuration.toString());
- log.info(feedClient.getStatsAsJson());
}
- private String findDocIdFromXml(String xml) {
- try {
- XMLEventReader eventReader = XMLInputFactory.newInstance().createXMLEventReader(new StringReader(xml));
- while (eventReader.hasNext()) {
- XMLEvent event = eventReader.nextEvent();
- if (event.getEventType() == XMLEvent.START_ELEMENT) {
- StartElement element = event.asStartElement();
- String elementName = element.getName().getLocalPart();
- if (VespaDocumentOperation.Operation.valid(elementName)) {
- return element.getAttributeByName(QName.valueOf("documentid")).getValue();
- }
- }
- }
- } catch (XMLStreamException | FactoryConfigurationError e) {
- // as json dude does
- return null;
+ private FeedClient createFeedClient() {
+ if (config.dryrun()) {
+ return new DryrunClient();
+ } else {
+ FeedClientBuilder feedClientBuilder = FeedClientBuilder.create(endpointUris(config))
+ .setConnectionsPerEndpoint(config.numConnections())
+ .setMaxStreamPerConnection(streamsPerConnection(config))
+ .setRetryStrategy(retryStrategy(config));
+
+ onFeedClientInitialization(feedClientBuilder);
+ return feedClientBuilder.build();
}
- return null;
}
-
- private String findDocId(String json) throws IOException {
- JsonFactory factory = new JsonFactory();
- try(JsonParser parser = factory.createParser(json)) {
- if (parser.nextToken() != JsonToken.START_OBJECT) {
- return null;
- }
- while (parser.nextToken() != JsonToken.END_OBJECT) {
- String fieldName = parser.getCurrentName();
- parser.nextToken();
- if (VespaDocumentOperation.Operation.valid(fieldName)) {
- String docId = parser.getText();
- return docId;
- } else {
- parser.skipChildren();
- }
- }
- } catch (JsonParseException ex) {
- return null;
- }
- return null;
+
+ private static FeedClient.RetryStrategy retryStrategy(VespaConfiguration config) {
+ int maxRetries = config.numRetries();
+ return new FeedClient.RetryStrategy() {
+ @Override public int retries() { return maxRetries; }
+ };
}
+ private static int streamsPerConnection(VespaConfiguration config) {
+ return Math.min(256, config.maxInFlightRequests() / config.numConnections());
+ }
+
+ private static List<URI> endpointUris(VespaConfiguration config) {
+ return Arrays.stream(config.endpoint().split(","))
+ .map(hostname -> URI.create(String.format("https://%s:%d/", hostname, config.defaultPort())))
+ .collect(toList());
+ }
- static class ResultCallback implements FeedClient.ResultCallback {
- final VespaCounters counters;
+ private static class DryrunClient implements FeedClient {
- public ResultCallback(VespaCounters counters) {
- this.counters = counters;
+ @Override
+ public CompletableFuture<Result> put(DocumentId documentId, String documentJson, OperationParameters params) {
+ return createSuccessResult(documentId);
}
@Override
- public void onCompletion(String docId, Result documentResult) {
- if (!documentResult.isSuccess()) {
- counters.incrementDocumentsFailed(1);
- StringBuilder sb = new StringBuilder();
- sb.append("Problems with docid ");
- sb.append(docId);
- sb.append(": ");
- List<Result.Detail> details = documentResult.getDetails();
- for (Result.Detail detail : details) {
- sb.append(detail.toString());
- sb.append(" ");
- }
- log.warning(sb.toString());
- return;
- }
- counters.incrementDocumentsOk(1);
+ public CompletableFuture<Result> update(DocumentId documentId, String updateJson, OperationParameters params) {
+ return createSuccessResult(documentId);
}
- }
+ @Override
+ public CompletableFuture<Result> remove(DocumentId documentId, OperationParameters params) {
+ return createSuccessResult(documentId);
+ }
+
+ @Override public OperationStats stats() { return null; }
+ @Override public void close(boolean graceful) {}
+ private static CompletableFuture<Result> createSuccessResult(DocumentId documentId) {
+ return CompletableFuture.completedFuture(DryrunResult.create(Result.Type.success, documentId, "ok", null));
+ }
+ }
}
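The only extension point of the new VespaRecordWriter is onFeedClientInitialization, which hands subclasses the FeedClientBuilder before build() is called. A minimal sketch of a subclass using it; the class name and the chosen value are illustrative, and setConnectionsPerEndpoint is the same builder method createFeedClient() uses above:

    public class TunedVespaRecordWriter extends VespaRecordWriter {

        protected TunedVespaRecordWriter(VespaConfiguration config, VespaCounters counters) {
            super(config, counters);
        }

        @Override
        protected void onFeedClientInitialization(FeedClientBuilder builder) {
            builder.setConnectionsPerEndpoint(16); // overrides the value derived from the job configuration
        }

    }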
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java
index 2a1179dbec6..7219e621486 100644
--- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java
@@ -27,6 +27,7 @@ public class VespaConfiguration {
public static final String MAX_IN_FLIGHT_REQUESTS = "vespa.feed.max.in.flight.requests";
public static final String RANDOM_STARTUP_SLEEP = "vespa.feed.random.startup.sleep.ms";
public static final String NUM_RETRIES = "vespa.feed.num.retries";
+ public static final String USE_LEGACY_CLIENT = "vespa.feed.uselegacyclient";
private final Configuration conf;
private final Properties override;
@@ -130,6 +131,7 @@ public class VespaConfiguration {
return getInt(PROGRESS_REPORT, 1000);
}
+ public boolean useLegacyClient() { return getBoolean(USE_LEGACY_CLIENT, true); }
public String getString(String name) {
if (override != null && override.containsKey(name)) {
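The new vespa.feed.uselegacyclient property defaults to true, so existing jobs keep using vespa-http-client unless they opt in. A minimal sketch of opting in to the vespa-feed-client based writer from job setup code, mirroring the test added to VespaStorageTest below (SSL must be enabled, since the new writer rejects non-SSL configurations):

    Configuration conf = new HdfsConfiguration();
    conf.set(VespaConfiguration.USE_SSL, Boolean.TRUE.toString());
    conf.set(VespaConfiguration.USE_LEGACY_CLIENT, Boolean.FALSE.toString());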
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java
index ebb34dbc1b1..fa7965acbc1 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/MapReduceTest.java
@@ -4,10 +4,9 @@ package com.yahoo.vespa.hadoop.pig;
import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
+import com.yahoo.vespa.hadoop.mapreduce.VespaOutputFormat;
import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
-import com.yahoo.vespa.hadoop.mapreduce.VespaOutputFormat;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
@@ -17,22 +16,25 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.test.PathUtils;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.*;
-import java.util.*;
+import java.io.BufferedInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.StringTokenizer;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
public class MapReduceTest {
@@ -44,7 +46,7 @@ public class MapReduceTest {
protected static Path metricsJsonPath;
protected static Path metricsCsvPath;
- @BeforeClass
+ @BeforeAll
public static void setUp() throws IOException {
hdfsBaseDir = new File(PathUtils.getTestDir(MapReduceTest.class).getCanonicalPath());
@@ -62,7 +64,7 @@ public class MapReduceTest {
copyToHdfs("src/test/resources/tabular_data.csv", metricsCsvPath, "data");
}
- @AfterClass
+ @AfterAll
public static void tearDown() throws IOException {
Path testDir = new Path(hdfsBaseDir.getParent());
hdfs.delete(testDir, true);
@@ -82,7 +84,7 @@ public class MapReduceTest {
FileInputFormat.setInputPaths(job, metricsJsonPath);
boolean success = job.waitForCompletion(true);
- assertTrue("Job Failed", success);
+ assertTrue(success, "Job Failed");
VespaCounters counters = VespaCounters.get(job);
assertEquals(10, counters.getDocumentsSent());
@@ -103,7 +105,7 @@ public class MapReduceTest {
FileInputFormat.setInputPaths(job, metricsJsonPath);
boolean success = job.waitForCompletion(true);
- assertTrue("Job Failed", success);
+ assertTrue(success, "Job Failed");
VespaCounters counters = VespaCounters.get(job);
assertEquals(10, counters.getDocumentsSent());
@@ -125,7 +127,7 @@ public class MapReduceTest {
FileInputFormat.setInputPaths(job, metricsCsvPath);
boolean success = job.waitForCompletion(true);
- assertTrue("Job Failed", success);
+ assertTrue(success, "Job Failed");
VespaCounters counters = VespaCounters.get(job);
assertEquals(10, counters.getDocumentsSent());
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java
index bafeb593e4f..db2fab9b05e 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaDocumentOperationTest.java
@@ -12,9 +12,9 @@ import org.apache.pig.impl.logicalLayer.FrontendException;
import org.apache.pig.impl.logicalLayer.schema.Schema;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -23,22 +23,22 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
@SuppressWarnings("serial")
public class VespaDocumentOperationTest {
private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
private final PrintStream originalOut = System.out;
- @Before
+ @BeforeEach
public void setUpStreams() {
System.setOut(new PrintStream(outContent));
}
- @After
+ @AfterEach
public void restoreStreams() {
System.setOut(originalOut);
}
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java
index 2d55017b13e..b0e2dd32c04 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaQueryTest.java
@@ -8,12 +8,16 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.pig.ExecType;
import org.apache.pig.PigServer;
import org.apache.pig.data.Tuple;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.net.InetSocketAddress;
-import java.util.*;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
public class VespaQueryTest {
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java
index 7ca401a0cc8..3565db37126 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/pig/VespaStorageTest.java
@@ -1,14 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hadoop.pig;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Supplier;
-
+import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
+import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.mapred.Counters;
@@ -18,11 +12,14 @@ import org.apache.pig.backend.executionengine.ExecJob;
import org.apache.pig.tools.pigstats.JobStats;
import org.apache.pig.tools.pigstats.PigStats;
import org.apache.pig.tools.pigstats.mapreduce.MRJobStats;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
-import com.yahoo.vespa.hadoop.mapreduce.util.VespaConfiguration;
-import com.yahoo.vespa.hadoop.mapreduce.util.VespaCounters;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
public class VespaStorageTest {
@@ -51,6 +48,13 @@ public class VespaStorageTest {
assertAllDocumentsOk("src/test/pig/feed_operations_with_json_loader.pig");
}
+ @Test
+ public void requireThatPremadeOperationsWithJsonLoaderFeedAndNonLegacyClientSucceeds() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ conf.set(VespaConfiguration.USE_SSL, Boolean.TRUE.toString());
+ conf.set(VespaConfiguration.USE_LEGACY_CLIENT, Boolean.FALSE.toString());
+ assertAllDocumentsOk("src/test/pig/feed_operations_with_json_loader.pig", conf);
+ }
@Test
public void requireThatCreateOperationsFeedSucceeds() throws Exception {
diff --git a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java
index 27080a8b2af..93e6a0abfdd 100644
--- a/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java
+++ b/vespa-hadoop/src/test/java/com/yahoo/vespa/hadoop/util/TupleToolsTest.java
@@ -6,11 +6,11 @@ import org.apache.pig.data.DataType;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import org.apache.pig.impl.logicalLayer.schema.Schema;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
public class TupleToolsTest {
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java
index 547ea524041..68cca286dac 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java
@@ -8,11 +8,11 @@ import com.yahoo.security.SslContextBuilder;
import com.yahoo.vespa.http.client.config.ConnectionParams;
import com.yahoo.vespa.http.client.config.Endpoint;
import com.yahoo.vespa.http.client.config.FeedParams;
-import com.yahoo.vespa.http.client.core.Vtag;
import com.yahoo.vespa.http.client.core.Document;
import com.yahoo.vespa.http.client.core.Encoder;
import com.yahoo.vespa.http.client.core.Headers;
import com.yahoo.vespa.http.client.core.ServerResponseException;
+import com.yahoo.vespa.http.client.core.Vtag;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
@@ -392,10 +392,12 @@ class ApacheGatewayConnection implements GatewayConnection {
*/
public static class HttpClientFactory {
+ private final FeedParams feedParams;
final ConnectionParams connectionParams;
final boolean useSsl;
- public HttpClientFactory(ConnectionParams connectionParams, boolean useSsl) {
+ public HttpClientFactory(FeedParams feedParams, ConnectionParams connectionParams, boolean useSsl) {
+ this.feedParams = feedParams;
this.connectionParams = connectionParams;
this.useSsl = useSsl;
}
@@ -427,8 +429,10 @@ class ApacheGatewayConnection implements GatewayConnection {
clientBuilder.setMaxConnTotal(1);
clientBuilder.setUserAgent(String.format("vespa-http-client (%s)", Vtag.V_TAG_COMPONENT));
clientBuilder.setDefaultHeaders(Collections.singletonList(new BasicHeader(Headers.CLIENT_VERSION, Vtag.V_TAG_COMPONENT)));
- RequestConfig.Builder requestConfigBuilder = RequestConfig.custom();
- requestConfigBuilder.setSocketTimeout(0);
+ int millisTotalTimeout = (int) (feedParams.getClientTimeout(TimeUnit.MILLISECONDS) + feedParams.getServerTimeout(TimeUnit.MILLISECONDS));
+ RequestConfig.Builder requestConfigBuilder = RequestConfig.custom()
+ .setSocketTimeout(millisTotalTimeout)
+ .setConnectTimeout(millisTotalTimeout);
if (connectionParams.getProxyHost() != null) {
requestConfigBuilder.setProxy(new HttpHost(connectionParams.getProxyHost(), connectionParams.getProxyPort()));
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java
index 16afd001c46..9dc214fb93d 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java
@@ -78,7 +78,7 @@ public class ClusterConnection implements AutoCloseable {
feedParams,
cluster.getRoute(),
connectionParams,
- new ApacheGatewayConnection.HttpClientFactory(connectionParams, endpoint.isUseSsl()),
+ new ApacheGatewayConnection.HttpClientFactory(feedParams, connectionParams, endpoint.isUseSsl()),
operationProcessor.getClientId(),
clock
);
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
index 58857d1d8e6..9db296e33cd 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/DocumentV1ApiHandler.java
@@ -122,7 +122,7 @@ import static java.util.stream.Collectors.toUnmodifiableMap;
*/
public class DocumentV1ApiHandler extends AbstractRequestHandler {
- private static final Duration defaultTimeout = Duration.ofSeconds(175);
+ private static final Duration defaultTimeout = Duration.ofSeconds(180); // Match document API default timeout.
private static final Logger log = Logger.getLogger(DocumentV1ApiHandler.class.getName());
private static final Parser<Integer> integerParser = Integer::parseInt;
@@ -160,6 +160,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
private static final String TRACELEVEL = "tracelevel";
private final Clock clock;
+ private final Duration handlerTimeout;
private final Metric metric;
private final DocumentApiMetrics metrics;
private final DocumentOperationParser parser;
@@ -184,14 +185,15 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
ClusterListConfig clusterListConfig,
AllClustersBucketSpacesConfig bucketSpacesConfig,
DocumentOperationExecutorConfig executorConfig) {
- this(Clock.systemUTC(), metric, metricReceiver, documentAccess,
+ this(Clock.systemUTC(), Duration.ofSeconds(5), metric, metricReceiver, documentAccess,
documentManagerConfig, executorConfig, clusterListConfig, bucketSpacesConfig);
}
- DocumentV1ApiHandler(Clock clock, Metric metric, MetricReceiver metricReceiver, DocumentAccess access,
+ DocumentV1ApiHandler(Clock clock, Duration handlerTimeout, Metric metric, MetricReceiver metricReceiver, DocumentAccess access,
DocumentmanagerConfig documentmanagerConfig, DocumentOperationExecutorConfig executorConfig,
ClusterListConfig clusterListConfig, AllClustersBucketSpacesConfig bucketSpacesConfig) {
this.clock = clock;
+ this.handlerTimeout = handlerTimeout;
this.parser = new DocumentOperationParser(documentmanagerConfig);
this.metric = metric;
this.metrics = new DocumentApiMetrics(metricReceiver, "documentV1");
@@ -222,8 +224,9 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
HttpRequest request = (HttpRequest) rawRequest;
try {
- request.setTimeout(getProperty(request, TIMEOUT, timeoutMillisParser)
- .orElse(defaultTimeout.toMillis()),
+ // Set a higher HTTP layer timeout than the document API timeout, to prefer triggering the latter.
+ request.setTimeout( getProperty(request, TIMEOUT, timeoutMillisParser).orElse(defaultTimeout.toMillis())
+ + handlerTimeout.toMillis(),
TimeUnit.MILLISECONDS);
Path requestPath = new Path(request.getUri());
@@ -251,7 +254,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
@Override
public void handleTimeout(Request request, ResponseHandler responseHandler) {
- timeout((HttpRequest) request, "Request timeout after " + request.getTimeout(TimeUnit.MILLISECONDS) + "ms", responseHandler);
+ timeout((HttpRequest) request, "Timeout after " + (request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()) + "ms", responseHandler);
}
@Override
@@ -743,11 +746,18 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
private static void serverError(HttpRequest request, Throwable t, ResponseHandler handler) {
loggingException(() -> {
- log.log(WARNING, "Uncaught exception handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ":", t);
+ log.log(WARNING, "Uncaught exception handling request " + request.getMethod() + " " + request.getUri().getRawPath(), t);
JsonResponse.create(request, Exceptions.toMessageString(t), handler).respond(Response.Status.INTERNAL_SERVER_ERROR);
});
}
+ private static void badGateway(HttpRequest request, Throwable t, ResponseHandler handler) {
+ loggingException(() -> {
+ log.log(FINE, t, () -> "Document access error handling request " + request.getMethod() + " " + request.getUri().getRawPath());
+ JsonResponse.create(request, Exceptions.toMessageString(t), handler).respond(Response.Status.BAD_GATEWAY);
+ });
+ }
+
private static void timeout(HttpRequest request, String message, ResponseHandler handler) {
loggingException(() -> {
log.log(FINE, () -> "Timeout handling request " + request.getMethod() + " " + request.getUri().getRawPath() + ": " + message);
@@ -803,6 +813,9 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
catch (IllegalArgumentException e) {
badRequest(request, e, handler);
}
+ catch (DispatchException e) {
+ badGateway(request, e, handler);
+ }
catch (RuntimeException e) {
serverError(request, e, handler);
}
@@ -821,12 +834,16 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
return false;
if (result.type() == Result.ResultType.FATAL_ERROR)
- throw new RuntimeException(result.getError());
+ throw new DispatchException(result.getError());
outstanding.incrementAndGet();
return true;
}
+ private static class DispatchException extends RuntimeException {
+ private DispatchException(Throwable cause) { super(cause); }
+ }
+
/** Readable content channel which forwards data to a reader when closed. */
static class ForwardingContentChannel implements ContentChannel {
@@ -923,7 +940,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
log.log(WARNING, "Unexpected document API operation outcome '" + response.outcome() + "'");
case ERROR:
log.log(FINE, () -> "Exception performing document operation: " + response.getTextMessage());
- jsonResponse.commit(Response.Status.INTERNAL_SERVER_ERROR);
+ jsonResponse.commit(Response.Status.BAD_GATEWAY);
}
}
}
@@ -956,7 +973,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
parameters.setMaxTotalHits(wantedDocumentCount);
parameters.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(concurrency));
parameters.visitInconsistentBuckets(true);
- parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - 5000));
+ parameters.setSessionTimeoutMs(Math.max(1, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis()));
return parameters;
}
@@ -966,7 +983,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
VisitorParameters parameters = parseCommonParameters(request, path, Optional.of(requireProperty(request, CLUSTER)));
parameters.setThrottlePolicy(new DynamicThrottlePolicy().setMinWindowSize(1).setWindowSizeIncrement(1));
long timeChunk = getProperty(request, TIME_CHUNK, timeoutMillisParser).orElse(60_000L);
- parameters.setSessionTimeoutMs(Math.max(1, Math.min(timeChunk, request.getTimeout(TimeUnit.MILLISECONDS) - 5000L)));
+ parameters.setSessionTimeoutMs(Math.max(1, Math.min(timeChunk, request.getTimeout(TimeUnit.MILLISECONDS) - handlerTimeout.toMillis())));
return parameters;
}
@@ -1118,7 +1135,7 @@ public class DocumentV1ApiHandler extends AbstractRequestHandler {
if (getVisitorStatistics() != null)
response.writeDocumentCount(getVisitorStatistics().getDocumentsReturned());
- response.respond(Response.Status.INTERNAL_SERVER_ERROR);
+ response.respond(Response.Status.BAD_GATEWAY);
}
});
visitDispatcher.execute(() -> {
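To make the reworked timeouts concrete: the HTTP-layer timeout is now the request's document API timeout plus the handler timeout, while visitor session timeouts subtract the handler timeout again, so the document API layer times out before the HTTP layer does. A worked example with the values used in the updated test (handler timeout of 1 ms, request with timeout=60):

    document API timeout     = 60_000 ms
    HTTP-layer timeout       = 60_000 + 1 = 60_001 ms
    visitor session timeout  = 60_001 - 1 = 60_000 ms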
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
index 6f1b0466350..29ae7f52265 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/DocumentV1ApiTest.java
@@ -60,17 +60,20 @@ import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
+import java.time.Duration;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
+import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
-import java.util.concurrent.Phaser;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.function.Consumer;
@@ -83,6 +86,7 @@ import static com.yahoo.jdisc.http.HttpRequest.Method.POST;
import static com.yahoo.jdisc.http.HttpRequest.Method.PUT;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -130,7 +134,7 @@ public class DocumentV1ApiTest {
access = new MockDocumentAccess(docConfig);
metric = new NullMetric();
metrics = new MetricReceiver.MockReceiver();
- handler = new DocumentV1ApiHandler(clock, metric, metrics, access, docConfig, executorConfig, clusterConfig, bucketConfig);
+ handler = new DocumentV1ApiHandler(clock, Duration.ofMillis(1), metric, metrics, access, docConfig, executorConfig, clusterConfig, bucketConfig);
}
@After
@@ -176,7 +180,7 @@ public class DocumentV1ApiTest {
}
@Test
- public void testResponses() {
+ public void testResponses() throws InterruptedException {
RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler);
List<AckToken> tokens = List.of(new AckToken(null), new AckToken(null), new AckToken(null));
// GET at non-existent path returns 404 with available paths
@@ -204,7 +208,7 @@ public class DocumentV1ApiTest {
assertEquals(100, ((StaticThrottlePolicy) parameters.getThrottlePolicy()).getMaxPendingCount());
assertEquals("[id]", parameters.getFieldSet());
assertEquals("(all the things)", parameters.getDocumentSelection());
- assertEquals(1000, parameters.getSessionTimeoutMs());
+ assertEquals(6000, parameters.getSessionTimeoutMs());
// Put some documents in the response
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc1)), tokens.get(0));
parameters.getLocalDataHandler().onMessage(new PutDocumentMessage(new DocumentPut(doc2)), tokens.get(1));
@@ -269,7 +273,7 @@ public class DocumentV1ApiTest {
access.expect(parameters -> {
assertEquals("[Content:cluster=content]", parameters.getRemoteDataHandler());
assertEquals("[all]", parameters.fieldSet());
- assertEquals(55_000L, parameters.getSessionTimeoutMs());
+ assertEquals(60_000L, parameters.getSessionTimeoutMs());
parameters.getControlHandler().onDone(VisitorControlHandler.CompletionCode.SUCCESS, "We made it!");
});
response = driver.sendRequest("http://localhost/document/v1/space/music/docid?destinationCluster=content&selection=true&cluster=content&timeout=60", POST);
@@ -330,6 +334,7 @@ public class DocumentV1ApiTest {
assertEquals(400, response.getStatus());
// DELETE with namespace and document type is a restricted visit which deletes visited documents.
+ // When visiting fails fatally, a 502 BAD GATEWAY is returned.
access.expect(tokens.subList(0, 1));
access.expect(parameters -> {
assertEquals("(false) and (music) and (id.namespace=='space')", parameters.getDocumentSelection());
@@ -351,7 +356,7 @@ public class DocumentV1ApiTest {
" \"pathId\": \"/document/v1/space/music/docid\"," +
" \"message\": \"boom\"" +
"}", response.readAll());
- assertEquals(500, response.getStatus());
+ assertEquals(502, response.getStatus());
// DELETE at the root is also a deletion visit. These also require a selection.
access.expect(parameters -> {
@@ -386,7 +391,7 @@ public class DocumentV1ApiTest {
" \"documents\": []," +
" \"message\": \"error\"" +
"}", response.readAll());
- assertEquals(500, response.getStatus());
+ assertEquals(502, response.getStatus());
// GET with namespace, document type and number is a restricted visit.
access.expect(parameters -> {
@@ -649,12 +654,12 @@ public class DocumentV1ApiTest {
" \"pathId\": \"/document/v1/space/music/number/1/two\"," +
" \"message\": \"error\"" +
"}", response1.readAll());
- assertEquals(500, response1.getStatus());
+ assertEquals(502, response1.getStatus());
assertSameJson("{" +
" \"pathId\": \"/document/v1/space/music/number/1/two\"," +
" \"message\": \"error\"" +
"}", response2.readAll());
- assertEquals(500, response2.getStatus());
+ assertEquals(502, response2.getStatus());
// Request response does not arrive before timeout has passed.
AtomicReference<ResponseHandler> handler = new AtomicReference<>();
@@ -662,15 +667,89 @@ public class DocumentV1ApiTest {
handler.set(parameters.responseHandler().get());
return new Result(Result.ResultType.SUCCESS, null);
});
- var response4 = driver.sendRequest("http://localhost/document/v1/space/music/docid/one?timeout=1ms");
- assertSameJson("{" +
- " \"pathId\": \"/document/v1/space/music/docid/one\"," +
- " \"message\": \"Request timeout after 1ms\"" +
- "}", response4.readAll());
- assertEquals(504, response4.getStatus());
- if (handler.get() != null) // Timeout may have occurred before dispatch, or ...
- handler.get().handleResponse(new Response(0)); // response may eventually arrive, but too late.
+ try {
+ var response4 = driver.sendRequest("http://localhost/document/v1/space/music/docid/one?timeout=1ms");
+ assertSameJson("{" +
+ " \"pathId\": \"/document/v1/space/music/docid/one\"," +
+ " \"message\": \"Timeout after 1ms\"" +
+ "}", response4.readAll());
+ assertEquals(504, response4.getStatus());
+ }
+ finally {
+ if (handler.get() != null) // Timeout may have occurred before dispatch, or ...
+ handler.get().handleResponse(new Response(0)); // response may eventually arrive, but too late.
+ }
+
+ driver.close();
+ }
+
+ @Test
+ public void testThroughput() throws InterruptedException {
+ DocumentOperationExecutorConfig executorConfig = new DocumentOperationExecutorConfig.Builder().build();
+ handler = new DocumentV1ApiHandler(clock, Duration.ofMillis(1), metric, metrics, access, docConfig, executorConfig, clusterConfig, bucketConfig);
+
+ int writers = 4;
+ int queueFill = executorConfig.maxThrottled() - writers;
+ RequestHandlerTestDriver driver = new RequestHandlerTestDriver(handler);
+ ScheduledExecutorService writer = Executors.newScheduledThreadPool(writers);
+ ScheduledExecutorService reader = Executors.newScheduledThreadPool(1);
+ ScheduledExecutorService replier = Executors.newScheduledThreadPool(writers);
+ BlockingQueue<RequestHandlerTestDriver.MockResponseHandler> responses = new LinkedBlockingQueue<>();
+
+ Response success = new Response(0, null, Response.Outcome.SUCCESS);
+ int docs = 1 << 14;
+ assertTrue(docs >= writers);
+ AtomicReference<com.yahoo.jdisc.Response> failed = new AtomicReference<>();
+
+ CountDownLatch latch = new CountDownLatch(docs);
+ reader.execute(() -> {
+ while ( ! reader.isShutdown()) {
+ try {
+ var response = responses.take();
+ response.awaitResponse().readAll();
+ if (response.getStatus() != 200)
+ failed.set(response.getResponse());
+ latch.countDown();
+ }
+ catch (InterruptedException e) { break; }
+ }
+ });
+
+ // Fill the handler resend queue.
+ long startNanos = System.nanoTime();
+ CountDownLatch setup = new CountDownLatch(queueFill);
+ access.session.expect((id, parameters) -> {
+ setup.countDown();
+ return new Result(Result.ResultType.TRANSIENT_ERROR, new Error());
+ });
+ for (int i = 0; i < queueFill; i++) {
+ int j = i;
+ writer.execute(() -> {
+ responses.add(driver.sendRequest("http://localhost/document/v1/ns/music/docid/" + j,
+ POST,
+ "{ \"fields\": { \"artist\": \"Sigrid\" } }"));
+ });
+ }
+ setup.await();
+
+ // Let "messagebus" start accepting messages.
+ access.session.expect((id, parameters) -> {
+ replier.schedule(() -> parameters.responseHandler().get().handleResponse(success), 10, TimeUnit.MILLISECONDS);
+ return new Result(0);
+ });
+ // Send the rest of the documents. Rely on the resender to empty the queue of throttled operations.
+ for (int i = queueFill; i < docs; i++) {
+ int j = i;
+ writer.execute(() -> {
+ responses.add(driver.sendRequest("http://localhost/document/v1/ns/music/docid/" + j,
+ POST,
+ "{ \"fields\": { \"artist\": \"Sigrid\" } }"));
+ });
+ }
+ latch.await();
+ System.err.println(docs + " requests in " + (System.nanoTime() - startNanos) * 1e-9 + " seconds");
+ assertNull(failed.get());
driver.close();
}
diff --git a/vespaclient-java/src/main/sh/vespa-visit.sh b/vespaclient-java/src/main/sh/vespa-visit.sh
index 92d6bc67f3f..e4fec2857fe 100755
--- a/vespaclient-java/src/main/sh/vespa-visit.sh
+++ b/vespaclient-java/src/main/sh/vespa-visit.sh
@@ -74,16 +74,25 @@ findhost
# END environment bootstrap section
+Xmx="-Xmx1024m"
+# Allow -Xmx to be specified in args
+for arg in "$@"; do
+ shift
+ case $arg in -Xmx*) Xmx=$arg ;;
+ *) set -- "$@" "$arg" ;;
+ esac
+done
+
if [ "${VESPA_LOG_LEVEL}" = "" ]; then
export VESPA_LOG_LEVEL=error,warning
fi
-export MALLOC_ARENA_MAX=1 #Does not need fast allocation
+export MALLOC_ARENA_MAX=1 # Does not need fast allocation
exec java \
-server -enableassertions \
-XX:ThreadStackSize=512 \
-XX:MaxJavaStackTraceDepth=1000000 \
-Djava.library.path=${VESPA_HOME}/libexec64/native:${VESPA_HOME}/lib64 \
-XX:MaxDirectMemorySize=32m -Djava.awt.headless=true \
--Xms128m -Xmx1024m $(getJavaOptionsIPV46) \
+-Xms128m $(getJavaOptionsIPV46) ${Xmx} \
-cp ${VESPA_HOME}/lib/jars/vespaclient-java-jar-with-dependencies.jar com.yahoo.vespavisit.VdsVisit "$@"
diff --git a/vespajlib/abi-spec.json b/vespajlib/abi-spec.json
index 4ddf8b83cdc..c0ac0d0f3be 100644
--- a/vespajlib/abi-spec.json
+++ b/vespajlib/abi-spec.json
@@ -1201,6 +1201,7 @@
"public com.yahoo.tensor.Tensor equal(com.yahoo.tensor.Tensor)",
"public com.yahoo.tensor.Tensor notEqual(com.yahoo.tensor.Tensor)",
"public com.yahoo.tensor.Tensor approxEqual(com.yahoo.tensor.Tensor)",
+ "public com.yahoo.tensor.Tensor bit(com.yahoo.tensor.Tensor)",
"public com.yahoo.tensor.Tensor avg()",
"public com.yahoo.tensor.Tensor avg(java.lang.String)",
"public com.yahoo.tensor.Tensor avg(java.util.List)",
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java
index fcc5b8e57a2..da5a596edea 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/JobMetrics.java
@@ -2,39 +2,18 @@
package com.yahoo.concurrent.maintenance;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.function.BiConsumer;
/**
* Tracks and forwards maintenance job metrics.
*
* @author mpolden
*/
-public class JobMetrics {
+public abstract class JobMetrics {
- private final BiConsumer<String, Long> metricConsumer;
-
- private final ConcurrentHashMap<String, Long> incompleteRuns = new ConcurrentHashMap<>();
-
- public JobMetrics(BiConsumer<String, Long> metricConsumer) {
- this.metricConsumer = metricConsumer;
- }
-
- /** Record a run for given job */
- public void recordRunOf(String job) {
- incompleteRuns.merge(job, 1L, Long::sum);
- }
-
- /** Record completion of given job */
- public void recordCompletionOf(String job) {
- incompleteRuns.put(job, 0L);
- }
-
- /** Forward metrics for given job to metric consumer */
- public void forward(String job) {
- Long incompleteRuns = this.incompleteRuns.get(job);
- if (incompleteRuns != null) {
- metricConsumer.accept(job, incompleteRuns);
- }
- }
+ /**
+ * Records completion of a run of a job.
+ * This is guaranteed to always be called once after each maintainer run.
+ */
+ public abstract void completed(String job, double successFactor);
}
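With JobMetrics now abstract, each user supplies its own sink for the success factor. A minimal sketch of a concrete implementation, assuming a BiConsumer-based metric sink similar to what the old class wrapped; the class and field names are illustrative:

    import java.util.function.BiConsumer;

    public class ForwardingJobMetrics extends JobMetrics {

        private final BiConsumer<String, Double> metricConsumer;

        public ForwardingJobMetrics(BiConsumer<String, Double> metricConsumer) {
            this.metricConsumer = metricConsumer;
        }

        @Override
        public void completed(String job, double successFactor) {
            metricConsumer.accept(job, successFactor); // forward once per maintainer run
        }

    }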
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
index 734c46a2819..3a5c7e3421d 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
@@ -83,8 +83,19 @@ public abstract class Maintainer implements Runnable {
@Override
public final String toString() { return name(); }
- /** Called once each time this maintenance job should run. Returns whether the maintenance run was successful */
- protected abstract boolean maintain();
+ /**
+ * Called once each time this maintenance job should run.
+ *
+ * @return the degree to which the run was successful - a number between 0 (no success) and 1 (complete success).
+ *         Note that this indicates whether something is wrong, so e.g. if the call did nothing because it should do
+ *         nothing, 1.0 should be returned.
+ */
+ protected abstract double maintain();
+
+ /** Convenience method to convert attempts and failures into a success factor */
+ protected final double asSuccessFactor(int attempts, int failures) {
+ return attempts == 0 ? 1.0 : 1 - (double)failures / attempts;
+ }
/** Returns the interval at which this job is set to run */
protected Duration interval() { return interval; }
@@ -93,19 +104,20 @@ public abstract class Maintainer implements Runnable {
public final void lockAndMaintain(boolean force) {
if (!force && !jobControl.isActive(name())) return;
log.log(Level.FINE, () -> "Running " + this.getClass().getSimpleName());
- jobMetrics.recordRunOf(name());
+
+ double successFactor = 0;
try (var lock = jobControl.lockJob(name())) {
- if (maintain()) jobMetrics.recordCompletionOf(name());
- } catch (UncheckedTimeoutException e) {
- if (ignoreCollision) {
- jobMetrics.recordCompletionOf(name());
- } else {
+ successFactor = maintain();
+ }
+ catch (UncheckedTimeoutException e) {
+ if ( ! ignoreCollision)
log.log(Level.WARNING, this + " collided with another run. Will retry in " + interval);
- }
- } catch (Throwable e) {
+ }
+ catch (Throwable e) {
log.log(Level.WARNING, this + " failed. Will retry in " + interval, e);
- } finally {
- jobMetrics.forward(name());
+ }
+ finally {
+ jobMetrics.completed(name(), successFactor);
}
log.log(Level.FINE, () -> "Finished " + this.getClass().getSimpleName());
}
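
The new contract is easiest to see in isolation: a run counts its attempts and failures and folds them into a value in [0, 1], where zero attempts count as complete success. A standalone sketch (not the actual Maintainer subclass; the helper below simply mirrors the asSuccessFactor formula added above):

import java.util.List;

// Standalone illustration of the success-factor convention used by maintain().
class SuccessFactorSketch {

    // Mirrors Maintainer.asSuccessFactor: doing nothing because there was nothing to do is full success.
    static double asSuccessFactor(int attempts, int failures) {
        return attempts == 0 ? 1.0 : 1 - (double) failures / attempts;
    }

    public static void main(String[] args) {
        List<Boolean> outcomes = List.of(true, true, false, true); // one failed attempt out of four
        int attempts = outcomes.size();
        int failures = (int) outcomes.stream().filter(ok -> ! ok).count();
        System.out.println(asSuccessFactor(attempts, failures)); // prints 0.75
    }

}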
diff --git a/vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java b/vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java
index 144a9a585f6..4a53d70ff38 100644
--- a/vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java
+++ b/vespajlib/src/main/java/com/yahoo/slime/SlimeUtils.java
@@ -4,8 +4,13 @@ package com.yahoo.slime;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
+import java.time.Duration;
+import java.time.Instant;
import java.util.Iterator;
import java.util.Optional;
+import java.util.OptionalDouble;
+import java.util.OptionalInt;
+import java.util.OptionalLong;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Stream;
@@ -57,7 +62,7 @@ public class SlimeUtils {
}
}
- private static void copyArray(Inspector from, final Cursor to) {
+ private static void copyArray(Inspector from, Cursor to) {
from.traverse((ArrayTraverser) (i, inspector) -> addValue(inspector, to));
}
@@ -124,15 +129,32 @@ public class SlimeUtils {
return slime;
}
+ public static Instant instant(Inspector field) {
+ return Instant.ofEpochMilli(field.asLong());
+ }
+
public static Optional<String> optionalString(Inspector inspector) {
return Optional.of(inspector.asString()).filter(s -> !s.isEmpty());
}
- public static Optional<Long> optionalLong(Inspector inspector) {
- if (inspector.type() == Type.LONG) {
- return Optional.of(inspector.asLong());
- }
- return Optional.empty();
+ public static OptionalLong optionalLong(Inspector field) {
+ return field.valid() ? OptionalLong.of(field.asLong()) : OptionalLong.empty();
+ }
+
+ public static OptionalInt optionalInteger(Inspector field) {
+ return field.valid() ? OptionalInt.of((int) field.asLong()) : OptionalInt.empty();
+ }
+
+ public static OptionalDouble optionalDouble(Inspector field) {
+ return field.valid() ? OptionalDouble.of(field.asDouble()) : OptionalDouble.empty();
+ }
+
+ public static Optional<Instant> optionalInstant(Inspector field) {
+ return optionalLong(field).stream().mapToObj(Instant::ofEpochMilli).findFirst();
+ }
+
+ public static Optional<Duration> optionalDuration(Inspector field) {
+ return optionalLong(field).stream().mapToObj(Duration::ofMillis).findFirst();
}
public static Iterator<Inspector> entriesIterator(Inspector inspector) {
@@ -146,8 +168,9 @@ public class SlimeUtils {
/** Returns stream of entries for given inspector. If the inspector is not an array, empty stream is returned */
public static Stream<Inspector> entriesStream(Inspector inspector) {
int characteristics = Spliterator.NONNULL | Spliterator.SIZED | Spliterator.ORDERED;
- return StreamSupport.stream(
- Spliterators.spliteratorUnknownSize(entriesIterator(inspector), characteristics),
- false);
+ return StreamSupport.stream(Spliterators.spliteratorUnknownSize(entriesIterator(inspector),
+ characteristics),
+ false);
}
+
}
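
The new accessors return empty optionals for absent fields, so callers no longer need to check the field type first. A usage sketch, assuming the usual Slime Cursor/Inspector API (the field names are made up):

import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;

import java.time.Duration;
import java.util.OptionalLong;

class SlimeUtilsSketch {

    public static void main(String[] args) {
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setLong("retryAfterMillis", 30_000);

        OptionalLong millis = SlimeUtils.optionalLong(root.field("retryAfterMillis"));   // present
        Duration retryAfter = SlimeUtils.optionalDuration(root.field("retryAfterMillis"))
                                        .orElse(Duration.ZERO);                          // 30 seconds
        boolean absent = SlimeUtils.optionalLong(root.field("noSuchField")).isEmpty();   // true
        System.out.println(millis + " " + retryAfter + " " + absent);
    }

}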
diff --git a/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java b/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
index 3133752bc49..ab475e25387 100644
--- a/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
+++ b/vespajlib/src/main/java/com/yahoo/tensor/Tensor.java
@@ -240,6 +240,7 @@ public interface Tensor {
default Tensor equal(Tensor argument) { return join(argument, (a, b) -> ( a == b ? 1.0 : 0.0)); }
default Tensor notEqual(Tensor argument) { return join(argument, (a, b) -> ( a != b ? 1.0 : 0.0)); }
default Tensor approxEqual(Tensor argument) { return join(argument, (a, b) -> ( approxEquals(a,b) ? 1.0 : 0.0)); }
+ default Tensor bit(Tensor argument) { return join(argument, (a,b) -> ((int)b < 8 && (int)b >= 0 && ((int)a & (1 << (int)b)) != 0) ? 1.0 : 0.0); }
default Tensor avg() { return avg(Collections.emptyList()); }
default Tensor avg(String dimension) { return avg(Collections.singletonList(dimension)); }
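
The new bit() operation joins two tensors cell-wise and yields 1.0 when bit b (for b in 0..7) of the integer part of a is set; for example 5 = 0b101 has bits 0 and 2 set. A small sketch, assuming the standard Tensor.from string form:

import com.yahoo.tensor.Tensor;

class TensorBitSketch {

    public static void main(String[] args) {
        Tensor values = Tensor.from("tensor(x[3]):[5, 5, 5]");
        Tensor bits   = Tensor.from("tensor(x[3]):[0, 1, 2]");
        System.out.println(values.bit(bits));   // cells become 1, 0, 1 (5 = 0b101)
    }

}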
diff --git a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java
index 139a2901cd3..5700be36413 100644
--- a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java
+++ b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/JobControlTest.java
@@ -3,6 +3,8 @@ package com.yahoo.concurrent.maintenance;
import org.junit.Test;
+import java.util.concurrent.atomic.AtomicLong;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -19,9 +21,8 @@ public class JobControlTest {
String job1 = "Job1";
String job2 = "Job2";
- JobMetrics metrics = new JobMetrics((job, instant) -> {});
- TestMaintainer maintainer1 = new TestMaintainer(job1, jobControl, metrics);
- TestMaintainer maintainer2 = new TestMaintainer(job2, jobControl, metrics);
+ TestMaintainer maintainer1 = new TestMaintainer(job1, jobControl, new NoopJobMetrics());
+ TestMaintainer maintainer2 = new TestMaintainer(job2, jobControl, new NoopJobMetrics());
assertEquals(2, jobControl.jobs().size());
assertTrue(jobControl.jobs().contains(job1));
assertTrue(jobControl.jobs().contains(job2));
@@ -62,7 +63,7 @@ public class JobControlTest {
public void testJobControlMayDeactivateJobs() {
JobControlStateMock state = new JobControlStateMock();
JobControl jobControl = new JobControl(state);
- TestMaintainer mockMaintainer = new TestMaintainer(null, jobControl, new JobMetrics((job, instant) -> {}));
+ TestMaintainer mockMaintainer = new TestMaintainer(null, jobControl, new NoopJobMetrics());
assertTrue(jobControl.jobs().contains("TestMaintainer"));
@@ -80,4 +81,11 @@ public class JobControlTest {
assertEquals(2, mockMaintainer.totalRuns());
}
+ private static class NoopJobMetrics extends JobMetrics {
+
+ @Override
+ public void completed(String job, double successFactor) { }
+
+ }
+
}
diff --git a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java
index e881d4b3ff6..7c196dc6627 100644
--- a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java
+++ b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java
@@ -7,7 +7,6 @@ import org.junit.Test;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
-import java.util.concurrent.atomic.AtomicLong;
import static org.junit.Assert.assertEquals;
@@ -16,6 +15,8 @@ import static org.junit.Assert.assertEquals;
*/
public class MaintainerTest {
+ private static final double delta = 0.000001;
+
private final JobControl jobControl = new JobControl(new JobControlStateMock());
@Test
@@ -42,35 +43,39 @@ public class MaintainerTest {
@Test
public void success_metric() {
- AtomicLong consecutiveFailures = new AtomicLong();
- JobMetrics jobMetrics = new JobMetrics((job, count) -> consecutiveFailures.set(count));
+ TestJobMetrics jobMetrics = new TestJobMetrics();
TestMaintainer maintainer = new TestMaintainer(null, jobControl, jobMetrics);
- // Maintainer fails twice in a row
- maintainer.successOnNextRun(false).run();
- assertEquals(1, consecutiveFailures.get());
- maintainer.successOnNextRun(false).run();
- assertEquals(2, consecutiveFailures.get());
-
- // Maintainer runs successfully
- maintainer.successOnNextRun(true).run();
- assertEquals(0, consecutiveFailures.get());
-
- // Maintainer runs successfully again
- maintainer.run();
- assertEquals(0, consecutiveFailures.get());
+ maintainer.successOnNextRun(1.0).run();
+ assertEquals(1, jobMetrics.successFactor, delta);
+ maintainer.successOnNextRun(0.0).run();
+ assertEquals(0, jobMetrics.successFactor, delta);
+ maintainer.successOnNextRun(0.1).run();
+ assertEquals(0.1, jobMetrics.successFactor, delta);
// Maintainer throws
maintainer.throwOnNextRun(new RuntimeException()).run();
- assertEquals(1, consecutiveFailures.get());
+ assertEquals(0, jobMetrics.successFactor, delta);
// Maintainer recovers
maintainer.throwOnNextRun(null).run();
- assertEquals(0, consecutiveFailures.get());
+ maintainer.successOnNextRun(1.0).run();
+ assertEquals(1, jobMetrics.successFactor, delta);
// Lock exception is treated as a failure
maintainer.throwOnNextRun(new UncheckedTimeoutException()).run();
- assertEquals(1, consecutiveFailures.get());
+ assertEquals(0, jobMetrics.successFactor, delta);
+ }
+
+ private static class TestJobMetrics extends JobMetrics {
+
+ double successFactor = 0.0;
+
+ @Override
+ public void completed(String job, double successFactor) {
+ this.successFactor = successFactor;
+ }
+
}
}
diff --git a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java
index 44a00a37a83..a109064e101 100644
--- a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java
+++ b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java
@@ -11,7 +11,7 @@ import java.util.List;
class TestMaintainer extends Maintainer {
private int totalRuns = 0;
- private boolean success = true;
+ private double success = 1.0;
private RuntimeException exceptionToThrow = null;
public TestMaintainer(String name, JobControl jobControl, JobMetrics jobMetrics) {
@@ -22,7 +22,7 @@ class TestMaintainer extends Maintainer {
return totalRuns;
}
- public TestMaintainer successOnNextRun(boolean success) {
+ public TestMaintainer successOnNextRun(double success) {
this.success = success;
return this;
}
@@ -33,7 +33,7 @@ class TestMaintainer extends Maintainer {
}
@Override
- protected boolean maintain() {
+ protected double maintain() {
if (exceptionToThrow != null) throw exceptionToThrow;
totalRuns++;
return success;
diff --git a/vespalib/src/tests/datastore/datastore/datastore_test.cpp b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
index 11b4f5e6631..90281acb0d3 100644
--- a/vespalib/src/tests/datastore/datastore/datastore_test.cpp
+++ b/vespalib/src/tests/datastore/datastore/datastore_test.cpp
@@ -141,6 +141,38 @@ assertMemStats(const DataStoreBase::MemStats &exp,
EXPECT_EQ(exp._holdBuffers, act._holdBuffers);
}
+TEST(DataStoreTest, require_that_invalid_entry_ref_can_be_ordered) {
+ EntryRef inValid;
+ EntryRef a(1);
+ EXPECT_EQ(inValid, inValid);
+ EXPECT_EQ(a, a);
+ EXPECT_NE(inValid, a);
+ EXPECT_NE(a, inValid);
+ EXPECT_LT(inValid, a);
+ EXPECT_LE(inValid, a);
+}
+
+TEST(DataStoreTest, require_that_entry_ref_can_be_ordered) {
+ EntryRef a(1);
+ EntryRef b(2);
+ EntryRef c(3);
+ EXPECT_EQ(a, a);
+ EXPECT_EQ(b, b);
+ EXPECT_EQ(c, c);
+ EXPECT_NE(a, b);
+ EXPECT_NE(a, c);
+ EXPECT_NE(b, c);
+ EXPECT_LT(a, b);
+ EXPECT_LT(b, c);
+ EXPECT_LT(a, c);
+ EXPECT_LE(a, a);
+ EXPECT_LE(b, b);
+ EXPECT_LE(c, c);
+ EXPECT_LE(a, b);
+ EXPECT_LE(b, c);
+ EXPECT_LE(a, c);
+}
+
TEST(DataStoreTest, require_that_entry_ref_is_working)
{
using MyRefType = EntryRefT<22>;
@@ -643,6 +675,7 @@ TEST(DataStoreTest, control_static_sizes) {
EXPECT_EQ(0, bs.size());
}
+
}
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/stllike/hashtable_test.cpp b/vespalib/src/tests/stllike/hashtable_test.cpp
index cbd8b28d9a8..ac364fdf0df 100644
--- a/vespalib/src/tests/stllike/hashtable_test.cpp
+++ b/vespalib/src/tests/stllike/hashtable_test.cpp
@@ -5,9 +5,9 @@
#include <vespa/vespalib/stllike/hash_fun.h>
#include <vespa/vespalib/stllike/identity.h>
#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
#include <memory>
#include <vector>
-#include <vespa/vespalib/stllike/hash_map.h>
using vespalib::hashtable;
using std::vector;
@@ -134,6 +134,79 @@ TEST("require that hashtable<vector<int>> can be copied") {
EXPECT_EQUAL(6, (*table.find(2))[2]);
}
+/**
+ * Test to profile destruction and recreation of the hash map.
+ * It revealed some unexpected behaviour. Results with 10k iterations on a 2018 MacBook Pro (2.6 GHz i7):
+ * 1 - previous - 14.7s hash_node() : _node(), _next(invalid) {}
+ * 2 - test - 6.6s hash_node() : _next(invalid) { memset(_node, 0, sizeof(node)); }
+ * 3 - current - 2.3s hash_node() : _next(invalid) {}
+ */
+TEST("benchmark hash table reconstruction with POD objects") {
+ vespalib::hash_map<uint32_t, uint32_t> m(1000000);
+ constexpr size_t NUM_ITER = 10; // Set to 1k-10k to get measurable numbers 10k ~= 2.3s
+ for (size_t i(0); i < NUM_ITER; i++) {
+ m[46] = 17;
+ EXPECT_FALSE(m.empty());
+ EXPECT_EQUAL(1u, m.size());
+ EXPECT_EQUAL(1048576u, m.capacity());
+ m.clear();
+ EXPECT_TRUE(m.empty());
+ EXPECT_EQUAL(1048576u, m.capacity());
+ }
+}
+
+class NonPOD {
+public:
+ NonPOD() noexcept
+ : _v(rand())
+ {
+ construction_count++;
+ }
+ NonPOD(NonPOD && rhs) noexcept { _v = rhs._v; rhs._v = -1; }
+ NonPOD & operator =(NonPOD && rhs) noexcept { _v = rhs._v; rhs._v = -1; return *this; }
+ NonPOD(const NonPOD &) = delete;
+ NonPOD & operator =(const NonPOD &) = delete;
+ ~NonPOD() {
+ if (_v != -1) {
+ destruction_count++;
+ }
+ }
+ int32_t _v;
+ static size_t construction_count;
+ static size_t destruction_count;
+};
+
+size_t NonPOD::construction_count = 0;
+size_t NonPOD::destruction_count = 0;
+
+/**
+ * Performance is identical for non-POD objects and POD objects.
+ * Objects are only constructed on insert, and destructed on erase/clear.
+ */
+TEST("benchmark hash table reconstruction with non POD objects") {
+ vespalib::hash_map<uint32_t, NonPOD> m(1000000);
+ constexpr size_t NUM_ITER = 10; // Set to 1k-10k to get measurable numbers 10k ~= 2.3s
+ NonPOD::construction_count = 0;
+ NonPOD::destruction_count = 0;
+ for (size_t i(0); i < NUM_ITER; i++) {
+ EXPECT_EQUAL(i, NonPOD::construction_count);
+ EXPECT_EQUAL(i, NonPOD::destruction_count);
+ m.insert(std::make_pair(46, NonPOD()));
+ EXPECT_EQUAL(i+1, NonPOD::construction_count);
+ EXPECT_EQUAL(i, NonPOD::destruction_count);
+ EXPECT_FALSE(m.empty());
+ EXPECT_EQUAL(1u, m.size());
+ EXPECT_EQUAL(1048576u, m.capacity());
+ m.clear();
+ EXPECT_EQUAL(i+1, NonPOD::construction_count);
+ EXPECT_EQUAL(i+1, NonPOD::destruction_count);
+ EXPECT_TRUE(m.empty());
+ EXPECT_EQUAL(1048576u, m.capacity());
+ }
+ EXPECT_EQUAL(NUM_ITER, NonPOD::construction_count);
+ EXPECT_EQUAL(NUM_ITER, NonPOD::destruction_count);
+}
+
} // namespace
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index 13c242e85a7..5a50dba6a3c 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -27,11 +27,11 @@ public:
{
public:
EntryRef _ref;
- size_t _len; // Aligned length
+ size_t _len; // Aligned length
ElemHold1ListElem(EntryRef ref, size_t len)
- : _ref(ref),
- _len(len)
+ : _ref(ref),
+ _len(len)
{ }
};
diff --git a/vespalib/src/vespa/vespalib/datastore/entryref.h b/vespalib/src/vespa/vespalib/datastore/entryref.h
index 046d9089580..01f473fcf17 100644
--- a/vespalib/src/vespa/vespalib/datastore/entryref.h
+++ b/vespalib/src/vespa/vespalib/datastore/entryref.h
@@ -21,6 +21,7 @@ public:
bool operator==(const EntryRef &rhs) const noexcept { return _ref == rhs._ref; }
bool operator!=(const EntryRef &rhs) const noexcept { return _ref != rhs._ref; }
bool operator <(const EntryRef &rhs) const noexcept { return _ref < rhs._ref; }
+ bool operator <=(const EntryRef &rhs) const noexcept { return _ref <= rhs._ref; }
};
/**
diff --git a/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h b/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h
index 372a2191a88..11c87d3b7e9 100644
--- a/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h
+++ b/vespalib/src/vespa/vespalib/net/tls/crypto_codec_adapter.h
@@ -33,7 +33,7 @@ private:
ssize_t flush_all(); // -1/0 -> error/ok
public:
CryptoCodecAdapter(SocketHandle socket, std::unique_ptr<CryptoCodec> codec)
- : _input(64_Ki), _output(64_Ki), _socket(std::move(socket)), _codec(std::move(codec)),
+ : _input(0), _output(0), _socket(std::move(socket)), _codec(std::move(codec)),
_got_tls_close(false), _encoded_tls_close(false) {}
void inject_read_data(const char *buf, size_t len) override;
int get_fd() const override { return _socket.get(); }
diff --git a/vespalib/src/vespa/vespalib/stllike/hashtable.h b/vespalib/src/vespa/vespalib/stllike/hashtable.h
index b94672aaa06..ede18f89dc2 100644
--- a/vespalib/src/vespa/vespalib/stllike/hashtable.h
+++ b/vespalib/src/vespa/vespalib/stllike/hashtable.h
@@ -102,7 +102,9 @@ class hash_node {
public:
using next_t=hashtable_base::next_t;
enum {npos=-1u, invalid=-2u};
- hash_node() : _node(), _next(invalid) {}
+ hash_node()
+ : _next(invalid)
+ {}
hash_node(const V & node, next_t next=npos)
: _next(next)
{
diff --git a/vespalib/src/vespa/vespalib/stllike/hashtable.hpp b/vespalib/src/vespa/vespalib/stllike/hashtable.hpp
index d80113a8f55..494dc223f5b 100644
--- a/vespalib/src/vespa/vespalib/stllike/hashtable.hpp
+++ b/vespalib/src/vespa/vespalib/stllike/hashtable.hpp
@@ -146,6 +146,8 @@ hashtable<Key, Value, Hash, Equal, KeyExtract, Modulator>::erase(const Key & key
template< typename Key, typename Value, typename Hash, typename Equal, typename KeyExtract, typename Modulator >
void
hashtable<Key, Value, Hash, Equal, KeyExtract, Modulator>::clear() {
+ if (_count == 0) return; // Already empty and properly initialized
+
_nodes.clear();
_count = 0;
_nodes.resize(getTableSize());
diff --git a/vespalib/src/vespa/vespalib/util/alloc.h b/vespalib/src/vespa/vespalib/util/alloc.h
index f608a244035..a97e8c9f25e 100644
--- a/vespalib/src/vespa/vespalib/util/alloc.h
+++ b/vespalib/src/vespa/vespalib/util/alloc.h
@@ -102,13 +102,21 @@ private:
namespace vespalib {
/// Rounds up to the closest number that is a power of 2
-inline size_t roundUp2inN(size_t minimum) {
+inline size_t
+roundUp2inN(size_t minimum) {
return 2ul << Optimized::msbIdx(minimum - 1);
}
/// Rounds minElems up to the closest number where minElems*elemSize is a power of 2
-inline size_t roundUp2inN(size_t minElems, size_t elemSize) {
+inline size_t
+roundUp2inN(size_t minElems, size_t elemSize) {
return roundUp2inN(minElems * elemSize)/elemSize;
}
+template <typename T>
+size_t
+roundUp2inN(size_t elems) {
+ return roundUp2inN(elems, sizeof(T));
+}
+
}
diff --git a/vespalib/src/vespa/vespalib/util/exception.h b/vespalib/src/vespa/vespalib/util/exception.h
index 097ecc131c7..6fb53c035eb 100644
--- a/vespalib/src/vespa/vespalib/util/exception.h
+++ b/vespalib/src/vespa/vespalib/util/exception.h
@@ -216,6 +216,9 @@ public:
/** @brief Returns the msg parameter that this Exception was constructed with */
const string &getMessage() const { return _msg; }
+ /** @brief Returns the message string */
+ const char *message() const { return _msg.c_str(); }
+
/** @brief Returns the location parameter that this Exception was constructed with */
const string &getLocation() const { return _location; }
diff --git a/vespalib/src/vespa/vespalib/util/optimized.h b/vespalib/src/vespa/vespalib/util/optimized.h
index 92cf1f0ca24..6c6d1b12a71 100644
--- a/vespalib/src/vespa/vespalib/util/optimized.h
+++ b/vespalib/src/vespa/vespalib/util/optimized.h
@@ -22,9 +22,9 @@ public:
static int lsbIdx(unsigned int v);
static int lsbIdx(unsigned long v);
static int lsbIdx(unsigned long long v);
- static int popCount(unsigned int v) { return __builtin_popcount(v); }
- static int popCount(unsigned long v) { return __builtin_popcountl(v); }
- static int popCount(unsigned long long v) { return __builtin_popcountll(v); }
+ static constexpr int popCount(unsigned int v) { return __builtin_popcount(v); }
+ static constexpr int popCount(unsigned long v) { return __builtin_popcountl(v); }
+ static constexpr int popCount(unsigned long long v) { return __builtin_popcountll(v); }
};
/**