-rw-r--r--abi-check-plugin/pom.xml9
-rw-r--r--application/pom.xml6
-rw-r--r--client/go/Makefile1
-rw-r--r--client/go/go.mod20
-rw-r--r--client/go/go.sum20
-rw-r--r--client/go/internal/admin/jvm/env.go17
-rw-r--r--client/go/internal/cli/cmd/cert.go13
-rw-r--r--client/go/internal/cli/cmd/clone.go26
-rw-r--r--client/go/internal/cli/cmd/clone_test.go42
-rw-r--r--client/go/internal/cli/cmd/config.go9
-rw-r--r--client/go/internal/cli/cmd/curl.go22
-rw-r--r--client/go/internal/cli/cmd/curl_test.go1
-rw-r--r--client/go/internal/cli/cmd/deploy.go48
-rw-r--r--client/go/internal/cli/cmd/deploy_test.go33
-rw-r--r--client/go/internal/cli/cmd/destroy.go19
-rw-r--r--client/go/internal/cli/cmd/destroy_test.go29
-rw-r--r--client/go/internal/cli/cmd/document.go5
-rw-r--r--client/go/internal/cli/cmd/document_test.go41
-rw-r--r--client/go/internal/cli/cmd/feed.go10
-rw-r--r--client/go/internal/cli/cmd/feed_test.go4
-rw-r--r--client/go/internal/cli/cmd/prod.go2
-rw-r--r--client/go/internal/cli/cmd/query.go3
-rw-r--r--client/go/internal/cli/cmd/query_test.go8
-rw-r--r--client/go/internal/cli/cmd/root.go61
-rw-r--r--client/go/internal/cli/cmd/status.go145
-rw-r--r--client/go/internal/cli/cmd/status_test.go206
-rw-r--r--client/go/internal/cli/cmd/test.go19
-rw-r--r--client/go/internal/cli/cmd/test_test.go25
-rw-r--r--client/go/internal/cli/cmd/testutil_test.go32
-rw-r--r--client/go/internal/cli/cmd/visit_test.go8
-rw-r--r--client/go/internal/cli/cmd/waiter.go96
-rw-r--r--client/go/internal/mock/http.go5
-rw-r--r--client/go/internal/vespa/application.go21
-rw-r--r--client/go/internal/vespa/deploy.go72
-rw-r--r--client/go/internal/vespa/deploy_test.go35
-rw-r--r--client/go/internal/vespa/document/dispatcher.go4
-rw-r--r--client/go/internal/vespa/log.go2
-rw-r--r--client/go/internal/vespa/system.go59
-rw-r--r--client/go/internal/vespa/target.go162
-rw-r--r--client/go/internal/vespa/target_cloud.go205
-rw-r--r--client/go/internal/vespa/target_custom.go193
-rw-r--r--client/go/internal/vespa/target_test.go339
-rw-r--r--client/js/app/package.json6
-rw-r--r--client/js/app/yarn.lock1775
-rw-r--r--client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java2
-rw-r--r--cloud-tenant-base-dependencies-enforcer/pom.xml6
-rw-r--r--clustercontroller-core/pom.xml2
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java8
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/hostinfo/Metrics.java8
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasMetricContext.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java2
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventForBucketSpace.java3
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java2
-rw-r--r--clustercontroller-reindexer/pom.xml6
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java2
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java18
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java4
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java4
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java8
-rw-r--r--config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java20
-rw-r--r--config-model-fat/pom.xml3
-rw-r--r--config-model/pom.xml9
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java10
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java14
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java16
-rw-r--r--config-model/src/main/java/com/yahoo/schema/RankProfile.java9
-rw-r--r--config-model/src/main/java/com/yahoo/schema/Schema.java10
-rw-r--r--config-model/src/main/java/com/yahoo/schema/derived/AttributeFields.java4
-rw-r--r--config-model/src/main/java/com/yahoo/schema/derived/ImportedFields.java4
-rw-r--r--config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java5
-rw-r--r--config-model/src/main/java/com/yahoo/schema/document/ComplexAttributeFieldUtils.java25
-rw-r--r--config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java1
-rw-r--r--config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java2
-rw-r--r--config-model/src/main/java/com/yahoo/schema/parser/IntermediateCollection.java2
-rw-r--r--config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java7
-rw-r--r--config-model/src/main/java/com/yahoo/schema/processing/ImportedFieldsResolver.java4
-rw-r--r--config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java10
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java14
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/MetricsConsumer.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComplexFieldsWithStructFieldAttributesValidator.java18
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidator.java30
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java22
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java37
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/Container.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/component/BindingPattern.java12
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/component/UserBindingPattern.java27
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java17
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/ClusterControllerConfig.java25
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/StorageNode.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/IntegrityCheckerProducer.java49
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java17
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java4
-rw-r--r--config-model/src/main/javacc/SchemaParser.jj16
-rw-r--r--config-model/src/main/resources/schema/content.rnc4
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java4
-rw-r--r--config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java59
-rw-r--r--config-model/src/test/java/com/yahoo/schema/document/ComplexAttributeFieldUtilsTestCase.java4
-rw-r--r--config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java10
-rw-r--r--config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/ClusterInfoTest.java43
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsConsumersTest.java13
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/BundleValidatorTest.java9
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java12
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidatorTest.java48
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/UriBindingsValidatorTest.java36
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/http/DefaultFilterTest.java17
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/FederationTest.java1
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/xml/HandlerBuilderTest.java33
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java56
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java24
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java10
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java4
-rw-r--r--config-provisioning/pom.xml6
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/Deployer.java6
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java2
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/zone/AuthMethod.java14
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java126
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/UrlDownloadRpcServer.java72
-rw-r--r--config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java75
-rwxr-xr-xconfig/pom.xml2
-rw-r--r--configgen/README5
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/CNode.java6
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java2
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java8
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/DefLine.java17
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/DefParser.java9
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/InnerCNode.java4
-rw-r--r--configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java28
-rw-r--r--configserver-flags/pom.xml11
-rw-r--r--configserver-flags/src/test/java/com/yahoo/vespa/configserver/flags/http/FlagsHandlerTest.java101
-rw-r--r--configserver/pom.xml7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java54
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ConfigActivationListener.java4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java6
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java8
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java82
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationData.java81
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationMapper.java22
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationReindexing.java15
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationVersions.java (renamed from configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationSet.java)18
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigInstanceBuilder.java3
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigNotConvergedException.java4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/FileDistributionStatus.java13
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java68
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java6
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java288
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java302
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java45
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java84
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java8
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java12
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java15
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java67
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSession.java18
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java56
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionData.java108
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java52
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java136
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionSerializer.java72
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java49
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java15
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java7
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java43
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java6
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java103
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationMapperTest.java8
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationReindexingTest.java5
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationVersionsTest.java (renamed from configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationSetTest.java)24
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/application/TenantApplicationsTest.java66
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java174
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployerTest.java159
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java18
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java51
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/HostHandlerTest.java2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ListApplicationsHandlerTest.java19
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcServerTest.java4
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcTester.java78
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java12
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java32
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java19
-rw-r--r--container-core/pom.xml12
-rw-r--r--container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java20
-rw-r--r--container-core/src/main/java/com/yahoo/container/jdisc/state/MetricGatherer.java24
-rw-r--r--container-core/src/main/java/com/yahoo/container/jdisc/state/MetricsPacketsHandler.java13
-rw-r--r--container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottler.java22
-rw-r--r--container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java11
-rw-r--r--container-core/src/test/java/com/yahoo/container/jdisc/state/HostLifeGathererTest.java19
-rw-r--r--container-core/src/test/java/com/yahoo/container/jdisc/state/MetricsPacketsHandlerTest.java14
-rw-r--r--container-core/src/test/java/com/yahoo/container/logging/CircularArrayAccessLogKeeperTest.java2
-rw-r--r--container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java40
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottlerTest.java2
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java4
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java7
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java59
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ProxyProtocolTest.java13
-rw-r--r--container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SslHandshakeMetricsTest.java15
-rw-r--r--container-dependencies-enforcer/pom.xml6
-rw-r--r--container-disc/pom.xml21
-rw-r--r--container-disc/src/main/java/com/yahoo/container/jdisc/metric/JrtMetrics.java2
-rw-r--r--container-search/abi-spec.json3
-rw-r--r--container-search/pom.xml11
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java27
-rw-r--r--container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java12
-rw-r--r--container-search/src/main/java/com/yahoo/search/logging/AbstractThreadedLogger.java11
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java3
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/properties/RankProfileInputProperties.java5
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java16
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/ranking/RankFeatures.java4
-rw-r--r--container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java5
-rw-r--r--container-search/src/test/java/com/yahoo/search/grouping/vespa/RequestBuilderTestCase.java30
-rw-r--r--container-search/src/test/java/com/yahoo/search/handler/JSONSearchHandlerTestCase.java135
-rw-r--r--container-search/src/test/java/com/yahoo/search/logging/LocalDiskLoggerTest.java12
-rw-r--r--container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java6
-rw-r--r--container-search/src/test/java/com/yahoo/search/query/SortingTestCase.java4
-rw-r--r--controller-api/pom.xml5
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java20
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentEndpoints.java25
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java74
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java17
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java8
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/AccountId.java12
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/DeploymentIssues.java2
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/Issue.java33
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueHandler.java8
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueInfo.java11
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockIssueHandler.java21
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/OwnershipIssues.java5
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/DummyOwnershipIssues.java5
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/LoggingDeploymentIssues.java3
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagValidationException.java11
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java50
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java348
-rw-r--r--controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTargetTest.java41
-rw-r--r--controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java234
-rw-r--r--controller-server/pom.xml6
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java19
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java103
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java50
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java8
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java288
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java9
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java10
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/GeneratedEndpoint.java3
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java22
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/BasicServicesXml.java92
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java13
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java61
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java39
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java12
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java3
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java7
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java10
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgrader.java9
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java14
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java10
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java8
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java8
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java19
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployer.java12
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsHandler.java19
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/GeneratedEndpoints.java32
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/PreparedEndpoints.java121
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java58
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java41
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java51
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java22
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/BasicServicesXmlTest.java54
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java10
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java29
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java13
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainerTest.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java8
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java19
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java42
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java11
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java10
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java21
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java12
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/instance1-recursive.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json30
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/recursive-root.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/tenant1-recursive.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenServiceTest.java9
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployResultTest.java25
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployerTest.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java68
-rw-r--r--controller-server/src/test/resources/test_runner_services.xml-cd2
-rw-r--r--controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd2
-rw-r--r--dependency-versions/pom.xml105
-rw-r--r--dist/vespa.spec12
-rw-r--r--document/abi-spec.json6
-rw-r--r--document/src/main/java/com/yahoo/document/json/readers/TensorModifyUpdateReader.java18
-rw-r--r--document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializerHead.java19
-rw-r--r--document/src/main/java/com/yahoo/document/serialization/VespaDocumentSerializerHead.java14
-rw-r--r--document/src/main/java/com/yahoo/document/update/TensorModifyUpdate.java86
-rw-r--r--document/src/test/java/com/yahoo/document/DocumentTestCase.java2
-rw-r--r--document/src/test/java/com/yahoo/document/DocumentUpdateTestCase.java5
-rw-r--r--document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java31
-rw-r--r--document/src/test/java/com/yahoo/document/update/TensorModifyUpdateTest.java36
-rw-r--r--document/src/tests/data/serialize-tensor-update-cpp.dat bin 231 -> 348 bytes
-rw-r--r--document/src/tests/data/serialize-tensor-update-java.dat bin 231 -> 348 bytes
-rw-r--r--document/src/tests/documentupdatetestcase.cpp37
-rw-r--r--document/src/tests/serialization/vespadocumentserializer_test.cpp2
-rw-r--r--document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp63
-rw-r--r--document/src/vespa/document/fieldvalue/tensorfieldvalue.h1
-rw-r--r--document/src/vespa/document/serialization/vespadocumentserializer.cpp20
-rw-r--r--document/src/vespa/document/update/tensor_modify_update.cpp102
-rw-r--r--document/src/vespa/document/update/tensor_modify_update.h5
-rw-r--r--document/src/vespa/document/update/tensor_partial_update.cpp118
-rw-r--r--document/src/vespa/document/update/tensor_partial_update.h9
-rw-r--r--eval/CMakeLists.txt3
-rw-r--r--eval/src/tests/eval/fast_value/fast_value_test.cpp75
-rw-r--r--eval/src/tests/eval/nested_loop/nested_loop_test.cpp80
-rw-r--r--eval/src/tests/eval/value_type/value_type_test.cpp14
-rw-r--r--eval/src/tests/instruction/dense_join_reduce_plan/CMakeLists.txt9
-rw-r--r--eval/src/tests/instruction/dense_join_reduce_plan/dense_join_reduce_plan_test.cpp101
-rw-r--r--eval/src/tests/instruction/sparse_join_reduce_plan/CMakeLists.txt9
-rw-r--r--eval/src/tests/instruction/sparse_join_reduce_plan/sparse_join_reduce_plan_test.cpp205
-rw-r--r--eval/src/tests/instruction/universal_dot_product/CMakeLists.txt9
-rw-r--r--eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp89
-rw-r--r--eval/src/vespa/eval/eval/fast_value.hpp7
-rw-r--r--eval/src/vespa/eval/eval/inline_operation.h28
-rw-r--r--eval/src/vespa/eval/eval/nested_loop.h37
-rw-r--r--eval/src/vespa/eval/eval/optimize_tensor_function.cpp2
-rw-r--r--eval/src/vespa/eval/eval/value_codec.cpp4
-rw-r--r--eval/src/vespa/eval/eval/value_type.cpp31
-rw-r--r--eval/src/vespa/eval/eval/value_type.h4
-rw-r--r--eval/src/vespa/eval/instruction/CMakeLists.txt3
-rw-r--r--eval/src/vespa/eval/instruction/best_similarity_function.cpp5
-rw-r--r--eval/src/vespa/eval/instruction/dense_dot_product_function.cpp40
-rw-r--r--eval/src/vespa/eval/instruction/dense_join_reduce_plan.cpp95
-rw-r--r--eval/src/vespa/eval/instruction/dense_join_reduce_plan.h27
-rw-r--r--eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp15
-rw-r--r--eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp31
-rw-r--r--eval/src/vespa/eval/instruction/sparse_join_reduce_plan.cpp186
-rw-r--r--eval/src/vespa/eval/instruction/sparse_join_reduce_plan.h38
-rw-r--r--eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp6
-rw-r--r--eval/src/vespa/eval/instruction/universal_dot_product.cpp119
-rw-r--r--eval/src/vespa/eval/instruction/universal_dot_product.h22
-rw-r--r--eval/src/vespa/eval/onnx/onnx_wrapper.cpp2
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/EmptyFileReferenceData.java55
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java2
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java2
-rw-r--r--flags/pom.xml8
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java6
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java79
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java42
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/json/DimensionHelper.java1
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java4
-rw-r--r--fnet/src/tests/sync_execute/sync_execute.cpp8
-rw-r--r--fnet/src/vespa/fnet/transport_thread.cpp37
-rw-r--r--fnet/src/vespa/fnet/transport_thread.h5
-rw-r--r--hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java24
-rw-r--r--hosted-tenant-base/pom.xml15
-rw-r--r--indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpression.java51
-rw-r--r--indexinglanguage/src/main/javacc/IndexingParser.jj8
-rw-r--r--indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpressionTestCase.java51
-rw-r--r--integration/intellij/build.gradle.kts2
-rw-r--r--jdisc_core/pom.xml6
-rw-r--r--jdisc_core/src/test/resources/exportPackages.properties2
-rw-r--r--maven-plugins/allowed-maven-dependencies.txt91
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java14
-rw-r--r--messagebus/src/vespa/messagebus/network/rpctarget.cpp9
-rw-r--r--messagebus/src/vespa/messagebus/network/rpctarget.h8
-rw-r--r--messagebus/src/vespa/messagebus/network/rpctargetpool.cpp2
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java10
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasResponse.java9
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonUtil.java10
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusUtil.java5
-rw-r--r--metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java6
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/yamas/YamasHandlerTest.java7
-rw-r--r--metrics-proxy/src/test/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonModelTest.java20
-rw-r--r--metrics/pom.xml21
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java13
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/ControllerMetrics.java1
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/HostedNodeAdminMetrics.java4
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/VespaMetrics.java2
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/BasicMetricSets.java23
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java230
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/DefaultVespaMetrics.java13
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/InfrastructureMetricSet.java114
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/MetricSet.java44
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/SystemMetrics.java4
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/Vespa9DefaultMetricSet.java175
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java640
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java92
-rw-r--r--metrics/src/test/java/ai/vespa/metrics/MetricSetTest.java (renamed from config-model/src/test/java/com/yahoo/vespa/model/admin/monitoring/MetricSetTest.java)53
-rw-r--r--metrics/src/test/java/ai/vespa/metrics/MetricTest.java (renamed from config-model/src/test/java/com/yahoo/vespa/model/admin/monitoring/MetricTest.java)2
-rw-r--r--metrics/src/tests/metricmanagertest.cpp14
-rw-r--r--metrics/src/vespa/metrics/countmetric.h2
-rw-r--r--metrics/src/vespa/metrics/jsonwriter.h5
-rw-r--r--metrics/src/vespa/metrics/metricmanager.cpp14
-rw-r--r--metrics/src/vespa/metrics/metricmanager.h6
-rw-r--r--metrics/src/vespa/metrics/metricsnapshot.cpp11
-rw-r--r--metrics/src/vespa/metrics/metricsnapshot.h17
-rw-r--r--metrics/src/vespa/metrics/state_api_adapter.cpp4
-rw-r--r--metrics/src/vespa/metrics/textwriter.h2
-rw-r--r--model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java25
-rw-r--r--node-admin/pom.xml6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Cgroup.java5
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/IoController.java111
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/MemoryController.java50
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Size.java5
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStats.java8
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollector.java6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java15
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java11
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java37
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java3
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java9
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java62
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/UserScope.java6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributes.java38
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java1
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/cgroup/CgroupTest.java84
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/cgroup/IoControllerTest.java19
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollectorTest.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java9
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java16
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java8
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java14
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributesCacheTest.java10
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributesTest.java20
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPathTest.java5
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerFileSystemTest.java4
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerUserPrincipalLookupServiceTest.java10
-rw-r--r--node-repository/pom.xml2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java (renamed from node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java)116
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java46
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java42
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java20
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java44
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceChange.java94
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java6
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DiskReplacer.java46
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java34
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java15
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImages.java29
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java113
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java1
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java26
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java12
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java42
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java24
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java75
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java47
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java28
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java33
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java3
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java14
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java34
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java70
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java8
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java29
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImagesTest.java9
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java3
-rw-r--r--orchestrator/pom.xml2
-rw-r--r--parent/pom.xml82
-rw-r--r--persistence/src/vespa/persistence/spi/clusterstate.cpp2
-rwxr-xr-xprovided-dependencies/pom.xml2
-rw-r--r--renovate.json2
-rw-r--r--searchcore/src/tests/grouping/CMakeLists.txt2
-rw-r--r--searchcore/src/tests/grouping/grouping_test.cpp (renamed from searchcore/src/tests/grouping/grouping.cpp)0
-rw-r--r--searchcore/src/tests/proton/common/attribute_updater/attribute_updater_test.cpp9
-rw-r--r--searchcore/src/tests/proton/common/cachedselect_test.cpp2
-rw-r--r--searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp6
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp2
-rw-r--r--searchcore/src/tests/proton/feed_and_search/CMakeLists.txt2
-rw-r--r--searchcore/src/tests/proton/feed_and_search/feed_and_search_test.cpp (renamed from searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp)0
-rw-r--r--searchcore/src/tests/proton/feedtoken/CMakeLists.txt2
-rw-r--r--searchcore/src/tests/proton/feedtoken/feedtoken_test.cpp (renamed from searchcore/src/tests/proton/feedtoken/feedtoken.cpp)0
-rw-r--r--searchcore/src/tests/proton/flushengine/flushengine_test.cpp2
-rw-r--r--searchcore/src/tests/proton/index/diskindexcleaner_test.cpp10
-rw-r--r--searchcore/src/tests/proton/index/fusionrunner_test.cpp25
-rw-r--r--searchcore/src/tests/proton/index/indexmanager_test.cpp24
-rw-r--r--searchcore/src/tests/proton/matchengine/CMakeLists.txt2
-rw-r--r--searchcore/src/tests/proton/matchengine/matchengine_test.cpp (renamed from searchcore/src/tests/proton/matchengine/matchengine.cpp)0
-rw-r--r--searchcore/src/tests/proton/matching/matching_test.cpp21
-rw-r--r--searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp6
-rw-r--r--searchcore/src/tests/proton/statusreport/CMakeLists.txt2
-rw-r--r--searchcore/src/tests/proton/statusreport/statusreport_test.cpp (renamed from searchcore/src/tests/proton/statusreport/statusreport.cpp)0
-rw-r--r--searchcore/src/tests/proton/summaryengine/CMakeLists.txt2
-rw-r--r--searchcore/src/tests/proton/summaryengine/summaryengine_test.cpp (renamed from searchcore/src/tests/proton/summaryengine/summaryengine.cpp)0
-rw-r--r--searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp5
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/attributedisklayout.cpp11
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/attribute_updater.cpp3
-rw-r--r--searchcore/src/vespa/searchcore/proton/feedoperation/lidvectorcontext.cpp6
-rw-r--r--searchcore/src/vespa/searchcore/proton/feedoperation/removedocumentsoperation.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp16
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/match_tools.h10
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/matcher.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/matching_stats.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/matching_stats.h73
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/viewresolver.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/metrics/metrics_engine.cpp11
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp10
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/matchers.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/userdocuments.h2
-rw-r--r--searchcore/src/vespa/searchcorespi/index/diskindexcleaner.cpp10
-rw-r--r--searchcore/src/vespa/searchcorespi/index/indexreadutilities.cpp30
-rw-r--r--searchlib/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/aggregator/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/aggregator/perdocexpr_test.cpp (renamed from searchlib/src/tests/aggregator/perdocexpr.cpp)0
-rw-r--r--searchlib/src/tests/alignment/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/alignment/alignment_test.cpp (renamed from searchlib/src/tests/alignment/alignment.cpp)0
-rw-r--r--searchlib/src/tests/attribute/attribute_test.cpp29
-rw-r--r--searchlib/src/tests/attribute/extendattributes/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/attribute/extendattributes/extendattribute_test.cpp (renamed from searchlib/src/tests/attribute/extendattributes/extendattribute.cpp)0
-rw-r--r--searchlib/src/tests/attribute/guard/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/attribute/guard/attributeguard_test.cpp (renamed from searchlib/src/tests/attribute/guard/attributeguard.cpp)0
-rw-r--r--searchlib/src/tests/attribute/postinglist/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/attribute/postinglist/postinglist_test.cpp (renamed from searchlib/src/tests/attribute/postinglist/postinglist.cpp)0
-rw-r--r--searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp22
-rw-r--r--searchlib/src/tests/common/summaryfeatures/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/common/summaryfeatures/summaryfeatures_test.cpp (renamed from searchlib/src/tests/common/summaryfeatures/summaryfeatures.cpp)0
-rw-r--r--searchlib/src/tests/diskindex/pagedict4/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/diskindex/pagedict4/pagedict4_test.cpp (renamed from searchlib/src/tests/diskindex/pagedict4/pagedict4test.cpp)0
-rw-r--r--searchlib/src/tests/features/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/features/beta/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/features/beta/beta_features_test.cpp (renamed from searchlib/src/tests/features/beta/beta_features.cpp)0
-rw-r--r--searchlib/src/tests/features/prod_features_attributematch.cpp2
-rw-r--r--searchlib/src/tests/features/prod_features_fieldmatch.cpp2
-rw-r--r--searchlib/src/tests/features/prod_features_fieldtermmatch.cpp2
-rw-r--r--searchlib/src/tests/features/prod_features_framework.cpp2
-rw-r--r--searchlib/src/tests/features/prod_features_test.cpp (renamed from searchlib/src/tests/features/prod_features.cpp)2
-rw-r--r--searchlib/src/tests/features/prod_features_test.h (renamed from searchlib/src/tests/features/prod_features.h)0
-rw-r--r--searchlib/src/tests/fef/featureoverride/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/fef/featureoverride/featureoverride_test.cpp (renamed from searchlib/src/tests/fef/featureoverride/featureoverride.cpp)0
-rw-r--r--searchlib/src/tests/forcelink/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/forcelink/forcelink_test.cpp (renamed from searchlib/src/tests/forcelink/forcelink.cpp)0
-rw-r--r--searchlib/src/tests/ld-library-path/CMakeLists.txt7
-rw-r--r--searchlib/src/tests/ld_library_path/.gitignore (renamed from searchlib/src/tests/ld-library-path/.gitignore)0
-rw-r--r--searchlib/src/tests/ld_library_path/CMakeLists.txt7
-rw-r--r--searchlib/src/tests/ld_library_path/ld_library_path_test.cpp (renamed from searchlib/src/tests/ld-library-path/ld-library-path.cpp)0
-rw-r--r--searchlib/src/tests/nativerank/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/nativerank/nativerank_test.cpp (renamed from searchlib/src/tests/nativerank/nativerank.cpp)0
-rw-r--r--searchlib/src/tests/queryeval/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp10
-rw-r--r--searchlib/src/tests/queryeval/queryeval_test.cpp (renamed from searchlib/src/tests/queryeval/queryeval.cpp)0
-rw-r--r--searchlib/src/tests/queryeval/sourceblender/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/queryeval/sourceblender/sourceblender_test.cpp (renamed from searchlib/src/tests/queryeval/sourceblender/sourceblender.cpp)0
-rw-r--r--searchlib/src/tests/ranksetup/ranksetup_test.cpp7
-rw-r--r--searchlib/src/tests/sortresults/CMakeLists.txt6
-rw-r--r--searchlib/src/tests/sortresults/sortresults_test.cpp (renamed from searchlib/src/tests/sortresults/sorttest.cpp)0
-rw-r--r--searchlib/src/tests/sortspec/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/sortspec/multilevelsort_test.cpp (renamed from searchlib/src/tests/sortspec/multilevelsort.cpp)0
-rw-r--r--searchlib/src/tests/transactionlog/translogclient_test.cpp13
-rw-r--r--searchlib/src/tests/true/CMakeLists.txt2
-rw-r--r--searchlib/src/tests/true/true_test.cpp (renamed from searchlib/src/tests/true/true.cpp)0
-rw-r--r--searchlib/src/tests/url/CMakeLists.txt8
-rwxr-xr-xsearchlib/src/tests/url/dotest.sh2
-rw-r--r--searchlib/src/tests/url/url_test.cpp (renamed from searchlib/src/tests/url/testurl.cpp)0
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h10
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.cpp12
-rw-r--r--searchlib/src/vespa/searchlib/expression/resultnodes.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.cpp3
-rw-r--r--searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.h2
-rw-r--r--searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h4
-rw-r--r--searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp39
-rw-r--r--searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp36
-rw-r--r--searchlib/src/vespa/searchlib/fef/indexproperties.cpp16
-rw-r--r--searchlib/src/vespa/searchlib/fef/indexproperties.h15
-rw-r--r--searchlib/src/vespa/searchlib/fef/ranksetup.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/fef/ranksetup.h19
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h2
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp21
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h10
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_calculator.h6
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/domain.cpp36
-rw-r--r--searchlib/src/vespa/searchlib/util/fileutil.cpp1
-rw-r--r--searchsummary/src/vespa/juniper/sumdesc.cpp7
-rw-r--r--security-utils/src/main/java/com/yahoo/security/SharedKeyGenerator.java2
-rw-r--r--storage/specs/bucketinfo/bucketinfo.tla16
-rw-r--r--storage/src/tests/bucketdb/bucketmanagertest.cpp222
-rw-r--r--storage/src/tests/common/testhelper.cpp4
-rw-r--r--storage/src/tests/distributor/btree_bucket_database_test.cpp6
-rw-r--r--storage/src/tests/distributor/bucketdatabasetest.cpp16
-rw-r--r--storage/src/tests/distributor/bucketdatabasetest.h14
-rw-r--r--storage/src/tests/distributor/bucketdbmetricupdatertest.cpp31
-rw-r--r--storage/src/tests/distributor/bucketstateoperationtest.cpp1
-rw-r--r--storage/src/tests/distributor/check_condition_test.cpp17
-rw-r--r--storage/src/tests/distributor/distributor_bucket_space_test.cpp10
-rw-r--r--storage/src/tests/distributor/distributor_host_info_reporter_test.cpp4
-rw-r--r--storage/src/tests/distributor/distributor_stripe_test_util.cpp103
-rw-r--r--storage/src/tests/distributor/distributor_stripe_test_util.h1
-rw-r--r--storage/src/tests/distributor/garbagecollectiontest.cpp63
-rw-r--r--storage/src/tests/distributor/mock_tickable_stripe.h2
-rw-r--r--storage/src/tests/distributor/multi_thread_stripe_access_guard_test.cpp2
-rw-r--r--storage/src/tests/distributor/operationtargetresolvertest.cpp26
-rw-r--r--storage/src/tests/distributor/pendingmessagetrackertest.cpp6
-rw-r--r--storage/src/tests/distributor/putoperationtest.cpp155
-rw-r--r--storage/src/tests/distributor/removeoperationtest.cpp40
-rw-r--r--storage/src/tests/distributor/simplemaintenancescannertest.cpp18
-rw-r--r--storage/src/tests/distributor/statecheckerstest.cpp55
-rw-r--r--storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp70
-rw-r--r--storage/src/tests/distributor/top_level_distributor_test.cpp4
-rw-r--r--storage/src/tests/distributor/top_level_distributor_test_util.cpp12
-rw-r--r--storage/src/tests/distributor/top_level_distributor_test_util.h1
-rw-r--r--storage/src/tests/distributor/twophaseupdateoperationtest.cpp169
-rw-r--r--storage/src/tests/distributor/updateoperationtest.cpp40
-rw-r--r--storage/src/tests/persistence/filestorage/filestormanagertest.cpp27
-rw-r--r--storage/src/tests/storageserver/statemanagertest.cpp11
-rw-r--r--storage/src/vespa/storage/bucketdb/.gitignore2
-rw-r--r--storage/src/vespa/storage/bucketdb/CMakeLists.txt2
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketcopy.h20
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketdatabase.h33
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketinfo.cpp33
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketinfo.h66
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketinfo.hpp99
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketmanager.cpp199
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketmanager.h47
-rw-r--r--storage/src/vespa/storage/bucketdb/stor-bucket-init.def35
-rw-r--r--storage/src/vespa/storage/common/distributorcomponent.h7
-rw-r--r--storage/src/vespa/storage/common/nodestateupdater.h8
-rw-r--r--storage/src/vespa/storage/common/statusmetricconsumer.cpp1
-rw-r--r--storage/src/vespa/storage/config/CMakeLists.txt6
-rw-r--r--storage/src/vespa/storage/config/stor-bucketmover.def37
-rw-r--r--storage/src/vespa/storage/config/stor-integritychecker.def38
-rw-r--r--storage/src/vespa/storage/config/stor-messageforwarder.def4
-rw-r--r--storage/src/vespa/storage/distributor/CMakeLists.txt1
-rw-r--r--storage/src/vespa/storage/distributor/activecopy.cpp156
-rw-r--r--storage/src/vespa/storage/distributor/activecopy.h43
-rw-r--r--storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.cpp35
-rw-r--r--storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.h28
-rw-r--r--storage/src/vespa/storage/distributor/cancelled_replicas_pruner.cpp22
-rw-r--r--storage/src/vespa/storage/distributor/cancelled_replicas_pruner.h17
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.cpp20
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.h2
-rw-r--r--storage/src/vespa/storage/distributor/distributor_host_info_reporter.cpp15
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.cpp54
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.h5
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.cpp108
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.h37
-rw-r--r--storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp55
-rw-r--r--storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h68
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.cpp67
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.h14
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemetricsset.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemetricsset.h3
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h16
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp51
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h66
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp32
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h24
-rw-r--r--storage/src/vespa/storage/distributor/messagetracker.cpp27
-rw-r--r--storage/src/vespa/storage/distributor/messagetracker.h25
-rw-r--r--storage/src/vespa/storage/distributor/min_replica_provider.cpp3
-rw-r--r--storage/src/vespa/storage/distributor/min_replica_provider.h10
-rw-r--r--storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h2
-rw-r--r--storage/src/vespa/storage/distributor/nodeinfo.cpp26
-rw-r--r--storage/src/vespa/storage/distributor/nodeinfo.h14
-rw-r--r--storage/src/vespa/storage/distributor/operationowner.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/operations/CMakeLists.txt1
-rw-r--r--storage/src/vespa/storage/distributor/operations/cancel_scope.cpp52
-rw-r--r--storage/src/vespa/storage/distributor/operations/cancel_scope.h62
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/check_condition.cpp13
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/check_condition.h4
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.cpp26
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.h2
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp21
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removeoperation.h1
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp1
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp76
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h21
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp16
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/updateoperation.h1
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp27
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h11
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp5
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp15
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/operations/operation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/operation.h26
-rw-r--r--storage/src/vespa/storage/distributor/operationtargetresolver.h27
-rw-r--r--storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp88
-rw-r--r--storage/src/vespa/storage/distributor/operationtargetresolverimpl.h27
-rw-r--r--storage/src/vespa/storage/distributor/outdated_nodes.h4
-rw-r--r--storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h4
-rw-r--r--storage/src/vespa/storage/distributor/pendingmessagetracker.cpp35
-rw-r--r--storage/src/vespa/storage/distributor/pendingmessagetracker.h11
-rw-r--r--storage/src/vespa/storage/distributor/persistencemessagetracker.cpp106
-rw-r--r--storage/src/vespa/storage/distributor/persistencemessagetracker.h37
-rw-r--r--storage/src/vespa/storage/distributor/sentmessagemap.h12
-rw-r--r--storage/src/vespa/storage/distributor/statechecker.cpp14
-rw-r--r--storage/src/vespa/storage/distributor/statechecker.h56
-rw-r--r--storage/src/vespa/storage/distributor/statecheckers.cpp470
-rw-r--r--storage/src/vespa/storage/distributor/stripe_access_guard.h4
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp3
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h3
-rw-r--r--storage/src/vespa/storage/distributor/tickable_stripe.h3
-rw-r--r--storage/src/vespa/storage/distributor/top_level_distributor.cpp13
-rw-r--r--storage/src/vespa/storage/distributor/top_level_distributor.h3
-rw-r--r--storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp7
-rw-r--r--storage/src/vespa/storage/persistence/mergehandler.cpp1
-rw-r--r--storage/src/vespa/storage/persistence/persistenceutil.h1
-rw-r--r--storage/src/vespa/storage/persistence/processallhandler.cpp1
-rw-r--r--storage/src/vespa/storage/storageserver/communicationmanager.cpp22
-rw-r--r--storage/src/vespa/storage/storageserver/distributornode.cpp2
-rw-r--r--storage/src/vespa/storage/storageserver/distributornode.h7
-rw-r--r--storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp2
-rw-r--r--storage/src/vespa/storage/storageserver/servicelayernode.cpp19
-rw-r--r--storage/src/vespa/storage/storageserver/servicelayernode.h20
-rw-r--r--storage/src/vespa/storage/storageserver/statemanager.cpp22
-rw-r--r--storage/src/vespa/storage/storageserver/statemanager.h7
-rw-r--r--storage/src/vespa/storage/storageserver/storagenode.cpp14
-rw-r--r--storage/src/vespa/storage/storageserver/storagenode.h3
-rw-r--r--storage/src/vespa/storage/storageutil/distributorstatecache.h12
-rw-r--r--storage/src/vespa/storage/storageutil/utils.h47
-rw-r--r--storage/src/vespa/storage/tools/getidealstate.cpp15
-rw-r--r--storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp15
-rw-r--r--storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h3
-rw-r--r--storageserver/src/tests/testhelper.cpp3
-rw-r--r--storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp3
-rw-r--r--streamingvisitors/src/tests/charbuffer/CMakeLists.txt2
-rw-r--r--streamingvisitors/src/tests/charbuffer/charbuffer_test.cpp (renamed from streamingvisitors/src/tests/charbuffer/charbuffer.cpp)0
-rw-r--r--streamingvisitors/src/tests/docsum/CMakeLists.txt2
-rw-r--r--streamingvisitors/src/tests/docsum/docsum_test.cpp (renamed from streamingvisitors/src/tests/docsum/docsum.cpp)0
-rw-r--r--streamingvisitors/src/tests/document/CMakeLists.txt2
-rw-r--r--streamingvisitors/src/tests/document/document_test.cpp (renamed from streamingvisitors/src/tests/document/document.cpp)0
-rw-r--r--streamingvisitors/src/tests/textutil/CMakeLists.txt2
-rw-r--r--streamingvisitors/src/tests/textutil/textutil_test.cpp (renamed from streamingvisitors/src/tests/textutil/textutil.cpp)0
-rw-r--r--tenant-cd-api/pom.xml2
-rw-r--r--testutil/pom.xml9
-rw-r--r--testutil/src/main/java/com/yahoo/test/LinePatternMatcher.java4
-rw-r--r--testutil/src/main/java/com/yahoo/vespa/test/file/UnixUidGidAttributeProvider.java90
-rw-r--r--vdslib/src/tests/distribution/CMakeLists.txt1
-rw-r--r--vdslib/src/tests/distribution/distributiontest.cpp173
-rw-r--r--vdslib/src/tests/distribution/idealnodecalculatorimpltest.cpp35
-rw-r--r--vdslib/src/tests/state/clusterstatetest.cpp20
-rw-r--r--vdslib/src/vespa/vdslib/distribution/CMakeLists.txt1
-rw-r--r--vdslib/src/vespa/vdslib/distribution/distribution.cpp99
-rw-r--r--vdslib/src/vespa/vdslib/distribution/distribution.h80
-rw-r--r--vdslib/src/vespa/vdslib/distribution/group.cpp4
-rw-r--r--vdslib/src/vespa/vdslib/distribution/group.h25
-rw-r--r--vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h94
-rw-r--r--vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.cpp72
-rw-r--r--vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.h31
-rw-r--r--vdslib/src/vespa/vdslib/state/clusterstate.cpp318
-rw-r--r--vdslib/src/vespa/vdslib/state/clusterstate.h54
-rw-r--r--vdslib/src/vespa/vdslib/state/node.h13
-rw-r--r--vespa-athenz/pom.xml56
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java180
-rw-r--r--vespa-dependencies-enforcer/allowed-maven-dependencies.txt244
-rw-r--r--vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java6
-rw-r--r--vespa-maven-plugin/pom.xml5
-rw-r--r--vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/SubmitMojo.java2
-rw-r--r--vespajlib/pom.xml2
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java15
-rw-r--r--vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java41
-rw-r--r--vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java12
-rw-r--r--vespalib/src/tests/alloc/alloc_test.cpp6
-rw-r--r--vespalib/src/tests/arrayref/arrayref_test.cpp20
-rw-r--r--vespalib/src/tests/guard/guard_test.cpp152
-rw-r--r--vespalib/src/tests/io/fileutil/fileutiltest.cpp83
-rw-r--r--vespalib/src/tests/small_vector/small_vector_test.cpp26
-rw-r--r--vespalib/src/tests/util/file_area_freelist/file_area_freelist_test.cpp29
-rw-r--r--vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp72
-rw-r--r--vespalib/src/vespa/fastos/unix_file.cpp4
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.cpp18
-rw-r--r--vespalib/src/vespa/vespalib/datastore/datastorebase.h2
-rw-r--r--vespalib/src/vespa/vespalib/io/fileutil.cpp302
-rw-r--r--vespalib/src/vespa/vespalib/io/fileutil.h89
-rw-r--r--vespalib/src/vespa/vespalib/io/mapped_file_input.cpp4
-rw-r--r--vespalib/src/vespa/vespalib/stllike/hash_map.cpp2
-rw-r--r--vespalib/src/vespa/vespalib/stllike/hash_map.h2
-rw-r--r--vespalib/src/vespa/vespalib/stllike/hash_set.cpp2
-rw-r--r--vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp3
-rw-r--r--vespalib/src/vespa/vespalib/stllike/string.hpp4
-rw-r--r--vespalib/src/vespa/vespalib/util/arrayref.h5
-rw-r--r--vespalib/src/vespa/vespalib/util/file_area_freelist.cpp68
-rw-r--r--vespalib/src/vespa/vespalib/util/file_area_freelist.h4
-rw-r--r--vespalib/src/vespa/vespalib/util/guard.h250
-rw-r--r--vespalib/src/vespa/vespalib/util/jsonstream.cpp2
-rw-r--r--vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp114
-rw-r--r--vespalib/src/vespa/vespalib/util/mmap_file_allocator.h21
-rw-r--r--vespalib/src/vespa/vespalib/util/small_vector.h1
-rw-r--r--vespalib/src/vespa/vespalib/util/stash.cpp11
-rw-r--r--vespalib/src/vespa/vespalib/util/stash.h14
832 files changed, 15296 insertions, 9796 deletions
diff --git a/abi-check-plugin/pom.xml b/abi-check-plugin/pom.xml
index 87350af8289..a0a1c52428d 100644
--- a/abi-check-plugin/pom.xml
+++ b/abi-check-plugin/pom.xml
@@ -45,7 +45,7 @@
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-all</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>test</scope>
</dependency>
<dependency>
@@ -53,6 +53,11 @@
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
+ <dependency>
+ <groupId>com.google.errorprone</groupId>
+ <artifactId>error_prone_annotations</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
<build>
<finalName>${project.artifactId}</finalName>
@@ -64,7 +69,7 @@
<plugin>
<groupId>org.openclover</groupId>
<artifactId>clover-maven-plugin</artifactId>
- <version>4.3.1</version>
+ <version>${clover-maven-plugin.vespa.version}</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
diff --git a/application/pom.xml b/application/pom.xml
index 6e5c6426220..bb1ea4b30ee 100644
--- a/application/pom.xml
+++ b/application/pom.xml
@@ -117,11 +117,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
diff --git a/client/go/Makefile b/client/go/Makefile
index 95da52c2383..e0f22836c45 100644
--- a/client/go/Makefile
+++ b/client/go/Makefile
@@ -34,6 +34,7 @@ all: test checkfmt install
#
# $ make dist-homebrew
#
dist-homebrew: dist-version
+ brew update
brew bump-formula-pr --version $(VERSION) --no-audit --no-browse vespa-cli
diff --git a/client/go/go.mod b/client/go/go.mod
index 5d1f6175e55..0d67283104f 100644
--- a/client/go/go.mod
+++ b/client/go/go.mod
@@ -3,34 +3,34 @@ module github.com/vespa-engine/vespa/client/go
go 1.19
require (
- github.com/alessio/shellescape v1.4.1
+ github.com/alessio/shellescape v1.4.2
github.com/briandowns/spinner v1.23.0
github.com/fatih/color v1.15.0
// This is the most recent version compatible with Go 1.19. Upgrade when we upgrade our Go version
github.com/go-json-experiment/json v0.0.0-20230216065249-540f01442424
- github.com/klauspost/compress v1.16.5
+ github.com/klauspost/compress v1.16.7
github.com/mattn/go-colorable v0.1.13
- github.com/mattn/go-isatty v0.0.18
+ github.com/mattn/go-isatty v0.0.19
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8
github.com/spf13/cobra v1.7.0
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.8.2
- github.com/zalando/go-keyring v0.2.2
- golang.org/x/net v0.9.0
- golang.org/x/sys v0.7.0
+ github.com/stretchr/testify v1.8.4
+ github.com/zalando/go-keyring v0.2.3
+ golang.org/x/net v0.14.0
+ golang.org/x/sys v0.11.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/danieljoos/wincred v1.1.2 // indirect
+ github.com/danieljoos/wincred v1.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/godbus/dbus/v5 v5.1.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/kr/pretty v0.1.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- golang.org/x/term v0.7.0 // indirect
- golang.org/x/text v0.9.0 // indirect
+ golang.org/x/term v0.11.0 // indirect
+ golang.org/x/text v0.12.0 // indirect
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
)
diff --git a/client/go/go.sum b/client/go/go.sum
index 03206b0c5e8..fbe0fa1207e 100644
--- a/client/go/go.sum
+++ b/client/go/go.sum
@@ -1,11 +1,15 @@
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
+github.com/alessio/shellescape v1.4.2 h1:MHPfaU+ddJ0/bYWpgIeUnQUqKrlJ1S7BfEYPM4uEoM0=
+github.com/alessio/shellescape v1.4.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A=
github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0=
github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0=
+github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE=
+github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -19,6 +23,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -29,6 +35,8 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
+github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -48,20 +56,32 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/zalando/go-keyring v0.2.2 h1:f0xmpYiSrHtSNAVgwip93Cg8tuF45HJM6rHq/A5RI/4=
github.com/zalando/go-keyring v0.2.2/go.mod h1:sI3evg9Wvpw3+n4SqplGSJUMwtDeROfD4nsFz4z9PG0=
+github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms=
+github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/client/go/internal/admin/jvm/env.go b/client/go/internal/admin/jvm/env.go
index 7b1ce97a40a..1cbdb46648f 100644
--- a/client/go/internal/admin/jvm/env.go
+++ b/client/go/internal/admin/jvm/env.go
@@ -5,10 +5,12 @@ package jvm
import (
"fmt"
+ "strings"
"github.com/vespa-engine/vespa/client/go/internal/admin/defaults"
"github.com/vespa-engine/vespa/client/go/internal/admin/envvars"
"github.com/vespa-engine/vespa/client/go/internal/admin/prog"
+ "github.com/vespa-engine/vespa/client/go/internal/admin/trace"
"github.com/vespa-engine/vespa/client/go/internal/util"
)
@@ -29,8 +31,19 @@ func (opts *Options) exportEnvSettings(ps *prog.Spec) {
ps.Setenv(envvars.LD_LIBRARY_PATH, dlp)
ps.Setenv(envvars.MALLOC_ARENA_MAX, "1")
if preload := ps.Getenv(envvars.PRELOAD); preload != "" {
- ps.Setenv(envvars.JAVAVM_LD_PRELOAD, preload)
- ps.Setenv(envvars.LD_PRELOAD, preload)
+ checked := []string{}
+ for _, fileName := range strings.Split(preload, ":") {
+ if util.PathExists(fileName) {
+ checked = append(checked, fileName)
+ } else {
+ trace.Info("File in PRELOAD missing, skipped:", fileName)
+ }
+ }
+ if len(checked) > 0 {
+ preload := strings.Join(checked, ":")
+ ps.Setenv(envvars.JAVAVM_LD_PRELOAD, preload)
+ ps.Setenv(envvars.LD_PRELOAD, preload)
+ }
}
util.OptionallyReduceTimerFrequency()
c.exportExtraEnv(ps)
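
The exportEnvSettings change above now skips PRELOAD entries whose files are missing before exporting JAVAVM_LD_PRELOAD and LD_PRELOAD. A minimal standalone sketch of that filtering idea, using os.Stat in place of the CLI's util.PathExists helper (function and path names here are illustrative, not the CLI's own):

package main

import (
	"fmt"
	"os"
	"strings"
)

// filterPreload keeps only the entries of a colon-separated preload list
// that point at files which actually exist on disk, logging the rest.
// Sketch only; the real code lives in exportEnvSettings above.
func filterPreload(preload string) string {
	var kept []string
	for _, name := range strings.Split(preload, ":") {
		if _, err := os.Stat(name); err == nil {
			kept = append(kept, name)
		} else {
			fmt.Fprintln(os.Stderr, "File in PRELOAD missing, skipped:", name)
		}
	}
	return strings.Join(kept, ":")
}

func main() {
	// Hypothetical input; only existing files survive the filter.
	fmt.Println(filterPreload("/usr/lib64/libvespamalloc.so:/does/not/exist.so"))
}
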
diff --git a/client/go/internal/cli/cmd/cert.go b/client/go/internal/cli/cmd/cert.go
index f7320e37626..7fbb357d1db 100644
--- a/client/go/internal/cli/cmd/cert.go
+++ b/client/go/internal/cli/cmd/cert.go
@@ -156,9 +156,16 @@ func doCertAdd(cli *CLI, overwriteCertificate bool, args []string) error {
}
func maybeCopyCertificate(force, ignoreZip bool, cli *CLI, target vespa.Target, pkg vespa.ApplicationPackage) error {
- if pkg.IsZip() && !ignoreZip {
- hint := "Try running 'mvn clean', then 'vespa auth cert add' and finally 'mvn package'"
- return errHint(fmt.Errorf("cannot add certificate to compressed application package: %s", pkg.Path), hint)
+ if pkg.IsZip() {
+ if ignoreZip {
+ cli.printWarning("Cannot verify existence of "+color.CyanString("security/clients.pem")+" since "+pkg.Path+" is compressed",
+ "Deployment to Vespa Cloud requires certificate in application package",
+ "See https://cloud.vespa.ai/en/security/guide")
+ return nil
+ } else {
+ hint := "Try running 'mvn clean', then 'vespa auth cert add' and finally 'mvn package'"
+ return errHint(fmt.Errorf("cannot add certificate to compressed application package: %s", pkg.Path), hint)
+ }
}
if force {
return copyCertificate(cli, target, pkg)
diff --git a/client/go/internal/cli/cmd/clone.go b/client/go/internal/cli/cmd/clone.go
index 6fb97581ea3..a835892990b 100644
--- a/client/go/internal/cli/cmd/clone.go
+++ b/client/go/internal/cli/cmd/clone.go
@@ -6,8 +6,10 @@ package cmd
import (
"archive/zip"
+ "errors"
"fmt"
"io"
+ "io/fs"
"log"
"net/http"
"os"
@@ -76,6 +78,23 @@ type zipFile struct {
modTime time.Time
}
+func (c *cloner) createDirectory(path string) error {
+ if err := os.Mkdir(path, 0755); err != nil {
+ if errors.Is(err, fs.ErrExist) {
+ entries, err := os.ReadDir(path)
+ if err != nil {
+ return err
+ }
+ if len(entries) > 0 {
+ return fmt.Errorf("%s already exists and is not empty", path)
+ }
+ } else {
+ return err
+ }
+ }
+ return nil
+}
+
// Clone copies the application identified by applicationName into given path. If the cached copy of sample applications
// has expired (as determined by its entity tag), a current copy will be downloaded from GitHub automatically.
func (c *cloner) Clone(applicationName, path string) error {
@@ -95,9 +114,8 @@ func (c *cloner) Clone(applicationName, path string) error {
dirPrefix := "sample-apps-master/" + applicationName + "/"
if strings.HasPrefix(f.Name, dirPrefix) {
if !found { // Create destination directory lazily when source is found
- createErr := os.Mkdir(path, 0755)
- if createErr != nil {
- return fmt.Errorf("could not create directory '%s': %w", color.CyanString(path), createErr)
+ if err := c.createDirectory(path); err != nil {
+ return fmt.Errorf("could not create directory: %w", err)
}
}
found = true
@@ -111,7 +129,7 @@ func (c *cloner) Clone(applicationName, path string) error {
if !found {
return errHint(fmt.Errorf("could not find source application '%s'", color.CyanString(applicationName)), "Use -f to ignore the cache")
} else {
- log.Print("Created ", color.CyanString(path))
+ log.Print("Cloned into ", color.CyanString(path))
}
return nil
}
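
The new createDirectory helper lets vespa clone write into an existing directory (for example ".") only when that directory is empty, and otherwise still creates the destination lazily. A rough self-contained sketch of the same check, assuming only the standard library; ensureEmptyDir is an illustrative name, not the CLI's:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// ensureEmptyDir creates path, or accepts an already existing directory
// only if it contains no entries.
func ensureEmptyDir(path string) error {
	if err := os.Mkdir(path, 0755); err != nil {
		if !errors.Is(err, fs.ErrExist) {
			return err
		}
		entries, err := os.ReadDir(path)
		if err != nil {
			return err
		}
		if len(entries) > 0 {
			return fmt.Errorf("%s already exists and is not empty", path)
		}
	}
	return nil
}

func main() {
	if err := ensureEmptyDir("."); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
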
diff --git a/client/go/internal/cli/cmd/clone_test.go b/client/go/internal/cli/cmd/clone_test.go
index f136299db85..3d7250cc760 100644
--- a/client/go/internal/cli/cmd/clone_test.go
+++ b/client/go/internal/cli/cmd/clone_test.go
@@ -18,14 +18,16 @@ import (
)
func TestClone(t *testing.T) {
- assertCreated("text-search", "mytestapp", t)
-}
-
-func assertCreated(sampleAppName string, app string, t *testing.T) {
+ origWd, err := os.Getwd()
+ require.Nil(t, err)
+ sampleAppName := "text-search"
+ app := "mytestapp"
tempDir := t.TempDir()
app1 := filepath.Join(tempDir, "app1")
- defer os.RemoveAll(app)
-
+ t.Cleanup(func() {
+ os.Chdir(origWd)
+ os.RemoveAll(app)
+ })
httpClient := &mock.HTTPClient{}
cli, stdout, stderr := newTestCLI(t)
cli.httpClient = httpClient
@@ -35,7 +37,7 @@ func assertCreated(sampleAppName string, app string, t *testing.T) {
// Initial cloning. GitHub includes the ETag header, but we don't require it
httpClient.NextResponseBytes(200, testdata)
require.Nil(t, cli.Run("clone", sampleAppName, app1))
- assert.Equal(t, "Created "+app1+"\n", stdout.String())
+ assert.Equal(t, "Cloned into "+app1+"\n", stdout.String())
assertFiles(t, app1)
// Clone with cache hit
@@ -43,8 +45,27 @@ func assertCreated(sampleAppName string, app string, t *testing.T) {
stdout.Reset()
app2 := filepath.Join(tempDir, "app2")
require.Nil(t, cli.Run("clone", sampleAppName, app2))
- assert.Equal(t, "Using cached sample apps ...\nCreated "+app2+"\n", stdout.String())
+ assert.Equal(t, "Using cached sample apps ...\nCloned into "+app2+"\n", stdout.String())
assertFiles(t, app2)
+ stdout.Reset()
+
+ // Clone to current directory (dot)
+ emptyDir := filepath.Join(tempDir, "mypath1")
+ require.Nil(t, os.Mkdir(emptyDir, 0755))
+ require.Nil(t, os.Chdir(emptyDir))
+ httpClient.NextStatus(http.StatusNotModified)
+ require.Nil(t, cli.Run("clone", sampleAppName, "."))
+ assert.Equal(t, "Using cached sample apps ...\nCloned into .\n", stdout.String())
+ assertFiles(t, ".")
+ stdout.Reset()
+
+ // Clone to non-empty directory
+ httpClient.NextStatus(http.StatusNotModified)
+ nonEmptyDir := filepath.Join(tempDir, "mypath2")
+ require.Nil(t, os.MkdirAll(filepath.Join(nonEmptyDir, "more"), 0755))
+ require.NotNil(t, cli.Run("clone", sampleAppName, nonEmptyDir))
+ assert.Equal(t, "Error: could not create directory: "+nonEmptyDir+" already exists and is not empty\n", stderr.String())
+ stderr.Reset()
// Clone while ignoring cache
headers := make(http.Header)
@@ -53,7 +74,7 @@ func assertCreated(sampleAppName string, app string, t *testing.T) {
stdout.Reset()
app3 := filepath.Join(tempDir, "app3")
require.Nil(t, cli.Run("clone", "-f", sampleAppName, app3))
- assert.Equal(t, "Created "+app3+"\n", stdout.String())
+ assert.Equal(t, "Cloned into "+app3+"\n", stdout.String())
assertFiles(t, app3)
// Cloning falls back to cached copy if GitHub is unavailable
@@ -62,7 +83,7 @@ func assertCreated(sampleAppName string, app string, t *testing.T) {
app4 := filepath.Join(tempDir, "app4")
require.Nil(t, cli.Run("clone", "-f=false", sampleAppName, app4))
assert.Equal(t, "Warning: could not download sample apps: github returned status 500\n", stderr.String())
- assert.Equal(t, "Using cached sample apps ...\nCreated "+app4+"\n", stdout.String())
+ assert.Equal(t, "Using cached sample apps ...\nCloned into "+app4+"\n", stdout.String())
assertFiles(t, app4)
// The only cached file is the latest one
@@ -79,6 +100,7 @@ func assertCreated(sampleAppName string, app string, t *testing.T) {
}
func assertFiles(t *testing.T, app string) {
+ t.Helper()
assert.True(t, util.PathExists(filepath.Join(app, "README.md")))
assert.True(t, util.PathExists(filepath.Join(app, "src", "main", "application")))
assert.True(t, util.IsDirectory(filepath.Join(app, "src", "main", "application")))
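
The rewritten TestClone records the original working directory and registers a t.Cleanup to restore it, since the new test case chdirs into a temporary directory in order to clone into ".". A small standalone sketch of that pattern, with a hypothetical package and test name:

package cmd

import (
	"os"
	"testing"
)

// TestChdirIsRestored shows the cleanup pattern: remember the original
// working directory and restore it when the test ends, even on failure.
func TestChdirIsRestored(t *testing.T) {
	orig, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { _ = os.Chdir(orig) })
	if err := os.Chdir(t.TempDir()); err != nil {
		t.Fatal(err)
	}
	// ... test body that relies on the temporary working directory ...
}
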
diff --git a/client/go/internal/cli/cmd/config.go b/client/go/internal/cli/cmd/config.go
index 0a03686dd33..2ebd6b0793e 100644
--- a/client/go/internal/cli/cmd/config.go
+++ b/client/go/internal/cli/cmd/config.go
@@ -52,7 +52,7 @@ most to least preferred:
3. Global config value
4. Default value
-The following flags/options can be configured:
+The following global flags/options can be configured:
application
@@ -96,13 +96,6 @@ e.g. vespa deploy or vespa query. Possible values are:
- hosted: Connect to hosted Vespa (internal platform)
- *url*: Connect to a platform running at given URL.
-wait
-
-Specifies the number of seconds to wait for a service to become ready or
-deployment to complete. Use this to have a potentially long-running command
-block until the operation is complete, e.g. with vespa deploy. Defaults to 0
-(no waiting)
-
zone
Specifies a custom dev or perf zone to use when connecting to a Vespa platform.
diff --git a/client/go/internal/cli/cmd/curl.go b/client/go/internal/cli/cmd/curl.go
index 3009cab2b5e..44540db9ccf 100644
--- a/client/go/internal/cli/cmd/curl.go
+++ b/client/go/internal/cli/cmd/curl.go
@@ -39,7 +39,17 @@ $ vespa curl -- -v --data-urlencode "yql=select * from music where album contain
if err != nil {
return err
}
- service, err := target.Service(curlService, time.Duration(waitSecs)*time.Second, 0, cli.config.cluster())
+ var service *vespa.Service
+ useDeploy := curlService == "deploy"
+ waiter := cli.waiter(false, time.Duration(waitSecs)*time.Second)
+ if useDeploy {
+ if cli.config.cluster() != "" {
+ return fmt.Errorf("cannot specify cluster for service %s", curlService)
+ }
+ service, err = target.DeployService()
+ } else {
+ service, err = waiter.Service(target, cli.config.cluster())
+ }
if err != nil {
return err
}
@@ -49,19 +59,15 @@ $ vespa curl -- -v --data-urlencode "yql=select * from music where album contain
if err != nil {
return err
}
- switch curlService {
- case vespa.DeployService:
+ if useDeploy {
if err := addAccessToken(c, target); err != nil {
return err
}
- case vespa.DocumentService, vespa.QueryService:
+ } else {
c.CaCertificate = service.TLSOptions.CACertificateFile
c.PrivateKey = service.TLSOptions.PrivateKeyFile
c.Certificate = service.TLSOptions.CertificateFile
- default:
- return fmt.Errorf("service not found: %s", curlService)
}
-
if dryRun {
log.Print(c.String())
} else {
@@ -73,7 +79,7 @@ $ vespa curl -- -v --data-urlencode "yql=select * from music where album contain
},
}
cmd.Flags().BoolVarP(&dryRun, "dry-run", "n", false, "Print the curl command that would be executed")
- cmd.Flags().StringVarP(&curlService, "service", "s", "query", "Which service to query. Must be \"deploy\", \"document\" or \"query\"")
+ cmd.Flags().StringVarP(&curlService, "service", "s", "container", "Which service to query. Must be \"deploy\" or \"container\"")
cli.bindWaitFlag(cmd, 60, &waitSecs)
return cmd
}
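
With the --service flag narrowed to "deploy" or "container", vespa curl now either talks to the deploy (config server) endpoint, rejecting a configured cluster, or resolves a container cluster through the waiter. A toy sketch of that branching, returning a label instead of a *vespa.Service; chooseService is an illustrative name:

package main

import "fmt"

// chooseService mirrors the branching above in spirit: "deploy" goes to the
// config server and cannot be combined with a cluster name, while any other
// value is routed to the container cluster.
func chooseService(service, cluster string) (string, error) {
	if service == "deploy" {
		if cluster != "" {
			return "", fmt.Errorf("cannot specify cluster for service %s", service)
		}
		return "deploy service", nil
	}
	return "container cluster " + cluster, nil
}

func main() {
	if _, err := chooseService("deploy", "music"); err != nil {
		fmt.Println("error:", err)
	}
	label, _ := chooseService("container", "music")
	fmt.Println(label)
}
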
diff --git a/client/go/internal/cli/cmd/curl_test.go b/client/go/internal/cli/cmd/curl_test.go
index 3eca0726bb4..520cf41e308 100644
--- a/client/go/internal/cli/cmd/curl_test.go
+++ b/client/go/internal/cli/cmd/curl_test.go
@@ -14,7 +14,6 @@ func TestCurl(t *testing.T) {
cli.Environment["VESPA_CLI_ENDPOINTS"] = "{\"endpoints\":[{\"cluster\":\"container\",\"url\":\"http://127.0.0.1:8080\"}]}"
assert.Nil(t, cli.Run("config", "set", "application", "t1.a1.i1"))
assert.Nil(t, cli.Run("config", "set", "target", "cloud"))
- assert.Nil(t, cli.Run("config", "set", "cluster", "container"))
assert.Nil(t, cli.Run("auth", "api-key"))
assert.Nil(t, cli.Run("auth", "cert", "--no-add"))
diff --git a/client/go/internal/cli/cmd/deploy.go b/client/go/internal/cli/cmd/deploy.go
index ef32d7f01b7..fbd0529101f 100644
--- a/client/go/internal/cli/cmd/deploy.go
+++ b/client/go/internal/cli/cmd/deploy.go
@@ -25,7 +25,7 @@ func newDeployCmd(cli *CLI) *cobra.Command {
copyCert bool
)
cmd := &cobra.Command{
- Use: "deploy [application-directory]",
+ Use: "deploy [application-directory-or-file]",
Short: "Deploy (prepare and activate) an application package",
Long: `Deploy (prepare and activate) an application package.
@@ -73,8 +73,12 @@ $ vespa deploy -t cloud -z perf.aws-us-east-1c`,
return err
}
}
+ waiter := cli.waiter(false, timeout)
+ if _, err := waiter.DeployService(target); err != nil {
+ return err
+ }
var result vespa.PrepareResult
- if err := cli.spinner(cli.Stderr, "Uploading application package ...", func() error {
+ if err := cli.spinner(cli.Stderr, "Uploading application package...", func() error {
result, err = vespa.Deploy(opts)
return err
}); err != nil {
@@ -84,18 +88,14 @@ $ vespa deploy -t cloud -z perf.aws-us-east-1c`,
if opts.Target.IsCloud() {
cli.printSuccess("Triggered deployment of ", color.CyanString(pkg.Path), " with run ID ", color.CyanString(strconv.FormatInt(result.ID, 10)))
} else {
- cli.printSuccess("Deployed ", color.CyanString(pkg.Path))
+ cli.printSuccess("Deployed ", color.CyanString(pkg.Path), " with session ID ", color.CyanString(strconv.FormatInt(result.ID, 10)))
printPrepareLog(cli.Stderr, result)
}
if opts.Target.IsCloud() {
- log.Printf("\nUse %s for deployment status, or follow this deployment at", color.CyanString("vespa status"))
- log.Print(color.CyanString(fmt.Sprintf("%s/tenant/%s/application/%s/%s/instance/%s/job/%s-%s/run/%d",
- opts.Target.Deployment().System.ConsoleURL,
- opts.Target.Deployment().Application.Tenant, opts.Target.Deployment().Application.Application, opts.Target.Deployment().Zone.Environment,
- opts.Target.Deployment().Application.Instance, opts.Target.Deployment().Zone.Environment, opts.Target.Deployment().Zone.Region,
- result.ID)))
- }
- return waitForQueryService(cli, target, result.ID, timeout)
+ log.Printf("\nUse %s for deployment status, or follow this deployment at", color.CyanString("vespa status deployment"))
+ log.Print(color.CyanString(opts.Target.Deployment().System.ConsoleRunURL(opts.Target.Deployment(), result.ID)))
+ }
+ return waitForDeploymentReady(cli, target, result.ID, timeout)
},
}
cmd.Flags().StringVarP(&logLevelArg, "log-level", "l", "error", `Log level for Vespa logs. Must be "error", "warning", "info" or "debug"`)
@@ -107,7 +107,7 @@ $ vespa deploy -t cloud -z perf.aws-us-east-1c`,
func newPrepareCmd(cli *CLI) *cobra.Command {
return &cobra.Command{
- Use: "prepare application-directory",
+ Use: "prepare [application-directory-or-file]",
Short: "Prepare an application package for activation",
Args: cobra.MaximumNArgs(1),
DisableAutoGenTag: true,
@@ -123,7 +123,7 @@ func newPrepareCmd(cli *CLI) *cobra.Command {
}
opts := vespa.DeploymentOptions{ApplicationPackage: pkg, Target: target}
var result vespa.PrepareResult
- err = cli.spinner(cli.Stderr, "Uploading application package ...", func() error {
+ err = cli.spinner(cli.Stderr, "Uploading application package...", func() error {
result, err = vespa.Prepare(opts)
return err
})
@@ -149,10 +149,6 @@ func newActivateCmd(cli *CLI) *cobra.Command {
DisableAutoGenTag: true,
SilenceUsage: true,
RunE: func(cmd *cobra.Command, args []string) error {
- pkg, err := cli.applicationPackageFrom(args, true)
- if err != nil {
- return fmt.Errorf("could not find application package: %w", err)
- }
sessionID, err := cli.config.readSessionID(vespa.DefaultApplication)
if err != nil {
return fmt.Errorf("could not read session id: %w", err)
@@ -162,24 +158,32 @@ func newActivateCmd(cli *CLI) *cobra.Command {
return err
}
timeout := time.Duration(waitSecs) * time.Second
- opts := vespa.DeploymentOptions{ApplicationPackage: pkg, Target: target, Timeout: timeout}
+ waiter := cli.waiter(false, timeout)
+ if _, err := waiter.DeployService(target); err != nil {
+ return err
+ }
+ opts := vespa.DeploymentOptions{Target: target, Timeout: timeout}
err = vespa.Activate(sessionID, opts)
if err != nil {
return err
}
- cli.printSuccess("Activated ", color.CyanString(pkg.Path), " with session ", sessionID)
- return waitForQueryService(cli, target, sessionID, timeout)
+ cli.printSuccess("Activated application with session ", sessionID)
+ return waitForDeploymentReady(cli, target, sessionID, timeout)
},
}
cli.bindWaitFlag(cmd, 60, &waitSecs)
return cmd
}
-func waitForQueryService(cli *CLI, target vespa.Target, sessionOrRunID int64, timeout time.Duration) error {
+func waitForDeploymentReady(cli *CLI, target vespa.Target, sessionOrRunID int64, timeout time.Duration) error {
if timeout == 0 {
return nil
}
- _, err := cli.service(target, vespa.QueryService, sessionOrRunID, cli.config.cluster(), timeout)
+ waiter := cli.waiter(false, timeout)
+ if _, err := waiter.Deployment(target, sessionOrRunID); err != nil {
+ return err
+ }
+ _, err := waiter.Services(target)
return err
}
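
waitForDeploymentReady now waits first for the deployment (session or run ID) and then for the services, both through the new waiter with the configured timeout. The waiter implementation itself is not part of this excerpt; below is a generic sketch of the poll-until-ready pattern such a waiter presumably wraps, with illustrative names and intervals:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor polls check until it succeeds or timeout elapses.
// Sketch only; the CLI's waiter also prints progress and distinguishes
// deploy service, deployment and container readiness.
func waitFor(timeout, interval time.Duration, check func() error) error {
	deadline := time.Now().Add(timeout)
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %s: %w", timeout, err)
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	err := waitFor(3*time.Second, 100*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet")
		}
		return nil
	})
	fmt.Println("ready:", err == nil, "after", attempts, "attempts")
}
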
diff --git a/client/go/internal/cli/cmd/deploy_test.go b/client/go/internal/cli/cmd/deploy_test.go
index 16aa3fd0ed8..d578b2a4629 100644
--- a/client/go/internal/cli/cmd/deploy_test.go
+++ b/client/go/internal/cli/cmd/deploy_test.go
@@ -6,6 +6,7 @@ package cmd
import (
"bytes"
+ "io"
"path/filepath"
"strconv"
"strings"
@@ -61,6 +62,32 @@ Hint: Pass --add-cert to use the certificate of the current application
assert.Contains(t, stdout.String(), "Success: Triggered deployment")
}
+func TestDeployWait(t *testing.T) {
+ cli, stdout, _ := newTestCLI(t)
+ client := &mock.HTTPClient{}
+ cli.httpClient = client
+ cli.retryInterval = 0
+ pkg := "testdata/applications/withSource/src/main/application"
+ // Deploy service is initially unavailable
+ client.NextResponseError(io.EOF)
+ client.NextStatus(500)
+ client.NextStatus(500)
+ // ... then becomes healthy
+ client.NextStatus(200)
+ // Deployment succeeds
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v2/tenant/default/prepareandactivate",
+ Status: 200,
+ Body: []byte(`{"session-id": "1"}`),
+ })
+ mockServiceStatus(client, "foo") // Wait for deployment
+ mockServiceStatus(client, "foo") // Look up services
+ assert.Nil(t, cli.Run("deploy", "--wait=3", pkg))
+ assert.Equal(t,
+ "\nSuccess: Deployed "+pkg+" with session ID 1\n",
+ stdout.String())
+}
+
func TestPrepareZip(t *testing.T) {
assertPrepare("testdata/applications/withTarget/target/application.zip",
[]string{"prepare", "testdata/applications/withTarget/target/application.zip"}, t)
@@ -85,7 +112,7 @@ func TestDeployZipWithURLTargetArgument(t *testing.T) {
cli.httpClient = client
assert.Nil(t, cli.Run(arguments...))
assert.Equal(t,
- "\nSuccess: Deployed "+applicationPackage+"\n",
+ "\nSuccess: Deployed "+applicationPackage+" with session ID 0\n",
stdout.String())
assertDeployRequestMade("http://target:19071", client, t)
}
@@ -161,7 +188,7 @@ func assertDeploy(applicationPackage string, arguments []string, t *testing.T) {
cli.httpClient = client
assert.Nil(t, cli.Run(arguments...))
assert.Equal(t,
- "\nSuccess: Deployed "+applicationPackage+"\n",
+ "\nSuccess: Deployed "+applicationPackage+" with session ID 0\n",
stdout.String())
assertDeployRequestMade("http://127.0.0.1:19071", client, t)
}
@@ -194,7 +221,7 @@ func assertActivate(applicationPackage string, arguments []string, t *testing.T)
}
assert.Nil(t, cli.Run(arguments...))
assert.Equal(t,
- "Success: Activated "+applicationPackage+" with session 42\n",
+ "Success: Activated application with session 42\n",
stdout.String())
url := "http://127.0.0.1:19071/application/v2/tenant/default/session/42/active"
assert.Equal(t, url, client.LastRequest.URL.String())
diff --git a/client/go/internal/cli/cmd/destroy.go b/client/go/internal/cli/cmd/destroy.go
index 316eb6022ca..ca69f21a9b4 100644
--- a/client/go/internal/cli/cmd/destroy.go
+++ b/client/go/internal/cli/cmd/destroy.go
@@ -22,9 +22,13 @@ When run interactively, the command will prompt for confirmation before
removing the application. When run non-interactively, the command will refuse
to remove the application unless the --force option is given.
-This command cannot be used to remove production deployments in Vespa Cloud. See
-https://cloud.vespa.ai/en/deleting-applications for how to remove production
-deployments.
+This command can only be used to remove non-production deployments. See
+https://cloud.vespa.ai/en/deleting-applications for how to remove
+production deployments. This command can only be used for deployments to
+Vespa Cloud, for other systems destroy an application by cleaning up
+containers in use by the application, see e.g
+https://github.com/vespa-engine/sample-apps/tree/master/examples/operations/multinode-HA#clean-up-after-testing
+
`,
Example: `$ vespa destroy
$ vespa destroy -a mytenant.myapp.myinstance
@@ -36,9 +40,10 @@ $ vespa destroy --force`,
if err != nil {
return err
}
- description := "current deployment"
- if target.IsCloud() {
- description = target.Deployment().String()
+ description := target.Deployment().String()
+ if !target.IsCloud() {
+ return errHint(fmt.Errorf("cannot remove deployment, only supported for Vespa Cloud"))
+ } else {
env := target.Deployment().Zone.Environment
if env != "dev" && env != "perf" {
return errHint(fmt.Errorf("cannot remove production %s", description), "See https://cloud.vespa.ai/en/deleting-applications")
@@ -46,7 +51,7 @@ $ vespa destroy --force`,
}
ok := force
if !ok {
- cli.printWarning(fmt.Sprintf("This operation will irrecoverably remove %s and all of its data", color.RedString(description)))
+ cli.printWarning(fmt.Sprintf("This operation will irrecoverably remove the %s and all of its data", color.RedString(description)))
ok, _ = cli.confirm("Proceed with removal?", false)
}
if ok {
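
The destroy command now rejects non-cloud targets outright and still refuses production zones for cloud targets. A compact sketch of that guard, using plain arguments and a hypothetical canDestroy function rather than the CLI's vespa.Target:

package main

import "fmt"

// canDestroy mirrors the new guard: removal is only supported for Vespa
// Cloud targets, and only in the dev and perf environments.
func canDestroy(isCloud bool, environment string) error {
	if !isCloud {
		return fmt.Errorf("cannot remove deployment, only supported for Vespa Cloud")
	}
	if environment != "dev" && environment != "perf" {
		return fmt.Errorf("cannot remove production deployment in %s", environment)
	}
	return nil
}

func main() {
	fmt.Println(canDestroy(true, "dev"))  // <nil>
	fmt.Println(canDestroy(true, "prod")) // production deployments must be removed via the console
	fmt.Println(canDestroy(false, "dev")) // only supported for Vespa Cloud
}
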
diff --git a/client/go/internal/cli/cmd/destroy_test.go b/client/go/internal/cli/cmd/destroy_test.go
index c6198b9b877..b23e524e0ab 100644
--- a/client/go/internal/cli/cmd/destroy_test.go
+++ b/client/go/internal/cli/cmd/destroy_test.go
@@ -19,33 +19,42 @@ func TestDestroy(t *testing.T) {
var buf bytes.Buffer
cli.Stdin = &buf
+ require.Nil(t, cli.Run("config", "set", "target", "cloud"))
+ require.Nil(t, cli.Run("config", "set", "application", "foo.bar.baz"))
+ require.Nil(t, cli.Run("auth", "api-key"))
+
// No removal without confirmation
+ stdout.Reset()
+ stderr.Reset()
buf.WriteString("\n")
- require.NotNil(t, cli.Run("destroy"))
- warning := "Warning: This operation will irrecoverably remove current deployment and all of its data"
+ require.NotNil(t, cli.Run("destroy", "-z", "dev.aws-us-east-1c"))
+ warning := "Warning: This operation will irrecoverably remove the deployment of foo.bar.baz in dev.aws-us-east-1c and all of its data"
confirmation := "Proceed with removal? [y/N] "
- assert.Equal(t, warning+"\nError: refusing to remove current deployment without confirmation\n", stderr.String())
+ assert.Equal(t, warning+"\nError: refusing to remove deployment of foo.bar.baz in dev.aws-us-east-1c without confirmation\n", stderr.String())
assert.Equal(t, confirmation, stdout.String())
// Removes deployment with confirmation
stdout.Reset()
stderr.Reset()
buf.WriteString("y\n")
- require.Nil(t, cli.Run("destroy"))
- success := "Success: Removed current deployment\n"
+ require.Nil(t, cli.Run("destroy", "-z", "dev.aws-us-east-1c"))
+ success := "Success: Removed deployment of foo.bar.baz in dev.aws-us-east-1c\n"
assert.Equal(t, confirmation+success, stdout.String())
// Force flag always removes deployment
stdout.Reset()
stderr.Reset()
- require.Nil(t, cli.Run("destroy", "--force"))
+ require.Nil(t, cli.Run("destroy", "-z", "dev.aws-us-east-1c", "--force"))
assert.Equal(t, success, stdout.String())
- // Cannot remove prod deployment in Vespa Cloud
+ // Cannot remove a prod deployment
+ require.NotNil(t, cli.Run("destroy", "-z", "prod.aws-us-east-1c"))
+ assert.Equal(t, "Error: cannot remove production deployment of foo.bar.baz in prod.aws-us-east-1c\nHint: See https://cloud.vespa.ai/en/deleting-applications\n", stderr.String())
+
+ // Cannot remove a local deployment at all
stderr.Reset()
- require.Nil(t, cli.Run("config", "set", "target", "cloud"))
+ require.Nil(t, cli.Run("config", "set", "target", "local"))
require.Nil(t, cli.Run("config", "set", "application", "foo.bar.baz"))
- require.Nil(t, cli.Run("auth", "api-key"))
require.NotNil(t, cli.Run("destroy", "-z", "prod.aws-us-east-1c"))
- assert.Equal(t, "Error: cannot remove production deployment of foo.bar.baz in prod.aws-us-east-1c\nHint: See https://cloud.vespa.ai/en/deleting-applications\n", stderr.String())
+ assert.Equal(t, "Error: cannot remove deployment, only supported for Vespa Cloud\n", stderr.String())
}
diff --git a/client/go/internal/cli/cmd/document.go b/client/go/internal/cli/cmd/document.go
index c31f8c34d14..6c46baa297a 100644
--- a/client/go/internal/cli/cmd/document.go
+++ b/client/go/internal/cli/cmd/document.go
@@ -171,7 +171,7 @@ https://docs.vespa.ai/en/reference/document-json-format.html#document-operations
When this returns successfully, the document is guaranteed to be visible in any
subsequent get or query operation.
-To feed with high throughput, https://docs.vespa.ai/en/vespa-feed-client.html
+To feed with high throughput, https://docs.vespa.ai/en/reference/vespa-cli/vespa_feed.html
should be used instead of this.`,
Example: `$ vespa document src/test/resources/A-Head-Full-of-Dreams.json`,
DisableAutoGenTag: true,
@@ -298,7 +298,8 @@ func documentService(cli *CLI, waitSecs int) (*vespa.Service, error) {
if err != nil {
return nil, err
}
- return cli.service(target, vespa.DocumentService, 0, cli.config.cluster(), time.Duration(waitSecs)*time.Second)
+ waiter := cli.waiter(false, time.Duration(waitSecs)*time.Second)
+ return waiter.Service(target, cli.config.cluster())
}
func printResult(cli *CLI, result util.OperationResult, payloadOnlyOnSuccess bool) error {
diff --git a/client/go/internal/cli/cmd/document_test.go b/client/go/internal/cli/cmd/document_test.go
index bce81da91c5..64319296299 100644
--- a/client/go/internal/cli/cmd/document_test.go
+++ b/client/go/internal/cli/cmd/document_test.go
@@ -79,7 +79,7 @@ func TestDocumentRemoveWithoutIdArgVerbose(t *testing.T) {
func TestDocumentSendMissingId(t *testing.T) {
cli, _, stderr := newTestCLI(t)
- assert.NotNil(t, cli.Run("document", "put", "testdata/A-Head-Full-of-Dreams-Without-Operation.json"))
+ assert.NotNil(t, cli.Run("-t", "http://127.0.0.1:8080", "document", "put", "testdata/A-Head-Full-of-Dreams-Without-Operation.json"))
assert.Equal(t,
"Error: no document id given neither as argument or as a 'put', 'update' or 'remove' key in the JSON file\n",
stderr.String())
@@ -87,7 +87,7 @@ func TestDocumentSendMissingId(t *testing.T) {
func TestDocumentSendWithDisagreeingOperations(t *testing.T) {
cli, _, stderr := newTestCLI(t)
- assert.NotNil(t, cli.Run("document", "update", "testdata/A-Head-Full-of-Dreams-Put.json"))
+ assert.NotNil(t, cli.Run("-t", "http://127.0.0.1:8080", "document", "update", "testdata/A-Head-Full-of-Dreams-Put.json"))
assert.Equal(t,
"Error: wanted document operation is update, but JSON file specifies put\n",
stderr.String())
@@ -110,20 +110,20 @@ func TestDocumentGet(t *testing.T) {
"id:mynamespace:music::a-head-full-of-dreams", t)
}
-func assertDocumentSend(arguments []string, expectedOperation string, expectedMethod string, expectedDocumentId string, expectedPayloadFile string, t *testing.T) {
+func assertDocumentSend(args []string, expectedOperation string, expectedMethod string, expectedDocumentId string, expectedPayloadFile string, t *testing.T) {
+ t.Helper()
client := &mock.HTTPClient{}
cli, stdout, stderr := newTestCLI(t)
cli.httpClient = client
- documentURL, err := documentServiceURL(client)
- if err != nil {
- t.Fatal(err)
- }
+ documentURL := "http://127.0.0.1:8080"
expectedPath, _ := vespa.IdToURLPath(expectedDocumentId)
expectedURL := documentURL + "/document/v1/" + expectedPath + "?timeout=60000ms"
- assert.Nil(t, cli.Run(arguments...))
+ finalArgs := []string{"-t", documentURL}
+ finalArgs = append(finalArgs, args...)
+ assert.Nil(t, cli.Run(finalArgs...))
verbose := false
- for _, a := range arguments {
+ for _, a := range args {
if a == "-v" {
verbose = true
}
@@ -154,16 +154,15 @@ func assertDocumentSend(arguments []string, expectedOperation string, expectedMe
}
}
-func assertDocumentGet(arguments []string, documentId string, t *testing.T) {
+func assertDocumentGet(args []string, documentId string, t *testing.T) {
client := &mock.HTTPClient{}
- documentURL, err := documentServiceURL(client)
- if err != nil {
- t.Fatal(err)
- }
+ documentURL := "http://127.0.0.1:8080"
client.NextResponseString(200, "{\"fields\":{\"foo\":\"bar\"}}")
cli, stdout, _ := newTestCLI(t)
cli.httpClient = client
- assert.Nil(t, cli.Run(arguments...))
+ finalArgs := []string{"-t", documentURL}
+ finalArgs = append(finalArgs, args...)
+ assert.Nil(t, cli.Run(finalArgs...))
assert.Equal(t,
`{
"fields": {
@@ -182,7 +181,7 @@ func assertDocumentTransportError(t *testing.T, errorMessage string) {
client.NextResponseError(fmt.Errorf(errorMessage))
cli, _, stderr := newTestCLI(t)
cli.httpClient = client
- assert.NotNil(t, cli.Run("document", "put",
+ assert.NotNil(t, cli.Run("-t", "http://127.0.0.1:8080", "document", "put",
"id:mynamespace:music::a-head-full-of-dreams",
"testdata/A-Head-Full-of-Dreams-Put.json"))
assert.Equal(t,
@@ -195,7 +194,7 @@ func assertDocumentError(t *testing.T, status int, errorMessage string) {
client.NextResponseString(status, errorMessage)
cli, _, stderr := newTestCLI(t)
cli.httpClient = client
- assert.NotNil(t, cli.Run("document", "put",
+ assert.NotNil(t, cli.Run("-t", "http://127.0.0.1:8080", "document", "put",
"id:mynamespace:music::a-head-full-of-dreams",
"testdata/A-Head-Full-of-Dreams-Put.json"))
assert.Equal(t,
@@ -208,14 +207,10 @@ func assertDocumentServerError(t *testing.T, status int, errorMessage string) {
client.NextResponseString(status, errorMessage)
cli, _, stderr := newTestCLI(t)
cli.httpClient = client
- assert.NotNil(t, cli.Run("document", "put",
+ assert.NotNil(t, cli.Run("-t", "http://127.0.0.1:8080", "document", "put",
"id:mynamespace:music::a-head-full-of-dreams",
"testdata/A-Head-Full-of-Dreams-Put.json"))
assert.Equal(t,
- "Error: Container (document API) at http://127.0.0.1:8080: Status "+strconv.Itoa(status)+"\n\n"+errorMessage+"\n",
+ "Error: container at http://127.0.0.1:8080: Status "+strconv.Itoa(status)+"\n\n"+errorMessage+"\n",
stderr.String())
}
-
-func documentServiceURL(client *mock.HTTPClient) (string, error) {
- return "http://127.0.0.1:8080", nil
-}
diff --git a/client/go/internal/cli/cmd/feed.go b/client/go/internal/cli/cmd/feed.go
index 7d4b9cc8042..8b8589baec3 100644
--- a/client/go/internal/cli/cmd/feed.go
+++ b/client/go/internal/cli/cmd/feed.go
@@ -12,7 +12,6 @@ import (
"github.com/spf13/cobra"
"github.com/vespa-engine/vespa/client/go/internal/util"
- "github.com/vespa-engine/vespa/client/go/internal/vespa"
"github.com/vespa-engine/vespa/client/go/internal/vespa/document"
)
@@ -57,17 +56,17 @@ type feedOptions struct {
func newFeedCmd(cli *CLI) *cobra.Command {
var options feedOptions
cmd := &cobra.Command{
- Use: "feed FILE [FILE]...",
+ Use: "feed json-file [json-file]...",
Short: "Feed multiple document operations to a Vespa cluster",
Long: `Feed multiple document operations to a Vespa cluster.
This command can be used to feed large amounts of documents to a Vespa cluster
efficiently.
-The contents of FILE must be either a JSON array or JSON objects separated by
+The contents of JSON-FILE must be either a JSON array or JSON objects separated by
newline (JSONL).
-If FILE is a single dash ('-'), documents will be read from standard input.
+If JSON-FILE is a single dash ('-'), documents will be read from standard input.
`,
Example: `$ vespa feed docs.jsonl moredocs.json
$ cat docs.jsonl | vespa feed -`,
@@ -108,8 +107,9 @@ func createServices(n int, timeout time.Duration, waitSecs int, cli *CLI) ([]uti
}
services := make([]util.HTTPClient, 0, n)
baseURL := ""
+ waiter := cli.waiter(false, time.Duration(waitSecs)*time.Second)
for i := 0; i < n; i++ {
- service, err := cli.service(target, vespa.DocumentService, 0, cli.config.cluster(), time.Duration(waitSecs)*time.Second)
+ service, err := waiter.Service(target, cli.config.cluster())
if err != nil {
return nil, "", err
}
diff --git a/client/go/internal/cli/cmd/feed_test.go b/client/go/internal/cli/cmd/feed_test.go
index fc2c5ec7520..84328cad5fb 100644
--- a/client/go/internal/cli/cmd/feed_test.go
+++ b/client/go/internal/cli/cmd/feed_test.go
@@ -43,7 +43,7 @@ func TestFeed(t *testing.T) {
httpClient.NextResponseString(200, `{"message":"OK"}`)
httpClient.NextResponseString(200, `{"message":"OK"}`)
- require.Nil(t, cli.Run("feed", jsonFile1, jsonFile2))
+ require.Nil(t, cli.Run("feed", "-t", "http://127.0.0.1:8080", jsonFile1, jsonFile2))
assert.Equal(t, "", stderr.String())
want := `{
@@ -113,7 +113,7 @@ func TestFeedInvalid(t *testing.T) {
jsonFile := filepath.Join(td, "docs.jsonl")
require.Nil(t, os.WriteFile(jsonFile, doc, 0644))
httpClient.NextResponseString(200, `{"message":"OK"}`)
- require.NotNil(t, cli.Run("feed", jsonFile))
+ require.NotNil(t, cli.Run("feed", "-t", "http://127.0.0.1:8080", jsonFile))
want := `{
"feeder.seconds": 3.000,
diff --git a/client/go/internal/cli/cmd/prod.go b/client/go/internal/cli/cmd/prod.go
index 3b37197340f..79a6907eef2 100644
--- a/client/go/internal/cli/cmd/prod.go
+++ b/client/go/internal/cli/cmd/prod.go
@@ -114,7 +114,7 @@ type prodDeployOptions struct {
func newProdDeployCmd(cli *CLI) *cobra.Command {
var options prodDeployOptions
cmd := &cobra.Command{
- Use: "deploy",
+ Use: "deploy [application-directory-or-file]",
Aliases: []string{"submit"}, // TODO: Remove in Vespa 9
Short: "Deploy an application to production",
Long: `Deploy an application to production.
diff --git a/client/go/internal/cli/cmd/query.go b/client/go/internal/cli/cmd/query.go
index a5b35052b11..3f849fae99e 100644
--- a/client/go/internal/cli/cmd/query.go
+++ b/client/go/internal/cli/cmd/query.go
@@ -64,7 +64,8 @@ func query(cli *CLI, arguments []string, timeoutSecs, waitSecs int, curl bool) e
if err != nil {
return err
}
- service, err := cli.service(target, vespa.QueryService, 0, cli.config.cluster(), time.Duration(waitSecs)*time.Second)
+ waiter := cli.waiter(false, time.Duration(waitSecs)*time.Second)
+ service, err := waiter.Service(target, cli.config.cluster())
if err != nil {
return err
}
diff --git a/client/go/internal/cli/cmd/query_test.go b/client/go/internal/cli/cmd/query_test.go
index 1caf6d33e70..6d5adc0508e 100644
--- a/client/go/internal/cli/cmd/query_test.go
+++ b/client/go/internal/cli/cmd/query_test.go
@@ -26,7 +26,7 @@ func TestQueryVerbose(t *testing.T) {
cli, stdout, stderr := newTestCLI(t)
cli.httpClient = client
- assert.Nil(t, cli.Run("query", "-v", "select from sources * where title contains 'foo'"))
+ assert.Nil(t, cli.Run("-t", "http://127.0.0.1:8080", "query", "-v", "select from sources * where title contains 'foo'"))
assert.Equal(t, "curl 'http://127.0.0.1:8080/search/?timeout=10s&yql=select+from+sources+%2A+where+title+contains+%27foo%27'\n", stderr.String())
assert.Equal(t, "{\n \"query\": \"result\"\n}\n", stdout.String())
}
@@ -75,7 +75,7 @@ func assertQuery(t *testing.T, expectedQuery string, query ...string) {
cli, stdout, _ := newTestCLI(t)
cli.httpClient = client
- args := []string{"query"}
+ args := []string{"-t", "http://127.0.0.1:8080", "query"}
assert.Nil(t, cli.Run(append(args, query...)...))
assert.Equal(t,
"{\n \"query\": \"result\"\n}\n",
@@ -91,7 +91,7 @@ func assertQueryError(t *testing.T, status int, errorMessage string) {
client.NextResponseString(status, errorMessage)
cli, _, stderr := newTestCLI(t)
cli.httpClient = client
- assert.NotNil(t, cli.Run("query", "yql=select from sources * where title contains 'foo'"))
+ assert.NotNil(t, cli.Run("-t", "http://127.0.0.1:8080", "query", "yql=select from sources * where title contains 'foo'"))
assert.Equal(t,
"Error: invalid query: Status "+strconv.Itoa(status)+"\n"+errorMessage+"\n",
stderr.String(),
@@ -103,7 +103,7 @@ func assertQueryServiceError(t *testing.T, status int, errorMessage string) {
client.NextResponseString(status, errorMessage)
cli, _, stderr := newTestCLI(t)
cli.httpClient = client
- assert.NotNil(t, cli.Run("query", "yql=select from sources * where title contains 'foo'"))
+ assert.NotNil(t, cli.Run("-t", "http://127.0.0.1:8080", "query", "yql=select from sources * where title contains 'foo'"))
assert.Equal(t,
"Error: Status "+strconv.Itoa(status)+" from container at 127.0.0.1:8080\n"+errorMessage+"\n",
stderr.String(),
diff --git a/client/go/internal/cli/cmd/root.go b/client/go/internal/cli/cmd/root.go
index ad42ea588b0..69fd88c1b2b 100644
--- a/client/go/internal/cli/cmd/root.go
+++ b/client/go/internal/cli/cmd/root.go
@@ -43,6 +43,13 @@ type CLI struct {
Stdout io.Writer
Stderr io.Writer
+ exec executor
+ isTerminal func() bool
+ spinner func(w io.Writer, message string, fn func() error) error
+
+ now func() time.Time
+ retryInterval time.Duration
+
cmd *cobra.Command
config *Config
version version.Version
@@ -51,10 +58,6 @@ type CLI struct {
httpClientFactory func(timeout time.Duration) util.HTTPClient
auth0Factory auth0Factory
ztsFactory ztsFactory
- exec executor
- isTerminal func() bool
- spinner func(w io.Writer, message string, fn func() error) error
- now func() time.Time
}
// ErrCLI is an error returned to the user. It wraps an exit status, a regular error and optional hints for resolving
@@ -100,7 +103,7 @@ type ztsFactory func(httpClient util.HTTPClient, domain, url string) (vespa.Auth
// New creates the Vespa CLI, writing output to stdout and stderr, and reading environment variables from environment.
func New(stdout, stderr io.Writer, environment []string) (*CLI, error) {
cmd := &cobra.Command{
- Use: "vespa command-name",
+ Use: "vespa",
Short: "The command-line tool for Vespa.ai",
Long: `The command-line tool for Vespa.ai.
@@ -134,12 +137,15 @@ For detailed description of flags and configuration, see 'vespa help config'.
Stdout: stdout,
Stderr: stderr,
- version: version,
- cmd: cmd,
+ exec: &execSubprocess{},
+ now: time.Now,
+ retryInterval: 2 * time.Second,
+
+ version: version,
+ cmd: cmd,
+
httpClient: httpClientFactory(time.Second * 10),
httpClientFactory: httpClientFactory,
- exec: &execSubprocess{},
- now: time.Now,
auth0Factory: func(httpClient util.HTTPClient, options auth0.Options) (vespa.Authenticator, error) {
return auth0.NewClient(httpClient, options)
},
@@ -267,9 +273,8 @@ func (c *CLI) configureCommands() {
prodCmd.AddCommand(newProdDeployCmd(c)) // prod deploy
rootCmd.AddCommand(prodCmd) // prod
rootCmd.AddCommand(newQueryCmd(c)) // query
- statusCmd.AddCommand(newStatusQueryCmd(c)) // status query
- statusCmd.AddCommand(newStatusDocumentCmd(c)) // status document
statusCmd.AddCommand(newStatusDeployCmd(c)) // status deploy
+ statusCmd.AddCommand(newStatusDeploymentCmd(c)) // status deployment
rootCmd.AddCommand(statusCmd) // status
rootCmd.AddCommand(newTestCmd(c)) // test
rootCmd.AddCommand(newVersionCmd(c)) // version
@@ -278,7 +283,7 @@ func (c *CLI) configureCommands() {
}
func (c *CLI) bindWaitFlag(cmd *cobra.Command, defaultSecs int, value *int) {
- desc := "Number of seconds to wait for a service to become ready. 0 to disable"
+ desc := "Number of seconds to wait for service(s) to become ready. 0 to disable"
if defaultSecs == 0 {
desc += " (default 0)"
}
@@ -296,6 +301,10 @@ func (c *CLI) printSuccess(msg ...interface{}) {
fmt.Fprintln(c.Stdout, color.GreenString("Success:"), fmt.Sprint(msg...))
}
+func (c *CLI) printInfo(msg ...interface{}) {
+ fmt.Fprintln(c.Stderr, fmt.Sprint(msg...))
+}
+
func (c *CLI) printDebug(msg ...interface{}) {
fmt.Fprintln(c.Stderr, color.CyanString("Debug:"), fmt.Sprint(msg...))
}
@@ -334,6 +343,10 @@ func (c *CLI) confirm(question string, confirmByDefault bool) (bool, error) {
}
}
+func (c *CLI) waiter(once bool, timeout time.Duration) *Waiter {
+ return &Waiter{Once: once, Timeout: timeout, cli: c}
+}
+
// target creates a target according to the configuration of this CLI and given opts.
func (c *CLI) target(opts targetOptions) (vespa.Target, error) {
targetType, err := c.targetType()
@@ -402,9 +415,9 @@ func (c *CLI) createCustomTarget(targetType, customURL string) (vespa.Target, er
}
switch targetType {
case vespa.TargetLocal:
- return vespa.LocalTarget(c.httpClient, tlsOptions), nil
+ return vespa.LocalTarget(c.httpClient, tlsOptions, c.retryInterval), nil
case vespa.TargetCustom:
- return vespa.CustomTarget(c.httpClient, customURL, tlsOptions), nil
+ return vespa.CustomTarget(c.httpClient, customURL, tlsOptions, c.retryInterval), nil
default:
return nil, fmt.Errorf("invalid custom target: %s", targetType)
}
@@ -486,7 +499,7 @@ func (c *CLI) createCloudTarget(targetType string, opts targetOptions, customURL
Writer: c.Stdout,
Level: vespa.LogLevel(logLevel),
}
- return vespa.CloudTarget(c.httpClient, apiAuth, deploymentAuth, apiOptions, deploymentOptions, logOptions)
+ return vespa.CloudTarget(c.httpClient, apiAuth, deploymentAuth, apiOptions, deploymentOptions, logOptions, c.retryInterval)
}
// system returns the appropriate system for the target configured in this CLI.
@@ -504,24 +517,6 @@ func (c *CLI) system(targetType string) (vespa.System, error) {
return vespa.System{}, fmt.Errorf("no default system found for %s target", targetType)
}
-// service returns the service of given name located at target. If non-empty, cluster specifies a cluster to query. This
-// function blocks according to the wait period configured in this CLI. The parameter sessionOrRunID specifies either
-// the session ID (local target) or run ID (cloud target) to wait for.
-func (c *CLI) service(target vespa.Target, name string, sessionOrRunID int64, cluster string, timeout time.Duration) (*vespa.Service, error) {
- if timeout > 0 {
- log.Printf("Waiting up to %s for %s service to become available ...", color.CyanString(timeout.String()), color.CyanString(name))
- }
- s, err := target.Service(name, timeout, sessionOrRunID, cluster)
- if err != nil {
- err := fmt.Errorf("service '%s' is unavailable: %w", name, err)
- if target.IsCloud() {
- return nil, errHint(err, "Confirm that you're communicating with the correct zone and cluster", "The -z option controls the zone", "The -C option controls the cluster")
- }
- return nil, err
- }
- return s, nil
-}
-
// isCI returns true if running inside a continuous integration environment.
func (c *CLI) isCI() bool {
_, ok := c.Environment["CI"]
diff --git a/client/go/internal/cli/cmd/status.go b/client/go/internal/cli/cmd/status.go
index 6570aeff448..b88db6e0d0b 100644
--- a/client/go/internal/cli/cmd/status.go
+++ b/client/go/internal/cli/cmd/status.go
@@ -7,6 +7,8 @@ package cmd
import (
"fmt"
"log"
+ "strconv"
+ "strings"
"time"
"github.com/fatih/color"
@@ -17,94 +19,123 @@ import (
func newStatusCmd(cli *CLI) *cobra.Command {
var waitSecs int
cmd := &cobra.Command{
- Use: "status",
- Short: "Verify that a service is ready to use (query by default)",
- Example: `$ vespa status query`,
- DisableAutoGenTag: true,
- SilenceUsage: true,
- Args: cobra.MaximumNArgs(1),
- RunE: func(cmd *cobra.Command, args []string) error {
- return printServiceStatus(cli, vespa.QueryService, waitSecs)
+ Use: "status",
+ Aliases: []string{
+ "status container",
+ "status document", // TODO: Remove on Vespa 9
+ "status query", // TODO: Remove on Vespa 9
},
- }
- cli.bindWaitFlag(cmd, 0, &waitSecs)
- return cmd
-}
-
-func newStatusQueryCmd(cli *CLI) *cobra.Command {
- var waitSecs int
- cmd := &cobra.Command{
- Use: "query",
- Short: "Verify that the query service is ready to use (default)",
- Example: `$ vespa status query`,
+ Short: "Verify that container service(s) are ready to use",
+ Example: `$ vespa status
+$ vespa status --cluster mycluster`,
DisableAutoGenTag: true,
SilenceUsage: true,
- Args: cobra.ExactArgs(0),
+ Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
- return printServiceStatus(cli, vespa.QueryService, waitSecs)
+ cluster := cli.config.cluster()
+ t, err := cli.target(targetOptions{})
+ if err != nil {
+ return err
+ }
+ waiter := cli.waiter(true, time.Duration(waitSecs)*time.Second)
+ if cluster == "" {
+ services, err := waiter.Services(t)
+ if err != nil {
+ return err
+ }
+ if len(services) == 0 {
+ return errHint(fmt.Errorf("no services exist"), "Deployment may not be ready yet", "Try 'vespa status deployment'")
+ }
+ for _, s := range services {
+ printReadyService(s, cli)
+ }
+ return nil
+ } else {
+ s, err := waiter.Service(t, cluster)
+ if err != nil {
+ return err
+ }
+ printReadyService(s, cli)
+ return nil
+ }
},
}
cli.bindWaitFlag(cmd, 0, &waitSecs)
return cmd
}
-func newStatusDocumentCmd(cli *CLI) *cobra.Command {
+func newStatusDeployCmd(cli *CLI) *cobra.Command {
var waitSecs int
cmd := &cobra.Command{
- Use: "document",
- Short: "Verify that the document service is ready to use",
- Example: `$ vespa status document`,
+ Use: "deploy",
+ Short: "Verify that the deploy service is ready to use",
+ Example: `$ vespa status deploy`,
DisableAutoGenTag: true,
SilenceUsage: true,
Args: cobra.ExactArgs(0),
RunE: func(cmd *cobra.Command, args []string) error {
- return printServiceStatus(cli, vespa.DocumentService, waitSecs)
+ t, err := cli.target(targetOptions{})
+ if err != nil {
+ return err
+ }
+ waiter := cli.waiter(true, time.Duration(waitSecs)*time.Second)
+ s, err := waiter.DeployService(t)
+ if err != nil {
+ return err
+ }
+ printReadyService(s, cli)
+ return nil
},
}
cli.bindWaitFlag(cmd, 0, &waitSecs)
return cmd
}
-func newStatusDeployCmd(cli *CLI) *cobra.Command {
+func newStatusDeploymentCmd(cli *CLI) *cobra.Command {
var waitSecs int
cmd := &cobra.Command{
- Use: "deploy",
- Short: "Verify that the deploy service is ready to use",
- Example: `$ vespa status deploy`,
+ Use: "deployment",
+ Short: "Verify that deployment has converged on latest, or given, ID",
+ Example: `$ vespa status deployment
+$ vespa status deployment -t cloud [run-id]
+$ vespa status deployment -t local [session-id]
+`,
DisableAutoGenTag: true,
SilenceUsage: true,
- Args: cobra.ExactArgs(0),
+ Args: cobra.MaximumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
- return printServiceStatus(cli, vespa.DeployService, waitSecs)
+ wantedID := vespa.LatestDeployment
+ if len(args) > 0 {
+ n, err := strconv.ParseInt(args[0], 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid id: %s: %w", args[0], err)
+ }
+ wantedID = n
+ }
+ t, err := cli.target(targetOptions{logLevel: "none"})
+ if err != nil {
+ return err
+ }
+ waiter := cli.waiter(true, time.Duration(waitSecs)*time.Second)
+ id, err := waiter.Deployment(t, wantedID)
+ if err != nil {
+ return err
+ }
+ if t.IsCloud() {
+ log.Printf("Deployment run %s has completed", color.CyanString(strconv.FormatInt(id, 10)))
+ log.Printf("See %s for more details", color.CyanString(t.Deployment().System.ConsoleRunURL(t.Deployment(), id)))
+ } else {
+ log.Printf("Deployment is %s on config generation %s", color.GreenString("ready"), color.CyanString(strconv.FormatInt(id, 10)))
+ }
+ return nil
},
}
cli.bindWaitFlag(cmd, 0, &waitSecs)
return cmd
}
-func printServiceStatus(cli *CLI, name string, waitSecs int) error {
- t, err := cli.target(targetOptions{})
- if err != nil {
- return err
- }
- cluster := cli.config.cluster()
- s, err := cli.service(t, name, 0, cluster, 0)
- if err != nil {
- return err
- }
- // Wait explicitly
- status, err := s.Wait(time.Duration(waitSecs) * time.Second)
- clusterPart := ""
- if cluster != "" {
- clusterPart = fmt.Sprintf(" named %s", color.CyanString(cluster))
- }
- if status/100 == 2 {
- log.Print(s.Description(), clusterPart, " at ", color.CyanString(s.BaseURL), " is ", color.GreenString("ready"))
- } else {
- if err == nil {
- err = fmt.Errorf("status %d", status)
- }
- return fmt.Errorf("%s%s at %s is %s: %w", s.Description(), clusterPart, color.CyanString(s.BaseURL), color.RedString("not ready"), err)
- }
- return nil
+func printReadyService(s *vespa.Service, cli *CLI) {
+ desc := s.Description()
+ desc = strings.ToUpper(string(desc[0])) + string(desc[1:])
+ log.Print(desc, " at ", color.CyanString(s.BaseURL), " is ", color.GreenString("ready"))
}
diff --git a/client/go/internal/cli/cmd/status_test.go b/client/go/internal/cli/cmd/status_test.go
index 76efea55503..7e1e266f694 100644
--- a/client/go/internal/cli/cmd/status_test.go
+++ b/client/go/internal/cli/cmd/status_test.go
@@ -5,10 +5,12 @@
package cmd
import (
+ "io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/vespa-engine/vespa/client/go/internal/mock"
+ "github.com/vespa-engine/vespa/client/go/internal/vespa"
)
func TestStatusDeployCommand(t *testing.T) {
@@ -23,63 +25,197 @@ func TestStatusDeployCommandWithLocalTarget(t *testing.T) {
assertDeployStatus("http://127.0.0.1:19071", []string{"-t", "local"}, t)
}
-func TestStatusQueryCommand(t *testing.T) {
- assertQueryStatus("http://127.0.0.1:8080", []string{}, t)
+func TestStatusCommand(t *testing.T) {
+ assertStatus("http://127.0.0.1:8080", []string{}, t)
}
-func TestStatusQueryCommandWithUrlTarget(t *testing.T) {
- assertQueryStatus("http://mycontainertarget:8080", []string{"-t", "http://mycontainertarget:8080"}, t)
+func TestStatusCommandMultiCluster(t *testing.T) {
+ client := &mock.HTTPClient{}
+ cli, stdout, stderr := newTestCLI(t)
+ cli.httpClient = client
+ cli.retryInterval = 0
+
+ mockServiceStatus(client)
+ assert.NotNil(t, cli.Run("status"))
+ assert.Equal(t, "Error: no services exist\nHint: Deployment may not be ready yet\nHint: Try 'vespa status deployment'\n", stderr.String())
+
+ mockServiceStatus(client, "foo", "bar")
+ assert.Nil(t, cli.Run("status"))
+ assert.Equal(t, `Container bar at http://127.0.0.1:8080 is ready
+Container foo at http://127.0.0.1:8080 is ready
+`, stdout.String())
+
+ stdout.Reset()
+ mockServiceStatus(client, "foo", "bar")
+ assert.Nil(t, cli.Run("status", "--cluster", "foo"))
+ assert.Equal(t, "Container foo at http://127.0.0.1:8080 is ready\n", stdout.String())
}
-func TestStatusQueryCommandWithLocalTarget(t *testing.T) {
- assertQueryStatus("http://127.0.0.1:8080", []string{"-t", "local"}, t)
+func TestStatusCommandMultiClusterWait(t *testing.T) {
+ client := &mock.HTTPClient{}
+ cli, _, stderr := newTestCLI(t)
+ cli.httpClient = client
+ cli.retryInterval = 0
+ mockServiceStatus(client, "foo", "bar")
+ client.NextStatus(400)
+ assert.NotNil(t, cli.Run("status", "--cluster", "foo", "--wait", "10"))
+ assert.Equal(t, "Waiting up to 10s for cluster discovery...\nWaiting up to 10s for container foo...\n"+
+ "Error: unhealthy container foo after waiting up to 10s: status 400 at http://127.0.0.1:8080/ApplicationStatus: aborting wait: got status 400\n", stderr.String())
}
-func TestStatusDocumentCommandWithLocalTarget(t *testing.T) {
- assertDocumentStatus("http://127.0.0.1:8080", []string{"-t", "local"}, t)
+func TestStatusCommandWithUrlTarget(t *testing.T) {
+ assertStatus("http://mycontainertarget:8080", []string{"-t", "http://mycontainertarget:8080"}, t)
}
-func TestStatusErrorResponse(t *testing.T) {
- assertQueryStatusError("http://127.0.0.1:8080", []string{}, t)
+func TestStatusCommandWithLocalTarget(t *testing.T) {
+ assertStatus("http://127.0.0.1:8080", []string{"-t", "local"}, t)
}
-func assertDeployStatus(target string, args []string, t *testing.T) {
+func TestStatusError(t *testing.T) {
client := &mock.HTTPClient{}
+ mockServiceStatus(client, "default")
+ client.NextStatus(500)
+ cli, _, stderr := newTestCLI(t)
+ cli.httpClient = client
+ assert.NotNil(t, cli.Run("status", "container"))
+ assert.Equal(t,
+ "Error: unhealthy container default: status 500 at http://127.0.0.1:8080/ApplicationStatus: wait timed out\n",
+ stderr.String())
+
+ stderr.Reset()
+ client.NextResponseError(io.EOF)
+ assert.NotNil(t, cli.Run("status", "container", "-t", "http://example.com"))
+ assert.Equal(t,
+ "Error: unhealthy container at http://example.com/ApplicationStatus: EOF\n",
+ stderr.String())
+}
+
+func TestStatusLocalDeployment(t *testing.T) {
+ client := &mock.HTTPClient{}
+ cli, stdout, stderr := newTestCLI(t)
+ cli.httpClient = client
+ resp := mock.HTTPResponse{
+ URI: "/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge",
+ Status: 200,
+ }
+ // Latest generation
+ resp.Body = []byte(`{"currentGeneration": 42, "converged": true}`)
+ client.NextResponse(resp)
+ assert.Nil(t, cli.Run("status", "deployment"))
+ assert.Equal(t, "", stderr.String())
+ assert.Equal(t, "Deployment is ready on config generation 42\n", stdout.String())
+
+ // Latest generation without convergence
+ resp.Body = []byte(`{"currentGeneration": 42, "converged": false}`)
+ client.NextResponse(resp)
+ assert.NotNil(t, cli.Run("status", "deployment"))
+ assert.Equal(t, "Error: deployment not converged on latest generation: wait timed out\n", stderr.String())
+
+ // Explicit generation
+ stderr.Reset()
+ client.NextResponse(resp)
+ assert.NotNil(t, cli.Run("status", "deployment", "41"))
+ assert.Equal(t, "Error: deployment not converged on generation 41: wait timed out\n", stderr.String())
+}
+
+func TestStatusCloudDeployment(t *testing.T) {
+ cli, stdout, stderr := newTestCLI(t, "CI=true")
+ app := vespa.ApplicationID{Tenant: "t1", Application: "a1", Instance: "i1"}
+ assert.Nil(t, cli.Run("config", "set", "application", app.String()))
+ assert.Nil(t, cli.Run("config", "set", "target", "cloud"))
+ assert.Nil(t, cli.Run("config", "set", "zone", "dev.us-north-1"))
+ assert.Nil(t, cli.Run("auth", "api-key"))
+ stdout.Reset()
+ client := &mock.HTTPClient{}
+ cli.httpClient = client
+ // Latest run
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1?limit=1",
+ Status: 200,
+ Body: []byte(`{"runs": [{"id": 1337}]}`),
+ })
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1/run/1337?after=-1",
+ Status: 200,
+ Body: []byte(`{"active": false, "status": "success"}`),
+ })
+ assert.Nil(t, cli.Run("status", "deployment"))
+ assert.Equal(t, "", stderr.String())
+ assert.Equal(t,
+ "Deployment run 1337 has completed\nSee https://console.vespa-cloud.com/tenant/t1/application/a1/dev/instance/i1/job/dev-us-north-1/run/1337 for more details\n",
+ stdout.String())
+ // Explicit run with waiting
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1/run/42?after=-1",
+ Status: 200,
+ Body: []byte(`{"active": false, "status": "failure"}`),
+ })
+ assert.NotNil(t, cli.Run("status", "deployment", "42", "-w", "10"))
+ assert.Equal(t, "Waiting up to 10s for deployment to converge...\nError: deployment run 42 incomplete after waiting up to 10s: aborting wait: run 42 ended with unsuccessful status: failure\n", stderr.String())
+}
+
+func isLocalTarget(args []string) bool {
+ for i := 0; i < len(args)-1; i++ {
+ if args[i] == "-t" {
+ return args[i+1] == "local"
+ }
+ }
+ return true // local is default
+}
+
+func assertDeployStatus(expectedTarget string, args []string, t *testing.T) {
+ t.Helper()
+ client := &mock.HTTPClient{}
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/status.html",
+ Status: 200,
+ })
cli, stdout, _ := newTestCLI(t)
cli.httpClient = client
statusArgs := []string{"status", "deploy"}
assert.Nil(t, cli.Run(append(statusArgs, args...)...))
assert.Equal(t,
- "Deploy API at "+target+" is ready\n",
- stdout.String(),
- "vespa status config-server")
- assert.Equal(t, target+"/status.html", client.LastRequest.URL.String())
+ "Deploy API at "+expectedTarget+" is ready\n",
+ stdout.String())
+ assert.Equal(t, expectedTarget+"/status.html", client.LastRequest.URL.String())
}
-func assertQueryStatus(target string, args []string, t *testing.T) {
+func assertStatus(expectedTarget string, args []string, t *testing.T) {
+ t.Helper()
client := &mock.HTTPClient{}
+ clusterName := ""
+ for i := 0; i < 2; i++ {
+ if isLocalTarget(args) {
+ clusterName = "foo"
+ mockServiceStatus(client, clusterName)
+ }
+ client.NextResponse(mock.HTTPResponse{URI: "/ApplicationStatus", Status: 200})
+ }
cli, stdout, _ := newTestCLI(t)
cli.httpClient = client
- statusArgs := []string{"status", "query"}
+ statusArgs := []string{"status"}
assert.Nil(t, cli.Run(append(statusArgs, args...)...))
- assert.Equal(t,
- "Container (query API) at "+target+" is ready\n",
- stdout.String(),
- "vespa status container")
- assert.Equal(t, target+"/ApplicationStatus", client.LastRequest.URL.String())
-
- statusArgs = []string{"status"}
+ prefix := "Container"
+ if clusterName != "" {
+ prefix += " " + clusterName
+ }
+ assert.Equal(t, prefix+" at "+expectedTarget+" is ready\n", stdout.String())
+ assert.Equal(t, expectedTarget+"/ApplicationStatus", client.LastRequest.URL.String())
+
+ // Test legacy command
+ statusArgs = []string{"status query"}
stdout.Reset()
assert.Nil(t, cli.Run(append(statusArgs, args...)...))
- assert.Equal(t,
- "Container (query API) at "+target+" is ready\n",
- stdout.String(),
- "vespa status (the default)")
- assert.Equal(t, target+"/ApplicationStatus", client.LastRequest.URL.String())
+ assert.Equal(t, prefix+" at "+expectedTarget+" is ready\n", stdout.String())
+ assert.Equal(t, expectedTarget+"/ApplicationStatus", client.LastRequest.URL.String())
}
func assertDocumentStatus(target string, args []string, t *testing.T) {
+ t.Helper()
client := &mock.HTTPClient{}
+ if isLocalTarget(args) {
+ mockServiceStatus(client, "default")
+ }
cli, stdout, _ := newTestCLI(t)
cli.httpClient = client
assert.Nil(t, cli.Run("status", "document"))
@@ -89,15 +225,3 @@ func assertDocumentStatus(target string, args []string, t *testing.T) {
"vespa status container")
assert.Equal(t, target+"/ApplicationStatus", client.LastRequest.URL.String())
}
-
-func assertQueryStatusError(target string, args []string, t *testing.T) {
- client := &mock.HTTPClient{}
- client.NextStatus(500)
- cli, _, stderr := newTestCLI(t)
- cli.httpClient = client
- assert.NotNil(t, cli.Run("status", "container"))
- assert.Equal(t,
- "Error: Container (query API) at "+target+" is not ready: status 500\n",
- stderr.String(),
- "vespa status container")
-}
diff --git a/client/go/internal/cli/cmd/test.go b/client/go/internal/cli/cmd/test.go
index abee760efbb..5b99973d879 100644
--- a/client/go/internal/cli/cmd/test.go
+++ b/client/go/internal/cli/cmd/test.go
@@ -79,7 +79,7 @@ func runTests(cli *CLI, rootPath string, dryRun bool, waitSecs int) (int, []stri
if err != nil {
return 0, nil, errHint(err, "See https://docs.vespa.ai/en/reference/testing")
}
- context := testContext{testsPath: rootPath, dryRun: dryRun, cli: cli}
+ context := testContext{testsPath: rootPath, dryRun: dryRun, cli: cli, clusters: map[string]*vespa.Service{}}
previousFailed := false
for _, test := range tests {
if !test.IsDir() && filepath.Ext(test.Name()) == ".json" {
@@ -100,7 +100,7 @@ func runTests(cli *CLI, rootPath string, dryRun bool, waitSecs int) (int, []stri
}
}
} else if strings.HasSuffix(stat.Name(), ".json") {
- failure, err := runTest(rootPath, testContext{testsPath: filepath.Dir(rootPath), dryRun: dryRun, cli: cli}, waitSecs)
+ failure, err := runTest(rootPath, testContext{testsPath: filepath.Dir(rootPath), dryRun: dryRun, cli: cli, clusters: map[string]*vespa.Service{}}, waitSecs)
if err != nil {
return 0, nil, err
}
@@ -216,9 +216,16 @@ func verify(step step, defaultCluster string, defaultParameters map[string]strin
if err != nil {
return "", "", err
}
- service, err = target.Service(vespa.QueryService, time.Duration(waitSecs)*time.Second, 0, cluster)
- if err != nil {
- return "", "", err
+ ok := false
+ service, ok = context.clusters[cluster]
+ if !ok {
+ // Cache service so we don't have to discover it for every step
+ waiter := context.cli.waiter(false, time.Duration(waitSecs)*time.Second)
+ service, err = waiter.Service(target, cluster)
+ if err != nil {
+ return "", "", err
+ }
+ context.clusters[cluster] = service
}
requestUrl, err = url.ParseRequestURI(service.BaseURL + requestUri)
if err != nil {
@@ -474,6 +481,8 @@ type testContext struct {
lazyTarget vespa.Target
testsPath string
dryRun bool
+ // Cache of services by their cluster name
+ clusters map[string]*vespa.Service
}
func (t *testContext) target() (vespa.Target, error) {
diff --git a/client/go/internal/cli/cmd/test_test.go b/client/go/internal/cli/cmd/test_test.go
index 5d6bb441b2a..4f8e6d49a2a 100644
--- a/client/go/internal/cli/cmd/test_test.go
+++ b/client/go/internal/cli/cmd/test_test.go
@@ -23,20 +23,26 @@ import (
func TestSuite(t *testing.T) {
client := &mock.HTTPClient{}
searchResponse, _ := os.ReadFile("testdata/tests/response.json")
+ mockServiceStatus(client, "container")
client.NextStatus(200)
client.NextStatus(200)
- for i := 0; i < 11; i++ {
+ for i := 0; i < 2; i++ {
+ client.NextResponseString(200, string(searchResponse))
+ }
+ mockServiceStatus(client, "container") // Some tests do not specify cluster, which is fine since we only have one, but this causes a cache miss
+ for i := 0; i < 9; i++ {
client.NextResponseString(200, string(searchResponse))
}
-
expectedBytes, _ := os.ReadFile("testdata/tests/expected-suite.out")
cli, stdout, stderr := newTestCLI(t)
cli.httpClient = client
assert.NotNil(t, cli.Run("test", "testdata/tests/system-test"))
-
+ assert.Equal(t, "", stderr.String())
baseUrl := "http://127.0.0.1:8080"
urlWithQuery := baseUrl + "/search/?presentation.timing=true&query=artist%3A+foo&timeout=3.4s"
- requests := []*http.Request{createFeedRequest(baseUrl), createFeedRequest(baseUrl), createSearchRequest(urlWithQuery), createSearchRequest(urlWithQuery)}
+ discoveryRequest := createSearchRequest("http://127.0.0.1:19071/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge")
+ requests := []*http.Request{discoveryRequest, createFeedRequest(baseUrl), createFeedRequest(baseUrl), createSearchRequest(urlWithQuery), createSearchRequest(urlWithQuery)}
+ requests = append(requests, discoveryRequest)
requests = append(requests, createSearchRequest(baseUrl+"/search/"))
requests = append(requests, createSearchRequest(baseUrl+"/search/?foo=%2F"))
for i := 0; i < 7; i++ {
@@ -95,6 +101,7 @@ func TestSuiteWithoutTests(t *testing.T) {
func TestSingleTest(t *testing.T) {
client := &mock.HTTPClient{}
searchResponse, _ := os.ReadFile("testdata/tests/response.json")
+ mockServiceStatus(client, "container")
client.NextStatus(200)
client.NextStatus(200)
client.NextResponseString(200, string(searchResponse))
@@ -109,7 +116,8 @@ func TestSingleTest(t *testing.T) {
baseUrl := "http://127.0.0.1:8080"
rawUrl := baseUrl + "/search/?presentation.timing=true&query=artist%3A+foo&timeout=3.4s"
- assertRequests([]*http.Request{createFeedRequest(baseUrl), createFeedRequest(baseUrl), createSearchRequest(rawUrl), createSearchRequest(rawUrl)}, client, t)
+ discoveryRequest := createSearchRequest("http://127.0.0.1:19071/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge")
+ assertRequests([]*http.Request{discoveryRequest, createFeedRequest(baseUrl), createFeedRequest(baseUrl), createSearchRequest(rawUrl), createSearchRequest(rawUrl)}, client, t)
}
func TestSingleTestWithCloudAndEndpoints(t *testing.T) {
@@ -172,12 +180,17 @@ func createRequest(method string, uri string, body string) *http.Request {
}
func assertRequests(requests []*http.Request, client *mock.HTTPClient, t *testing.T) {
+ t.Helper()
if assert.Equal(t, len(requests), len(client.Requests)) {
for i, e := range requests {
a := client.Requests[i]
assert.Equal(t, e.URL.String(), a.URL.String())
assert.Equal(t, e.Method, a.Method)
- assert.Equal(t, util.ReaderToJSON(e.Body), util.ReaderToJSON(a.Body))
+ actualBody := a.Body
+ if actualBody == nil {
+ actualBody = io.NopCloser(strings.NewReader(""))
+ }
+ assert.Equal(t, util.ReaderToJSON(e.Body), util.ReaderToJSON(actualBody))
}
}
}
diff --git a/client/go/internal/cli/cmd/testutil_test.go b/client/go/internal/cli/cmd/testutil_test.go
index 61d6c15c5a0..c16c9f8dc50 100644
--- a/client/go/internal/cli/cmd/testutil_test.go
+++ b/client/go/internal/cli/cmd/testutil_test.go
@@ -3,8 +3,10 @@ package cmd
import (
"bytes"
+ "fmt"
"net/http"
"path/filepath"
+ "strings"
"testing"
"time"
@@ -41,6 +43,36 @@ func newTestCLI(t *testing.T, envVars ...string) (*CLI, *bytes.Buffer, *bytes.Bu
return cli, &stdout, &stderr
}
+func mockServiceStatus(client *mock.HTTPClient, clusterNames ...string) {
+ var serviceObjects []string
+ for _, name := range clusterNames {
+ service := fmt.Sprintf(`{
+ "clusterName": "%s",
+ "host": "localhost",
+ "port": 8080,
+ "type": "container",
+ "url": "http://localhost:19071/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge/localhost:8080",
+ "currentGeneration": 1
+ }
+`, name)
+ serviceObjects = append(serviceObjects, service)
+ }
+ services := "[]"
+ if len(serviceObjects) > 0 {
+ services = "[" + strings.Join(serviceObjects, ",") + "]"
+ }
+ response := fmt.Sprintf(`
+{
+ "services": %s,
+ "currentGeneration": 1
+}`, services)
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge",
+ Status: 200,
+ Body: []byte(response),
+ })
+}
+
type mockAuthenticator struct{}
func (a *mockAuthenticator) Authenticate(request *http.Request) error { return nil }
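A sketch of how a test wires the new helper, mirroring the status tests earlier in this patch (assumed to run inside a func(t *testing.T); client, cli and assert are the same names used there):

    client := &mock.HTTPClient{}
    cli, stdout, _ := newTestCLI(t)
    cli.httpClient = client
    mockServiceStatus(client, "foo")                                               // queue the cluster discovery response
    client.NextResponse(mock.HTTPResponse{URI: "/ApplicationStatus", Status: 200}) // health check of container "foo"
    assert.Nil(t, cli.Run("status"))
    assert.Contains(t, stdout.String(), "Container foo")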
diff --git a/client/go/internal/cli/cmd/visit_test.go b/client/go/internal/cli/cmd/visit_test.go
index f85fb739370..bd806f1d9c9 100644
--- a/client/go/internal/cli/cmd/visit_test.go
+++ b/client/go/internal/cli/cmd/visit_test.go
@@ -93,10 +93,14 @@ func TestRunOneVisit(t *testing.T) {
func withMockClient(t *testing.T, prepCli func(*mock.HTTPClient), runOp func(*vespa.Service)) *http.Request {
client := &mock.HTTPClient{}
+ mockServiceStatus(client, "container")
prepCli(client)
cli, _, _ := newTestCLI(t)
cli.httpClient = client
- service, _ := documentService(cli, 0)
+ service, err := documentService(cli, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
runOp(service)
return client.LastRequest
}
@@ -126,6 +130,7 @@ func TestVisitCommand(t *testing.T) {
}
func assertVisitResults(arguments []string, t *testing.T, responses []string, queryPart, output string) {
+ t.Helper()
client := &mock.HTTPClient{}
client.NextResponseString(200, handlersResponse)
client.NextResponseString(400, clusterStarResponse)
@@ -134,6 +139,7 @@ func assertVisitResults(arguments []string, t *testing.T, responses []string, qu
}
cli, stdout, stderr := newTestCLI(t)
cli.httpClient = client
+ arguments = append(arguments, "-t", "http://127.0.0.1:8080")
assert.Nil(t, cli.Run(arguments...))
assert.Equal(t, output, stdout.String())
assert.Equal(t, "", stderr.String())
diff --git a/client/go/internal/cli/cmd/waiter.go b/client/go/internal/cli/cmd/waiter.go
new file mode 100644
index 00000000000..40d1d76518e
--- /dev/null
+++ b/client/go/internal/cli/cmd/waiter.go
@@ -0,0 +1,96 @@
+package cmd
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/fatih/color"
+ "github.com/vespa-engine/vespa/client/go/internal/vespa"
+)
+
+// Waiter waits for Vespa services to become ready, within a timeout.
+type Waiter struct {
+ // Once specifies whether we should wait at least once, regardless of the timeout.
+ Once bool
+
+ // Timeout specifies how long we should wait for an operation to complete.
+ Timeout time.Duration // TODO(mpolden): Consider making this a budget
+
+ cli *CLI
+}
+
+func (w *Waiter) wait() bool { return w.Once || w.Timeout > 0 }
+
+// DeployService returns the service providing the deploy API on given target,
+func (w *Waiter) DeployService(target vespa.Target) (*vespa.Service, error) {
+ s, err := target.DeployService()
+ if err != nil {
+ return nil, err
+ }
+ if err := w.maybeWaitFor(s); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// Service returns the service identified by cluster ID, available on target.
+func (w *Waiter) Service(target vespa.Target, cluster string) (*vespa.Service, error) {
+ targetType, err := w.cli.targetType()
+ if err != nil {
+ return nil, err
+ }
+ if targetType.url != "" && cluster != "" {
+ return nil, fmt.Errorf("cluster cannot be specified when target is an URL")
+ }
+ services, err := w.services(target)
+ if err != nil {
+ return nil, err
+ }
+ service, err := vespa.FindService(cluster, services)
+ if err != nil {
+ return nil, errHint(err, "The --cluster option specifies the service to use")
+ }
+ if err := w.maybeWaitFor(service); err != nil {
+ return nil, err
+ }
+ return service, nil
+}
+
+// Services returns all container services available on target.
+func (w *Waiter) Services(target vespa.Target) ([]*vespa.Service, error) {
+ services, err := w.services(target)
+ if err != nil {
+ return nil, err
+ }
+ for _, s := range services {
+ if err := w.maybeWaitFor(s); err != nil {
+ return nil, err
+ }
+ }
+ return services, nil
+}
+
+func (w *Waiter) maybeWaitFor(service *vespa.Service) error {
+ if w.Timeout > 0 {
+ w.cli.printInfo("Waiting up to ", color.CyanString(w.Timeout.String()), " for ", service.Description(), "...")
+ }
+ if w.wait() {
+ return service.Wait(w.Timeout)
+ }
+ return nil
+}
+
+func (w *Waiter) services(target vespa.Target) ([]*vespa.Service, error) {
+ if w.Timeout > 0 {
+ w.cli.printInfo("Waiting up to ", color.CyanString(w.Timeout.String()), " for cluster discovery...")
+ }
+ return target.ContainerServices(w.Timeout)
+}
+
+// Deployment waits for a deployment to become ready, returning the ID of the converged deployment.
+func (w *Waiter) Deployment(target vespa.Target, id int64) (int64, error) {
+ if w.Timeout > 0 {
+ w.cli.printInfo("Waiting up to ", color.CyanString(w.Timeout.String()), " for deployment to converge...")
+ }
+ return target.AwaitDeployment(id, w.Timeout)
+}
diff --git a/client/go/internal/mock/http.go b/client/go/internal/mock/http.go
index 3d4ead596b0..8a17448957f 100644
--- a/client/go/internal/mock/http.go
+++ b/client/go/internal/mock/http.go
@@ -2,6 +2,7 @@ package mock
import (
"bytes"
+ "fmt"
"io"
"net/http"
"strconv"
@@ -32,6 +33,7 @@ type HTTPClient struct {
}
type HTTPResponse struct {
+ URI string
Status int
Body []byte
Header http.Header
@@ -65,6 +67,9 @@ func (c *HTTPClient) Do(request *http.Request, timeout time.Duration) (*http.Res
response := HTTPResponse{Status: 200}
if len(c.nextResponses) > 0 {
response = c.nextResponses[0]
+ if response.URI != "" && response.URI != request.URL.RequestURI() {
+ return nil, fmt.Errorf("uri of response is %s, which does not match request uri %s", response.URI, request.URL.RequestURI())
+ }
c.nextResponses = c.nextResponses[1:]
}
if c.ReadBody && request.Body != nil {
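A short sketch of what the new URI field is for: a queued mock response is now bound to a specific request path, so a test fails fast if responses are consumed out of order (types and methods as added in this patch):

    client := &mock.HTTPClient{}
    client.NextResponse(mock.HTTPResponse{URI: "/ApplicationStatus", Status: 200})
    // Do now returns an explicit mismatch error for a request to any other path,
    // instead of silently consuming the queued 200 response.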
diff --git a/client/go/internal/vespa/application.go b/client/go/internal/vespa/application.go
index b31dde54d67..b6b5b9427b3 100644
--- a/client/go/internal/vespa/application.go
+++ b/client/go/internal/vespa/application.go
@@ -216,17 +216,28 @@ func copyFile(src *zip.File, dst string) error {
// FindApplicationPackage finds the path to an application package from the zip file or directory zipOrDir. If
// requirePackaging is true, the application package is required to be packaged with mvn package.
+//
+// The package to use is selected in this order of preference:
+// 1. Given path, if it's a zip
+// 2. target/application
+// 3. target/application.zip
+// 4. src/main/application
+// 5. Given path, if it contains services.xml
func FindApplicationPackage(zipOrDir string, requirePackaging bool) (ApplicationPackage, error) {
if isZip(zipOrDir) {
return ApplicationPackage{Path: zipOrDir}, nil
}
- if util.PathExists(filepath.Join(zipOrDir, "pom.xml")) {
- zip := filepath.Join(zipOrDir, "target", "application.zip")
- if util.PathExists(zip) {
+ // Prefer uncompressed application because this allows us to add security/clients.pem to the package on-demand
+ if path := filepath.Join(zipOrDir, "target", "application"); util.PathExists(path) {
+ return ApplicationPackage{Path: path}, nil
+ }
+ appZip := filepath.Join(zipOrDir, "target", "application.zip")
+ if util.PathExists(filepath.Join(zipOrDir, "pom.xml")) || util.PathExists(appZip) {
+ if util.PathExists(appZip) {
if testZip := filepath.Join(zipOrDir, "target", "application-test.zip"); util.PathExists(testZip) {
- return ApplicationPackage{Path: zip, TestPath: testZip}, nil
+ return ApplicationPackage{Path: appZip, TestPath: testZip}, nil
}
- return ApplicationPackage{Path: zip}, nil
+ return ApplicationPackage{Path: appZip}, nil
}
if requirePackaging {
return ApplicationPackage{}, errors.New("found pom.xml, but target/application.zip does not exist: run 'mvn package' first")
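A sketch of the lookup order documented above, using a hypothetical project directory named myapp (illustrative only; the signature is the one in this patch):

    // The package is chosen in this order of preference:
    //   1. myapp itself, when it is a zip file
    //   2. myapp/target/application (uncompressed, so security/clients.pem can be added on demand)
    //   3. myapp/target/application.zip (with target/application-test.zip as TestPath, if present)
    //   4. myapp/src/main/application
    //   5. myapp itself, when it contains services.xml
    pkg, err := vespa.FindApplicationPackage("myapp", false)
    if err != nil {
        return err
    }
    log.Print(pkg.Path)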
diff --git a/client/go/internal/vespa/deploy.go b/client/go/internal/vespa/deploy.go
index ae4d4678d66..1bfaf641243 100644
--- a/client/go/internal/vespa/deploy.go
+++ b/client/go/internal/vespa/deploy.go
@@ -92,7 +92,7 @@ func (d DeploymentOptions) String() string {
}
func (d *DeploymentOptions) url(path string) (*url.URL, error) {
- service, err := d.Target.Service(DeployService, 0, 0, "")
+ service, err := d.Target.DeployService()
if err != nil {
return nil, err
}
@@ -149,7 +149,7 @@ func Prepare(deployment DeploymentOptions) (PrepareResult, error) {
return PrepareResult{}, err
}
var jsonResponse struct {
- SessionID string `json:"session-id"`
+ SessionID string `json:"session-id"` // API returns ID as string
Log []LogLinePrepareResponse `json:"log"`
}
jsonDec := json.NewDecoder(response.Body)
@@ -189,25 +189,25 @@ func Activate(sessionID int64, deployment DeploymentOptions) error {
}
// Deactivate given deployment
-func Deactivate(opts DeploymentOptions) error {
- path := "/application/v2/tenant/default/application/default"
- if opts.Target.IsCloud() {
- if opts.Target.Deployment().Zone.Environment == "" || opts.Target.Deployment().Zone.Region == "" {
- return fmt.Errorf("%s: missing zone", opts)
+func Deactivate(deployment DeploymentOptions) error {
+ var (
+ u *url.URL
+ err error
+ )
+ if deployment.Target.IsCloud() {
+ if deployment.Target.Deployment().Zone.Environment == "" || deployment.Target.Deployment().Zone.Region == "" {
+ return fmt.Errorf("%s: missing zone", deployment)
}
- path = fmt.Sprintf("/application/v4/tenant/%s/application/%s/instance/%s/environment/%s/region/%s",
- opts.Target.Deployment().Application.Tenant,
- opts.Target.Deployment().Application.Application,
- opts.Target.Deployment().Application.Instance,
- opts.Target.Deployment().Zone.Environment,
- opts.Target.Deployment().Zone.Region)
- }
- u, err := opts.url(path)
+ deploymentURL := deployment.Target.Deployment().System.DeploymentURL(deployment.Target.Deployment())
+ u, err = url.Parse(deploymentURL)
+ } else {
+ u, err = deployment.url("/application/v2/tenant/default/application/default")
+ }
if err != nil {
return err
}
req := &http.Request{URL: u, Method: "DELETE"}
- resp, err := deployServiceDo(req, 30*time.Second, opts)
+ resp, err := deployServiceDo(req, 30*time.Second, deployment)
if err != nil {
return err
}
@@ -215,27 +215,27 @@ func Deactivate(opts DeploymentOptions) error {
return checkResponse(req, resp)
}
-func Deploy(opts DeploymentOptions) (PrepareResult, error) {
- path := "/application/v2/tenant/default/prepareandactivate"
- if opts.Target.IsCloud() {
- if err := checkDeploymentOpts(opts); err != nil {
+// Deploy deploys an application.
+func Deploy(deployment DeploymentOptions) (PrepareResult, error) {
+ var (
+ u *url.URL
+ err error
+ )
+ if deployment.Target.IsCloud() {
+ if err := checkDeploymentOpts(deployment); err != nil {
return PrepareResult{}, err
}
- if opts.Target.Deployment().Zone.Environment == "" || opts.Target.Deployment().Zone.Region == "" {
- return PrepareResult{}, fmt.Errorf("%s: missing zone", opts)
+ if deployment.Target.Deployment().Zone.Environment == "" || deployment.Target.Deployment().Zone.Region == "" {
+ return PrepareResult{}, fmt.Errorf("%s: missing zone", deployment)
}
- path = fmt.Sprintf("/application/v4/tenant/%s/application/%s/instance/%s/deploy/%s-%s",
- opts.Target.Deployment().Application.Tenant,
- opts.Target.Deployment().Application.Application,
- opts.Target.Deployment().Application.Instance,
- opts.Target.Deployment().Zone.Environment,
- opts.Target.Deployment().Zone.Region)
- }
- u, err := opts.url(path)
+ u, err = url.Parse(deployment.Target.Deployment().System.DeployURL(deployment.Target.Deployment()))
+ } else {
+ u, err = deployment.url("/application/v2/tenant/default/prepareandactivate")
+ }
if err != nil {
return PrepareResult{}, err
}
- return uploadApplicationPackage(u, opts)
+ return uploadApplicationPackage(u, deployment)
}
func copyToPart(dst *multipart.Writer, src io.Reader, fieldname, filename string) error {
@@ -262,8 +262,8 @@ func Submit(opts DeploymentOptions, submission Submission) error {
if err := checkDeploymentOpts(opts); err != nil {
return err
}
- path := fmt.Sprintf("/application/v4/tenant/%s/application/%s/submit", opts.Target.Deployment().Application.Tenant, opts.Target.Deployment().Application.Application)
- u, err := opts.url(path)
+ submitURL := opts.Target.Deployment().System.SubmitURL(opts.Target.Deployment())
+ u, err := opts.url(submitURL)
if err != nil {
return err
}
@@ -311,7 +311,7 @@ func Submit(opts DeploymentOptions, submission Submission) error {
}
func deployServiceDo(request *http.Request, timeout time.Duration, opts DeploymentOptions) (*http.Response, error) {
- s, err := opts.Target.Service(DeployService, 0, 0, "")
+ s, err := opts.Target.DeployService()
if err != nil {
return nil, err
}
@@ -373,7 +373,7 @@ func uploadApplicationPackage(url *url.URL, opts DeploymentOptions) (PrepareResu
if err != nil {
return PrepareResult{}, err
}
- service, err := opts.Target.Service(DeployService, opts.Timeout, 0, "")
+ service, err := opts.Target.DeployService()
if err != nil {
return PrepareResult{}, err
}
@@ -384,7 +384,7 @@ func uploadApplicationPackage(url *url.URL, opts DeploymentOptions) (PrepareResu
defer response.Body.Close()
var jsonResponse struct {
- SessionID string `json:"session-id"` // Config server
+ SessionID string `json:"session-id"` // Config server. API returns ID as string
RunID int64 `json:"run"` // Controller
Log []LogLinePrepareResponse `json:"log"`
diff --git a/client/go/internal/vespa/deploy_test.go b/client/go/internal/vespa/deploy_test.go
index 39a9f2bcdf2..9addf81138a 100644
--- a/client/go/internal/vespa/deploy_test.go
+++ b/client/go/internal/vespa/deploy_test.go
@@ -19,7 +19,7 @@ import (
func TestDeploy(t *testing.T) {
httpClient := mock.HTTPClient{}
- target := LocalTarget(&httpClient, TLSOptions{})
+ target := LocalTarget(&httpClient, TLSOptions{}, 0)
appDir, _ := mock.ApplicationPackageDir(t, false, false)
opts := DeploymentOptions{
Target: target,
@@ -38,7 +38,7 @@ func TestDeploy(t *testing.T) {
func TestDeployCloud(t *testing.T) {
httpClient := mock.HTTPClient{}
- target := createCloudTarget(t, "http://vespacloud", io.Discard)
+ target, _ := createCloudTarget(t, io.Discard)
cloudTarget, ok := target.(*cloudTarget)
require.True(t, ok)
cloudTarget.httpClient = &httpClient
@@ -51,7 +51,7 @@ func TestDeployCloud(t *testing.T) {
require.Nil(t, err)
assert.Equal(t, 1, len(httpClient.Requests))
req := httpClient.LastRequest
- assert.Equal(t, "http://vespacloud/application/v4/tenant/t1/application/a1/instance/i1/deploy/dev-us-north-1", req.URL.String())
+ assert.Equal(t, "https://api-ctl.vespa-cloud.com:4443/application/v4/tenant/t1/application/a1/instance/i1/deploy/dev-us-north-1", req.URL.String())
values := parseMultiPart(t, req)
zipData := values["applicationZip"]
@@ -71,7 +71,7 @@ func TestDeployCloud(t *testing.T) {
func TestSubmit(t *testing.T) {
httpClient := mock.HTTPClient{}
- target := createCloudTarget(t, "http://vespacloud", io.Discard)
+ target, _ := createCloudTarget(t, io.Discard)
cloudTarget, ok := target.(*cloudTarget)
require.True(t, ok)
cloudTarget.httpClient = &httpClient
@@ -131,6 +131,11 @@ func TestFindApplicationPackage(t *testing.T) {
existingFile: filepath.Join(dir, "services.xml"),
})
assertFindApplicationPackage(t, dir, pkgFixture{
+ expectedPath: dir,
+ expectedTestPath: dir,
+ existingFiles: []string{filepath.Join(dir, "services.xml"), filepath.Join(dir, "tests", "foo.json")},
+ })
+ assertFindApplicationPackage(t, dir, pkgFixture{
expectedPath: filepath.Join(dir, "src", "main", "application"),
existingFile: filepath.Join(dir, "src", "main", "application") + string(os.PathSeparator),
})
@@ -149,17 +154,23 @@ func TestFindApplicationPackage(t *testing.T) {
existingFiles: []string{filepath.Join(dir, "pom.xml"), filepath.Join(dir, "target", "application.zip")},
requirePackaging: true,
})
- dir2 := t.TempDir()
- assertFindApplicationPackage(t, dir2, pkgFixture{
- expectedPath: dir2,
- expectedTestPath: dir2,
- existingFiles: []string{filepath.Join(dir2, "services.xml"), filepath.Join(dir2, "tests", "foo.json")},
+ assertFindApplicationPackage(t, dir, pkgFixture{
+ expectedPath: filepath.Join(dir, "target", "application.zip"),
+ existingFiles: []string{filepath.Join(dir, "target", "application.zip")},
+ })
+ assertFindApplicationPackage(t, dir, pkgFixture{
+ expectedPath: filepath.Join(dir, "target", "application"),
+ existingFiles: []string{filepath.Join(dir, "target", "application"), filepath.Join(dir, "target", "application.zip")},
+ })
+ zip := filepath.Join(dir, "myapp.zip")
+ assertFindApplicationPackage(t, zip, pkgFixture{
+ expectedPath: zip,
})
}
func TestDeactivate(t *testing.T) {
httpClient := mock.HTTPClient{}
- target := LocalTarget(&httpClient, TLSOptions{})
+ target := LocalTarget(&httpClient, TLSOptions{}, 0)
opts := DeploymentOptions{Target: target}
require.Nil(t, Deactivate(opts))
assert.Equal(t, 1, len(httpClient.Requests))
@@ -170,7 +181,7 @@ func TestDeactivate(t *testing.T) {
func TestDeactivateCloud(t *testing.T) {
httpClient := mock.HTTPClient{}
- target := createCloudTarget(t, "http://vespacloud", io.Discard)
+ target, _ := createCloudTarget(t, io.Discard)
cloudTarget, ok := target.(*cloudTarget)
require.True(t, ok)
cloudTarget.httpClient = &httpClient
@@ -179,7 +190,7 @@ func TestDeactivateCloud(t *testing.T) {
assert.Equal(t, 1, len(httpClient.Requests))
req := httpClient.LastRequest
assert.Equal(t, "DELETE", req.Method)
- assert.Equal(t, "http://vespacloud/application/v4/tenant/t1/application/a1/instance/i1/environment/dev/region/us-north-1", req.URL.String())
+ assert.Equal(t, "https://api-ctl.vespa-cloud.com:4443/application/v4/tenant/t1/application/a1/instance/i1/environment/dev/region/us-north-1", req.URL.String())
}
type pkgFixture struct {
diff --git a/client/go/internal/vespa/document/dispatcher.go b/client/go/internal/vespa/document/dispatcher.go
index fb7a532e332..b76b6b44016 100644
--- a/client/go/internal/vespa/document/dispatcher.go
+++ b/client/go/internal/vespa/document/dispatcher.go
@@ -110,10 +110,10 @@ func (d *Dispatcher) shouldRetry(op documentOp, result Result) bool {
d.throttler.Success()
d.circuitBreaker.Success()
return false
- } else if result.HTTPStatus == 429 || result.HTTPStatus == 503 {
+ } else if result.HTTPStatus == 429 {
d.throttler.Throttled(d.inflightCount.Load())
return true
- } else if result.Err != nil || result.HTTPStatus == 500 || result.HTTPStatus == 502 || result.HTTPStatus == 504 {
+ } else if result.Err != nil || result.HTTPStatus == 500 || result.HTTPStatus == 502 || result.HTTPStatus == 503 || result.HTTPStatus == 504 {
d.circuitBreaker.Failure()
if retry {
return true
diff --git a/client/go/internal/vespa/log.go b/client/go/internal/vespa/log.go
index 0e2cb5d0bfd..81088b8c0a1 100644
--- a/client/go/internal/vespa/log.go
+++ b/client/go/internal/vespa/log.go
@@ -72,6 +72,8 @@ func ReadLogEntries(r io.Reader) ([]LogEntry, error) {
// LogLevel returns an int representing a named log level.
func LogLevel(name string) int {
switch name {
+ case "none":
+ return -1
case "error":
return 0
case "warning":
diff --git a/client/go/internal/vespa/system.go b/client/go/internal/vespa/system.go
index 96795cc0ef8..cdf401bf43c 100644
--- a/client/go/internal/vespa/system.go
+++ b/client/go/internal/vespa/system.go
@@ -56,7 +56,64 @@ type System struct {
}
// IsPublic returns whether system s is a public (Vespa Cloud) system.
-func (s *System) IsPublic() bool { return s.Name == PublicSystem.Name || s.Name == PublicCDSystem.Name }
+func (s System) IsPublic() bool { return s.Name == PublicSystem.Name || s.Name == PublicCDSystem.Name }
+
+// DeployURL returns the API URL to use for deploying to this system.
+func (s System) DeployURL(deployment Deployment) string {
+ return fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/deploy/%s",
+ s.URL,
+ deployment.Application.Tenant,
+ deployment.Application.Application,
+ deployment.Application.Instance,
+ jobName(deployment.Zone))
+}
+
+// SubmitURL returns the API URL for submitting an application package for production deployment.
+func (s System) SubmitURL(deployment Deployment) string {
+ return fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/submit", s.URL, deployment.Application.Tenant, deployment.Application.Application)
+}
+
+// DeploymentURL returns the API URL of given deployment.
+func (s System) DeploymentURL(deployment Deployment) string {
+ return fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/environment/%s/region/%s",
+ s.URL,
+ deployment.Application.Tenant,
+ deployment.Application.Application,
+ deployment.Application.Instance,
+ deployment.Zone.Environment,
+ deployment.Zone.Region)
+}
+
+// RunURL returns the API URL for a given deployment job run.
+func (s System) RunURL(deployment Deployment, id int64) string {
+ return fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/job/%s/run/%d",
+ s.URL,
+ deployment.Application.Tenant, deployment.Application.Application, deployment.Application.Instance,
+ jobName(deployment.Zone), id)
+}
+
+// RunsURL returns the API URL listing all runs for given deployment.
+func (s System) RunsURL(deployment Deployment) string {
+ return fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/job/%s",
+ s.URL,
+ deployment.Application.Tenant, deployment.Application.Application, deployment.Application.Instance,
+ jobName(deployment.Zone))
+}
+
+// ConsoleRunURL returns the console URL for a deployment job run in this system.
+func (s System) ConsoleRunURL(deployment Deployment, run int64) string {
+ return fmt.Sprintf("%s/tenant/%s/application/%s/%s/instance/%s/job/%s/run/%d",
+ s.ConsoleURL, deployment.Application.Tenant, deployment.Application.Application, deployment.Zone.Environment,
+ deployment.Application.Instance, jobName(deployment.Zone), run)
+}
+
+func jobName(zone ZoneID) string {
+ env := zone.Environment
+ if env == "prod" {
+ env = "production"
+ }
+ return env + "-" + zone.Region
+}
// GetSystem returns the system of given name.
func GetSystem(name string) (System, error) {
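A sketch of what the new URL helpers produce for the deployment used in this patch's tests (tenant t1, application a1, instance i1 in dev.us-north-1); constructing Deployment and ZoneID with struct literals here is illustrative:

    d := vespa.Deployment{
        System:      vespa.PublicSystem,
        Application: vespa.ApplicationID{Tenant: "t1", Application: "a1", Instance: "i1"},
        Zone:        vespa.ZoneID{Environment: "dev", Region: "us-north-1"},
    }
    fmt.Println(d.System.DeployURL(d))           // .../application/v4/tenant/t1/application/a1/instance/i1/deploy/dev-us-north-1
    fmt.Println(d.System.DeploymentURL(d))       // .../application/v4/tenant/t1/application/a1/instance/i1/environment/dev/region/us-north-1
    fmt.Println(d.System.ConsoleRunURL(d, 1337)) // .../tenant/t1/application/a1/dev/instance/i1/job/dev-us-north-1/run/1337
    // jobName maps "prod" to "production", so a prod zone us-east-3 becomes the job name "production-us-east-3".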
diff --git a/client/go/internal/vespa/target.go b/client/go/internal/vespa/target.go
index 6dd64dd1275..f96723dc433 100644
--- a/client/go/internal/vespa/target.go
+++ b/client/go/internal/vespa/target.go
@@ -4,9 +4,11 @@ package vespa
import (
"crypto/tls"
+ "errors"
"fmt"
"io"
"net/http"
+ "strings"
"sync"
"time"
@@ -27,18 +29,15 @@ const (
// A hosted Vespa target
TargetHosted = "hosted"
- // A Vespa service that handles deployments, either a config server or a controller
- DeployService = "deploy"
+ // LatestDeployment is the ID used to wait for a deployment to converge on the latest generation
+ LatestDeployment int64 = -1
- // A Vespa service that handles queries.
- QueryService = "query"
-
- // A Vespa service that handles feeding of document. This may point to the same service as QueryService.
- DocumentService = "document"
-
- retryInterval = 2 * time.Second
+ // AnyDeployment is the ID used to wait for a deployment to converge on any generation
+ AnyDeployment int64 = -2
)
+var errWaitTimeout = errors.New("wait timed out")
+
// Authenticator authenticates the given HTTP request.
type Authenticator interface {
Authenticate(request *http.Request) error
@@ -50,9 +49,11 @@ type Service struct {
Name string
TLSOptions TLSOptions
- once sync.Once
- auth Authenticator
- httpClient util.HTTPClient
+ deployAPI bool
+ once sync.Once
+ auth Authenticator
+ httpClient util.HTTPClient
+ retryInterval time.Duration
}
// Target represents a Vespa platform, running named Vespa services.
@@ -66,8 +67,16 @@ type Target interface {
// Deployment returns the deployment managed by this target.
Deployment() Deployment
- // Service returns the service for given name. If timeout is non-zero, wait for the service to converge.
- Service(name string, timeout time.Duration, sessionOrRunID int64, cluster string) (*Service, error)
+ // DeployService returns the service providing the deploy API on this target.
+ DeployService() (*Service, error)
+
+ // ContainerServices returns all container services of the current deployment. If timeout is positive, wait for
+ // services to be discovered.
+ ContainerServices(timeout time.Duration) ([]*Service, error)
+
+ // AwaitDeployment waits for a deployment identified by id to succeed. It returns the id that succeeded, or an
+ // error. The exact meaning of id depends on the implementation.
+ AwaitDeployment(id int64, timeout time.Duration) (int64, error)
// PrintLog writes the logs of this deployment using given options to control output.
PrintLog(options LogOptions) error
@@ -114,29 +123,66 @@ func (s *Service) Do(request *http.Request, timeout time.Duration) (*http.Respon
func (s *Service) SetClient(client util.HTTPClient) { s.httpClient = client }
// Wait polls the health check of this service until it succeeds or timeout passes.
-func (s *Service) Wait(timeout time.Duration) (int, error) {
+func (s *Service) Wait(timeout time.Duration) error {
url := s.BaseURL
- switch s.Name {
- case DeployService:
+ if s.deployAPI {
url += "/status.html" // because /ApplicationStatus is not publicly reachable in Vespa Cloud
- case QueryService, DocumentService:
+ } else {
url += "/ApplicationStatus"
- default:
- return 0, fmt.Errorf("invalid service: %s", s.Name)
}
- return waitForOK(s, url, timeout)
+ req, err := http.NewRequest("GET", url, nil)
+ if err != nil {
+ return err
+ }
+ okFunc := func(status int, response []byte) (bool, error) { return isOK(status) }
+ status, err := wait(s, okFunc, func() *http.Request { return req }, timeout, s.retryInterval)
+ if err != nil {
+ statusDesc := ""
+ if status > 0 {
+ statusDesc = fmt.Sprintf(": status %d", status)
+ }
+ return fmt.Errorf("unhealthy %s%s%s at %s: %w", s.Description(), waitDescription(timeout), statusDesc, url, err)
+ }
+ return nil
}
func (s *Service) Description() string {
- switch s.Name {
- case QueryService:
- return "Container (query API)"
- case DocumentService:
- return "Container (document API)"
- case DeployService:
- return "Deploy API"
+ if s.deployAPI {
+ return "deploy API"
+ }
+ if s.Name == "" {
+ return "container"
+ }
+ return "container " + s.Name
+}
+
+// FindService returns the service of given name, found among services, if any.
+func FindService(name string, services []*Service) (*Service, error) {
+ if name == "" && len(services) == 1 {
+ return services[0], nil
}
- return fmt.Sprintf("No description of service %s", s.Name)
+ names := make([]string, len(services))
+ for i, s := range services {
+ if name == s.Name {
+ return s, nil
+ }
+ names[i] = s.Name
+ }
+ found := "no services found"
+ if len(names) > 0 {
+ found = "known services: " + strings.Join(names, ", ")
+ }
+ if name != "" {
+ return nil, fmt.Errorf("no such service: %q: %s", name, found)
+ }
+ return nil, fmt.Errorf("no service specified: %s", found)
+}
+
+func waitDescription(d time.Duration) string {
+ if d > 0 {
+ return " after waiting up to " + d.String()
+ }
+ return ""
}
func isOK(status int) (bool, error) {
@@ -145,57 +191,48 @@ func isOK(status int) (bool, error) {
case 2: // success
return true, nil
case 4: // client error
- return false, fmt.Errorf("request failed with status %d", status)
- default: // retry
+ return false, fmt.Errorf("got status %d", status)
+ default: // retry on everything else
return false, nil
}
}
-type responseFunc func(status int, response []byte) (bool, error)
+// responseFunc returns whether an HTTP request is considered successful, based on its status and response data.
+// Returning false indicates that the operation should be retried. A non-nil error indicates that the response is
+// terminal and that the request should not be retried.
+type responseFunc func(status int, response []byte) (ok bool, err error)
type requestFunc func() *http.Request
-// waitForOK queries url and returns its status code. If response status is not 2xx or 4xx, it is repeatedly queried
-// until timeout elapses.
-func waitForOK(service *Service, url string, timeout time.Duration) (int, error) {
- req, err := http.NewRequest("GET", url, nil)
- if err != nil {
- return 0, err
- }
- okFunc := func(status int, response []byte) (bool, error) {
- ok, err := isOK(status)
- if err != nil {
- return false, fmt.Errorf("failed to query %s at %s: %w", service.Description(), url, err)
- }
- return ok, err
- }
- return wait(service, okFunc, func() *http.Request { return req }, timeout)
-}
-
-func wait(service *Service, fn responseFunc, reqFn requestFunc, timeout time.Duration) (int, error) {
+// wait queries service until one of the following conditions is satisfied:
+//
+// 1. okFn returns true or a non-nil error
+// 2. timeout is exceeded
+//
+// It returns the last received HTTP status code and error, if any.
+func wait(service *Service, okFn responseFunc, reqFn requestFunc, timeout, retryInterval time.Duration) (int, error) {
var (
- httpErr error
- response *http.Response
- statusCode int
+ status int
+ response *http.Response
+ err error
)
deadline := time.Now().Add(timeout)
loopOnce := timeout == 0
for time.Now().Before(deadline) || loopOnce {
- req := reqFn()
- response, httpErr = service.Do(req, 10*time.Second)
- if httpErr == nil {
- statusCode = response.StatusCode
+ response, err = service.Do(reqFn(), 10*time.Second)
+ if err == nil {
+ status = response.StatusCode
body, err := io.ReadAll(response.Body)
if err != nil {
return 0, err
}
response.Body.Close()
- ok, err := fn(statusCode, body)
+ ok, err := okFn(status, body)
if err != nil {
- return statusCode, err
+ return status, fmt.Errorf("aborting wait: %w", err)
}
if ok {
- return statusCode, nil
+ return status, nil
}
}
timeLeft := time.Until(deadline)
@@ -204,5 +241,8 @@ func wait(service *Service, fn responseFunc, reqFn requestFunc, timeout time.Dur
}
time.Sleep(retryInterval)
}
- return statusCode, httpErr
+ if err == nil {
+ return status, errWaitTimeout
+ }
+ return status, err
}
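The reworked wait helper above is driven by a responseFunc callback: (false, nil) means retry, (true, nil) means done, and a non-nil error aborts the wait. A minimal self-contained sketch of that polling pattern follows; the names pollUntilOK and checkFn are illustrative and not part of the Vespa CLI.

package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"
)

// checkFn mirrors the responseFunc contract: (true, nil) stops polling with
// success, a non-nil error aborts immediately, and (false, nil) means retry.
type checkFn func(status int, body []byte) (bool, error)

var errTimeout = errors.New("wait timed out")

// pollUntilOK issues GET requests against url until fn reports success,
// fn returns an error, or the deadline passes. It returns the last HTTP
// status seen, similar to the wait function above.
func pollUntilOK(client *http.Client, url string, fn checkFn, timeout, interval time.Duration) (int, error) {
	deadline := time.Now().Add(timeout)
	lastStatus := 0
	for attempt := 0; attempt == 0 || time.Now().Before(deadline); attempt++ {
		resp, err := client.Get(url)
		if err == nil {
			body, readErr := io.ReadAll(resp.Body)
			resp.Body.Close()
			if readErr != nil {
				return 0, readErr
			}
			lastStatus = resp.StatusCode
			ok, checkErr := fn(resp.StatusCode, body)
			if checkErr != nil {
				return lastStatus, checkErr // terminal response: stop retrying
			}
			if ok {
				return lastStatus, nil
			}
		}
		if time.Until(deadline) < interval {
			break
		}
		time.Sleep(interval)
	}
	return lastStatus, errTimeout
}

func main() {
	// isHealthy treats 2xx as success, 4xx as terminal, everything else as retryable.
	isHealthy := func(status int, _ []byte) (bool, error) {
		switch status / 100 {
		case 2:
			return true, nil
		case 4:
			return false, fmt.Errorf("got status %d", status)
		default:
			return false, nil
		}
	}
	status, err := pollUntilOK(http.DefaultClient, "http://127.0.0.1:8080/ApplicationStatus",
		isHealthy, 10*time.Second, 2*time.Second)
	fmt.Println(status, err)
}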
diff --git a/client/go/internal/vespa/target_cloud.go b/client/go/internal/vespa/target_cloud.go
index c0169f1a9bd..2063e15e3a2 100644
--- a/client/go/internal/vespa/target_cloud.go
+++ b/client/go/internal/vespa/target_cloud.go
@@ -3,12 +3,12 @@ package vespa
import (
"bytes"
"encoding/json"
+ "errors"
"fmt"
"math"
"net/http"
"sort"
"strconv"
- "strings"
"time"
"github.com/vespa-engine/vespa/client/go/internal/util"
@@ -37,6 +37,7 @@ type cloudTarget struct {
httpClient util.HTTPClient
apiAuth Authenticator
deploymentAuth Authenticator
+ retryInterval time.Duration
}
type deploymentEndpoint struct {
@@ -49,13 +50,21 @@ type deploymentResponse struct {
Endpoints []deploymentEndpoint `json:"endpoints"`
}
-type jobResponse struct {
+type runResponse struct {
Active bool `json:"active"`
Status string `json:"status"`
Log map[string][]logMessage `json:"log"`
LastID int64 `json:"lastId"`
}
+type jobResponse struct {
+ ID int64 `json:"id"`
+}
+
+type jobsResponse struct {
+ Runs []jobResponse `json:"runs"`
+}
+
type logMessage struct {
At int64 `json:"at"`
Type string `json:"type"`
@@ -63,7 +72,9 @@ type logMessage struct {
}
// CloudTarget creates a Target for the Vespa Cloud or hosted Vespa platform.
-func CloudTarget(httpClient util.HTTPClient, apiAuth Authenticator, deploymentAuth Authenticator, apiOptions APIOptions, deploymentOptions CloudDeploymentOptions, logOptions LogOptions) (Target, error) {
+func CloudTarget(httpClient util.HTTPClient, apiAuth Authenticator, deploymentAuth Authenticator,
+ apiOptions APIOptions, deploymentOptions CloudDeploymentOptions,
+ logOptions LogOptions, retryInterval time.Duration) (Target, error) {
return &cloudTarget{
httpClient: httpClient,
apiOptions: apiOptions,
@@ -71,40 +82,10 @@ func CloudTarget(httpClient util.HTTPClient, apiAuth Authenticator, deploymentAu
logOptions: logOptions,
apiAuth: apiAuth,
deploymentAuth: deploymentAuth,
+ retryInterval: retryInterval,
}, nil
}
-func (t *cloudTarget) findClusterURL(cluster string, timeout time.Duration, runID int64) (string, error) {
- if t.deploymentOptions.CustomURL != "" {
- return t.deploymentOptions.CustomURL, nil
- }
- if t.deploymentOptions.ClusterURLs == nil {
- if err := t.waitForEndpoints(timeout, runID); err != nil {
- return "", err
- }
- }
- clusters := make([]string, 0, len(t.deploymentOptions.ClusterURLs))
- for c := range t.deploymentOptions.ClusterURLs {
- clusters = append(clusters, c)
- }
- if cluster == "" {
- for _, url := range t.deploymentOptions.ClusterURLs {
- if len(t.deploymentOptions.ClusterURLs) == 1 {
- return url, nil
- } else {
- return "", fmt.Errorf("no cluster specified: found multiple clusters '%s'", strings.Join(clusters, "', '"))
- }
- }
- } else {
- url, ok := t.deploymentOptions.ClusterURLs[cluster]
- if !ok {
- return "", fmt.Errorf("invalid cluster '%s': must be one of '%s'", cluster, strings.Join(clusters, "', '"))
- }
- return url, nil
- }
- return "", fmt.Errorf("no endpoints found")
-}
-
func (t *cloudTarget) Type() string {
switch t.apiOptions.System.Name {
case MainSystem.Name, CDSystem.Name:
@@ -117,41 +98,52 @@ func (t *cloudTarget) IsCloud() bool { return true }
func (t *cloudTarget) Deployment() Deployment { return t.deploymentOptions.Deployment }
-func (t *cloudTarget) Service(name string, timeout time.Duration, runID int64, cluster string) (*Service, error) {
- switch name {
- case DeployService:
+func (t *cloudTarget) DeployService() (*Service, error) {
+ return &Service{
+ BaseURL: t.apiOptions.System.URL,
+ TLSOptions: t.apiOptions.TLSOptions,
+ deployAPI: true,
+ httpClient: t.httpClient,
+ auth: t.apiAuth,
+ retryInterval: t.retryInterval,
+ }, nil
+}
+
+func (t *cloudTarget) ContainerServices(timeout time.Duration) ([]*Service, error) {
+ var clusterUrls map[string]string
+ if t.deploymentOptions.CustomURL != "" {
+ // Custom URL is always preferred
+ clusterUrls = map[string]string{"": t.deploymentOptions.CustomURL}
+ } else if t.deploymentOptions.ClusterURLs != nil {
+ // ... then endpoints specified through environment
+ clusterUrls = t.deploymentOptions.ClusterURLs
+ } else {
+ // ... then discovered endpoints
+ endpoints, err := t.discoverEndpoints(timeout)
+ if err != nil {
+ return nil, err
+ }
+ clusterUrls = endpoints
+ }
+ services := make([]*Service, 0, len(clusterUrls))
+ for name, url := range clusterUrls {
service := &Service{
- Name: name,
- BaseURL: t.apiOptions.System.URL,
- TLSOptions: t.apiOptions.TLSOptions,
- httpClient: t.httpClient,
- auth: t.apiAuth,
+ Name: name,
+ BaseURL: url,
+ TLSOptions: t.deploymentOptions.TLSOptions,
+ httpClient: t.httpClient,
+ auth: t.deploymentAuth,
+ retryInterval: t.retryInterval,
}
if timeout > 0 {
- status, err := service.Wait(timeout)
- if err != nil {
+ if err := service.Wait(timeout); err != nil {
return nil, err
}
- if ok, _ := isOK(status); !ok {
- return nil, fmt.Errorf("got status %d from deploy service at %s", status, service.BaseURL)
- }
}
- return service, nil
- case QueryService, DocumentService:
- url, err := t.findClusterURL(cluster, timeout, runID)
- if err != nil {
- return nil, err
- }
- return &Service{
- Name: name,
- BaseURL: url,
- TLSOptions: t.deploymentOptions.TLSOptions,
- httpClient: t.httpClient,
- auth: t.deploymentAuth,
- }, nil
- default:
- return nil, fmt.Errorf("unknown service: %s", name)
+ services = append(services, service)
}
+ sort.Slice(services, func(i, j int) bool { return services[i].Name < services[j].Name })
+ return services, nil
}
func (t *cloudTarget) CheckVersion(clientVersion version.Version) error {
@@ -162,7 +154,7 @@ func (t *cloudTarget) CheckVersion(clientVersion version.Version) error {
if err != nil {
return err
}
- deployService, err := t.Service(DeployService, 0, 0, "")
+ deployService, err := t.DeployService()
if err != nil {
return err
}
@@ -218,7 +210,7 @@ func (t *cloudTarget) PrintLog(options LogOptions) error {
}
logEntries, err := ReadLogEntries(bytes.NewReader(response))
if err != nil {
- return true, err
+ return false, err
}
for _, le := range logEntries {
if !le.Time.After(lastFrom) {
@@ -238,35 +230,59 @@ func (t *cloudTarget) PrintLog(options LogOptions) error {
if options.Follow {
timeout = math.MaxInt64 // No timeout
}
- _, err = t.deployServiceWait(logFunc, requestFunc, timeout)
- return err
+ // Ignore the wait timeout error: logFunc has no concept of completion, so we simply print log entries until the timeout is reached
+ if _, err := t.deployServiceWait(logFunc, requestFunc, timeout); err != nil && !errors.Is(err, errWaitTimeout) {
+ return fmt.Errorf("failed to read logs: %s", err)
+ }
+ return nil
}
func (t *cloudTarget) deployServiceWait(fn responseFunc, reqFn requestFunc, timeout time.Duration) (int, error) {
- deployService, err := t.Service(DeployService, 0, 0, "")
+ deployService, err := t.DeployService()
if err != nil {
return 0, err
}
- return wait(deployService, fn, reqFn, timeout)
+ return wait(deployService, fn, reqFn, timeout, t.retryInterval)
}
-func (t *cloudTarget) waitForEndpoints(timeout time.Duration, runID int64) error {
- if runID > 0 {
- if err := t.waitForRun(runID, timeout); err != nil {
- return err
+func (t *cloudTarget) discoverLatestRun(timeout time.Duration) (int64, error) {
+ runsURL := t.apiOptions.System.RunsURL(t.deploymentOptions.Deployment) + "?limit=1"
+ req, err := http.NewRequest("GET", runsURL, nil)
+ if err != nil {
+ return 0, err
+ }
+ requestFunc := func() *http.Request { return req }
+ var lastRunID int64
+ jobsSuccessFunc := func(status int, response []byte) (bool, error) {
+ if ok, err := isOK(status); !ok {
+ return ok, err
+ }
+ var resp jobsResponse
+ if err := json.Unmarshal(response, &resp); err != nil {
+ return false, err
}
+ if len(resp.Runs) > 0 {
+ lastRunID = resp.Runs[0].ID
+ return true, nil
+ }
+ return false, nil
}
- return t.discoverEndpoints(timeout)
+ _, err = t.deployServiceWait(jobsSuccessFunc, requestFunc, timeout)
+ return lastRunID, err
}
-func (t *cloudTarget) waitForRun(runID int64, timeout time.Duration) error {
- runURL := fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/job/%s-%s/run/%d",
- t.apiOptions.System.URL,
- t.deploymentOptions.Deployment.Application.Tenant, t.deploymentOptions.Deployment.Application.Application, t.deploymentOptions.Deployment.Application.Instance,
- t.deploymentOptions.Deployment.Zone.Environment, t.deploymentOptions.Deployment.Zone.Region, runID)
+func (t *cloudTarget) AwaitDeployment(runID int64, timeout time.Duration) (int64, error) {
+ if runID == LatestDeployment {
+ lastRunID, err := t.discoverLatestRun(timeout)
+ if err != nil {
+ return 0, err
+ }
+ runID = lastRunID
+ }
+ runURL := t.apiOptions.System.RunURL(t.deploymentOptions.Deployment, runID)
req, err := http.NewRequest("GET", runURL, nil)
if err != nil {
- return err
+ return 0, err
}
lastID := int64(-1)
requestFunc := func() *http.Request {
@@ -275,13 +291,14 @@ func (t *cloudTarget) waitForRun(runID int64, timeout time.Duration) error {
req.URL.RawQuery = q.Encode()
return req
}
+ success := false
jobSuccessFunc := func(status int, response []byte) (bool, error) {
if ok, err := isOK(status); !ok {
return ok, err
}
- var resp jobResponse
+ var resp runResponse
if err := json.Unmarshal(response, &resp); err != nil {
- return false, nil
+ return false, err
}
if t.logOptions.Writer != nil {
lastID = t.printLog(resp, lastID)
@@ -292,20 +309,27 @@ func (t *cloudTarget) waitForRun(runID int64, timeout time.Duration) error {
if resp.Status != "success" {
return false, fmt.Errorf("run %d ended with unsuccessful status: %s", runID, resp.Status)
}
- return true, nil
+ success = true
+ return success, nil
}
_, err = t.deployServiceWait(jobSuccessFunc, requestFunc, timeout)
- return err
+ if err != nil {
+ return 0, fmt.Errorf("deployment run %d incomplete%s: %w", runID, waitDescription(timeout), err)
+ }
+ if !success {
+ return 0, fmt.Errorf("deployment run %d incomplete%s", runID, waitDescription(timeout))
+ }
+ return runID, nil
}
-func (t *cloudTarget) printLog(response jobResponse, last int64) int64 {
+func (t *cloudTarget) printLog(response runResponse, last int64) int64 {
if response.LastID == 0 {
return last
}
var msgs []logMessage
for step, stepMsgs := range response.Log {
for _, msg := range stepMsgs {
- if step == "copyVespaLogs" && LogLevel(msg.Type) > t.logOptions.Level || LogLevel(msg.Type) == 3 {
+ if (step == "copyVespaLogs" && LogLevel(msg.Type) > t.logOptions.Level) || LogLevel(msg.Type) == 3 {
continue
}
msgs = append(msgs, msg)
@@ -320,14 +344,14 @@ func (t *cloudTarget) printLog(response jobResponse, last int64) int64 {
return response.LastID
}
-func (t *cloudTarget) discoverEndpoints(timeout time.Duration) error {
+func (t *cloudTarget) discoverEndpoints(timeout time.Duration) (map[string]string, error) {
deploymentURL := fmt.Sprintf("%s/application/v4/tenant/%s/application/%s/instance/%s/environment/%s/region/%s",
t.apiOptions.System.URL,
t.deploymentOptions.Deployment.Application.Tenant, t.deploymentOptions.Deployment.Application.Application, t.deploymentOptions.Deployment.Application.Instance,
t.deploymentOptions.Deployment.Zone.Environment, t.deploymentOptions.Deployment.Zone.Region)
req, err := http.NewRequest("GET", deploymentURL, nil)
if err != nil {
- return err
+ return nil, err
}
urlsByCluster := make(map[string]string)
endpointFunc := func(status int, response []byte) (bool, error) {
@@ -350,11 +374,10 @@ func (t *cloudTarget) discoverEndpoints(timeout time.Duration) error {
return true, nil
}
if _, err := t.deployServiceWait(endpointFunc, func() *http.Request { return req }, timeout); err != nil {
- return err
+ return nil, fmt.Errorf("no endpoints found%s: %w", waitDescription(timeout), err)
}
if len(urlsByCluster) == 0 {
- return fmt.Errorf("no endpoints discovered for %s", t.deploymentOptions.Deployment)
+ return nil, fmt.Errorf("no endpoints found%s", waitDescription(timeout))
}
- t.deploymentOptions.ClusterURLs = urlsByCluster
- return nil
+ return urlsByCluster, nil
}
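AwaitDeployment above polls the run API until the run is no longer active and then checks its status. Below is a self-contained sketch of that decision, using an assumed runResult type that mirrors the Active and Status fields of runResponse in this diff; it is not the CLI's actual implementation.

package main

import (
	"encoding/json"
	"fmt"
)

// runResult carries the two fields the success check needs.
type runResult struct {
	Active bool   `json:"active"`
	Status string `json:"status"`
}

// runFinished mirrors the jobSuccessFunc logic above: keep polling while the
// run is active, fail on a non-success terminal status, succeed otherwise.
func runFinished(body []byte) (bool, error) {
	var r runResult
	if err := json.Unmarshal(body, &r); err != nil {
		return false, err
	}
	if r.Active {
		return false, nil // still running: retry
	}
	if r.Status != "success" {
		return false, fmt.Errorf("run ended with unsuccessful status: %s", r.Status)
	}
	return true, nil
}

func main() {
	done, err := runFinished([]byte(`{"active": false, "status": "success"}`))
	fmt.Println(done, err) // true <nil>
	done, err = runFinished([]byte(`{"active": true, "status": "running"}`))
	fmt.Println(done, err) // false <nil>
}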
diff --git a/client/go/internal/vespa/target_custom.go b/client/go/internal/vespa/target_custom.go
index fd0af0e8d53..b120dd73d9d 100644
--- a/client/go/internal/vespa/target_custom.go
+++ b/client/go/internal/vespa/target_custom.go
@@ -3,8 +3,11 @@ package vespa
import (
"encoding/json"
"fmt"
+ "net"
"net/http"
"net/url"
+ "sort"
+ "strconv"
"time"
"github.com/vespa-engine/vespa/client/go/internal/util"
@@ -12,24 +15,45 @@ import (
)
type customTarget struct {
- targetType string
- baseURL string
- httpClient util.HTTPClient
- tlsOptions TLSOptions
+ targetType string
+ baseURL string
+ httpClient util.HTTPClient
+ tlsOptions TLSOptions
+ retryInterval time.Duration
}
-type serviceConvergeResponse struct {
- Converged bool `json:"converged"`
+type serviceStatus struct {
+ Converged bool `json:"converged"`
+ CurrentGeneration int64 `json:"currentGeneration"`
+ Services []serviceInfo `json:"services"`
+}
+
+type serviceInfo struct {
+ ClusterName string `json:"clusterName"`
+ Type string `json:"type"`
+ Port int `json:"port"`
}
// LocalTarget creates a target for a Vespa platform running locally.
-func LocalTarget(httpClient util.HTTPClient, tlsOptions TLSOptions) Target {
- return &customTarget{targetType: TargetLocal, baseURL: "http://127.0.0.1", httpClient: httpClient, tlsOptions: tlsOptions}
+func LocalTarget(httpClient util.HTTPClient, tlsOptions TLSOptions, retryInterval time.Duration) Target {
+ return &customTarget{
+ targetType: TargetLocal,
+ baseURL: "http://127.0.0.1",
+ httpClient: httpClient,
+ tlsOptions: tlsOptions,
+ retryInterval: retryInterval,
+ }
}
// CustomTarget creates a Target for a Vespa platform running at baseURL.
-func CustomTarget(httpClient util.HTTPClient, baseURL string, tlsOptions TLSOptions) Target {
- return &customTarget{targetType: TargetCustom, baseURL: baseURL, httpClient: httpClient, tlsOptions: tlsOptions}
+func CustomTarget(httpClient util.HTTPClient, baseURL string, tlsOptions TLSOptions, retryInterval time.Duration) Target {
+ return &customTarget{
+ targetType: TargetCustom,
+ baseURL: baseURL,
+ httpClient: httpClient,
+ tlsOptions: tlsOptions,
+ retryInterval: retryInterval,
+ }
}
func (t *customTarget) Type() string { return t.targetType }
@@ -38,95 +62,126 @@ func (t *customTarget) IsCloud() bool { return false }
func (t *customTarget) Deployment() Deployment { return DefaultDeployment }
-func (t *customTarget) createService(name string) (*Service, error) {
- switch name {
- case DeployService, QueryService, DocumentService:
- url, err := t.serviceURL(name, t.targetType)
- if err != nil {
- return nil, err
- }
- return &Service{BaseURL: url, Name: name, httpClient: t.httpClient, TLSOptions: t.tlsOptions}, nil
+func (t *customTarget) PrintLog(options LogOptions) error {
+ return fmt.Errorf("log access is only supported on cloud: run vespa-logfmt on the admin node instead, or export from a container image (here named 'vespa') using docker exec vespa vespa-logfmt")
+}
+
+func (t *customTarget) CheckVersion(version version.Version) error { return nil }
+
+func (t *customTarget) newService(url, name string, deployAPI bool) *Service {
+ return &Service{
+ BaseURL: url,
+ Name: name,
+ deployAPI: deployAPI,
+ httpClient: t.httpClient,
+ TLSOptions: t.tlsOptions,
+ retryInterval: t.retryInterval,
}
- return nil, fmt.Errorf("unknown service: %s", name)
}
-func (t *customTarget) Service(name string, timeout time.Duration, sessionOrRunID int64, cluster string) (*Service, error) {
- service, err := t.createService(name)
+func (t *customTarget) DeployService() (*Service, error) {
+ if t.targetType == TargetCustom {
+ return t.newService(t.baseURL, "", true), nil
+ }
+ u, err := t.urlWithPort(19071)
if err != nil {
return nil, err
}
- if timeout > 0 {
- if name == DeployService {
- status, err := service.Wait(timeout)
- if err != nil {
- return nil, err
- }
- if ok, _ := isOK(status); !ok {
- return nil, fmt.Errorf("got status %d from deploy service at %s", status, service.BaseURL)
- }
- } else {
- if err := t.waitForConvergence(timeout); err != nil {
- return nil, err
- }
+ return t.newService(u.String(), "", true), nil
+}
+
+func (t *customTarget) ContainerServices(timeout time.Duration) ([]*Service, error) {
+ if t.targetType == TargetCustom {
+ return []*Service{t.newService(t.baseURL, "", false)}, nil
+ }
+ status, err := t.serviceStatus(AnyDeployment, timeout)
+ if err != nil {
+ return nil, err
+ }
+ portsByCluster := make(map[string]int)
+ for _, serviceInfo := range status.Services {
+ if serviceInfo.Type != "container" {
+ continue
+ }
+ clusterName := serviceInfo.ClusterName
+ if clusterName == "" { // Vespa version older than 8.206.1, which does not include cluster name in the API
+ clusterName = serviceInfo.Type + strconv.Itoa(serviceInfo.Port)
}
+ portsByCluster[clusterName] = serviceInfo.Port
}
- return service, nil
+ var services []*Service
+ for cluster, port := range portsByCluster {
+ url, err := t.urlWithPort(port)
+ if err != nil {
+ return nil, err
+ }
+ service := t.newService(url.String(), cluster, false)
+ services = append(services, service)
+ }
+ sort.Slice(services, func(i, j int) bool { return services[i].Name < services[j].Name })
+ return services, nil
}
-func (t *customTarget) PrintLog(options LogOptions) error {
- return fmt.Errorf("log access is only supported on cloud: run vespa-logfmt on the admin node instead, or export from a container image (here named 'vespa') using docker exec vespa vespa-logfmt")
+func (t *customTarget) AwaitDeployment(generation int64, timeout time.Duration) (int64, error) {
+ status, err := t.serviceStatus(generation, timeout)
+ if err != nil {
+ return 0, err
+ }
+ return status.CurrentGeneration, nil
}
-func (t *customTarget) CheckVersion(version version.Version) error { return nil }
-
-func (t *customTarget) serviceURL(name string, targetType string) (string, error) {
+func (t *customTarget) urlWithPort(port int) (*url.URL, error) {
u, err := url.Parse(t.baseURL)
if err != nil {
- return "", err
- }
- if targetType == TargetLocal {
- // Use same ports as the vespaengine/vespa container image
- port := ""
- switch name {
- case DeployService:
- port = "19071"
- case QueryService, DocumentService:
- port = "8080"
- default:
- return "", fmt.Errorf("unknown service: %s", name)
- }
- u.Host = u.Host + ":" + port
+ return nil, err
}
- return u.String(), nil
+ if _, _, err := net.SplitHostPort(u.Host); err == nil {
+ return nil, fmt.Errorf("url %s already contains port", u)
+ }
+ u.Host = net.JoinHostPort(u.Host, strconv.Itoa(port))
+ return u, nil
}
-func (t *customTarget) waitForConvergence(timeout time.Duration) error {
- deployService, err := t.createService(DeployService)
+func (t *customTarget) serviceStatus(wantedGeneration int64, timeout time.Duration) (serviceStatus, error) {
+ deployService, err := t.DeployService()
if err != nil {
- return err
+ return serviceStatus{}, err
}
url := fmt.Sprintf("%s/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge", deployService.BaseURL)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
- return err
+ return serviceStatus{}, err
}
+ var status serviceStatus
converged := false
- convergedFunc := func(status int, response []byte) (bool, error) {
- if ok, err := isOK(status); !ok {
+ convergedFunc := func(httpStatus int, response []byte) (bool, error) {
+ if ok, err := isOK(httpStatus); !ok {
return ok, err
}
- var resp serviceConvergeResponse
- if err := json.Unmarshal(response, &resp); err != nil {
- return false, nil
+ if err := json.Unmarshal(response, &status); err != nil {
+ return false, err
}
- converged = resp.Converged
+ converged = wantedGeneration == AnyDeployment ||
+ (wantedGeneration == LatestDeployment && status.Converged) ||
+ status.CurrentGeneration == wantedGeneration
return converged, nil
}
- if _, err := wait(deployService, convergedFunc, func() *http.Request { return req }, timeout); err != nil {
- return err
+ if _, err := wait(deployService, convergedFunc, func() *http.Request { return req }, timeout, t.retryInterval); err != nil {
+ return serviceStatus{}, fmt.Errorf("deployment not converged%s%s: %w", generationDescription(wantedGeneration), waitDescription(timeout), err)
}
if !converged {
- return fmt.Errorf("services have not converged")
+ return serviceStatus{}, fmt.Errorf("deployment not converged%s%s", generationDescription(wantedGeneration), waitDescription(timeout))
+ }
+ return status, nil
+}
+
+func generationDescription(generation int64) string {
+ switch generation {
+ case AnyDeployment:
+ return ""
+ case LatestDeployment:
+ return " on latest generation"
+ default:
+ return fmt.Sprintf(" on generation %d", generation)
}
- return nil
}
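urlWithPort above relies on net.SplitHostPort to reject a base URL that already carries an explicit port before appending one. A standalone sketch of the same check; the function name addPort is illustrative only.

package main

import (
	"fmt"
	"net"
	"net/url"
	"strconv"
)

// addPort appends port to baseURL, refusing URLs that already specify a port.
func addPort(baseURL string, port int) (string, error) {
	u, err := url.Parse(baseURL)
	if err != nil {
		return "", err
	}
	// SplitHostPort succeeds only when the host already includes a port.
	if _, _, err := net.SplitHostPort(u.Host); err == nil {
		return "", fmt.Errorf("url %s already contains a port", u)
	}
	u.Host = net.JoinHostPort(u.Host, strconv.Itoa(port))
	return u.String(), nil
}

func main() {
	fmt.Println(addPort("http://127.0.0.1", 19071))       // http://127.0.0.1:19071 <nil>
	fmt.Println(addPort("http://192.0.2.42:60000", 8080)) // error: already contains a port
}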
diff --git a/client/go/internal/vespa/target_test.go b/client/go/internal/vespa/target_test.go
index 6dc97f496f5..68b60774c94 100644
--- a/client/go/internal/vespa/target_test.go
+++ b/client/go/internal/vespa/target_test.go
@@ -3,145 +3,224 @@ package vespa
import (
"bytes"
- "fmt"
"io"
"net/http"
- "net/http/httptest"
+ "strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
"github.com/vespa-engine/vespa/client/go/internal/mock"
- "github.com/vespa-engine/vespa/client/go/internal/util"
"github.com/vespa-engine/vespa/client/go/internal/version"
)
-type mockVespaApi struct {
- deploymentConverged bool
- authFailure bool
- serverURL string
-}
-
-func (v *mockVespaApi) mockVespaHandler(w http.ResponseWriter, req *http.Request) {
- if v.authFailure {
- response := `{"message":"unauthorized"}`
- w.WriteHeader(401)
- w.Write([]byte(response))
- }
- switch req.URL.Path {
- case "/cli/v1/":
- response := `{"minVersion":"8.0.0"}`
- w.Write([]byte(response))
- case "/application/v4/tenant/t1/application/a1/instance/i1/environment/dev/region/us-north-1":
- response := "{}"
- if v.deploymentConverged {
- response = fmt.Sprintf(`{"endpoints": [{"url": "%s","scope": "zone","cluster": "cluster1"}]}`, v.serverURL)
- }
- w.Write([]byte(response))
- case "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1/run/42":
- var response string
- if v.deploymentConverged {
- response = `{"active": false, "status": "success"}`
- } else {
- response = `{"active": true, "status": "running",
- "lastId": 42,
- "log": {"deployReal": [{"at": 1631707708431,
- "type": "info",
- "message": "Deploying platform version 7.465.17 and application version 1.0.2 ..."}]}}`
- }
- w.Write([]byte(response))
- case "/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge":
- response := fmt.Sprintf(`{"converged": %t}`, v.deploymentConverged)
- w.Write([]byte(response))
- case "/application/v4/tenant/t1/application/a1/instance/i1/environment/dev/region/us-north-1/logs":
- log := `1632738690.905535 host1a.dev.aws-us-east-1c 806/53 logserver-container Container.com.yahoo.container.jdisc.ConfiguredApplication info Switching to the latest deployed set of configurations and components. Application config generation: 52532
-1632738698.600189 host1a.dev.aws-us-east-1c 1723/33590 config-sentinel sentinel.sentinel.config-owner config Sentinel got 3 service elements [tenant(vespa-team), application(music), instance(mpolden)] for config generation 52532
-`
- w.Write([]byte(log))
- case "/status.html":
- w.Write([]byte("OK"))
- case "/ApplicationStatus":
- w.WriteHeader(500)
- w.Write([]byte("Unknown error"))
- default:
- w.WriteHeader(400)
- w.Write([]byte("Invalid path: " + req.URL.Path))
+func TestLocalTarget(t *testing.T) {
+ // Local target uses discovery
+ client := &mock.HTTPClient{}
+ lt := LocalTarget(client, TLSOptions{}, 0)
+ assertServiceURL(t, "http://127.0.0.1:19071", lt, "deploy")
+ for i := 0; i < 2; i++ {
+ response := `
+{
+ "services": [
+ {
+ "host": "foo",
+ "port": 8080,
+ "type": "container",
+ "url": "http://localhost:19071/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge/localhost:8080",
+ "currentGeneration": 1
+ },
+ {
+ "host": "bar",
+ "port": 8080,
+ "type": "container",
+ "url": "http://localhost:19071/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge/localhost:8080",
+ "currentGeneration": 1
+ },
+ {
+ "clusterName": "feed",
+ "host": "localhost",
+ "port": 8081,
+ "type": "container",
+ "url": "http://localhost:19071/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge/localhost:8081",
+ "currentGeneration": 1
+ },
+ {
+ "host": "localhost",
+ "port": 19112,
+ "type": "searchnode",
+ "url": "http://localhost:19071/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge/localhost:19112",
+ "currentGeneration": 1
+ }
+ ],
+ "currentGeneration": 1
+}`
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge",
+ Status: 200,
+ Body: []byte(response),
+ })
}
+ assertServiceURL(t, "http://127.0.0.1:8080", lt, "container8080")
+ assertServiceURL(t, "http://127.0.0.1:8081", lt, "feed")
}
func TestCustomTarget(t *testing.T) {
- lt := LocalTarget(&mock.HTTPClient{}, TLSOptions{})
- assertServiceURL(t, "http://127.0.0.1:19071", lt, "deploy")
- assertServiceURL(t, "http://127.0.0.1:8080", lt, "query")
- assertServiceURL(t, "http://127.0.0.1:8080", lt, "document")
-
- ct := CustomTarget(&mock.HTTPClient{}, "http://192.0.2.42", TLSOptions{})
+ // Custom target always uses URL directly, without discovery
+ ct := CustomTarget(&mock.HTTPClient{}, "http://192.0.2.42", TLSOptions{}, 0)
assertServiceURL(t, "http://192.0.2.42", ct, "deploy")
- assertServiceURL(t, "http://192.0.2.42", ct, "query")
- assertServiceURL(t, "http://192.0.2.42", ct, "document")
-
- ct2 := CustomTarget(&mock.HTTPClient{}, "http://192.0.2.42:60000", TLSOptions{})
+ assertServiceURL(t, "http://192.0.2.42", ct, "")
+ ct2 := CustomTarget(&mock.HTTPClient{}, "http://192.0.2.42:60000", TLSOptions{}, 0)
assertServiceURL(t, "http://192.0.2.42:60000", ct2, "deploy")
- assertServiceURL(t, "http://192.0.2.42:60000", ct2, "query")
- assertServiceURL(t, "http://192.0.2.42:60000", ct2, "document")
+ assertServiceURL(t, "http://192.0.2.42:60000", ct2, "")
}
func TestCustomTargetWait(t *testing.T) {
- vc := mockVespaApi{}
- srv := httptest.NewServer(http.HandlerFunc(vc.mockVespaHandler))
- defer srv.Close()
- target := CustomTarget(util.CreateClient(time.Second*10), srv.URL, TLSOptions{})
+ client := &mock.HTTPClient{}
+ target := CustomTarget(client, "http://192.0.2.42", TLSOptions{}, 0)
+ // Fails once
+ client.NextStatus(500)
+ assertService(t, true, target, "", 0)
+ // Fails multiple times
+ for i := 0; i < 3; i++ {
+ client.NextStatus(500)
+ client.NextResponseError(io.EOF)
+ }
+ // Then succeeds
+ client.NextResponse(mock.HTTPResponse{URI: "/ApplicationStatus", Status: 200})
+ assertService(t, false, target, "", time.Second)
+}
+
+func TestCustomTargetAwaitDeployment(t *testing.T) {
+ client := &mock.HTTPClient{}
+ target := CustomTarget(client, "http://192.0.2.42", TLSOptions{}, 0)
- _, err := target.Service("query", time.Millisecond, 42, "")
+ // Not converged initially
+ _, err := target.AwaitDeployment(42, 0)
assert.NotNil(t, err)
- vc.deploymentConverged = true
- _, err = target.Service("query", time.Millisecond, 42, "")
- assert.Nil(t, err)
+ // Not converged on this generation
+ response := mock.HTTPResponse{
+ URI: "/application/v2/tenant/default/application/default/environment/prod/region/default/instance/default/serviceconverge",
+ Status: 200,
+ Body: []byte(`{"currentGeneration": 42}`),
+ }
+ client.NextResponse(response)
+ _, err = target.AwaitDeployment(41, 0)
+ assert.NotNil(t, err)
- assertServiceWait(t, 200, target, "deploy")
- assertServiceWait(t, 500, target, "query")
- assertServiceWait(t, 500, target, "document")
+ // Converged
+ client.NextResponse(response)
+ convergedID, err := target.AwaitDeployment(42, 0)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(42), convergedID)
}
func TestCloudTargetWait(t *testing.T) {
- vc := mockVespaApi{}
- srv := httptest.NewServer(http.HandlerFunc(vc.mockVespaHandler))
- defer srv.Close()
- vc.serverURL = srv.URL
-
var logWriter bytes.Buffer
- target := createCloudTarget(t, srv.URL, &logWriter)
- vc.authFailure = true
- assertServiceWaitErr(t, 401, true, target, "deploy")
- vc.authFailure = false
- assertServiceWait(t, 200, target, "deploy")
+ target, client := createCloudTarget(t, &logWriter)
+ client.NextStatus(401)
+ assertService(t, true, target, "deploy", time.Second) // No retrying on 4xx
+ client.NextStatus(500)
+ client.NextStatus(500)
+ client.NextResponse(mock.HTTPResponse{URI: "/status.html", Status: 200})
+ assertService(t, false, target, "deploy", time.Second)
- _, err := target.Service("query", time.Millisecond, 42, "")
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/environment/dev/region/us-north-1",
+ Status: 200,
+ Body: []byte(`{"endpoints":[]}`),
+ })
+ _, err := target.ContainerServices(time.Millisecond)
assert.NotNil(t, err)
- vc.deploymentConverged = true
- _, err = target.Service("query", time.Millisecond, 42, "")
+ response := mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/environment/dev/region/us-north-1",
+ Status: 200,
+ Body: []byte(`{
+ "endpoints": [
+ {"url": "http://a.example.com","scope": "zone", "cluster": "default"},
+ {"url": "http://b.example.com","scope": "zone", "cluster": "feed"}
+ ]
+}`),
+ }
+ client.NextResponse(response)
+ services, err := target.ContainerServices(time.Millisecond)
assert.Nil(t, err)
+ assert.Equal(t, 2, len(services))
- assertServiceWait(t, 500, target, "query")
- assertServiceWait(t, 500, target, "document")
+ client.NextResponse(response)
+ client.NextResponse(mock.HTTPResponse{URI: "/ApplicationStatus", Status: 500})
+ assertService(t, true, target, "default", 0)
+ client.NextResponse(response)
+ client.NextResponse(mock.HTTPResponse{URI: "/ApplicationStatus", Status: 200})
+ assertService(t, false, target, "feed", 0)
+}
+
+func TestCloudTargetAwaitDeployment(t *testing.T) {
+ var logWriter bytes.Buffer
+ target, client := createCloudTarget(t, &logWriter)
+
+ runningResponse := mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1/run/42?after=-1",
+ Status: 200,
+ Body: []byte(`{"active": true, "status": "running",
+ "lastId": 42,
+ "log": {"deployReal": [{"at": 1631707708431,
+ "type": "info",
+ "message": "Deploying platform version 7.465.17 and application version 1.0.2 ..."}]}}`),
+ }
+ client.NextResponse(runningResponse)
+ runningResponse.URI = "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1/run/42?after=42"
+ client.NextResponse(runningResponse)
+ // Deployment has not succeeded yet
+ _, err := target.AwaitDeployment(int64(42), time.Second)
+ assert.NotNil(t, err)
// Log timestamp is converted to local time, do the same here in case the local time where tests are run varies
tm := time.Unix(1631707708, 431000)
expectedTime := tm.Format("[15:04:05]")
- assert.Equal(t, expectedTime+" info Deploying platform version 7.465.17 and application version 1.0.2 ...\n", logWriter.String())
+ assert.Equal(t, strings.Repeat(expectedTime+" info Deploying platform version 7.465.17 and application version 1.0.2 ...\n", 2), logWriter.String())
+
+ // Wanted deployment run eventually succeeds
+ runningResponse.URI = "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1/run/42?after=-1"
+ client.NextResponse(runningResponse)
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1/run/42?after=42",
+ Status: 200,
+ Body: []byte(`{"active": false, "status": "success"}`),
+ })
+ convergedID, err := target.AwaitDeployment(int64(42), time.Second)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(42), convergedID)
+
+ // Await latest deployment
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1?limit=1",
+ Status: 200,
+ Body: []byte(`{"runs": [{"id": 1337}]}`),
+ })
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/job/dev-us-north-1/run/1337?after=-1",
+ Status: 200,
+ Body: []byte(`{"active": false, "status": "success"}`),
+ })
+ convergedID, err = target.AwaitDeployment(LatestDeployment, time.Second)
+ assert.Nil(t, err)
+ assert.Equal(t, int64(1337), convergedID)
}
func TestLog(t *testing.T) {
- vc := mockVespaApi{}
- srv := httptest.NewServer(http.HandlerFunc(vc.mockVespaHandler))
- defer srv.Close()
- vc.serverURL = srv.URL
- vc.deploymentConverged = true
-
+ target, client := createCloudTarget(t, io.Discard)
+ client.NextResponse(mock.HTTPResponse{
+ URI: "/application/v4/tenant/t1/application/a1/instance/i1/environment/dev/region/us-north-1/logs?from=-62135596800000",
+ Status: 200,
+ Body: []byte(`1632738690.905535 host1a.dev.aws-us-east-1c 806/53 logserver-container Container.com.yahoo.container.jdisc.ConfiguredApplication info Switching to the latest deployed set of configurations and components. Application config generation: 52532
+1632738698.600189 host1a.dev.aws-us-east-1c 1723/33590 config-sentinel sentinel.sentinel.config-owner config Sentinel got 3 service elements [tenant(vespa-team), application(music), instance(mpolden)] for config generation 52532
+`),
+ })
var buf bytes.Buffer
- target := createCloudTarget(t, srv.URL, io.Discard)
if err := target.PrintLog(LogOptions{Writer: &buf, Level: 3}); err != nil {
t.Fatal(err)
}
@@ -151,23 +230,22 @@ func TestLog(t *testing.T) {
}
func TestCheckVersion(t *testing.T) {
- vc := mockVespaApi{}
- srv := httptest.NewServer(http.HandlerFunc(vc.mockVespaHandler))
- defer srv.Close()
-
- target := createCloudTarget(t, srv.URL, io.Discard)
+ target, client := createCloudTarget(t, io.Discard)
+ for i := 0; i < 3; i++ {
+ client.NextResponse(mock.HTTPResponse{URI: "/cli/v1/", Status: 200, Body: []byte(`{"minVersion":"8.0.0"}`)})
+ }
assert.Nil(t, target.CheckVersion(version.MustParse("8.0.0")))
assert.Nil(t, target.CheckVersion(version.MustParse("8.1.0")))
assert.NotNil(t, target.CheckVersion(version.MustParse("7.0.0")))
}
-func createCloudTarget(t *testing.T, url string, logWriter io.Writer) Target {
+func createCloudTarget(t *testing.T, logWriter io.Writer) (Target, *mock.HTTPClient) {
apiKey, err := CreateAPIKey()
- assert.Nil(t, err)
-
+ require.Nil(t, err)
auth := &mockAuthenticator{}
+ client := &mock.HTTPClient{}
target, err := CloudTarget(
- util.CreateClient(time.Second*10),
+ client,
auth,
auth,
APIOptions{APIKey: apiKey, System: PublicSystem},
@@ -175,42 +253,43 @@ func createCloudTarget(t *testing.T, url string, logWriter io.Writer) Target {
Deployment: Deployment{
Application: ApplicationID{Tenant: "t1", Application: "a1", Instance: "i1"},
Zone: ZoneID{Environment: "dev", Region: "us-north-1"},
+ System: PublicSystem,
},
},
LogOptions{Writer: logWriter},
+ 0,
)
- if err != nil {
- t.Fatal(err)
- }
- if ct, ok := target.(*cloudTarget); ok {
- ct.apiOptions.System.URL = url
- } else {
- t.Fatalf("Wrong target type %T", ct)
- }
- return target
+ require.Nil(t, err)
+ return target, client
}
-func assertServiceURL(t *testing.T, url string, target Target, service string) {
- s, err := target.Service(service, 0, 42, "")
- assert.Nil(t, err)
- assert.Equal(t, url, s.BaseURL)
+func getService(t *testing.T, target Target, name string) (*Service, error) {
+ t.Helper()
+ if name == "deploy" {
+ return target.DeployService()
+ }
+ services, err := target.ContainerServices(0)
+ require.Nil(t, err)
+ return FindService(name, services)
}
-func assertServiceWait(t *testing.T, expectedStatus int, target Target, service string) {
- assertServiceWaitErr(t, expectedStatus, false, target, service)
+func assertServiceURL(t *testing.T, url string, target Target, serviceName string) {
+ t.Helper()
+ service, err := getService(t, target, serviceName)
+ require.Nil(t, err)
+ assert.Equal(t, url, service.BaseURL)
}
-func assertServiceWaitErr(t *testing.T, expectedStatus int, expectErr bool, target Target, service string) {
- s, err := target.Service(service, 0, 42, "")
- assert.Nil(t, err)
-
- status, err := s.Wait(0)
- if expectErr {
+func assertService(t *testing.T, fail bool, target Target, serviceName string, timeout time.Duration) {
+ t.Helper()
+ service, err := getService(t, target, serviceName)
+ require.Nil(t, err)
+ err = service.Wait(timeout)
+ if fail {
assert.NotNil(t, err)
} else {
assert.Nil(t, err)
}
- assert.Equal(t, expectedStatus, status)
}
type mockAuthenticator struct{}
diff --git a/client/js/app/package.json b/client/js/app/package.json
index ec3da2dd941..23ddccbe7e7 100644
--- a/client/js/app/package.json
+++ b/client/js/app/package.json
@@ -20,9 +20,9 @@
"@fortawesome/free-regular-svg-icons": "^6",
"@fortawesome/free-solid-svg-icons": "^6",
"@fortawesome/react-fontawesome": "^0",
- "@mantine/core": "^5",
- "@mantine/hooks": "^5",
- "@mantine/notifications": "^5",
+ "@mantine/core": "^6.0.0",
+ "@mantine/hooks": "^6.0.0",
+ "@mantine/notifications": "^6.0.0",
"@vitejs/plugin-react": "^4",
"esbuild-jest": "^0",
"eslint": "^8",
diff --git a/client/js/app/yarn.lock b/client/js/app/yarn.lock
index f7d4b7ea10d..df9465d2478 100644
--- a/client/js/app/yarn.lock
+++ b/client/js/app/yarn.lock
@@ -15,19 +15,20 @@
"@jridgewell/gen-mapping" "^0.3.0"
"@jridgewell/trace-mapping" "^0.3.9"
-"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.22.5":
- version "7.22.5"
- resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.22.5.tgz#234d98e1551960604f1246e6475891a570ad5658"
- integrity sha512-Xmwn266vad+6DAqEB2A6V/CcZVp62BbwVmcOJc2RPuwih1kw02TjQvWVWlcKGbBPd+8/0V5DEkOcizRGYsspYQ==
+"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.22.10", "@babel/code-frame@^7.22.5":
+ version "7.22.13"
+ resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.22.13.tgz#e3c1c099402598483b7a8c46a721d1038803755e"
+ integrity sha512-XktuhWlJ5g+3TJXc5upd9Ks1HutSArik6jf2eAjYFyIOf4ej3RN+184cZbzDvbPnuTJIUhPKKJE3cIsYTiAT3w==
dependencies:
- "@babel/highlight" "^7.22.5"
+ "@babel/highlight" "^7.22.13"
+ chalk "^2.4.2"
"@babel/compat-data@^7.22.9":
version "7.22.9"
resolved "https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.22.9.tgz#71cdb00a1ce3a329ce4cbec3a44f9fef35669730"
integrity sha512-5UamI7xkUcJ3i9qVDS+KFDEK8/7oJ55/sJMB1Ge7IEapr7KfdfV/HErR+koZwOfd+SgtFKOKRhRakdg++DcJpQ==
-"@babel/core@^7.1.0", "@babel/core@^7.11.6", "@babel/core@^7.12.17", "@babel/core@^7.12.3", "@babel/core@^7.22.5":
+"@babel/core@^7.1.0", "@babel/core@^7.12.17":
version "7.22.9"
resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.22.9.tgz#bd96492c68822198f33e8a256061da3cf391f58f"
integrity sha512-G2EgeufBcYw27U4hhoIwFcgc1XU7TlXJ3mv04oOv1WCuo900U/anZSPzEqNjwdjgffkk2Gs0AN0dW1CKVLcG7w==
@@ -48,20 +49,41 @@
json5 "^2.2.2"
semver "^6.3.1"
-"@babel/generator@^7.22.7", "@babel/generator@^7.22.9", "@babel/generator@^7.7.2":
- version "7.22.9"
- resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.22.9.tgz#572ecfa7a31002fa1de2a9d91621fd895da8493d"
- integrity sha512-KtLMbmicyuK2Ak/FTCJVbDnkN1SlT8/kceFTiuDiiRUUSMnHMidxSCdG4ndkTOHHpoomWe/4xkvHkEOncwjYIw==
+"@babel/core@^7.11.6", "@babel/core@^7.12.3", "@babel/core@^7.22.9":
+ version "7.22.11"
+ resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.22.11.tgz#8033acaa2aa24c3f814edaaa057f3ce0ba559c24"
+ integrity sha512-lh7RJrtPdhibbxndr6/xx0w8+CVlY5FJZiaSz908Fpy+G0xkBFTvwLcKJFF4PJxVfGhVWNebikpWGnOoC71juQ==
dependencies:
- "@babel/types" "^7.22.5"
+ "@ampproject/remapping" "^2.2.0"
+ "@babel/code-frame" "^7.22.10"
+ "@babel/generator" "^7.22.10"
+ "@babel/helper-compilation-targets" "^7.22.10"
+ "@babel/helper-module-transforms" "^7.22.9"
+ "@babel/helpers" "^7.22.11"
+ "@babel/parser" "^7.22.11"
+ "@babel/template" "^7.22.5"
+ "@babel/traverse" "^7.22.11"
+ "@babel/types" "^7.22.11"
+ convert-source-map "^1.7.0"
+ debug "^4.1.0"
+ gensync "^1.0.0-beta.2"
+ json5 "^2.2.3"
+ semver "^6.3.1"
+
+"@babel/generator@^7.22.10", "@babel/generator@^7.22.9", "@babel/generator@^7.7.2":
+ version "7.22.10"
+ resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.22.10.tgz#c92254361f398e160645ac58831069707382b722"
+ integrity sha512-79KIf7YiWjjdZ81JnLujDRApWtl7BxTqWD88+FFdQEIOG8LJ0etDOM7CXuIgGJa55sGOwZVwuEsaLEm0PJ5/+A==
+ dependencies:
+ "@babel/types" "^7.22.10"
"@jridgewell/gen-mapping" "^0.3.2"
"@jridgewell/trace-mapping" "^0.3.17"
jsesc "^2.5.1"
-"@babel/helper-compilation-targets@^7.22.9":
- version "7.22.9"
- resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.9.tgz#f9d0a7aaaa7cd32a3f31c9316a69f5a9bcacb892"
- integrity sha512-7qYrNM6HjpnPHJbopxmb8hSPoZ0gsX8IvUS32JGVoy+pU9e5N0nLr1VjJoR6kA4d9dmGLxNYOjeB8sUDal2WMw==
+"@babel/helper-compilation-targets@^7.22.10", "@babel/helper-compilation-targets@^7.22.9":
+ version "7.22.10"
+ resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.10.tgz#01d648bbc25dd88f513d862ee0df27b7d4e67024"
+ integrity sha512-JMSwHD4J7SLod0idLq5PKgI+6g/hLD/iuWBq08ZX49xE14VpVEojJ5rHWptpirV2j020MvypRLAXAO50igCJ5Q==
dependencies:
"@babel/compat-data" "^7.22.9"
"@babel/helper-validator-option" "^7.22.5"
@@ -141,28 +163,28 @@
resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz#de52000a15a177413c8234fa3a8af4ee8102d0ac"
integrity sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==
-"@babel/helpers@^7.22.6":
- version "7.22.6"
- resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.22.6.tgz#8e61d3395a4f0c5a8060f309fb008200969b5ecd"
- integrity sha512-YjDs6y/fVOYFV8hAf1rxd1QvR9wJe1pDBZ2AREKq/SDayfPzgk0PBnVuTCE5X1acEpMMNOVUqoe+OwiZGJ+OaA==
+"@babel/helpers@^7.22.11", "@babel/helpers@^7.22.6":
+ version "7.22.11"
+ resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.22.11.tgz#b02f5d5f2d7abc21ab59eeed80de410ba70b056a"
+ integrity sha512-vyOXC8PBWaGc5h7GMsNx68OH33cypkEDJCHvYVVgVbbxJDROYVtexSk0gK5iCF1xNjRIN2s8ai7hwkWDq5szWg==
dependencies:
"@babel/template" "^7.22.5"
- "@babel/traverse" "^7.22.6"
- "@babel/types" "^7.22.5"
+ "@babel/traverse" "^7.22.11"
+ "@babel/types" "^7.22.11"
-"@babel/highlight@^7.22.5":
- version "7.22.5"
- resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.22.5.tgz#aa6c05c5407a67ebce408162b7ede789b4d22031"
- integrity sha512-BSKlD1hgnedS5XRnGOljZawtag7H1yPfQp0tdNJCHoH6AZ+Pcm9VvkrK59/Yy593Ypg0zMxH2BxD1VPYUQ7UIw==
+"@babel/highlight@^7.22.13":
+ version "7.22.13"
+ resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.22.13.tgz#9cda839e5d3be9ca9e8c26b6dd69e7548f0cbf16"
+ integrity sha512-C/BaXcnnvBCmHTpz/VGZ8jgtE2aYlW4hxDhseJAWZb7gqGM/qtCK6iZUb0TyKFf7BOUsBH7Q7fkRsDRhg1XklQ==
dependencies:
"@babel/helper-validator-identifier" "^7.22.5"
- chalk "^2.0.0"
+ chalk "^2.4.2"
js-tokens "^4.0.0"
-"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.22.5", "@babel/parser@^7.22.7":
- version "7.22.7"
- resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.7.tgz#df8cf085ce92ddbdbf668a7f186ce848c9036cae"
- integrity sha512-7NF8pOkHP5o2vpmGgNGcfAeCvOYhGLyA3Z4eBQkT1RJlWu47n63bCs93QfJ2hIAFCil7L5P2IWhs1oToVgrL0Q==
+"@babel/parser@^7.1.0", "@babel/parser@^7.14.7", "@babel/parser@^7.20.7", "@babel/parser@^7.22.11", "@babel/parser@^7.22.5", "@babel/parser@^7.22.7":
+ version "7.22.13"
+ resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.22.13.tgz#23fb17892b2be7afef94f573031c2f4b42839a2b"
+ integrity sha512-3l6+4YOvc9wx7VlCSw4yQfcBo01ECA8TicQfbnCPuCEpRQrf+gTUyGdxNw+pyTUyywp6JRD1w0YQs9TpBXYlkw==
"@babel/plugin-syntax-async-generators@^7.8.4":
version "7.8.4"
@@ -301,26 +323,26 @@
"@babel/parser" "^7.22.5"
"@babel/types" "^7.22.5"
-"@babel/traverse@^7.22.6", "@babel/traverse@^7.22.8":
- version "7.22.8"
- resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.22.8.tgz#4d4451d31bc34efeae01eac222b514a77aa4000e"
- integrity sha512-y6LPR+wpM2I3qJrsheCTwhIinzkETbplIgPBbwvqPKc+uljeA5gP+3nP8irdYt1mjQaDnlIcG+dw8OjAco4GXw==
+"@babel/traverse@^7.22.11", "@babel/traverse@^7.22.8":
+ version "7.22.11"
+ resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.22.11.tgz#71ebb3af7a05ff97280b83f05f8865ac94b2027c"
+ integrity sha512-mzAenteTfomcB7mfPtyi+4oe5BZ6MXxWcn4CX+h4IRJ+OOGXBrWU6jDQavkQI9Vuc5P+donFabBfFCcmWka9lQ==
dependencies:
- "@babel/code-frame" "^7.22.5"
- "@babel/generator" "^7.22.7"
+ "@babel/code-frame" "^7.22.10"
+ "@babel/generator" "^7.22.10"
"@babel/helper-environment-visitor" "^7.22.5"
"@babel/helper-function-name" "^7.22.5"
"@babel/helper-hoist-variables" "^7.22.5"
"@babel/helper-split-export-declaration" "^7.22.6"
- "@babel/parser" "^7.22.7"
- "@babel/types" "^7.22.5"
+ "@babel/parser" "^7.22.11"
+ "@babel/types" "^7.22.11"
debug "^4.1.0"
globals "^11.1.0"
-"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.22.5", "@babel/types@^7.3.3":
- version "7.22.5"
- resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.5.tgz#cd93eeaab025880a3a47ec881f4b096a5b786fbe"
- integrity sha512-zo3MIHGOkPOfoRXitsgHLjEXmlDaD/5KU1Uzuc9GNiZPhSqVxVRtxuPaSBZDsYZ9qV88AjtMtWW7ww98loJ9KA==
+"@babel/types@^7.0.0", "@babel/types@^7.20.7", "@babel/types@^7.22.10", "@babel/types@^7.22.11", "@babel/types@^7.22.5", "@babel/types@^7.3.3":
+ version "7.22.11"
+ resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.22.11.tgz#0e65a6a1d4d9cbaa892b2213f6159485fe632ea2"
+ integrity sha512-siazHiGuZRz9aB9NpHy9GOs9xiQPKnMzgdr493iI1M67vRXpnEq8ZOOKzezC5q7zwuQ6sDhdSp4SD9ixKSqKZg==
dependencies:
"@babel/helper-string-parser" "^7.22.5"
"@babel/helper-validator-identifier" "^7.22.5"
@@ -427,115 +449,115 @@
resolved "https://registry.yarnpkg.com/@emotion/weak-memoize/-/weak-memoize-0.3.1.tgz#d0fce5d07b0620caa282b5131c297bb60f9d87e6"
integrity sha512-EsBwpc7hBUJWAsNPBmJy4hxWx12v6bshQsldrVmjxJoc3isbxhOrF2IcCpaXxfvq03NwkI7sbsOLXbYuqF/8Ww==
-"@esbuild/android-arm64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.18.16.tgz#34f562abc0015933aabd41b3d50d8d3359e30155"
- integrity sha512-wsCqSPqLz+6Ov+OM4EthU43DyYVVyfn15S4j1bJzylDpc1r1jZFFfJQNfDuT8SlgwuqpmpJXK4uPlHGw6ve7eA==
-
-"@esbuild/android-arm@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.18.16.tgz#ef6f9aa59a79a9b9330a2e73f7eb402c6630c267"
- integrity sha512-gCHjjQmA8L0soklKbLKA6pgsLk1byULuHe94lkZDzcO3/Ta+bbeewJioEn1Fr7kgy9NWNFy/C+MrBwC6I/WCug==
-
-"@esbuild/android-x64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.18.16.tgz#ed7444cb17542932c67b15e20528686853239cfd"
- integrity sha512-ldsTXolyA3eTQ1//4DS+E15xl0H/3DTRJaRL0/0PgkqDsI0fV/FlOtD+h0u/AUJr+eOTlZv4aC9gvfppo3C4sw==
-
-"@esbuild/darwin-arm64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.18.16.tgz#3c5a083e6e08a50f478fa243939989d86be1c6bf"
- integrity sha512-aBxruWCII+OtluORR/KvisEw0ALuw/qDQWvkoosA+c/ngC/Kwk0lLaZ+B++LLS481/VdydB2u6tYpWxUfnLAIw==
-
-"@esbuild/darwin-x64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.18.16.tgz#a8f3b61bee2807131cbe28eb164ad2b0333b59f5"
- integrity sha512-6w4Dbue280+rp3LnkgmriS1icOUZDyPuZo/9VsuMUTns7SYEiOaJ7Ca1cbhu9KVObAWfmdjUl4gwy9TIgiO5eA==
-
-"@esbuild/freebsd-arm64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.16.tgz#9bdbb3f0e5f0842b21c9b8602e70c106174ac24c"
- integrity sha512-x35fCebhe9s979DGKbVAwXUOcTmCIE32AIqB9CB1GralMIvxdnMLAw5CnID17ipEw9/3MvDsusj/cspYt2ZLNQ==
-
-"@esbuild/freebsd-x64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.18.16.tgz#24f73956436495cc7a5a4bf06be6b661aea6a2c1"
- integrity sha512-YM98f+PeNXF3GbxIJlUsj+McUWG1irguBHkszCIwfr3BXtXZsXo0vqybjUDFfu9a8Wr7uUD/YSmHib+EeGAFlg==
-
-"@esbuild/linux-arm64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.18.16.tgz#244569757f9cbd912f5a595a8ad8144f8c915f13"
- integrity sha512-XIqhNUxJiuy+zsR77+H5Z2f7s4YRlriSJKtvx99nJuG5ATuJPjmZ9n0ANgnGlPCpXGSReFpgcJ7O3SMtzIFeiQ==
-
-"@esbuild/linux-arm@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.18.16.tgz#d63923c63af534032cc5ea0b2a0b3de10f8357f5"
- integrity sha512-b5ABb+5Ha2C9JkeZXV+b+OruR1tJ33ePmv9ZwMeETSEKlmu/WJ45XTTG+l6a2KDsQtJJ66qo/hbSGBtk0XVLHw==
-
-"@esbuild/linux-ia32@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.18.16.tgz#a8825ccea6309f0bccfc5d87b43163ba804c2f20"
- integrity sha512-no+pfEpwnRvIyH+txbBAWtjxPU9grslmTBfsmDndj7bnBmr55rOo/PfQmRfz7Qg9isswt1FP5hBbWb23fRWnow==
-
-"@esbuild/linux-loong64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.18.16.tgz#f530e820fc3c61cf2206155b994aeab53b6d25be"
- integrity sha512-Zbnczs9ZXjmo0oZSS0zbNlJbcwKXa/fcNhYQjahDs4Xg18UumpXG/lwM2lcSvHS3mTrRyCYZvJbmzYc4laRI1g==
-
-"@esbuild/linux-mips64el@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.18.16.tgz#2d47ace539257896865d243641bd6716684a1e82"
- integrity sha512-YMF7hih1HVR/hQVa/ot4UVffc5ZlrzEb3k2ip0nZr1w6fnYypll9td2qcoMLvd3o8j3y6EbJM3MyIcXIVzXvQQ==
-
-"@esbuild/linux-ppc64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.18.16.tgz#d6913e7e9be9e242a6a20402800141bdbe7009f7"
- integrity sha512-Wkz++LZ29lDwUyTSEnzDaaP5OveOgTU69q9IyIw9WqLRxM4BjTBjz9un4G6TOvehWpf/J3gYVFN96TjGHrbcNQ==
-
-"@esbuild/linux-riscv64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.18.16.tgz#8f33b627389c8234fe61f4636c134f17fb1d9b09"
- integrity sha512-LFMKZ30tk78/mUv1ygvIP+568bwf4oN6reG/uczXnz6SvFn4e2QUFpUpZY9iSJT6Qpgstrhef/nMykIXZtZWGQ==
-
-"@esbuild/linux-s390x@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.18.16.tgz#4d44c030f78962cf410f604f92fcc1505e4afdde"
- integrity sha512-3ZC0BgyYHYKfZo3AV2/66TD/I9tlSBaW7eWTEIkrQQKfJIifKMMttXl9FrAg+UT0SGYsCRLI35Gwdmm96vlOjg==
-
-"@esbuild/linux-x64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.18.16.tgz#8846d00e16b1e93eb488c8b4dd51c946adfc236f"
- integrity sha512-xu86B3647DihHJHv/wx3NCz2Dg1gjQ8bbf9cVYZzWKY+gsvxYmn/lnVlqDRazObc3UMwoHpUhNYaZset4X8IPA==
-
-"@esbuild/netbsd-x64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.18.16.tgz#6514a86bd07744f3100d2813ea2fb6520d53e72e"
- integrity sha512-uVAgpimx9Ffw3xowtg/7qQPwHFx94yCje+DoBx+LNm2ePDpQXHrzE+Sb0Si2VBObYz+LcRps15cq+95YM7gkUw==
-
-"@esbuild/openbsd-x64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.18.16.tgz#ae67ce766d58aab6c0e6037f1a76f15df4a2a5fe"
- integrity sha512-6OjCQM9wf7z8/MBi6BOWaTL2AS/SZudsZtBziXMtNI8r/U41AxS9x7jn0ATOwVy08OotwkPqGRMkpPR2wcTJXA==
-
-"@esbuild/sunos-x64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.18.16.tgz#998efe8a58374b7351ac710455051639a6ce6a05"
- integrity sha512-ZoNkruFYJp9d1LbUYCh8awgQDvB9uOMZqlQ+gGEZR7v6C+N6u7vPr86c+Chih8niBR81Q/bHOSKGBK3brJyvkQ==
-
-"@esbuild/win32-arm64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.18.16.tgz#8de33682243508eef8d4de1816df2c05adad2b21"
- integrity sha512-+j4anzQ9hrs+iqO+/wa8UE6TVkKua1pXUb0XWFOx0FiAj6R9INJ+WE//1/Xo6FG1vB5EpH3ko+XcgwiDXTxcdw==
-
-"@esbuild/win32-ia32@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.18.16.tgz#95c9f4274fb3ef9e449d464ffe3e3b7fa091503b"
- integrity sha512-5PFPmq3sSKTp9cT9dzvI67WNfRZGvEVctcZa1KGjDDu4n3H8k59Inbk0du1fz0KrAbKKNpJbdFXQMDUz7BG4rQ==
-
-"@esbuild/win32-x64@0.18.16":
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.18.16.tgz#5be58d31d0120c68af8e38b702e6937ce764cd68"
- integrity sha512-sCIVrrtcWN5Ua7jYXNG1xD199IalrbfV2+0k/2Zf2OyV2FtnQnMgdzgpRAbi4AWlKJj1jkX+M+fEGPQj6BQB4w==
+"@esbuild/android-arm64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.18.20.tgz#984b4f9c8d0377443cc2dfcef266d02244593622"
+ integrity sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==
+
+"@esbuild/android-arm@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.18.20.tgz#fedb265bc3a589c84cc11f810804f234947c3682"
+ integrity sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==
+
+"@esbuild/android-x64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.18.20.tgz#35cf419c4cfc8babe8893d296cd990e9e9f756f2"
+ integrity sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==
+
+"@esbuild/darwin-arm64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz#08172cbeccf95fbc383399a7f39cfbddaeb0d7c1"
+ integrity sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==
+
+"@esbuild/darwin-x64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.18.20.tgz#d70d5790d8bf475556b67d0f8b7c5bdff053d85d"
+ integrity sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==
+
+"@esbuild/freebsd-arm64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.20.tgz#98755cd12707f93f210e2494d6a4b51b96977f54"
+ integrity sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==
+
+"@esbuild/freebsd-x64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.18.20.tgz#c1eb2bff03915f87c29cece4c1a7fa1f423b066e"
+ integrity sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==
+
+"@esbuild/linux-arm64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.18.20.tgz#bad4238bd8f4fc25b5a021280c770ab5fc3a02a0"
+ integrity sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==
+
+"@esbuild/linux-arm@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.18.20.tgz#3e617c61f33508a27150ee417543c8ab5acc73b0"
+ integrity sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==
+
+"@esbuild/linux-ia32@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.18.20.tgz#699391cccba9aee6019b7f9892eb99219f1570a7"
+ integrity sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==
+
+"@esbuild/linux-loong64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.18.20.tgz#e6fccb7aac178dd2ffb9860465ac89d7f23b977d"
+ integrity sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==
+
+"@esbuild/linux-mips64el@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.18.20.tgz#eeff3a937de9c2310de30622a957ad1bd9183231"
+ integrity sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==
+
+"@esbuild/linux-ppc64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.18.20.tgz#2f7156bde20b01527993e6881435ad79ba9599fb"
+ integrity sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==
+
+"@esbuild/linux-riscv64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.18.20.tgz#6628389f210123d8b4743045af8caa7d4ddfc7a6"
+ integrity sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==
+
+"@esbuild/linux-s390x@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.18.20.tgz#255e81fb289b101026131858ab99fba63dcf0071"
+ integrity sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==
+
+"@esbuild/linux-x64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.18.20.tgz#c7690b3417af318a9b6f96df3031a8865176d338"
+ integrity sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==
+
+"@esbuild/netbsd-x64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.18.20.tgz#30e8cd8a3dded63975e2df2438ca109601ebe0d1"
+ integrity sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==
+
+"@esbuild/openbsd-x64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.18.20.tgz#7812af31b205055874c8082ea9cf9ab0da6217ae"
+ integrity sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==
+
+"@esbuild/sunos-x64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.18.20.tgz#d5c275c3b4e73c9b0ecd38d1ca62c020f887ab9d"
+ integrity sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==
+
+"@esbuild/win32-arm64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.18.20.tgz#73bc7f5a9f8a77805f357fab97f290d0e4820ac9"
+ integrity sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==
+
+"@esbuild/win32-ia32@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.18.20.tgz#ec93cbf0ef1085cc12e71e0d661d20569ff42102"
+ integrity sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==
+
+"@esbuild/win32-x64@0.18.20":
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.18.20.tgz#786c5f41f043b07afb1af37683d7c33668858f6d"
+ integrity sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==
"@eslint-community/eslint-utils@^4.2.0":
version "4.4.0"
@@ -544,15 +566,15 @@
dependencies:
eslint-visitor-keys "^3.3.0"
-"@eslint-community/regexpp@^4.4.0":
- version "4.6.0"
- resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.6.0.tgz#5b63f0df5528a44e28aa8578d393de908cc3d4d0"
- integrity sha512-uiPeRISaglZnaZk8vwrjQZ1CxogZeY/4IYft6gBOTqu1WhVXWmCmZMWxUv2Q/pxSvPdp1JPaO62kLOcOkMqWrw==
+"@eslint-community/regexpp@^4.6.1":
+ version "4.8.0"
+ resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.8.0.tgz#11195513186f68d42fbf449f9a7136b2c0c92005"
+ integrity sha512-JylOEEzDiOryeUnFbQz+oViCXS0KsvR1mvHkoMiu5+UiBvy+RYX7tzlIIIEstF/gVa2tj9AQXk3dgnxv6KxhFg==
-"@eslint/eslintrc@^2.1.0":
- version "2.1.0"
- resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-2.1.0.tgz#82256f164cc9e0b59669efc19d57f8092706841d"
- integrity sha512-Lj7DECXqIVCqnqjjHMPna4vn6GJcMgul/wuS0je9OZ9gsL0zzDpKPVtcG1HaDVc+9y+qgXneTeUMbCqXJNpH1A==
+"@eslint/eslintrc@^2.1.2":
+ version "2.1.2"
+ resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-2.1.2.tgz#c6936b4b328c64496692f76944e755738be62396"
+ integrity sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g==
dependencies:
ajv "^6.12.4"
debug "^4.3.2"
@@ -564,10 +586,10 @@
minimatch "^3.1.2"
strip-json-comments "^3.1.1"
-"@eslint/js@8.44.0":
- version "8.44.0"
- resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.44.0.tgz#961a5903c74139390478bdc808bcde3fc45ab7af"
- integrity sha512-Ag+9YM4ocKQx9AarydN0KY2j0ErMHNIocPDrVo8zAE44xLTjEtz81OdR68/cydGtk6m6jDb5Za3r2useMzYmSw==
+"@eslint/js@8.48.0":
+ version "8.48.0"
+ resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.48.0.tgz#642633964e217905436033a2bd08bf322849b7fb"
+ integrity sha512-ZSjtmelB7IJfWD2Fvb7+Z+ChTIKWq6kjda95fLcQKNS5aheVHn4IkfgRQE3sIIzTcSLwLcLZUD9UBt+V7+h+Pw==
"@floating-ui/core@^1.3.1":
version "1.3.1"
@@ -597,31 +619,31 @@
aria-hidden "^1.1.3"
tabbable "^6.0.1"
-"@fortawesome/fontawesome-common-types@6.4.0":
- version "6.4.0"
- resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.0.tgz#88da2b70d6ca18aaa6ed3687832e11f39e80624b"
- integrity sha512-HNii132xfomg5QVZw0HwXXpN22s7VBHQBv9CeOu9tfJnhsWQNd2lmTNi8CSrnw5B+5YOmzu1UoPAyxaXsJ6RgQ==
+"@fortawesome/fontawesome-common-types@6.4.2":
+ version "6.4.2"
+ resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-common-types/-/fontawesome-common-types-6.4.2.tgz#1766039cad33f8ad87f9467b98e0d18fbc8f01c5"
+ integrity sha512-1DgP7f+XQIJbLFCTX1V2QnxVmpLdKdzzo2k8EmvDOePfchaIGQ9eCHj2up3/jNEbZuBqel5OxiaOJf37TWauRA==
"@fortawesome/fontawesome-svg-core@^6":
- version "6.4.0"
- resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.0.tgz#3727552eff9179506e9203d72feb5b1063c11a21"
- integrity sha512-Bertv8xOiVELz5raB2FlXDPKt+m94MQ3JgDfsVbrqNpLU9+UE2E18GKjLKw+d3XbeYPqg1pzyQKGsrzbw+pPaw==
+ version "6.4.2"
+ resolved "https://registry.yarnpkg.com/@fortawesome/fontawesome-svg-core/-/fontawesome-svg-core-6.4.2.tgz#37f4507d5ec645c8b50df6db14eced32a6f9be09"
+ integrity sha512-gjYDSKv3TrM2sLTOKBc5rH9ckje8Wrwgx1CxAPbN5N3Fm4prfi7NsJVWd1jklp7i5uSCVwhZS5qlhMXqLrpAIg==
dependencies:
- "@fortawesome/fontawesome-common-types" "6.4.0"
+ "@fortawesome/fontawesome-common-types" "6.4.2"
"@fortawesome/free-regular-svg-icons@^6":
- version "6.4.0"
- resolved "https://registry.yarnpkg.com/@fortawesome/free-regular-svg-icons/-/free-regular-svg-icons-6.4.0.tgz#cacc53bd8d832d46feead412d9ea9ce80a55e13a"
- integrity sha512-ZfycI7D0KWPZtf7wtMFnQxs8qjBXArRzczABuMQqecA/nXohquJ5J/RCR77PmY5qGWkxAZDxpnUFVXKwtY/jPw==
+ version "6.4.2"
+ resolved "https://registry.yarnpkg.com/@fortawesome/free-regular-svg-icons/-/free-regular-svg-icons-6.4.2.tgz#aee79ed76ce5dd04931352f9d83700761b8b1b25"
+ integrity sha512-0+sIUWnkgTVVXVAPQmW4vxb9ZTHv0WstOa3rBx9iPxrrrDH6bNLsDYuwXF9b6fGm+iR7DKQvQshUH/FJm3ed9Q==
dependencies:
- "@fortawesome/fontawesome-common-types" "6.4.0"
+ "@fortawesome/fontawesome-common-types" "6.4.2"
"@fortawesome/free-solid-svg-icons@^6":
- version "6.4.0"
- resolved "https://registry.yarnpkg.com/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.0.tgz#48c0e790847fa56299e2f26b82b39663b8ad7119"
- integrity sha512-kutPeRGWm8V5dltFP1zGjQOEAzaLZj4StdQhWVZnfGFCvAPVvHh8qk5bRrU4KXnRRRNni5tKQI9PBAdI6MP8nQ==
+ version "6.4.2"
+ resolved "https://registry.yarnpkg.com/@fortawesome/free-solid-svg-icons/-/free-solid-svg-icons-6.4.2.tgz#33a02c4cb6aa28abea7bc082a9626b7922099df4"
+ integrity sha512-sYwXurXUEQS32fZz9hVCUUv/xu49PEJEyUOsA51l6PU/qVgfbTb2glsTEaJngVVT8VqBATRIdh7XVgV1JF1LkA==
dependencies:
- "@fortawesome/fontawesome-common-types" "6.4.0"
+ "@fortawesome/fontawesome-common-types" "6.4.2"
"@fortawesome/react-fontawesome@^0":
version "0.2.0"
@@ -665,109 +687,109 @@
resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98"
integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==
-"@jest/console@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/console/-/console-29.6.1.tgz#b48ba7b9c34b51483e6d590f46e5837f1ab5f639"
- integrity sha512-Aj772AYgwTSr5w8qnyoJ0eDYvN6bMsH3ORH1ivMotrInHLKdUz6BDlaEXHdM6kODaBIkNIyQGzsMvRdOv7VG7Q==
+"@jest/console@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/console/-/console-29.6.4.tgz#a7e2d84516301f986bba0dd55af9d5fe37f46527"
+ integrity sha512-wNK6gC0Ha9QeEPSkeJedQuTQqxZYnDPuDcDhVuVatRvMkL4D0VTvFVZj+Yuh6caG2aOfzkUZ36KtCmLNtR02hw==
dependencies:
- "@jest/types" "^29.6.1"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
- jest-message-util "^29.6.1"
- jest-util "^29.6.1"
+ jest-message-util "^29.6.3"
+ jest-util "^29.6.3"
slash "^3.0.0"
-"@jest/core@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/core/-/core-29.6.1.tgz#fac0d9ddf320490c93356ba201451825231e95f6"
- integrity sha512-CcowHypRSm5oYQ1obz1wfvkjZZ2qoQlrKKvlfPwh5jUXVU12TWr2qMeH8chLMuTFzHh5a1g2yaqlqDICbr+ukQ==
+"@jest/core@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/core/-/core-29.6.4.tgz#265ebee05ec1ff3567757e7a327155c8d6bdb126"
+ integrity sha512-U/vq5ccNTSVgYH7mHnodHmCffGWHJnz/E1BEWlLuK5pM4FZmGfBn/nrJGLjUsSmyx3otCeqc1T31F4y08AMDLg==
dependencies:
- "@jest/console" "^29.6.1"
- "@jest/reporters" "^29.6.1"
- "@jest/test-result" "^29.6.1"
- "@jest/transform" "^29.6.1"
- "@jest/types" "^29.6.1"
+ "@jest/console" "^29.6.4"
+ "@jest/reporters" "^29.6.4"
+ "@jest/test-result" "^29.6.4"
+ "@jest/transform" "^29.6.4"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
ansi-escapes "^4.2.1"
chalk "^4.0.0"
ci-info "^3.2.0"
exit "^0.1.2"
graceful-fs "^4.2.9"
- jest-changed-files "^29.5.0"
- jest-config "^29.6.1"
- jest-haste-map "^29.6.1"
- jest-message-util "^29.6.1"
- jest-regex-util "^29.4.3"
- jest-resolve "^29.6.1"
- jest-resolve-dependencies "^29.6.1"
- jest-runner "^29.6.1"
- jest-runtime "^29.6.1"
- jest-snapshot "^29.6.1"
- jest-util "^29.6.1"
- jest-validate "^29.6.1"
- jest-watcher "^29.6.1"
+ jest-changed-files "^29.6.3"
+ jest-config "^29.6.4"
+ jest-haste-map "^29.6.4"
+ jest-message-util "^29.6.3"
+ jest-regex-util "^29.6.3"
+ jest-resolve "^29.6.4"
+ jest-resolve-dependencies "^29.6.4"
+ jest-runner "^29.6.4"
+ jest-runtime "^29.6.4"
+ jest-snapshot "^29.6.4"
+ jest-util "^29.6.3"
+ jest-validate "^29.6.3"
+ jest-watcher "^29.6.4"
micromatch "^4.0.4"
- pretty-format "^29.6.1"
+ pretty-format "^29.6.3"
slash "^3.0.0"
strip-ansi "^6.0.0"
-"@jest/environment@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.6.1.tgz#ee358fff2f68168394b4a50f18c68278a21fe82f"
- integrity sha512-RMMXx4ws+Gbvw3DfLSuo2cfQlK7IwGbpuEWXCqyYDcqYTI+9Ju3a5hDnXaxjNsa6uKh9PQF2v+qg+RLe63tz5A==
+"@jest/environment@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.6.4.tgz#78ec2c9f8c8829a37616934ff4fea0c028c79f4f"
+ integrity sha512-sQ0SULEjA1XUTHmkBRl7A1dyITM9yb1yb3ZNKPX3KlTd6IG7mWUe3e2yfExtC2Zz1Q+mMckOLHmL/qLiuQJrBQ==
dependencies:
- "@jest/fake-timers" "^29.6.1"
- "@jest/types" "^29.6.1"
+ "@jest/fake-timers" "^29.6.4"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
- jest-mock "^29.6.1"
+ jest-mock "^29.6.3"
-"@jest/expect-utils@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/expect-utils/-/expect-utils-29.6.1.tgz#ab83b27a15cdd203fe5f68230ea22767d5c3acc5"
- integrity sha512-o319vIf5pEMx0LmzSxxkYYxo4wrRLKHq9dP1yJU7FoPTB0LfAKSz8SWD6D/6U3v/O52t9cF5t+MeJiRsfk7zMw==
+"@jest/expect-utils@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/expect-utils/-/expect-utils-29.6.4.tgz#17c7dfe6cec106441f218b0aff4b295f98346679"
+ integrity sha512-FEhkJhqtvBwgSpiTrocquJCdXPsyvNKcl/n7A3u7X4pVoF4bswm11c9d4AV+kfq2Gpv/mM8x7E7DsRvH+djkrg==
dependencies:
- jest-get-type "^29.4.3"
+ jest-get-type "^29.6.3"
-"@jest/expect@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/expect/-/expect-29.6.1.tgz#fef18265188f6a97601f1ea0a2912d81a85b4657"
- integrity sha512-N5xlPrAYaRNyFgVf2s9Uyyvr795jnB6rObuPx4QFvNJz8aAjpZUDfO4bh5G/xuplMID8PrnuF1+SfSyDxhsgYg==
+"@jest/expect@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/expect/-/expect-29.6.4.tgz#1d6ae17dc68d906776198389427ab7ce6179dba6"
+ integrity sha512-Warhsa7d23+3X5bLbrbYvaehcgX5TLYhI03JKoedTiI8uJU4IhqYBWF7OSSgUyz4IgLpUYPkK0AehA5/fRclAA==
dependencies:
- expect "^29.6.1"
- jest-snapshot "^29.6.1"
+ expect "^29.6.4"
+ jest-snapshot "^29.6.4"
-"@jest/fake-timers@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-29.6.1.tgz#c773efddbc61e1d2efcccac008139f621de57c69"
- integrity sha512-RdgHgbXyosCDMVYmj7lLpUwXA4c69vcNzhrt69dJJdf8azUrpRh3ckFCaTPNjsEeRi27Cig0oKDGxy5j7hOgHg==
+"@jest/fake-timers@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-29.6.4.tgz#45a27f093c43d5d989362a3e7a8c70c83188b4f6"
+ integrity sha512-6UkCwzoBK60edXIIWb0/KWkuj7R7Qq91vVInOe3De6DSpaEiqjKcJw4F7XUet24Wupahj9J6PlR09JqJ5ySDHw==
dependencies:
- "@jest/types" "^29.6.1"
+ "@jest/types" "^29.6.3"
"@sinonjs/fake-timers" "^10.0.2"
"@types/node" "*"
- jest-message-util "^29.6.1"
- jest-mock "^29.6.1"
- jest-util "^29.6.1"
+ jest-message-util "^29.6.3"
+ jest-mock "^29.6.3"
+ jest-util "^29.6.3"
-"@jest/globals@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-29.6.1.tgz#c8a8923e05efd757308082cc22893d82b8aa138f"
- integrity sha512-2VjpaGy78JY9n9370H8zGRCFbYVWwjY6RdDMhoJHa1sYfwe6XM/azGN0SjY8kk7BOZApIejQ1BFPyH7FPG0w3A==
+"@jest/globals@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-29.6.4.tgz#4f04f58731b062b44ef23036b79bdb31f40c7f63"
+ integrity sha512-wVIn5bdtjlChhXAzVXavcY/3PEjf4VqM174BM3eGL5kMxLiZD5CLnbmkEyA1Dwh9q8XjP6E8RwjBsY/iCWrWsA==
dependencies:
- "@jest/environment" "^29.6.1"
- "@jest/expect" "^29.6.1"
- "@jest/types" "^29.6.1"
- jest-mock "^29.6.1"
+ "@jest/environment" "^29.6.4"
+ "@jest/expect" "^29.6.4"
+ "@jest/types" "^29.6.3"
+ jest-mock "^29.6.3"
-"@jest/reporters@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-29.6.1.tgz#3325a89c9ead3cf97ad93df3a427549d16179863"
- integrity sha512-9zuaI9QKr9JnoZtFQlw4GREQbxgmNYXU6QuWtmuODvk5nvPUeBYapVR/VYMyi2WSx3jXTLJTJji8rN6+Cm4+FA==
+"@jest/reporters@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-29.6.4.tgz#9d6350c8a2761ece91f7946e97ab0dabc06deab7"
+ integrity sha512-sxUjWxm7QdchdrD3NfWKrL8FBsortZeibSJv4XLjESOOjSUOkjQcb0ZHJwfhEGIvBvTluTzfG2yZWZhkrXJu8g==
dependencies:
"@bcoe/v8-coverage" "^0.2.3"
- "@jest/console" "^29.6.1"
- "@jest/test-result" "^29.6.1"
- "@jest/transform" "^29.6.1"
- "@jest/types" "^29.6.1"
+ "@jest/console" "^29.6.4"
+ "@jest/test-result" "^29.6.4"
+ "@jest/transform" "^29.6.4"
+ "@jest/types" "^29.6.3"
"@jridgewell/trace-mapping" "^0.3.18"
"@types/node" "*"
chalk "^4.0.0"
@@ -776,52 +798,52 @@
glob "^7.1.3"
graceful-fs "^4.2.9"
istanbul-lib-coverage "^3.0.0"
- istanbul-lib-instrument "^5.1.0"
+ istanbul-lib-instrument "^6.0.0"
istanbul-lib-report "^3.0.0"
istanbul-lib-source-maps "^4.0.0"
istanbul-reports "^3.1.3"
- jest-message-util "^29.6.1"
- jest-util "^29.6.1"
- jest-worker "^29.6.1"
+ jest-message-util "^29.6.3"
+ jest-util "^29.6.3"
+ jest-worker "^29.6.4"
slash "^3.0.0"
string-length "^4.0.1"
strip-ansi "^6.0.0"
v8-to-istanbul "^9.0.1"
-"@jest/schemas@^29.6.0":
- version "29.6.0"
- resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.6.0.tgz#0f4cb2c8e3dca80c135507ba5635a4fd755b0040"
- integrity sha512-rxLjXyJBTL4LQeJW3aKo0M/+GkCOXsO+8i9Iu7eDb6KwtP65ayoDsitrdPBtujxQ88k4wI2FNYfa6TOGwSn6cQ==
+"@jest/schemas@^29.6.3":
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.6.3.tgz#430b5ce8a4e0044a7e3819663305a7b3091c8e03"
+ integrity sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==
dependencies:
"@sinclair/typebox" "^0.27.8"
-"@jest/source-map@^29.6.0":
- version "29.6.0"
- resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-29.6.0.tgz#bd34a05b5737cb1a99d43e1957020ac8e5b9ddb1"
- integrity sha512-oA+I2SHHQGxDCZpbrsCQSoMLb3Bz547JnM+jUr9qEbuw0vQlWZfpPS7CO9J7XiwKicEz9OFn/IYoLkkiUD7bzA==
+"@jest/source-map@^29.6.3":
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-29.6.3.tgz#d90ba772095cf37a34a5eb9413f1b562a08554c4"
+ integrity sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==
dependencies:
"@jridgewell/trace-mapping" "^0.3.18"
callsites "^3.0.0"
graceful-fs "^4.2.9"
-"@jest/test-result@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-29.6.1.tgz#850e565a3f58ee8ca6ec424db00cb0f2d83c36ba"
- integrity sha512-Ynr13ZRcpX6INak0TPUukU8GWRfm/vAytE3JbJNGAvINySWYdfE7dGZMbk36oVuK4CigpbhMn8eg1dixZ7ZJOw==
+"@jest/test-result@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-29.6.4.tgz#adf5c79f6e1fb7405ad13d67d9e2b6ff54b54c6b"
+ integrity sha512-uQ1C0AUEN90/dsyEirgMLlouROgSY+Wc/JanVVk0OiUKa5UFh7sJpMEM3aoUBAz2BRNvUJ8j3d294WFuRxSyOQ==
dependencies:
- "@jest/console" "^29.6.1"
- "@jest/types" "^29.6.1"
+ "@jest/console" "^29.6.4"
+ "@jest/types" "^29.6.3"
"@types/istanbul-lib-coverage" "^2.0.0"
collect-v8-coverage "^1.0.0"
-"@jest/test-sequencer@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-29.6.1.tgz#e3e582ee074dd24ea9687d7d1aaf05ee3a9b068e"
- integrity sha512-oBkC36PCDf/wb6dWeQIhaviU0l5u6VCsXa119yqdUosYAt7/FbQU2M2UoziO3igj/HBDEgp57ONQ3fm0v9uyyg==
+"@jest/test-sequencer@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-29.6.4.tgz#86aef66aaa22b181307ed06c26c82802fb836d7b"
+ integrity sha512-E84M6LbpcRq3fT4ckfKs9ryVanwkaIB0Ws9bw3/yP4seRLg/VaCZ/LgW0MCq5wwk4/iP/qnilD41aj2fsw2RMg==
dependencies:
- "@jest/test-result" "^29.6.1"
+ "@jest/test-result" "^29.6.4"
graceful-fs "^4.2.9"
- jest-haste-map "^29.6.1"
+ jest-haste-map "^29.6.4"
slash "^3.0.0"
"@jest/transform@^26.6.2":
@@ -845,22 +867,22 @@
source-map "^0.6.1"
write-file-atomic "^3.0.0"
-"@jest/transform@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-29.6.1.tgz#acb5606019a197cb99beda3c05404b851f441c92"
- integrity sha512-URnTneIU3ZjRSaf906cvf6Hpox3hIeJXRnz3VDSw5/X93gR8ycdfSIEy19FlVx8NFmpN7fe3Gb1xF+NjXaQLWg==
+"@jest/transform@^29.6.4":
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-29.6.4.tgz#a6bc799ef597c5d85b2e65a11fd96b6b239bab5a"
+ integrity sha512-8thgRSiXUqtr/pPGY/OsyHuMjGyhVnWrFAwoxmIemlBuiMyU1WFs0tXoNxzcr4A4uErs/ABre76SGmrr5ab/AA==
dependencies:
"@babel/core" "^7.11.6"
- "@jest/types" "^29.6.1"
+ "@jest/types" "^29.6.3"
"@jridgewell/trace-mapping" "^0.3.18"
babel-plugin-istanbul "^6.1.1"
chalk "^4.0.0"
convert-source-map "^2.0.0"
fast-json-stable-stringify "^2.1.0"
graceful-fs "^4.2.9"
- jest-haste-map "^29.6.1"
- jest-regex-util "^29.4.3"
- jest-util "^29.6.1"
+ jest-haste-map "^29.6.4"
+ jest-regex-util "^29.6.3"
+ jest-util "^29.6.3"
micromatch "^4.0.4"
pirates "^4.0.4"
slash "^3.0.0"
@@ -877,12 +899,12 @@
"@types/yargs" "^15.0.0"
chalk "^4.0.0"
-"@jest/types@^29.6.1":
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.6.1.tgz#ae79080278acff0a6af5eb49d063385aaa897bf2"
- integrity sha512-tPKQNMPuXgvdOn2/Lg9HNfUvjYVGolt04Hp03f5hAk878uwOLikN+JzeLY0HcVgKgFl9Hs3EIqpu3WX27XNhnw==
+"@jest/types@^29.6.3":
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.6.3.tgz#1131f8cf634e7e84c5e77bab12f052af585fba59"
+ integrity sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw==
dependencies:
- "@jest/schemas" "^29.6.0"
+ "@jest/schemas" "^29.6.3"
"@types/istanbul-lib-coverage" "^2.0.0"
"@types/istanbul-reports" "^3.0.0"
"@types/node" "*"
@@ -898,70 +920,66 @@
"@jridgewell/sourcemap-codec" "^1.4.10"
"@jridgewell/trace-mapping" "^0.3.9"
-"@jridgewell/resolve-uri@3.1.0":
- version "3.1.0"
- resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz#2203b118c157721addfe69d47b70465463066d78"
- integrity sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==
+"@jridgewell/resolve-uri@^3.1.0":
+ version "3.1.1"
+ resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.1.tgz#c08679063f279615a3326583ba3a90d1d82cc721"
+ integrity sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==
"@jridgewell/set-array@^1.0.1":
version "1.1.2"
resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.1.2.tgz#7c6cf998d6d20b914c0a55a91ae928ff25965e72"
integrity sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==
-"@jridgewell/sourcemap-codec@1.4.14":
- version "1.4.14"
- resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.14.tgz#add4c98d341472a289190b424efbdb096991bb24"
- integrity sha512-XPSJHWmi394fuUuzDnGz1wiKqWfo1yXecHQMRf2l6hztTO+nPru658AyDngaBe7isIxEkRsPR3FZh+s7iVa4Uw==
-
-"@jridgewell/sourcemap-codec@^1.4.10":
+"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14":
version "1.4.15"
resolved "https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32"
integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==
"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.9":
- version "0.3.18"
- resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.18.tgz#25783b2086daf6ff1dcb53c9249ae480e4dd4cd6"
- integrity sha512-w+niJYzMHdd7USdiH2U6869nqhD2nbfZXND5Yp93qIbEmnDNk7PD48o+YchRVpzMU7M6jVCbenTR7PA1FLQ9pA==
+ version "0.3.19"
+ resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.19.tgz#f8a3249862f91be48d3127c3cfe992f79b4b8811"
+ integrity sha512-kf37QtfW+Hwx/buWGMPcR60iF9ziHa6r/CZJIHbmcm4+0qrXiVdxegAH0F6yddEVQ7zdkjcGCgCzUu+BcbhQxw==
dependencies:
- "@jridgewell/resolve-uri" "3.1.0"
- "@jridgewell/sourcemap-codec" "1.4.14"
+ "@jridgewell/resolve-uri" "^3.1.0"
+ "@jridgewell/sourcemap-codec" "^1.4.14"
-"@mantine/core@^5":
- version "5.10.5"
- resolved "https://registry.yarnpkg.com/@mantine/core/-/core-5.10.5.tgz#071e14dcf8b94a36d0243f1f4b30305ac0074afd"
- integrity sha512-F4tqHSEVM9D6/iSqHfPda+Xl5XgSEPHAAkT01Zwzj4Jnbd10qGrlqr/SFUop2CIcuKYnmra9XltUahUPXBC2BQ==
+"@mantine/core@^6.0.0":
+ version "6.0.19"
+ resolved "https://registry.yarnpkg.com/@mantine/core/-/core-6.0.19.tgz#612413f0e8eb117e6a39068a625c6ccf2ae2ccdd"
+ integrity sha512-SvMZCOgCc315SIg6hkuLM0ZnBaAac4VFDHZ0BM5LIE4MPJUpe4QOLsg/5RGxOa5s7JRCtu/dawH3/9frvfDrhw==
dependencies:
"@floating-ui/react" "^0.19.1"
- "@mantine/styles" "5.10.5"
- "@mantine/utils" "5.10.5"
+ "@mantine/styles" "6.0.19"
+ "@mantine/utils" "6.0.19"
"@radix-ui/react-scroll-area" "1.0.2"
+ react-remove-scroll "^2.5.5"
react-textarea-autosize "8.3.4"
-"@mantine/hooks@^5":
- version "5.10.5"
- resolved "https://registry.yarnpkg.com/@mantine/hooks/-/hooks-5.10.5.tgz#568586a0fa649be46f057ddc920bf98761017ffb"
- integrity sha512-hFQp71QZDfivPzfIUOQZfMKLiOL/Cn2EnzacRlbUr55myteTfzYN8YMt+nzniE/6c4IRopFHEAdbKEtfyQc6kg==
+"@mantine/hooks@^6.0.0":
+ version "6.0.19"
+ resolved "https://registry.yarnpkg.com/@mantine/hooks/-/hooks-6.0.19.tgz#39f61434304f687d3ba7bf0040c5adf380c7c4b3"
+ integrity sha512-YkmuB6kmoenU1PVuE8tLBA+6RJIY9hIsGyIQG1yuPAy6SLWNFT8g2T9YvI/psqsUbVIYGaNEXg8zq42xbxnD8Q==
-"@mantine/notifications@^5":
- version "5.10.5"
- resolved "https://registry.yarnpkg.com/@mantine/notifications/-/notifications-5.10.5.tgz#2f3f2d013ce4637e64e935aa5dd8c1df1a7acec0"
- integrity sha512-IzTAXA7Zb9DcI94Mv5O2OinhLmI7fvs/VutDw9uCpp6OHtLuF/XN1d262jrsGhMZT0c4nuUsotSLFZF3GWZwXg==
+"@mantine/notifications@^6.0.0":
+ version "6.0.19"
+ resolved "https://registry.yarnpkg.com/@mantine/notifications/-/notifications-6.0.19.tgz#1aaf82fd4ac5b32faca8baf17475199d3d6ff58e"
+ integrity sha512-Cr2y8g2nM8bUAP+JYcKdT+a3d+1awUd40EMrDMwb+yUXUSt1amZerYQ7qRuezqvBgiViy/HGnM4WfeF3sfWJRQ==
dependencies:
- "@mantine/utils" "5.10.5"
+ "@mantine/utils" "6.0.19"
react-transition-group "4.4.2"
-"@mantine/styles@5.10.5":
- version "5.10.5"
- resolved "https://registry.yarnpkg.com/@mantine/styles/-/styles-5.10.5.tgz#ace82a71b4fe3d14ee14638f1735d5680d93d36d"
- integrity sha512-0NXk8c/XGzuTUkZc6KceF2NaTCMEu5mHR4ru0x+ttb9DGnLpHuGWduTHjSfr4hl6eAJgedD0zauO+VAhDzO9zA==
+"@mantine/styles@6.0.19":
+ version "6.0.19"
+ resolved "https://registry.yarnpkg.com/@mantine/styles/-/styles-6.0.19.tgz#7d9a6f2c2a9b345dfd9d12f8fd66af3976d67ab2"
+ integrity sha512-0tg3Dvv/kxCc1mbQVFhZaIhlSbSbV1F/3xG0NRlP2DF23mw9088o5KaIXGKM6XkXU6OEt/f99nDCUHBk2ixtUg==
dependencies:
clsx "1.1.1"
csstype "3.0.9"
-"@mantine/utils@5.10.5":
- version "5.10.5"
- resolved "https://registry.yarnpkg.com/@mantine/utils/-/utils-5.10.5.tgz#ad620d714e545c6efb7f69d94ce46e3fd2fe01fb"
- integrity sha512-FGMq4dGs5HhDAtI0z46uzxzKKPmZ3h5uKUyKg1ZHoFR1mBtcUMbB6FylFmHqKFRWlJ5IXqX9dwmiVrLYUOfTmA==
+"@mantine/utils@6.0.19":
+ version "6.0.19"
+ resolved "https://registry.yarnpkg.com/@mantine/utils/-/utils-6.0.19.tgz#0197fccc5649259787d5468228139f8815909803"
+ integrity sha512-duvtnaW1gDR2gnvUqnWhl6DMW7sN0HEWqS8Z/BbwaMi75U+Xp17Q72R9JtiIrxQbzsq+KvH9L9B/pxMVwbLirg==
"@nodelib/fs.scandir@2.1.5":
version "2.1.5"
@@ -1086,10 +1104,10 @@
dependencies:
"@babel/runtime" "^7.13.10"
-"@remix-run/router@1.7.2":
- version "1.7.2"
- resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.7.2.tgz#cba1cf0a04bc04cb66027c51fa600e9cbc388bc8"
- integrity sha512-7Lcn7IqGMV+vizMPoEl5F0XDshcdDYtMI6uJLQdQz5CfZAwy3vvGKYSUk789qndt5dEC4HfSjviSYlSoHGL2+A==
+"@remix-run/router@1.8.0":
+ version "1.8.0"
+ resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.8.0.tgz#e848d2f669f601544df15ce2a313955e4bf0bafc"
+ integrity sha512-mrfKqIHnSZRyIzBcanNJmVQELTnX+qagEDlcKO90RgRBVOZGSGvZKeDihTRfWcqoDn5N/NkUcwWTccnpN18Tfg==
"@sinclair/typebox@^0.27.8":
version "0.27.8"
@@ -1180,20 +1198,15 @@
integrity sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ==
"@types/node@*":
- version "20.4.4"
- resolved "https://registry.yarnpkg.com/@types/node/-/node-20.4.4.tgz#c79c7cc22c9d0e97a7944954c9e663bcbd92b0cb"
- integrity sha512-CukZhumInROvLq3+b5gLev+vgpsIqC2D0deQr/yS1WnxvmYLlJXZpaQrQiseMY+6xusl79E04UjWoqyr+t1/Ew==
+ version "20.5.7"
+ resolved "https://registry.yarnpkg.com/@types/node/-/node-20.5.7.tgz#4b8ecac87fbefbc92f431d09c30e176fc0a7c377"
+ integrity sha512-dP7f3LdZIysZnmvP3ANJYTSwg+wLLl8p7RqniVlV7j+oXSXAbt9h0WIBFmJy5inWZoX9wZN6eXx+YXd9Rh3RBA==
"@types/parse-json@^4.0.0":
version "4.0.0"
resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0"
integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==
-"@types/prettier@^2.1.5":
- version "2.7.3"
- resolved "https://registry.yarnpkg.com/@types/prettier/-/prettier-2.7.3.tgz#3e51a17e291d01d17d3fc61422015a933af7a08f"
- integrity sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA==
-
"@types/stack-utils@^2.0.0":
version "2.0.1"
resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-2.0.1.tgz#20f18294f797f2209b5f65c8e3b5c8e8261d127c"
@@ -1219,11 +1232,11 @@
"@types/yargs-parser" "*"
"@vitejs/plugin-react@^4":
- version "4.0.3"
- resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.0.3.tgz#007d27ad5ef1eac4bf8c29e168ba9be2203c371b"
- integrity sha512-pwXDog5nwwvSIzwrvYYmA2Ljcd/ZNlcsSG2Q9CNDBwnsd55UGAyr2doXtB5j+2uymRCnCfExlznzzSFbBRcoCg==
+ version "4.0.4"
+ resolved "https://registry.yarnpkg.com/@vitejs/plugin-react/-/plugin-react-4.0.4.tgz#31c3f779dc534e045c4b134e7cf7b150af0a7646"
+ integrity sha512-7wU921ABnNYkETiMaZy7XqpueMnpu5VxvVps13MjmCo+utBdD79sZzrApHawHtVX66cCJQQTXFcjH0y9dSUK8g==
dependencies:
- "@babel/core" "^7.22.5"
+ "@babel/core" "^7.22.9"
"@babel/plugin-transform-react-jsx-self" "^7.22.5"
"@babel/plugin-transform-react-jsx-source" "^7.22.5"
react-refresh "^0.14.0"
@@ -1238,7 +1251,7 @@ acorn@^8.9.0:
resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.10.0.tgz#8be5b3907a67221a81ab23c7889c4c5526b62ec5"
integrity sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==
-ajv@^6.10.0, ajv@^6.12.4:
+ajv@^6.12.4:
version "6.12.6"
resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4"
integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==
@@ -1363,6 +1376,17 @@ array-unique@^0.3.2:
resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.3.2.tgz#a894b75d4bc4f6cd679ef3244a9fd8f46ae2d428"
integrity sha512-SleRWjh9JUud2wH1hPs9rZBZ33H6T9HOiL0uwGnGx9FpE6wKGyfWugmbkEOIs6qWrZhg0LWeLziLrEwQJhs5mQ==
+array.prototype.findlastindex@^1.2.2:
+ version "1.2.2"
+ resolved "https://registry.yarnpkg.com/array.prototype.findlastindex/-/array.prototype.findlastindex-1.2.2.tgz#bc229aef98f6bd0533a2bc61ff95209875526c9b"
+ integrity sha512-tb5thFFlUcp7NdNF6/MpDk/1r/4awWG1FIz3YqDf+/zJSTezBb+/5WViH41obXULHVpDzoiCLpJ/ZO9YbJMsdw==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.20.4"
+ es-shim-unscopables "^1.0.0"
+ get-intrinsic "^1.1.3"
+
array.prototype.flat@^1.3.1:
version "1.3.1"
resolved "https://registry.yarnpkg.com/array.prototype.flat/-/array.prototype.flat-1.3.1.tgz#ffc6576a7ca3efc2f46a143b9d1dda9b4b3cf5e2"
@@ -1416,6 +1440,13 @@ assign-symbols@^1.0.0:
resolved "https://registry.yarnpkg.com/assign-symbols/-/assign-symbols-1.0.0.tgz#59667f41fadd4f20ccbc2bb96b8d4f7f78ec0367"
integrity sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==
+asynciterator.prototype@^1.0.0:
+ version "1.0.0"
+ resolved "https://registry.yarnpkg.com/asynciterator.prototype/-/asynciterator.prototype-1.0.0.tgz#8c5df0514936cdd133604dfcc9d3fb93f09b2b62"
+ integrity sha512-wwHYEIS0Q80f5mosx3L/dfG5t5rjEa9Ft51GTaNt862EnpyGHpgz2RkZvLPp1oF5TnAiTohkEKVEu8pQPJI7Vg==
+ dependencies:
+ has-symbols "^1.0.3"
+
atob@^2.1.2:
version "2.1.2"
resolved "https://registry.yarnpkg.com/atob/-/atob-2.1.2.tgz#6d9517eb9e030d2436666651e86bd9f6f13533c9"
@@ -1440,15 +1471,15 @@ babel-jest@^26.6.3:
graceful-fs "^4.2.4"
slash "^3.0.0"
-babel-jest@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.6.1.tgz#a7141ad1ed5ec50238f3cd36127636823111233a"
- integrity sha512-qu+3bdPEQC6KZSPz+4Fyjbga5OODNcp49j6GKzG1EKbkfyJBxEYGVUmVGpwCSeGouG52R4EgYMLb6p9YeEEQ4A==
+babel-jest@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.6.4.tgz#98dbc45d1c93319c82a8ab4a478b670655dd2585"
+ integrity sha512-meLj23UlSLddj6PC+YTOFRgDAtjnZom8w/ACsrx0gtPtv5cJZk0A5Unk5bV4wixD7XaPCN1fQvpww8czkZURmw==
dependencies:
- "@jest/transform" "^29.6.1"
+ "@jest/transform" "^29.6.4"
"@types/babel__core" "^7.1.14"
babel-plugin-istanbul "^6.1.1"
- babel-preset-jest "^29.5.0"
+ babel-preset-jest "^29.6.3"
chalk "^4.0.0"
graceful-fs "^4.2.9"
slash "^3.0.0"
@@ -1474,10 +1505,10 @@ babel-plugin-jest-hoist@^26.6.2:
"@types/babel__core" "^7.0.0"
"@types/babel__traverse" "^7.0.6"
-babel-plugin-jest-hoist@^29.5.0:
- version "29.5.0"
- resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.5.0.tgz#a97db437936f441ec196990c9738d4b88538618a"
- integrity sha512-zSuuuAlTMT4mzLj2nPnUm6fsE6270vdOfnpbJ+RmruU75UhLFvL0N2NgI7xpeS7NaB6hGqmd5pVpGTDYvi4Q3w==
+babel-plugin-jest-hoist@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz#aadbe943464182a8922c3c927c3067ff40d24626"
+ integrity sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg==
dependencies:
"@babel/template" "^7.3.3"
"@babel/types" "^7.3.3"
@@ -1519,12 +1550,12 @@ babel-preset-jest@^26.6.2:
babel-plugin-jest-hoist "^26.6.2"
babel-preset-current-node-syntax "^1.0.0"
-babel-preset-jest@^29.5.0:
- version "29.5.0"
- resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-29.5.0.tgz#57bc8cc88097af7ff6a5ab59d1cd29d52a5916e2"
- integrity sha512-JOMloxOqdiBSxMAzjRaH023/vvcaSaec49zvg+2LmNsktC7ei39LTJGw02J+9uUtTZUq6xbLyJ4dxe9sSmIuAg==
+babel-preset-jest@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz#fa05fa510e7d493896d7b0dd2033601c840f171c"
+ integrity sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA==
dependencies:
- babel-plugin-jest-hoist "^29.5.0"
+ babel-plugin-jest-hoist "^29.6.3"
babel-preset-current-node-syntax "^1.0.0"
balanced-match@^1.0.0:
@@ -1589,13 +1620,13 @@ braces@^3.0.2:
fill-range "^7.0.1"
browserslist@^4.21.9:
- version "4.21.9"
- resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.9.tgz#e11bdd3c313d7e2a9e87e8b4b0c7872b13897635"
- integrity sha512-M0MFoZzbUrRU4KNfCrDLnvyE7gub+peetoTid3TBIqtunaDJyXlwhakT+/VkvSXcfIzFfK/nkCs4nmyTmxdNSg==
+ version "4.21.10"
+ resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.21.10.tgz#dbbac576628c13d3b2231332cb2ec5a46e015bb0"
+ integrity sha512-bipEBdZfVH5/pwrvqc+Ub0kUPVfGUhlKxbvfD+z1BDnPEO/X98ruXGA1WP5ASpAFKan7Qr6j736IacbZQuAlKQ==
dependencies:
- caniuse-lite "^1.0.30001503"
- electron-to-chromium "^1.4.431"
- node-releases "^2.0.12"
+ caniuse-lite "^1.0.30001517"
+ electron-to-chromium "^1.4.477"
+ node-releases "^2.0.13"
update-browserslist-db "^1.0.11"
bser@2.1.1:
@@ -1655,10 +1686,10 @@ camelcase@^6.2.0:
resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a"
integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==
-caniuse-lite@^1.0.30001503:
- version "1.0.30001517"
- resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001517.tgz#90fabae294215c3495807eb24fc809e11dc2f0a8"
- integrity sha512-Vdhm5S11DaFVLlyiKu4hiUTkpZu+y1KA/rZZqVQfOD5YdDT/eQKlkt7NaE0WGOFgX32diqt9MiP9CAiFeRklaA==
+caniuse-lite@^1.0.30001517:
+ version "1.0.30001524"
+ resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001524.tgz#1e14bce4f43c41a7deaeb5ebfe86664fe8dadb80"
+ integrity sha512-Jj917pJtYg9HSJBF95HVX3Cdr89JUyLT4IZ8SvM5aDRni95swKgYi3TgYLH5hnGfPE/U1dg6IfZ50UsIlLkwSA==
capture-exit@^2.0.0:
version "2.0.0"
@@ -1667,7 +1698,7 @@ capture-exit@^2.0.0:
dependencies:
rsvp "^4.8.4"
-chalk@^2.0.0:
+chalk@^2.4.2:
version "2.4.2"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424"
integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
@@ -1870,10 +1901,10 @@ decode-uri-component@^0.2.0:
resolved "https://registry.yarnpkg.com/decode-uri-component/-/decode-uri-component-0.2.2.tgz#e69dbe25d37941171dd540e024c444cd5188e1e9"
integrity sha512-FqUYQ+8o158GyGTrMFJms9qh3CqTKvAqgqsTnkLI8sKu0028orqBhxNMFkFen0zGyg6epACD32pjVk58ngIErQ==
-dedent@^0.7.0:
- version "0.7.0"
- resolved "https://registry.yarnpkg.com/dedent/-/dedent-0.7.0.tgz#2495ddbaf6eb874abb0e1be9df22d2e5a544326c"
- integrity sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA==
+dedent@^1.0.0:
+ version "1.5.1"
+ resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.1.tgz#4f3fc94c8b711e9bb2800d185cd6ad20f2a90aff"
+ integrity sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg==
deep-is@^0.1.3:
version "0.1.4"
@@ -1943,10 +1974,15 @@ detect-newline@^3.0.0:
resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651"
integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==
-diff-sequences@^29.4.3:
- version "29.4.3"
- resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.4.3.tgz#9314bc1fabe09267ffeca9cbafc457d8499a13f2"
- integrity sha512-ofrBgwpPhCD85kMKtE9RYFFq6OC1A89oW2vvgWZNCwxrUpRUILopY7lsYyMDSjc8g6U6aiO0Qubg6r4Wgt5ZnA==
+detect-node-es@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/detect-node-es/-/detect-node-es-1.1.0.tgz#163acdf643330caa0b4cd7c21e7ee7755d6fa493"
+ integrity sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==
+
+diff-sequences@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.6.3.tgz#4deaf894d11407c51efc8418012f9e70b84ea921"
+ integrity sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==
doctrine@^2.1.0:
version "2.1.0"
@@ -1970,10 +2006,10 @@ dom-helpers@^5.0.1:
"@babel/runtime" "^7.8.7"
csstype "^3.0.2"
-electron-to-chromium@^1.4.431:
- version "1.4.468"
- resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.468.tgz#3cbf64ad67d9f12bfe69fefe5eb1935ec4f6ab7a"
- integrity sha512-6M1qyhaJOt7rQtNti1lBA0GwclPH+oKCmsra/hkcWs5INLxfXXD/dtdnaKUYQu/pjOBP/8Osoe4mAcNvvzoFag==
+electron-to-chromium@^1.4.477:
+ version "1.4.504"
+ resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.504.tgz#975522945676cf2d55910988a169f07b83081488"
+ integrity sha512-cSMwIAd8yUh54VwitVRVvHK66QqHWE39C3DRj8SWiXitEpVSY3wNPD9y1pxQtLIi4w3UdzF9klLsmuPshz09DQ==
emittery@^0.13.1:
version "0.13.1"
@@ -1999,7 +2035,7 @@ error-ex@^1.3.1:
dependencies:
is-arrayish "^0.2.1"
-es-abstract@^1.19.0, es-abstract@^1.20.4:
+es-abstract@^1.20.4, es-abstract@^1.22.1:
version "1.22.1"
resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.22.1.tgz#8b4e5fc5cefd7f1660f0f8e1a52900dfbc9d9ccc"
integrity sha512-ioRRcXMO6OFyRpyzV3kE1IIBd4WG5/kltnzdxSCqoP8CMGs/Li+M1uF5o7lOkZVFjDs+NLesthnF66Pg/0q0Lw==
@@ -2044,6 +2080,26 @@ es-abstract@^1.19.0, es-abstract@^1.20.4:
unbox-primitive "^1.0.2"
which-typed-array "^1.1.10"
+es-iterator-helpers@^1.0.12:
+ version "1.0.14"
+ resolved "https://registry.yarnpkg.com/es-iterator-helpers/-/es-iterator-helpers-1.0.14.tgz#19cd7903697d97e21198f3293b55e8985791c365"
+ integrity sha512-JgtVnwiuoRuzLvqelrvN3Xu7H9bu2ap/kQ2CrM62iidP8SKuD99rWU3CJy++s7IVL2qb/AjXPGR/E7i9ngd/Cw==
+ dependencies:
+ asynciterator.prototype "^1.0.0"
+ call-bind "^1.0.2"
+ define-properties "^1.2.0"
+ es-abstract "^1.22.1"
+ es-set-tostringtag "^2.0.1"
+ function-bind "^1.1.1"
+ get-intrinsic "^1.2.1"
+ globalthis "^1.0.3"
+ has-property-descriptors "^1.0.0"
+ has-proto "^1.0.1"
+ has-symbols "^1.0.3"
+ internal-slot "^1.0.5"
+ iterator.prototype "^1.1.0"
+ safe-array-concat "^1.0.0"
+
es-set-tostringtag@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.1.tgz#338d502f6f674301d710b80c8592de8a15f09cd8"
@@ -2079,32 +2135,32 @@ esbuild-jest@^0:
babel-jest "^26.6.3"
esbuild@^0.18.10:
- version "0.18.16"
- resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.18.16.tgz#bbeb058c556152bcbff4e8168e7c93020ccf09c3"
- integrity sha512-1xLsOXrDqwdHxyXb/x/SOyg59jpf/SH7YMvU5RNSU7z3TInaASNJWNFJ6iRvLvLETZMasF3d1DdZLg7sgRimRQ==
+ version "0.18.20"
+ resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.18.20.tgz#4709f5a34801b43b799ab7d6d82f7284a9b7a7a6"
+ integrity sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==
optionalDependencies:
- "@esbuild/android-arm" "0.18.16"
- "@esbuild/android-arm64" "0.18.16"
- "@esbuild/android-x64" "0.18.16"
- "@esbuild/darwin-arm64" "0.18.16"
- "@esbuild/darwin-x64" "0.18.16"
- "@esbuild/freebsd-arm64" "0.18.16"
- "@esbuild/freebsd-x64" "0.18.16"
- "@esbuild/linux-arm" "0.18.16"
- "@esbuild/linux-arm64" "0.18.16"
- "@esbuild/linux-ia32" "0.18.16"
- "@esbuild/linux-loong64" "0.18.16"
- "@esbuild/linux-mips64el" "0.18.16"
- "@esbuild/linux-ppc64" "0.18.16"
- "@esbuild/linux-riscv64" "0.18.16"
- "@esbuild/linux-s390x" "0.18.16"
- "@esbuild/linux-x64" "0.18.16"
- "@esbuild/netbsd-x64" "0.18.16"
- "@esbuild/openbsd-x64" "0.18.16"
- "@esbuild/sunos-x64" "0.18.16"
- "@esbuild/win32-arm64" "0.18.16"
- "@esbuild/win32-ia32" "0.18.16"
- "@esbuild/win32-x64" "0.18.16"
+ "@esbuild/android-arm" "0.18.20"
+ "@esbuild/android-arm64" "0.18.20"
+ "@esbuild/android-x64" "0.18.20"
+ "@esbuild/darwin-arm64" "0.18.20"
+ "@esbuild/darwin-x64" "0.18.20"
+ "@esbuild/freebsd-arm64" "0.18.20"
+ "@esbuild/freebsd-x64" "0.18.20"
+ "@esbuild/linux-arm" "0.18.20"
+ "@esbuild/linux-arm64" "0.18.20"
+ "@esbuild/linux-ia32" "0.18.20"
+ "@esbuild/linux-loong64" "0.18.20"
+ "@esbuild/linux-mips64el" "0.18.20"
+ "@esbuild/linux-ppc64" "0.18.20"
+ "@esbuild/linux-riscv64" "0.18.20"
+ "@esbuild/linux-s390x" "0.18.20"
+ "@esbuild/linux-x64" "0.18.20"
+ "@esbuild/netbsd-x64" "0.18.20"
+ "@esbuild/openbsd-x64" "0.18.20"
+ "@esbuild/sunos-x64" "0.18.20"
+ "@esbuild/win32-arm64" "0.18.20"
+ "@esbuild/win32-ia32" "0.18.20"
+ "@esbuild/win32-x64" "0.18.20"
escalade@^3.1.1:
version "3.1.1"
@@ -2127,15 +2183,15 @@ escape-string-regexp@^4.0.0:
integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==
eslint-import-resolver-node@^0.3.7:
- version "0.3.7"
- resolved "https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.7.tgz#83b375187d412324a1963d84fa664377a23eb4d7"
- integrity sha512-gozW2blMLJCeFpBwugLTGyvVjNoeo1knonXAcatC6bjPBZitotxdWf7Gimr25N4c0AAOo4eOUfaG82IJPDpqCA==
+ version "0.3.9"
+ resolved "https://registry.yarnpkg.com/eslint-import-resolver-node/-/eslint-import-resolver-node-0.3.9.tgz#d4eaac52b8a2e7c3cd1903eb00f7e053356118ac"
+ integrity sha512-WFj2isz22JahUv+B788TlO3N6zL3nNJGU8CcZbPZvVEkBPaJdCV4vy5wyghty5ROFbCRnm132v8BScu5/1BQ8g==
dependencies:
debug "^3.2.7"
- is-core-module "^2.11.0"
- resolve "^1.22.1"
+ is-core-module "^2.13.0"
+ resolve "^1.22.4"
-eslint-module-utils@^2.7.4:
+eslint-module-utils@^2.8.0:
version "2.8.0"
resolved "https://registry.yarnpkg.com/eslint-module-utils/-/eslint-module-utils-2.8.0.tgz#e439fee65fc33f6bba630ff621efc38ec0375c49"
integrity sha512-aWajIYfsqCKRDgUfjEXNN/JlrzauMuSEy5sbd7WXbtW3EH6A6MpwEh42c7qD+MqQo9QMJ6fWLAeIJynx0g6OAw==
@@ -2143,25 +2199,27 @@ eslint-module-utils@^2.7.4:
debug "^3.2.7"
eslint-plugin-import@^2:
- version "2.27.5"
- resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.27.5.tgz#876a6d03f52608a3e5bb439c2550588e51dd6c65"
- integrity sha512-LmEt3GVofgiGuiE+ORpnvP+kAm3h6MLZJ4Q5HCyHADofsb4VzXFsRiWj3c0OFiV+3DWFh0qg3v9gcPlfc3zRow==
+ version "2.28.1"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-import/-/eslint-plugin-import-2.28.1.tgz#63b8b5b3c409bfc75ebaf8fb206b07ab435482c4"
+ integrity sha512-9I9hFlITvOV55alzoKBI+K9q74kv0iKMeY6av5+umsNwayt59fz692daGyjR+oStBQgx6nwR9rXldDev3Clw+A==
dependencies:
array-includes "^3.1.6"
+ array.prototype.findlastindex "^1.2.2"
array.prototype.flat "^1.3.1"
array.prototype.flatmap "^1.3.1"
debug "^3.2.7"
doctrine "^2.1.0"
eslint-import-resolver-node "^0.3.7"
- eslint-module-utils "^2.7.4"
+ eslint-module-utils "^2.8.0"
has "^1.0.3"
- is-core-module "^2.11.0"
+ is-core-module "^2.13.0"
is-glob "^4.0.3"
minimatch "^3.1.2"
+ object.fromentries "^2.0.6"
+ object.groupby "^1.0.0"
object.values "^1.1.6"
- resolve "^1.22.1"
- semver "^6.3.0"
- tsconfig-paths "^3.14.1"
+ semver "^6.3.1"
+ tsconfig-paths "^3.14.2"
eslint-plugin-prettier@^5:
version "5.0.0"
@@ -2182,14 +2240,15 @@ eslint-plugin-react-perf@^3:
integrity sha512-iOx2UtEOH50TmQhezTS4jbBAj/2gbrUdX+ZM28c2K9mwTvtRX6gdnd2P4WPQrejITDsAMNTCz95zu5HcjCD0xg==
eslint-plugin-react@^7:
- version "7.33.0"
- resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.33.0.tgz#6c356fb0862fec2cd1b04426c669ea746e9b6eb3"
- integrity sha512-qewL/8P34WkY8jAqdQxsiL82pDUeT7nhs8IsuXgfgnsEloKCT4miAV9N9kGtx7/KM9NH/NCGUE7Edt9iGxLXFw==
+ version "7.33.2"
+ resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.33.2.tgz#69ee09443ffc583927eafe86ffebb470ee737608"
+ integrity sha512-73QQMKALArI8/7xGLNI/3LylrEYrlKZSb5C9+q3OtOewTnMQi5cT+aE9E41sLCmli3I9PGGmD1yiZydyo4FEPw==
dependencies:
array-includes "^3.1.6"
array.prototype.flatmap "^1.3.1"
array.prototype.tosorted "^1.1.1"
doctrine "^2.1.0"
+ es-iterator-helpers "^1.0.12"
estraverse "^5.3.0"
jsx-ast-utils "^2.4.1 || ^3.0.0"
minimatch "^3.1.2"
@@ -2214,40 +2273,40 @@ eslint-rule-composer@^0.3.0:
resolved "https://registry.yarnpkg.com/eslint-rule-composer/-/eslint-rule-composer-0.3.0.tgz#79320c927b0c5c0d3d3d2b76c8b4a488f25bbaf9"
integrity sha512-bt+Sh8CtDmn2OajxvNO+BX7Wn4CIWMpTRm3MaiKPCQcnnlm0CS2mhui6QaoeQugs+3Kj2ESKEEGJUdVafwhiCg==
-eslint-scope@^7.2.0:
- version "7.2.1"
- resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-7.2.1.tgz#936821d3462675f25a18ac5fd88a67cc15b393bd"
- integrity sha512-CvefSOsDdaYYvxChovdrPo/ZGt8d5lrJWleAc1diXRKhHGiTYEI26cvo8Kle/wGnsizoCJjK73FMg1/IkIwiNA==
+eslint-scope@^7.2.2:
+ version "7.2.2"
+ resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-7.2.2.tgz#deb4f92563390f32006894af62a22dba1c46423f"
+ integrity sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==
dependencies:
esrecurse "^4.3.0"
estraverse "^5.2.0"
-eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1:
- version "3.4.1"
- resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.1.tgz#c22c48f48942d08ca824cc526211ae400478a994"
- integrity sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==
+eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4.3:
+ version "3.4.3"
+ resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800"
+ integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==
eslint@^8:
- version "8.45.0"
- resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.45.0.tgz#bab660f90d18e1364352c0a6b7c6db8edb458b78"
- integrity sha512-pd8KSxiQpdYRfYa9Wufvdoct3ZPQQuVuU5O6scNgMuOMYuxvH0IGaYK0wUFjo4UYYQQCUndlXiMbnxopwvvTiw==
+ version "8.48.0"
+ resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.48.0.tgz#bf9998ba520063907ba7bfe4c480dc8be03c2155"
+ integrity sha512-sb6DLeIuRXxeM1YljSe1KEx9/YYeZFQWcV8Rq9HfigmdDEugjLEVEa1ozDjL6YDjBpQHPJxJzze+alxi4T3OLg==
dependencies:
"@eslint-community/eslint-utils" "^4.2.0"
- "@eslint-community/regexpp" "^4.4.0"
- "@eslint/eslintrc" "^2.1.0"
- "@eslint/js" "8.44.0"
+ "@eslint-community/regexpp" "^4.6.1"
+ "@eslint/eslintrc" "^2.1.2"
+ "@eslint/js" "8.48.0"
"@humanwhocodes/config-array" "^0.11.10"
"@humanwhocodes/module-importer" "^1.0.1"
"@nodelib/fs.walk" "^1.2.8"
- ajv "^6.10.0"
+ ajv "^6.12.4"
chalk "^4.0.0"
cross-spawn "^7.0.2"
debug "^4.3.2"
doctrine "^3.0.0"
escape-string-regexp "^4.0.0"
- eslint-scope "^7.2.0"
- eslint-visitor-keys "^3.4.1"
- espree "^9.6.0"
+ eslint-scope "^7.2.2"
+ eslint-visitor-keys "^3.4.3"
+ espree "^9.6.1"
esquery "^1.4.2"
esutils "^2.0.2"
fast-deep-equal "^3.1.3"
@@ -2270,7 +2329,7 @@ eslint@^8:
strip-ansi "^6.0.1"
text-table "^0.2.0"
-espree@^9.6.0:
+espree@^9.6.0, espree@^9.6.1:
version "9.6.1"
resolved "https://registry.yarnpkg.com/espree/-/espree-9.6.1.tgz#a2a17b8e434690a5432f2f8018ce71d331a48c6f"
integrity sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==
@@ -2389,17 +2448,16 @@ expand-brackets@^2.1.4:
snapdragon "^0.8.1"
to-regex "^3.0.1"
-expect@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/expect/-/expect-29.6.1.tgz#64dd1c8f75e2c0b209418f2b8d36a07921adfdf1"
- integrity sha512-XEdDLonERCU1n9uR56/Stx9OqojaLAQtZf9PrCHH9Hl8YXiEIka3H4NXJ3NOIBmQJTg7+j7buh34PMHfJujc8g==
+expect@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/expect/-/expect-29.6.4.tgz#a6e6f66d4613717859b2fe3da98a739437b6f4b8"
+ integrity sha512-F2W2UyQ8XYyftHT57dtfg8Ue3X5qLgm2sSug0ivvLRH/VKNRL/pDxg/TH7zVzbQB0tu80clNFy6LU7OS/VSEKA==
dependencies:
- "@jest/expect-utils" "^29.6.1"
- "@types/node" "*"
- jest-get-type "^29.4.3"
- jest-matcher-utils "^29.6.1"
- jest-message-util "^29.6.1"
- jest-util "^29.6.1"
+ "@jest/expect-utils" "^29.6.4"
+ jest-get-type "^29.6.3"
+ jest-matcher-utils "^29.6.4"
+ jest-message-util "^29.6.3"
+ jest-util "^29.6.3"
extend-shallow@^2.0.1:
version "2.0.1"
@@ -2521,14 +2579,15 @@ find-up@^5.0.0:
path-exists "^4.0.0"
flat-cache@^3.0.4:
- version "3.0.4"
- resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-3.0.4.tgz#61b0338302b2fe9f957dcc32fc2a87f1c3048b11"
- integrity sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==
+ version "3.1.0"
+ resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-3.1.0.tgz#0e54ab4a1a60fe87e2946b6b00657f1c99e1af3f"
+ integrity sha512-OHx4Qwrrt0E4jEIcI5/Xb+f+QmJYNj2rrK8wiIdQOIrB9WrrJL8cjZvXdXuBTkkEwEqLycb5BeZDV1o2i9bTew==
dependencies:
- flatted "^3.1.0"
+ flatted "^3.2.7"
+ keyv "^4.5.3"
rimraf "^3.0.2"
-flatted@^3.1.0:
+flatted@^3.2.7:
version "3.2.7"
resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.2.7.tgz#609f39207cb614b89d0765b477cb2d437fbf9787"
integrity sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==
@@ -2557,27 +2616,32 @@ fs.realpath@^1.0.0:
resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==
-fsevents@^2.1.2, fsevents@^2.3.2, fsevents@~2.3.2:
+fsevents@^2.1.2:
version "2.3.2"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.2.tgz#8a526f78b8fdf4623b709e0b975c52c24c02fd1a"
integrity sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==
+fsevents@^2.3.2, fsevents@~2.3.2:
+ version "2.3.3"
+ resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6"
+ integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==
+
function-bind@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.1.tgz#a56899d3ea3c9bab874bb9773b7c5ede92f4895d"
integrity sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==
function.prototype.name@^1.1.5:
- version "1.1.5"
- resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.5.tgz#cce0505fe1ffb80503e6f9e46cc64e46a12a9621"
- integrity sha512-uN7m/BzVKQnCUF/iW8jYea67v++2u7m5UgENbHRtdDVclOUP+FMPlCNdmk0h/ysGyo2tavMJEDqJAkJdRa1vMA==
+ version "1.1.6"
+ resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.6.tgz#cdf315b7d90ee77a4c6ee216c3c3362da07533fd"
+ integrity sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==
dependencies:
call-bind "^1.0.2"
- define-properties "^1.1.3"
- es-abstract "^1.19.0"
- functions-have-names "^1.2.2"
+ define-properties "^1.2.0"
+ es-abstract "^1.22.1"
+ functions-have-names "^1.2.3"
-functions-have-names@^1.2.2, functions-have-names@^1.2.3:
+functions-have-names@^1.2.3:
version "1.2.3"
resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834"
integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==
@@ -2602,6 +2666,11 @@ get-intrinsic@^1.0.2, get-intrinsic@^1.1.1, get-intrinsic@^1.1.3, get-intrinsic@
has-proto "^1.0.1"
has-symbols "^1.0.3"
+get-nonce@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/get-nonce/-/get-nonce-1.0.1.tgz#fdf3f0278073820d2ce9426c18f07481b1e0cdf3"
+ integrity sha512-FJhYRoDaiatfEkUK8HKlicmu/3SGFD51q3itKDGoSTysQJBnfOcxU5GxnhE1E6soB76MbT0MBtnKJuXyAx+96Q==
+
get-package-type@^0.1.0:
version "0.1.0"
resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a"
@@ -2671,9 +2740,9 @@ globals@^11.1.0:
integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==
globals@^13.19.0:
- version "13.20.0"
- resolved "https://registry.yarnpkg.com/globals/-/globals-13.20.0.tgz#ea276a1e508ffd4f1612888f9d1bad1e2717bf82"
- integrity sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==
+ version "13.21.0"
+ resolved "https://registry.yarnpkg.com/globals/-/globals-13.21.0.tgz#163aae12f34ef502f5153cfbdd3600f36c63c571"
+ integrity sha512-ybyme3s4yy/t/3s35bewwXKOf7cvzfreG2lH0lZl0JB7I4GxRP2ghxOK/Nb9EkRXdbBXZLfq/p/0W2JUONB/Gg==
dependencies:
type-fest "^0.20.2"
@@ -2858,6 +2927,13 @@ internal-slot@^1.0.3, internal-slot@^1.0.5:
has "^1.0.3"
side-channel "^1.0.4"
+invariant@^2.2.4:
+ version "2.2.4"
+ resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6"
+ integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==
+ dependencies:
+ loose-envify "^1.0.0"
+
is-accessor-descriptor@^0.1.6:
version "0.1.6"
resolved "https://registry.yarnpkg.com/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz#a9e12cb3ae8d876727eeef3843f8a0897b5c98d6"
@@ -2886,6 +2962,13 @@ is-arrayish@^0.2.1:
resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==
+is-async-function@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/is-async-function/-/is-async-function-2.0.0.tgz#8e4418efd3e5d3a6ebb0164c05ef5afb69aa9646"
+ integrity sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA==
+ dependencies:
+ has-tostringtag "^1.0.0"
+
is-bigint@^1.0.1:
version "1.0.4"
resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3"
@@ -2918,10 +3001,10 @@ is-ci@^2.0.0:
dependencies:
ci-info "^2.0.0"
-is-core-module@^2.11.0, is-core-module@^2.9.0:
- version "2.12.1"
- resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.12.1.tgz#0c0b6885b6f80011c71541ce15c8d66cf5a4f9fd"
- integrity sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==
+is-core-module@^2.11.0, is-core-module@^2.13.0, is-core-module@^2.9.0:
+ version "2.13.0"
+ resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.0.tgz#bb52aa6e2cbd49a30c2ba68c42bf3435ba6072db"
+ integrity sha512-Z7dk6Qo8pOCp3l4tsX2C5ZVas4V+UxwQodwZhLopL91TX8UyyHEXafPcyoeeWuLrwzHcr3igO78wNLwHJHsMCQ==
dependencies:
has "^1.0.3"
@@ -2939,7 +3022,7 @@ is-data-descriptor@^1.0.0:
dependencies:
kind-of "^6.0.0"
-is-date-object@^1.0.1:
+is-date-object@^1.0.1, is-date-object@^1.0.5:
version "1.0.5"
resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f"
integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==
@@ -2991,6 +3074,13 @@ is-extglob@^2.1.1:
resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==
+is-finalizationregistry@^1.0.2:
+ version "1.0.2"
+ resolved "https://registry.yarnpkg.com/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz#c8749b65f17c133313e661b1289b95ad3dbd62e6"
+ integrity sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw==
+ dependencies:
+ call-bind "^1.0.2"
+
is-fullwidth-code-point@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d"
@@ -3001,6 +3091,13 @@ is-generator-fn@^2.0.0:
resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118"
integrity sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ==
+is-generator-function@^1.0.10:
+ version "1.0.10"
+ resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.10.tgz#f1558baf1ac17e0deea7c0415c438351ff2b3c72"
+ integrity sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==
+ dependencies:
+ has-tostringtag "^1.0.0"
+
is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3:
version "4.0.3"
resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084"
@@ -3015,6 +3112,11 @@ is-inside-container@^1.0.0:
dependencies:
is-docker "^3.0.0"
+is-map@^2.0.1:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.2.tgz#00922db8c9bf73e81b7a335827bc2a43f2b91127"
+ integrity sha512-cOZFQQozTha1f4MxLFzlgKYPTyj26picdZTx82hbc/Xf4K/tZOOXSCkMvU4pKioRXGDLJRn0GM7Upe7kR721yg==
+
is-negative-zero@^2.0.2:
version "2.0.2"
resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.2.tgz#7bf6f03a28003b8b3965de3ac26f664d765f3150"
@@ -3059,6 +3161,11 @@ is-regex@^1.1.4:
call-bind "^1.0.2"
has-tostringtag "^1.0.0"
+is-set@^2.0.1:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/is-set/-/is-set-2.0.2.tgz#90755fa4c2562dc1c5d4024760d6119b94ca18ec"
+ integrity sha512-+2cnTEZeY5z/iXGbLhPrOAaK/Mau5k5eXq9j14CpRTftq0pAJu2MwVRSZhyZWBzx3o6X795Lz6Bpb6R0GKf37g==
+
is-shared-array-buffer@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.2.tgz#8f259c573b60b6a32d4058a1a07430c0a7344c79"
@@ -3107,6 +3214,11 @@ is-typedarray@^1.0.0:
resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a"
integrity sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==
+is-weakmap@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/is-weakmap/-/is-weakmap-2.0.1.tgz#5008b59bdc43b698201d18f62b37b2ca243e8cf2"
+ integrity sha512-NSBR4kH5oVj1Uwvv970ruUkCV7O1mzgVFO4/rev2cLRda9Tm9HrL70ZPut4rOHgY0FNrUu9BCbXA2sdQ+x0chA==
+
is-weakref@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.2.tgz#9529f383a9338205e89765e0392efc2f100f06f2"
@@ -3114,6 +3226,14 @@ is-weakref@^1.0.2:
dependencies:
call-bind "^1.0.2"
+is-weakset@^2.0.1:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/is-weakset/-/is-weakset-2.0.2.tgz#4569d67a747a1ce5a994dfd4ef6dcea76e7c0a1d"
+ integrity sha512-t2yVvttHkQktwnNNmBQ98AhENLdPUTDTE21uPqAQ0ARwQfGeQKRVS0NNurH7bTf7RrvcVn1OOge45CnBeHCSmg==
+ dependencies:
+ call-bind "^1.0.2"
+ get-intrinsic "^1.1.1"
+
is-windows@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/is-windows/-/is-windows-1.0.2.tgz#d1850eb9791ecd18e6182ce12a30f396634bb19d"
@@ -3158,7 +3278,7 @@ istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0:
resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.0.tgz#189e7909d0a39fa5a3dfad5b03f71947770191d3"
integrity sha512-eOeJ5BHCmHYvQK7xt9GkdHuzuCGS1Y6g9Gvnx3Ym33fz/HpLRYxiS0wHNr+m/MBC8B647Xt608vCDEvhl9c6Mw==
-istanbul-lib-instrument@^5.0.4, istanbul-lib-instrument@^5.1.0:
+istanbul-lib-instrument@^5.0.4:
version "5.2.1"
resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz#d10c8885c2125574e1c231cacadf955675e1ce3d"
integrity sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg==
@@ -3169,13 +3289,24 @@ istanbul-lib-instrument@^5.0.4, istanbul-lib-instrument@^5.1.0:
istanbul-lib-coverage "^3.2.0"
semver "^6.3.0"
+istanbul-lib-instrument@^6.0.0:
+ version "6.0.0"
+ resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.0.tgz#7a8af094cbfff1d5bb280f62ce043695ae8dd5b8"
+ integrity sha512-x58orMzEVfzPUKqlbLd1hXCnySCxKdDKa6Rjg97CwuLLRI4g3FHTdnExu1OqffVFay6zeMW+T6/DowFLndWnIw==
+ dependencies:
+ "@babel/core" "^7.12.3"
+ "@babel/parser" "^7.14.7"
+ "@istanbuljs/schema" "^0.1.2"
+ istanbul-lib-coverage "^3.2.0"
+ semver "^7.5.4"
+
istanbul-lib-report@^3.0.0:
- version "3.0.0"
- resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz#7518fe52ea44de372f460a76b5ecda9ffb73d8a6"
- integrity sha512-wcdi+uAKzfiGT2abPpKZ0hSU1rGQjUQnLvtY5MpQ7QCTahD3VODhcu4wcfY1YtkGaDD5yuydOLINXsfbus9ROw==
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz#908305bac9a5bd175ac6a74489eafd0fc2445a7d"
+ integrity sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==
dependencies:
istanbul-lib-coverage "^3.0.0"
- make-dir "^3.0.0"
+ make-dir "^4.0.0"
supports-color "^7.1.0"
istanbul-lib-source-maps@^4.0.0:
@@ -3188,137 +3319,149 @@ istanbul-lib-source-maps@^4.0.0:
source-map "^0.6.1"
istanbul-reports@^3.1.3:
- version "3.1.5"
- resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.1.5.tgz#cc9a6ab25cb25659810e4785ed9d9fb742578bae"
- integrity sha512-nUsEMa9pBt/NOHqbcbeJEgqIlY/K7rVWUX6Lql2orY5e9roQOthbR3vtY4zzf2orPELg80fnxxk9zUyPlgwD1w==
+ version "3.1.6"
+ resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.1.6.tgz#2544bcab4768154281a2f0870471902704ccaa1a"
+ integrity sha512-TLgnMkKg3iTDsQ9PbPTdpfAK2DzjF9mqUG7RMgcQl8oFjad8ob4laGxv5XV5U9MAfx8D6tSJiUyuAwzLicaxlg==
dependencies:
html-escaper "^2.0.0"
istanbul-lib-report "^3.0.0"
-jest-changed-files@^29.5.0:
- version "29.5.0"
- resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.5.0.tgz#e88786dca8bf2aa899ec4af7644e16d9dcf9b23e"
- integrity sha512-IFG34IUMUaNBIxjQXF/iu7g6EcdMrGRRxaUSw92I/2g2YC6vCdTltl4nHvt7Ci5nSJwXIkCu8Ka1DKF+X7Z1Ag==
+iterator.prototype@^1.1.0:
+ version "1.1.0"
+ resolved "https://registry.yarnpkg.com/iterator.prototype/-/iterator.prototype-1.1.0.tgz#690c88b043d821f783843aaf725d7ac3b62e3b46"
+ integrity sha512-rjuhAk1AJ1fssphHD0IFV6TWL40CwRZ53FrztKx43yk2v6rguBYsY4Bj1VU4HmoMmKwZUlx7mfnhDf9cOp4YTw==
+ dependencies:
+ define-properties "^1.1.4"
+ get-intrinsic "^1.1.3"
+ has-symbols "^1.0.3"
+ has-tostringtag "^1.0.0"
+ reflect.getprototypeof "^1.0.3"
+
+jest-changed-files@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.6.3.tgz#97cfdc93f74fb8af2a1acb0b78f836f1fb40c449"
+ integrity sha512-G5wDnElqLa4/c66ma5PG9eRjE342lIbF6SUnTJi26C3J28Fv2TVY2rOyKB9YGbSA5ogwevgmxc4j4aVjrEK6Yg==
dependencies:
execa "^5.0.0"
+ jest-util "^29.6.3"
p-limit "^3.1.0"
-jest-circus@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-29.6.1.tgz#861dab37e71a89907d1c0fabc54a0019738ed824"
- integrity sha512-tPbYLEiBU4MYAL2XoZme/bgfUeotpDBd81lgHLCbDZZFaGmECk0b+/xejPFtmiBP87GgP/y4jplcRpbH+fgCzQ==
+jest-circus@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-29.6.4.tgz#f074c8d795e0cc0f2ebf0705086b1be6a9a8722f"
+ integrity sha512-YXNrRyntVUgDfZbjXWBMPslX1mQ8MrSG0oM/Y06j9EYubODIyHWP8hMUbjbZ19M3M+zamqEur7O80HODwACoJw==
dependencies:
- "@jest/environment" "^29.6.1"
- "@jest/expect" "^29.6.1"
- "@jest/test-result" "^29.6.1"
- "@jest/types" "^29.6.1"
+ "@jest/environment" "^29.6.4"
+ "@jest/expect" "^29.6.4"
+ "@jest/test-result" "^29.6.4"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
co "^4.6.0"
- dedent "^0.7.0"
+ dedent "^1.0.0"
is-generator-fn "^2.0.0"
- jest-each "^29.6.1"
- jest-matcher-utils "^29.6.1"
- jest-message-util "^29.6.1"
- jest-runtime "^29.6.1"
- jest-snapshot "^29.6.1"
- jest-util "^29.6.1"
+ jest-each "^29.6.3"
+ jest-matcher-utils "^29.6.4"
+ jest-message-util "^29.6.3"
+ jest-runtime "^29.6.4"
+ jest-snapshot "^29.6.4"
+ jest-util "^29.6.3"
p-limit "^3.1.0"
- pretty-format "^29.6.1"
+ pretty-format "^29.6.3"
pure-rand "^6.0.0"
slash "^3.0.0"
stack-utils "^2.0.3"
-jest-cli@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-29.6.1.tgz#99d9afa7449538221c71f358f0fdd3e9c6e89f72"
- integrity sha512-607dSgTA4ODIN6go9w6xY3EYkyPFGicx51a69H7yfvt7lN53xNswEVLovq+E77VsTRi5fWprLH0yl4DJgE8Ing==
+jest-cli@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-29.6.4.tgz#ad52f2dfa1b0291de7ec7f8d7c81ac435521ede0"
+ integrity sha512-+uMCQ7oizMmh8ZwRfZzKIEszFY9ksjjEQnTEMTaL7fYiL3Kw4XhqT9bYh+A4DQKUb67hZn2KbtEnDuHvcgK4pQ==
dependencies:
- "@jest/core" "^29.6.1"
- "@jest/test-result" "^29.6.1"
- "@jest/types" "^29.6.1"
+ "@jest/core" "^29.6.4"
+ "@jest/test-result" "^29.6.4"
+ "@jest/types" "^29.6.3"
chalk "^4.0.0"
exit "^0.1.2"
graceful-fs "^4.2.9"
import-local "^3.0.2"
- jest-config "^29.6.1"
- jest-util "^29.6.1"
- jest-validate "^29.6.1"
+ jest-config "^29.6.4"
+ jest-util "^29.6.3"
+ jest-validate "^29.6.3"
prompts "^2.0.1"
yargs "^17.3.1"
-jest-config@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-29.6.1.tgz#d785344509065d53a238224c6cdc0ed8e2f2f0dd"
- integrity sha512-XdjYV2fy2xYixUiV2Wc54t3Z4oxYPAELUzWnV6+mcbq0rh742X2p52pii5A3oeRzYjLnQxCsZmp0qpI6klE2cQ==
+jest-config@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-29.6.4.tgz#eff958ee41d4e1ee7a6106d02b74ad9fc427d79e"
+ integrity sha512-JWohr3i9m2cVpBumQFv2akMEnFEPVOh+9L2xIBJhJ0zOaci2ZXuKJj0tgMKQCBZAKA09H049IR4HVS/43Qb19A==
dependencies:
"@babel/core" "^7.11.6"
- "@jest/test-sequencer" "^29.6.1"
- "@jest/types" "^29.6.1"
- babel-jest "^29.6.1"
+ "@jest/test-sequencer" "^29.6.4"
+ "@jest/types" "^29.6.3"
+ babel-jest "^29.6.4"
chalk "^4.0.0"
ci-info "^3.2.0"
deepmerge "^4.2.2"
glob "^7.1.3"
graceful-fs "^4.2.9"
- jest-circus "^29.6.1"
- jest-environment-node "^29.6.1"
- jest-get-type "^29.4.3"
- jest-regex-util "^29.4.3"
- jest-resolve "^29.6.1"
- jest-runner "^29.6.1"
- jest-util "^29.6.1"
- jest-validate "^29.6.1"
+ jest-circus "^29.6.4"
+ jest-environment-node "^29.6.4"
+ jest-get-type "^29.6.3"
+ jest-regex-util "^29.6.3"
+ jest-resolve "^29.6.4"
+ jest-runner "^29.6.4"
+ jest-util "^29.6.3"
+ jest-validate "^29.6.3"
micromatch "^4.0.4"
parse-json "^5.2.0"
- pretty-format "^29.6.1"
+ pretty-format "^29.6.3"
slash "^3.0.0"
strip-json-comments "^3.1.1"
-jest-diff@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-29.6.1.tgz#13df6db0a89ee6ad93c747c75c85c70ba941e545"
- integrity sha512-FsNCvinvl8oVxpNLttNQX7FAq7vR+gMDGj90tiP7siWw1UdakWUGqrylpsYrpvj908IYckm5Y0Q7azNAozU1Kg==
+jest-diff@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-29.6.4.tgz#85aaa6c92a79ae8cd9a54ebae8d5b6d9a513314a"
+ integrity sha512-9F48UxR9e4XOEZvoUXEHSWY4qC4zERJaOfrbBg9JpbJOO43R1vN76REt/aMGZoY6GD5g84nnJiBIVlscegefpw==
dependencies:
chalk "^4.0.0"
- diff-sequences "^29.4.3"
- jest-get-type "^29.4.3"
- pretty-format "^29.6.1"
+ diff-sequences "^29.6.3"
+ jest-get-type "^29.6.3"
+ pretty-format "^29.6.3"
-jest-docblock@^29.4.3:
- version "29.4.3"
- resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-29.4.3.tgz#90505aa89514a1c7dceeac1123df79e414636ea8"
- integrity sha512-fzdTftThczeSD9nZ3fzA/4KkHtnmllawWrXO69vtI+L9WjEIuXWs4AmyME7lN5hU7dB0sHhuPfcKofRsUb/2Fg==
+jest-docblock@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-29.6.3.tgz#293dca5188846c9f7c0c2b1bb33e5b11f21645f2"
+ integrity sha512-2+H+GOTQBEm2+qFSQ7Ma+BvyV+waiIFxmZF5LdpBsAEjWX8QYjSCa4FrkIYtbfXUJJJnFCYrOtt6TZ+IAiTjBQ==
dependencies:
detect-newline "^3.0.0"
-jest-each@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-29.6.1.tgz#975058e5b8f55c6780beab8b6ab214921815c89c"
- integrity sha512-n5eoj5eiTHpKQCAVcNTT7DRqeUmJ01hsAL0Q1SMiBHcBcvTKDELixQOGMCpqhbIuTcfC4kMfSnpmDqRgRJcLNQ==
+jest-each@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-29.6.3.tgz#1956f14f5f0cb8ae0b2e7cabc10bb03ec817c142"
+ integrity sha512-KoXfJ42k8cqbkfshW7sSHcdfnv5agDdHCPA87ZBdmHP+zJstTJc0ttQaJ/x7zK6noAL76hOuTIJ6ZkQRS5dcyg==
dependencies:
- "@jest/types" "^29.6.1"
+ "@jest/types" "^29.6.3"
chalk "^4.0.0"
- jest-get-type "^29.4.3"
- jest-util "^29.6.1"
- pretty-format "^29.6.1"
-
-jest-environment-node@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.6.1.tgz#08a122dece39e58bc388da815a2166c58b4abec6"
- integrity sha512-ZNIfAiE+foBog24W+2caIldl4Irh8Lx1PUhg/GZ0odM1d/h2qORAsejiFc7zb+SEmYPn1yDZzEDSU5PmDkmVLQ==
- dependencies:
- "@jest/environment" "^29.6.1"
- "@jest/fake-timers" "^29.6.1"
- "@jest/types" "^29.6.1"
+ jest-get-type "^29.6.3"
+ jest-util "^29.6.3"
+ pretty-format "^29.6.3"
+
+jest-environment-node@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.6.4.tgz#4ce311549afd815d3cafb49e60a1e4b25f06d29f"
+ integrity sha512-i7SbpH2dEIFGNmxGCpSc2w9cA4qVD+wfvg2ZnfQ7XVrKL0NA5uDVBIiGH8SR4F0dKEv/0qI5r+aDomDf04DpEQ==
+ dependencies:
+ "@jest/environment" "^29.6.4"
+ "@jest/fake-timers" "^29.6.4"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
- jest-mock "^29.6.1"
- jest-util "^29.6.1"
+ jest-mock "^29.6.3"
+ jest-util "^29.6.3"
-jest-get-type@^29.4.3:
- version "29.4.3"
- resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-29.4.3.tgz#1ab7a5207c995161100b5187159ca82dd48b3dd5"
- integrity sha512-J5Xez4nRRMjk8emnTpWrlkyb9pfRQQanDrvWHhsR1+VUfbwxi30eVcZFlcdGInRibU4G5LwHXpI7IRHU0CY+gg==
+jest-get-type@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-29.6.3.tgz#36f499fdcea197c1045a127319c0481723908fd1"
+ integrity sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==
jest-haste-map@^26.6.2:
version "26.6.2"
@@ -3341,66 +3484,66 @@ jest-haste-map@^26.6.2:
optionalDependencies:
fsevents "^2.1.2"
-jest-haste-map@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-29.6.1.tgz#62655c7a1c1b349a3206441330fb2dbdb4b63803"
- integrity sha512-0m7f9PZXxOCk1gRACiVgX85knUKPKLPg4oRCjLoqIm9brTHXaorMA0JpmtmVkQiT8nmXyIVoZd/nnH1cfC33ig==
+jest-haste-map@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-29.6.4.tgz#97143ce833829157ea7025204b08f9ace609b96a"
+ integrity sha512-12Ad+VNTDHxKf7k+M65sviyynRoZYuL1/GTuhEVb8RYsNSNln71nANRb/faSyWvx0j+gHcivChXHIoMJrGYjog==
dependencies:
- "@jest/types" "^29.6.1"
+ "@jest/types" "^29.6.3"
"@types/graceful-fs" "^4.1.3"
"@types/node" "*"
anymatch "^3.0.3"
fb-watchman "^2.0.0"
graceful-fs "^4.2.9"
- jest-regex-util "^29.4.3"
- jest-util "^29.6.1"
- jest-worker "^29.6.1"
+ jest-regex-util "^29.6.3"
+ jest-util "^29.6.3"
+ jest-worker "^29.6.4"
micromatch "^4.0.4"
walker "^1.0.8"
optionalDependencies:
fsevents "^2.3.2"
-jest-leak-detector@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-29.6.1.tgz#66a902c81318e66e694df7d096a95466cb962f8e"
- integrity sha512-OrxMNyZirpOEwkF3UHnIkAiZbtkBWiye+hhBweCHkVbCgyEy71Mwbb5zgeTNYWJBi1qgDVfPC1IwO9dVEeTLwQ==
+jest-leak-detector@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-29.6.3.tgz#b9661bc3aec8874e59aff361fa0c6d7cd507ea01"
+ integrity sha512-0kfbESIHXYdhAdpLsW7xdwmYhLf1BRu4AA118/OxFm0Ho1b2RcTmO4oF6aAMaxpxdxnJ3zve2rgwzNBD4Zbm7Q==
dependencies:
- jest-get-type "^29.4.3"
- pretty-format "^29.6.1"
+ jest-get-type "^29.6.3"
+ pretty-format "^29.6.3"
-jest-matcher-utils@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-29.6.1.tgz#6c60075d84655d6300c5d5128f46531848160b53"
- integrity sha512-SLaztw9d2mfQQKHmJXKM0HCbl2PPVld/t9Xa6P9sgiExijviSp7TnZZpw2Fpt+OI3nwUO/slJbOfzfUMKKC5QA==
+jest-matcher-utils@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-29.6.4.tgz#327db7ababea49455df3b23e5d6109fe0c709d24"
+ integrity sha512-KSzwyzGvK4HcfnserYqJHYi7sZVqdREJ9DMPAKVbS98JsIAvumihaNUbjrWw0St7p9IY7A9UskCW5MYlGmBQFQ==
dependencies:
chalk "^4.0.0"
- jest-diff "^29.6.1"
- jest-get-type "^29.4.3"
- pretty-format "^29.6.1"
+ jest-diff "^29.6.4"
+ jest-get-type "^29.6.3"
+ pretty-format "^29.6.3"
-jest-message-util@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-29.6.1.tgz#d0b21d87f117e1b9e165e24f245befd2ff34ff8d"
- integrity sha512-KoAW2zAmNSd3Gk88uJ56qXUWbFk787QKmjjJVOjtGFmmGSZgDBrlIL4AfQw1xyMYPNVD7dNInfIbur9B2rd/wQ==
+jest-message-util@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-29.6.3.tgz#bce16050d86801b165f20cfde34dc01d3cf85fbf"
+ integrity sha512-FtzaEEHzjDpQp51HX4UMkPZjy46ati4T5pEMyM6Ik48ztu4T9LQplZ6OsimHx7EuM9dfEh5HJa6D3trEftu3dA==
dependencies:
"@babel/code-frame" "^7.12.13"
- "@jest/types" "^29.6.1"
+ "@jest/types" "^29.6.3"
"@types/stack-utils" "^2.0.0"
chalk "^4.0.0"
graceful-fs "^4.2.9"
micromatch "^4.0.4"
- pretty-format "^29.6.1"
+ pretty-format "^29.6.3"
slash "^3.0.0"
stack-utils "^2.0.3"
-jest-mock@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-29.6.1.tgz#049ee26aea8cbf54c764af649070910607316517"
- integrity sha512-brovyV9HBkjXAEdRooaTQK42n8usKoSRR3gihzUpYeV/vwqgSoNfrksO7UfSACnPmxasO/8TmHM3w9Hp3G1dgw==
+jest-mock@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-29.6.3.tgz#433f3fd528c8ec5a76860177484940628bdf5e0a"
+ integrity sha512-Z7Gs/mOyTSR4yPsaZ72a/MtuK6RnC3JYqWONe48oLaoEcYwEDxqvbXz85G4SJrm2Z5Ar9zp6MiHF4AlFlRM4Pg==
dependencies:
- "@jest/types" "^29.6.1"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
- jest-util "^29.6.1"
+ jest-util "^29.6.3"
jest-pnp-resolver@^1.2.2:
version "1.2.3"
@@ -3412,86 +3555,86 @@ jest-regex-util@^26.0.0:
resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-26.0.0.tgz#d25e7184b36e39fd466c3bc41be0971e821fee28"
integrity sha512-Gv3ZIs/nA48/Zvjrl34bf+oD76JHiGDUxNOVgUjh3j890sblXryjY4rss71fPtD/njchl6PSE2hIhvyWa1eT0A==
-jest-regex-util@^29.4.3:
- version "29.4.3"
- resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-29.4.3.tgz#a42616141e0cae052cfa32c169945d00c0aa0bb8"
- integrity sha512-O4FglZaMmWXbGHSQInfXewIsd1LMn9p3ZXB/6r4FOkyhX2/iP/soMG98jGvk/A3HAN78+5VWcBGO0BJAPRh4kg==
+jest-regex-util@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-29.6.3.tgz#4a556d9c776af68e1c5f48194f4d0327d24e8a52"
+ integrity sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg==
-jest-resolve-dependencies@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-29.6.1.tgz#b85b06670f987a62515bbf625d54a499e3d708f5"
- integrity sha512-BbFvxLXtcldaFOhNMXmHRWx1nXQO5LoXiKSGQcA1LxxirYceZT6ch8KTE1bK3X31TNG/JbkI7OkS/ABexVahiw==
+jest-resolve-dependencies@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-29.6.4.tgz#20156b33c7eacbb6bb77aeba4bed0eab4a3f8734"
+ integrity sha512-7+6eAmr1ZBF3vOAJVsfLj1QdqeXG+WYhidfLHBRZqGN24MFRIiKG20ItpLw2qRAsW/D2ZUUmCNf6irUr/v6KHA==
dependencies:
- jest-regex-util "^29.4.3"
- jest-snapshot "^29.6.1"
+ jest-regex-util "^29.6.3"
+ jest-snapshot "^29.6.4"
-jest-resolve@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-29.6.1.tgz#4c3324b993a85e300add2f8609f51b80ddea39ee"
- integrity sha512-AeRkyS8g37UyJiP9w3mmI/VXU/q8l/IH52vj/cDAyScDcemRbSBhfX/NMYIGilQgSVwsjxrCHf3XJu4f+lxCMg==
+jest-resolve@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-29.6.4.tgz#e34cb06f2178b429c38455d98d1a07572ac9faa3"
+ integrity sha512-fPRq+0vcxsuGlG0O3gyoqGTAxasagOxEuyoxHeyxaZbc9QNek0AmJWSkhjlMG+mTsj+8knc/mWb3fXlRNVih7Q==
dependencies:
chalk "^4.0.0"
graceful-fs "^4.2.9"
- jest-haste-map "^29.6.1"
+ jest-haste-map "^29.6.4"
jest-pnp-resolver "^1.2.2"
- jest-util "^29.6.1"
- jest-validate "^29.6.1"
+ jest-util "^29.6.3"
+ jest-validate "^29.6.3"
resolve "^1.20.0"
resolve.exports "^2.0.0"
slash "^3.0.0"
-jest-runner@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-29.6.1.tgz#54557087e7972d345540d622ab5bfc3d8f34688c"
- integrity sha512-tw0wb2Q9yhjAQ2w8rHRDxteryyIck7gIzQE4Reu3JuOBpGp96xWgF0nY8MDdejzrLCZKDcp8JlZrBN/EtkQvPQ==
+jest-runner@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-29.6.4.tgz#b3b8ccb85970fde0fae40c73ee11eb75adccfacf"
+ integrity sha512-SDaLrMmtVlQYDuG0iSPYLycG8P9jLI+fRm8AF/xPKhYDB2g6xDWjXBrR5M8gEWsK6KVFlebpZ4QsrxdyIX1Jaw==
dependencies:
- "@jest/console" "^29.6.1"
- "@jest/environment" "^29.6.1"
- "@jest/test-result" "^29.6.1"
- "@jest/transform" "^29.6.1"
- "@jest/types" "^29.6.1"
+ "@jest/console" "^29.6.4"
+ "@jest/environment" "^29.6.4"
+ "@jest/test-result" "^29.6.4"
+ "@jest/transform" "^29.6.4"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
emittery "^0.13.1"
graceful-fs "^4.2.9"
- jest-docblock "^29.4.3"
- jest-environment-node "^29.6.1"
- jest-haste-map "^29.6.1"
- jest-leak-detector "^29.6.1"
- jest-message-util "^29.6.1"
- jest-resolve "^29.6.1"
- jest-runtime "^29.6.1"
- jest-util "^29.6.1"
- jest-watcher "^29.6.1"
- jest-worker "^29.6.1"
+ jest-docblock "^29.6.3"
+ jest-environment-node "^29.6.4"
+ jest-haste-map "^29.6.4"
+ jest-leak-detector "^29.6.3"
+ jest-message-util "^29.6.3"
+ jest-resolve "^29.6.4"
+ jest-runtime "^29.6.4"
+ jest-util "^29.6.3"
+ jest-watcher "^29.6.4"
+ jest-worker "^29.6.4"
p-limit "^3.1.0"
source-map-support "0.5.13"
-jest-runtime@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-29.6.1.tgz#8a0fc9274ef277f3d70ba19d238e64334958a0dc"
- integrity sha512-D6/AYOA+Lhs5e5il8+5pSLemjtJezUr+8zx+Sn8xlmOux3XOqx4d8l/2udBea8CRPqqrzhsKUsN/gBDE/IcaPQ==
- dependencies:
- "@jest/environment" "^29.6.1"
- "@jest/fake-timers" "^29.6.1"
- "@jest/globals" "^29.6.1"
- "@jest/source-map" "^29.6.0"
- "@jest/test-result" "^29.6.1"
- "@jest/transform" "^29.6.1"
- "@jest/types" "^29.6.1"
+jest-runtime@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-29.6.4.tgz#b0bc495c9b6b12a0a7042ac34ca9bb85f8cd0ded"
+ integrity sha512-s/QxMBLvmwLdchKEjcLfwzP7h+jsHvNEtxGP5P+Fl1FMaJX2jMiIqe4rJw4tFprzCwuSvVUo9bn0uj4gNRXsbA==
+ dependencies:
+ "@jest/environment" "^29.6.4"
+ "@jest/fake-timers" "^29.6.4"
+ "@jest/globals" "^29.6.4"
+ "@jest/source-map" "^29.6.3"
+ "@jest/test-result" "^29.6.4"
+ "@jest/transform" "^29.6.4"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
cjs-module-lexer "^1.0.0"
collect-v8-coverage "^1.0.0"
glob "^7.1.3"
graceful-fs "^4.2.9"
- jest-haste-map "^29.6.1"
- jest-message-util "^29.6.1"
- jest-mock "^29.6.1"
- jest-regex-util "^29.4.3"
- jest-resolve "^29.6.1"
- jest-snapshot "^29.6.1"
- jest-util "^29.6.1"
+ jest-haste-map "^29.6.4"
+ jest-message-util "^29.6.3"
+ jest-mock "^29.6.3"
+ jest-regex-util "^29.6.3"
+ jest-resolve "^29.6.4"
+ jest-snapshot "^29.6.4"
+ jest-util "^29.6.3"
slash "^3.0.0"
strip-bom "^4.0.0"
@@ -3503,31 +3646,30 @@ jest-serializer@^26.6.2:
"@types/node" "*"
graceful-fs "^4.2.4"
-jest-snapshot@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-29.6.1.tgz#0d083cb7de716d5d5cdbe80d598ed2fbafac0239"
- integrity sha512-G4UQE1QQ6OaCgfY+A0uR1W2AY0tGXUPQpoUClhWHq1Xdnx1H6JOrC2nH5lqnOEqaDgbHFgIwZ7bNq24HpB180A==
+jest-snapshot@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-29.6.4.tgz#9833eb6b66ff1541c7fd8ceaa42d541f407b4876"
+ integrity sha512-VC1N8ED7+4uboUKGIDsbvNAZb6LakgIPgAF4RSpF13dN6YaMokfRqO+BaqK4zIh6X3JffgwbzuGqDEjHm/MrvA==
dependencies:
"@babel/core" "^7.11.6"
"@babel/generator" "^7.7.2"
"@babel/plugin-syntax-jsx" "^7.7.2"
"@babel/plugin-syntax-typescript" "^7.7.2"
"@babel/types" "^7.3.3"
- "@jest/expect-utils" "^29.6.1"
- "@jest/transform" "^29.6.1"
- "@jest/types" "^29.6.1"
- "@types/prettier" "^2.1.5"
+ "@jest/expect-utils" "^29.6.4"
+ "@jest/transform" "^29.6.4"
+ "@jest/types" "^29.6.3"
babel-preset-current-node-syntax "^1.0.0"
chalk "^4.0.0"
- expect "^29.6.1"
+ expect "^29.6.4"
graceful-fs "^4.2.9"
- jest-diff "^29.6.1"
- jest-get-type "^29.4.3"
- jest-matcher-utils "^29.6.1"
- jest-message-util "^29.6.1"
- jest-util "^29.6.1"
+ jest-diff "^29.6.4"
+ jest-get-type "^29.6.3"
+ jest-matcher-utils "^29.6.4"
+ jest-message-util "^29.6.3"
+ jest-util "^29.6.3"
natural-compare "^1.4.0"
- pretty-format "^29.6.1"
+ pretty-format "^29.6.3"
semver "^7.5.3"
jest-util@^26.6.2:
@@ -3542,42 +3684,42 @@ jest-util@^26.6.2:
is-ci "^2.0.0"
micromatch "^4.0.2"
-jest-util@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.6.1.tgz#c9e29a87a6edbf1e39e6dee2b4689b8a146679cb"
- integrity sha512-NRFCcjc+/uO3ijUVyNOQJluf8PtGCe/W6cix36+M3cTFgiYqFOOW5MgN4JOOcvbUhcKTYVd1CvHz/LWi8d16Mg==
+jest-util@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-util/-/jest-util-29.6.3.tgz#e15c3eac8716440d1ed076f09bc63ace1aebca63"
+ integrity sha512-QUjna/xSy4B32fzcKTSz1w7YYzgiHrjjJjevdRf61HYk998R5vVMMNmrHESYZVDS5DSWs+1srPLPKxXPkeSDOA==
dependencies:
- "@jest/types" "^29.6.1"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
chalk "^4.0.0"
ci-info "^3.2.0"
graceful-fs "^4.2.9"
picomatch "^2.2.3"
-jest-validate@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-29.6.1.tgz#765e684af6e2c86dce950aebefbbcd4546d69f7b"
- integrity sha512-r3Ds69/0KCN4vx4sYAbGL1EVpZ7MSS0vLmd3gV78O+NAx3PDQQukRU5hNHPXlyqCgFY8XUk7EuTMLugh0KzahA==
+jest-validate@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-29.6.3.tgz#a75fca774cfb1c5758c70d035d30a1f9c2784b4d"
+ integrity sha512-e7KWZcAIX+2W1o3cHfnqpGajdCs1jSM3DkXjGeLSNmCazv1EeI1ggTeK5wdZhF+7N+g44JI2Od3veojoaumlfg==
dependencies:
- "@jest/types" "^29.6.1"
+ "@jest/types" "^29.6.3"
camelcase "^6.2.0"
chalk "^4.0.0"
- jest-get-type "^29.4.3"
+ jest-get-type "^29.6.3"
leven "^3.1.0"
- pretty-format "^29.6.1"
+ pretty-format "^29.6.3"
-jest-watcher@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-29.6.1.tgz#7c0c43ddd52418af134c551c92c9ea31e5ec942e"
- integrity sha512-d4wpjWTS7HEZPaaj8m36QiaP856JthRZkrgcIY/7ISoUWPIillrXM23WPboZVLbiwZBt4/qn2Jke84Sla6JhFA==
+jest-watcher@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-29.6.4.tgz#633eb515ae284aa67fd6831f1c9d1b534cf0e0ba"
+ integrity sha512-oqUWvx6+On04ShsT00Ir9T4/FvBeEh2M9PTubgITPxDa739p4hoQweWPRGyYeaojgT0xTpZKF0Y/rSY1UgMxvQ==
dependencies:
- "@jest/test-result" "^29.6.1"
- "@jest/types" "^29.6.1"
+ "@jest/test-result" "^29.6.4"
+ "@jest/types" "^29.6.3"
"@types/node" "*"
ansi-escapes "^4.2.1"
chalk "^4.0.0"
emittery "^0.13.1"
- jest-util "^29.6.1"
+ jest-util "^29.6.3"
string-length "^4.0.1"
jest-worker@^26.6.2:
@@ -3589,25 +3731,25 @@ jest-worker@^26.6.2:
merge-stream "^2.0.0"
supports-color "^7.0.0"
-jest-worker@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.6.1.tgz#64b015f0e985ef3a8ad049b61fe92b3db74a5319"
- integrity sha512-U+Wrbca7S8ZAxAe9L6nb6g8kPdia5hj32Puu5iOqBCMTMWFHXuK6dOV2IFrpedbTV8fjMFLdWNttQTBL6u2MRA==
+jest-worker@^29.6.4:
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.6.4.tgz#f34279f4afc33c872b470d4af21b281ac616abd3"
+ integrity sha512-6dpvFV4WjcWbDVGgHTWo/aupl8/LbBx2NSKfiwqf79xC/yeJjKHT1+StcKy/2KTmW16hE68ccKVOtXf+WZGz7Q==
dependencies:
"@types/node" "*"
- jest-util "^29.6.1"
+ jest-util "^29.6.3"
merge-stream "^2.0.0"
supports-color "^8.0.0"
jest@^29:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/jest/-/jest-29.6.1.tgz#74be1cb719c3abe439f2d94aeb18e6540a5b02ad"
- integrity sha512-Nirw5B4nn69rVUZtemCQhwxOBhm0nsp3hmtF4rzCeWD7BkjAXRIji7xWQfnTNbz9g0aVsBX6aZK3n+23LM6uDw==
+ version "29.6.4"
+ resolved "https://registry.yarnpkg.com/jest/-/jest-29.6.4.tgz#7c48e67a445ba264b778253b5d78d4ebc9d0a622"
+ integrity sha512-tEFhVQFF/bzoYV1YuGyzLPZ6vlPrdfvDmmAxudA1dLEuiztqg2Rkx20vkKY32xiDROcD2KXlgZ7Cu8RPeEHRKw==
dependencies:
- "@jest/core" "^29.6.1"
- "@jest/types" "^29.6.1"
+ "@jest/core" "^29.6.4"
+ "@jest/types" "^29.6.3"
import-local "^3.0.2"
- jest-cli "^29.6.1"
+ jest-cli "^29.6.4"
"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0:
version "4.0.0"
@@ -3634,6 +3776,11 @@ jsesc@^2.5.1:
resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4"
integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==
+json-buffer@3.0.1:
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13"
+ integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==
+
json-parse-even-better-errors@^2.3.0:
version "2.3.1"
resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d"
@@ -3656,21 +3803,28 @@ json5@^1.0.2:
dependencies:
minimist "^1.2.0"
-json5@^2.2.2:
+json5@^2.2.2, json5@^2.2.3:
version "2.2.3"
resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283"
integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==
"jsx-ast-utils@^2.4.1 || ^3.0.0":
- version "3.3.4"
- resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-3.3.4.tgz#b896535fed5b867650acce5a9bd4135ffc7b3bf9"
- integrity sha512-fX2TVdCViod6HwKEtSWGHs57oFhVfCMwieb9PuRDgjDPh5XeqJiHFFFJCHxU5cnTc3Bu/GRL+kPiFmw8XWOfKw==
+ version "3.3.5"
+ resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz#4766bd05a8e2a11af222becd19e15575e52a853a"
+ integrity sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==
dependencies:
array-includes "^3.1.6"
array.prototype.flat "^1.3.1"
object.assign "^4.1.4"
object.values "^1.1.6"
+keyv@^4.5.3:
+ version "4.5.3"
+ resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.5.3.tgz#00873d2b046df737963157bd04f294ca818c9c25"
+ integrity sha512-QCiSav9WaX1PgETJ+SpNnx2PRRapJ/oRSXM4VO5OGYGSjrxbKPVFVhB3l2OCbLCk329N8qyAtsJjSjvVBWzEug==
+ dependencies:
+ json-buffer "3.0.1"
+
kind-of@^3.0.2, kind-of@^3.0.3, kind-of@^3.2.0:
version "3.2.2"
resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-3.2.2.tgz#31ea21a734bab9bbb0f32466d893aea51e4a3c64"
@@ -3742,7 +3896,7 @@ lodash@^4:
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c"
integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==
-loose-envify@^1.1.0, loose-envify@^1.4.0:
+loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0:
version "1.4.0"
resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf"
integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==
@@ -3763,12 +3917,12 @@ lru-cache@^6.0.0:
dependencies:
yallist "^4.0.0"
-make-dir@^3.0.0:
- version "3.1.0"
- resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f"
- integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==
+make-dir@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-4.0.0.tgz#c3c2307a771277cd9638305f915c29ae741b614e"
+ integrity sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==
dependencies:
- semver "^6.0.0"
+ semver "^7.5.3"
makeerror@1.0.12:
version "1.0.12"
@@ -3924,7 +4078,7 @@ node-int64@^0.4.0:
resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b"
integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw==
-node-releases@^2.0.12:
+node-releases@^2.0.13:
version "2.0.13"
resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.13.tgz#d5ed1627c23e3461e819b02e57b75e4899b1c81d"
integrity sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==
@@ -4004,13 +4158,13 @@ object.assign@^4.1.4:
object-keys "^1.1.1"
object.entries@^1.1.6:
- version "1.1.6"
- resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.1.6.tgz#9737d0e5b8291edd340a3e3264bb8a3b00d5fa23"
- integrity sha512-leTPzo4Zvg3pmbQ3rDK69Rl8GQvIqMWubrkxONG9/ojtFE2rD9fjMKfSI5BxW3osRH1m6VdzmqK8oAY9aT4x5w==
+ version "1.1.7"
+ resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.1.7.tgz#2b47760e2a2e3a752f39dd874655c61a7f03c131"
+ integrity sha512-jCBs/0plmPsOnrKAfFQXRG2NFjlhZgjjcBLSmTnEhU8U6vVTsVe8ANeQJCHTl3gSsI4J+0emOoCgoKlmQPMgmA==
dependencies:
call-bind "^1.0.2"
- define-properties "^1.1.4"
- es-abstract "^1.20.4"
+ define-properties "^1.2.0"
+ es-abstract "^1.22.1"
object.fromentries@^2.0.6:
version "2.0.6"
@@ -4021,13 +4175,23 @@ object.fromentries@^2.0.6:
define-properties "^1.1.4"
es-abstract "^1.20.4"
+object.groupby@^1.0.0:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/object.groupby/-/object.groupby-1.0.1.tgz#d41d9f3c8d6c778d9cbac86b4ee9f5af103152ee"
+ integrity sha512-HqaQtqLnp/8Bn4GL16cj+CUYbnpe1bh0TtEaWvybszDG4tgxCJuRpV8VGuvNaI1fAnI4lUJzDG55MXcOH4JZcQ==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.2.0"
+ es-abstract "^1.22.1"
+ get-intrinsic "^1.2.1"
+
object.hasown@^1.1.2:
- version "1.1.2"
- resolved "https://registry.yarnpkg.com/object.hasown/-/object.hasown-1.1.2.tgz#f919e21fad4eb38a57bc6345b3afd496515c3f92"
- integrity sha512-B5UIT3J1W+WuWIU55h0mjlwaqxiE5vYENJXIXZ4VFe05pNYrkKuK0U/6aFcb0pKywYJh7IhfoqUfKVmrJJHZHw==
+ version "1.1.3"
+ resolved "https://registry.yarnpkg.com/object.hasown/-/object.hasown-1.1.3.tgz#6a5f2897bb4d3668b8e79364f98ccf971bda55ae"
+ integrity sha512-fFI4VcYpRHvSLXxP7yiZOMAd331cPfd2p7PFDVbgUsYOfCT3tICVqXWngbjr4m49OvsBwUBQ6O2uQoJvy3RexA==
dependencies:
- define-properties "^1.1.4"
- es-abstract "^1.20.4"
+ define-properties "^1.2.0"
+ es-abstract "^1.22.1"
object.pick@^1.3.0:
version "1.3.0"
@@ -4037,13 +4201,13 @@ object.pick@^1.3.0:
isobject "^3.0.1"
object.values@^1.1.6:
- version "1.1.6"
- resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.6.tgz#4abbaa71eba47d63589d402856f908243eea9b1d"
- integrity sha512-FVVTkD1vENCsAcwNs9k6jea2uHC/X0+JcjG8YA60FN5CMaJmG95wT9jek/xX9nornqGRrBkKtzuAu2wuHpKqvw==
+ version "1.1.7"
+ resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.7.tgz#617ed13272e7e1071b43973aa1655d9291b8442a"
+ integrity sha512-aU6xnDFYT3x17e/f0IiiwlGPTy2jzMySGfUB4fq6z7CV8l85CWHDk5ErhyhpfDHhrOMwGFhSQkhMGHaIotA6Ng==
dependencies:
call-bind "^1.0.2"
- define-properties "^1.1.4"
- es-abstract "^1.20.4"
+ define-properties "^1.2.0"
+ es-abstract "^1.22.1"
once@^1.3.0, once@^1.3.1, once@^1.4.0:
version "1.4.0"
@@ -4210,10 +4374,10 @@ posix-character-classes@^0.1.0:
resolved "https://registry.yarnpkg.com/posix-character-classes/-/posix-character-classes-0.1.1.tgz#01eac0fe3b5af71a2a6c02feabb8c1fef7e00eab"
integrity sha512-xTgYBc3fuo7Yt7JbiuFxSYGToMoz8fLoE6TC9Wx1P/u+LfeThMOAqmuyECnlBaaJb+u1m9hHiXUEtwW4OzfUJg==
-postcss@^8.4.26:
- version "8.4.27"
- resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.27.tgz#234d7e4b72e34ba5a92c29636734349e0d9c3057"
- integrity sha512-gY/ACJtJPSmUFPDCHtX78+01fHa64FaU4zaaWfuh1MhGJISufJAH4cun6k/8fwsHYeK4UQmENQK+tRLCFJE8JQ==
+postcss@^8.4.27:
+ version "8.4.28"
+ resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.28.tgz#c6cc681ed00109072816e1557f889ef51cf950a5"
+ integrity sha512-Z7V5j0cq8oEKyejIKfpD8b4eBy9cwW2JWPk0+fB1HOAMsfHbnAXLLS+PfVWlzMSLQaWttKDt607I0XHmpE67Vw==
dependencies:
nanoid "^3.3.6"
picocolors "^1.0.0"
@@ -4232,16 +4396,16 @@ prettier-linter-helpers@^1.0.0:
fast-diff "^1.1.2"
prettier@3:
- version "3.0.0"
- resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.0.0.tgz#e7b19f691245a21d618c68bc54dc06122f6105ae"
- integrity sha512-zBf5eHpwHOGPC47h0zrPyNn+eAEIdEzfywMoYn2XPi0P44Zp0tSq64rq0xAREh4auw2cJZHo9QUob+NqCQky4g==
+ version "3.0.3"
+ resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.0.3.tgz#432a51f7ba422d1469096c0fdc28e235db8f9643"
+ integrity sha512-L/4pUDMxcNa8R/EthV08Zt42WBO4h1rarVtK0K+QJG0X187OLo7l699jWw0GKuwzkPQ//jMFA/8Xm6Fh3J/DAg==
-pretty-format@^29.6.1:
- version "29.6.1"
- resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.6.1.tgz#ec838c288850b7c4f9090b867c2d4f4edbfb0f3e"
- integrity sha512-7jRj+yXO0W7e4/tSJKoR7HRIHLPPjtNaUGG2xxKQnGvPNRkgWcQ0AZX6P4KBRJN4FcTBWb3sa7DVUJmocYuoog==
+pretty-format@^29.6.3:
+ version "29.6.3"
+ resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.6.3.tgz#d432bb4f1ca6f9463410c3fb25a0ba88e594ace7"
+ integrity sha512-ZsBgjVhFAj5KeK+nHfF1305/By3lechHQSMWCTl8iHSbfOm2TN5nHEtFc/+W7fAyUeCs2n5iow72gld4gW0xDw==
dependencies:
- "@jest/schemas" "^29.6.0"
+ "@jest/schemas" "^29.6.3"
ansi-styles "^5.0.0"
react-is "^18.0.0"
@@ -4320,20 +4484,48 @@ react-refresh@^0.14.0:
resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.14.0.tgz#4e02825378a5f227079554d4284889354e5f553e"
integrity sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ==
+react-remove-scroll-bar@^2.3.4:
+ version "2.3.4"
+ resolved "https://registry.yarnpkg.com/react-remove-scroll-bar/-/react-remove-scroll-bar-2.3.4.tgz#53e272d7a5cb8242990c7f144c44d8bd8ab5afd9"
+ integrity sha512-63C4YQBUt0m6ALadE9XV56hV8BgJWDmmTPY758iIJjfQKt2nYwoUrPk0LXRXcB/yIj82T1/Ixfdpdk68LwIB0A==
+ dependencies:
+ react-style-singleton "^2.2.1"
+ tslib "^2.0.0"
+
+react-remove-scroll@^2.5.5:
+ version "2.5.6"
+ resolved "https://registry.yarnpkg.com/react-remove-scroll/-/react-remove-scroll-2.5.6.tgz#7510b8079e9c7eebe00e65a33daaa3aa29a10336"
+ integrity sha512-bO856ad1uDYLefgArk559IzUNeQ6SWH4QnrevIUjH+GczV56giDfl3h0Idptf2oIKxQmd1p9BN25jleKodTALg==
+ dependencies:
+ react-remove-scroll-bar "^2.3.4"
+ react-style-singleton "^2.2.1"
+ tslib "^2.1.0"
+ use-callback-ref "^1.3.0"
+ use-sidecar "^1.1.2"
+
react-router-dom@^6:
- version "6.14.2"
- resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.14.2.tgz#88f520118b91aa60233bd08dbd3fdcaea3a68488"
- integrity sha512-5pWX0jdKR48XFZBuJqHosX3AAHjRAzygouMTyimnBPOLdY3WjzUSKhus2FVMihUFWzeLebDgr4r8UeQFAct7Bg==
+ version "6.15.0"
+ resolved "https://registry.yarnpkg.com/react-router-dom/-/react-router-dom-6.15.0.tgz#6da7db61e56797266fbbef0d5e324d6ac443ee40"
+ integrity sha512-aR42t0fs7brintwBGAv2+mGlCtgtFQeOzK0BM1/OiqEzRejOZtpMZepvgkscpMUnKb8YO84G7s3LsHnnDNonbQ==
dependencies:
- "@remix-run/router" "1.7.2"
- react-router "6.14.2"
+ "@remix-run/router" "1.8.0"
+ react-router "6.15.0"
-react-router@6.14.2:
- version "6.14.2"
- resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.14.2.tgz#1f60994d8c369de7b8ba7a78d8f7ec23df76b300"
- integrity sha512-09Zss2dE2z+T1D03IheqAFtK4UzQyX8nFPWx6jkwdYzGLXd5ie06A6ezS2fO6zJfEb/SpG6UocN2O1hfD+2urQ==
+react-router@6.15.0:
+ version "6.15.0"
+ resolved "https://registry.yarnpkg.com/react-router/-/react-router-6.15.0.tgz#bf2cb5a4a7ed57f074d4ea88db0d95033f39cac8"
+ integrity sha512-NIytlzvzLwJkCQj2HLefmeakxxWHWAP+02EGqWEZy+DgfHHKQMUoBBjUQLOtFInBMhWtb3hiUy6MfFgwLjXhqg==
dependencies:
- "@remix-run/router" "1.7.2"
+ "@remix-run/router" "1.8.0"
+
+react-style-singleton@^2.2.1:
+ version "2.2.1"
+ resolved "https://registry.yarnpkg.com/react-style-singleton/-/react-style-singleton-2.2.1.tgz#f99e420492b2d8f34d38308ff660b60d0b1205b4"
+ integrity sha512-ZWj0fHEMyWkHzKYUr2Bs/4zU6XLmq9HsgBURm7g5pAVfyn49DgUiNgY2d4lXRlYSiCif9YBGpQleewkcqddc7g==
+ dependencies:
+ get-nonce "^1.0.0"
+ invariant "^2.2.4"
+ tslib "^2.0.0"
react-textarea-autosize@8.3.4:
version "8.3.4"
@@ -4361,6 +4553,18 @@ react@^18:
dependencies:
loose-envify "^1.1.0"
+reflect.getprototypeof@^1.0.3:
+ version "1.0.3"
+ resolved "https://registry.yarnpkg.com/reflect.getprototypeof/-/reflect.getprototypeof-1.0.3.tgz#2738fd896fcc3477ffbd4190b40c2458026b6928"
+ integrity sha512-TTAOZpkJ2YLxl7mVHWrNo3iDMEkYlva/kgFcXndqMgbo/AZUmmavEkdXV+hXtE4P8xdyEKRzalaFqZVuwIk/Nw==
+ dependencies:
+ call-bind "^1.0.2"
+ define-properties "^1.1.4"
+ es-abstract "^1.20.4"
+ get-intrinsic "^1.1.1"
+ globalthis "^1.0.3"
+ which-builtin-type "^1.1.3"
+
regenerator-runtime@^0.13.11:
version "0.13.11"
resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9"
@@ -4430,7 +4634,7 @@ resolve.exports@^2.0.0:
resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-2.0.2.tgz#f8c934b8e6a13f539e38b7098e2e36134f01e800"
integrity sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==
-resolve@^1.19.0, resolve@^1.20.0, resolve@^1.22.1:
+resolve@^1.19.0:
version "1.22.2"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.2.tgz#0ed0943d4e301867955766c9f3e1ae6d01c6845f"
integrity sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==
@@ -4439,6 +4643,15 @@ resolve@^1.19.0, resolve@^1.20.0, resolve@^1.22.1:
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
+resolve@^1.20.0, resolve@^1.22.4:
+ version "1.22.4"
+ resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.4.tgz#1dc40df46554cdaf8948a486a10f6ba1e2026c34"
+ integrity sha512-PXNdCiPqDqeUou+w1C2eTQbNfxKSuMxqTCuvlmmMsk1NWHL5fRrhY6Pl0qEYYc6+QqGClco1Qj8XnjPego4wfg==
+ dependencies:
+ is-core-module "^2.13.0"
+ path-parse "^1.0.7"
+ supports-preserve-symlinks-flag "^1.0.0"
+
resolve@^2.0.0-next.4:
version "2.0.0-next.4"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-2.0.0-next.4.tgz#3d37a113d6429f496ec4752d2a2e58efb1fd4660"
@@ -4465,10 +4678,10 @@ rimraf@^3.0.2:
dependencies:
glob "^7.1.3"
-rollup@^3.25.2:
- version "3.26.3"
- resolved "https://registry.yarnpkg.com/rollup/-/rollup-3.26.3.tgz#bbc8818cadd0aebca348dbb3d68d296d220967b8"
- integrity sha512-7Tin0C8l86TkpcMtXvQu6saWH93nhG3dGQ1/+l5V2TDMceTxO7kDiK6GzbfLWNNxqJXm591PcEZUozZm51ogwQ==
+rollup@^3.27.1:
+ version "3.28.1"
+ resolved "https://registry.yarnpkg.com/rollup/-/rollup-3.28.1.tgz#fb44aa6d5e65c7e13fd5bcfff266d0c4ea9ba433"
+ integrity sha512-R9OMQmIHJm9znrU3m3cpE8uhN0fGdXiawME7aZIpQqvpS/85+Vt1Hq1/yVIcYfOmaQiHjvXkQAoJukvLpau6Yw==
optionalDependencies:
fsevents "~2.3.2"
@@ -4544,12 +4757,12 @@ semver@^5.5.0:
resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.2.tgz#48d55db737c3287cd4835e17fa13feace1c41ef8"
integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==
-semver@^6.0.0, semver@^6.3.0, semver@^6.3.1:
+semver@^6.3.0, semver@^6.3.1:
version "6.3.1"
resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4"
integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==
-semver@^7.5.3:
+semver@^7.5.3, semver@^7.5.4:
version "7.5.4"
resolved "https://registry.yarnpkg.com/semver/-/semver-7.5.4.tgz#483986ec4ed38e1c6c48c34894a9182dbff68a6e"
integrity sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==
@@ -4910,7 +5123,7 @@ to-regex@^3.0.1, to-regex@^3.0.2:
regex-not "^1.0.2"
safe-regex "^1.1.0"
-tsconfig-paths@^3.14.1:
+tsconfig-paths@^3.14.2:
version "3.14.2"
resolved "https://registry.yarnpkg.com/tsconfig-paths/-/tsconfig-paths-3.14.2.tgz#6e32f1f79412decd261f92d633a9dc1cfa99f088"
integrity sha512-o/9iXgCYc5L/JxCHPe3Hvh8Q/2xm5Z+p18PESBU6Ff33695QnCHBEjcytY2q19ua7Mbl/DavtBOLq+oG0RCL+g==
@@ -4925,6 +5138,11 @@ tslib@^2.0.0, tslib@^2.5.0, tslib@^2.6.0:
resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.0.tgz#b295854684dbda164e181d259a22cd779dcd7bc3"
integrity sha512-7At1WUettjcSRHXCyYtTselblcHl9PJFFVKiCAy/bY97+BPZXSQ2wbq0P9s8tK2G7dFQfNnlJnPAiArVBVBsfA==
+tslib@^2.1.0:
+ version "2.6.2"
+ resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae"
+ integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==
+
type-check@^0.4.0, type-check@~0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1"
@@ -5046,6 +5264,13 @@ urix@^0.1.0:
resolved "https://registry.yarnpkg.com/urix/-/urix-0.1.0.tgz#da937f7a62e21fec1fd18d49b35c2935067a6c72"
integrity sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==
+use-callback-ref@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/use-callback-ref/-/use-callback-ref-1.3.0.tgz#772199899b9c9a50526fedc4993fc7fa1f7e32d5"
+ integrity sha512-3FT9PRuRdbB9HfXhEq35u4oZkvpJ5kuYbpqhCfmiZyReuRgpnhDlbr2ZEnnuS0RrJAPn6l23xjFg9kpDM+Ms7w==
+ dependencies:
+ tslib "^2.0.0"
+
use-composed-ref@^1.3.0:
version "1.3.0"
resolved "https://registry.yarnpkg.com/use-composed-ref/-/use-composed-ref-1.3.0.tgz#3d8104db34b7b264030a9d916c5e94fbe280dbda"
@@ -5068,6 +5293,14 @@ use-latest@^1.2.1:
dependencies:
use-isomorphic-layout-effect "^1.1.1"
+use-sidecar@^1.1.2:
+ version "1.1.2"
+ resolved "https://registry.yarnpkg.com/use-sidecar/-/use-sidecar-1.1.2.tgz#2f43126ba2d7d7e117aa5855e5d8f0276dfe73c2"
+ integrity sha512-epTbsLuzZ7lPClpz2TyryBfztm7m+28DlEv2ZCQ3MDr5ssiwyOwGH/e5F9CkfWjJ1t4clvI58yF822/GUkjjhw==
+ dependencies:
+ detect-node-es "^1.1.0"
+ tslib "^2.0.0"
+
use@^3.1.0:
version "3.1.1"
resolved "https://registry.yarnpkg.com/use/-/use-3.1.1.tgz#d50c8cac79a19fbc20f2911f56eb973f4e10070f"
@@ -5083,13 +5316,13 @@ v8-to-istanbul@^9.0.1:
convert-source-map "^1.6.0"
vite@^4:
- version "4.4.6"
- resolved "https://registry.yarnpkg.com/vite/-/vite-4.4.6.tgz#97a0a43868ec773fd88980d7c323c80233521cf1"
- integrity sha512-EY6Mm8vJ++S3D4tNAckaZfw3JwG3wa794Vt70M6cNJ6NxT87yhq7EC8Rcap3ahyHdo8AhCmV9PTk+vG1HiYn1A==
+ version "4.4.9"
+ resolved "https://registry.yarnpkg.com/vite/-/vite-4.4.9.tgz#1402423f1a2f8d66fd8d15e351127c7236d29d3d"
+ integrity sha512-2mbUn2LlUmNASWwSCNSJ/EG2HuSRTnVNaydp6vMCm5VIqJsjMfbIWtbH2kDuwUVW5mMUKKZvGPX/rqeqVvv1XA==
dependencies:
esbuild "^0.18.10"
- postcss "^8.4.26"
- rollup "^3.25.2"
+ postcss "^8.4.27"
+ rollup "^3.27.1"
optionalDependencies:
fsevents "~2.3.2"
@@ -5111,7 +5344,35 @@ which-boxed-primitive@^1.0.2:
is-string "^1.0.5"
is-symbol "^1.0.3"
-which-typed-array@^1.1.10, which-typed-array@^1.1.11:
+which-builtin-type@^1.1.3:
+ version "1.1.3"
+ resolved "https://registry.yarnpkg.com/which-builtin-type/-/which-builtin-type-1.1.3.tgz#b1b8443707cc58b6e9bf98d32110ff0c2cbd029b"
+ integrity sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw==
+ dependencies:
+ function.prototype.name "^1.1.5"
+ has-tostringtag "^1.0.0"
+ is-async-function "^2.0.0"
+ is-date-object "^1.0.5"
+ is-finalizationregistry "^1.0.2"
+ is-generator-function "^1.0.10"
+ is-regex "^1.1.4"
+ is-weakref "^1.0.2"
+ isarray "^2.0.5"
+ which-boxed-primitive "^1.0.2"
+ which-collection "^1.0.1"
+ which-typed-array "^1.1.9"
+
+which-collection@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/which-collection/-/which-collection-1.0.1.tgz#70eab71ebbbd2aefaf32f917082fc62cdcb70906"
+ integrity sha512-W8xeTUwaln8i3K/cY1nGXzdnVZlidBcagyNFtBdD5kxnb4TvGKR7FfSIS3mYpwWS1QUCutfKz8IY8RjftB0+1A==
+ dependencies:
+ is-map "^2.0.1"
+ is-set "^2.0.1"
+ is-weakmap "^2.0.1"
+ is-weakset "^2.0.1"
+
+which-typed-array@^1.1.10, which-typed-array@^1.1.11, which-typed-array@^1.1.9:
version "1.1.11"
resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.11.tgz#99d691f23c72aab6768680805a271b69761ed61a"
integrity sha512-qe9UWWpkeG5yzZ0tNYxDmd7vo58HDBc39mZ0xWWpolAGADdFOzkfamWLDxkOWcvHQKVmdTyQdLD4NOfjLWTKew==
diff --git a/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java b/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java
index 1ae7f5cdfde..7dd45153353 100644
--- a/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java
+++ b/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java
@@ -14,7 +14,7 @@ public class NearestNeighbor extends QueryChain {
this.nonEmpty = true;
}
- NearestNeighbor annotate(Annotation annotation) {
+ public NearestNeighbor annotate(Annotation annotation) {
this.annotation = annotation;
return this;
}
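
Making annotate(Annotation) public lets client code attach the annotations a nearestNeighbor clause needs (for example targetHits) when building queries with the DSL. A minimal sketch of that usage, assuming the Q.nearestNeighbor(...) and A.a(...) factory methods of ai.vespa.client.dsl; the field and vector names are illustrative, not taken from the diff:

    import ai.vespa.client.dsl.A;
    import ai.vespa.client.dsl.NearestNeighbor;
    import ai.vespa.client.dsl.Q;

    public class NearestNeighborSketch {
        public static void main(String[] args) {
            // annotate(...) is callable from outside the package after this change;
            // Q.nearestNeighbor and A.a are assumed factories, not confirmed by the diff.
            NearestNeighbor nn = Q.nearestNeighbor("embedding", "query_embedding")
                                  .annotate(A.a("targetHits", 10));
            System.out.println(nn);  // the annotated clause can then go into a where(...)
        }
    }
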
diff --git a/cloud-tenant-base-dependencies-enforcer/pom.xml b/cloud-tenant-base-dependencies-enforcer/pom.xml
index 08d7ee07b0f..8b069933970 100644
--- a/cloud-tenant-base-dependencies-enforcer/pom.xml
+++ b/cloud-tenant-base-dependencies-enforcer/pom.xml
@@ -49,7 +49,7 @@
<!-- Guava with its internal dependencies -->
<include>com.google.guava:guava:${guava.vespa.version}:provided</include>
- <include>com.google.errorprone:error_prone_annotations:[2.18.0, 3):provided</include>
+ <include>com.google.errorprone:error_prone_annotations:[2.21.1, 3):provided</include>
<include>com.google.guava:failureaccess:[1.0.1, 2):provided</include>
<include>com.google.j2objc:j2objc-annotations:[2.8, 3):provided</include>
@@ -146,6 +146,9 @@
<include>io.airlift:airline:${airline.vespa.version}:test</include>
<include>io.prometheus:simpleclient:${prometheus.client.vespa.version}:test</include>
<include>io.prometheus:simpleclient_common:${prometheus.client.vespa.version}:test</include>
+ <include>io.prometheus:simpleclient_tracer_common:${prometheus.client.vespa.version}:test</include>
+ <include>io.prometheus:simpleclient_tracer_otel:${prometheus.client.vespa.version}:test</include>
+ <include>io.prometheus:simpleclient_tracer_otel_agent:${prometheus.client.vespa.version}:test</include>
<include>junit:junit:${junit4.vespa.version}:test</include>
<include>net.java.dev.jna:jna:${jna.vespa.version}:test</include>
<include>net.openhft:zero-allocation-hashing:jar:${zero-allocation-hashing.vespa.version}:test</include>
@@ -185,6 +188,7 @@
<include>org.eclipse.jetty:jetty-servlet:${jetty.vespa.version}:test</include>
<include>org.eclipse.jetty:jetty-util:${jetty.vespa.version}:test</include>
+ <include>org.hamcrest:hamcrest:${hamcrest.vespa.version}:test</include>
<include>org.hamcrest:hamcrest-core:${hamcrest.vespa.version}:test</include>
<include>org.hdrhistogram:HdrHistogram:${hdrhistogram.vespa.version}:test</include>
<include>org.json:json:${org.json.vespa.version}:test</include>
diff --git a/clustercontroller-core/pom.xml b/clustercontroller-core/pom.xml
index 6fc29fc811c..2f5b95b1ce1 100644
--- a/clustercontroller-core/pom.xml
+++ b/clustercontroller-core/pom.xml
@@ -43,7 +43,7 @@
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java
index 212748d6947..8453fb3450c 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateChangeChecker.java
@@ -39,7 +39,6 @@ import static com.yahoo.vespa.clustercontroller.core.NodeStateChangeChecker.Resu
import static com.yahoo.vespa.clustercontroller.utils.staterestapi.requests.SetUnitStateRequest.Condition.FORCE;
import static com.yahoo.vespa.clustercontroller.utils.staterestapi.requests.SetUnitStateRequest.Condition.SAFE;
import static java.util.logging.Level.FINE;
-import static java.util.logging.Level.INFO;
/**
* Checks if a node can be upgraded.
@@ -166,7 +165,6 @@ public class NodeStateChangeChecker {
if (anotherNodeInGroupAlreadyAllowed(nodeInfo, newDescription))
return allow();
} else {
- log.log(INFO, "Checking if we can set " + nodeInfo.getNode() + " to maintenance temporarily");
var optionalResult = checkIfOtherNodesHaveWantedState(nodeInfo, newDescription, clusterState);
if (optionalResult.isPresent())
return optionalResult.get();
@@ -245,13 +243,13 @@ public class NodeStateChangeChecker {
Set<Integer> groupsWithNodesWantedStateNotUp = groupsWithUserWantedStateNotUp();
if (groupsWithNodesWantedStateNotUp.size() == 0) {
- log.log(INFO, "groupsWithNodesWantedStateNotUp=0");
+ log.log(FINE, "groupsWithNodesWantedStateNotUp=0");
return Optional.empty();
}
Set<Integer> groupsWithSameStateAndDescription = groupsWithSameStateAndDescription(MAINTENANCE, newDescription);
if (aGroupContainsNode(groupsWithSameStateAndDescription, node)) {
- log.log(INFO, "Node is in group with same state and description, allow");
+ log.log(FINE, "Node is in group with same state and description, allow");
return Optional.of(allow());
}
// There are groups with nodes not up, but with another description, probably operator set
@@ -273,7 +271,7 @@ public class NodeStateChangeChecker {
return result;
if (numberOfGroupsToConsider < maxNumberOfGroupsAllowedToBeDown) {
- log.log(INFO, "Allow, retiredAndNotUpGroups=" + retiredAndNotUpGroups);
+ log.log(FINE, "Allow, retiredAndNotUpGroups=" + retiredAndNotUpGroups);
return Optional.of(allow());
}
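Note: the hunks above downgrade the per-check log lines from INFO to FINE. As a minimal sketch (not part of the patch), the default java.util.logging configuration only emits records at INFO and above, so these messages disappear from normal logs unless both the logger and its handler are lowered to FINE:

    import java.util.logging.Level;
    import java.util.logging.Logger;

    public class LogLevelSketch {
        public static void main(String[] args) {
            Logger log = Logger.getLogger(LogLevelSketch.class.getName());
            log.log(Level.INFO, "printed with the default configuration");
            log.log(Level.FINE, "suppressed until logger and handler levels are set to FINE");
        }
    }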
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/hostinfo/Metrics.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/hostinfo/Metrics.java
index ec24eff6aa5..91f219cdb7f 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/hostinfo/Metrics.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/hostinfo/Metrics.java
@@ -67,11 +67,7 @@ public class Metrics {
}
public static class Value {
-
private final Long last;
- private final Double average;
- private final Long count;
-
public Value(
@JsonProperty("average") Double average,
@JsonProperty("count") Long count,
@@ -80,13 +76,9 @@ public class Metrics {
@JsonProperty("max") Long max,
@JsonProperty("last") Long last) {
this.last = last;
- this.average = average;
- this.count = count;
}
public Long getLast() { return last; }
- public Double getAverage() { return average; }
- public Long getCount() { return count; }
}
// We initialize it in case the metrics is missing in the JSON.
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java
index 0f9fad2c76c..fafdbbaa121 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java
@@ -18,7 +18,7 @@ import static com.yahoo.vespa.clustercontroller.core.matchers.EventForNode.event
import static com.yahoo.vespa.clustercontroller.core.matchers.NodeEventWithDescription.nodeEventWithDescription;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.AllOf.allOf;
-import static org.hamcrest.core.IsCollectionContaining.hasItem;
+import static org.hamcrest.core.IsIterableContaining.hasItem;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
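Note: the import change above is part of the move from Hamcrest 1.3 (hamcrest-core/hamcrest-library) to the merged org.hamcrest:hamcrest 2.x artifact, where hasItem is declared in IsIterableContaining and the @Factory annotation no longer exists. A minimal sketch of the updated usage, assuming Hamcrest 2.x and Java 9+ on the classpath:

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.core.IsIterableContaining.hasItem;

    import java.util.List;

    public class HamcrestMigrationSketch {
        public static void main(String[] args) {
            // hasItem is unchanged for callers; only its declaring class moved
            assertThat(List.of("a", "b"), hasItem("a"));
        }
    }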
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java
index 9dc2fd97718..baaeeccf12c 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.clustercontroller.core.matchers;
import com.yahoo.vespa.clustercontroller.core.ClusterEvent;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
-import org.hamcrest.Factory;
public class ClusterEventWithDescription extends BaseMatcher<ClusterEvent> {
private final String expected;
@@ -35,7 +34,6 @@ public class ClusterEventWithDescription extends BaseMatcher<ClusterEvent> {
description.appendText(String.format("got description '%s'", other.getDescription()));
}
- @Factory
public static ClusterEventWithDescription clusterEventWithDescription(String description) {
return new ClusterEventWithDescription(description);
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java
index 8ce0b52ee8b..55be4dbd709 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java
@@ -5,7 +5,6 @@ import com.yahoo.vdslib.state.Node;
import com.yahoo.vespa.clustercontroller.core.NodeEvent;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
-import org.hamcrest.Factory;
public class EventForNode extends BaseMatcher<NodeEvent> {
private final Node expected;
@@ -30,7 +29,6 @@ public class EventForNode extends BaseMatcher<NodeEvent> {
description.appendText(String.format("got node %s", other.getNode().getNode()));
}
- @Factory
public static EventForNode eventForNode(Node expected) {
return new EventForNode(expected);
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java
index 42a89b301fe..bcc02e1f4db 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.clustercontroller.core.matchers;
import com.yahoo.vespa.clustercontroller.core.Event;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
-import org.hamcrest.Factory;
public class EventTimeIs extends BaseMatcher<Event> {
private final long expected;
@@ -32,7 +31,6 @@ public class EventTimeIs extends BaseMatcher<Event> {
description.appendText(String.format("event time is %d", other.getTimeMs()));
}
- @Factory
public static EventTimeIs eventTimeIs(long time) {
return new EventTimeIs(time);
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java
index 59a532763b0..228723e2fef 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.clustercontroller.core.matchers;
import com.yahoo.vespa.clustercontroller.core.NodeEvent;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
-import org.hamcrest.Factory;
public class EventTypeIs extends BaseMatcher<NodeEvent> {
private final NodeEvent.Type expected;
@@ -32,7 +31,6 @@ public class EventTypeIs extends BaseMatcher<NodeEvent> {
description.appendText(String.format("got description '%s'", other.getDescription()));
}
- @Factory
public static EventTypeIs eventTypeIs(NodeEvent.Type type) {
return new EventTypeIs(type);
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasMetricContext.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasMetricContext.java
index 92123752465..95a1d9f1a28 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasMetricContext.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasMetricContext.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.clustercontroller.core.matchers;
import com.yahoo.vespa.clustercontroller.utils.util.MetricReporter;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
-import org.hamcrest.Factory;
import java.util.Map;
import java.util.TreeMap;
@@ -64,7 +63,6 @@ public class HasMetricContext extends BaseMatcher<MetricReporter.Context> {
return new Dimension(name, value);
}
- @Factory
public static HasMetricContext hasMetricContext(Dimension... dimensions) {
return new HasMetricContext(Stream.of(dimensions).collect(Collectors.toMap(dim -> dim.name, dim -> dim.value)));
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java
index 72f51e21c5a..bf87f58692b 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java
@@ -5,7 +5,6 @@ import com.yahoo.vdslib.state.Node;
import com.yahoo.vespa.clustercontroller.core.NodeStateReason;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
-import org.hamcrest.Factory;
import java.util.Map;
@@ -42,7 +41,6 @@ public class HasStateReasonForNode extends BaseMatcher<Map<Node, NodeStateReason
}
}
- @Factory
public static HasStateReasonForNode hasStateReasonForNode(Node node, NodeStateReason reason) {
return new HasStateReasonForNode(node, reason);
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventForBucketSpace.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventForBucketSpace.java
index e479e031f10..9951bf50f5c 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventForBucketSpace.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventForBucketSpace.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.clustercontroller.core.matchers;
import com.yahoo.vespa.clustercontroller.core.NodeEvent;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
-import org.hamcrest.Factory;
import java.util.Optional;
@@ -34,12 +33,10 @@ public class NodeEventForBucketSpace extends BaseMatcher<NodeEvent> {
description.appendText(String.format("got bucket space '%s'", other.getBucketSpace().orElse("null")));
}
- @Factory
public static NodeEventForBucketSpace nodeEventForBucketSpace(String bucketSpace) {
return new NodeEventForBucketSpace(Optional.of(bucketSpace));
}
- @Factory
public static NodeEventForBucketSpace nodeEventForBaseline() {
return new NodeEventForBucketSpace(Optional.empty());
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java
index b070fa27817..ca5720797e2 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.clustercontroller.core.matchers;
import com.yahoo.vespa.clustercontroller.core.NodeEvent;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
-import org.hamcrest.Factory;
public class NodeEventWithDescription extends BaseMatcher<NodeEvent> {
private final String expected;
@@ -35,7 +34,6 @@ public class NodeEventWithDescription extends BaseMatcher<NodeEvent> {
description.appendText(String.format("got description '%s'", other.getDescription()));
}
- @Factory
public static NodeEventWithDescription nodeEventWithDescription(String description) {
return new NodeEventWithDescription(description);
}
diff --git a/clustercontroller-reindexer/pom.xml b/clustercontroller-reindexer/pom.xml
index c94b5b134fe..67321197607 100644
--- a/clustercontroller-reindexer/pom.xml
+++ b/clustercontroller-reindexer/pom.xml
@@ -58,11 +58,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java
index e49619c45cf..fbca1eff46f 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java
@@ -212,7 +212,7 @@ public class DeploymentSpec {
.or(this::hostTTL);
}
- public Optional<Duration> hostTTL() { return hostTTL; }
+ Optional<Duration> hostTTL() { return hostTTL; }
/**
* Returns the most specific zone endpoint, where specificity is given, in decreasing order:
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
index a07ef4b313a..f69cfa6d4c5 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ValidationOverrides.java
@@ -104,7 +104,7 @@ public class ValidationOverrides {
try {
return fromXml(IOUtils.readAll(reader));
} catch (IOException e) {
- throw new IllegalArgumentException("Could not read deployment spec", e);
+ throw new IllegalArgumentException("Could not read validation-overrides", e);
}
}
@@ -123,12 +123,16 @@ public class ValidationOverrides {
Element root = XML.getDocument(xmlForm).getDocumentElement();
List<ValidationOverrides.Allow> overrides = new ArrayList<>();
for (Element allow : XML.getChildren(root, "allow")) {
- Instant until = LocalDate.parse(allow.getAttribute("until"), DateTimeFormatter.ISO_DATE)
- .atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
- .plus(Duration.ofDays(1)); // Make the override valid *on* the "until" date
- Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
- // skip unknown ids as they may be valid for other model versions
- validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
+ try {
+ Instant until = LocalDate.parse(allow.getAttribute("until"), DateTimeFormatter.ISO_DATE)
+ .atStartOfDay().atZone(ZoneOffset.UTC).toInstant()
+ .plus(Duration.ofDays(1)); // Make the override valid *on* the "until" date
+ Optional<ValidationId> validationId = ValidationId.from(XML.getValue(allow));
+ // skip unknown ids as they may be valid for other model versions
+ validationId.ifPresent(id -> overrides.add(new Allow(id, until)));
+ } catch (RuntimeException e) {
+ throw new IllegalArgumentException(e);
+ }
}
return new ValidationOverrides(overrides, xmlForm);
}
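Note: the new try/catch above surfaces date-resolution errors as IllegalArgumentException at parse time instead of letting the raw exception escape. A minimal sketch (not part of the patch) of the underlying behaviour: DateTimeFormatter.ISO_DATE resolves strictly, so an impossible "until" date such as 2000-02-31 fails immediately:

    import java.time.LocalDate;
    import java.time.format.DateTimeFormatter;
    import java.time.format.DateTimeParseException;

    public class UntilDateSketch {
        public static void main(String[] args) {
            try {
                LocalDate.parse("2000-02-31", DateTimeFormatter.ISO_DATE);
            } catch (DateTimeParseException e) {
                // The patched fromXml rethrows this as IllegalArgumentException
                System.out.println(e.getMessage());
            }
        }
    }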
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
index 4bfecabaf69..6e8b9836188 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
@@ -166,7 +166,7 @@ public class DeploymentSpecXmlReader {
stringAttribute(athenzDomainAttribute, root).map(AthenzDomain::from),
stringAttribute(athenzServiceAttribute, root).map(AthenzService::from),
readCloudAccounts(root),
- stringAttribute(hostTTLAttribute, root).map(s -> toDuration(s, "empty host TTL")),
+ readHostTTL(root),
applicationEndpoints,
xmlForm,
deprecatedElements);
@@ -206,7 +206,7 @@ public class DeploymentSpecXmlReader {
List<DeploymentSpec.ChangeBlocker> changeBlockers = readChangeBlockers(instanceElement, parentTag);
Optional<AthenzService> athenzService = mostSpecificAttribute(instanceElement, athenzServiceAttribute).map(AthenzService::from);
Map<CloudName, CloudAccount> cloudAccounts = readCloudAccounts(instanceElement);
- Optional<Duration> hostTTL = mostSpecificAttribute(instanceElement, hostTTLAttribute).map(s -> toDuration(s, "empty host TTL"));
+ Optional<Duration> hostTTL = readHostTTL(instanceElement);
Notifications notifications = readNotifications(instanceElement, parentTag);
// Values where there is no default
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
index 1ab3cc30db7..bdad0dc4cee 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ModelContext.java
@@ -108,10 +108,10 @@ public interface ModelContext {
@ModelFeatureFlag(owners = {"hmusum"}) default Architecture adminClusterArchitecture() { return Architecture.getDefault(); }
@ModelFeatureFlag(owners = {"tokle"}) default boolean enableProxyProtocolMixedMode() { return true; }
@ModelFeatureFlag(owners = {"arnej"}) default String logFileCompressionAlgorithm(String defVal) { return defVal; }
- @ModelFeatureFlag(owners = {"tokle"}) default boolean useRestrictedDataPlaneBindings() { return false; }
+ @ModelFeatureFlag(owners = {"tokle"}, removeAfter = "8.210") default boolean useRestrictedDataPlaneBindings() { return true; }
@ModelFeatureFlag(owners = {"arnej, bjorncs"}) default boolean enableGlobalPhase() { return true; }
@ModelFeatureFlag(owners = {"baldersheim"}, comment = "Select summary decode type") default String summaryDecodePolicy() { return "eager"; }
- @ModelFeatureFlag(owners = {"hmusum"}) default boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return false; }
+ @ModelFeatureFlag(owners = {"hmusum"}, removeAfter = "8.219") default boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return true; }
@ModelFeatureFlag(owners = {"vekterli", "havardpe"}, removeAfter = "8.207") default boolean enableConditionalPutRemoveWriteRepair() { return true; }
@ModelFeatureFlag(owners = {"mortent", "olaa"}) default boolean enableDataplaneProxy() { return false; }
@ModelFeatureFlag(owners = {"baldersheim"}) default boolean enableNestedMultivalueGrouping() { return false; }
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java b/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
index 7ef92bba7e9..20940989618 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.model.api;
+import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
@@ -50,10 +51,13 @@ public class Quota {
public Slime toSlime() {
var slime = new Slime();
- var root = slime.setObject();
+ toSlime(slime.setObject());
+ return slime;
+ }
+
+ public void toSlime(Cursor root) {
maxClusterSize.ifPresent(clusterSize -> root.setLong("clusterSize", clusterSize));
budget.ifPresent(b -> root.setString("budget", b.toPlainString()));
- return slime;
}
public static Quota unlimited() { return UNLIMITED; }
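Note: the new toSlime(Cursor) overload above lets callers serialize a quota into an existing Slime tree instead of always allocating a fresh Slime. A hypothetical caller sketch (the "quota" field name is an illustration, not from the patch):

    import com.yahoo.config.model.api.Quota;
    import com.yahoo.slime.Cursor;
    import com.yahoo.slime.Slime;

    public class QuotaSlimeSketch {
        public static void main(String[] args) {
            Slime slime = new Slime();
            Cursor root = slime.setObject();
            // Embed the quota under a child object of an existing document
            Quota.unlimited().toSlime(root.setObject("quota"));
        }
    }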
diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java
index bf71947a983..57f306bf989 100644
--- a/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java
+++ b/config-model-api/src/test/java/com/yahoo/config/application/api/ValidationOverrideTest.java
@@ -69,6 +69,26 @@ public class ValidationOverrideTest {
e.getMessage());
}
}
+
+ @Test
+ public void testInvalidDate() {
+ String validationOverrides =
+ "<validation-overrides>" +
+ " <allow until='2000-02-31'>indexing-change</allow>" +
+ "</validation-overrides>";
+
+ try {
+ ValidationOverrides overrides = ValidationOverrides.fromXml(new StringReader(validationOverrides));
+ Instant now = ManualClock.at("2000-01-01T23:59:00");
+ overrides.allows("indexing-change", now);
+ overrides.validate(now);
+ Assert.fail("Expected validation interval override validation failure");
+ }
+ catch (IllegalArgumentException e) {
+ Assert.assertEquals("java.time.format.DateTimeParseException: Text '2000-02-31' could not be parsed: Invalid date 'FEBRUARY 31'",
+ e.getMessage());
+ }
+ }
@Test
public void testEmpty() {
diff --git a/config-model-fat/pom.xml b/config-model-fat/pom.xml
index 34034f5e53b..adf511faabe 100644
--- a/config-model-fat/pom.xml
+++ b/config-model-fat/pom.xml
@@ -204,6 +204,9 @@
<i>com.thaiopensource:jing:*:*</i>
<i>io.prometheus:simpleclient:*:*</i>
<i>io.prometheus:simpleclient_common:*:*</i>
+ <i>io.prometheus:simpleclient_tracer_common:*:*</i>
+ <i>io.prometheus:simpleclient_tracer_otel:*:*</i>
+ <i>io.prometheus:simpleclient_tracer_otel_agent:*:*</i>
<i>javax.inject:javax.inject:*:*</i>
<i>net.openhft:zero-allocation-hashing:*:*</i>
<i>org.antlr:antlr-runtime:*:*</i>
diff --git a/config-model/pom.xml b/config-model/pom.xml
index bfaa82a186f..58df48a5763 100644
--- a/config-model/pom.xml
+++ b/config-model/pom.xml
@@ -32,7 +32,7 @@
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-all</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>test</scope>
</dependency>
<dependency>
@@ -52,11 +52,6 @@
<scope>test</scope>
</dependency>
<dependency>
- <groupId>org.assertj</groupId>
- <artifactId>assertj-core</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
<version>${protobuf.vespa.version}</version>
@@ -332,7 +327,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
- <version>3.0.2</version>
+ <version>${maven-jar-plugin.vespa.version}</version>
<executions>
<!-- for testing -->
<execution>
diff --git a/config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java b/config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java
index b4b3dccd440..ad50ad02171 100644
--- a/config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java
+++ b/config-model/src/main/java/com/yahoo/config/model/ConfigModelContext.java
@@ -9,6 +9,7 @@ import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.producer.AnyConfigProducer;
import com.yahoo.config.model.producer.TreeConfigProducer;
+import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.ClusterInfo;
import com.yahoo.config.provision.ClusterInfo.Builder;
import com.yahoo.config.provision.zone.ZoneId;
@@ -80,12 +81,9 @@ public final class ConfigModelContext {
DeploymentSpec spec = getApplicationPackage().getDeploymentSpec();
ClusterInfo.Builder builder = new ClusterInfo.Builder();
spec.hostTTL(properties().applicationId().instance(), deployState.zone().environment(), deployState.zone().region())
- .ifPresent(ttl -> {
- ZoneId zoneId = ZoneId.from(deployState.zone().environment(), deployState.zone().region());
- if (spec.cloudAccount(deployState.zone().cloud().name(), properties().applicationId().instance(), zoneId).isUnspecified())
- throw new IllegalArgumentException("deployment spec specifies host TTL for " + zoneId +
- " but no cloud account is specified for this zone");
- });
+ .filter(ttl -> ! ttl.isZero())
+ .filter(__ -> deployState.getProperties().cloudAccount().map(account -> ! account.isUnspecified()).orElse(false))
+ .ifPresent(builder::hostTTL);
spec.instance(properties().applicationId().instance())
.flatMap(instance -> instance.bcp().groups().stream()
.filter(group -> group.memberRegions().contains(properties().zone().region()))
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
index b06d3572fcb..31e066cdd8c 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/TestProperties.java
@@ -82,10 +82,8 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
private int mbus_network_threads = 1;
private int heapSizePercentage = ApplicationContainerCluster.defaultHeapSizePercentageOfAvailableMemory;
private Architecture adminClusterNodeResourcesArchitecture = Architecture.getDefault();
- private boolean useRestrictedDataPlaneBindings = false;
private Optional<CloudAccount> cloudAccount = Optional.empty();
private boolean allowUserFilters = true;
- private boolean allowMoreThanOneContentGroupDown = false;
private List<DataplaneToken> dataplaneTokens;
private boolean enableDataplaneProxy;
@@ -141,11 +139,9 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
@Override public int rpcEventsBeforeWakeup() { return rpc_events_before_wakeup; }
@Override public String queryDispatchPolicy() { return queryDispatchPolicy; }
@Override public String summaryDecodePolicy() { return summaryDecodePolicy; }
- @Override public boolean useRestrictedDataPlaneBindings() { return useRestrictedDataPlaneBindings; }
@Override public Optional<CloudAccount> cloudAccount() { return cloudAccount; }
@Override public boolean allowUserFilters() { return allowUserFilters; }
@Override public boolean enableGlobalPhase() { return true; } // Enable global-phase by default for unit tests only
- @Override public boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return allowMoreThanOneContentGroupDown; }
@Override public List<DataplaneToken> dataplaneTokens() { return dataplaneTokens; }
@Override public boolean enableDataplaneProxy() { return enableDataplaneProxy; }
@@ -366,21 +362,11 @@ public class TestProperties implements ModelContext.Properties, ModelContext.Fea
return this;
}
- public TestProperties setUseRestrictedDataPlaneBindings(boolean useRestrictedDataPlaneBindings) {
- this.useRestrictedDataPlaneBindings = useRestrictedDataPlaneBindings;
- return this;
- }
-
public TestProperties setCloudAccount(CloudAccount cloudAccount) {
this.cloudAccount = Optional.ofNullable(cloudAccount);
return this;
}
- public TestProperties setAllowMoreThanOneContentGroupDown(boolean allowMoreThanOneContentGroupDown) {
- this.allowMoreThanOneContentGroupDown = allowMoreThanOneContentGroupDown;
- return this;
- }
-
public TestProperties setAllowUserFilters(boolean b) { this.allowUserFilters = b; return this; }
public TestProperties setDataplaneTokens(Collection<DataplaneToken> tokens) {
diff --git a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
index 3b715c63105..dbcd1cea2fa 100644
--- a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
+++ b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
@@ -63,6 +63,7 @@ public class MockApplicationPackage implements ApplicationPackage {
private final boolean failOnValidateXml;
private final QueryProfileRegistry queryProfileRegistry;
private final ApplicationMetaData applicationMetaData;
+ private final TenantName tenantName;
private DeploymentSpec deploymentSpec = null;
@@ -70,7 +71,7 @@ public class MockApplicationPackage implements ApplicationPackage {
Map<Path, MockApplicationFile> files,
String schemaDir,
String deploymentSpec, String validationOverrides, boolean failOnValidateXml,
- String queryProfile, String queryProfileType) {
+ String queryProfile, String queryProfileType, TenantName tenantName) {
this.root = root;
this.hostsS = hosts;
this.servicesS = services;
@@ -85,19 +86,20 @@ public class MockApplicationPackage implements ApplicationPackage {
applicationMetaData = new ApplicationMetaData("dir",
0L,
false,
- ApplicationId.from(TenantName.defaultName(),
+ ApplicationId.from(tenantName,
ApplicationName.from(APPLICATION_NAME),
InstanceName.defaultName()),
"checksum",
APPLICATION_GENERATION,
0L);
+ this.tenantName = tenantName;
}
/** Returns the root of this application package relative to the current dir */
protected File root() { return root; }
@Override
- public ApplicationId getApplicationId() { return ApplicationId.from("default", "mock-application", "default"); }
+ public ApplicationId getApplicationId() { return ApplicationId.from(tenantName.value(), "mock-application", "default"); }
@Override
public Reader getServices() {
@@ -246,6 +248,7 @@ public class MockApplicationPackage implements ApplicationPackage {
private boolean failOnValidateXml = false;
private String queryProfile = null;
private String queryProfileType = null;
+ private TenantName tenantName = TenantName.defaultName();
public Builder() {
}
@@ -323,10 +326,15 @@ public class MockApplicationPackage implements ApplicationPackage {
return this;
}
+ public Builder withTenantname(TenantName tenantName) {
+ this.tenantName = tenantName;
+ return this;
+ }
+
public ApplicationPackage build() {
return new MockApplicationPackage(root, hosts, services, schemas, files, schemaDir,
deploymentSpec, validationOverrides, failOnValidateXml,
- queryProfile, queryProfileType);
+ queryProfile, queryProfileType, tenantName);
}
}
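Note: with the builder addition above, tests can construct a mock package owned by a specific tenant so getApplicationId() reflects it. A hypothetical usage sketch (tenant name chosen for illustration):

    import com.yahoo.config.application.api.ApplicationPackage;
    import com.yahoo.config.model.test.MockApplicationPackage;
    import com.yahoo.config.provision.TenantName;

    public class MockPackageSketch {
        public static void main(String[] args) {
            ApplicationPackage app = new MockApplicationPackage.Builder()
                    .withTenantname(TenantName.from("my-tenant"))
                    .build();
            System.out.println(app.getApplicationId().tenant().value()); // my-tenant
        }
    }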
diff --git a/config-model/src/main/java/com/yahoo/schema/RankProfile.java b/config-model/src/main/java/com/yahoo/schema/RankProfile.java
index 69f32daef4a..35ef12f077a 100644
--- a/config-model/src/main/java/com/yahoo/schema/RankProfile.java
+++ b/config-model/src/main/java/com/yahoo/schema/RankProfile.java
@@ -100,6 +100,7 @@ public class RankProfile implements Cloneable {
private Double termwiseLimit = null;
private Double postFilterThreshold = null;
private Double approximateThreshold = null;
+ private Double targetHitsMaxAdjustmentFactor = null;
/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;
@@ -768,6 +769,7 @@ public class RankProfile implements Cloneable {
public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }
public void setPostFilterThreshold(double threshold) { this.postFilterThreshold = threshold; }
public void setApproximateThreshold(double threshold) { this.approximateThreshold = threshold; }
+ public void setTargetHitsMaxAdjustmentFactor(double factor) { this.targetHitsMaxAdjustmentFactor = factor; }
public OptionalDouble getTermwiseLimit() {
if (termwiseLimit != null) return OptionalDouble.of(termwiseLimit);
@@ -789,6 +791,13 @@ public class RankProfile implements Cloneable {
return uniquelyInherited(p -> p.getApproximateThreshold(), l -> l.isPresent(), "approximate-threshold").orElse(OptionalDouble.empty());
}
+ public OptionalDouble getTargetHitsMaxAdjustmentFactor() {
+ if (targetHitsMaxAdjustmentFactor != null) {
+ return OptionalDouble.of(targetHitsMaxAdjustmentFactor);
+ }
+ return uniquelyInherited(p -> p.getTargetHitsMaxAdjustmentFactor(), l -> l.isPresent(), "target-hits-max-adjustment-factor").orElse(OptionalDouble.empty());
+ }
+
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
diff --git a/config-model/src/main/java/com/yahoo/schema/Schema.java b/config-model/src/main/java/com/yahoo/schema/Schema.java
index 93bec4975a6..36730a502ea 100644
--- a/config-model/src/main/java/com/yahoo/schema/Schema.java
+++ b/config-model/src/main/java/com/yahoo/schema/Schema.java
@@ -319,16 +319,12 @@ public class Schema implements ImmutableSchema {
return null;
}
- /**
- * @return true if the document has been added.
- */
+ /** Returns true if the document has been added. */
public boolean hasDocument() {
return documentType != null;
}
- /**
- * @return The document in this search.
- */
+ /** Returns the document in this search. */
@Override
public SDDocumentType getDocument() {
return documentType;
@@ -384,7 +380,7 @@ public class Schema implements ImmutableSchema {
}
/**
- * Returns a field defined in one of the documents of this search definition.
+ * Returns a field defined in one of the documents of this schema.
* This does not include the extra fields defined outside the document
* (those accessible through the getExtraField() method).
*
diff --git a/config-model/src/main/java/com/yahoo/schema/derived/AttributeFields.java b/config-model/src/main/java/com/yahoo/schema/derived/AttributeFields.java
index 12ca67bf2c9..c3531d03d3f 100644
--- a/config-model/src/main/java/com/yahoo/schema/derived/AttributeFields.java
+++ b/config-model/src/main/java/com/yahoo/schema/derived/AttributeFields.java
@@ -51,9 +51,9 @@ public class AttributeFields extends Derived implements AttributesConfig.Produce
if (unsupportedFieldType(field)) {
return; // Ignore complex struct and map fields for indexed search (only supported for streaming search)
}
- if (isArrayOfSimpleStruct(field, false)) {
+ if (isArrayOfSimpleStruct(field)) {
deriveArrayOfSimpleStruct(field);
- } else if (isMapOfSimpleStruct(field, false)) {
+ } else if (isMapOfSimpleStruct(field)) {
deriveMapOfSimpleStruct(field);
} else if (isMapOfPrimitiveType(field)) {
deriveMapOfPrimitiveType(field);
diff --git a/config-model/src/main/java/com/yahoo/schema/derived/ImportedFields.java b/config-model/src/main/java/com/yahoo/schema/derived/ImportedFields.java
index 122048d02b9..fa3f49f06d5 100644
--- a/config-model/src/main/java/com/yahoo/schema/derived/ImportedFields.java
+++ b/config-model/src/main/java/com/yahoo/schema/derived/ImportedFields.java
@@ -61,9 +61,9 @@ public class ImportedFields extends Derived implements ImportedFieldsConfig.Prod
ImmutableSDField targetField = field.targetField();
if (GeoPos.isAnyPos(targetField)) {
// no action needed
- } else if (isArrayOfSimpleStruct(targetField, false)) {
+ } else if (isArrayOfSimpleStruct(targetField)) {
considerNestedFields(builder, field);
- } else if (isMapOfSimpleStruct(targetField, false)) {
+ } else if (isMapOfSimpleStruct(targetField)) {
considerSimpleField(builder, field.getNestedField("key"));
considerNestedFields(builder, field.getNestedField("value"));
} else if (isMapOfPrimitiveType(targetField)) {
diff --git a/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java b/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java
index 82c0c9d516a..29bd454cc62 100644
--- a/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java
+++ b/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java
@@ -153,6 +153,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
private final double termwiseLimit;
private final OptionalDouble postFilterThreshold;
private final OptionalDouble approximateThreshold;
+ private final OptionalDouble targetHitsMaxAdjustmentFactor;
private final double rankScoreDropLimit;
private final boolean enableNestedMultivalueGrouping;
@@ -197,6 +198,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
enableNestedMultivalueGrouping = deployProperties.featureFlags().enableNestedMultivalueGrouping();
postFilterThreshold = compiled.getPostFilterThreshold();
approximateThreshold = compiled.getApproximateThreshold();
+ targetHitsMaxAdjustmentFactor = compiled.getTargetHitsMaxAdjustmentFactor();
keepRankCount = compiled.getKeepRankCount();
rankScoreDropLimit = compiled.getRankScoreDropLimit();
ignoreDefaultRankFeatures = compiled.getIgnoreDefaultRankFeatures();
@@ -429,6 +431,9 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
if (approximateThreshold.isPresent()) {
properties.add(new Pair<>("vespa.matching.global_filter.lower_limit", String.valueOf(approximateThreshold.getAsDouble())));
}
+ if (targetHitsMaxAdjustmentFactor.isPresent()) {
+ properties.add(new Pair<>("vespa.matching.nns.target_hits_max_adjustment_factor", String.valueOf(targetHitsMaxAdjustmentFactor.getAsDouble())));
+ }
if (matchPhaseSettings != null) {
properties.add(new Pair<>("vespa.matchphase.degradation.attribute", matchPhaseSettings.getAttribute()));
properties.add(new Pair<>("vespa.matchphase.degradation.ascendingorder", matchPhaseSettings.getAscending() + ""));
diff --git a/config-model/src/main/java/com/yahoo/schema/document/ComplexAttributeFieldUtils.java b/config-model/src/main/java/com/yahoo/schema/document/ComplexAttributeFieldUtils.java
index 5e4ee6d4b27..ebafd8f1d24 100644
--- a/config-model/src/main/java/com/yahoo/schema/document/ComplexAttributeFieldUtils.java
+++ b/config-model/src/main/java/com/yahoo/schema/document/ComplexAttributeFieldUtils.java
@@ -22,31 +22,26 @@ import com.yahoo.document.StructDataType;
public class ComplexAttributeFieldUtils {
public static boolean isSupportedComplexField(ImmutableSDField field) {
- return isSupportedComplexField(field, false);
- }
-
- // TODO: Remove the stricterValidation flag when this is changed to being always on.
- public static boolean isSupportedComplexField(ImmutableSDField field, boolean stricterValidation) {
- return (isArrayOfSimpleStruct(field, stricterValidation) ||
- isMapOfSimpleStruct(field, stricterValidation) ||
+ return (isArrayOfSimpleStruct(field) ||
+ isMapOfSimpleStruct(field) ||
isMapOfPrimitiveType(field));
}
- public static boolean isArrayOfSimpleStruct(ImmutableSDField field, boolean stricterValidation) {
+ public static boolean isArrayOfSimpleStruct(ImmutableSDField field) {
if (field.getDataType() instanceof ArrayDataType) {
ArrayDataType arrayType = (ArrayDataType)field.getDataType();
- return isStructWithPrimitiveStructFieldAttributes(arrayType.getNestedType(), field, stricterValidation);
+ return isStructWithPrimitiveStructFieldAttributes(arrayType.getNestedType(), field);
} else {
return false;
}
}
- public static boolean isMapOfSimpleStruct(ImmutableSDField field, boolean stricterValidation) {
+ public static boolean isMapOfSimpleStruct(ImmutableSDField field) {
if (field.getDataType() instanceof MapDataType) {
MapDataType mapType = (MapDataType)field.getDataType();
return isPrimitiveType(mapType.getKeyType()) &&
isStructWithPrimitiveStructFieldAttributes(mapType.getValueType(),
- field.getStructField("value"), stricterValidation);
+ field.getStructField("value"));
} else {
return false;
}
@@ -62,7 +57,7 @@ public class ComplexAttributeFieldUtils {
}
}
- private static boolean isStructWithPrimitiveStructFieldAttributes(DataType type, ImmutableSDField field, boolean stricterValidation) {
+ private static boolean isStructWithPrimitiveStructFieldAttributes(DataType type, ImmutableSDField field) {
if (type instanceof StructDataType && ! GeoPos.isPos(type)) {
for (ImmutableSDField structField : field.getStructFields()) {
Attribute attribute = structField.getAttributes().get(structField.getName());
@@ -75,7 +70,7 @@ public class ComplexAttributeFieldUtils {
return false;
}
}
- if (stricterValidation && !structField.isImportedField() && hasStructFieldAttributes(structField)) {
+ if (!structField.isImportedField() && hasStructFieldAttributes(structField)) {
return false;
}
}
@@ -113,9 +108,9 @@ public class ComplexAttributeFieldUtils {
}
public static boolean isComplexFieldWithOnlyStructFieldAttributes(ImmutableSDField field) {
- if (isArrayOfSimpleStruct(field, false)) {
+ if (isArrayOfSimpleStruct(field)) {
return hasOnlyStructFieldAttributes(field);
- } else if (isMapOfSimpleStruct(field, false)) {
+ } else if (isMapOfSimpleStruct(field)) {
return (field.getStructField("key").hasSingleAttribute()) &&
hasOnlyStructFieldAttributes(field.getStructField("value"));
} else if (isMapOfPrimitiveType(field)) {
diff --git a/config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java b/config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java
index 2d826e164b7..4c7e7eb28f4 100644
--- a/config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java
+++ b/config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java
@@ -101,4 +101,5 @@ public interface ImmutableSDField {
boolean existsIndex(String name);
SummaryField getSummaryField(String name);
boolean hasIndex();
+
}
diff --git a/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java b/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java
index bdecf6332a0..c25d393c8bf 100644
--- a/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java
+++ b/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java
@@ -65,6 +65,8 @@ public class ConvertParsedRanking {
(value -> profile.setPostFilterThreshold(value));
parsed.getApproximateThreshold().ifPresent
(value -> profile.setApproximateThreshold(value));
+ parsed.getTargetHitsMaxAdjustmentFactor().ifPresent
+ (value -> profile.setTargetHitsMaxAdjustmentFactor(value));
parsed.getKeepRankCount().ifPresent
(value -> profile.setKeepRankCount(value));
parsed.getMinHitsPerThread().ifPresent
diff --git a/config-model/src/main/java/com/yahoo/schema/parser/IntermediateCollection.java b/config-model/src/main/java/com/yahoo/schema/parser/IntermediateCollection.java
index f0182cfcf3a..d893919f640 100644
--- a/config-model/src/main/java/com/yahoo/schema/parser/IntermediateCollection.java
+++ b/config-model/src/main/java/com/yahoo/schema/parser/IntermediateCollection.java
@@ -56,7 +56,7 @@ public class IntermediateCollection {
parsedSchemas.put(schema.name(), schema);
return schema;
} catch (TokenMgrException e) {
- throw new ParseException("Unknown symbol: " + e.getMessage());
+ throw new ParseException(stream.formatException("Unknown symbol: " + Exceptions.toMessageString(e)));
} catch (ParseException pe) {
throw new ParseException(stream.formatException(Exceptions.toMessageString(pe)));
}
diff --git a/config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java b/config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java
index 2809ee0c633..1d06b993cdc 100644
--- a/config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java
+++ b/config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java
@@ -29,6 +29,7 @@ class ParsedRankProfile extends ParsedBlock {
private Double termwiseLimit = null;
private Double postFilterThreshold = null;
private Double approximateThreshold = null;
+ private Double targetHitsMaxAdjustmentFactor = null;
private final List<FeatureList> matchFeatures = new ArrayList<>();
private final List<FeatureList> rankFeatures = new ArrayList<>();
private final List<FeatureList> summaryFeatures = new ArrayList<>();
@@ -65,6 +66,7 @@ class ParsedRankProfile extends ParsedBlock {
Optional<Double> getTermwiseLimit() { return Optional.ofNullable(this.termwiseLimit); }
Optional<Double> getPostFilterThreshold() { return Optional.ofNullable(this.postFilterThreshold); }
Optional<Double> getApproximateThreshold() { return Optional.ofNullable(this.approximateThreshold); }
+ Optional<Double> getTargetHitsMaxAdjustmentFactor() { return Optional.ofNullable(this.targetHitsMaxAdjustmentFactor); }
List<FeatureList> getMatchFeatures() { return List.copyOf(this.matchFeatures); }
List<FeatureList> getRankFeatures() { return List.copyOf(this.rankFeatures); }
List<FeatureList> getSummaryFeatures() { return List.copyOf(this.summaryFeatures); }
@@ -231,4 +233,9 @@ class ParsedRankProfile extends ParsedBlock {
this.approximateThreshold = threshold;
}
+ void setTargetHitsMaxAdjustmentFactor(double factor) {
+ verifyThat(targetHitsMaxAdjustmentFactor == null, "already has target-hits-max-adjustment-factor");
+ this.targetHitsMaxAdjustmentFactor = factor;
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/ImportedFieldsResolver.java b/config-model/src/main/java/com/yahoo/schema/processing/ImportedFieldsResolver.java
index 8e44bd026a3..ee465be44f2 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/ImportedFieldsResolver.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/ImportedFieldsResolver.java
@@ -52,9 +52,9 @@ public class ImportedFieldsResolver extends Processor {
ImmutableSDField targetField = getTargetField(importedField, reference);
if (GeoPos.isAnyPos(targetField)) {
resolveImportedPositionField(importedField, reference, targetField, validate);
- } else if (isArrayOfSimpleStruct(targetField, false)) {
+ } else if (isArrayOfSimpleStruct(targetField)) {
resolveImportedArrayOfStructField(importedField, reference, targetField, validate);
- } else if (isMapOfSimpleStruct(targetField, false)) {
+ } else if (isMapOfSimpleStruct(targetField)) {
resolveImportedMapOfStructField(importedField, reference, targetField, validate);
} else if (isMapOfPrimitiveType(targetField)) {
resolveImportedMapOfPrimitiveField(importedField, reference, targetField, validate);
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java b/config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java
index 88e84d5289f..985ec8653c7 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java
@@ -96,11 +96,11 @@ public class IndexingInputs extends Processor {
@Override
protected void doVisit(Expression exp) {
if ( ! (exp instanceof InputExpression)) return;
- String inputField = ((InputExpression)exp).getFieldName();
- if (schema.getField(inputField).hasFullIndexingDocprocRights()) return;
-
- fail(schema, field, "Indexing script refers to field '" + inputField + "' which does not exist " +
- "in document type '" + schema.getDocument().getName() + "', and is not a mutable attribute.");
+ var referencedFieldName = ((InputExpression)exp).getFieldName();
+ var referencedField = schema.getField(referencedFieldName);
+ if (referencedField == null || ! referencedField.hasFullIndexingDocprocRights())
+ fail(schema, field, "Indexing script refers to field '" + referencedFieldName +
+ "' which is neither a field in " + schema.getDocument() + " nor a mutable attribute");
}
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java
index 7d7d0007b5e..2a0839e209d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java
@@ -15,7 +15,6 @@ import java.util.Optional;
*/
public class Logserver extends AbstractService {
- private static final long serialVersionUID = 1L;
private static final String logArchiveDir = "$ROOT/logs/vespa/logarchive";
private String compressionType = "gzip";
@@ -32,7 +31,10 @@ public class Logserver extends AbstractService {
@Override
public void initService(DeployState deployState) {
super.initService(deployState);
- this.compressionType = deployState.featureFlags().logFileCompressionAlgorithm("gzip");
+ // TODO Vespa 9: Change default to zstd everywhere
+ this.compressionType = deployState.isHosted()
+ ? deployState.featureFlags().logFileCompressionAlgorithm("zstd")
+ : deployState.featureFlags().logFileCompressionAlgorithm("gzip");
}
/**
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java
index df6fee1deb7..4dce1b33b1d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsProxyContainerCluster.java
@@ -45,7 +45,9 @@ import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.Stream;
+import static ai.vespa.metrics.set.DefaultMetrics.defaultMetricSet;
import static ai.vespa.metrics.set.MetricSet.empty;
+import static ai.vespa.metrics.set.SystemMetrics.systemMetricSet;
import static com.yahoo.vespa.model.admin.metricsproxy.ConsumersConfigGenerator.addMetrics;
import static com.yahoo.vespa.model.admin.metricsproxy.ConsumersConfigGenerator.generateConsumers;
import static com.yahoo.vespa.model.admin.metricsproxy.ConsumersConfigGenerator.toConsumerBuilder;
@@ -68,7 +70,10 @@ public class MetricsProxyContainerCluster extends ContainerCluster<MetricsProxyC
{
public static final Logger log = Logger.getLogger(MetricsProxyContainerCluster.class.getName());
+ public static final String NEW_DEFAULT_CONSUMER_ID = "new-default";
+
private static final String METRICS_PROXY_NAME = "metrics-proxy";
+
static final Path METRICS_PROXY_BUNDLE_FILE = PlatformBundles.absoluteBundlePath(METRICS_PROXY_NAME);
static final String METRICS_PROXY_BUNDLE_NAME = "com.yahoo.vespa." + METRICS_PROXY_NAME;
@@ -153,6 +158,15 @@ public class MetricsProxyContainerCluster extends ContainerCluster<MetricsProxyC
builder.consumer.addAll(generateConsumers(amendedVespaConsumer, getUserMetricsConsumers(), getZone().system()));
builder.consumer.add(toConsumerBuilder(MetricsConsumer.defaultConsumer));
+ builder.consumer.add(toConsumerBuilder(newDefaultConsumer()));
+ }
+
+ public MetricsConsumer newDefaultConsumer() {
+ if (isHostedVespa()) {
+ // TODO: use different metric set for hosted vespa.
+ return MetricsConsumer.consumer(NEW_DEFAULT_CONSUMER_ID, defaultMetricSet, systemMetricSet);
+ }
+ return MetricsConsumer.consumer(NEW_DEFAULT_CONSUMER_ID, defaultMetricSet, systemMetricSet);
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/MetricsConsumer.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/MetricsConsumer.java
index e18156876cd..cfe3c01e03a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/MetricsConsumer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/MetricsConsumer.java
@@ -30,11 +30,14 @@ public class MetricsConsumer {
// See also ConsumersConfigGenerator and MetricsBuilder where these must be enumerated
public static final MetricsConsumer vespa =
consumer(VespaMetrics.vespaMetricsConsumerId.id, vespaMetricSet, systemMetricSet, networkMetricSet);
+
public static final MetricsConsumer defaultConsumer =
consumer(ValuesFetcher.defaultMetricsConsumerId.id, defaultMetricSet, systemMetricSet);
+
// Referenced from com.yahoo.vespa.hosted.provision.autoscale.NodeMetricsFetcher
public static final MetricsConsumer autoscaling =
consumer("autoscaling", autoscalingMetricSet);
+
public static final MetricsConsumer vespaCloud =
consumer("vespa-cloud", vespaMetricSet, systemMetricSet, networkMetricSet);
@@ -63,7 +66,7 @@ public class MetricsConsumer {
return metricSet.getMetrics();
}
- private static MetricsConsumer consumer(String id, MetricSet ... metricSets) {
+ public static MetricsConsumer consumer(String id, MetricSet ... metricSets) {
return new MetricsConsumer(id, new MetricSet(id + "-consumer-metrics", List.of(), Arrays.asList(metricSets)));
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java
index 3af2b4f8732..d100006743b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/builder/PredefinedMetricSets.java
@@ -9,11 +9,13 @@ import java.util.Map;
import static ai.vespa.metrics.set.AutoscalingMetrics.autoscalingMetricSet;
import static ai.vespa.metrics.set.DefaultMetrics.defaultMetricSet;
+import static ai.vespa.metrics.set.Vespa9DefaultMetricSet.vespa9defaultMetricSet;
import static ai.vespa.metrics.set.DefaultVespaMetrics.defaultVespaMetricSet;
import static ai.vespa.metrics.set.InfrastructureMetricSet.infrastructureMetricSet;
import static ai.vespa.metrics.set.NetworkMetrics.networkMetricSet;
import static ai.vespa.metrics.set.SystemMetrics.systemMetricSet;
import static ai.vespa.metrics.set.VespaMetricSet.vespaMetricSet;
+import static ai.vespa.metrics.set.Vespa9VespaMetricSet.vespa9vespaMetricSet;
/**
* A data object for predefined metric sets.
@@ -24,8 +26,10 @@ public class PredefinedMetricSets {
private static final Map<String, MetricSet> sets = toMapById(
defaultMetricSet,
+ vespa9defaultMetricSet,
defaultVespaMetricSet,
vespaMetricSet,
+ vespa9vespaMetricSet,
systemMetricSet,
networkMetricSet,
autoscalingMetricSet,
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComplexFieldsWithStructFieldAttributesValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComplexFieldsWithStructFieldAttributesValidator.java
index d2999a24775..1d67b0c023f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComplexFieldsWithStructFieldAttributesValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/ComplexFieldsWithStructFieldAttributesValidator.java
@@ -39,19 +39,15 @@ public class ComplexFieldsWithStructFieldAttributesValidator extends Validator {
}
private static void validateComplexFields(String clusterName, Schema schema, DeployLogger logger) {
- String unsupportedFields = validateComplexFields(clusterName, schema, false);
+ String unsupportedFields = validateComplexFields(schema);
if (!unsupportedFields.isEmpty()) {
throw new IllegalArgumentException(getErrorMessage(clusterName, schema, unsupportedFields));
}
- unsupportedFields = validateComplexFields(clusterName, schema, true);
- if (!unsupportedFields.isEmpty()) {
- logger.logApplicationPackage(Level.WARNING, getErrorMessage(clusterName, schema, unsupportedFields));
- }
}
- private static String validateComplexFields(String clusterName, Schema schema, boolean stricterValidation) {
+ private static String validateComplexFields(Schema schema) {
return schema.allFields()
- .filter(field -> isUnsupportedComplexField(field, stricterValidation))
+ .filter(field -> isUnsupportedComplexField(field))
.map(ComplexFieldsWithStructFieldAttributesValidator::toString)
.collect(Collectors.joining(", "));
}
@@ -63,14 +59,14 @@ public class ComplexFieldsWithStructFieldAttributesValidator extends Validator {
clusterName, schema.getName(), unsupportedFields);
}
- private static boolean isUnsupportedComplexField(ImmutableSDField field, boolean stricterValidation) {
+ private static boolean isUnsupportedComplexField(ImmutableSDField field) {
return (field.usesStructOrMap() &&
- !isSupportedComplexField(field, stricterValidation) &&
+ !isSupportedComplexField(field) &&
hasStructFieldAttributes(field.getStructFields()));
}
- private static boolean isSupportedComplexField(ImmutableSDField field, boolean stricterValidation) {
- return (ComplexAttributeFieldUtils.isSupportedComplexField(field, stricterValidation) ||
+ private static boolean isSupportedComplexField(ImmutableSDField field) {
+ return (ComplexAttributeFieldUtils.isSupportedComplexField(field) ||
GeoPos.isAnyPos(field));
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidator.java
new file mode 100644
index 00000000000..842405e68f9
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidator.java
@@ -0,0 +1,30 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation;
+
+import com.yahoo.config.model.ConfigModelContext;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.model.VespaModel;
+
+import java.util.logging.Logger;
+
+/**
+ * Validator to check that only the infrastructure tenant can use a non-default application-type
+ *
+ * @author mortent
+ */
+public class InfrastructureDeploymentValidator extends Validator {
+
+ private static final Logger log = Logger.getLogger(InfrastructureDeploymentValidator.class.getName());
+
+ @Override
+ public void validate(VespaModel model, DeployState deployState) {
+ // Allow the internally defined tenant that owns all infrastructure applications
+ if (ApplicationId.global().tenant().equals(model.applicationPackage().getApplicationId().tenant())) return;
+ ConfigModelContext.ApplicationType applicationType = model.getAdmin().getApplicationType();
+ if (applicationType != ConfigModelContext.ApplicationType.DEFAULT) {
+ log.warning("Tenant %s is not allowed to use application type %s".formatted(model.applicationPackage().getApplicationId().toFullString(), applicationType));
+ throw new IllegalArgumentException("Tenant is not allowed to override application type");
+ }
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java
index f4aa4f649bd..9ea79e0d4ea 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/UriBindingsValidator.java
@@ -14,6 +14,7 @@ import java.util.logging.Level;
/**
* Validates URI bindings for filters and handlers
+ * Enforced in public systems; logs a warning in non-public systems
*
* @author bjorncs
*/
@@ -57,14 +58,25 @@ class UriBindingsValidator extends Validator {
if (binding instanceof SystemBindingPattern) return;
// Allow binding to port if we are restricting data plane bindings
- if (!binding.matchesAnyPort() && !deployState.featureFlags().useRestrictedDataPlaneBindings()) {
- throw new IllegalArgumentException(createErrorMessage(binding, "binding with port is not allowed"));
+ if (!binding.matchesAnyPort()) {
+ logOrThrow(createErrorMessage(binding, "binding with port is not allowed"), deployState);
}
if (!binding.host().equals(BindingPattern.WILDCARD_PATTERN)) {
- throw new IllegalArgumentException(createErrorMessage(binding, "only binding with wildcard ('*') for hostname is allowed"));
+ logOrThrow(createErrorMessage(binding, "only binding with wildcard ('*') for hostname is allowed"), deployState);
}
if (!binding.scheme().equals("http") && !binding.scheme().equals("https")) {
- throw new IllegalArgumentException(createErrorMessage(binding, "only 'http' is allowed as scheme"));
+ logOrThrow(createErrorMessage(binding, "only 'http' is allowed as scheme"), deployState);
+ }
+ }
+
+ /*
+ * Logs to the deploy logger in non-public systems, throws otherwise
+ */
+ private static void logOrThrow(String message, DeployState deployState) {
+ if (deployState.zone().system().isPublic()) {
+ throw new IllegalArgumentException(message);
+ } else {
+ deployState.getDeployLogger().log(Level.WARNING, message);
}
}
@@ -73,7 +85,7 @@ class UriBindingsValidator extends Validator {
}
private static String createErrorMessage(BindingPattern binding, String message) {
- return String.format("For binding '%s': %s", binding.patternString(), message);
+ return String.format("For binding '%s': %s", binding.originalPatternString(), message);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java
index 9b5a1429cb7..d674a56007f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomHandlerBuilder.java
@@ -48,7 +48,7 @@ public class DomHandlerBuilder extends VespaDomBuilder.DomConfigProducerBuilderB
@Override
protected Handler doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> parent, Element handlerElement) {
Handler handler = createHandler(handlerElement);
- var ports = deployState.isHosted() && deployState.featureFlags().useRestrictedDataPlaneBindings()
+ var ports = deployState.isHosted()
? portBindingOverride : Set.<Integer>of();
for (Element xmlBinding : XML.getChildren(handlerElement, "binding"))
@@ -64,7 +64,7 @@ public class DomHandlerBuilder extends VespaDomBuilder.DomConfigProducerBuilderB
UserBindingPattern bindingPattern = UserBindingPattern.fromPattern(path);
if (portBindingOverride.isEmpty()) return Set.of(bindingPattern);
return portBindingOverride.stream()
- .map(bindingPattern::withPort)
+ .map(bindingPattern::withOverriddenPort)
.toList();
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java b/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java
index a5a567b18f8..0795fdf41d6 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java
@@ -92,7 +92,7 @@ public class ContainerDocumentApi {
UserBindingPattern bindingPattern = UserBindingPattern.fromPattern(path);
if (ports.isEmpty()) return List.of(bindingPattern);
return ports.stream()
- .map(p -> (BindingPattern)bindingPattern.withPort(p))
+ .map(p -> (BindingPattern)bindingPattern.withOverriddenPort(p))
.toList();
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
index 3c1c4867f13..584207caeac 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/ApplicationContainerCluster.java
@@ -199,33 +199,36 @@ public final class ApplicationContainerCluster extends ContainerCluster<Applicat
/** Create list of endpoints, these will be consumed later by LbServicesProducer */
private void createEndpointList(DeployState deployState) {
- if(!deployState.isHosted()) return;
- if(deployState.getProperties().applicationId().instance().isTester()) return;
+ if (!deployState.isHosted()) return;
+ if (deployState.getProperties().applicationId().instance().isTester()) return;
List<ApplicationClusterEndpoint> endpoints = new ArrayList<>();
- // Add zone local endpoints using zone dns suffixes, tenant, application and cluster id.
List<String> hosts = getContainers().stream()
.map(AbstractService::getHostName)
.sorted()
.toList();
- for (String suffix : deployState.getProperties().zoneDnsSuffixes()) {
- ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom(
- deployState.zone().system(),
- ClusterSpec.Id.from(getName()),
- deployState.getProperties().applicationId(),
- suffix);
- endpoints.add(ApplicationClusterEndpoint.builder()
- .zoneScope()
- .sharedL4Routing()
- .dnsName(l4Name)
- .hosts(hosts)
- .clusterId(getName())
- .build());
+ Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints();
+ // Add zone-scoped endpoints if not provided by the controller
+ // TODO(mpolden): Remove this when controller always includes zone-scope endpoints, and config models < 8.230 are gone
+ if (endpointsFromController.stream().noneMatch(endpoint -> endpoint.scope() == ApplicationClusterEndpoint.Scope.zone)) {
+ for (String suffix : deployState.getProperties().zoneDnsSuffixes()) {
+ ApplicationClusterEndpoint.DnsName l4Name = ApplicationClusterEndpoint.DnsName.sharedL4NameFrom(
+ deployState.zone().system(),
+ ClusterSpec.Id.from(getName()),
+ deployState.getProperties().applicationId(),
+ suffix);
+ endpoints.add(ApplicationClusterEndpoint.builder()
+ .zoneScope()
+ .sharedL4Routing()
+ .dnsName(l4Name)
+ .hosts(hosts)
+ .clusterId(getName())
+ .build());
+ }
}
// Include all endpoints provided by controller
- Set<ContainerEndpoint> endpointsFromController = deployState.getEndpoints();
endpointsFromController.stream()
.filter(ce -> ce.clusterId().equals(getName()))
.filter(ce -> ce.routingMethod() == sharedLayer4)
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java b/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
index f7d4fe28c6e..9d1b27d4bfe 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
@@ -377,8 +377,7 @@ public abstract class Container extends AbstractService implements
@Override
public void getConfig(ContainerHttpConfig.Builder builder) {
- if (hostResponseHeaderKey.isPresent())
- builder.hostResponseHeaderKey(hostResponseHeaderKey.get());
+ hostResponseHeaderKey.ifPresent(builder::hostResponseHeaderKey);
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/BindingPattern.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/BindingPattern.java
index c3dae7e4c8a..f580a0a2cc9 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/component/BindingPattern.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/component/BindingPattern.java
@@ -63,11 +63,21 @@ public abstract class BindingPattern implements Comparable<BindingPattern> {
return builder.append(path).toString();
}
+ public String originalPatternString() {
+ StringBuilder builder = new StringBuilder(scheme).append("://").append(host);
+ originalPort().ifPresent(port -> builder.append(':').append(port));
+ return builder.append(path).toString();
+ }
+
/** Compares the underlying pattern string for equality */
public boolean hasSamePattern(BindingPattern other) { return this.patternString().equals(other.patternString()); }
/** Returns true if pattern will match any port (if present) in uri **/
- public boolean matchesAnyPort() { return port().filter(p -> !p.equals(WILDCARD_PATTERN)).isEmpty(); }
+ public boolean matchesAnyPort() { return originalPort().filter(p -> !p.equals(WILDCARD_PATTERN)).isEmpty(); }
+
+ public Optional<String> originalPort() {
+ return port();
+ }
@Override
public boolean equals(Object o) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/component/UserBindingPattern.java b/config-model/src/main/java/com/yahoo/vespa/model/container/component/UserBindingPattern.java
index 182eca835c1..e27dfe69f00 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/component/UserBindingPattern.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/component/UserBindingPattern.java
@@ -1,6 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.container.component;
+import java.util.Objects;
+import java.util.Optional;
+
/**
* A {@link BindingPattern} which is constructed directly from a user provided 'binding' element from services.xml.
*
@@ -8,12 +11,30 @@ package com.yahoo.vespa.model.container.component;
*/
public class UserBindingPattern extends BindingPattern {
- private UserBindingPattern(String scheme, String host, String port, String path) { super(scheme, host, port, path); }
- private UserBindingPattern(String binding) { super(binding); }
+ private final Optional<String> originalPort;
+
+ private UserBindingPattern(String scheme, String host, String port, String path) {
+ super(scheme, host, port, path);
+ this.originalPort = null;
+ }
+ private UserBindingPattern(String scheme, String host, String port, Optional<String> originalPort, String path) {
+ super(scheme, host, port, path);
+ this.originalPort = originalPort;
+ }
+ private UserBindingPattern(String binding) {
+ super(binding);
+ this.originalPort = null;
+ }
public static UserBindingPattern fromHttpPath(String path) { return new UserBindingPattern("http", "*", null, path); }
public static UserBindingPattern fromPattern(String binding) { return new UserBindingPattern(binding); }
- public UserBindingPattern withPort(int port) { return new UserBindingPattern(scheme(), host(), Integer.toString(port), path()); }
+ public UserBindingPattern withOverriddenPort(int port) { return new UserBindingPattern(scheme(), host(), Integer.toString(port), port(), path()); }
+
+ public Optional<String> originalPort() {
+ return Objects.isNull(originalPort)
+ ? port()
+ : originalPort;
+ }
@Override
public String toString() {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index 3a679782966..0e72cff1688 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -755,8 +755,13 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
Element modelsElement = XML.getChild(onnxElement, "models");
for (Element modelElement : XML.getChildren(modelsElement, "model") ) {
OnnxModel onnxModel = profiles.getOnnxModels().asMap().get(modelElement.getAttribute("name"));
- if (onnxModel == null)
- continue; // Skip if model is not found
+ if (onnxModel == null) {
+ String availableModels = String.join(", ", profiles.getOnnxModels().asMap().keySet());
+ context.getDeployState().getDeployLogger().logApplicationPackage(WARNING,
+ "Model '" + modelElement.getAttribute("name") + "' not found. Available ONNX " +
+ "models are: " + availableModels + ". Skipping this configuration.");
+ continue;
+ }
onnxModel.setStatelessExecutionMode(getStringValue(modelElement, "execution-mode", null));
onnxModel.setStatelessInterOpThreads(getIntValue(modelElement, "interop-threads", -1));
onnxModel.setStatelessIntraOpThreads(getIntValue(modelElement, "intraop-threads", -1));
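For context on the new warning above: the <model> elements looked up here come from the stateless ONNX block this builder parses (<onnx>/<models>/<model>, with the execution-mode, interop-threads and intraop-threads children read just below). A minimal sketch of such a block; the model name and the option values are illustrative assumptions, not taken from this change:

    <onnx>
      <models>
        <model name="my_onnx_model">  <!-- must name a declared ONNX model, otherwise the new warning is logged and the element is skipped -->
          <execution-mode>parallel</execution-mode>
          <interop-threads>2</interop-threads>
          <intraop-threads>4</intraop-threads>
        </model>
      </models>
    </onnx>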
@@ -1117,7 +1122,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
private void addSearchHandler(DeployState deployState, ApplicationContainerCluster cluster, Element searchElement, ConfigModelContext context) {
var bindingPatterns = List.<BindingPattern>of(SearchHandler.DEFAULT_BINDING);
- if (isHostedTenantApplication(context) && deployState.featureFlags().useRestrictedDataPlaneBindings()) {
+ if (isHostedTenantApplication(context)) {
bindingPatterns = SearchHandler.bindingPattern(getDataplanePorts(deployState));
}
SearchHandler searchHandler = new SearchHandler(cluster,
@@ -1139,7 +1144,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
private List<BindingPattern> toBindingList(DeployState deployState, ConfigModelContext context, List<Element> bindingElements) {
List<BindingPattern> result = new ArrayList<>();
- var portOverride = isHostedTenantApplication(context) && deployState.featureFlags().useRestrictedDataPlaneBindings() ? getDataplanePorts(deployState) : Set.<Integer>of();
+ var portOverride = isHostedTenantApplication(context) ? getDataplanePorts(deployState) : Set.<Integer>of();
for (Element element: bindingElements) {
String text = element.getTextContent().trim();
if (!text.isEmpty())
@@ -1152,7 +1157,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
UserBindingPattern bindingPattern = UserBindingPattern.fromPattern(path);
if (portBindingOverride.isEmpty()) return Set.of(bindingPattern);
return portBindingOverride.stream()
- .map(bindingPattern::withPort)
+ .map(bindingPattern::withOverriddenPort)
.toList();
}
@@ -1163,7 +1168,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
ContainerDocumentApi.HandlerOptions documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
Element ignoreUndefinedFields = XML.getChild(documentApiElement, "ignore-undefined-fields");
- var portBindingOverride = deployState.featureFlags().useRestrictedDataPlaneBindings() && isHostedTenantApplication(context)
+ var portBindingOverride = isHostedTenantApplication(context)
? getDataplanePorts(deployState)
: Set.<Integer>of();
return new ContainerDocumentApi(cluster, documentApiOptions,
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterControllerConfig.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterControllerConfig.java
index 538a1b49d24..bbbb51beb54 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterControllerConfig.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ClusterControllerConfig.java
@@ -24,16 +24,11 @@ public class ClusterControllerConfig extends AnyConfigProducer implements Fleetc
private final String clusterName;
private final ModelElement clusterElement;
private final ResourceLimits resourceLimits;
- private final boolean allowMoreThanOneContentGroupDown;
- public Builder(String clusterName,
- ModelElement clusterElement,
- ResourceLimits resourceLimits,
- boolean allowMoreThanOneContentGroupDown) {
+ public Builder(String clusterName, ModelElement clusterElement, ResourceLimits resourceLimits) {
this.clusterName = clusterName;
this.clusterElement = clusterElement;
this.resourceLimits = resourceLimits;
- this.allowMoreThanOneContentGroupDown = allowMoreThanOneContentGroupDown;
}
@Override
@@ -53,15 +48,13 @@ public class ClusterControllerConfig extends AnyConfigProducer implements Fleetc
var tuningConfig = new ClusterControllerTuningBuilder(clusterControllerTuning,
minNodeRatioPerGroup,
bucketSplittingMinimumBits,
- allowMoreThanOneContentGroupDown,
numberOfLeafGroups)
.build();
return new ClusterControllerConfig(ancestor,
clusterName,
tuningConfig,
- resourceLimits,
- allowMoreThanOneContentGroupDown);
+ resourceLimits);
}
}
@@ -69,18 +62,15 @@ public class ClusterControllerConfig extends AnyConfigProducer implements Fleetc
private final String clusterName;
private final ClusterControllerTuning tuning;
private final ResourceLimits resourceLimits;
- private final boolean allowMoreThanOneContentGroupDown;
private ClusterControllerConfig(TreeConfigProducer<?> parent,
String clusterName,
ClusterControllerTuning tuning,
- ResourceLimits resourceLimits,
- boolean allowMoreThanOneContentGroupDown) {
+ ResourceLimits resourceLimits) {
super(parent, "fleetcontroller");
this.clusterName = clusterName;
this.tuning = tuning;
this.resourceLimits = resourceLimits;
- this.allowMoreThanOneContentGroupDown = allowMoreThanOneContentGroupDown;
}
@Override
@@ -105,7 +95,7 @@ public class ClusterControllerConfig extends AnyConfigProducer implements Fleetc
tuning.minStorageUpRatio.ifPresent(builder::min_storage_up_ratio);
tuning.minSplitBits.ifPresent(builder::ideal_distribution_bits);
tuning.minNodeRatioPerGroup.ifPresent(builder::min_node_ratio_per_group);
- tuning.maxGroupsAllowedDown.ifPresent(max -> builder.max_number_of_groups_allowed_to_be_down(allowMoreThanOneContentGroupDown ? max : -1));
+ tuning.maxGroupsAllowedDown.ifPresent(builder::max_number_of_groups_allowed_to_be_down);
resourceLimits.getConfig(builder);
}
@@ -127,7 +117,6 @@ public class ClusterControllerConfig extends AnyConfigProducer implements Fleetc
ClusterControllerTuningBuilder(ModelElement tuning,
Optional<Double> minNodeRatioPerGroup,
Optional<Integer> bucketSplittingMinimumBits,
- boolean maxGroupsAllowedDown,
int numberOfLeafGroups) {
this.minSplitBits = bucketSplittingMinimumBits;
this.minNodeRatioPerGroup = minNodeRatioPerGroup;
@@ -147,12 +136,12 @@ public class ClusterControllerConfig extends AnyConfigProducer implements Fleetc
this.stableStateTimePeriod = Optional.ofNullable(tuning.childAsDuration("stable-state-period"));
this.minDistributorUpRatio = Optional.ofNullable(tuning.childAsDouble("min-distributor-up-ratio"));
this.minStorageUpRatio = Optional.ofNullable(tuning.childAsDouble("min-storage-up-ratio"));
- this.maxGroupsAllowedDown = maxGroupsAllowedDown(tuning, maxGroupsAllowedDown, numberOfLeafGroups);
+ this.maxGroupsAllowedDown = maxGroupsAllowedDown(tuning, numberOfLeafGroups);
}
}
- private static Optional<Integer> maxGroupsAllowedDown(ModelElement tuning, boolean allowMoreThanOneContentGroupDown, int numberOfLeafGroups) {
+ private static Optional<Integer> maxGroupsAllowedDown(ModelElement tuning, int numberOfLeafGroups) {
var groupsAllowedDownRatio = tuning.childAsDouble("groups-allowed-down-ratio");
if (groupsAllowedDownRatio != null) {
@@ -160,7 +149,7 @@ public class ClusterControllerConfig extends AnyConfigProducer implements Fleetc
throw new IllegalArgumentException("groups-allowed-down-ratio must be between 0 and 1, got " + groupsAllowedDownRatio);
var maxGroupsAllowedDown = Math.max(1, (int) Math.floor(groupsAllowedDownRatio * numberOfLeafGroups));
- return allowMoreThanOneContentGroupDown ? Optional.of(maxGroupsAllowedDown) : Optional.empty();
+ return Optional.of(maxGroupsAllowedDown);
}
return Optional.empty();
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/StorageNode.java b/config-model/src/main/java/com/yahoo/vespa/model/content/StorageNode.java
index c0b0ec3943d..123048c1638 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/StorageNode.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/StorageNode.java
@@ -3,10 +3,8 @@ package com.yahoo.vespa.model.content;
import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.deploy.DeployState;
-import com.yahoo.config.model.producer.AnyConfigProducer;
import com.yahoo.config.model.producer.TreeConfigProducer;
import com.yahoo.vespa.config.content.StorFilestorConfig;
-import com.yahoo.vespa.config.content.core.StorBucketmoverConfig;
import com.yahoo.vespa.config.content.core.StorServerConfig;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.model.application.validation.RestartConfigs;
@@ -21,7 +19,7 @@ import java.util.Optional;
/**
* Class to provide config related to a specific storage node.
*/
-@RestartConfigs({StorFilestorConfig.class, StorBucketmoverConfig.class})
+@RestartConfigs({StorFilestorConfig.class})
public class StorageNode extends ContentNode implements StorServerConfig.Producer, StorFilestorConfig.Producer {
static final String rootFolder = Defaults.getDefaults().underVespaHome("var/db/vespa/search/");
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
index 985cef3a5ad..4f98102a61f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
@@ -167,9 +167,7 @@ public class ContentCluster extends TreeConfigProducer<AnyConfigProducer> implem
ClusterResourceLimits resourceLimits) {
return new ClusterControllerConfig.Builder(c.clusterId,
contentElement,
- resourceLimits.getClusterControllerLimits(),
- deployState.featureFlags()
- .allowMoreThanOneContentGroupDown(new ClusterSpec.Id(c.clusterId)))
+ resourceLimits.getClusterControllerLimits())
.build(deployState, c, contentElement.getXml());
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/IntegrityCheckerProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/IntegrityCheckerProducer.java
deleted file mode 100644
index 4f81bbf165f..00000000000
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/IntegrityCheckerProducer.java
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.model.content.storagecluster;
-
-import com.yahoo.vespa.config.content.core.StorIntegritycheckerConfig;
-import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
-import com.yahoo.vespa.model.content.cluster.ContentCluster;
-
-/**
- * Serves stor-integritychecker config for storage clusters.
- */
-public class IntegrityCheckerProducer implements StorIntegritycheckerConfig.Producer {
-
- public static class Builder {
- protected IntegrityCheckerProducer build(ContentCluster cluster, ModelElement clusterElem) {
- return integrityCheckerDisabled();
- }
- }
-
- private final Integer startTime;
- private final Integer stopTime;
- private final String weeklyCycle;
-
- IntegrityCheckerProducer(Integer startTime, Integer stopTime, String weeklyCycle) {
- this.startTime = startTime;
- this.stopTime = stopTime;
- this.weeklyCycle = weeklyCycle;
- }
-
- private static IntegrityCheckerProducer integrityCheckerDisabled() {
- // Leave start/start times at default, but mark each day of the week as
- // not allowing the integrity checker to be run.
- return new IntegrityCheckerProducer(null, null, "-------");
- }
-
- @Override
- public void getConfig(StorIntegritycheckerConfig.Builder builder) {
- if (startTime != null) {
- builder.dailycyclestart(startTime);
- }
-
- if (stopTime != null) {
- builder.dailycyclestop(stopTime);
- }
-
- if (weeklyCycle != null) {
- builder.weeklycycle(weeklyCycle);
- }
- }
-}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java
index a1e809098f2..872fda9d909 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/storagecluster/StorageCluster.java
@@ -3,8 +3,6 @@ package com.yahoo.vespa.model.content.storagecluster;
import ai.vespa.metrics.StorageMetrics;
import com.yahoo.config.model.deploy.DeployState;
-import com.yahoo.vespa.config.content.core.StorIntegritycheckerConfig;
-import com.yahoo.vespa.config.content.core.StorBucketmoverConfig;
import com.yahoo.vespa.config.content.core.StorVisitorConfig;
import com.yahoo.vespa.config.content.StorFilestorConfig;
import com.yahoo.vespa.config.content.core.StorServerConfig;
@@ -23,8 +21,6 @@ import org.w3c.dom.Element;
*/
public class StorageCluster extends TreeConfigProducer<StorageNode>
implements StorServerConfig.Producer,
- StorBucketmoverConfig.Producer,
- StorIntegritycheckerConfig.Producer,
StorFilestorConfig.Producer,
StorVisitorConfig.Producer,
PersistenceConfig.Producer,
@@ -39,7 +35,6 @@ public class StorageCluster extends TreeConfigProducer<StorageNode>
return new StorageCluster(ancestor,
ContentCluster.getClusterId(clusterElem),
new FileStorProducer.Builder().build(deployState.getProperties(), cluster, clusterElem),
- new IntegrityCheckerProducer.Builder().build(cluster, clusterElem),
new StorServerProducer.Builder().build(clusterElem),
new StorVisitorProducer.Builder().build(clusterElem),
new PersistenceProducer.Builder().build(clusterElem));
@@ -48,7 +43,6 @@ public class StorageCluster extends TreeConfigProducer<StorageNode>
private final String clusterName;
private final FileStorProducer fileStorProducer;
- private final IntegrityCheckerProducer integrityCheckerProducer;
private final StorServerProducer storServerProducer;
private final StorVisitorProducer storVisitorProducer;
private final PersistenceProducer persistenceProducer;
@@ -56,24 +50,18 @@ public class StorageCluster extends TreeConfigProducer<StorageNode>
StorageCluster(TreeConfigProducer<?> parent,
String clusterName,
FileStorProducer fileStorProducer,
- IntegrityCheckerProducer integrityCheckerProducer,
StorServerProducer storServerProducer,
StorVisitorProducer storVisitorProducer,
PersistenceProducer persistenceProducer) {
super(parent, "storage");
this.clusterName = clusterName;
this.fileStorProducer = fileStorProducer;
- this.integrityCheckerProducer = integrityCheckerProducer;
this.storServerProducer = storServerProducer;
this.storVisitorProducer = storVisitorProducer;
this.persistenceProducer = persistenceProducer;
}
@Override
- public void getConfig(StorBucketmoverConfig.Builder builder) {
- }
-
- @Override
public void getConfig(MetricsmanagerConfig.Builder builder) {
ContentCluster.getMetricBuilder("fleetcontroller", builder).
addedmetrics(StorageMetrics.VDS_DATASTORED_ALLDISKS_DOCS.baseName()).
@@ -101,11 +89,6 @@ public class StorageCluster extends TreeConfigProducer<StorageNode>
}
@Override
- public void getConfig(StorIntegritycheckerConfig.Builder builder) {
- integrityCheckerProducer.getConfig(builder);
- }
-
- @Override
public void getConfig(StorServerConfig.Builder builder) {
storServerProducer.getConfig(builder);
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java b/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java
index 53da4d31488..e49eed1eaa0 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java
@@ -10,7 +10,6 @@ import com.yahoo.config.provision.NodeResources;
import com.yahoo.metrics.MetricsmanagerConfig;
import com.yahoo.searchlib.TranslogserverConfig;
import com.yahoo.vespa.config.content.StorFilestorConfig;
-import com.yahoo.vespa.config.content.core.StorBucketmoverConfig;
import com.yahoo.vespa.config.content.core.StorCommunicationmanagerConfig;
import com.yahoo.vespa.config.content.core.StorServerConfig;
import com.yahoo.vespa.config.content.core.StorStatusConfig;
@@ -44,8 +43,7 @@ import static com.yahoo.vespa.defaults.Defaults.getDefaults;
* @author hmusum
*/
@RestartConfigs({ProtonConfig.class, MetricsmanagerConfig.class, TranslogserverConfig.class,
- StorFilestorConfig.class, StorBucketmoverConfig.class,
- StorCommunicationmanagerConfig.class, StorStatusConfig.class,
+ StorFilestorConfig.class, StorCommunicationmanagerConfig.class, StorStatusConfig.class,
StorServerConfig.class})
public class SearchNode extends AbstractService implements
SearchInterface,
diff --git a/config-model/src/main/javacc/SchemaParser.jj b/config-model/src/main/javacc/SchemaParser.jj
index b2cb258c0ab..42eeabb5ac7 100644
--- a/config-model/src/main/javacc/SchemaParser.jj
+++ b/config-model/src/main/javacc/SchemaParser.jj
@@ -326,6 +326,7 @@ TOKEN :
| < TERMWISE_LIMIT: "termwise-limit" >
| < POST_FILTER_THRESHOLD: "post-filter-threshold" >
| < APPROXIMATE_THRESHOLD: "approximate-threshold" >
+| < TARGET_HITS_MAX_ADJUSTMENT_FACTOR: "target-hits-max-adjustment-factor" >
| < KEEP_RANK_COUNT: "keep-rank-count" >
| < RANK_SCORE_DROP_LIMIT: "rank-score-drop-limit" >
| < CONSTANTS: "constants" >
@@ -1727,6 +1728,7 @@ void rankProfileItem(ParsedSchema schema, ParsedRankProfile profile) : { }
| termwiseLimit(profile)
| postFilterThreshold(profile)
| approximateThreshold(profile)
+ | targetHitsMaxAdjustmentFactor(profile)
| rankFeatures(profile)
| rankProperties(profile)
| secondPhase(profile)
@@ -2190,6 +2192,19 @@ void approximateThreshold(ParsedRankProfile profile) :
}
/**
+ * This rule consumes a target-hits-max-adjustment-factor statement for a rank profile.
+ *
+ * @param profile the rank profile to modify
+ */
+void targetHitsMaxAdjustmentFactor(ParsedRankProfile profile) :
+{
+ double factor;
+}
+{
+ (<TARGET_HITS_MAX_ADJUSTMENT_FACTOR> <COLON> factor = floatValue()) { profile.setTargetHitsMaxAdjustmentFactor(factor); }
+}
+
+/**
* Consumes a rank-properties block of a rank profile. There
* is a little trick within this rule to allow the final rank property
* to skip the terminating newline token.
@@ -2641,6 +2656,7 @@ String identifierWithDash() :
| <SECOND_PHASE>
| <STRUCT_FIELD>
| <SUMMARY_TO>
+ | <TARGET_HITS_MAX_ADJUSTMENT_FACTOR>
| <TERMWISE_LIMIT>
| <UPPER_BOUND>
) { return token.image; }
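With the new token and rule above, the following rank-profile setting parses; this mirrors the schema built in the RankProfileTestCase change further down (the schema and profile names are simply the ones used there):

    search test {
        document test {}
        rank-profile my_profile {
            target-hits-max-adjustment-factor: 2.0   # consumed by targetHitsMaxAdjustmentFactor(profile)
        }
    }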
diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc
index bb0e39a41ab..dff24745778 100644
--- a/config-model/src/main/resources/schema/content.rnc
+++ b/config-model/src/main/resources/schema/content.rnc
@@ -6,11 +6,11 @@ include "searchchains.rnc"
Redundancy = element redundancy {
attribute reply-after { xsd:nonNegativeInteger }? &
- xsd:nonNegativeInteger
+ xsd:integer { minInclusive = "1" maxInclusive = "65534" }
}
MinRedundancy = element min-redundancy {
- xsd:nonNegativeInteger
+ xsd:integer { minInclusive = "1" maxInclusive = "65534" }
}
DistributionType = element distribution {
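The tightened bounds mean a redundancy of 0 no longer validates. A minimal services.xml sketch, assuming a typical content cluster (cluster id, document type and node count are hypothetical):

    <content id='music' version='1.0'>
      <redundancy>2</redundancy>  <!-- must now be an integer in [1, 65534] -->
      <documents>
        <document type='music' mode='index'/>
      </documents>
      <nodes count='2'/>
    </content>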
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index 9126c678171..2f8a8bddf20 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -2379,10 +2379,8 @@ public class ModelProvisioningTest {
" </content>" +
"</services>";
VespaModelTester tester = new VespaModelTester();
- tester.setModelProperties(new TestProperties().setAllowMoreThanOneContentGroupDown(true));
tester.addHosts(9);
- VespaModel model = tester.createModel(servicesXml, true, new DeployState.Builder()
- .properties(new TestProperties().setAllowMoreThanOneContentGroupDown(true)));
+ VespaModel model = tester.createModel(servicesXml, true, new DeployState.Builder().properties(new TestProperties()));
var fleetControllerConfigBuilder = new FleetcontrollerConfig.Builder();
model.getConfig(fleetControllerConfigBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-content-configurer");
diff --git a/config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java b/config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java
index 85225f0d255..380b458ea8c 100644
--- a/config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java
@@ -459,17 +459,9 @@ public class RankProfileTestCase extends AbstractSchemaTestCase {
}
private void verifyApproximateNearestNeighborThresholdSettings(Double postFilterThreshold, Double approximateThreshold) throws ParseException {
- var rankProfileRegistry = new RankProfileRegistry();
- var props = new TestProperties();
- var queryProfileRegistry = new QueryProfileRegistry();
- var builder = new ApplicationBuilder(rankProfileRegistry, queryProfileRegistry, props);
- builder.addSchema(createSDWithRankProfileThresholds(postFilterThreshold, approximateThreshold));
- builder.build(true);
-
- var schema = builder.getSchema();
- var rankProfile = rankProfileRegistry.get(schema, "my_profile");
- var rawRankProfile = new RawRankProfile(rankProfile, new LargeRankingExpressions(new MockFileRegistry()), queryProfileRegistry,
- new ImportedMlModels(), new AttributeFields(schema), props);
+ var rp = createRankProfile(postFilterThreshold, approximateThreshold, null);
+ var rankProfile = rp.getFirst();
+ var rawRankProfile = rp.getSecond();
if (postFilterThreshold != null) {
assertEquals((double)postFilterThreshold, rankProfile.getPostFilterThreshold().getAsDouble(), 0.000001);
@@ -488,13 +480,52 @@ public class RankProfileTestCase extends AbstractSchemaTestCase {
}
}
- private String createSDWithRankProfileThresholds(Double postFilterThreshold, Double approximateThreshold) {
+ @Test
+ void target_hits_max_adjustment_factor_is_configurable() throws ParseException {
+ verifyTargetHitsMaxAdjustmentFactor(null);
+ verifyTargetHitsMaxAdjustmentFactor(2.0);
+ }
+
+ private void verifyTargetHitsMaxAdjustmentFactor(Double targetHitsMaxAdjustmentFactor) throws ParseException {
+ var rp = createRankProfile(null, null, targetHitsMaxAdjustmentFactor);
+ var rankProfile = rp.getFirst();
+ var rawRankProfile = rp.getSecond();
+ if (targetHitsMaxAdjustmentFactor != null) {
+ assertEquals((double)targetHitsMaxAdjustmentFactor, rankProfile.getTargetHitsMaxAdjustmentFactor().getAsDouble(), 0.000001);
+ assertEquals(String.valueOf(targetHitsMaxAdjustmentFactor), findProperty(rawRankProfile.configProperties(), "vespa.matching.nns.target_hits_max_adjustment_factor").get());
+ } else {
+ assertTrue(rankProfile.getTargetHitsMaxAdjustmentFactor().isEmpty());
+ assertFalse(findProperty(rawRankProfile.configProperties(), "vespa.matching.nns.target_hits_max_adjustment_factor").isPresent());
+ }
+ }
+
+ private Pair<RankProfile, RawRankProfile> createRankProfile(Double postFilterThreshold,
+ Double approximateThreshold,
+ Double targetHitsMaxAdjustmentFactor) throws ParseException {
+ var rankProfileRegistry = new RankProfileRegistry();
+ var props = new TestProperties();
+ var queryProfileRegistry = new QueryProfileRegistry();
+ var builder = new ApplicationBuilder(rankProfileRegistry, queryProfileRegistry, props);
+ builder.addSchema(createSDWithRankProfile(postFilterThreshold, approximateThreshold, targetHitsMaxAdjustmentFactor));
+ builder.build(true);
+
+ var schema = builder.getSchema();
+ var rankProfile = rankProfileRegistry.get(schema, "my_profile");
+ var rawRankProfile = new RawRankProfile(rankProfile, new LargeRankingExpressions(new MockFileRegistry()), queryProfileRegistry,
+ new ImportedMlModels(), new AttributeFields(schema), props);
+ return new Pair<>(rankProfile, rawRankProfile);
+ }
+
+ private String createSDWithRankProfile(Double postFilterThreshold,
+ Double approximateThreshold,
+ Double targetHitsMaxAdjustmentFactor) {
return joinLines(
"search test {",
" document test {}",
" rank-profile my_profile {",
- (postFilterThreshold != null ? (" post-filter-threshold: " + postFilterThreshold) : ""),
- (approximateThreshold != null ? (" approximate-threshold: " + approximateThreshold) : ""),
+ (postFilterThreshold != null ? (" post-filter-threshold: " + postFilterThreshold) : ""),
+ (approximateThreshold != null ? (" approximate-threshold: " + approximateThreshold) : ""),
+ (targetHitsMaxAdjustmentFactor != null ? (" target-hits-max-adjustment-factor: " + targetHitsMaxAdjustmentFactor) : ""),
" }",
"}");
}
diff --git a/config-model/src/test/java/com/yahoo/schema/document/ComplexAttributeFieldUtilsTestCase.java b/config-model/src/test/java/com/yahoo/schema/document/ComplexAttributeFieldUtilsTestCase.java
index 310ede6bae2..7a89f52268f 100644
--- a/config-model/src/test/java/com/yahoo/schema/document/ComplexAttributeFieldUtilsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/document/ComplexAttributeFieldUtilsTestCase.java
@@ -30,11 +30,11 @@ public class ComplexAttributeFieldUtilsTestCase {
}
boolean isArrayOfSimpleStruct() {
- return ComplexAttributeFieldUtils.isArrayOfSimpleStruct(field(), false);
+ return ComplexAttributeFieldUtils.isArrayOfSimpleStruct(field());
}
boolean isMapOfSimpleStruct() {
- return ComplexAttributeFieldUtils.isMapOfSimpleStruct(field(), false);
+ return ComplexAttributeFieldUtils.isMapOfSimpleStruct(field());
}
boolean isMapOfPrimitiveType() {
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java b/config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java
index 893ee3b1ea4..d420623f233 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java
@@ -17,29 +17,29 @@ public class IndexingInputsTestCase {
void requireThatExtraFieldInputExtraFieldThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_extra_field.sd",
"For schema 'indexing_extra_field_input_extra_field', field 'bar': Indexing script refers " +
- "to field 'bar' which does not exist in document type " +
- "'indexing_extra_field_input_extra_field', and is not a mutable attribute.");
+ "to field 'bar' which is neither a field in document type " +
+ "'indexing_extra_field_input_extra_field' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputImplicitThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_implicit.sd",
"For schema 'indexing_extra_field_input_implicit', field 'foo': Indexing script refers to " +
- "field 'foo' which does not exist in document type 'indexing_extra_field_input_implicit', and is not a mutable attribute.");
+ "field 'foo' which is neither a field in document type 'indexing_extra_field_input_implicit' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputNullThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_null.sd",
"For schema 'indexing_extra_field_input_null', field 'foo': Indexing script refers to field " +
- "'foo' which does not exist in document type 'indexing_extra_field_input_null', and is not a mutable attribute.");
+ "'foo' which is neither a field in document type 'indexing_extra_field_input_null' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputSelfThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_self.sd",
"For schema 'indexing_extra_field_input_self', field 'foo': Indexing script refers to field " +
- "'foo' which does not exist in document type 'indexing_extra_field_input_self', and is not a mutable attribute.");
+ "'foo' which is neither a field in document type 'indexing_extra_field_input_self' nor a mutable attribute");
}
}
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java b/config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java
index 2f53dba7bb4..8db8f0710a0 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java
@@ -4,6 +4,8 @@ package com.yahoo.schema.processing;
import com.yahoo.config.application.api.ApplicationFile;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.test.MockApplicationPackage;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.TenantName;
import com.yahoo.io.IOUtils;
import com.yahoo.io.reader.NamedReader;
import com.yahoo.path.Path;
@@ -400,7 +402,7 @@ public class RankingExpressionWithOnnxTestCase {
StoringApplicationPackage(Path applicationPackageWritableRoot, String queryProfile, String queryProfileType) {
super(new File(applicationPackageWritableRoot.toString()),
null, null, List.of(), Map.of(), null,
- null, null, false, queryProfile, queryProfileType);
+ null, null, false, queryProfile, queryProfileType, TenantName.defaultName());
}
@Override
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/ClusterInfoTest.java b/config-model/src/test/java/com/yahoo/vespa/model/ClusterInfoTest.java
index 82baed07997..16e13a66a44 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/ClusterInfoTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/ClusterInfoTest.java
@@ -8,18 +8,25 @@ import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.Cloud;
+import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.ClusterSpec.Id;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
+import com.yahoo.yolean.Exceptions;
import org.junit.jupiter.api.Test;
import java.time.Duration;
import java.util.Map;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* @author bratseth
@@ -196,7 +203,40 @@ public class ClusterInfoTest {
assertEquals(Duration.ofHours(48), requestedInI2UsWest1.get(new ClusterSpec.Id("testcontent")).clusterInfo().bcpDeadline());
}
+ @Test
+ void host_ttl_requires_cloud_account() throws Exception {
+ var servicesXml = """
+ <services version='1.0'>
+ <container id='testcontainer' version='1.0'>
+ <nodes count='1'/>
+ </container>
+ </services>
+ """;
+
+ var deploymentXml = """
+ <deployment version='1.0' empty-host-ttl='1d'>
+ <instance id='default'>
+ <prod>
+ <region>us-east-1</region>
+ <region empty-host-ttl='0m'>us-north-1</region>
+ <region>us-west-1</region>
+ </prod>
+ </instance>
+ </deployment>
+ """;
+
+ Cloud gcp = Cloud.builder().name(CloudName.GCP).account(CloudAccount.from("vespaz")).allowEnclave(true).build();
+ CloudAccount account = CloudAccount.from("gcp:foobar");
+ assertEquals(Duration.ofHours(24), requestedCapacityIn(account, gcp, "default", "us-east-1", servicesXml, deploymentXml).get(new ClusterSpec.Id("testcontainer")).clusterInfo().hostTTL());
+ assertEquals(Duration.ZERO, requestedCapacityIn(account, gcp, "default", "us-north-1", servicesXml, deploymentXml).get(new ClusterSpec.Id("testcontainer")).clusterInfo().hostTTL());
+ assertEquals(Duration.ZERO, requestedCapacityIn(CloudAccount.empty, gcp, "default", "us-west-1", servicesXml, deploymentXml).get(new Id("testcontainer")).clusterInfo().hostTTL());
+ }
+
private Map<ClusterSpec.Id, Capacity> requestedCapacityIn(String instance, String region, String servicesXml, String deploymentXml) throws Exception {
+ return requestedCapacityIn(null, Cloud.defaultCloud(), instance, region, servicesXml, deploymentXml);
+ }
+
+ private Map<ClusterSpec.Id, Capacity> requestedCapacityIn(CloudAccount account, Cloud cloud, String instance, String region, String servicesXml, String deploymentXml) throws Exception {
var applicationPackage = new MockApplicationPackage.Builder()
.withServices(servicesXml)
.withDeploymentSpec(deploymentXml)
@@ -205,8 +245,9 @@ public class ClusterInfoTest {
var provisioner = new InMemoryProvisioner(10, true);
var deployState = new DeployState.Builder()
.applicationPackage(applicationPackage)
- .zone(new Zone(Environment.prod, RegionName.from(region)))
+ .zone(new Zone(cloud, SystemName.Public, Environment.prod, RegionName.from(region)))
.properties(new TestProperties().setHostedVespa(true)
+ .setCloudAccount(account)
.setApplicationId(ApplicationId.from(TenantName.defaultName(), ApplicationName.defaultName(), InstanceName.from(instance)))
.setZone(new Zone(Environment.prod, RegionName.from(region))))
.modelHostProvisioner(provisioner)
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsConsumersTest.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsConsumersTest.java
index d5ce26bafe8..49019e47bc2 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsConsumersTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/metricsproxy/MetricsConsumersTest.java
@@ -22,7 +22,9 @@ import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.g
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.getModel;
import static com.yahoo.vespa.model.admin.metricsproxy.MetricsProxyModelTester.servicesWithAdminOnly;
import static java.util.Collections.singleton;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests for {@link MetricsProxyContainerCluster} related to metrics consumers.
@@ -39,9 +41,9 @@ public class MetricsConsumersTest {
private static final int numMetricsForVespaConsumer = numVespaMetrics + numSystemMetrics + numNetworkMetrics;
@Test
- void default_public_consumer_is_set_up_for_self_hosted() {
+ void default_public_consumers_are_set_up_for_self_hosted() {
ConsumersConfig config = consumersConfigFromXml(servicesWithAdminOnly(), self_hosted);
- assertEquals(3, config.consumer().size());
+ assertEquals(4, config.consumer().size());
assertEquals(MetricsConsumer.defaultConsumer.id(), config.consumer(2).name());
int numMetricsForPublicDefaultConsumer = defaultMetricSet.getMetrics().size() + numSystemMetrics;
assertEquals(numMetricsForPublicDefaultConsumer, config.consumer(2).metric().size());
@@ -50,10 +52,11 @@ public class MetricsConsumersTest {
@Test
void consumers_are_set_up_for_hosted() {
ConsumersConfig config = consumersConfigFromXml(servicesWithAdminOnly(), hosted);
- assertEquals(3, config.consumer().size());
+ assertEquals(4, config.consumer().size());
assertEquals(MetricsConsumer.vespa.id(), config.consumer(0).name());
assertEquals(MetricsConsumer.autoscaling.id(), config.consumer(1).name());
assertEquals(MetricsConsumer.defaultConsumer.id(), config.consumer(2).name());
+ assertEquals(MetricsProxyContainerCluster.NEW_DEFAULT_CONSUMER_ID, config.consumer(3).name());
}
@Test
@@ -121,7 +124,7 @@ public class MetricsConsumersTest {
);
VespaModel hostedModel = getModel(services, hosted);
ConsumersConfig config = consumersConfigFromModel(hostedModel);
- assertEquals(3, config.consumer().size());
+ assertEquals(4, config.consumer().size());
// All default metrics are retained
ConsumersConfig.Consumer vespaConsumer = config.consumer(0);
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/BundleValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/BundleValidatorTest.java
index d5bcf1e5e49..0e98225aba5 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/BundleValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/BundleValidatorTest.java
@@ -17,7 +17,6 @@ import java.util.jar.JarFile;
import java.util.jar.JarOutputStream;
import static com.yahoo.yolean.Exceptions.uncheck;
-import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -65,12 +64,12 @@ public class BundleValidatorTest {
JarFile jarFile = createTemporaryJarFile(tempDir, "import-warnings");
validator.validateJarFile(state, jarFile);
String output = buffer.toString();
- assertThat(output)
+ assertTrue(output
.contains("JAR file 'import-warnings.jar' imports the packages [org.json] from 'org.json:json'. \n" +
- "This bundle is no longer provided on Vespa 8 - see https://docs.vespa.ai/en/vespa8-release-notes.html#container-runtime.");
- assertThat(output)
+ "This bundle is no longer provided on Vespa 8 - see https://docs.vespa.ai/en/vespa8-release-notes.html#container-runtime."));
+ assertTrue(output
.contains("JAR file 'import-warnings.jar' imports the packages [org.eclipse.jetty.client.api] from 'jetty'. \n" +
- "The Jetty bundles are no longer provided on Vespa 8 - see https://docs.vespa.ai/en/vespa8-release-notes.html#container-runtime.");
+ "The Jetty bundles are no longer provided on Vespa 8 - see https://docs.vespa.ai/en/vespa8-release-notes.html#container-runtime."));
}
static DeployState createDeployState(StringBuffer buffer) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java
index 04abd4e4836..8f8918b5140 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/ComplexFieldsValidatorTestCase.java
@@ -71,9 +71,9 @@ public class ComplexFieldsValidatorTestCase {
}
@Test
- void logs_warning_when_struct_field_inside_nested_struct_array_is_specified_as_attribute() throws IOException, SAXException {
- var logger = new MyLogger();
- createModelAndValidate(joinLines(
+ void throws_exception_when_struct_field_inside_nested_struct_array_is_specified_as_attribute() throws IOException, SAXException {
+ Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
+ createModelAndValidate(joinLines(
"schema test {",
"document test {",
"struct item {",
@@ -92,8 +92,10 @@ public class ComplexFieldsValidatorTestCase {
"}",
"}",
"}",
- "}"), logger);
- assertTrue(logger.message.toString().contains(getExpectedMessage("cabinet (cabinet.value.items.name, cabinet.value.items.color)")));
+ "}"));
+
+ });
+ assertTrue(exception.getMessage().contains(getExpectedMessage("cabinet (cabinet.value.items.name, cabinet.value.items.color)")));
}
private String getExpectedMessage(String unsupportedFields) {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidatorTest.java
new file mode 100644
index 00000000000..0281d5cd6ee
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidatorTest.java
@@ -0,0 +1,48 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation;
+
+import com.yahoo.config.model.NullConfigModelRegistry;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.model.test.MockApplicationPackage;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.model.VespaModel;
+import org.junit.jupiter.api.Test;
+import org.xml.sax.SAXException;
+
+import java.io.IOException;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public class InfrastructureDeploymentValidatorTest {
+
+ @Test
+ public void allows_infrastructure_deployments() {
+ assertDoesNotThrow(() -> runValidator(ApplicationId.global().tenant()));
+ }
+
+ @Test
+ public void prevents_non_infrastructure_deployments() {
+ IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> runValidator(TenantName.defaultName()));
+ assertEquals("Tenant is not allowed to override application type", exception.getMessage());
+ }
+
+ private void runValidator(TenantName tenantName) throws IOException, SAXException {
+ String services = """
+ <services version='1.0' application-type="hosted-infrastructure">
+ <container id='default' version='1.0' />
+ </services>
+ """;
+ var app = new MockApplicationPackage.Builder()
+ .withTenantname(tenantName)
+ .withServices(services)
+ .build();
+ var deployState = new DeployState.Builder().applicationPackage(app).build();
+ var model = new VespaModel(new NullConfigModelRegistry(), deployState);
+
+ var validator = new InfrastructureDeploymentValidator();
+ validator.validate(model, deployState);
+ }
+}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/UriBindingsValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/UriBindingsValidatorTest.java
index ff9596f2062..6307bed28e6 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/UriBindingsValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/UriBindingsValidatorTest.java
@@ -2,23 +2,29 @@
package com.yahoo.vespa.model.application.validation;// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import com.yahoo.config.application.api.ApplicationPackage;
+import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.NullConfigModelRegistry;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
import com.yahoo.config.model.test.MockApplicationPackage;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.model.VespaModel;
import org.junit.jupiter.api.Test;
import org.xml.sax.SAXException;
import java.io.IOException;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.*;
/**
* @author bjorncs
*/
public class UriBindingsValidatorTest {
+ Zone cdZone = new Zone(SystemName.cd, Environment.prod, RegionName.defaultName());
+ Zone publicCdZone = new Zone(SystemName.PublicCd, Environment.prod, RegionName.defaultName());
@Test
void fails_on_user_handler_binding_with_port() throws IOException, SAXException {
@@ -29,6 +35,17 @@ public class UriBindingsValidatorTest {
}
@Test
+ void non_public_logs_on_user_handler_binding_with_port() throws IOException, SAXException {
+ StringBuffer log = new StringBuffer();
+ DeployLogger logger = (__, message) -> {
+ System.out.println("message = " + message);
+ log.append(message).append('\n');
+ };
+ runUriBindingValidator(true, createServicesXmlWithHandler("http://*:4443/my-handler"), cdZone, logger);
+ assertTrue(log.toString().contains("For binding 'http://*:4443/my-handler': binding with port is not allowed"));
+ }
+
+ @Test
void fails_on_user_handler_binding_with_hostname() throws IOException, SAXException {
Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
runUriBindingValidator(true, createServicesXmlWithHandler("http://myhostname/my-handler"));
@@ -57,12 +74,6 @@ public class UriBindingsValidatorTest {
runUriBindingValidator(true, createServicesXmlWithHandler("http://*/my-handler"));
}
- @Test
- void allows_portbinding_when_restricting_data_plane() throws IOException, SAXException {
- runUriBindingValidator(new TestProperties().setHostedVespa(true).setUseRestrictedDataPlaneBindings(true), createServicesXmlWithHandler("http://*:4443/my-handler"));
- }
-
- @Test
void allows_user_binding_with_wildcard_port() throws IOException, SAXException {
runUriBindingValidator(true, createServicesXmlWithHandler("http://*:*/my-handler"));
}
@@ -73,15 +84,20 @@ public class UriBindingsValidatorTest {
}
private void runUriBindingValidator(boolean isHosted, String servicesXml) throws IOException, SAXException {
- runUriBindingValidator(new TestProperties().setHostedVespa(isHosted), servicesXml);
+ runUriBindingValidator(new TestProperties().setZone(publicCdZone).setHostedVespa(isHosted), servicesXml, (__, message) -> {});
+ }
+ private void runUriBindingValidator(boolean isHosted, String servicesXml, Zone zone, DeployLogger deployLogger) throws IOException, SAXException {
+ runUriBindingValidator(new TestProperties().setZone(zone).setHostedVespa(isHosted), servicesXml, deployLogger);
}
- private void runUriBindingValidator(TestProperties testProperties, String servicesXml) throws IOException, SAXException {
+ private void runUriBindingValidator(TestProperties testProperties, String servicesXml, DeployLogger deployLogger) throws IOException, SAXException {
ApplicationPackage app = new MockApplicationPackage.Builder()
.withServices(servicesXml)
.build();
DeployState deployState = new DeployState.Builder()
.applicationPackage(app)
+ .deployLogger(deployLogger)
+ .zone(testProperties.zone())
.properties(testProperties)
.build();
VespaModel model = new VespaModel(new NullConfigModelRegistry(), deployState);
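
The updated test expects a deploy-log warning, rather than a deployment failure, when a handler binding specifies a concrete port in a non-public system such as cd. A rough sketch of that zone-dependent branching, under assumed helper names (not the actual UriBindingsValidator code):

    import java.util.function.BiConsumer;
    import java.util.logging.Level;

    final class BindingPortCheckSketch {

        static void checkBinding(String binding, boolean publiclyVisibleSystem,
                                 BiConsumer<Level, String> deployLogger) {
            if (! hasConcretePort(binding)) return;
            String message = "For binding '" + binding + "': binding with port is not allowed";
            if (publiclyVisibleSystem)
                throw new IllegalArgumentException(message); // fail hard in Public / PublicCd
            deployLogger.accept(Level.WARNING, message);     // only log in cd / main
        }

        private static boolean hasConcretePort(String binding) {
            // crude check: scheme://host:port/..., where a '*' port is the allowed wildcard
            int schemeEnd = binding.indexOf("://");
            if (schemeEnd < 0) return false;
            String rest = binding.substring(schemeEnd + 3);
            int colon = rest.indexOf(':');
            return colon >= 0 && ! rest.startsWith("*", colon + 1);
        }
    }

With this shape, 'http://*:4443/my-handler' warns in the cd zone and throws in PublicCd, while 'http://*:*/my-handler' passes in both, matching the assertions above.
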
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/http/DefaultFilterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/http/DefaultFilterTest.java
index 3e80b319e88..a52b6117482 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/http/DefaultFilterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/http/DefaultFilterTest.java
@@ -11,10 +11,12 @@ import com.yahoo.vespa.model.container.xml.ContainerModelBuilder;
import org.junit.jupiter.api.Test;
import org.w3c.dom.Element;
+import java.util.Map;
import java.util.Set;
+import static java.util.stream.Collectors.toMap;
import static java.util.stream.Collectors.toSet;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* @author bjorncs
@@ -53,17 +55,12 @@ public class DefaultFilterTest extends DomBuilderTest {
ChainsConfig chainsConfig = root.getConfig(ChainsConfig.class, "container/filters/chain");
Set<String> chainsIds = chainsConfig.chains().stream().map(ChainsConfig.Chains::id).collect(toSet());
- assertThat(chainsIds)
- .containsExactlyInAnyOrder(
- "request-chain-with-binding", "response-chain-with-binding", "my-default-request-chain", "my-default-response-chain");
+ assertEquals(chainsIds, Set.of("request-chain-with-binding", "response-chain-with-binding", "my-default-request-chain", "my-default-response-chain"));
}
private static void assertDefaultFiltersInConfig(ServerConfig config) {
- assertThat(config.defaultFilters())
- .containsExactlyInAnyOrder(
- new ServerConfig.DefaultFilters(new ServerConfig.DefaultFilters.Builder()
- .filterId("my-default-request-chain").localPort(8000)),
- new ServerConfig.DefaultFilters(new ServerConfig.DefaultFilters.Builder()
- .filterId("my-default-response-chain").localPort(8000)));
+ var asMap = config.defaultFilters().stream().collect(toMap(ServerConfig.DefaultFilters::filterId, ServerConfig.DefaultFilters::localPort));
+ assertEquals(asMap, Map.of("my-default-request-chain", 8000, "my-default-response-chain", 8000));
+
}
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/FederationTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/FederationTest.java
index ff884d6072f..3c96edc482a 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/FederationTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/search/searchchain/FederationTest.java
@@ -7,7 +7,6 @@ import org.w3c.dom.Element;
import java.util.List;
-import static org.assertj.core.api.Fail.fail;
import static org.junit.jupiter.api.Assertions.*;
/**
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/HandlerBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/HandlerBuilderTest.java
index 6d61610a84f..fac07c6c6e6 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/xml/HandlerBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/xml/HandlerBuilderTest.java
@@ -1,5 +1,6 @@
package com.yahoo.vespa.model.container.xml;
+import com.yahoo.config.model.ConfigModelContext;
import com.yahoo.config.model.builder.xml.test.DomBuilderTest;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
@@ -110,36 +111,15 @@ public class HandlerBuilderTest extends ContainerModelBuilderTestBase {
@Test
void restricts_default_bindings_in_hosted_vespa() {
DeployState deployState = new DeployState.Builder()
- .properties(new TestProperties().setHostedVespa(true).setUseRestrictedDataPlaneBindings(true))
+ .properties(new TestProperties().setHostedVespa(true))
.build();
verifyDefaultBindings(deployState, "http://*:4443");
}
@Test
- void does_not_restrict_default_bindings_in_hosted_vespa_when_disabled() {
- DeployState deployState = new DeployState.Builder()
- .properties(new TestProperties().setHostedVespa(true).setUseRestrictedDataPlaneBindings(false))
- .build();
- verifyDefaultBindings(deployState, "http://*");
- }
-
- @Test
- void does_not_restrict_infrastructure() {
- DeployState deployState = new DeployState.Builder()
-
- .properties(
- new TestProperties()
- .setApplicationId(ApplicationId.defaultId())
- .setHostedVespa(true)
- .setUseRestrictedDataPlaneBindings(false))
- .build();
- verifyDefaultBindings(deployState, "http://*");
- }
-
- @Test
void restricts_custom_bindings_in_hosted_vespa() {
DeployState deployState = new DeployState.Builder()
- .properties(new TestProperties().setHostedVespa(true).setUseRestrictedDataPlaneBindings(true))
+ .properties(new TestProperties().setHostedVespa(true))
.build();
verifyCustomSearchBindings(deployState, "http://*:4443");
}
@@ -147,7 +127,7 @@ public class HandlerBuilderTest extends ContainerModelBuilderTestBase {
@Test
void does_not_restrict_default_bindings_in_self_hosted() {
DeployState deployState = new DeployState.Builder()
- .properties(new TestProperties().setHostedVespa(false).setUseRestrictedDataPlaneBindings(false))
+ .properties(new TestProperties().setHostedVespa(false))
.build();
verifyDefaultBindings(deployState, "http://*");
}
@@ -155,12 +135,15 @@ public class HandlerBuilderTest extends ContainerModelBuilderTestBase {
@Test
void does_not_restrict_custom_bindings_in_self_hosted() {
DeployState deployState = new DeployState.Builder()
- .properties(new TestProperties().setHostedVespa(false).setUseRestrictedDataPlaneBindings(false))
+ .properties(new TestProperties().setHostedVespa(false))
.build();
verifyCustomSearchBindings(deployState, "http://*");
}
private void verifyDefaultBindings(DeployState deployState, String bindingPrefix) {
+ verifyDefaultBindings(deployState, bindingPrefix, ConfigModelContext.ApplicationType.DEFAULT);
+ }
+ private void verifyDefaultBindings(DeployState deployState, String bindingPrefix, ConfigModelContext.ApplicationType applicationType) {
Element clusterElem = DomBuilderTest.parse(
"<container id='default' version='1.0'>",
" <search/>",
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
index d9632b62fb2..1360ca259dd 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ContentClusterTest.java
@@ -1413,32 +1413,32 @@ public class ContentClusterTest extends ContentBaseTest {
@Test
void testGroupsAllowedToBeDown() {
- assertGroupsAllowedsDown(1, 0.5, 1);
- assertGroupsAllowedsDown(2, 0.5, 1);
- assertGroupsAllowedsDown(3, 0.5, 1);
- assertGroupsAllowedsDown(4, 0.5, 2);
- assertGroupsAllowedsDown(5, 0.5, 2);
- assertGroupsAllowedsDown(6, 0.5, 3);
-
- assertGroupsAllowedsDown(1, 0.33, 1);
- assertGroupsAllowedsDown(2, 0.33, 1);
- assertGroupsAllowedsDown(3, 0.33, 1);
- assertGroupsAllowedsDown(4, 0.33, 1);
- assertGroupsAllowedsDown(5, 0.33, 1);
- assertGroupsAllowedsDown(6, 0.33, 1);
-
- assertGroupsAllowedsDown(1, 0.67, 1);
- assertGroupsAllowedsDown(2, 0.67, 1);
- assertGroupsAllowedsDown(3, 0.67, 2);
- assertGroupsAllowedsDown(4, 0.67, 2);
- assertGroupsAllowedsDown(5, 0.67, 3);
- assertGroupsAllowedsDown(6, 0.67, 4);
-
- assertGroupsAllowedsDown(1, 0, 1);
- assertGroupsAllowedsDown(2, 0, 1);
-
- assertGroupsAllowedsDown(1, 1, 1);
- assertGroupsAllowedsDown(2, 1, 2);
+ assertGroupsAllowedDown(1, 0.5, 1);
+ assertGroupsAllowedDown(2, 0.5, 1);
+ assertGroupsAllowedDown(3, 0.5, 1);
+ assertGroupsAllowedDown(4, 0.5, 2);
+ assertGroupsAllowedDown(5, 0.5, 2);
+ assertGroupsAllowedDown(6, 0.5, 3);
+
+ assertGroupsAllowedDown(1, 0.33, 1);
+ assertGroupsAllowedDown(2, 0.33, 1);
+ assertGroupsAllowedDown(3, 0.33, 1);
+ assertGroupsAllowedDown(4, 0.33, 1);
+ assertGroupsAllowedDown(5, 0.33, 1);
+ assertGroupsAllowedDown(6, 0.33, 1);
+
+ assertGroupsAllowedDown(1, 0.67, 1);
+ assertGroupsAllowedDown(2, 0.67, 1);
+ assertGroupsAllowedDown(3, 0.67, 2);
+ assertGroupsAllowedDown(4, 0.67, 2);
+ assertGroupsAllowedDown(5, 0.67, 3);
+ assertGroupsAllowedDown(6, 0.67, 4);
+
+ assertGroupsAllowedDown(1, 0, 1);
+ assertGroupsAllowedDown(2, 0, 1);
+
+ assertGroupsAllowedDown(1, 1, 1);
+ assertGroupsAllowedDown(2, 1, 2);
}
private void assertIndexingDocprocEnabled(boolean indexed, boolean force, boolean expEnabled) {
@@ -1478,9 +1478,9 @@ public class ContentClusterTest extends ContentBaseTest {
assertIndexingDocprocEnabled(false, true, true);
}
- private void assertGroupsAllowedsDown(int groupCount, double groupsAllowedDown, int expectedGroupsAllowedDown) {
+ private void assertGroupsAllowedDown(int groupCount, double groupsAllowedDown, int expectedGroupsAllowedDown) {
var services = servicesWithGroups(groupCount, groupsAllowedDown);
- var model = createEnd2EndOneNode(new TestProperties().setAllowMoreThanOneContentGroupDown(true), services);
+ var model = createEnd2EndOneNode(new TestProperties(), services);
var fleetControllerConfigBuilder = new FleetcontrollerConfig.Builder();
model.getConfig(fleetControllerConfigBuilder, "admin/cluster-controllers/0/components/clustercontroller-storage-configurer");
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java
index af547965749..15fba6a7dc9 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/DispatchTuningTest.java
@@ -15,13 +15,13 @@ public class DispatchTuningTest {
void requireThatAccessorWork() {
DispatchTuning dispatch = new DispatchTuning.Builder()
.setMaxHitsPerPartition(69)
- .setDispatchPolicy("round-robin")
+ .setDispatchPolicy("best-of-random-2")
.setMinActiveDocsCoverage(12.5)
.setTopKProbability(18.3)
.build();
assertEquals(69, dispatch.getMaxHitsPerPartition().intValue());
assertEquals(12.5, dispatch.getMinActiveDocsCoverage(), 0.0);
- assertEquals(DispatchTuning.DispatchPolicy.ROUNDROBIN, dispatch.getDispatchPolicy());
+ assertEquals(DispatchTuning.DispatchPolicy.BEST_OF_RANDOM_2, dispatch.getDispatchPolicy());
assertEquals(18.3, dispatch.getTopkProbability(), 0.0);
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
index 2f7ed875226..0d7450aafd5 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/FleetControllerClusterTest.java
@@ -3,7 +3,6 @@ package com.yahoo.vespa.model.content;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.deploy.TestProperties;
-import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.vespa.config.content.FleetcontrollerConfig;
import com.yahoo.vespa.model.test.utils.ApplicationPackageUtils;
import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithMockPkg;
@@ -16,7 +15,6 @@ public class FleetControllerClusterTest {
private FleetcontrollerConfig parse(String xml, TestProperties props) {
var deployStateBuilder = new DeployState.Builder().properties(props);
- props.allowMoreThanOneContentGroupDown(new ClusterSpec.Id("default"));
var mockPkg = new VespaModelCreatorWithMockPkg(null, xml, ApplicationPackageUtils.generateSchemas("type1"));
var model = mockPkg.create(deployStateBuilder);
var builder = new FleetcontrollerConfig.Builder();
@@ -48,7 +46,7 @@ public class FleetControllerClusterTest {
</cluster-controller>
</tuning>
</content>""",
- new TestProperties().setAllowMoreThanOneContentGroupDown(true));
+ new TestProperties());
assertEquals(13 * 1000, config.init_progress_time());
assertEquals(27 * 1000, config.storage_transition_time());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java
index 8a47eb030f3..ab147f22e8b 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedHierarchicDistributionTest.java
@@ -144,30 +144,6 @@ public class IndexedHierarchicDistributionTest {
"</tuning>");
}
- private String getRandomDispatchXml() {
- return joinLines("<tuning>",
- " <dispatch>",
- " <dispatch-policy>random</dispatch-policy>",
- " </dispatch>",
- "</tuning>");
- }
-
- private ContentCluster getOddGroupsCluster() throws Exception {
- String groupXml = joinLines(" <group>",
- " <distribution partitions='2|*'/>",
- " <group distribution-key='0' name='group0'>",
- " <node distribution-key='0' hostalias='mockhost'/>",
- " <node distribution-key='1' hostalias='mockhost'/>",
- " </group>",
- " <group distribution-key='1' name='group1'>",
- " <node distribution-key='3' hostalias='mockhost'/>",
- " <node distribution-key='4' hostalias='mockhost'/>",
- " <node distribution-key='5' hostalias='mockhost'/>",
- " </group>",
- " </group>", "");
- return createCluster(createClusterXml(groupXml, Optional.of(getRandomDispatchXml()), 4, 4));
- }
-
@Test
void requireThatWeMustHaveOnlyOneGroupLevel() {
try {
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
index 540f625cf2b..2404c6399eb 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/StorageClusterTest.java
@@ -9,7 +9,6 @@ import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provisioning.FlavorsConfig;
import com.yahoo.vespa.config.content.core.StorCommunicationmanagerConfig;
-import com.yahoo.vespa.config.content.core.StorIntegritycheckerConfig;
import com.yahoo.vespa.config.content.core.StorVisitorConfig;
import com.yahoo.vespa.config.content.StorFilestorConfig;
import com.yahoo.vespa.config.content.core.StorServerConfig;
@@ -327,15 +326,6 @@ public class StorageClusterTest {
}
@Test
- void integrity_checker_explicitly_disabled_when_not_running_with_vds_provider() {
- StorIntegritycheckerConfig.Builder builder = new StorIntegritycheckerConfig.Builder();
- parse(cluster("bees", "")).getConfig(builder);
- StorIntegritycheckerConfig config = new StorIntegritycheckerConfig(builder);
- // '-' --> don't run on the given week day
- assertEquals("-------", config.weeklycycle());
- }
-
- @Test
void testCapacity() {
String xml = joinLines(
"<cluster id=\"storage\">",
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java
index a6ea6cb8132..0800f26d6e8 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/cluster/ClusterTest.java
@@ -72,7 +72,7 @@ public class ClusterTest {
"",
joinLines(
"<max-hits-per-partition>77</max-hits-per-partition>",
- "<dispatch-policy>round-robin</dispatch-policy>",
+ "<dispatch-policy>best-of-random-2</dispatch-policy>",
"<min-active-docs-coverage>93</min-active-docs-coverage>",
"<top-k-probability>0.777</top-k-probability>"),
false);
@@ -81,7 +81,7 @@ public class ClusterTest {
DispatchConfig config = new DispatchConfig(builder);
assertEquals(3, config.redundancy());
assertEquals(93.0, config.minActivedocsPercentage(), DELTA);
- assertEquals(DispatchConfig.DistributionPolicy.ROUNDROBIN, config.distributionPolicy());
+ assertEquals(DispatchConfig.DistributionPolicy.BEST_OF_RANDOM_2, config.distributionPolicy());
assertEquals(77, config.maxHitsPerNode());
assertEquals(0.777, config.topKProbability(), DELTA);
}
diff --git a/config-provisioning/pom.xml b/config-provisioning/pom.xml
index e405f1192b1..9b10fedd79b 100644
--- a/config-provisioning/pom.xml
+++ b/config-provisioning/pom.xml
@@ -64,11 +64,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Deployer.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Deployer.java
index f184519e928..5a077d18e89 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/Deployer.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Deployer.java
@@ -57,6 +57,12 @@ public interface Deployer {
/** Returns the time the active session was activated, or empty if there is no active session */
Optional<Instant> activationTime(ApplicationId application);
+ /** Returns the time of last deployed session for this application or empty if there are no deployments */
+ Optional<Instant> deployTime(ApplicationId application);
+
+ /** Returns whether the application has reindexing ready to go, which was readied after the given instant. */
+ boolean readiedReindexingAfter(ApplicationId application, Instant instant);
+
/** Whether the deployer is bootstrapping, some users of the deployer will want to hold off with deployments in that case. */
default boolean bootstrapping() { return false; }
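
Deployer gains two read-only queries here: deployTime, the creation time of the last deployed session, and readiedReindexingAfter, whether reindexing was readied after a given instant. A hypothetical caller, shown only to illustrate the contract (redeployInterval is an assumed policy knob, not part of the interface):

    import com.yahoo.config.provision.ApplicationId;
    import com.yahoo.config.provision.Deployer;

    import java.time.Duration;
    import java.time.Instant;

    final class RedeployDecision {

        static boolean shouldRedeploy(Deployer deployer, ApplicationId application,
                                      Duration redeployInterval, Instant now) {
            Instant lastDeploy = deployer.deployTime(application).orElse(Instant.EPOCH);
            // Redeploy when the last deployment is older than the interval, or when reindexing
            // was readied after it and therefore needs a new session to take effect.
            return lastDeploy.isBefore(now.minus(redeployInterval))
                    || deployer.readiedReindexingAfter(application, lastDeploy);
        }
    }
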
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java b/config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java
index e2349f6f63f..c6eecf1e705 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/SystemName.java
@@ -75,4 +75,6 @@ public enum SystemName {
return Stream.of(values()).filter(predicate).collect(Collectors.toUnmodifiableSet());
}
+ public static Set<SystemName> hostedVespa() { return EnumSet.of(main, cd, Public, PublicCd); }
+
}
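
A short usage sketch for the new convenience set (hypothetical helper, using only the enum values visible in this hunk):

    import com.yahoo.config.provision.SystemName;

    final class SystemCheck {
        static boolean runsInHostedVespa(SystemName system) {
            return SystemName.hostedVespa().contains(system); // true for main, cd, Public, PublicCd
        }
    }
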
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/zone/AuthMethod.java b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/AuthMethod.java
new file mode 100644
index 00000000000..88b8a05c4c6
--- /dev/null
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/zone/AuthMethod.java
@@ -0,0 +1,14 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.provision.zone;
+
+/**
+ * An endpoint's authentication method.
+ *
+ * @author mpolden
+ */
+public enum AuthMethod {
+
+ mtls,
+ token,
+
+}
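
A hypothetical consumer of the new enum, only to show how the two values are meant to be distinguished:

    import com.yahoo.config.provision.zone.AuthMethod;

    final class AuthMethodExample {
        static String describe(AuthMethod method) {
            return switch (method) {
                case mtls -> "mutual TLS client certificate";
                case token -> "data-plane token";
            };
        }
    }
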
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java
index 627a15aab65..c4926fd0250 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java
@@ -3,24 +3,29 @@ package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.io.IOUtils;
+import com.yahoo.vespa.config.util.ConfigUtils;
import com.yahoo.vespa.filedistribution.FileDownloader;
+
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
+import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;
import java.time.Duration;
import java.time.Instant;
-import java.util.Arrays;
+import java.util.Comparator;
import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
import static java.nio.file.Files.readAttributes;
+import static java.util.logging.Level.INFO;
/**
* Deletes file references and url downloads that have not been used for some time.
@@ -32,30 +37,46 @@ import static java.nio.file.Files.readAttributes;
class FileReferencesAndDownloadsMaintainer implements Runnable {
private static final Logger log = Logger.getLogger(FileReferencesAndDownloadsMaintainer.class.getName());
- private static final File defaultUrlDownloadDir = UrlDownloadRpcServer.downloadDir;
+ private static final File defaultUrlDownloadDir = UrlDownloadRpcServer.defaultDownloadDirectory;
private static final File defaultFileReferencesDownloadDir = FileDownloader.defaultDownloadDirectory;
private static final Duration defaultDurationToKeepFiles = Duration.ofDays(30);
+ private static final int defaultOutdatedFilesToKeep = 20;
private static final Duration interval = Duration.ofMinutes(1);
- private final ScheduledExecutorService executor =
- new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("file references and downloads cleanup"));
+ private final Optional<ScheduledExecutorService> executor;
private final File urlDownloadDir;
private final File fileReferencesDownloadDir;
private final Duration durationToKeepFiles;
+ private final int outDatedFilesToKeep;
FileReferencesAndDownloadsMaintainer() {
- this(defaultFileReferencesDownloadDir, defaultUrlDownloadDir, keepFileReferencesDuration());
+ this(defaultFileReferencesDownloadDir, defaultUrlDownloadDir, keepFileReferencesDuration(),
+ outDatedFilesToKeep(), configServers());
}
- FileReferencesAndDownloadsMaintainer(File fileReferencesDownloadDir, File urlDownloadDir, Duration durationToKeepFiles) {
+ FileReferencesAndDownloadsMaintainer(File fileReferencesDownloadDir,
+ File urlDownloadDir,
+ Duration durationToKeepFiles,
+ int outdatedFilesToKeep,
+ List<String> configServers) {
this.fileReferencesDownloadDir = fileReferencesDownloadDir;
this.urlDownloadDir = urlDownloadDir;
this.durationToKeepFiles = durationToKeepFiles;
- executor.scheduleAtFixedRate(this, interval.toSeconds(), interval.toSeconds(), TimeUnit.SECONDS);
+ this.outDatedFilesToKeep = outdatedFilesToKeep;
+ // Do not run on config servers
+ if (configServers.contains(ConfigUtils.getCanonicalHostName())) {
+ log.log(INFO, "Not running maintainer, since this is on a config server host");
+ executor = Optional.empty();
+ } else {
+ executor = Optional.of(new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("file references and downloads cleanup")));
+ executor.get().scheduleAtFixedRate(this, interval.toSeconds(), interval.toSeconds(), TimeUnit.SECONDS);
+ }
}
@Override
public void run() {
+ if (executor.isEmpty()) return;
+
try {
deleteUnusedFiles(fileReferencesDownloadDir);
deleteUnusedFiles(urlDownloadDir);
@@ -65,42 +86,62 @@ class FileReferencesAndDownloadsMaintainer implements Runnable {
}
public void close() {
- executor.shutdownNow();
- try {
- if ( ! executor.awaitTermination(10, TimeUnit.SECONDS))
- throw new RuntimeException("Unable to shutdown " + executor + " before timeout");
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
+ executor.ifPresent(ex -> {
+ ex.shutdownNow();
+ try {
+ if (! ex.awaitTermination(10, TimeUnit.SECONDS))
+ throw new RuntimeException("Unable to shutdown " + executor + " before timeout");
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ });
}
private void deleteUnusedFiles(File directory) {
- Instant deleteNotUsedSinceInstant = Instant.now().minus(durationToKeepFiles);
- Set<String> filesOnDisk = new HashSet<>();
+
File[] files = directory.listFiles();
- if (files != null)
- filesOnDisk.addAll(Arrays.stream(files).map(File::getName).collect(Collectors.toSet()));
- log.log(Level.FINE, () -> "Files on disk (in " + directory + "): " + filesOnDisk);
+ if (files == null) return;
+
+ List<File> filesToDelete = filesThatCanBeDeleted(files);
+ filesToDelete.forEach(fileReference -> {
+ if (IOUtils.recursiveDeleteDir(fileReference))
+ log.log(Level.FINE, "Deleted " + fileReference.getAbsolutePath());
+ else
+ log.log(Level.WARNING, "Could not delete " + fileReference.getAbsolutePath());
+ });
+ }
- Set<String> filesToDelete = filesOnDisk
+ private List<File> filesThatCanBeDeleted(File[] files) {
+ Instant deleteNotUsedSinceInstant = Instant.now().minus(durationToKeepFiles);
+
+ Set<File> filesOnDisk = new HashSet<>(List.of(files));
+ log.log(Level.FINE, () -> "Files on disk: " + filesOnDisk);
+ int deleteCount = Math.max(0, filesOnDisk.size() - outDatedFilesToKeep);
+ var canBeDeleted = filesOnDisk
.stream()
- .filter(fileReference -> isFileLastModifiedBefore(new File(directory, fileReference), deleteNotUsedSinceInstant))
- .collect(Collectors.toSet());
- if (filesToDelete.size() > 0) {
- log.log(Level.INFO, "Files that can be deleted in " + directory + " (not used since " + deleteNotUsedSinceInstant + "): " + filesToDelete);
- filesToDelete.forEach(fileReference -> {
- File file = new File(directory, fileReference);
- if (!IOUtils.recursiveDeleteDir(file))
- log.log(Level.WARNING, "Could not delete " + file.getAbsolutePath());
- });
- }
+ .peek(file -> log.log(Level.FINE, () -> file + ":" + fileLastModifiedTime(file.toPath())))
+ .filter(fileReference -> isFileLastModifiedBefore(fileReference, deleteNotUsedSinceInstant))
+ .sorted(Comparator.comparing(fileReference -> fileLastModifiedTime(fileReference.toPath())))
+ .toList();
+
+ // Make sure we always keep at least outDatedFilesToKeep files, regardless of age
+ canBeDeleted = canBeDeleted.subList(0, Math.min(canBeDeleted.size(), deleteCount));
+ if (canBeDeleted.size() > 0)
+ log.log(INFO, "Files that can be deleted (not accessed since " + deleteNotUsedSinceInstant +
+ ", will also keep " + outDatedFilesToKeep +
+ " no matter when last accessed): " + canBeDeleted);
+
+ return canBeDeleted;
}
private boolean isFileLastModifiedBefore(File fileReference, Instant instant) {
- BasicFileAttributes fileAttributes;
+ return fileLastModifiedTime(fileReference.toPath()).isBefore(instant);
+ }
+
+ private static Instant fileLastModifiedTime(Path fileReference) {
try {
- fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class);
- return fileAttributes.lastModifiedTime().toInstant().isBefore(instant);
+ BasicFileAttributes fileAttributes = readAttributes(fileReference, BasicFileAttributes.class);
+ return fileAttributes.lastModifiedTime().toInstant();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
@@ -114,4 +155,21 @@ class FileReferencesAndDownloadsMaintainer implements Runnable {
return defaultDurationToKeepFiles;
}
+ private static int outDatedFilesToKeep() {
+ String env = System.getenv("VESPA_KEEP_FILE_REFERENCES_COUNT");
+ if (env != null && !env.isEmpty())
+ return Integer.parseInt(env);
+ else
+ return defaultOutdatedFilesToKeep;
+ }
+
+ private static List<String> configServers() {
+ String env = System.getenv("VESPA_CONFIGSERVERS");
+ if (env == null || env.isEmpty())
+ return List.of(ConfigUtils.getCanonicalHostName());
+ else {
+ return List.of(env.split(","));
+ }
+ }
+
}
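
The retention rule introduced above can be summarised as: only files last modified before now minus durationToKeepFiles are candidates, they are deleted oldest first, and at least outDatedFilesToKeep files always remain. A standalone sketch of that selection over plain values (not the production class):

    import java.time.Instant;
    import java.util.List;
    import java.util.Map;

    final class RetentionSketch {

        // Returns the names that may be deleted: old enough, oldest first, and never so many
        // that fewer than 'filesToKeep' files would remain on disk.
        static List<String> deletable(Map<String, Instant> lastModifiedByName, Instant cutoff, int filesToKeep) {
            int deleteCount = Math.max(0, lastModifiedByName.size() - filesToKeep);
            List<String> oldestFirst = lastModifiedByName.entrySet().stream()
                    .filter(e -> e.getValue().isBefore(cutoff))
                    .sorted(Map.Entry.comparingByValue())
                    .map(Map.Entry::getKey)
                    .toList();
            return oldestFirst.subList(0, Math.min(oldestFirst.size(), deleteCount));
        }
    }

With 20 files all older than the cutoff and filesToKeep = 9, the 11 oldest are returned, which is the behaviour the updated maintainer test below relies on.
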
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/UrlDownloadRpcServer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/UrlDownloadRpcServer.java
index 9ba3663f883..35c5bb14755 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/UrlDownloadRpcServer.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/UrlDownloadRpcServer.java
@@ -11,12 +11,8 @@ import com.yahoo.text.Utf8;
import com.yahoo.vespa.defaults.Defaults;
import net.jpountz.xxhash.XXHashFactory;
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
import java.io.File;
import java.io.FileOutputStream;
-import java.io.FileReader;
-import java.io.FileWriter;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
@@ -25,7 +21,6 @@ import java.nio.channels.Channels;
import java.nio.channels.ReadableByteChannel;
import java.nio.file.Files;
import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -33,6 +28,8 @@ import java.util.logging.Logger;
import static com.yahoo.vespa.config.UrlDownloader.DOES_NOT_EXIST;
import static com.yahoo.vespa.config.UrlDownloader.HTTP_ERROR;
import static com.yahoo.vespa.config.UrlDownloader.INTERNAL_ERROR;
+import static java.lang.Runtime.getRuntime;
+import static java.util.concurrent.Executors.newFixedThreadPool;
/**
* An RPC server that handles URL download requests.
@@ -40,16 +37,17 @@ import static com.yahoo.vespa.config.UrlDownloader.INTERNAL_ERROR;
* @author lesters
*/
class UrlDownloadRpcServer {
- private final static Logger log = Logger.getLogger(UrlDownloadRpcServer.class.getName());
+ private static final Logger log = Logger.getLogger(UrlDownloadRpcServer.class.getName());
private static final String CONTENTS_FILE_NAME = "contents";
- private static final String LAST_MODIFIED_FILE_NAME = "lastmodified";
+ static final File defaultDownloadDirectory = new File(Defaults.getDefaults().underVespaHome("var/db/vespa/download"));
- static final File downloadDir = new File(Defaults.getDefaults().underVespaHome("var/db/vespa/download"));
- private final ExecutorService rpcDownloadExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
- new DaemonThreadFactory("Rpc URL download executor"));
+ private final File rootDownloadDir;
+ private final ExecutorService executor = newFixedThreadPool(Math.max(8, getRuntime().availableProcessors()),
+ new DaemonThreadFactory("Rpc URL download executor"));
UrlDownloadRpcServer(Supervisor supervisor) {
+ this.rootDownloadDir = defaultDownloadDirectory;
supervisor.addMethod(new Method("url.waitFor", "s", "s", this::download)
.requireCapabilities(Capability.CONFIGPROXY__FILEDISTRIBUTION_API)
.methodDesc("get path to url download")
@@ -58,9 +56,9 @@ class UrlDownloadRpcServer {
}
void close() {
- rpcDownloadExecutor.shutdownNow();
+ executor.shutdownNow();
try {
- rpcDownloadExecutor.awaitTermination(10, TimeUnit.SECONDS);
+ executor.awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
@@ -68,33 +66,31 @@ class UrlDownloadRpcServer {
private void download(Request req) {
req.detach();
- rpcDownloadExecutor.execute(() -> downloadFile(req));
+ executor.execute(() -> downloadFile(req));
}
private void downloadFile(Request req) {
String url = req.parameters().get(0).asString();
- File downloadDir = new File(UrlDownloadRpcServer.downloadDir, urlToDirName(url));
+ File downloadDir = new File(rootDownloadDir, urlToDirName(url));
+ if (alreadyDownloaded(downloadDir)) {
+ log.log(Level.INFO, "URL '" + url + "' already downloaded");
+ req.returnValues().add(new StringValue(new File(downloadDir, CONTENTS_FILE_NAME).getAbsolutePath()));
+ req.returnRequest();
+ return;
+ }
try {
URL website = new URL(url);
HttpURLConnection connection = (HttpURLConnection) website.openConnection();
- setIfModifiedSince(connection, downloadDir); // don't download if we already have the file
-
if (connection.getResponseCode() == 200) {
log.log(Level.INFO, "Downloading URL '" + url + "'");
downloadFile(req, connection, downloadDir);
-
- } else if (connection.getResponseCode() == 304) {
- log.log(Level.INFO, "URL '" + url + "' already downloaded (server response: 304)");
- req.returnValues().add(new StringValue(new File(downloadDir, CONTENTS_FILE_NAME).getAbsolutePath()));
-
} else {
log.log(Level.SEVERE, "Download of URL '" + url + "' got server response: " + connection.getResponseCode());
req.setError(HTTP_ERROR, String.valueOf(connection.getResponseCode()));
}
-
} catch (Throwable e) {
- log.log(Level.SEVERE, "Download of URL '" + url + "' got exception: " + e.getMessage());
+ log.log(Level.SEVERE, "Download of URL '" + url + "' failed, got exception: " + e.getMessage());
req.setError(INTERNAL_ERROR, "Download of URL '" + url + "' internal error: " + e.getMessage());
}
req.returnRequest();
@@ -110,12 +106,11 @@ class UrlDownloadRpcServer {
fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE);
if (contentsPath.exists() && contentsPath.length() > 0) {
- writeLastModifiedTimestamp(downloadDir, connection.getLastModified());
new RequestTracker().trackRequest(downloadDir);
req.returnValues().add(new StringValue(contentsPath.getAbsolutePath()));
log.log(Level.FINE, () -> "URL '" + url + "' available at " + contentsPath);
log.log(Level.INFO, String.format("Download of URL '%s' done in %.3f seconds",
- url, (System.currentTimeMillis() -start) / 1000.0));
+ url, (System.currentTimeMillis() - start) / 1000.0));
} else {
log.log(Level.SEVERE, "Downloaded URL '" + url + "' not found, returning error");
req.setError(DOES_NOT_EXIST, "Downloaded '" + url + "' not found");
@@ -128,32 +123,9 @@ class UrlDownloadRpcServer {
return String.valueOf(XXHashFactory.fastestJavaInstance().hash64().hash(ByteBuffer.wrap(Utf8.toBytes(uri)), 0));
}
- private static void setIfModifiedSince(HttpURLConnection connection, File downloadDir) throws IOException {
+ private static boolean alreadyDownloaded(File downloadDir) {
File contents = new File(downloadDir, CONTENTS_FILE_NAME);
- if (contents.exists() && contents.length() > 0) {
- long lastModified = readLastModifiedTimestamp(downloadDir);
- if (lastModified > 0) {
- connection.setIfModifiedSince(lastModified);
- }
- }
- }
-
- private static long readLastModifiedTimestamp(File downloadDir) throws IOException {
- File lastModified = new File(downloadDir, LAST_MODIFIED_FILE_NAME);
- if (lastModified.exists() && lastModified.length() > 0) {
- try (BufferedReader br = new BufferedReader(new FileReader(lastModified))) {
- String timestamp = br.readLine();
- return Long.parseLong(timestamp);
- }
- }
- return 0;
- }
-
- private static void writeLastModifiedTimestamp(File downloadDir, long timestamp) throws IOException {
- File lastModified = new File(downloadDir, LAST_MODIFIED_FILE_NAME);
- try (BufferedWriter lastModifiedWriter = new BufferedWriter(new FileWriter(lastModified.getAbsolutePath()))) {
- lastModifiedWriter.write(Long.toString(timestamp));
- }
+ return contents.exists() && contents.length() > 0;
}
}
diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java
index fad021c0119..c41305b4dc8 100644
--- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java
+++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.io.IOUtils;
+import com.yahoo.vespa.config.util.ConfigUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -10,6 +11,9 @@ import java.io.File;
import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.IntStream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -19,9 +23,12 @@ import static org.junit.jupiter.api.Assertions.assertNotNull;
*/
public class FileReferencesAndDownloadsMaintainerTest {
+ private static final Duration keepDuration = Duration.ofMinutes(1);
+ private static final int outDatedFilesToKeep = 9;
+
private File cachedFileReferences;
private File cachedDownloads;
- private FileReferencesAndDownloadsMaintainer cachedFilesMaintainer;
+ private FileReferencesAndDownloadsMaintainer maintainer;
@TempDir
public File tempFolder;
@@ -30,22 +37,70 @@ public class FileReferencesAndDownloadsMaintainerTest {
public void setup() throws IOException {
cachedFileReferences = newFolder(tempFolder, "cachedFileReferences");
cachedDownloads = newFolder(tempFolder, "cachedDownloads");
- cachedFilesMaintainer = new FileReferencesAndDownloadsMaintainer(cachedFileReferences, cachedDownloads, Duration.ofMinutes(1));
}
@Test
- void require_old_files_to_be_deleted() throws IOException {
+ void require_old_files_to_be_deleted() {
+ maintainer = new FileReferencesAndDownloadsMaintainer(cachedFileReferences, cachedDownloads, keepDuration, outDatedFilesToKeep,
+ List.of("host1"));
runMaintainerAndAssertFiles(0, 0);
- File fileReference = writeFile(cachedFileReferences, "fileReference");
- File download = writeFile(cachedDownloads, "download");
- runMaintainerAndAssertFiles(1, 1);
+ var fileReferences = writeFiles(20);
+ var downloads = writeDownloads(21);
+ runMaintainerAndAssertFiles(20, 21);
+
+ updateLastModifiedTimestamp(0, 5, fileReferences, downloads);
+ runMaintainerAndAssertFiles(15, 16);
- updateLastModifiedTimeStamp(fileReference, Instant.now().minus(Duration.ofMinutes(10)));
- runMaintainerAndAssertFiles(0, 1);
+ updateLastModifiedTimestamp(6, 20, fileReferences, downloads);
+ // Should keep at least outDatedFilesToKeep file references and downloads, even if more than that are old
+ runMaintainerAndAssertFiles(outDatedFilesToKeep, outDatedFilesToKeep);
+ }
- updateLastModifiedTimeStamp(download, Instant.now().minus(Duration.ofMinutes(10)));
+ @Test
+ void require_no_files_deleted_when_running_on_config_server_host() {
+ maintainer = new FileReferencesAndDownloadsMaintainer(cachedFileReferences, cachedDownloads, keepDuration,
+ outDatedFilesToKeep, List.of(ConfigUtils.getCanonicalHostName()));
runMaintainerAndAssertFiles(0, 0);
+
+ var fileReferences = writeFiles(10);
+ var downloads = writeDownloads(10);
+ runMaintainerAndAssertFiles(10, 10);
+
+ updateLastModifiedTimestamp(0, 10, fileReferences, downloads);
+ runMaintainerAndAssertFiles(10, 10);
+ }
+
+ private void updateLastModifiedTimestamp(int startInclusive, int endExclusive, List<File> fileReferences, List<File> downloads) {
+ IntStream.range(startInclusive, endExclusive).forEach(i -> {
+ Instant instant = Instant.now().minus(keepDuration.plus(Duration.ofMinutes(1)).minus(Duration.ofSeconds(i)));
+ updateLastModifiedTimeStamp(fileReferences.get(i), instant);
+ updateLastModifiedTimeStamp(downloads.get(i), instant);
+ });
+ }
+
+ private List<File> writeFiles(int count) {
+ List<File> files = new ArrayList<>();
+ IntStream.range(0, count).forEach(i -> {
+ try {
+ files.add(writeFile(cachedFileReferences, "fileReference" + i));
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ return files;
+ }
+
+ private List<File> writeDownloads(int count) {
+ List<File> files = new ArrayList<>();
+ IntStream.range(0, count).forEach(i -> {
+ try {
+ files.add(writeFile(cachedDownloads, "download" + i));
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ return files;
}
private void updateLastModifiedTimeStamp(File file, Instant instant) {
@@ -55,7 +110,7 @@ public class FileReferencesAndDownloadsMaintainerTest {
}
private void runMaintainerAndAssertFiles(int fileReferenceCount, int downloadCount) {
- cachedFilesMaintainer.run();
+ maintainer.run();
File[] fileReferences = cachedFileReferences.listFiles();
assertNotNull(fileReferences);
assertEquals(fileReferenceCount, fileReferences.length);
diff --git a/config/pom.xml b/config/pom.xml
index ae80d00923b..9f3ec28b54b 100755
--- a/config/pom.xml
+++ b/config/pom.xml
@@ -76,7 +76,7 @@
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/configgen/README b/configgen/README
index cc122f6deb6..d973d206beb 100644
--- a/configgen/README
+++ b/configgen/README
@@ -1,9 +1,9 @@
Vespa Config Generation
=======================
-The configgen module is used to generate config-classes from .def files.
+The configgen module is used to generate config classes from .def files.
-Userguide
+User guide
---------
Usually you will want to use this module through the config-class-plugin
@@ -13,4 +13,3 @@ This module can be used stand-alone by building the jar file (mvn package)
and then calling MakeConfig from that file:
java -Dconfig.spec=<def-file_1,def-file_2,...> -Dconfig.dest=<dest-dir> -jar target/configgen.jar
-
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/CNode.java b/configgen/src/main/java/com/yahoo/config/codegen/CNode.java
index 90bd8ded822..95074e873a6 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/CNode.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/CNode.java
@@ -19,7 +19,6 @@ public abstract class CNode {
// TODO: remove! Only set for the root node, and root.getName() returns the same thing!
String defName = null;
- String defVersion = "";
String defNamespace = null;
String defPackage = null;
String defMd5 = "MISSING MD5";
@@ -69,10 +68,6 @@ public abstract class CNode {
defMd5 = md5;
}
- public String getVersion() {
- return defVersion;
- }
-
public String getNamespace() {
if (defNamespace != null) return defNamespace;
if (defPackage != null) return defPackage;
@@ -163,7 +158,6 @@ public abstract class CNode {
"namespace='" + defNamespace + '\'' +
", package='" + defPackage + '\'' +
", name='" + name + '\'' +
- ", version='" + defVersion + '\'' +
'}';
}
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java b/configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java
index 5ffe18b1699..cb10ffdc2be 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/ConfigGenerator.java
@@ -296,7 +296,7 @@ public class ConfigGenerator {
List<String> accessors = new LinkedList<>();
for (CNode child : children) {
String accessor = getAccessorCode(child);
- if (accessor.isEmpty() == false) {
+ if (! accessor.isEmpty()) {
accessors.add(accessor);
}
}
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java b/configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java
index ca38a8fda47..6490c78a150 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/CppClassBuilder.java
@@ -424,11 +424,9 @@ public class CppClassBuilder implements ClassBuilder {
static String getTypeName(CNode node, boolean includeArray) {
String type = null;
- if (node instanceof InnerCNode) {
- InnerCNode innerNode = (InnerCNode) node;
+ if (node instanceof InnerCNode innerNode) {
type = getTypeName(innerNode.getName());
- } else if (node instanceof LeafCNode) {
- LeafCNode leaf = (LeafCNode) node;
+ } else if (node instanceof LeafCNode leaf) {
if (leaf.getType().equals("bool")) {
type = "bool";
} else if (leaf.getType().equals("int")) {
@@ -471,7 +469,6 @@ public class CppClassBuilder implements ClassBuilder {
void writeStaticMemberDeclarations(Writer w, String indent) throws IOException {
w.write(""
+ indent + "static const vespalib::string CONFIG_DEF_MD5;\n"
- + indent + "static const vespalib::string CONFIG_DEF_VERSION;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAME;\n"
+ indent + "static const vespalib::string CONFIG_DEF_NAMESPACE;\n"
+ indent + "static const ::config::StringVector CONFIG_DEF_SCHEMA;\n"
@@ -620,7 +617,6 @@ public class CppClassBuilder implements ClassBuilder {
void writeStaticMemberDefinitions(Writer w, CNode root, NormalizedDefinition nd) throws IOException {
String typeName = getInternalClassName(root);
w.write("const vespalib::string " + typeName + "::CONFIG_DEF_MD5(\"" + root.defMd5 + "\");\n"
- + "const vespalib::string " + typeName + "::CONFIG_DEF_VERSION(\"" + root.defVersion + "\");\n"
+ "const vespalib::string " + typeName + "::CONFIG_DEF_NAME(\"" + root.defName + "\");\n"
+ "const vespalib::string " + typeName + "::CONFIG_DEF_NAMESPACE(\"" + root.getNamespace() + "\");\n"
+ "const int64_t " + typeName + "::CONFIG_DEF_SERIALIZE_VERSION(1);\n");
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/DefLine.java b/configgen/src/main/java/com/yahoo/config/codegen/DefLine.java
index 753fad0d41a..385c7f1979e 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/DefLine.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/DefLine.java
@@ -30,10 +30,21 @@ public class DefLine {
private final static Pattern defaultNullPattern = Pattern.compile("^\\s*default\\s*=\\s*null");
+ private final boolean systemErrEnabled;
+
public DefLine(String line) {
+ this(line, false);
+ }
+
+ public DefLine(String line, boolean systemErrEnabled) {
+ this.systemErrEnabled = systemErrEnabled;
StringBuilder sb = new StringBuilder(line);
int parsed = parseNameType(sb);
sb.delete(0, parsed);
+ if (type.name.equals("file")) {
+ // Note: 'file' is used internally and also there is no support for 'path' in C++, so cannot be removed yet
+ printSystemErr("Warning: config type 'file' is deprecated, use 'path' instead");
+ }
if (type.name.equals("enum")) {
parsed = parseEnum(sb);
sb.delete(0, parsed);
@@ -157,7 +168,7 @@ public class DefLine {
}
enumString = enumString.replaceFirst("\\{\\s*", "");
enumString = enumString.replaceFirst("\\s*\\}", "");
- String result[] = enumPattern2.split(enumString);
+ String[] result = enumPattern2.split(enumString);
type.enumArray = new String[result.length];
for (int i = 0; i < result.length; i++) {
String s = result[i].trim();
@@ -272,5 +283,9 @@ public class DefLine {
}
}
+ private void printSystemErr(String s) {
+ if (systemErrEnabled) System.err.println(s);
+ }
+
}
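
A small usage sketch of the new two-argument constructor; the .def lines here are just examples:

    import com.yahoo.config.codegen.DefLine;

    final class DefLineWarningExample {
        public static void main(String[] args) {
            new DefLine("logPath file", true); // warns: config type 'file' is deprecated, use 'path' instead
            new DefLine("logPath path");       // preferred replacement, parses silently
        }
    }
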
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/DefParser.java b/configgen/src/main/java/com/yahoo/config/codegen/DefParser.java
index 2be824658b4..eaf57c8eda8 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/DefParser.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/DefParser.java
@@ -150,7 +150,7 @@ public class DefParser {
}
// Only add lines that are not namespace or comment lines
nd.addNormalizedLine(line);
- DefLine defLine = new DefLine(line);
+ DefLine defLine = new DefLine(line, systemErrEnabled);
root.setLeaf(root.getName() + "." + defLine.getName(), defLine, comment);
comment = "";
}
@@ -204,11 +204,8 @@ public class DefParser {
*/
public static void dumpTree(CNode root, String indent) {
StringBuilder sb = new StringBuilder(indent + root.getName());
- if (root instanceof LeafCNode) {
- LeafCNode leaf = ((LeafCNode)root);
- if (leaf.getDefaultValue() != null) {
- sb.append(" = ").append(((LeafCNode)root).getDefaultValue().getValue());
- }
+ if (root instanceof LeafCNode leaf && leaf.getDefaultValue() != null) {
+ sb.append(" = ").append(((LeafCNode) root).getDefaultValue().getValue());
}
System.out.println(sb);
if (!root.getComment().isEmpty()) {
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/InnerCNode.java b/configgen/src/main/java/com/yahoo/config/codegen/InnerCNode.java
index 295558a469c..bbe31ee9f5b 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/InnerCNode.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/InnerCNode.java
@@ -68,9 +68,7 @@ public class InnerCNode extends CNode {
if (newChild == null)
throw new IllegalArgumentException("Could not create " + type.name + " " + name);
}
- return children.containsKey(newChild.getName())
- ? children.get(newChild.getName())
- : newChild;
+ return children.getOrDefault(newChild.getName(), newChild);
}
/**
diff --git a/configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java b/configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java
index 1395c6814df..afd6acfbabf 100644
--- a/configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java
+++ b/configgen/src/main/java/com/yahoo/config/codegen/LeafCNode.java
@@ -17,20 +17,20 @@ public abstract class LeafCNode extends CNode {
public static LeafCNode newInstance(DefLine.Type type, InnerCNode parent, String name) {
try {
- switch (type.name) {
- case "int": return new IntegerLeaf(parent, name);
- case "long": return new LongLeaf(parent, name);
- case "double": return new DoubleLeaf(parent, name);
- case "bool": return new BooleanLeaf(parent, name);
- case "string": return new StringLeaf(parent, name);
- case "reference": return new ReferenceLeaf(parent, name);
- case "file": return new FileLeaf(parent, name);
- case "path": return new PathLeaf(parent, name);
- case "enum": return new EnumLeaf(parent, name, type.enumArray);
- case "url" : return new UrlLeaf(parent, name);
- case "model" : return new ModelLeaf(parent, name);
- default: return null;
- }
+ return switch (type.name) {
+ case "int" -> new IntegerLeaf(parent, name);
+ case "long" -> new LongLeaf(parent, name);
+ case "double" -> new DoubleLeaf(parent, name);
+ case "bool" -> new BooleanLeaf(parent, name);
+ case "string" -> new StringLeaf(parent, name);
+ case "reference" -> new ReferenceLeaf(parent, name);
+ case "file" -> new FileLeaf(parent, name);
+ case "path" -> new PathLeaf(parent, name);
+ case "enum" -> new EnumLeaf(parent, name, type.enumArray);
+ case "url" -> new UrlLeaf(parent, name);
+ case "model" -> new ModelLeaf(parent, name);
+ default -> null;
+ };
} catch (NumberFormatException e) {
return null;
}
diff --git a/configserver-flags/pom.xml b/configserver-flags/pom.xml
index e9c0c68685c..02824f2e6e3 100644
--- a/configserver-flags/pom.xml
+++ b/configserver-flags/pom.xml
@@ -91,11 +91,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
@@ -114,11 +110,6 @@
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
- <dependency>
- <groupId>org.assertj</groupId>
- <artifactId>assertj-core</artifactId>
- <scope>test</scope>
- </dependency>
</dependencies>
<build>
<plugins>
diff --git a/configserver-flags/src/test/java/com/yahoo/vespa/configserver/flags/http/FlagsHandlerTest.java b/configserver-flags/src/test/java/com/yahoo/vespa/configserver/flags/http/FlagsHandlerTest.java
index 2700ab114e9..3c4c99e0337 100644
--- a/configserver-flags/src/test/java/com/yahoo/vespa/configserver/flags/http/FlagsHandlerTest.java
+++ b/configserver-flags/src/test/java/com/yahoo/vespa/configserver/flags/http/FlagsHandlerTest.java
@@ -23,8 +23,8 @@ import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import static org.assertj.core.api.AssertionsForInterfaceTypes.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* @author hakonhall
@@ -36,8 +36,6 @@ public class FlagsHandlerTest {
"id2", true, List.of("joe"), "2010-01-01", "2030-01-01", "desc2", "mod2",
FetchVector.Dimension.HOSTNAME, FetchVector.Dimension.APPLICATION_ID);
- private static final String FLAGS_V1_URL = "https://foo.com:4443/flags/v1";
-
private final FlagsDb flagsDb = new FlagsDbImpl(new MockCurator());
private final FlagsHandler handler = new FlagsHandler(FlagsHandler.testContext(), flagsDb);
@@ -71,14 +69,15 @@ public class FlagsHandlerTest {
void testData() {
// PUT flag with ID id1
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
- "{\n" +
- " \"id\": \"id1\",\n" +
- " \"rules\": [\n" +
- " {\n" +
- " \"value\": true\n" +
- " }\n" +
- " ]\n" +
- "}",
+ """
+ {
+ "id": "id1",
+ "rules": [
+ {
+ "value": true
+ }
+ ]
+ }""",
"");
// GET on ID id1 should return the same as the put.
@@ -99,29 +98,31 @@ public class FlagsHandlerTest {
// PUT id2
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG2.id(),
- "{\n" +
- " \"id\": \"id2\",\n" +
- " \"rules\": [\n" +
- " {\n" +
- " \"conditions\": [\n" +
- " {\n" +
- " \"type\": \"whitelist\",\n" +
- " \"dimension\": \"hostname\",\n" +
- " \"values\": [ \"host1\", \"host2\" ]\n" +
- " },\n" +
- " {\n" +
- " \"type\": \"blacklist\",\n" +
- " \"dimension\": \"application\",\n" +
- " \"values\": [ \"app1\", \"app2\" ]\n" +
- " }\n" +
- " ],\n" +
- " \"value\": true\n" +
- " }\n" +
- " ],\n" +
- " \"attributes\": {\n" +
- " \"zone\": \"zone1\"\n" +
- " }\n" +
- "}\n",
+ """
+ {
+ "id": "id2",
+ "rules": [
+ {
+ "conditions": [
+ {
+ "type": "whitelist",
+ "dimension": "hostname",
+ "values": [ "host1", "host2" ]
+ },
+ {
+ "type": "blacklist",
+ "dimension": "application",
+ "values": [ "app1", "app2" ]
+ }
+ ],
+ "value": true
+ }
+ ],
+ "attributes": {
+ "zone": "zone1"
+ }
+ }
+ """,
"");
// GET on id2 should now return what was put
@@ -135,14 +136,16 @@ public class FlagsHandlerTest {
// Putting (overriding) id1 should work silently
verifySuccessfulRequest(Method.PUT, "/data/" + FLAG1.id(),
- "{\n" +
- " \"id\": \"id1\",\n" +
- " \"rules\": [\n" +
- " {\n" +
- " \"value\": false\n" +
- " }\n" +
- " ]\n" +
- "}\n",
+ """
+ {
+ "id": "id1",
+ "rules": [
+ {
+ "value": false
+ }
+ ]
+ }
+ """,
"");
// Verify PUT
@@ -162,16 +165,16 @@ public class FlagsHandlerTest {
@Test
void testForcing() {
- assertThat(handle(Method.PUT, "/data/" + new FlagId("undef"), "", 400)).contains("There is no flag 'undef'");
+ assertTrue(handle(Method.PUT, "/data/" + new FlagId("undef"), "", 400).contains("There is no flag 'undef'"));
- assertThat(handle(Method.PUT, "/data/" + new FlagId("undef") + "?force=true", "", 400)).
- contains("No content to map due to end-of-input");
+ assertTrue(handle(Method.PUT, "/data/" + new FlagId("undef") + "?force=true", "", 400).
+ contains("No content to map due to end-of-input"));
- assertThat(handle(Method.PUT, "/data/" + FLAG1.id(), "{}", 400)).
- contains("Flag ID missing");
+ assertTrue(handle(Method.PUT, "/data/" + FLAG1.id(), "{}", 400).
+ contains("Flag ID missing"));
- assertThat(handle(Method.PUT, "/data/" + FLAG1.id(), "{\"id\": \"id1\",\"rules\": [{\"value\":\"string\"}]}", 400)).
- contains("Wrong type of JsonNode: STRING");
+ assertTrue(handle(Method.PUT, "/data/" + FLAG1.id(), "{\"id\": \"id1\",\"rules\": [{\"value\":\"string\"}]}", 400).
+ contains("Wrong type of JsonNode: STRING"));
assertEquals(handle(Method.PUT, "/data/" + FLAG1.id() + "?force=true", "{\"id\": \"id1\",\"rules\": [{\"value\":\"string\"}]}", 200),
"");
diff --git a/configserver/pom.xml b/configserver/pom.xml
index 7c94464864c..bfef0748989 100644
--- a/configserver/pom.xml
+++ b/configserver/pom.xml
@@ -86,7 +86,7 @@
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-all</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>test</scope>
</dependency>
<dependency>
@@ -106,11 +106,6 @@
<scope>test</scope>
</dependency>
<dependency>
- <groupId>org.assertj</groupId>
- <artifactId>assertj-core</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
<groupId>com.yahoo.vespa</groupId>
<artifactId>airlift-zstd</artifactId>
<version>${project.version}</version>
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index 3502ece9cb7..9533f04107d 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -43,8 +43,10 @@ import com.yahoo.transaction.Transaction;
import com.yahoo.vespa.applicationmodel.InfrastructureApplication;
import com.yahoo.vespa.config.server.application.Application;
import com.yahoo.vespa.config.server.application.ApplicationCuratorDatabase;
+import com.yahoo.vespa.config.server.application.ApplicationData;
import com.yahoo.vespa.config.server.application.ApplicationReindexing;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationReindexing.Status;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.application.ClusterReindexing;
import com.yahoo.vespa.config.server.application.ClusterReindexingStatusClient;
import com.yahoo.vespa.config.server.application.CompressedApplicationInputStream;
@@ -127,6 +129,7 @@ import static com.yahoo.vespa.config.server.tenant.TenantRepository.HOSTED_VESPA
import static com.yahoo.vespa.curator.Curator.CompletionWaiter;
import static com.yahoo.yolean.Exceptions.uncheck;
import static java.nio.file.Files.readAttributes;
+import static java.util.Comparator.naturalOrder;
/**
* The API for managing applications.
@@ -167,7 +170,6 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
ConfigserverConfig configserverConfig,
Orchestrator orchestrator,
TesterClient testerClient,
- Zone zone,
HealthCheckerProvider healthCheckers,
Metric metric,
SecretStore secretStore,
@@ -446,11 +448,39 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
public Optional<Instant> activationTime(ApplicationId application) {
Tenant tenant = tenantRepository.getTenant(application.tenant());
if (tenant == null) return Optional.empty();
+
Optional<Instant> activatedTime = getActiveSession(tenant, application).map(Session::getActivatedTime);
log.log(Level.FINEST, application + " last activated " + activatedTime.orElse(Instant.EPOCH));
return activatedTime;
}
+ @Override
+ public Optional<Instant> deployTime(ApplicationId application) {
+ Tenant tenant = tenantRepository.getTenant(application.tenant());
+ if (tenant == null) return Optional.empty();
+
+        // TODO: Fall back to empty instead when there is no deploy time (in Vespa 9)
+ Optional<Long> lastDeployedSession = tenant.getApplicationRepo().applicationData(application)
+ .flatMap(ApplicationData::lastDeployedSession);
+ if (lastDeployedSession.isEmpty()) return activationTime(application);
+
+ Instant createTime = getRemoteSession(tenant, lastDeployedSession.get()).getCreateTime();
+ log.log(Level.FINEST, application + " last deployed " + createTime);
+
+ return Optional.of(createTime);
+ }
+
+ @Override
+ public boolean readiedReindexingAfter(ApplicationId id, Instant instant) {
+ Tenant tenant = tenantRepository.getTenant(id.tenant());
+ if (tenant == null) return false;
+
+ return tenant.getApplicationRepo().database().readReindexingStatus(id)
+ .flatMap(ApplicationReindexing::lastReadiedAt)
+ .map(readiedAt -> readiedAt.isAfter(instant))
+ .orElse(false);
+ }
+
public ApplicationId activate(Tenant tenant,
long sessionId,
TimeoutBudget timeoutBudget,
@@ -659,10 +689,10 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
Tenant tenant = getTenant(applicationId);
if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
- Optional<ApplicationSet> activeApplicationSet = tenant.getSessionRepository().getActiveApplicationSet(applicationId);
- if (activeApplicationSet.isEmpty()) throw new NotFoundException("Unknown application id '" + applicationId + "'");
+ Optional<ApplicationVersions> activeApplicationVersions = tenant.getSessionRepository().activeApplicationVersions(applicationId);
+ if (activeApplicationVersions.isEmpty()) throw new NotFoundException("Unknown application id '" + applicationId + "'");
- return activeApplicationSet.get().getForVersionOrLatest(version, clock.instant());
+ return activeApplicationVersions.get().getForVersionOrLatest(version, clock.instant());
}
// Will return Optional.empty() if getting application fails (instead of throwing an exception)
@@ -698,19 +728,19 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
Optional<String> applicationPackage = Optional.empty();
Optional<Session> session = getActiveSession(applicationId);
if (session.isPresent()) {
- FileReference applicationPackageReference = session.get().getApplicationPackageReference();
+ Optional<FileReference> applicationPackageReference = session.get().getApplicationPackageReference();
File downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig().fileReferencesDir()));
- if (applicationPackageReference != null && ! fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference))
- applicationPackage = Optional.of(applicationPackageReference.value());
+ if (applicationPackageReference.isPresent() && ! fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference.get()))
+ applicationPackage = Optional.of(applicationPackageReference.get().value());
}
return applicationPackage;
}
public List<Version> getAllVersions(ApplicationId applicationId) {
- Optional<ApplicationSet> applicationSet = getActiveApplicationSet(applicationId);
+ Optional<ApplicationVersions> applicationSet = getActiveApplicationSet(applicationId);
return applicationSet.isEmpty()
? List.of()
- : applicationSet.get().getAllVersions(applicationId);
+ : applicationSet.get().versions(applicationId);
}
public HttpResponse validateSecretStore(ApplicationId applicationId, SystemName systemName, Slime slime) {
@@ -1019,8 +1049,8 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
return session;
}
- public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
- return getTenant(appId).getSessionRepository().getActiveApplicationSet(appId);
+ public Optional<ApplicationVersions> getActiveApplicationSet(ApplicationId appId) {
+ return getTenant(appId).getSessionRepository().activeApplicationVersions(appId);
}
public Application getActiveApplication(ApplicationId applicationId) {
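
The new deployTime prefers the create time of the last deployed session and falls back to activationTime when no such session is recorded (as for data written in the old format), while readiedReindexingAfter reduces the stored reindexing status to a single boolean. A hedged sketch of the same Optional-based fallback shape; the functional parameters are hypothetical stand-ins for the tenant and session lookups:

import java.time.Instant;
import java.util.Optional;
import java.util.function.LongFunction;
import java.util.function.Supplier;

class DeployTimeSketch {

    static Optional<Instant> deployTime(Optional<Long> lastDeployedSession,
                                        Supplier<Optional<Instant>> activationTime,
                                        LongFunction<Instant> sessionCreateTime) {
        // No last deployed session recorded (e.g. old storage format): use activation time instead.
        if (lastDeployedSession.isEmpty()) return activationTime.get();
        return Optional.of(sessionCreateTime.apply(lastDeployedSession.get()));
    }

    public static void main(String[] args) {
        Instant created = Instant.parse("2023-06-01T12:00:00Z");
        System.out.println(deployTime(Optional.of(42L), Optional::empty, id -> created));
        System.out.println(deployTime(Optional.empty(), () -> Optional.of(Instant.EPOCH), id -> created));
    }
}
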
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigActivationListener.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigActivationListener.java
index e52089f5400..94ff60a29c1 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigActivationListener.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ConfigActivationListener.java
@@ -2,7 +2,7 @@
package com.yahoo.vespa.config.server;
import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
/**
* A ConfigActivationListener is used to signal to a component that config has been
@@ -20,7 +20,7 @@ public interface ConfigActivationListener {
*
* Must be thread-safe.
*/
- void configActivated(ApplicationSet application);
+ void configActivated(ApplicationVersions application);
/**
* Application has been removed.
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java
index 16423889d01..aee61fa9a44 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java
@@ -11,7 +11,7 @@ import com.yahoo.config.model.api.SuperModelProvider;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.GenerationCounter;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.model.SuperModelConfigProvider;
import com.yahoo.vespa.flags.FlagSource;
@@ -89,10 +89,10 @@ public class SuperModelManager implements SuperModelProvider {
}
}
- public void configActivated(ApplicationSet applicationSet) {
+ public void configActivated(ApplicationVersions applicationVersions) {
synchronized (monitor) {
// TODO: Should supermodel care about multiple versions?
- ApplicationInfo applicationInfo = applicationSet
+ ApplicationInfo applicationInfo = applicationVersions
.getForVersionOrLatest(Optional.empty(), Instant.now())
.toApplicationInfo();
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java
index 93bb44e25d3..d43d898f8c3 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java
@@ -10,7 +10,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.config.ConfigKey;
import com.yahoo.vespa.config.GetConfigRequest;
import com.yahoo.vespa.config.protocol.ConfigResponse;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.rpc.ConfigResponseFactory;
import java.util.Optional;
@@ -46,10 +46,10 @@ public class SuperModelRequestHandler implements RequestHandler {
* Signals that config has been activated for an {@link com.yahoo.vespa.config.server.application.Application}
* belonging to a tenant.
*
- * @param applicationSet The activated set of {@link com.yahoo.vespa.config.server.application.Application}.
+ * @param applicationVersions The activated set of {@link com.yahoo.vespa.config.server.application.Application}s.
*/
- public synchronized void activateConfig(ApplicationSet applicationSet) {
- superModelManager.configActivated(applicationSet);
+ public synchronized void activateConfig(ApplicationVersions applicationVersions) {
+ superModelManager.configActivated(applicationVersions);
updateHandler();
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java
index d757421c23a..68b1339a0a9 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabase.java
@@ -23,10 +23,11 @@ import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.Optional;
+import java.util.OptionalLong;
import java.util.concurrent.ExecutorService;
import java.util.function.UnaryOperator;
-import java.util.stream.Collectors;
+import static com.yahoo.vespa.curator.transaction.CuratorOperations.setData;
import static java.util.stream.Collectors.toUnmodifiableMap;
/**
@@ -73,13 +74,19 @@ public class ApplicationCuratorDatabase {
/**
* Creates a node for the given application, marking its existence.
*/
- public void createApplication(ApplicationId id) {
+ public void createApplication(ApplicationId id, boolean writeAsJson) {
if ( ! id.tenant().equals(tenant))
throw new IllegalArgumentException("Cannot write application id '" + id + "' for tenant '" + tenant + "'");
+
try (Lock lock = lock(id)) {
if (curator.exists(applicationPath(id))) return;
- curator.create(applicationPath(id));
+ if (writeAsJson) {
+ var applicationData = new ApplicationData(id, OptionalLong.empty(), OptionalLong.empty());
+ curator.set(applicationPath(id), applicationData.toJson());
+ } else {
+ curator.create(applicationPath(id));
+ }
modifyReindexing(id, ApplicationReindexing.empty(), UnaryOperator.identity());
}
}
@@ -88,10 +95,34 @@ public class ApplicationCuratorDatabase {
* Returns a transaction which writes the given session id as the currently active for the given application.
*
* @param applicationId An {@link ApplicationId} that represents an active application.
- * @param sessionId Id of the session containing the application package for this id.
+ * @param sessionId session id belonging to the application package for this application id.
*/
- public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
- return new CuratorTransaction(curator).add(CuratorOperations.setData(applicationPath(applicationId).getAbsolute(), Utf8.toAsciiBytes(sessionId)));
+ public Transaction createWriteActiveTransaction(Transaction transaction, ApplicationId applicationId, long sessionId, boolean writeAsJson) {
+ String path = applicationPath(applicationId).getAbsolute();
+ return transaction.add(writeAsJson
+ ? setData(path, new ApplicationData(applicationId, OptionalLong.of(sessionId), OptionalLong.of(sessionId)).toJson())
+ : setData(path, Utf8.toAsciiBytes(sessionId)));
+ }
+
+ /**
+     * Returns a transaction which writes the given session id as the last deployed for the given application.
+ *
+ * @param applicationId An {@link ApplicationId} that represents an active application.
+ * @param sessionId session id belonging to the application package for this application id.
+ */
+ public Transaction createWritePrepareTransaction(Transaction transaction,
+ ApplicationId applicationId,
+ long sessionId,
+ OptionalLong activeSessionId,
+ boolean writeAsJson) {
+
+        // Needs to read, or be supplied with, the current active session id to avoid overwriting a newer session id.
+
+ String path = applicationPath(applicationId).getAbsolute();
+ if (writeAsJson)
+ return transaction.add(setData(path, new ApplicationData(applicationId, activeSessionId, OptionalLong.of(sessionId)).toJson()));
+ else
+ return transaction; // Do nothing, as there is nothing to write in this case
}
/**
@@ -106,10 +137,35 @@ public class ApplicationCuratorDatabase {
* Returns Optional.empty() if application not found or no active session exists.
*/
public Optional<Long> activeSessionOf(ApplicationId id) {
+ return applicationData(id).flatMap(ApplicationData::activeSession);
+ }
+
+ /**
+ * Returns application data for the given application.
+ * Returns Optional.empty() if application not found or no application data exists.
+ */
+ public Optional<ApplicationData> applicationData(ApplicationId id) {
Optional<byte[]> data = curator.getData(applicationPath(id));
- return (data.isEmpty() || data.get().length == 0)
- ? Optional.empty()
- : data.map(bytes -> Long.parseLong(Utf8.toString(bytes)));
+ if (data.isEmpty() || data.get().length == 0) return Optional.empty();
+
+ try {
+ return Optional.of(ApplicationData.fromBytes(data.get()));
+ } catch (IllegalArgumentException e) {
+ return applicationDataOldFormat(id);
+ }
+ }
+
+ /**
+     * Returns application data for the given application, read from the legacy format where the
+     * node contains only the active session id.
+     * Returns Optional.empty() if application not found or no application data exists.
+ */
+ public Optional<ApplicationData> applicationDataOldFormat(ApplicationId id) {
+ Optional<byte[]> data = curator.getData(applicationPath(id));
+ if (data.isEmpty() || data.get().length == 0) return Optional.empty();
+
+ return Optional.of(new ApplicationData(id,
+ OptionalLong.of(data.map(bytes -> Long.parseLong(Utf8.toString(bytes))).get()),
+ OptionalLong.empty()));
}
/**
@@ -134,13 +190,11 @@ public class ApplicationCuratorDatabase {
curator.set(reindexingDataPath(id), ReindexingStatusSerializer.toBytes(status));
}
-
/** Sets up a listenable cache with the given listener, over the applications path of this tenant. */
public Curator.DirectoryCache createApplicationsPathCache(ExecutorService zkCacheExecutor) {
return curator.createDirectoryCache(applicationsPath.getAbsolute(), false, false, zkCacheExecutor);
}
-
private Path reindexingLockPath(ApplicationId id) {
return locksPath.append(id.serializedForm()).append("reindexing");
}
@@ -153,16 +207,10 @@ public class ApplicationCuratorDatabase {
return applicationsPath.append(id.serializedForm());
}
- // Used to determine whether future preparations of this application should use a dedicated CCC.
- private Path dedicatedClusterControllerClusterPath(ApplicationId id) {
- return applicationPath(id).append("dedicatedClusterControllerCluster");
- }
-
private Path reindexingDataPath(ApplicationId id) {
return applicationPath(id).append("reindexing");
}
-
private static class ReindexingStatusSerializer {
private static final String ENABLED = "enabled";
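
applicationData first tries the new JSON node format and, if that fails with IllegalArgumentException, falls back to the legacy format where the node holds only the active session id as ASCII digits. A self-contained sketch of that read path; the regex field scraping stands in for the real SlimeUtils parsing and is for illustration only:

import java.nio.charset.StandardCharsets;
import java.util.OptionalLong;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class ApplicationNodeReadSketch {

    record Data(OptionalLong activeSession, OptionalLong lastDeployedSession) { }

    static Data read(byte[] node) {
        String content = new String(node, StandardCharsets.UTF_8).trim();
        try {
            return fromJson(content);                                                       // new format
        } catch (IllegalArgumentException e) {
            return new Data(OptionalLong.of(Long.parseLong(content)), OptionalLong.empty()); // legacy format
        }
    }

    // Trivial field scrape standing in for ApplicationData.fromBytes (illustration only).
    static Data fromJson(String json) {
        if ( ! json.startsWith("{")) throw new IllegalArgumentException("Not JSON: " + json);
        Matcher active = Pattern.compile("\"activeSession\":(\\d+)").matcher(json);
        Matcher deployed = Pattern.compile("\"lastDeployedSession\":(\\d+)").matcher(json);
        return new Data(active.find() ? OptionalLong.of(Long.parseLong(active.group(1))) : OptionalLong.empty(),
                        deployed.find() ? OptionalLong.of(Long.parseLong(deployed.group(1))) : OptionalLong.empty());
    }

    public static void main(String[] args) {
        byte[] newFormat = "{\"applicationId\":\"t:a:i\",\"activeSession\":3,\"lastDeployedSession\":4}".getBytes(StandardCharsets.UTF_8);
        byte[] oldFormat = "7".getBytes(StandardCharsets.UTF_8);
        System.out.println(read(newFormat)); // Data[activeSession=OptionalLong[3], lastDeployedSession=OptionalLong[4]]
        System.out.println(read(oldFormat)); // Data[activeSession=OptionalLong[7], lastDeployedSession=OptionalLong.empty]
    }
}
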
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationData.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationData.java
new file mode 100644
index 00000000000..31e54bd67a0
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationData.java
@@ -0,0 +1,81 @@
+package com.yahoo.vespa.config.server.application;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+
+import java.io.IOException;
+import java.util.Optional;
+import java.util.OptionalLong;
+
+import static com.yahoo.slime.SlimeUtils.optionalLong;
+
+/**
+ * Data class for application id, active session and last deployed session
+ *
+ * @author hmusum
+ */
+public class ApplicationData {
+
+ private static final String APPLICATION_ID_FIELD = "applicationId";
+ private static final String ACTIVE_SESSION_FIELD = "activeSession";
+ private static final String LAST_DEPLOYED_SESSION_FIELD = "lastDeployedSession";
+
+ private final ApplicationId applicationId;
+ private final OptionalLong activeSession;
+ private final OptionalLong lastDeployedSession;
+
+ ApplicationData(ApplicationId applicationId, OptionalLong activeSession, OptionalLong lastDeployedSession) {
+ this.applicationId = applicationId;
+ this.activeSession = activeSession;
+ this.lastDeployedSession = lastDeployedSession;
+ }
+
+ static ApplicationData fromBytes(byte[] data) {
+ return fromSlime(SlimeUtils.jsonToSlime(data));
+ }
+
+ static ApplicationData fromSlime(Slime slime) {
+ Cursor cursor = slime.get();
+ return new ApplicationData(ApplicationId.fromSerializedForm(cursor.field(APPLICATION_ID_FIELD).asString()),
+ optionalLong(cursor.field(ACTIVE_SESSION_FIELD)),
+ optionalLong(cursor.field(LAST_DEPLOYED_SESSION_FIELD)));
+ }
+
+ public byte[] toJson() {
+ try {
+ Slime slime = new Slime();
+ toSlime(slime.setObject());
+ return SlimeUtils.toJsonBytes(slime);
+ } catch (IOException e) {
+ throw new RuntimeException("Serialization of application data to json failed", e);
+ }
+ }
+
+ public ApplicationId applicationId() { return applicationId; }
+
+ public Optional<Long> activeSession() {
+ return Optional.of(activeSession)
+ .filter(OptionalLong::isPresent)
+ .map(OptionalLong::getAsLong);
+ }
+
+ public Optional<Long> lastDeployedSession() {
+ return Optional.of(lastDeployedSession)
+ .filter(OptionalLong::isPresent)
+ .map(OptionalLong::getAsLong);
+ }
+
+ @Override
+ public String toString() {
+ return "application '" + applicationId + "', active session " + activeSession + ", last deployed session " + lastDeployedSession;
+ }
+
+ private void toSlime(Cursor object) {
+ object.setString(APPLICATION_ID_FIELD, applicationId.serializedForm());
+ activeSession.ifPresent(session -> object.setLong(ACTIVE_SESSION_FIELD, session));
+ lastDeployedSession.ifPresent(session -> object.setLong(LAST_DEPLOYED_SESSION_FIELD, session));
+ }
+
+}
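
For reference, ApplicationData stores at most three fields, omitting the optional ones when empty. A round-trip sketch, assuming same-package placement (the constructor and fromBytes are package-private, as in the config server's own tests) and the config server module on the classpath:

package com.yahoo.vespa.config.server.application;

import com.yahoo.config.provision.ApplicationId;

import java.nio.charset.StandardCharsets;
import java.util.OptionalLong;

// Illustration only; not part of the patch.
class ApplicationDataRoundTripSketch {

    public static void main(String[] args) {
        ApplicationId id = ApplicationId.fromSerializedForm("mytenant:myapp:default");
        ApplicationData data = new ApplicationData(id, OptionalLong.of(3), OptionalLong.of(4));

        // Stored bytes look like:
        // {"applicationId":"mytenant:myapp:default","activeSession":3,"lastDeployedSession":4}
        byte[] json = data.toJson();
        System.out.println(new String(json, StandardCharsets.UTF_8));

        ApplicationData read = ApplicationData.fromBytes(json);
        System.out.println(read.activeSession());        // Optional[3]
        System.out.println(read.lastDeployedSession());  // Optional[4]
    }
}
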
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationMapper.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationMapper.java
index c195e5cc8c4..6db01c91dea 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationMapper.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationMapper.java
@@ -21,20 +21,20 @@ import java.util.concurrent.ConcurrentHashMap;
*/
public final class ApplicationMapper {
- private final Map<ApplicationId, ApplicationSet> requestHandlers = new ConcurrentHashMap<>();
+ private final Map<ApplicationId, ApplicationVersions> requestHandlers = new ConcurrentHashMap<>();
- private ApplicationSet getApplicationSet(ApplicationId applicationId) {
- ApplicationSet set = requestHandlers.get(applicationId);
- if (set == null) throw new NotFoundException("No such application id: " + applicationId);
+ private ApplicationVersions applicationVersions(ApplicationId applicationId) {
+ ApplicationVersions versions = requestHandlers.get(applicationId);
+ if (versions == null) throw new NotFoundException("No such application id: " + applicationId);
- return set;
+ return versions;
}
/**
- * Register a Application to an application id and specific vespa version
+ * Register an Application to an application id and specific vespa version
*/
- public void register(ApplicationId applicationId, ApplicationSet applicationSet) {
- requestHandlers.put(applicationId, applicationSet);
+ public void register(ApplicationId applicationId, ApplicationVersions applicationVersions) {
+ requestHandlers.put(applicationId, applicationVersions);
}
/**
@@ -45,12 +45,12 @@ public final class ApplicationMapper {
}
/**
- * Retrieve the Application corresponding to this application id and specific vespa version.
+ * Retrieve the Application corresponding to this application id and specified vespa version.
*
* @return the matching application, or null if none matches
*/
public Application getForVersion(ApplicationId applicationId, Optional<Version> vespaVersion, Instant now) throws VersionDoesNotExistException {
- return getApplicationSet(applicationId).getForVersionOrLatest(vespaVersion, now);
+ return applicationVersions(applicationId).getForVersionOrLatest(vespaVersion, now);
}
/** Returns whether this registry has an application for the given application id */
@@ -80,7 +80,7 @@ public final class ApplicationMapper {
}
public List<Application> listApplications(ApplicationId applicationId) {
- return requestHandlers.get(applicationId).getAllApplications();
+ return requestHandlers.get(applicationId).applications();
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationReindexing.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationReindexing.java
index f4e1918e6e3..4c32cccdf20 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationReindexing.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationReindexing.java
@@ -6,14 +6,13 @@ import com.yahoo.vespa.config.server.maintenance.ReindexingMaintainer;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
-import static java.util.Objects.requireNonNull;
-import static java.util.stream.Collectors.toMap;
-import static java.util.stream.Collectors.toUnmodifiableMap;
+import static java.util.Comparator.naturalOrder;
/**
* Pending reindexing: convergence to the stored config generation allows reindexing to start.
@@ -98,6 +97,16 @@ public class ApplicationReindexing implements Reindexing {
return Optional.ofNullable(clusters.get(clusterName)).map(cluster -> cluster.ready().get(documentType));
}
+    /** Instant at which reindexing in this was last readied, or empty if reindexing is disabled or still pending. */
+ public Optional<Instant> lastReadiedAt() {
+ if ( ! enabled) return Optional.empty();
+ if (clusters.values().stream().anyMatch(cluster -> ! cluster.pending().isEmpty())) return Optional.empty();
+ return clusters.values().stream()
+ .flatMap(cluster -> cluster.ready().values().stream())
+ .map(Reindexing.Status::ready)
+ .max(naturalOrder());
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
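
lastReadiedAt only reports an instant once nothing is pending: disabled reindexing, or any cluster with pending work, yields empty; otherwise the newest ready instant wins. A small standalone sketch of the same reduction over plain maps (the Cluster record here is a simplification, not the real type):

import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.Optional;

import static java.util.Comparator.naturalOrder;

class LastReadiedSketch {

    record Cluster(Map<String, Long> pending, Map<String, Instant> ready) { }

    static Optional<Instant> lastReadiedAt(boolean enabled, List<Cluster> clusters) {
        if ( ! enabled) return Optional.empty();
        if (clusters.stream().anyMatch(cluster -> ! cluster.pending().isEmpty())) return Optional.empty();
        return clusters.stream()
                       .flatMap(cluster -> cluster.ready().values().stream())
                       .max(naturalOrder());
    }

    public static void main(String[] args) {
        var done = new Cluster(Map.of(), Map.of("music", Instant.parse("2023-05-01T00:00:00Z")));
        var busy = new Cluster(Map.of("music", 7L), Map.of());
        System.out.println(lastReadiedAt(true, List.of(done)));       // Optional[2023-05-01T00:00:00Z]
        System.out.println(lastReadiedAt(true, List.of(done, busy))); // Optional.empty
    }
}
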
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationSet.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationVersions.java
index 5650c2e7e15..71ec91e758f 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationSet.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ApplicationVersions.java
@@ -16,14 +16,14 @@ import java.util.Optional;
*
* @author vegard
*/
-public final class ApplicationSet {
+public final class ApplicationVersions {
private final Version latestVersion;
private final ApplicationId applicationId;
private final long generation;
private final HashMap<Version, Application> applications = new HashMap<>();
- private ApplicationSet(List<Application> applications) {
+ private ApplicationVersions(List<Application> applications) {
if (applications.isEmpty()) throw new IllegalArgumentException("application list cannot be empty");
Application firstApp = applications.get(0);
@@ -44,12 +44,12 @@ public final class ApplicationSet {
latestVersion = this.applications.keySet().stream().max(Version::compareTo).get();
}
- public static ApplicationSet fromList(List<Application> applications) {
- return new ApplicationSet(applications);
+ public static ApplicationVersions fromList(List<Application> applications) {
+ return new ApplicationVersions(applications);
}
// For testing
- public static ApplicationSet from(Application application) {
+ public static ApplicationVersions from(Application application) {
return fromList(List.of(application));
}
@@ -86,7 +86,7 @@ public final class ApplicationSet {
public ApplicationId getId() { return applicationId; }
- public Collection<String> getAllHosts() {
+ public Collection<String> allHosts() {
return applications.values().stream()
.flatMap(app -> app.getModel().getHosts().stream()
.map(HostInfo::getHostname))
@@ -97,15 +97,15 @@ public final class ApplicationSet {
applications.values().forEach(app -> app.updateHostMetrics(app.getModel().getHosts().size()));
}
- public long getApplicationGeneration() {
+ public long applicationGeneration() {
return generation;
}
- List<Application> getAllApplications() {
+ List<Application> applications() {
return new ArrayList<>(applications.values());
}
- public List<Version> getAllVersions(ApplicationId applicationId) {
+ public List<Version> versions(ApplicationId applicationId) {
return applications.values().stream()
.filter(application -> application.getId().equals(applicationId))
.map(Application::getVespaVersion)
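
ApplicationSet is renamed to ApplicationVersions and its accessors lose their get prefixes. A usage sketch of the renamed API, assuming an Application instance built elsewhere (for example by a test fixture):

import com.yahoo.component.Version;
import com.yahoo.vespa.config.server.application.Application;
import com.yahoo.vespa.config.server.application.ApplicationVersions;

import java.time.Instant;
import java.util.Optional;

class ApplicationVersionsSketch {

    static void use(Application application) {
        ApplicationVersions versions = ApplicationVersions.from(application);   // was ApplicationSet.from
        Application latest = versions.getForVersionOrLatest(Optional.empty(), Instant.now());
        Iterable<Version> onVersions = versions.versions(versions.getId());     // was getAllVersions
        Iterable<String> hosts = versions.allHosts();                           // was getAllHosts
        long generation = versions.applicationGeneration();                     // was getApplicationGeneration
        System.out.println(latest.getId() + " on " + onVersions + " at generation " + generation + ", hosts " + hosts);
    }
}
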
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigInstanceBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigInstanceBuilder.java
index d99d9a7e017..920e1862efa 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigInstanceBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigInstanceBuilder.java
@@ -53,9 +53,8 @@ class ConfigInstanceBuilder {
Field innerField = builder.getClass().getDeclaredField(node.getName());
innerField.setAccessible(true);
Object innerFieldVal = innerField.get(builder);
- if (innerFieldVal instanceof List) {
+ if (innerFieldVal instanceof List<?> innerList) {
// inner array? Check that list elems are ConfigBuilder
- List<?> innerList = (List<?>) innerFieldVal;
for (Object b : innerList) {
if (b instanceof ConfigBuilder) {
applyDef((ConfigBuilder) b, (InnerCNode) node);
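
The explicit cast after the instanceof check is collapsed into a pattern match that binds innerList directly. A tiny generic example of the same idiom:

import java.util.List;

class PatternMatchSketch {

    static int sizeIfList(Object value) {
        // Pattern matching for instanceof binds the cast result in one step.
        if (value instanceof List<?> list) return list.size();
        return -1;
    }

    public static void main(String[] args) {
        System.out.println(sizeIfList(List.of("a", "b"))); // 2
        System.out.println(sizeIfList("not a list"));      // -1
    }
}
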
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigNotConvergedException.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigNotConvergedException.java
index 88cddb93d9d..4fe8dc0866c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigNotConvergedException.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/ConfigNotConvergedException.java
@@ -10,8 +10,4 @@ public class ConfigNotConvergedException extends RuntimeException {
super(t);
}
- public ConfigNotConvergedException(String message) {
- super(message);
- }
-
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/FileDistributionStatus.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/FileDistributionStatus.java
index c80faa2375a..ef34d62cef6 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/FileDistributionStatus.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/FileDistributionStatus.java
@@ -100,16 +100,9 @@ public class FileDistributionStatus extends AbstractComponent {
int countFinished = 0;
for (HostStatus hostStatus : hostStatuses) {
switch (hostStatus.status) {
- case IN_PROGRESS:
- countInProgress++;
- break;
- case FINISHED:
- countFinished++;
- break;
- case UNKNOWN:
- countUnknown++;
- break;
- default:
+ case IN_PROGRESS -> countInProgress++;
+ case FINISHED -> countFinished++;
+ case UNKNOWN -> countUnknown++;
}
}
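
The fall-through switch with break statements becomes arrow-labelled cases, which cannot fall through and need no break. A compact example of the same counting shape:

import java.util.List;

class SwitchCountSketch {

    enum Status { IN_PROGRESS, FINISHED, UNKNOWN }

    public static void main(String[] args) {
        int inProgress = 0, finished = 0, unknown = 0;
        for (Status status : List.of(Status.FINISHED, Status.UNKNOWN, Status.FINISHED)) {
            // Arrow labels: each case is a single statement, no fall-through.
            switch (status) {
                case IN_PROGRESS -> inProgress++;
                case FINISHED -> finished++;
                case UNKNOWN -> unknown++;
            }
        }
        System.out.println(inProgress + " " + finished + " " + unknown); // 0 2 1
    }
}
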
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java
index cddcb0f316d..1d1ed1042ee 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/TenantApplications.java
@@ -27,7 +27,9 @@ import com.yahoo.vespa.curator.CompletionTimeoutException;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.curator.transaction.CuratorTransaction;
+import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.ListFlag;
import com.yahoo.vespa.flags.PermanentFlags;
import org.apache.curator.framework.CuratorFramework;
@@ -41,6 +43,7 @@ import java.util.Collection;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Optional;
+import java.util.OptionalLong;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
@@ -76,6 +79,7 @@ public class TenantApplications implements RequestHandler, HostValidator {
private final TenantFileSystemDirs tenantFileSystemDirs;
private final String serverId;
private final ListFlag<String> incompatibleVersions;
+ private final BooleanFlag writeApplicationDataAsJson;
public TenantApplications(TenantName tenant, Curator curator, StripedExecutor<TenantName> zkWatcherExecutor,
ExecutorService zkCacheExecutor, Metrics metrics, ConfigActivationListener configActivationListener,
@@ -97,6 +101,7 @@ public class TenantApplications implements RequestHandler, HostValidator {
this.clock = clock;
this.serverId = configserverConfig.serverId();
this.incompatibleVersions = PermanentFlags.INCOMPATIBLE_VERSIONS.bindTo(flagSource);
+ this.writeApplicationDataAsJson = Flags.WRITE_APPLICATION_DATA_AS_JSON.bindTo(flagSource);
}
/** The curator backed ZK storage of this. */
@@ -123,6 +128,14 @@ public class TenantApplications implements RequestHandler, HostValidator {
return database().activeSessionOf(id);
}
+ /**
+ * Returns application data for the given application.
+     * Returns Optional.empty() if application not found or no application data exists.
+ */
+ public Optional<ApplicationData> applicationData(ApplicationId id) {
+ return database().applicationData(id);
+ }
+
public boolean sessionExistsInFileSystem(long sessionId) {
return Files.exists(Paths.get(tenantFileSystemDirs.sessionsPath().getAbsolutePath(), String.valueOf(sessionId)));
}
@@ -131,17 +144,34 @@ public class TenantApplications implements RequestHandler, HostValidator {
* Returns a transaction which writes the given session id as the currently active for the given application.
*
* @param applicationId An {@link ApplicationId} that represents an active application.
- * @param sessionId Id of the session containing the application package for this id.
+ * @param sessionId session id belonging to the application package for this application id.
+ */
+ public Transaction createWriteActiveTransaction(Transaction transaction, ApplicationId applicationId, long sessionId) {
+ return database().createWriteActiveTransaction(transaction, applicationId, sessionId, writeApplicationDataAsJson.value());
+ }
+
+ /**
+ * Returns a transaction which writes the given session id as the last deployed for the given application.
+ *
+ * @param applicationId An {@link ApplicationId} that represents an active application.
+ * @param sessionId session id belonging to the application package for this application id.
*/
- public Transaction createPutTransaction(ApplicationId applicationId, long sessionId) {
- return database().createPutTransaction(applicationId, sessionId);
+ public Transaction createWritePrepareTransaction(Transaction transaction,
+ ApplicationId applicationId,
+ long sessionId,
+ Optional<Long> activeSessionId) {
+ return database().createWritePrepareTransaction(transaction,
+ applicationId,
+ sessionId,
+ activeSessionId.map(OptionalLong::of).orElseGet(OptionalLong::empty),
+ writeApplicationDataAsJson.value());
}
/**
* Creates a node for the given application, marking its existence.
*/
public void createApplication(ApplicationId id) {
- database().createApplication(id);
+ database().createApplication(id, writeApplicationDataAsJson.value());
}
/**
@@ -215,29 +245,29 @@ public class TenantApplications implements RequestHandler, HostValidator {
return application.resolveConfig(req, responseFactory);
}
- private void notifyConfigActivationListeners(ApplicationSet applicationSet) {
- List<Application> applications = applicationSet.getAllApplications();
+ private void notifyConfigActivationListeners(ApplicationVersions applicationVersions) {
+ List<Application> applications = applicationVersions.applications();
if (applications.isEmpty()) throw new IllegalArgumentException("application set cannot be empty");
- hostRegistry.update(applications.get(0).getId(), applicationSet.getAllHosts());
- configActivationListener.configActivated(applicationSet);
+ hostRegistry.update(applications.get(0).getId(), applicationVersions.allHosts());
+ configActivationListener.configActivated(applicationVersions);
}
/**
* Activates the config of the given app. Notifies listeners
*
- * @param applicationSet the {@link ApplicationSet} to be activated
+ * @param applicationVersions the {@link ApplicationVersions} to be activated
*/
- public void activateApplication(ApplicationSet applicationSet, long activeSessionId) {
- ApplicationId id = applicationSet.getId();
+ public void activateApplication(ApplicationVersions applicationVersions, long activeSessionId) {
+ ApplicationId id = applicationVersions.getId();
try (@SuppressWarnings("unused") Lock lock = lock(id)) {
if ( ! exists(id))
return; // Application was deleted before activation.
- if (applicationSet.getApplicationGeneration() != activeSessionId)
+ if (applicationVersions.applicationGeneration() != activeSessionId)
return; // Application activated a new session before we got here.
- setActiveApp(applicationSet);
- notifyConfigActivationListeners(applicationSet);
+ setActiveApp(applicationVersions);
+ notifyConfigActivationListeners(applicationVersions);
}
}
@@ -281,13 +311,13 @@ public class TenantApplications implements RequestHandler, HostValidator {
configActivationListener.applicationRemoved(applicationId);
}
- private void setActiveApp(ApplicationSet applicationSet) {
- ApplicationId applicationId = applicationSet.getId();
- Collection<String> hostsForApp = applicationSet.getAllHosts();
+ private void setActiveApp(ApplicationVersions applicationVersions) {
+ ApplicationId applicationId = applicationVersions.getId();
+ Collection<String> hostsForApp = applicationVersions.allHosts();
hostRegistry.update(applicationId, hostsForApp);
- applicationSet.updateHostMetrics();
+ applicationVersions.updateHostMetrics();
tenantMetricUpdater.setApplications(applicationMapper.numApplications());
- applicationMapper.register(applicationId, applicationSet);
+ applicationMapper.register(applicationId, applicationVersions);
}
@Override
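
TenantApplications now binds Flags.WRITE_APPLICATION_DATA_AS_JSON and threads its value down to ApplicationCuratorDatabase, which picks between the JSON node format and the legacy bytes. A hedged sketch of that gating, with a plain boolean supplier standing in for the bound flag; the JSON layout mirrors ApplicationData.toJson():

import java.nio.charset.StandardCharsets;
import java.util.function.BooleanSupplier;

class WriteFormatGateSketch {

    static byte[] applicationNodeBytes(long sessionId, String serializedApplicationId, BooleanSupplier writeAsJson) {
        if (writeAsJson.getAsBoolean())                 // stand-in for writeApplicationDataAsJson.value()
            return ("{\"applicationId\":\"" + serializedApplicationId
                    + "\",\"activeSession\":" + sessionId
                    + ",\"lastDeployedSession\":" + sessionId + "}").getBytes(StandardCharsets.UTF_8);
        return Long.toString(sessionId).getBytes(StandardCharsets.US_ASCII); // legacy format: session id only
    }

    public static void main(String[] args) {
        System.out.println(new String(applicationNodeBytes(3, "mytenant:myapp:default", () -> true), StandardCharsets.UTF_8));
        System.out.println(new String(applicationNodeBytes(3, "mytenant:myapp:default", () -> false), StandardCharsets.US_ASCII));
    }
}
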
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
index d815ea3328a..ada82abf907 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ModelContextImpl.java
@@ -195,11 +195,9 @@ public class ModelContextImpl implements ModelContext {
private final int mbus_cpp_events_before_wakeup;
private final int rpc_num_targets;
private final int rpc_events_before_wakeup;
- private final boolean useRestrictedDataPlaneBindings;
private final int heapPercentage;
private final boolean enableGlobalPhase;
private final String summaryDecodePolicy;
- private final Predicate<ClusterSpec.Id> allowMoreThanOneContentGroupDown;
private final boolean enableDataplaneProxy;
private final boolean enableNestedMultivalueGrouping;
private final boolean useReconfigurableDispatcher;
@@ -239,11 +237,9 @@ public class ModelContextImpl implements ModelContext {
this.rpc_events_before_wakeup = flagValue(source, appId, version, Flags.RPC_EVENTS_BEFORE_WAKEUP);
this.queryDispatchPolicy = flagValue(source, appId, version, Flags.QUERY_DISPATCH_POLICY);
this.queryDispatchWarmup = flagValue(source, appId, version, PermanentFlags.QUERY_DISPATCH_WARMUP);
- this.useRestrictedDataPlaneBindings = flagValue(source, appId, version, Flags.RESTRICT_DATA_PLANE_BINDINGS);
this.heapPercentage = flagValue(source, appId, version, PermanentFlags.HEAP_SIZE_PERCENTAGE);
this.enableGlobalPhase = flagValue(source, appId, version, Flags.ENABLE_GLOBAL_PHASE);
this.summaryDecodePolicy = flagValue(source, appId, version, Flags.SUMMARY_DECODE_POLICY);
- this.allowMoreThanOneContentGroupDown = clusterId -> flagValue(source, appId, version, clusterId, Flags.ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN);
this.enableDataplaneProxy = flagValue(source, appId, version, Flags.ENABLE_DATAPLANE_PROXY);
this.enableNestedMultivalueGrouping = flagValue(source, appId, version, Flags.ENABLE_NESTED_MULTIVALUE_GROUPING);
this.useReconfigurableDispatcher = flagValue(source, appId, version, Flags.USE_RECONFIGURABLE_DISPATCHER);
@@ -293,9 +289,7 @@ public class ModelContextImpl implements ModelContext {
}
return defVal;
}
- @Override public boolean useRestrictedDataPlaneBindings() { return useRestrictedDataPlaneBindings; }
@Override public boolean enableGlobalPhase() { return enableGlobalPhase; }
- @Override public boolean allowMoreThanOneContentGroupDown(ClusterSpec.Id id) { return allowMoreThanOneContentGroupDown.test(id); }
@Override public boolean enableDataplaneProxy() { return enableDataplaneProxy; }
@Override public boolean enableNestedMultivalueGrouping() { return enableNestedMultivalueGrouping; }
@Override public boolean useReconfigurableDispatcher() { return useReconfigurableDispatcher; }
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
deleted file mode 100644
index 0acf32d79a7..00000000000
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.config.server.deploy;
-
-import com.yahoo.component.Version;
-import com.yahoo.config.application.api.ApplicationFile;
-import com.yahoo.config.application.api.ApplicationMetaData;
-import com.yahoo.config.application.api.ApplicationPackage;
-import com.yahoo.config.application.api.DeployLogger;
-import com.yahoo.config.application.api.FileRegistry;
-import com.yahoo.config.application.api.UnparsedConfigDefinition;
-import com.yahoo.config.provision.AllocatedHosts;
-import com.yahoo.config.provision.serialization.AllocatedHostsSerializer;
-import com.yahoo.io.reader.NamedReader;
-import com.yahoo.path.Path;
-import com.yahoo.text.Utf8;
-import com.yahoo.vespa.config.ConfigDefinitionKey;
-import com.yahoo.vespa.config.server.filedistribution.FileDBRegistry;
-import com.yahoo.vespa.config.server.zookeeper.ZKApplicationPackage;
-import com.yahoo.vespa.curator.Curator;
-import com.yahoo.yolean.Exceptions;
-
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.logging.Level;
-
-import static com.yahoo.config.application.api.ApplicationPackage.*;
-import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.DEFCONFIGS_ZK_SUBPATH;
-import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.META_ZK_PATH;
-import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USERAPP_ZK_SUBPATH;
-import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USER_DEFCONFIGS_ZK_SUBPATH;
-
-/**
- * Reads and writes application package to and from ZooKeeper.
- *
- * @author hmusum
- */
-public class ZooKeeperClient {
-
- private final Curator curator;
- private final DeployLogger logger;
- private final Path sessionPath; // session id
-
- private static final ApplicationFile.PathFilter xmlFilter = path -> path.getName().endsWith(".xml");
-
- public ZooKeeperClient(Curator curator, DeployLogger logger, Path sessionPath) {
- this.curator = curator;
- this.logger = logger;
- this.sessionPath = sessionPath;
- }
-
- /**
- * Sets up basic node structure in ZooKeeper and purges old data.
- * This is the first operation on ZK during deploy.
- */
- void initialize() {
- curator.create(sessionPath);
-
- for (String subPath : Arrays.asList(DEFCONFIGS_ZK_SUBPATH,
- USER_DEFCONFIGS_ZK_SUBPATH,
- USERAPP_ZK_SUBPATH,
- ZKApplicationPackage.fileRegistryNode)) {
- // TODO: The replaceFirst below is hackish.
- curator.create(getZooKeeperAppPath().append(subPath.replaceFirst("/", "")));
- }
- }
-
- /**
- * Writes def files and user config into ZK.
- *
- * @param app the application package to feed to zookeeper
- */
- void writeApplicationPackage(ApplicationPackage app) {
- try {
- writeUserDefs(app);
- writeSomeOf(app);
- writeSchemas(app);
- writeUserIncludeDirs(app, app.getUserIncludeDirs());
- writeMetadata(app.getMetaData());
- } catch (Exception e) {
- throw new IllegalStateException("Unable to write vespa model to config server(s) " + System.getProperty("configsources") + "\n" +
- "Please ensure that config server is started " +
- "and check the vespa log for configserver errors. ", e);
- }
- }
-
- private void writeSchemas(ApplicationPackage app) throws IOException {
- Collection<NamedReader> schemas = app.getSchemas();
- if (schemas.isEmpty()) return;
-
- Path zkPath = getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SCHEMAS_DIR);
- curator.create(zkPath);
- // Ensures that ranking expressions and other files are also written
- writeDir(app.getFile(ApplicationPackage.SEARCH_DEFINITIONS_DIR), zkPath, true);
- writeDir(app.getFile(ApplicationPackage.SCHEMAS_DIR), zkPath, true);
- for (NamedReader sd : schemas) {
- curator.set(zkPath.append(sd.getName()), Utf8.toBytes(com.yahoo.io.IOUtils.readAll(sd.getReader())));
- sd.getReader().close();
- }
- }
-
- /**
- * Puts some of the application package files into ZK - see write(app).
- *
- * @param app the application package to use as input.
- * @throws java.io.IOException if not able to write to Zookeeper
- */
- private void writeSomeOf(ApplicationPackage app) throws IOException {
- // TODO: We should have a way of doing this which doesn't require repeating all the content
- writeFile(app.getFile(Path.fromString(SERVICES)), getZooKeeperAppPath(USERAPP_ZK_SUBPATH));
- writeFile(app.getFile(Path.fromString(HOSTS)), getZooKeeperAppPath(USERAPP_ZK_SUBPATH));
- writeFile(app.getFile(Path.fromString(DEPLOYMENT_FILE.getName())), getZooKeeperAppPath(USERAPP_ZK_SUBPATH));
- writeFile(app.getFile(Path.fromString(VALIDATION_OVERRIDES.getName())), getZooKeeperAppPath(USERAPP_ZK_SUBPATH));
- writeDir(app.getFile(RULES_DIR),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(RULES_DIR),
- (path) -> path.getName().endsWith(ApplicationPackage.RULES_NAME_SUFFIX),
- true);
- writeDir(app.getFile(QUERY_PROFILES_DIR),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(QUERY_PROFILES_DIR),
- xmlFilter, true);
- writeDir(app.getFile(PAGE_TEMPLATES_DIR),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(PAGE_TEMPLATES_DIR),
- xmlFilter, true);
- writeDir(app.getFile(Path.fromString(SEARCHCHAINS_DIR)),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SEARCHCHAINS_DIR),
- xmlFilter, true);
- writeDir(app.getFile(Path.fromString(DOCPROCCHAINS_DIR)),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(DOCPROCCHAINS_DIR),
- xmlFilter, true);
- writeDir(app.getFile(Path.fromString(ROUTINGTABLES_DIR)),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(ROUTINGTABLES_DIR),
- xmlFilter, true);
- writeDir(app.getFile(MODELS_GENERATED_REPLICATED_DIR),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(MODELS_GENERATED_REPLICATED_DIR),
- true);
- writeDir(app.getFile(SECURITY_DIR),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SECURITY_DIR),
- true);
- }
-
- private void writeDir(ApplicationFile file, Path zooKeeperAppPath, boolean recurse) throws IOException {
- writeDir(file, zooKeeperAppPath, (__) -> true, recurse);
- }
-
- private void writeDir(ApplicationFile dir, Path path, ApplicationFile.PathFilter filenameFilter, boolean recurse) throws IOException {
- if ( ! dir.isDirectory()) return;
- for (ApplicationFile file : listFiles(dir, filenameFilter)) {
- String name = file.getPath().getName();
- if (name.startsWith(".")) continue; //.svn , .git ...
- if (file.isDirectory()) {
- curator.create(path.append(name));
- if (recurse) {
- writeDir(file, path.append(name), filenameFilter, recurse);
- }
- } else {
- writeFile(file, path);
- }
- }
- }
-
- /**
- * Like {@link ApplicationFile#listFiles(com.yahoo.config.application.api.ApplicationFile.PathFilter)}
- * with slightly different semantics: Never filter out directories.
- */
- private List<ApplicationFile> listFiles(ApplicationFile dir, ApplicationFile.PathFilter filter) {
- List<ApplicationFile> rawList = dir.listFiles();
- List<ApplicationFile> ret = new ArrayList<>();
- if (rawList != null) {
- for (ApplicationFile f : rawList) {
- if (f.isDirectory()) {
- ret.add(f);
- } else {
- if (filter.accept(f.getPath())) {
- ret.add(f);
- }
- }
- }
- }
- return ret;
- }
-
- private void writeFile(ApplicationFile file, Path zkPath) throws IOException {
- if ( ! file.exists()) return;
-
- ByteArrayOutputStream baos = new ByteArrayOutputStream();
- try (InputStream inputStream = file.createInputStream()) {
- inputStream.transferTo(baos);
- baos.flush();
- curator.set(zkPath.append(file.getPath().getName()), baos.toByteArray());
- }
- }
-
- private void writeUserIncludeDirs(ApplicationPackage applicationPackage, List<String> userIncludeDirs) throws IOException {
- for (String userInclude : userIncludeDirs) {
- ApplicationFile dir = applicationPackage.getFile(Path.fromString(userInclude));
- final List<ApplicationFile> files = dir.listFiles();
- if (files == null || files.isEmpty()) {
- curator.create(getZooKeeperAppPath(USERAPP_ZK_SUBPATH + "/" + userInclude));
- }
- writeDir(dir,
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH + "/" + userInclude),
- xmlFilter, true);
- }
- }
-
- /**
- * Feeds all user-defined .def file from the application package into ZooKeeper (both into
- * /defconfigs and /userdefconfigs
- */
- private void writeUserDefs(ApplicationPackage applicationPackage) {
- Map<ConfigDefinitionKey, UnparsedConfigDefinition> configDefs = applicationPackage.getAllExistingConfigDefs();
- for (Map.Entry<ConfigDefinitionKey, UnparsedConfigDefinition> entry : configDefs.entrySet()) {
- ConfigDefinitionKey key = entry.getKey();
- String contents = entry.getValue().getUnparsedContent();
- writeConfigDefinition(key.getName(), key.getNamespace(), getZooKeeperAppPath(USER_DEFCONFIGS_ZK_SUBPATH), contents);
- writeConfigDefinition(key.getName(), key.getNamespace(), getZooKeeperAppPath(DEFCONFIGS_ZK_SUBPATH), contents);
- }
- logger.log(Level.FINE, configDefs.size() + " user config definitions");
- }
-
- private void writeConfigDefinition(String name, String namespace, Path path, String data) {
- curator.set(path.append(namespace + "." + name), Utf8.toBytes(data));
- }
-
- private void write(Version vespaVersion, FileRegistry fileRegistry) {
- String exportedRegistry = FileDBRegistry.exportRegistry(fileRegistry);
- curator.set(getZooKeeperAppPath(ZKApplicationPackage.fileRegistryNode).append(vespaVersion.toFullString()),
- Utf8.toBytes(exportedRegistry));
- }
-
- /**
- * Feeds application metadata to zookeeper. Used by config model to create config
- * for application metadata
- *
- * @param metaData The application metadata.
- */
- private void writeMetadata(ApplicationMetaData metaData) {
- curator.set(getZooKeeperAppPath(META_ZK_PATH), metaData.asJsonBytes());
- }
-
- void cleanupZooKeeper() {
- try {
- List.of(DEFCONFIGS_ZK_SUBPATH, USER_DEFCONFIGS_ZK_SUBPATH, USERAPP_ZK_SUBPATH)
- .forEach(path -> curator.delete(getZooKeeperAppPath(path)));
- } catch (Exception e) {
- logger.log(Level.WARNING, "Could not clean up in zookeeper: " + Exceptions.toMessageString(e));
- //Might be called in an exception handler before re-throw, so do not throw here.
- }
- }
-
- /**
- * Gets a full ZK application path
- *
- * @return a String with the full ZK application path
- */
- private Path getZooKeeperAppPath() {
- return getZooKeeperAppPath(null);
- }
-
- /**
- * Gets a full ZK application path
- *
- * @param trailingPath trailing part of path to be appended to ZK app path
- * @return a String with the full ZK application path including trailing path, if set
- */
- private Path getZooKeeperAppPath(String trailingPath) {
- if (trailingPath == null) return sessionPath;
-
- return sessionPath.append(trailingPath);
- }
-
- public void write(AllocatedHosts hosts) throws IOException {
- curator.set(sessionPath.append(ZKApplicationPackage.allocatedHostsNode),
- AllocatedHostsSerializer.toJson(hosts));
- }
-
- public void write(Map<Version, FileRegistry> fileRegistryMap) {
- for (Map.Entry<Version, FileRegistry> versionFileRegistryEntry : fileRegistryMap.entrySet()) {
- write(versionFileRegistryEntry.getKey(), versionFileRegistryEntry.getValue());
- }
- }
-
-}
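
The standalone ZooKeeperClient above is removed; its logic moves into the nested ZooKeeperDeployer.Client shown in the next file, and the deployer is now constructed directly from the curator, deploy logger, application id and session id. A hedged usage sketch, assuming those collaborators are available from the surrounding deployment code:

import com.yahoo.component.Version;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.config.provision.AllocatedHosts;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.config.server.deploy.ZooKeeperDeployer;
import com.yahoo.vespa.curator.Curator;

import java.io.IOException;
import java.util.Map;

// Sketch only: the collaborators are assumed to come from the deployment context.
class ZooKeeperDeployerSketch {

    static void deploy(Curator curator, DeployLogger logger, ApplicationId applicationId, long sessionId,
                       ApplicationPackage applicationPackage, Map<Version, FileRegistry> fileRegistries,
                       AllocatedHosts allocatedHosts) throws IOException {
        // Session path is now derived internally from applicationId and sessionId.
        ZooKeeperDeployer deployer = new ZooKeeperDeployer(curator, logger, applicationId, sessionId);
        try {
            deployer.deploy(applicationPackage, fileRegistries, allocatedHosts);
        } catch (RuntimeException | IOException e) {
            deployer.cleanup(); // best-effort removal of partially written session data
            throw e;
        }
    }
}
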
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java
index 8d889b1fb2c..cb50bd54d38 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployer.java
@@ -1,13 +1,53 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.deploy;
+import com.yahoo.config.application.api.ApplicationFile;
+import com.yahoo.config.application.api.ApplicationMetaData;
import com.yahoo.config.application.api.ApplicationPackage;
+import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.application.api.FileRegistry;
+import com.yahoo.config.application.api.UnparsedConfigDefinition;
import com.yahoo.config.provision.AllocatedHosts;
import com.yahoo.component.Version;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.serialization.AllocatedHostsSerializer;
+import com.yahoo.io.reader.NamedReader;
+import com.yahoo.path.Path;
+import com.yahoo.text.Utf8;
+import com.yahoo.vespa.config.ConfigDefinitionKey;
+import com.yahoo.vespa.config.server.filedistribution.FileDBRegistry;
+import com.yahoo.vespa.config.server.zookeeper.ZKApplicationPackage;
+import com.yahoo.vespa.curator.Curator;
+import com.yahoo.yolean.Exceptions;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
import java.util.Map;
+import java.util.logging.Level;
+
+import static com.yahoo.config.application.api.ApplicationPackage.DEPLOYMENT_FILE;
+import static com.yahoo.config.application.api.ApplicationPackage.DOCPROCCHAINS_DIR;
+import static com.yahoo.config.application.api.ApplicationPackage.HOSTS;
+import static com.yahoo.config.application.api.ApplicationPackage.MODELS_GENERATED_REPLICATED_DIR;
+import static com.yahoo.config.application.api.ApplicationPackage.PAGE_TEMPLATES_DIR;
+import static com.yahoo.config.application.api.ApplicationPackage.QUERY_PROFILES_DIR;
+import static com.yahoo.config.application.api.ApplicationPackage.ROUTINGTABLES_DIR;
+import static com.yahoo.config.application.api.ApplicationPackage.RULES_DIR;
+import static com.yahoo.config.application.api.ApplicationPackage.SCHEMAS_DIR;
+import static com.yahoo.config.application.api.ApplicationPackage.SEARCHCHAINS_DIR;
+import static com.yahoo.config.application.api.ApplicationPackage.SECURITY_DIR;
+import static com.yahoo.config.application.api.ApplicationPackage.SERVICES;
+import static com.yahoo.config.application.api.ApplicationPackage.VALIDATION_OVERRIDES;
+import static com.yahoo.vespa.config.server.session.SessionZooKeeperClient.getSessionPath;
+import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.DEFCONFIGS_ZK_SUBPATH;
+import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.META_ZK_PATH;
+import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USERAPP_ZK_SUBPATH;
+import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USER_DEFCONFIGS_ZK_SUBPATH;
/**
* Interface for initializing zookeeper and deploying an application package to zookeeper.
@@ -16,10 +56,11 @@ import java.util.Map;
*/
public class ZooKeeperDeployer {
- private final ZooKeeperClient zooKeeperClient;
+ private final Client client;
- public ZooKeeperDeployer(ZooKeeperClient client) {
- this.zooKeeperClient = client;
+ public ZooKeeperDeployer(Curator curator, DeployLogger logger, ApplicationId applicationId, long sessionId) {
+ Path sessionPath = getSessionPath(applicationId.tenant(), sessionId);
+ this.client = new Client(curator, logger, sessionPath);
}
/**
@@ -32,14 +73,259 @@ public class ZooKeeperDeployer {
*/
public void deploy(ApplicationPackage applicationPackage, Map<Version, FileRegistry> fileRegistryMap,
AllocatedHosts allocatedHosts) throws IOException {
- zooKeeperClient.initialize();
- zooKeeperClient.writeApplicationPackage(applicationPackage);
- zooKeeperClient.write(fileRegistryMap);
- zooKeeperClient.write(allocatedHosts);
+ client.initialize();
+ client.writeApplicationPackage(applicationPackage);
+ client.write(fileRegistryMap);
+ client.write(allocatedHosts);
}
public void cleanup() {
- zooKeeperClient.cleanupZooKeeper();
+ client.cleanupZooKeeper();
+ }
+
+ /**
+ * Reads and writes application package to and from ZooKeeper.
+ *
+ * @author hmusum
+ */
+ public static class Client {
+
+ private final Curator curator;
+ private final DeployLogger logger;
+ private final Path sessionPath; // session id
+
+ private static final ApplicationFile.PathFilter xmlFilter = path -> path.getName().endsWith(".xml");
+
+ public Client(Curator curator, DeployLogger logger, Path sessionPath) {
+ this.curator = curator;
+ this.logger = logger;
+ this.sessionPath = sessionPath;
+ }
+
+ /**
+ * Sets up basic node structure in ZooKeeper and purges old data.
+ * This is the first operation on ZK during deploy.
+ */
+ void initialize() {
+ curator.create(sessionPath);
+
+ for (String subPath : Arrays.asList(DEFCONFIGS_ZK_SUBPATH,
+ USER_DEFCONFIGS_ZK_SUBPATH,
+ USERAPP_ZK_SUBPATH,
+ ZKApplicationPackage.fileRegistryNode)) {
+ // TODO: The replaceFirst below is hackish.
+ curator.create(getZooKeeperAppPath().append(subPath.replaceFirst("/", "")));
+ }
+ }
+
+ /**
+ * Writes def files and user config into ZK.
+ *
+ * @param app the application package to feed to zookeeper
+ */
+ void writeApplicationPackage(ApplicationPackage app) {
+ try {
+ writeUserDefs(app);
+ writeSomeOf(app);
+ writeSchemas(app);
+ writeUserIncludeDirs(app, app.getUserIncludeDirs());
+ writeMetadata(app.getMetaData());
+ } catch (Exception e) {
+ throw new IllegalStateException("Unable to write vespa model to config server(s) " + System.getProperty("configsources") + "\n" +
+ "Please ensure that config server is started " +
+ "and check the vespa log for configserver errors. ", e);
+ }
+ }
+
+ private void writeSchemas(ApplicationPackage app) throws IOException {
+ Collection<NamedReader> schemas = app.getSchemas();
+ if (schemas.isEmpty()) return;
+
+ Path zkPath = getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SCHEMAS_DIR);
+ curator.create(zkPath);
+ // Ensures that ranking expressions and other files are also written
+ writeDir(app.getFile(ApplicationPackage.SEARCH_DEFINITIONS_DIR), zkPath);
+ writeDir(app.getFile(ApplicationPackage.SCHEMAS_DIR), zkPath);
+ for (NamedReader sd : schemas) {
+ curator.set(zkPath.append(sd.getName()), Utf8.toBytes(com.yahoo.io.IOUtils.readAll(sd.getReader())));
+ sd.getReader().close();
+ }
+ }
+
+ /**
+ * Writes some application package files into ZK - see writeApplicationPackage(app).
+ *
+ * @param app the application package to use as input.
+ * @throws IOException if not able to write to ZooKeeper
+ */
+ private void writeSomeOf(ApplicationPackage app) throws IOException {
+ // TODO: We should have a way of doing this which doesn't require repeating all the content
+ writeFile(app.getFile(Path.fromString(SERVICES)), getZooKeeperAppPath(USERAPP_ZK_SUBPATH));
+ writeFile(app.getFile(Path.fromString(HOSTS)), getZooKeeperAppPath(USERAPP_ZK_SUBPATH));
+ writeFile(app.getFile(Path.fromString(DEPLOYMENT_FILE.getName())), getZooKeeperAppPath(USERAPP_ZK_SUBPATH));
+ writeFile(app.getFile(Path.fromString(VALIDATION_OVERRIDES.getName())), getZooKeeperAppPath(USERAPP_ZK_SUBPATH));
+ writeDir(app.getFile(RULES_DIR),
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(RULES_DIR),
+ (path) -> path.getName().endsWith(ApplicationPackage.RULES_NAME_SUFFIX));
+ writeDir(app.getFile(QUERY_PROFILES_DIR),
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(QUERY_PROFILES_DIR),
+ xmlFilter);
+ writeDir(app.getFile(PAGE_TEMPLATES_DIR),
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(PAGE_TEMPLATES_DIR),
+ xmlFilter);
+ writeDir(app.getFile(Path.fromString(SEARCHCHAINS_DIR)),
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SEARCHCHAINS_DIR),
+ xmlFilter);
+ writeDir(app.getFile(Path.fromString(DOCPROCCHAINS_DIR)),
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(DOCPROCCHAINS_DIR),
+ xmlFilter);
+ writeDir(app.getFile(Path.fromString(ROUTINGTABLES_DIR)),
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(ROUTINGTABLES_DIR),
+ xmlFilter);
+ writeDir(app.getFile(MODELS_GENERATED_REPLICATED_DIR),
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(MODELS_GENERATED_REPLICATED_DIR));
+ writeDir(app.getFile(SECURITY_DIR),
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SECURITY_DIR));
+ }
+
+ private void writeDir(ApplicationFile file, Path zooKeeperAppPath) throws IOException {
+ writeDir(file, zooKeeperAppPath, (__) -> true);
+ }
+
+ private void writeDir(ApplicationFile dir, Path path, ApplicationFile.PathFilter filenameFilter) throws IOException {
+ if ( ! dir.isDirectory()) return;
+ for (ApplicationFile file : listFiles(dir, filenameFilter)) {
+ String name = file.getPath().getName();
+ if (name.startsWith(".")) continue; //.svn , .git ...
+ if (file.isDirectory()) {
+ curator.create(path.append(name));
+ writeDir(file, path.append(name), filenameFilter);
+ } else {
+ writeFile(file, path);
+ }
+ }
+ }
+
+ /**
+ * Like {@link ApplicationFile#listFiles(ApplicationFile.PathFilter)}
+ * with slightly different semantics: Never filter out directories.
+ */
+ private List<ApplicationFile> listFiles(ApplicationFile dir, ApplicationFile.PathFilter filter) {
+ List<ApplicationFile> rawList = dir.listFiles();
+ List<ApplicationFile> ret = new ArrayList<>();
+ if (rawList != null) {
+ for (ApplicationFile f : rawList) {
+ if (f.isDirectory()) {
+ ret.add(f);
+ } else {
+ if (filter.accept(f.getPath())) {
+ ret.add(f);
+ }
+ }
+ }
+ }
+ return ret;
+ }
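The helper above keeps every directory and only applies the filter to plain files, so recursion in writeDir can still descend into subdirectories whose names do not match the filter. The same semantics expressed against java.io.File, as an illustrative sketch (class and method names here are made up, not part of the patch):

import java.io.File;
import java.io.FileFilter;
import java.util.ArrayList;
import java.util.List;

class ListFilesSketch {

    // Directories always pass; ordinary files must be accepted by the filter.
    static List<File> listFilesKeepingDirectories(File dir, FileFilter filter) {
        List<File> result = new ArrayList<>();
        File[] raw = dir.listFiles();
        if (raw == null) return result;                       // dir is missing or not a directory
        for (File f : raw) {
            if (f.isDirectory() || filter.accept(f)) result.add(f);
        }
        return result;
    }
}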
+
+ private void writeFile(ApplicationFile file, Path zkPath) throws IOException {
+ if ( ! file.exists()) return;
+
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ try (InputStream inputStream = file.createInputStream()) {
+ inputStream.transferTo(baos);
+ baos.flush();
+ curator.set(zkPath.append(file.getPath().getName()), baos.toByteArray());
+ }
+ }
+
+ private void writeUserIncludeDirs(ApplicationPackage applicationPackage, List<String> userIncludeDirs) throws IOException {
+ for (String userInclude : userIncludeDirs) {
+ ApplicationFile dir = applicationPackage.getFile(Path.fromString(userInclude));
+ final List<ApplicationFile> files = dir.listFiles();
+ if (files == null || files.isEmpty()) {
+ curator.create(getZooKeeperAppPath(USERAPP_ZK_SUBPATH + "/" + userInclude));
+ }
+ writeDir(dir, getZooKeeperAppPath(USERAPP_ZK_SUBPATH + "/" + userInclude), xmlFilter);
+ }
+ }
+
+ /**
+ * Feeds all user-defined .def files from the application package into ZooKeeper (both into
+ * /defconfigs and /userdefconfigs).
+ */
+ private void writeUserDefs(ApplicationPackage applicationPackage) {
+ Map<ConfigDefinitionKey, UnparsedConfigDefinition> configDefs = applicationPackage.getAllExistingConfigDefs();
+ for (Map.Entry<ConfigDefinitionKey, UnparsedConfigDefinition> entry : configDefs.entrySet()) {
+ ConfigDefinitionKey key = entry.getKey();
+ String contents = entry.getValue().getUnparsedContent();
+ writeConfigDefinition(key.getName(), key.getNamespace(), getZooKeeperAppPath(USER_DEFCONFIGS_ZK_SUBPATH), contents);
+ writeConfigDefinition(key.getName(), key.getNamespace(), getZooKeeperAppPath(DEFCONFIGS_ZK_SUBPATH), contents);
+ }
+ logger.log(Level.FINE, configDefs.size() + " user config definitions");
+ }
+
+ private void writeConfigDefinition(String name, String namespace, Path path, String data) {
+ curator.set(path.append(namespace + "." + name), Utf8.toBytes(data));
+ }
+
+ private void write(Version vespaVersion, FileRegistry fileRegistry) {
+ String exportedRegistry = FileDBRegistry.exportRegistry(fileRegistry);
+ curator.set(getZooKeeperAppPath(ZKApplicationPackage.fileRegistryNode).append(vespaVersion.toFullString()),
+ Utf8.toBytes(exportedRegistry));
+ }
+
+ /**
+ * Feeds application metadata to ZooKeeper. Used by the config model to create config
+ * for application metadata.
+ *
+ * @param metaData the application metadata
+ */
+ private void writeMetadata(ApplicationMetaData metaData) {
+ curator.set(getZooKeeperAppPath(META_ZK_PATH), metaData.asJsonBytes());
+ }
+
+ void cleanupZooKeeper() {
+ try {
+ List.of(DEFCONFIGS_ZK_SUBPATH, USER_DEFCONFIGS_ZK_SUBPATH, USERAPP_ZK_SUBPATH)
+ .forEach(path -> curator.delete(getZooKeeperAppPath(path)));
+ } catch (Exception e) {
+ logger.log(Level.WARNING, "Could not clean up in zookeeper: " + Exceptions.toMessageString(e));
+ // Might be called in an exception handler before re-throw, so do not throw here.
+ }
+ }
+
+ /**
+ * Gets a full ZK application path
+ *
+ * @return the full ZK application path
+ */
+ private Path getZooKeeperAppPath() {
+ return getZooKeeperAppPath(null);
+ }
+
+ /**
+ * Gets a full ZK application path
+ *
+ * @param trailingPath trailing part of path to be appended to ZK app path
+ * @return the full ZK application path, including the trailing path if set
+ */
+ private Path getZooKeeperAppPath(String trailingPath) {
+ if (trailingPath == null) return sessionPath;
+
+ return sessionPath.append(trailingPath);
+ }
+
+ public void write(AllocatedHosts hosts) throws IOException {
+ curator.set(sessionPath.append(ZKApplicationPackage.allocatedHostsNode),
+ AllocatedHostsSerializer.toJson(hosts));
+ }
+
+ public void write(Map<Version, FileRegistry> fileRegistryMap) {
+ for (Map.Entry<Version, FileRegistry> versionFileRegistryEntry : fileRegistryMap.entrySet()) {
+ write(versionFileRegistryEntry.getKey(), versionFileRegistryEntry.getValue());
+ }
+ }
+
}
}
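With the constructor change above, callers build the deployer from a Curator, a DeployLogger, an ApplicationId and a session id, and fall back to cleanup() if deploy() fails. A hedged usage sketch mirroring the pattern SessionPreparer.writeStateToZooKeeper() uses later in this diff (the class below and its import locations are assumptions, not part of the patch):

import com.yahoo.component.Version;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.application.api.FileRegistry;
import com.yahoo.config.provision.AllocatedHosts;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.config.server.deploy.ZooKeeperDeployer;
import com.yahoo.vespa.curator.Curator;

import java.io.IOException;
import java.util.Map;

class ZooKeeperDeployExample {

    static void deployToZooKeeper(Curator curator, DeployLogger logger, ApplicationId applicationId, long sessionId,
                                  ApplicationPackage applicationPackage,
                                  Map<Version, FileRegistry> fileRegistryMap,
                                  AllocatedHosts allocatedHosts) {
        ZooKeeperDeployer deployer = new ZooKeeperDeployer(curator, logger, applicationId, sessionId);
        try {
            deployer.deploy(applicationPackage, fileRegistryMap, allocatedHosts);
        } catch (IOException | RuntimeException e) {
            deployer.cleanup();   // best effort: cleanupZooKeeper() logs and swallows its own failures
            throw new RuntimeException("Error preparing session", e);
        }
    }
}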
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java
index da18c4e4fcc..769ac3923c4 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java
@@ -24,12 +24,15 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;
import java.time.Clock;
+import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
import static com.yahoo.yolean.Exceptions.uncheck;
+import static java.util.logging.Level.FINE;
+import static java.util.logging.Level.INFO;
/**
* Global file directory, holding files for file distribution for all deployed applications.
@@ -40,7 +43,6 @@ public class FileDirectory extends AbstractComponent {
private static final Logger log = Logger.getLogger(FileDirectory.class.getName());
private final Locks<FileReference> locks = new Locks<>(1, TimeUnit.MINUTES);
-
private final File root;
@Inject
@@ -67,7 +69,7 @@ public class FileDirectory extends AbstractComponent {
}
}
- static private class Filter implements FilenameFilter {
+ private static class Filter implements FilenameFilter {
@Override
public boolean accept(File dir, String name) {
return !".".equals(name) && !"..".equals(name) ;
@@ -78,17 +80,24 @@ public class FileDirectory extends AbstractComponent {
return root.getAbsolutePath() + "/" + ref.value();
}
- public File getFile(FileReference reference) {
+ public Optional<File> getFile(FileReference reference) {
ensureRootExist();
File dir = new File(getPath(reference));
- if (!dir.exists())
- throw new IllegalArgumentException("File reference '" + reference.value() + "' with absolute path '" + dir.getAbsolutePath() + "' does not exist.");
- if (!dir.isDirectory())
- throw new IllegalArgumentException("File reference '" + reference.value() + "' with absolute path '" + dir.getAbsolutePath() + "' is not a directory.");
- File [] files = dir.listFiles(new Filter());
- if (files == null || files.length == 0)
- throw new IllegalArgumentException("File reference '" + reference.value() + "' with absolute path '" + dir.getAbsolutePath() + " does not contain any files");
- return files[0];
+ if (!dir.exists()) {
+ // This is common when this config server has not yet received the file from the config server the app was deployed on
+ log.log(FINE, "File reference '" + reference.value() + "' ('" + dir.getAbsolutePath() + "') does not exist.");
+ return Optional.empty();
+ }
+ if (!dir.isDirectory()) {
+ log.log(INFO, "File reference '" + reference.value() + "' ('" + dir.getAbsolutePath() + "') is not a directory.");
+ return Optional.empty();
+ }
+ File[] files = dir.listFiles(new Filter());
+ if (files == null || files.length == 0) {
+ log.log(INFO, "File reference '" + reference.value() + "' ('" + dir.getAbsolutePath() + "') does not contain any files");
+ return Optional.empty();
+ }
+ return Optional.of(files[0]);
}
public File getRoot() { return root; }
@@ -127,16 +136,16 @@ public class FileDirectory extends AbstractComponent {
public void delete(FileReference fileReference, Function<FileReference, Boolean> isInUse) {
try (Lock lock = locks.lock(fileReference)) {
if (isInUse.apply(fileReference))
- log.log(Level.FINE, "Unable to delete file reference '" + fileReference.value() + "' since it is still in use");
+ log.log(FINE, "Unable to delete file reference '" + fileReference.value() + "' since it is still in use");
else
deleteDirRecursively(destinationDir(fileReference));
}
}
private void deleteDirRecursively(File dir) {
- log.log(Level.FINE, "Will delete dir " + dir);
+ log.log(FINE, "Will delete dir " + dir);
if ( ! IOUtils.recursiveDeleteDir(dir))
- log.log(Level.INFO, "Failed to delete " + dir);
+ log.log(INFO, "Failed to delete " + dir);
}
// Check if we should add file, it might already exist
@@ -156,7 +165,7 @@ public class FileDirectory extends AbstractComponent {
// update last modified time so that maintainer deleting unused file references considers this as recently used
destinationDir.setLastModified(Clock.systemUTC().instant().toEpochMilli());
- log.log(Level.FINE, "Directory for file reference '" + fileReference.value() + "' already exists and has all content");
+ log.log(FINE, "Directory for file reference '" + fileReference.value() + "' already exists and has all content");
return false;
}
@@ -179,7 +188,7 @@ public class FileDirectory extends AbstractComponent {
// Copy files to temp dir
File tempDestination = new File(tempDestinationDir.toFile(), source.getName());
- log.log(Level.FINE, () -> "Copying " + source.getAbsolutePath() + " to " + tempDestination.getAbsolutePath());
+ log.log(FINE, () -> "Copying " + source.getAbsolutePath() + " to " + tempDestination.getAbsolutePath());
if (source.isDirectory())
IOUtils.copyDirectory(source, tempDestination, -1);
else
@@ -187,7 +196,7 @@ public class FileDirectory extends AbstractComponent {
// Move to destination dir
Path destinationDir = destinationDir(reference).toPath();
- log.log(Level.FINE, () -> "Moving " + tempDestinationDir + " to " + destinationDir);
+ log.log(FINE, () -> "Moving " + tempDestinationDir + " to " + destinationDir);
Files.move(tempDestinationDir, destinationDir);
return reference;
} catch (IOException e) {
@@ -199,7 +208,7 @@ public class FileDirectory extends AbstractComponent {
private void logfileInfo(File file ) throws IOException {
BasicFileAttributes basicFileAttributes = Files.readAttributes(file.toPath(), BasicFileAttributes.class);
- log.log(Level.FINE, () -> "Adding file " + file.getAbsolutePath() + " (created " + basicFileAttributes.creationTime() +
+ log.log(FINE, () -> "Adding file " + file.getAbsolutePath() + " (created " + basicFileAttributes.creationTime() +
", modified " + basicFileAttributes.lastModifiedTime() +
", size " + basicFileAttributes.size() + ")");
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
index 57d57d16d2f..e45c3a8e380 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
@@ -12,7 +12,6 @@ import com.yahoo.jrt.StringValue;
import com.yahoo.jrt.Supervisor;
import com.yahoo.jrt.Transport;
import com.yahoo.vespa.config.ConnectionPool;
-import com.yahoo.vespa.filedistribution.EmptyFileReferenceData;
import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceCompressor;
@@ -20,14 +19,16 @@ import com.yahoo.vespa.filedistribution.FileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
import com.yahoo.vespa.filedistribution.LazyFileReferenceData;
import com.yahoo.vespa.filedistribution.LazyTemporaryStorageFileReferenceData;
-import com.yahoo.yolean.Exceptions;
+
import java.io.File;
import java.io.IOException;
+import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -35,6 +36,10 @@ import java.util.logging.Level;
import java.util.logging.Logger;
import static com.yahoo.vespa.config.server.filedistribution.FileDistributionUtil.getOtherConfigServersInCluster;
+import static com.yahoo.vespa.config.server.filedistribution.FileServer.FileApiErrorCodes.NOT_FOUND;
+import static com.yahoo.vespa.config.server.filedistribution.FileServer.FileApiErrorCodes.OK;
+import static com.yahoo.vespa.config.server.filedistribution.FileServer.FileApiErrorCodes.TIMEOUT;
+import static com.yahoo.vespa.config.server.filedistribution.FileServer.FileApiErrorCodes.TRANSFER_FAILED;
import static com.yahoo.vespa.filedistribution.FileReferenceData.CompressionType;
import static com.yahoo.vespa.filedistribution.FileReferenceData.CompressionType.gzip;
import static com.yahoo.vespa.filedistribution.FileReferenceData.Type;
@@ -54,10 +59,11 @@ public class FileServer {
private final List<CompressionType> compressionTypes; // compression types to use, in preferred order
// TODO: Move to filedistribution module, so that it can be used by both clients and servers
- private enum FileApiErrorCodes {
+ enum FileApiErrorCodes {
OK(0, "OK"),
NOT_FOUND(1, "File reference not found"),
- TIMEOUT(2, "Timeout");
+ TIMEOUT(2, "Timeout"),
+ TRANSFER_FAILED(3, "Failed transferring file");
private final int code;
private final String description;
FileApiErrorCodes(int code, String description) {
@@ -103,40 +109,33 @@ public class FileServer {
}
private boolean hasFile(FileReference reference) {
- try {
- return fileDirectory.getFile(reference).exists();
- } catch (IllegalArgumentException e) {
- log.log(Level.FINE, () -> "Failed locating " + reference + ": " + e.getMessage());
- }
+ Optional<File> file = fileDirectory.getFile(reference);
+ if (file.isPresent())
+ return file.get().exists();
+
+ log.log(Level.FINE, () -> "Failed locating " + reference);
return false;
}
FileDirectory getRootDir() { return fileDirectory; }
- void startFileServing(FileReference reference, Receiver target, Set<CompressionType> acceptedCompressionTypes) {
- if ( ! fileDirectory.getFile(reference).exists()) return;
-
- File file = this.fileDirectory.getFile(reference);
- log.log(Level.FINE, () -> "Start serving " + reference + " with file '" + file.getAbsolutePath() + "'");
- FileReferenceData fileData = EmptyFileReferenceData.empty(reference, file.getName());
- try {
- fileData = readFileReferenceData(reference, acceptedCompressionTypes);
+ void startFileServing(FileReference reference, File file, Receiver target, Set<CompressionType> acceptedCompressionTypes) {
+ var absolutePath = file.getAbsolutePath();
+ try (FileReferenceData fileData = fileReferenceData(reference, acceptedCompressionTypes, file)) {
+ log.log(Level.FINE, () -> "Start serving " + reference.value() + " with file '" + absolutePath + "'");
target.receive(fileData, new ReplayStatus(0, "OK"));
- log.log(Level.FINE, () -> "Done serving " + reference.value() + " with file '" + file.getAbsolutePath() + "'");
- } catch (IOException e) {
- String errorDescription = "For" + reference.value() + ": failed reading file '" + file.getAbsolutePath() + "'";
- log.warning(errorDescription + " for sending to '" + target.toString() + "'. " + e.getMessage());
- target.receive(fileData, new ReplayStatus(1, errorDescription));
+ log.log(Level.FINE, () -> "Done serving " + reference.value() + " with file '" + absolutePath + "'");
+ } catch (IOException ioe) {
+ throw new UncheckedIOException("For " + reference.value() + ": failed reading file '" + absolutePath + "'" +
+ " for sending to '" + target.toString() + "'. ", ioe);
} catch (Exception e) {
- log.log(Level.WARNING, "Failed serving " + reference + ": " + Exceptions.toMessageString(e));
- } finally {
- fileData.close();
+ throw new RuntimeException("Failed serving " + reference.value() + " to '" + target + "': ", e);
}
}
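The rewritten startFileServing relies on try-with-resources to close the FileReferenceData, replacing the old explicit finally block; the resource is closed before either catch block runs. A tiny, self-contained plain-Java illustration of that ordering (the demo class is made up):

class CloseBeforeCatchDemo {

    static class Resource implements AutoCloseable {
        @Override public void close() { System.out.println("closed"); }
    }

    public static void main(String[] args) {
        try (Resource r = new Resource()) {
            throw new RuntimeException("serving failed");
        } catch (RuntimeException e) {
            System.out.println("caught after close: " + e.getMessage());  // prints after "closed"
        }
    }
}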
- private FileReferenceData readFileReferenceData(FileReference reference, Set<CompressionType> acceptedCompressionTypes) throws IOException {
- File file = this.fileDirectory.getFile(reference);
-
+ private FileReferenceData fileReferenceData(FileReference reference,
+ Set<CompressionType> acceptedCompressionTypes,
+ File file) throws IOException {
if (file.isDirectory()) {
Path tempFile = Files.createTempFile("filereferencedata", reference.value());
CompressionType compressionType = chooseCompressionType(acceptedCompressionTypes);
@@ -172,20 +171,21 @@ public class FileServer {
Set<CompressionType> acceptedCompressionTypes) {
if (Instant.now().isAfter(deadline)) {
log.log(Level.INFO, () -> "Deadline exceeded for request for file reference '" + fileReference + "' from " + client);
- return FileApiErrorCodes.TIMEOUT;
+ return TIMEOUT;
}
- boolean fileExists;
try {
var fileReferenceDownload = new FileReferenceDownload(fileReference, client, downloadFromOtherSourceIfNotFound);
- fileExists = hasFileDownloadIfNeeded(fileReferenceDownload);
- if (fileExists) startFileServing(fileReference, receiver, acceptedCompressionTypes);
- } catch (IllegalArgumentException e) {
- fileExists = false;
+ var file = getFileDownloadIfNeeded(fileReferenceDownload);
+ if (file.isEmpty()) return NOT_FOUND;
+
+ startFileServing(fileReference, file.get(), receiver, acceptedCompressionTypes);
+ } catch (Exception e) {
log.warning("Failed serving file reference '" + fileReference + "', request from " + client + " failed with: " + e.getMessage());
+ return TRANSFER_FAILED;
}
- return (fileExists ? FileApiErrorCodes.OK : FileApiErrorCodes.NOT_FOUND);
+ return OK;
}
/* Choose the first compression type (list is in preferred order) that matches an accepted compression type, or fail */
@@ -198,9 +198,11 @@ public class FileServer {
acceptedCompressionTypes + ", compression types server can use: " + compressionTypes);
}
- boolean hasFileDownloadIfNeeded(FileReferenceDownload fileReferenceDownload) {
+ public Optional<File> getFileDownloadIfNeeded(FileReferenceDownload fileReferenceDownload) {
FileReference fileReference = fileReferenceDownload.fileReference();
- if (hasFile(fileReference)) return true;
+ Optional<File> file = fileDirectory.getFile(fileReference);
+ if (file.isPresent())
+ return file;
if (fileReferenceDownload.downloadFromOtherSourceIfNotFound()) {
log.log(Level.FINE, "File not found, downloading from another source");
@@ -209,13 +211,13 @@ public class FileServer {
FileReferenceDownload newDownload = new FileReferenceDownload(fileReference,
fileReferenceDownload.client(),
false);
- boolean fileExists = downloader.getFile(newDownload).isPresent();
- if ( ! fileExists)
+ file = downloader.getFile(newDownload);
+ if (file.isEmpty())
log.log(Level.INFO, "Failed downloading '" + fileReferenceDownload + "'");
- return fileExists;
+ return file;
} else {
log.log(Level.FINE, "File not found, will not download from another source");
- return false;
+ return Optional.empty();
}
}
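The reworked getFileDownloadIfNeeded returns the local file when present and otherwise tries the other config servers at most once, since the nested FileReferenceDownload is created with downloadFromOtherSourceIfNotFound set to false. A hedged caller sketch using only the types and signatures visible in this diff (the wrapper class and the client name string are made up):

import com.yahoo.config.FileReference;
import com.yahoo.vespa.config.server.filedistribution.FileServer;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;

import java.io.File;
import java.util.Optional;

class FileDownloadSketch {

    // Returns the file for the reference, letting the server fetch it from the other
    // config servers if it is not on local disk yet.
    static Optional<File> resolve(FileServer fileServer, FileReference reference) {
        return fileServer.getFileDownloadIfNeeded(new FileReferenceDownload(reference, "FileDownloadSketch", true));
    }
}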
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
index 22ef6cc2547..031574bec77 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
@@ -66,15 +66,15 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
Optional<Session> session = applicationRepository.getActiveSession(applicationId);
if (session.isEmpty()) continue; // App might be deleted after call to listApplications() or not activated yet (bootstrap phase)
- FileReference appFileReference = session.get().getApplicationPackageReference();
- if (appFileReference != null) {
+ Optional<FileReference> appFileReference = session.get().getApplicationPackageReference();
+ if (appFileReference.isPresent()) {
long sessionId = session.get().getSessionId();
attempts++;
- if (!fileReferenceExistsOnDisk(downloadDirectory, appFileReference)) {
+ if (!fileReferenceExistsOnDisk(downloadDirectory, appFileReference.get())) {
log.fine(() -> "Downloading application package with file reference " + appFileReference +
" for " + applicationId + " (session " + sessionId + ")");
- FileReferenceDownload download = new FileReferenceDownload(appFileReference,
+ FileReferenceDownload download = new FileReferenceDownload(appFileReference.get(),
this.getClass().getSimpleName(),
false);
if (fileDownloader.getFile(download).isEmpty()) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java
index 0e45d42efcf..328bd143d81 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ActivatedModelsBuilder.java
@@ -19,7 +19,7 @@ import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.vespa.config.server.ServerCache;
import com.yahoo.vespa.config.server.application.Application;
import com.yahoo.vespa.config.server.application.ApplicationCuratorDatabase;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.deploy.ModelContextImpl;
import com.yahoo.vespa.config.server.monitoring.MetricUpdater;
import com.yahoo.vespa.config.server.monitoring.Metrics;
@@ -51,7 +51,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
private final TenantName tenant;
private final long applicationGeneration;
private final SessionZooKeeperClient zkClient;
- private final Optional<ApplicationSet> currentActiveApplicationSet;
+ private final Optional<ApplicationVersions> activeApplicationVersions;
private final ConfigDefinitionRepo configDefinitionRepo;
private final Metrics metrics;
private final Curator curator;
@@ -62,7 +62,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
public ActivatedModelsBuilder(TenantName tenant,
long applicationGeneration,
SessionZooKeeperClient zkClient,
- Optional<ApplicationSet> currentActiveApplicationSet,
+ Optional<ApplicationVersions> activeApplicationVersions,
ExecutorService executor,
Curator curator,
Metrics metrics,
@@ -77,7 +77,7 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
this.tenant = tenant;
this.applicationGeneration = applicationGeneration;
this.zkClient = zkClient;
- this.currentActiveApplicationSet = currentActiveApplicationSet;
+ this.activeApplicationVersions = activeApplicationVersions;
this.configDefinitionRepo = configDefinitionRepo;
this.metrics = metrics;
this.curator = curator;
@@ -122,8 +122,8 @@ public class ActivatedModelsBuilder extends ModelsBuilder<Application> {
}
private Optional<Model> modelOf(Version version) {
- if (currentActiveApplicationSet.isEmpty()) return Optional.empty();
- return currentActiveApplicationSet.get().get(version).map(Application::getModel);
+ if (activeApplicationVersions.isEmpty()) return Optional.empty();
+ return activeApplicationVersions.get().get(version).map(Application::getModel);
}
private static <T> Optional<T> getForVersionOrLatest(Map<Version, T> map, Version version) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java
index a89ba88bfbe..af611b131f6 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java
@@ -29,14 +29,13 @@ import com.yahoo.config.provision.Zone;
import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.vespa.config.server.application.Application;
import com.yahoo.vespa.config.server.application.ApplicationCuratorDatabase;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.deploy.ModelContextImpl;
import com.yahoo.vespa.config.server.host.HostValidator;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
import com.yahoo.vespa.config.server.session.PrepareParams;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.yolean.Exceptions;
import java.io.File;
import java.io.IOException;
@@ -67,7 +66,7 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
private final HostValidator hostValidator;
private final PrepareParams params;
private final FileRegistry fileRegistry;
- private final Optional<ApplicationSet> currentActiveApplicationSet;
+ private final Optional<ApplicationVersions> activeApplicationVersions;
private final Curator curator;
private final ExecutorService executor;
@@ -84,7 +83,7 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
HostValidator hostValidator,
DeployLogger deployLogger,
PrepareParams params,
- Optional<ApplicationSet> currentActiveApplicationSet,
+ Optional<ApplicationVersions> activeApplicationVersions,
ConfigserverConfig configserverConfig,
Zone zone) {
super(modelFactoryRegistry, configserverConfig, zone, hostProvisionerProvider, deployLogger);
@@ -97,7 +96,7 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
this.hostValidator = hostValidator;
this.curator = curator;
this.params = params;
- this.currentActiveApplicationSet = currentActiveApplicationSet;
+ this.activeApplicationVersions = activeApplicationVersions;
this.executor = executor;
}
@@ -149,8 +148,8 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
}
private Optional<Model> modelOf(Version version) {
- if (currentActiveApplicationSet.isEmpty()) return Optional.empty();
- return currentActiveApplicationSet.get().get(version).map(Application::getModel);
+ if (activeApplicationVersions.isEmpty()) return Optional.empty();
+ return activeApplicationVersions.get().get(version).map(Application::getModel);
}
private HostProvisioner createHostProvisioner(ApplicationPackage applicationPackage, Provisioned provisioned) {
@@ -213,7 +212,7 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
zone(),
Set.copyOf(containerEndpoints),
params.isBootstrap(),
- currentActiveApplicationSet.isEmpty(),
+ activeApplicationVersions.isEmpty(),
LegacyFlags.from(applicationPackage, flagSource),
endpointCertificateSecrets,
params.athenzDomain(),
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java
index eee7d6ec63d..78a8cda7c34 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java
@@ -31,7 +31,7 @@ import com.yahoo.vespa.config.server.ConfigActivationListener;
import com.yahoo.vespa.config.server.GetConfigContext;
import com.yahoo.vespa.config.server.RequestHandler;
import com.yahoo.vespa.config.server.SuperModelRequestHandler;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.filedistribution.FileServer;
import com.yahoo.vespa.config.server.host.HostRegistry;
import com.yahoo.vespa.config.server.monitoring.MetricUpdater;
@@ -44,6 +44,7 @@ import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReceiver;
import com.yahoo.vespa.filedistribution.FileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
+
import java.nio.ByteBuffer;
import java.time.Duration;
import java.util.Arrays;
@@ -61,12 +62,13 @@ import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
-import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.yahoo.vespa.filedistribution.FileReferenceData.CompressionType;
+import static java.util.logging.Level.FINE;
+import static java.util.logging.Level.WARNING;
/**
* An RPC server class that handles the config protocol RPC method "getConfigV3".
@@ -77,8 +79,6 @@ import static com.yahoo.vespa.filedistribution.FileReferenceData.CompressionType
// TODO: Split business logic out of this
public class RpcServer implements Runnable, ConfigActivationListener, TenantListener {
- static final String getConfigMethodName = "getConfigV3";
-
private static final int TRACELEVEL = 6;
static final int TRACELEVEL_DEBUG = 9;
private static final String THREADPOOL_NAME = "rpcserver worker pool";
@@ -158,9 +158,6 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
* Uses the template pattern to call methods in classes that extend RpcServer.
*/
private void getConfigV3(Request req) {
- if (log.isLoggable(Level.FINEST)) {
- log.log(Level.FINEST, getConfigMethodName);
- }
req.detach();
rpcAuthorizer.authorizeConfigRequest(req)
.thenRun(() -> addToRequestQueue(JRTServerConfigRequestV3.createFromRequest(req)));
@@ -184,7 +181,7 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
@Override
public void run() {
- log.log(Level.FINE, "Rpc server will listen on port " + spec.port());
+ log.log(FINE, "Rpc server will listen on port " + spec.port());
try {
Acceptor acceptor = supervisor.listen(spec);
isRunning = true;
@@ -260,25 +257,23 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
* This method should be called when config is activated in the server.
*/
@Override
- public void configActivated(ApplicationSet applicationSet) {
- ApplicationId applicationId = applicationSet.getId();
+ public void configActivated(ApplicationVersions applicationVersions) {
+ ApplicationId applicationId = applicationVersions.getId();
ApplicationState state = getState(applicationId);
- state.setActiveGeneration(applicationSet.getApplicationGeneration());
- reloadSuperModel(applicationSet);
+ state.setActiveGeneration(applicationVersions.applicationGeneration());
+ reloadSuperModel(applicationVersions);
configActivated(applicationId);
}
- private void reloadSuperModel(ApplicationSet applicationSet) {
- superModelRequestHandler.activateConfig(applicationSet);
+ private void reloadSuperModel(ApplicationVersions applicationVersions) {
+ superModelRequestHandler.activateConfig(applicationVersions);
configActivated(ApplicationId.global());
}
void configActivated(ApplicationId applicationId) {
List<DelayedConfigResponses.DelayedConfigResponse> responses = delayedConfigResponses.drainQueue(applicationId);
String logPre = TenantRepository.logPre(applicationId);
- if (log.isLoggable(Level.FINE)) {
- log.log(Level.FINE, logPre + "Start of configActivated: " + responses.size() + " requests on delayed requests queue");
- }
+ log.log(FINE, () -> logPre + "Start of configActivated: " + responses.size() + " requests on delayed requests queue");
int responsesSent = 0;
CompletionService<Boolean> completionService = new ExecutorCompletionService<>(executorService);
while (!responses.isEmpty()) {
@@ -287,15 +282,13 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
// Doing cancel here deals with the case where the timer is already running or has not run, so
// there is no need for any extra check.
if (delayedConfigResponse.cancel()) {
- if (log.isLoggable(Level.FINE)) {
- logRequestDebug(Level.FINE, logPre + "Timer cancelled for ", delayedConfigResponse.request);
- }
+ log.log(FINE, () -> logPre + "Timer cancelled for " + delayedConfigResponse.request);
// Do not wait for this request if we were unable to execute
if (addToRequestQueue(delayedConfigResponse.request, false, completionService)) {
responsesSent++;
}
} else {
- log.log(Level.FINE, () -> logPre + "Timer already cancelled or finished or never scheduled");
+ log.log(FINE, () -> logPre + "Timer already cancelled or finished or never scheduled");
}
}
@@ -306,15 +299,6 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
Thread.currentThread().interrupt();
}
}
-
- if (log.isLoggable(Level.FINE))
- log.log(Level.FINE, logPre + "Finished activating " + responsesSent + " requests");
- }
-
- private void logRequestDebug(Level level, String message, JRTServerConfigRequest request) {
- if (log.isLoggable(level)) {
- log.log(level, message + request.getShortDescription());
- }
}
@Override
@@ -325,9 +309,7 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
}
public void respond(JRTServerConfigRequest request) {
- if (log.isLoggable(Level.FINE)) {
- log.log(Level.FINE, "Trace at request return:\n" + request.getRequestTrace().toString());
- }
+ log.log(FINE, () -> "Trace when responding:\n" + request.getRequestTrace().toString());
request.getRequest().returnRequest();
}
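Several changes in this file replace if (log.isLoggable(...)) guards with the supplier overload of Logger.log, which defers building the message until the logger's own level check passes. A small, runnable java.util.logging sketch of that behaviour (class and method names here are illustrative):

import java.util.logging.Level;
import java.util.logging.Logger;

class LazyLoggingDemo {

    private static final Logger log = Logger.getLogger(LazyLoggingDemo.class.getName());

    public static void main(String[] args) {
        log.setLevel(Level.INFO);                            // FINE is disabled
        log.log(Level.FINE, () -> expensiveMessage());       // supplier is never invoked
        log.log(Level.INFO, () -> expensiveMessage());       // supplier runs only for enabled levels
    }

    private static String expensiveMessage() {
        System.out.println("building message");              // printed once, for the INFO call only
        return "details";
    }
}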
@@ -344,7 +326,7 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
if (GetConfigProcessor.logDebug(trace)) {
String message = "Did not find tenant for host '" + hostname + "', using " + TenantName.defaultName() +
". Hosts in host registry: " + hostRegistry.getAllHosts();
- log.log(Level.FINE, () -> message);
+ log.log(FINE, () -> message);
trace.trace(6, message);
}
return Optional.empty();
@@ -368,7 +350,6 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
public Boolean addToRequestQueue(JRTServerConfigRequest request, boolean forceResponse, CompletionService<Boolean> completionService) {
// It's no longer delayed if we get here
request.setDelayedResponse(false);
- //ConfigDebug.logDebug(log, System.currentTimeMillis(), request.getConfigKey(), "RpcServer.addToRequestQueue()");
try {
final GetConfigProcessor task = new GetConfigProcessor(this, request, forceResponse);
if (completionService == null) {
@@ -405,7 +386,7 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
"'. Request from host '" + request.getClientHostName() + "'";
metrics.incUnknownHostRequests();
trace.trace(TRACELEVEL, msg);
- log.log(Level.WARNING, msg);
+ log.log(WARNING, msg);
return GetConfigContext.empty();
}
RequestHandler handler = requestHandler.get();
@@ -430,7 +411,7 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
@Override
public void onTenantDelete(TenantName tenant) {
- log.log(Level.FINE, () -> TenantRepository.logPre(tenant) +
+ log.log(FINE, () -> TenantRepository.logPre(tenant) +
"Tenant deleted, removing request handler and cleaning host registry");
tenants.remove(tenant);
}
@@ -500,7 +481,8 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
Request request = createMetaRequest(fileData);
invokeRpcIfValidConnection(request);
if (request.isError()) {
- log.warning("Failed delivering meta for reference '" + fileData.fileReference().value() + "' with file '" + fileData.filename() + "' to " +
+ log.log(WARNING, () -> "Failed delivering meta for reference '" + fileData.fileReference().value() +
+ "' with file '" + fileData.filename() + "' to " +
target.toString() + " with error: '" + request.errorMessage() + "'.");
return 1;
} else {
@@ -518,7 +500,8 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
request.parameters().add(new StringValue(fileData.filename()));
request.parameters().add(new StringValue(fileData.type().name()));
request.parameters().add(new Int64Value(fileData.size()));
- // Only add paramter if not gzip, this is default and old clients will not handle the extra parameter
+ // Only add the parameter if compression type is not gzip; gzip is the default and old clients will not handle the extra parameter
+ // TODO Always add parameter in Vespa 9
if (fileData.compressionType() != CompressionType.gzip)
request.parameters().add(new StringValue(fileData.compressionType().name()));
return request;
@@ -532,7 +515,7 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
request.parameters().add(new DataValue(buf));
invokeRpcIfValidConnection(request);
if (request.isError()) {
- throw new IllegalArgumentException("Failed delivering reference '" + ref.value() + "' to " +
+ throw new IllegalArgumentException("Failed delivering part of reference '" + ref.value() + "' to " +
target.toString() + " with error: '" + request.errorMessage() + "'.");
} else {
if (request.returnValues().get(0).asInt32() != 0) {
@@ -550,7 +533,8 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
request.parameters().add(new StringValue(status.getDescription()));
invokeRpcIfValidConnection(request);
if (request.isError()) {
- throw new IllegalArgumentException("Failed delivering reference '" + fileData.fileReference().value() + "' with file '" + fileData.filename() + "' to " +
+ throw new IllegalArgumentException("Failed delivering eof for reference '" + fileData.fileReference().value() +
+ "' with file '" + fileData.filename() + "' to " +
target.toString() + " with error: '" + request.errorMessage() + "'.");
} else {
if (request.returnValues().get(0).asInt32() != 0) {
@@ -583,7 +567,6 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
acceptedCompressionTypes = Arrays.stream(request.parameters().get(2).asStringArray())
.map(CompressionType::valueOf)
.collect(Collectors.toSet());
- log.log(Level.FINE, "acceptedCompressionTypes=" + acceptedCompressionTypes);
fileServer.serveFile(reference, downloadFromOtherSourceIfNotFound, acceptedCompressionTypes, request, receiver);
});
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSession.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSession.java
index 12c61272f20..aa6d33fbda8 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSession.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSession.java
@@ -2,7 +2,7 @@
package com.yahoo.vespa.config.server.session;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import java.util.Objects;
import java.util.Optional;
@@ -15,7 +15,7 @@ import java.util.Optional;
*/
public class RemoteSession extends Session {
- private final Optional<ApplicationSet> applicationSet;
+ private final Optional<ApplicationVersions> applicationVersions;
/**
* Creates a session. This involves loading the application, validating it and distributing it.
@@ -36,17 +36,17 @@ public class RemoteSession extends Session {
* @param zooKeeperClient a SessionZooKeeperClient instance
* @param applicationSet current application set for this session
*/
- RemoteSession(TenantName tenant, long sessionId, SessionZooKeeperClient zooKeeperClient, Optional<ApplicationSet> applicationSet) {
+ RemoteSession(TenantName tenant, long sessionId, SessionZooKeeperClient zooKeeperClient, Optional<ApplicationVersions> applicationSet) {
super(tenant, sessionId, zooKeeperClient);
- this.applicationSet = applicationSet;
+ this.applicationVersions = applicationSet;
}
@Override
- public Optional<ApplicationSet> applicationSet() { return applicationSet; }
+ public Optional<ApplicationVersions> applicationVersions() { return applicationVersions; }
- public synchronized RemoteSession activated(ApplicationSet applicationSet) {
- Objects.requireNonNull(applicationSet, "applicationSet cannot be null");
- return new RemoteSession(tenant, sessionId, sessionZooKeeperClient, Optional.of(applicationSet));
+ public synchronized RemoteSession activated(ApplicationVersions applicationVersions) {
+ Objects.requireNonNull(applicationVersions, "applicationVersions cannot be null");
+ return new RemoteSession(tenant, sessionId, sessionZooKeeperClient, Optional.of(applicationVersions));
}
public synchronized RemoteSession deactivated() {
@@ -55,7 +55,7 @@ public class RemoteSession extends Session {
@Override
public String toString() {
- return super.toString() + ",application set=" + applicationSet;
+ return super.toString() + ",application set=" + applicationVersions;
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java
index b627fe9ba3b..f354b5238b2 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java
@@ -17,7 +17,7 @@ import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.TenantName;
import com.yahoo.path.Path;
import com.yahoo.transaction.Transaction;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import java.security.cert.X509Certificate;
import java.time.Instant;
@@ -94,14 +94,10 @@ public abstract class Session implements Comparable<Session> {
* @return log preamble
*/
public String logPre() {
- Optional<ApplicationId> applicationId;
+ Optional<ApplicationId> applicationId = getOptionalApplicationId();
+
// We might not be able to read application id from zookeeper
// e.g. when the app has been deleted. Use tenant name in that case.
- try {
- applicationId = Optional.of(getApplicationId());
- } catch (Exception e) {
- applicationId = Optional.empty();
- }
return applicationId
.filter(appId -> ! appId.equals(ApplicationId.defaultId()))
.map(TenantRepository::logPre)
@@ -116,46 +112,6 @@ public abstract class Session implements Comparable<Session> {
return sessionZooKeeperClient.readActivatedTime();
}
- public void setApplicationId(ApplicationId applicationId) {
- sessionZooKeeperClient.writeApplicationId(applicationId);
- }
-
- void setApplicationPackageReference(FileReference applicationPackageReference) {
- sessionZooKeeperClient.writeApplicationPackageReference(Optional.ofNullable(applicationPackageReference));
- }
-
- public void setVespaVersion(Version version) {
- sessionZooKeeperClient.writeVespaVersion(version);
- }
-
- public void setDockerImageRepository(Optional<DockerImage> dockerImageRepository) {
- sessionZooKeeperClient.writeDockerImageRepository(dockerImageRepository);
- }
-
- public void setAthenzDomain(Optional<AthenzDomain> athenzDomain) {
- sessionZooKeeperClient.writeAthenzDomain(athenzDomain);
- }
-
- public void setQuota(Optional<Quota> quota) {
- sessionZooKeeperClient.writeQuota(quota);
- }
-
- public void setTenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
- sessionZooKeeperClient.writeTenantSecretStores(tenantSecretStores);
- }
-
- public void setOperatorCertificates(List<X509Certificate> operatorCertificates) {
- sessionZooKeeperClient.writeOperatorCertificates(operatorCertificates);
- }
-
- public void setCloudAccount(Optional<CloudAccount> cloudAccount) {
- sessionZooKeeperClient.writeCloudAccount(cloudAccount);
- }
-
- public void setDataplaneTokens(List<DataplaneToken> dataplaneTokens) {
- sessionZooKeeperClient.writeDataplaneTokens(dataplaneTokens);
- }
-
/** Returns application id read from ZooKeeper. Will throw RuntimeException if not found */
public ApplicationId getApplicationId() { return sessionZooKeeperClient.readApplicationId(); }
@@ -168,7 +124,7 @@ public abstract class Session implements Comparable<Session> {
}
}
- public FileReference getApplicationPackageReference() {return sessionZooKeeperClient.readApplicationPackageReference(); }
+ public Optional<FileReference> getApplicationPackageReference() { return sessionZooKeeperClient.readApplicationPackageReference(); }
public Optional<DockerImage> getDockerImageRepository() { return sessionZooKeeperClient.readDockerImageRepository(); }
@@ -202,6 +158,8 @@ public abstract class Session implements Comparable<Session> {
return sessionZooKeeperClient.readDataplaneTokens();
}
+ public SessionZooKeeperClient getSessionZooKeeperClient() { return sessionZooKeeperClient; }
+
private Transaction createSetStatusTransaction(Status status) {
return sessionZooKeeperClient.createWriteStatusTransaction(status);
}
@@ -226,7 +184,7 @@ public abstract class Session implements Comparable<Session> {
return getApplicationPackage().getFile(relativePath);
}
- Optional<ApplicationSet> applicationSet() { return Optional.empty(); };
+ Optional<ApplicationVersions> applicationVersions() { return Optional.empty(); }
private void markSessionEdited() {
setStatus(Session.Status.NEW);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionData.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionData.java
new file mode 100644
index 00000000000..1d5a560fc8b
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionData.java
@@ -0,0 +1,108 @@
+package com.yahoo.vespa.config.server.session;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.FileReference;
+import com.yahoo.config.model.api.Quota;
+import com.yahoo.config.model.api.TenantSecretStore;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.AthenzDomain;
+import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.DataplaneToken;
+import com.yahoo.config.provision.DockerImage;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.config.server.tenant.DataplaneTokenSerializer;
+import com.yahoo.vespa.config.server.tenant.OperatorCertificateSerializer;
+import com.yahoo.vespa.config.server.tenant.TenantSecretStoreSerializer;
+
+import java.io.IOException;
+import java.security.cert.X509Certificate;
+import java.time.Instant;
+import java.util.List;
+import java.util.Optional;
+
+import static com.yahoo.slime.SlimeUtils.optionalString;
+
+/**
+ * Data class for session information, typically parameters supplied in a deployment request that need
+ * to be persisted in ZooKeeper. These will be used when creating a new session based on an existing one.
+ *
+ * @author hmusum
+ */
+public record SessionData(ApplicationId applicationId,
+ Optional<FileReference> applicationPackageReference,
+ Version version,
+ Instant created,
+ Optional<DockerImage> dockerImageRepository,
+ Optional<AthenzDomain> athenzDomain,
+ Optional<Quota> quota,
+ List<TenantSecretStore> tenantSecretStores,
+ List<X509Certificate> operatorCertificates,
+ Optional<CloudAccount> cloudAccount,
+ List<DataplaneToken> dataplaneTokens) {
+
+ // NOTE: Any state added here MUST also be propagated in com.yahoo.vespa.config.server.deploy.Deployment.prepare()
+ static final String APPLICATION_ID_PATH = "applicationId";
+ static final String APPLICATION_PACKAGE_REFERENCE_PATH = "applicationPackageReference";
+ static final String VERSION_PATH = "version";
+ static final String CREATE_TIME_PATH = "createTime";
+ static final String DOCKER_IMAGE_REPOSITORY_PATH = "dockerImageRepository";
+ static final String ATHENZ_DOMAIN = "athenzDomain";
+ static final String QUOTA_PATH = "quota";
+ static final String TENANT_SECRET_STORES_PATH = "tenantSecretStores";
+ static final String OPERATOR_CERTIFICATES_PATH = "operatorCertificates";
+ static final String CLOUD_ACCOUNT_PATH = "cloudAccount";
+ static final String DATAPLANE_TOKENS_PATH = "dataplaneTokens";
+ static final String SESSION_DATA_PATH = "sessionData";
+
+ public byte[] toJson() {
+ try {
+ Slime slime = new Slime();
+ toSlime(slime.setObject());
+ return SlimeUtils.toJsonBytes(slime);
+ }
+ catch (IOException e) {
+ throw new RuntimeException("Serialization of session data to json failed", e);
+ }
+ }
+
+ private void toSlime(Cursor object) {
+ object.setString(APPLICATION_ID_PATH, applicationId.serializedForm());
+ applicationPackageReference.ifPresent(ref -> object.setString(APPLICATION_PACKAGE_REFERENCE_PATH, ref.value()));
+ object.setString(VERSION_PATH, version.toString());
+ object.setLong(CREATE_TIME_PATH, created.toEpochMilli());
+ dockerImageRepository.ifPresent(image -> object.setString(DOCKER_IMAGE_REPOSITORY_PATH, image.asString()));
+ athenzDomain.ifPresent(domain -> object.setString(ATHENZ_DOMAIN, domain.value()));
+ quota.ifPresent(q -> q.toSlime(object.setObject(QUOTA_PATH)));
+
+ Cursor tenantSecretStoresArray = object.setArray(TENANT_SECRET_STORES_PATH);
+ TenantSecretStoreSerializer.toSlime(tenantSecretStores, tenantSecretStoresArray);
+
+ Cursor operatorCertificatesArray = object.setArray(OPERATOR_CERTIFICATES_PATH);
+ OperatorCertificateSerializer.toSlime(operatorCertificates, operatorCertificatesArray);
+
+ cloudAccount.ifPresent(account -> object.setString(CLOUD_ACCOUNT_PATH, account.value()));
+
+ Cursor dataplaneTokensArray = object.setArray(DATAPLANE_TOKENS_PATH);
+ DataplaneTokenSerializer.toSlime(dataplaneTokens, dataplaneTokensArray);
+ }
+
+ static SessionData fromSlime(Slime slime) {
+ Cursor cursor = slime.get();
+ return new SessionData(ApplicationId.fromSerializedForm(cursor.field(APPLICATION_ID_PATH).asString()),
+ optionalString(cursor.field(APPLICATION_PACKAGE_REFERENCE_PATH)).map(FileReference::new),
+ Version.fromString(cursor.field(VERSION_PATH).asString()),
+ Instant.ofEpochMilli(cursor.field(CREATE_TIME_PATH).asLong()),
+ optionalString(cursor.field(DOCKER_IMAGE_REPOSITORY_PATH)).map(DockerImage::fromString),
+ optionalString(cursor.field(ATHENZ_DOMAIN)).map(AthenzDomain::from),
+ SlimeUtils.isPresent(cursor.field(QUOTA_PATH))
+ ? Optional.of(Quota.fromSlime(cursor.field(QUOTA_PATH)))
+ : Optional.empty(),
+ TenantSecretStoreSerializer.listFromSlime(cursor.field(TENANT_SECRET_STORES_PATH)),
+ OperatorCertificateSerializer.fromSlime(cursor.field(OPERATOR_CERTIFICATES_PATH)),
+ optionalString(cursor.field(CLOUD_ACCOUNT_PATH)).map(CloudAccount::from),
+ DataplaneTokenSerializer.fromSlime(cursor.field(DATAPLANE_TOKENS_PATH)));
+ }
+
+}
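Since SessionData is a plain record with symmetric toSlime/fromSlime handling, a JSON round trip should be possible. A hedged sketch, assuming the caller sits in the same package (fromSlime is package-private), that SlimeUtils.jsonToSlime accepts the produced bytes, and that the version string and empty optionals are placeholders:

package com.yahoo.vespa.config.server.session;

import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.slime.SlimeUtils;

import java.time.Instant;
import java.util.List;
import java.util.Optional;

class SessionDataRoundTripSketch {

    static SessionData roundTrip() {
        SessionData original = new SessionData(ApplicationId.defaultId(),
                                               Optional.empty(),                   // applicationPackageReference
                                               Version.fromString("8.1.2"),        // hypothetical Vespa version
                                               Instant.ofEpochMilli(Instant.now().toEpochMilli()), // millisecond precision, as persisted
                                               Optional.empty(),                   // dockerImageRepository
                                               Optional.empty(),                   // athenzDomain
                                               Optional.empty(),                   // quota
                                               List.of(),                          // tenantSecretStores
                                               List.of(),                          // operatorCertificates
                                               Optional.empty(),                   // cloudAccount
                                               List.of());                         // dataplaneTokens
        byte[] json = original.toJson();                      // serializes via toSlime()
        return SessionData.fromSlime(SlimeUtils.jsonToSlime(json));
    }
}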
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
index ae87a0dd182..aeff97169f4 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
@@ -34,7 +34,7 @@ import com.yahoo.net.HostName;
import com.yahoo.path.Path;
import com.yahoo.vespa.config.server.ConfigServerSpec;
import com.yahoo.vespa.config.server.TimeoutBudget;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.configchange.ConfigChangeActions;
import com.yahoo.vespa.config.server.deploy.ZooKeeperDeployer;
import com.yahoo.vespa.config.server.filedistribution.FileDistributionFactory;
@@ -49,7 +49,9 @@ import com.yahoo.vespa.config.server.tenant.EndpointCertificateMetadataStore;
import com.yahoo.vespa.config.server.tenant.EndpointCertificateRetriever;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.curator.Curator;
+import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.model.application.validation.BundleValidator;
import org.xml.sax.SAXException;
import javax.xml.parsers.ParserConfigurationException;
@@ -90,6 +92,7 @@ public class SessionPreparer {
private final SecretStore secretStore;
private final FlagSource flagSource;
private final ExecutorService executor;
+ private final BooleanFlag writeSessionData;
public SessionPreparer(ModelFactoryRegistry modelFactoryRegistry,
FileDistributionFactory fileDistributionFactory,
@@ -111,6 +114,7 @@ public class SessionPreparer {
this.secretStore = secretStore;
this.flagSource = flagSource;
this.executor = executor;
+ this.writeSessionData = Flags.WRITE_CONFIG_SERVER_SESSION_DATA_AS_ONE_BLOB.bindTo(flagSource);
}
ExecutorService getExecutor() { return executor; }
@@ -121,14 +125,14 @@ public class SessionPreparer {
* @param hostValidator host validator
* @param logger for storing logs returned in response to client.
* @param params parameters controlling behaviour of prepare.
- * @param activeApplicationSet set of currently active applications.
+ * @param activeApplicationVersions active application versions.
* @return the config change actions that must be done to handle the activation of the models prepared.
*/
public PrepareResult prepare(HostValidator hostValidator, DeployLogger logger, PrepareParams params,
- Optional<ApplicationSet> activeApplicationSet, Instant now, File serverDbSessionDir,
+ Optional<ApplicationVersions> activeApplicationVersions, Instant now, File serverDbSessionDir,
ApplicationPackage applicationPackage, SessionZooKeeperClient sessionZooKeeperClient) {
ApplicationId applicationId = params.getApplicationId();
- Preparation preparation = new Preparation(hostValidator, logger, params, activeApplicationSet,
+ Preparation preparation = new Preparation(hostValidator, logger, params, activeApplicationVersions,
TenantRepository.getTenantPath(applicationId.tenant()),
serverDbSessionDir, applicationPackage, sessionZooKeeperClient);
preparation.preprocess();
@@ -180,7 +184,7 @@ public class SessionPreparer {
private final FileRegistry fileRegistry;
Preparation(HostValidator hostValidator, DeployLogger logger, PrepareParams params,
- Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath,
+ Optional<ApplicationVersions> activeApplicationVersions, Path tenantPath,
File serverDbSessionDir, ApplicationPackage applicationPackage,
SessionZooKeeperClient sessionZooKeeperClient) {
this.logger = logger;
@@ -213,7 +217,7 @@ public class SessionPreparer {
hostValidator,
logger,
params,
- currentActiveApplicationSet,
+ activeApplicationVersions,
configserverConfig,
zone);
}
@@ -335,7 +339,8 @@ public class SessionPreparer {
writeStateToZooKeeper(sessionZooKeeperClient,
preprocessedApplicationPackage,
applicationId,
- filereference,
+ sessionZooKeeperClient.readCreateTime(),
+ Optional.of(filereference),
dockerImageRepository,
vespaVersion,
logger,
@@ -377,7 +382,8 @@ public class SessionPreparer {
private void writeStateToZooKeeper(SessionZooKeeperClient zooKeeperClient,
ApplicationPackage applicationPackage,
ApplicationId applicationId,
- FileReference fileReference,
+ Instant created,
+ Optional<FileReference> fileReference,
Optional<DockerImage> dockerImageRepository,
Version vespaVersion,
DeployLogger deployLogger,
@@ -389,22 +395,24 @@ public class SessionPreparer {
List<X509Certificate> operatorCertificates,
Optional<CloudAccount> cloudAccount,
List<DataplaneToken> dataplaneTokens) {
- ZooKeeperDeployer zkDeployer = zooKeeperClient.createDeployer(deployLogger);
+ var zooKeeperDeployer = new ZooKeeperDeployer(curator, deployLogger, applicationId, zooKeeperClient.sessionId());
try {
- zkDeployer.deploy(applicationPackage, fileRegistryMap, allocatedHosts);
- // Note: When changing the below you need to also change similar calls in SessionRepository.createSessionFromExisting()
- zooKeeperClient.writeApplicationId(applicationId);
- zooKeeperClient.writeApplicationPackageReference(Optional.of(fileReference));
- zooKeeperClient.writeVespaVersion(vespaVersion);
- zooKeeperClient.writeDockerImageRepository(dockerImageRepository);
- zooKeeperClient.writeAthenzDomain(athenzDomain);
- zooKeeperClient.writeQuota(quota);
- zooKeeperClient.writeTenantSecretStores(tenantSecretStores);
- zooKeeperClient.writeOperatorCertificates(operatorCertificates);
- zooKeeperClient.writeCloudAccount(cloudAccount);
- zooKeeperClient.writeDataplaneTokens(dataplaneTokens);
+ zooKeeperDeployer.deploy(applicationPackage, fileRegistryMap, allocatedHosts);
+ new SessionSerializer().write(zooKeeperClient,
+ applicationId,
+ created,
+ fileReference,
+ dockerImageRepository,
+ vespaVersion,
+ athenzDomain,
+ quota,
+ tenantSecretStores,
+ operatorCertificates,
+ cloudAccount,
+ dataplaneTokens,
+ writeSessionData);
} catch (RuntimeException | IOException e) {
- zkDeployer.cleanup();
+ zooKeeperDeployer.cleanup();
throw new RuntimeException("Error preparing session", e);
}
}
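The rewritten writeStateToZooKeeper keeps the old per-field ZooKeeper writes (now inside SessionSerializer.write) and, when WRITE_CONFIG_SERVER_SESSION_DATA_AS_ONE_BLOB is enabled, also writes the same state as a single blob. A minimal, self-contained sketch of that flag-gated dual write; KeyValueStore and the BooleanSupplier flag are hypothetical stand-ins, not the curator or flag APIs:

import java.util.Map;
import java.util.function.BooleanSupplier;

final class DualWriteSketch {

    interface KeyValueStore { void set(String path, byte[] data); }

    static void writeSessionState(KeyValueStore store,
                                  Map<String, byte[]> perFieldNodes,
                                  byte[] sessionDataJson,
                                  BooleanSupplier writeAsOneBlob) {
        perFieldNodes.forEach(store::set);              // legacy layout, always written
        if (writeAsOneBlob.getAsBoolean())              // new layout, behind the feature flag
            store.set("sessionData", sessionDataJson);
    }

}

Writing both layouts lets the read side be switched over independently and rolled back without data loss.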
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
index f82aa405380..44a656a1579 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
@@ -6,7 +6,6 @@ import com.google.common.collect.Multiset;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.concurrent.StripedExecutor;
-import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
@@ -23,11 +22,10 @@ import com.yahoo.transaction.NestedTransaction;
import com.yahoo.transaction.Transaction;
import com.yahoo.vespa.config.server.ConfigServerDB;
import com.yahoo.vespa.config.server.TimeoutBudget;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.application.TenantApplications;
import com.yahoo.vespa.config.server.configchange.ConfigChangeActions;
import com.yahoo.vespa.config.server.deploy.TenantFileSystemDirs;
-import com.yahoo.vespa.config.server.filedistribution.FileDirectory;
import com.yahoo.vespa.config.server.filedistribution.FileDistributionFactory;
import com.yahoo.vespa.config.server.http.InvalidApplicationException;
import com.yahoo.vespa.config.server.http.UnknownVespaVersionException;
@@ -41,7 +39,10 @@ import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.zookeeper.SessionCounter;
import com.yahoo.vespa.config.server.zookeeper.ZKApplication;
import com.yahoo.vespa.curator.Curator;
+import com.yahoo.vespa.curator.transaction.CuratorTransaction;
+import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.LongFlag;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.UnboundStringFlag;
@@ -127,6 +128,7 @@ public class SessionRepository {
private final ConfigDefinitionRepo configDefinitionRepo;
private final int maxNodeSize;
private final LongFlag expiryTimeFlag;
+ private final BooleanFlag writeSessionData;
public SessionRepository(TenantName tenantName,
TenantApplications applicationRepo,
@@ -168,7 +170,8 @@ public class SessionRepository {
this.modelFactoryRegistry = modelFactoryRegistry;
this.configDefinitionRepo = configDefinitionRepo;
this.maxNodeSize = maxNodeSize;
- expiryTimeFlag = PermanentFlags.CONFIG_SERVER_SESSION_EXPIRY_TIME.bindTo(flagSource);
+ this.expiryTimeFlag = PermanentFlags.CONFIG_SERVER_SESSION_EXPIRY_TIME.bindTo(flagSource);
+ this.writeSessionData = Flags.WRITE_CONFIG_SERVER_SESSION_DATA_AS_ONE_BLOB.bindTo(flagSource);
loadSessions(); // Needs to be done before creating cache below
this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
@@ -237,16 +240,24 @@ public class SessionRepository {
throw new UnknownVespaVersionException("Vespa version '" + version + "' not known by this config server");
});
- applicationRepo.createApplication(params.getApplicationId()); // TODO jvenstad: This is wrong, but it has to be done now, since preparation can change the application ID of a session :(
- logger.log(Level.FINE, "Created application " + params.getApplicationId());
+ ApplicationId applicationId = params.getApplicationId();
+ applicationRepo.createApplication(applicationId); // TODO jvenstad: This is wrong, but it has to be done now, since preparation can change the application ID of a session :(
+ logger.log(Level.FINE, "Created application " + applicationId);
long sessionId = session.getSessionId();
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
Optional<CompletionWaiter> waiter = params.isDryRun()
? Optional.empty()
: Optional.of(sessionZooKeeperClient.createPrepareWaiter());
- Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
+ Optional<ApplicationVersions> activeApplicationVersions = activeApplicationVersions(applicationId);
+ try (var transaction = new CuratorTransaction(curator)) {
+ applicationRepo.createWritePrepareTransaction(transaction,
+ applicationId,
+ sessionId,
+ getActiveSessionId(applicationId))
+ .commit();
+ }
ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo, logger, params,
- activeApplicationSet, now, getSessionAppDir(sessionId),
+ activeApplicationVersions, now, getSessionAppDir(sessionId),
session.getApplicationPackage(), sessionZooKeeperClient)
.getConfigChangeActions();
setPrepared(session);
@@ -266,24 +277,17 @@ public class SessionRepository {
boolean internalRedeploy,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
- ApplicationId existingApplicationId = existingSession.getApplicationId();
+ ApplicationId applicationId = existingSession.getApplicationId();
File existingApp = getSessionAppDir(existingSession.getSessionId());
+ Instant created = clock.instant();
LocalSession session = createSessionFromApplication(existingApp,
- existingApplicationId,
+ applicationId,
internalRedeploy,
timeoutBudget,
- deployLogger);
- // Note: Setters below need to be kept in sync with calls in SessionPreparer.writeStateToZooKeeper()
- session.setApplicationId(existingApplicationId);
- session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
- session.setVespaVersion(existingSession.getVespaVersion());
- session.setDockerImageRepository(existingSession.getDockerImageRepository());
- session.setAthenzDomain(existingSession.getAthenzDomain());
- session.setQuota(existingSession.getQuota());
- session.setTenantSecretStores(existingSession.getTenantSecretStores());
- session.setOperatorCertificates(existingSession.getOperatorCertificates());
- session.setCloudAccount(existingSession.getCloudAccount());
- session.setDataplaneTokens(existingSession.getDataplaneTokens());
+ deployLogger,
+ created);
+ applicationRepo.createApplication(applicationId);
+ write(existingSession, session, applicationId, created);
return session;
}
@@ -299,8 +303,10 @@ public class SessionRepository {
ApplicationId applicationId,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
+ LocalSession session = createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget,
+ deployLogger, clock.instant());
applicationRepo.createApplication(applicationId);
- return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget, deployLogger);
+ return session;
}
/**
@@ -484,20 +490,20 @@ public class SessionRepository {
notifyCompletion(waiter);
}
- public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
- if (session.applicationSet().isPresent()) {
- return session.applicationSet().get();
+ public ApplicationVersions ensureApplicationLoaded(RemoteSession session) {
+ if (session.applicationVersions().isPresent()) {
+ return session.applicationVersions().get();
}
Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId());
- Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan)
- .flatMap(this::getApplicationSet);
- ApplicationSet applicationSet = loadApplication(session, previousApplicationSet);
- RemoteSession activated = session.activated(applicationSet);
+ Optional<ApplicationVersions> previousActiveApplicationVersions = activeSessionId.filter(session::isNewerThan)
+ .flatMap(this::activeApplicationVersions);
+ ApplicationVersions applicationVersions = loadApplication(session, previousActiveApplicationVersions);
+ RemoteSession activated = session.activated(applicationVersions);
long sessionId = activated.getSessionId();
remoteSessionCache.put(sessionId, activated);
updateSessionStateWatcher(sessionId);
- return applicationSet;
+ return applicationVersions;
}
void confirmUpload(Session session) {
@@ -531,10 +537,9 @@ public class SessionRepository {
}
}
- private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
+ private ApplicationVersions loadApplication(Session session, Optional<ApplicationVersions> previousApplicationSet) {
log.log(Level.FINE, () -> "Loading application for " + session);
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
- ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
session.getSessionId(),
sessionZooKeeperClient,
@@ -549,12 +554,12 @@ public class SessionRepository {
zone,
modelFactoryRegistry,
configDefinitionRepo);
- return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
- sessionZooKeeperClient.readDockerImageRepository(),
- sessionZooKeeperClient.readVespaVersion(),
- applicationPackage,
- new AllocatedHostsFromAllModels(),
- clock.instant()));
+ return ApplicationVersions.fromList(builder.buildModels(session.getApplicationId(),
+ session.getDockerImageRepository(),
+ session.getVespaVersion(),
+ sessionZooKeeperClient.loadApplicationPackage(),
+ new AllocatedHostsFromAllModels(),
+ clock.instant()));
}
private void nodeChanged() {
@@ -578,6 +583,25 @@ public class SessionRepository {
});
}
+ // ---------------- Serialization ----------------------------------------------------------------
+
+ private void write(Session existingSession, LocalSession session, ApplicationId applicationId, Instant created) {
+ SessionSerializer sessionSerializer = new SessionSerializer();
+ sessionSerializer.write(session.getSessionZooKeeperClient(),
+ applicationId,
+ created,
+ existingSession.getApplicationPackageReference(),
+ existingSession.getDockerImageRepository(),
+ existingSession.getVespaVersion(),
+ existingSession.getAthenzDomain(),
+ existingSession.getQuota(),
+ existingSession.getTenantSecretStores(),
+ existingSession.getOperatorCertificates(),
+ existingSession.getCloudAccount(),
+ existingSession.getDataplaneTokens(),
+ writeSessionData);
+ }
+
// ---------------- Common stuff ----------------------------------------------------------------
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
@@ -720,14 +744,15 @@ public class SessionRepository {
ApplicationId applicationId,
boolean internalRedeploy,
TimeoutBudget timeoutBudget,
- DeployLogger deployLogger) {
+ DeployLogger deployLogger,
+ Instant created) {
long sessionId = getNextSessionId();
try {
ensureSessionPathDoesNotExist(sessionId);
ApplicationPackage app = createApplicationPackage(applicationDirectory, applicationId, sessionId, internalRedeploy, Optional.of(deployLogger));
log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
- sessionZKClient.createNewSession(clock.instant());
+ sessionZKClient.createNewSession(created);
CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds())));
@@ -761,11 +786,11 @@ public class SessionRepository {
}
}
- public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
- return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet);
+ public Optional<ApplicationVersions> activeApplicationVersions(ApplicationId appId) {
+ return applicationRepo.activeSessionOf(appId).flatMap(this::activeApplicationVersions);
}
- private Optional<ApplicationSet> getApplicationSet(long sessionId) {
+ private Optional<ApplicationVersions> activeApplicationVersions(long sessionId) {
try {
return Optional.ofNullable(getRemoteSession(sessionId)).map(this::ensureApplicationLoaded);
} catch (IllegalArgumentException e) {
@@ -854,23 +879,18 @@ public class SessionRepository {
}
SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
- FileReference fileReference = sessionZKClient.readApplicationPackageReference();
+ var fileReference = sessionZKClient.readApplicationPackageReference();
log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
- if (fileReference == null) return;
+ if (fileReference.isEmpty()) return;
+
+ Optional<File> sessionDir = fileDistributionFactory.fileDirectory().getFile(fileReference.get());
+ // We cannot be guaranteed that the file reference exists (it could be that it has not
+ // been downloaded yet), and e.g. when bootstrapping we cannot throw an exception in that case
+ if (sessionDir.isEmpty()) return;
- File sessionDir;
- FileDirectory fileDirectory = fileDistributionFactory.fileDirectory();
- try {
- sessionDir = fileDirectory.getFile(fileReference);
- } catch (IllegalArgumentException e) {
- // We cannot be guaranteed that the file reference exists (it could be that it has not
- // been downloaded yet), and e.g. when bootstrapping we cannot throw an exception in that case
- log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found");
- return;
- }
ApplicationId applicationId = sessionZKClient.readApplicationId();
log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
- createLocalSession(sessionDir, applicationId, sessionId);
+ createLocalSession(sessionDir.get(), applicationId, sessionId);
}
private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
@@ -965,7 +985,7 @@ public class SessionRepository {
public Transaction createActivateTransaction(Session session) {
Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
- transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
+ transaction.add(applicationRepo.createWriteActiveTransaction(transaction, session.getApplicationId(), session.getSessionId()).operations());
return transaction;
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionSerializer.java
new file mode 100644
index 00000000000..46acb8c7ef1
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionSerializer.java
@@ -0,0 +1,72 @@
+package com.yahoo.vespa.config.server.session;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.FileReference;
+import com.yahoo.config.model.api.Quota;
+import com.yahoo.config.model.api.TenantSecretStore;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.AthenzDomain;
+import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.DataplaneToken;
+import com.yahoo.config.provision.DockerImage;
+import com.yahoo.vespa.flags.BooleanFlag;
+
+import java.security.cert.X509Certificate;
+import java.time.Instant;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Serialization and deserialization of session data to/from ZooKeeper.
+ * @author hmusum
+ */
+public class SessionSerializer {
+
+ void write(SessionZooKeeperClient zooKeeperClient, ApplicationId applicationId,
+ Instant created, Optional<FileReference> fileReference, Optional<DockerImage> dockerImageRepository,
+ Version vespaVersion, Optional<AthenzDomain> athenzDomain, Optional<Quota> quota,
+ List<TenantSecretStore> tenantSecretStores, List<X509Certificate> operatorCertificates,
+ Optional<CloudAccount> cloudAccount, List<DataplaneToken> dataplaneTokens,
+ BooleanFlag writeSessionData) {
+ zooKeeperClient.writeApplicationId(applicationId);
+ zooKeeperClient.writeApplicationPackageReference(fileReference);
+ zooKeeperClient.writeVespaVersion(vespaVersion);
+ zooKeeperClient.writeDockerImageRepository(dockerImageRepository);
+ zooKeeperClient.writeAthenzDomain(athenzDomain);
+ zooKeeperClient.writeQuota(quota);
+ zooKeeperClient.writeTenantSecretStores(tenantSecretStores);
+ zooKeeperClient.writeOperatorCertificates(operatorCertificates);
+ zooKeeperClient.writeCloudAccount(cloudAccount);
+ zooKeeperClient.writeDataplaneTokens(dataplaneTokens);
+ if (writeSessionData.value())
+ zooKeeperClient.writeSessionData(new SessionData(applicationId,
+ fileReference,
+ vespaVersion,
+ created,
+ dockerImageRepository,
+ athenzDomain,
+ quota,
+ tenantSecretStores,
+ operatorCertificates,
+ cloudAccount,
+ dataplaneTokens));
+ }
+
+ SessionData read(SessionZooKeeperClient zooKeeperClient, BooleanFlag readSessionData) {
+ if (readSessionData.value())
+ return zooKeeperClient.readSessionData();
+ else
+ return new SessionData(zooKeeperClient.readApplicationId(),
+ zooKeeperClient.readApplicationPackageReference(),
+ zooKeeperClient.readVespaVersion(),
+ zooKeeperClient.readCreateTime(),
+ zooKeeperClient.readDockerImageRepository(),
+ zooKeeperClient.readAthenzDomain(),
+ zooKeeperClient.readQuota(),
+ zooKeeperClient.readTenantSecretStores(),
+ zooKeeperClient.readOperatorCertificates(),
+ zooKeeperClient.readCloudAccount(),
+ zooKeeperClient.readDataplaneTokens());
+ }
+
+}
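SessionSerializer.read mirrors the write side: with the flag on it reads the single blob, otherwise it assembles SessionData from the individual nodes. A sketch of that read-with-fallback shape, again with a hypothetical KeyValueStore rather than the real SessionZooKeeperClient:

import java.util.Optional;
import java.util.function.BooleanSupplier;

final class DualReadSketch {

    interface KeyValueStore { Optional<byte[]> get(String path); }

    static byte[] readSessionState(KeyValueStore store, BooleanSupplier readAsOneBlob) {
        if (readAsOneBlob.getAsBoolean())
            return store.get("sessionData").orElseThrow();   // consolidated blob
        // Fallback to the legacy per-field layout (only one field shown for brevity).
        return store.get("applicationId").orElseThrow();
    }

}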
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
index 23b6fe075fa..85abd937ac0 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
@@ -6,7 +6,6 @@ import com.yahoo.component.Version;
import com.yahoo.component.Vtag;
import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.ApplicationPackage;
-import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.config.model.api.Quota;
import com.yahoo.config.model.api.TenantSecretStore;
@@ -23,8 +22,6 @@ import com.yahoo.text.Utf8;
import com.yahoo.transaction.Transaction;
import com.yahoo.vespa.config.server.NotFoundException;
import com.yahoo.vespa.config.server.UserConfigDefinitionRepo;
-import com.yahoo.vespa.config.server.deploy.ZooKeeperClient;
-import com.yahoo.vespa.config.server.deploy.ZooKeeperDeployer;
import com.yahoo.vespa.config.server.filedistribution.AddFileInterface;
import com.yahoo.vespa.config.server.filedistribution.MockFileManager;
import com.yahoo.vespa.config.server.tenant.CloudAccountSerializer;
@@ -45,6 +42,18 @@ import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
+import static com.yahoo.vespa.config.server.session.SessionData.APPLICATION_ID_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.APPLICATION_PACKAGE_REFERENCE_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.ATHENZ_DOMAIN;
+import static com.yahoo.vespa.config.server.session.SessionData.CLOUD_ACCOUNT_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.CREATE_TIME_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.DATAPLANE_TOKENS_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.DOCKER_IMAGE_REPOSITORY_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.OPERATOR_CERTIFICATES_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.QUOTA_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.SESSION_DATA_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.TENANT_SECRET_STORES_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.VERSION_PATH;
import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USER_DEFCONFIGS_ZK_SUBPATH;
import static com.yahoo.vespa.curator.Curator.CompletionWaiter;
import static com.yahoo.yolean.Exceptions.uncheck;
@@ -61,18 +70,6 @@ public class SessionZooKeeperClient {
// NOTE: Any state added here MUST also be propagated in com.yahoo.vespa.config.server.deploy.Deployment.prepare()
- static final String APPLICATION_ID_PATH = "applicationId";
- static final String APPLICATION_PACKAGE_REFERENCE_PATH = "applicationPackageReference";
- private static final String VERSION_PATH = "version";
- private static final String CREATE_TIME_PATH = "createTime";
- private static final String DOCKER_IMAGE_REPOSITORY_PATH = "dockerImageRepository";
- private static final String ATHENZ_DOMAIN = "athenzDomain";
- private static final String QUOTA_PATH = "quota";
- private static final String TENANT_SECRET_STORES_PATH = "tenantSecretStores";
- private static final String OPERATOR_CERTIFICATES_PATH = "operatorCertificates";
- private static final String CLOUD_ACCOUNT_PATH = "cloudAccount";
- private static final String DATAPLANE_TOKENS_PATH = "dataplaneTokens";
-
private final Curator curator;
private final TenantName tenantName;
private final long sessionId;
@@ -180,11 +177,8 @@ public class SessionZooKeeperClient {
reference -> curator.set(applicationPackageReferencePath(), Utf8.toBytes(reference.value())));
}
- FileReference readApplicationPackageReference() {
- Optional<byte[]> data = curator.getData(applicationPackageReferencePath());
- if (data.isEmpty()) return null; // This should not happen.
-
- return new FileReference(Utf8.toString(data.get()));
+ Optional<FileReference> readApplicationPackageReference() {
+ return curator.getData(applicationPackageReferencePath()).map(d -> new FileReference(Utf8.toString(d)));
}
private Path applicationPackageReferencePath() {
@@ -227,6 +221,14 @@ public class SessionZooKeeperClient {
curator.set(versionPath(), Utf8.toBytes(version.toString()));
}
+ public void writeSessionData(SessionData sessionData) {
+ curator.set(sessionPath.append(SESSION_DATA_PATH), sessionData.toJson());
+ }
+
+ public SessionData readSessionData() {
+ return SessionData.fromSlime(SlimeUtils.jsonToSlime(curator.getData(sessionPath.append(SESSION_DATA_PATH)).orElseThrow()));
+ }
+
public Version readVespaVersion() {
Optional<byte[]> data = curator.getData(versionPath());
// TODO: Empty version should not be possible any more - verify and remove
@@ -261,11 +263,6 @@ public class SessionZooKeeperClient {
.orElseThrow(() -> new IllegalStateException("Allocated hosts does not exists"));
}
- public ZooKeeperDeployer createDeployer(DeployLogger logger) {
- ZooKeeperClient zkClient = new ZooKeeperClient(curator, logger, sessionPath);
- return new ZooKeeperDeployer(zkClient);
- }
-
public Transaction createWriteStatusTransaction(Session.Status status) {
CuratorTransaction transaction = new CuratorTransaction(curator);
if (curator.exists(sessionStatusPath)) {
@@ -368,7 +365,7 @@ public class SessionZooKeeperClient {
transaction.commit();
}
- private static Path getSessionPath(TenantName tenantName, long sessionId) {
+ public static Path getSessionPath(TenantName tenantName, long sessionId) {
return TenantRepository.getSessionsPath(tenantName).append(String.valueOf(sessionId));
}
}
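readApplicationPackageReference now returns Optional<FileReference> instead of a nullable value, and the SessionRepository caller chains it with FileDirectory.getFile, which also returns an Optional in this change, so a missing node or a not-yet-downloaded file reference short-circuits instead of throwing. A sketch of that chain with hypothetical Store/FileDirectory stand-ins:

import java.io.File;
import java.util.Optional;

final class OptionalLookupSketch {

    interface Store { Optional<byte[]> get(String path); }
    interface FileDirectory { Optional<File> getFile(String reference); }

    static Optional<File> sessionDir(Store store, FileDirectory files, String referencePath) {
        return store.get(referencePath)       // missing node => empty, not null
                .map(String::new)             // bytes -> file reference value
                .flatMap(files::getFile);     // not downloaded yet => empty, no exception
    }

}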
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java
index ef41512f979..3b819da6237 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java
@@ -54,6 +54,11 @@ public class DataplaneTokenSerializer {
public static Slime toSlime(List<DataplaneToken> dataplaneTokens) {
Slime slime = new Slime();
Cursor root = slime.setArray();
+ toSlime(dataplaneTokens, root);
+ return slime;
+ }
+
+ public static void toSlime(List<DataplaneToken> dataplaneTokens, Cursor root) {
for (DataplaneToken token : dataplaneTokens) {
Cursor cursor = root.addObject();
cursor.setString(ID_FIELD, token.tokenId());
@@ -65,6 +70,6 @@ public class DataplaneTokenSerializer {
val.setString(EXPIRATION_FIELD, v.expiration().map(Instant::toString).orElse("<none>"));
});
}
- return slime;
}
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java
index 232dd2e5fe7..e5a969bb948 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java
@@ -1,8 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
package com.yahoo.vespa.config.server.tenant;
-import com.yahoo.config.model.api.ApplicationRoles;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
@@ -11,21 +9,28 @@ import com.yahoo.slime.SlimeUtils;
import java.security.cert.X509Certificate;
import java.util.List;
-import java.util.stream.Collectors;
+/**
+ * Serializer for operator certificates.
+ * The certificates are serialized as a list of PEM strings.
+ * @author tokle
+ */
public class OperatorCertificateSerializer {
private final static String certificateField = "certificates";
-
public static Slime toSlime(List<X509Certificate> certificateList) {
Slime slime = new Slime();
var root = slime.setObject();
Cursor array = root.setArray(certificateField);
+ toSlime(certificateList, array);
+ return slime;
+ }
+
+ public static void toSlime(List<X509Certificate> certificateList, Cursor array) {
certificateList.stream()
.map(X509CertificateUtils::toPem)
.forEach(array::addString);
- return slime;
}
public static List<X509Certificate> fromSlime(Inspector object) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java
index 262192ad6c4..1980fea3ae5 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java
@@ -30,10 +30,14 @@ public class TenantSecretStoreSerializer {
public static Slime toSlime(List<TenantSecretStore> tenantSecretStores) {
Slime slime = new Slime();
Cursor cursor = slime.setArray();
- tenantSecretStores.forEach(tenantSecretStore -> toSlime(tenantSecretStore, cursor.addObject()));
+ toSlime(tenantSecretStores, cursor);
return slime;
}
+ public static void toSlime(List<TenantSecretStore> tenantSecretStores, Cursor cursor) {
+ tenantSecretStores.forEach(tenantSecretStore -> toSlime(tenantSecretStore, cursor.addObject()));
+ }
+
public static void toSlime(TenantSecretStore tenantSecretStore, Cursor object) {
object.setString(awsIdField, tenantSecretStore.getAwsId());
object.setString(nameField, tenantSecretStore.getName());
@@ -56,4 +60,5 @@ public class TenantSecretStoreSerializer {
inspector.traverse(((ArrayTraverser)(idx, store) -> tenantSecretStores.add(fromSlime(store))));
return tenantSecretStores;
}
+
}
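The new Cursor-taking toSlime overloads in these serializers exist so the session data blob can embed all of them in one object instead of one Slime per field. A hedged sketch of that composition, using only the overloads visible in this diff; the field names are illustrative, the actual keys are defined in SessionData:

import com.yahoo.config.model.api.TenantSecretStore;
import com.yahoo.config.provision.DataplaneToken;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;
import com.yahoo.vespa.config.server.tenant.DataplaneTokenSerializer;
import com.yahoo.vespa.config.server.tenant.OperatorCertificateSerializer;
import com.yahoo.vespa.config.server.tenant.TenantSecretStoreSerializer;

import java.security.cert.X509Certificate;
import java.util.List;

final class ComposedSerializationSketch {

    static Slime toSlime(List<DataplaneToken> tokens,
                         List<X509Certificate> certificates,
                         List<TenantSecretStore> secretStores) {
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        // Each serializer appends into an array owned by the shared root object.
        DataplaneTokenSerializer.toSlime(tokens, root.setArray("dataplaneTokens"));
        OperatorCertificateSerializer.toSlime(certificates, root.setArray("operatorCertificates"));
        TenantSecretStoreSerializer.toSlime(secretStores, root.setArray("tenantSecretStores"));
        return slime;
    }

}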
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
index 950a21e5750..a704599c6d4 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
@@ -31,7 +31,9 @@ import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.protocol.ConfigResponse;
import com.yahoo.vespa.config.protocol.DefContent;
import com.yahoo.vespa.config.protocol.VespaVersion;
+import com.yahoo.vespa.config.server.application.ApplicationData;
import com.yahoo.vespa.config.server.application.OrchestratorMock;
+import com.yahoo.vespa.config.server.application.TenantApplications;
import com.yahoo.vespa.config.server.deploy.DeployTester;
import com.yahoo.vespa.config.server.deploy.TenantFileSystemDirs;
import com.yahoo.vespa.config.server.filedistribution.FileDirectory;
@@ -49,6 +51,7 @@ import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.tenant.TestTenantRepository;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.model.VespaModelFactory;
@@ -57,6 +60,7 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import org.junit.rules.TemporaryFolder;
+
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.IOException;
@@ -97,7 +101,6 @@ public class ApplicationRepositoryTest {
private ApplicationRepository applicationRepository;
private TenantRepository tenantRepository;
- private MockProvisioner provisioner;
private OrchestratorMock orchestrator;
private TimeoutBudget timeoutBudget;
private Curator curator;
@@ -123,7 +126,7 @@ public class ApplicationRepositoryTest {
.build();
flagSource = new InMemoryFlagSource();
fileDirectory = new FileDirectory(configserverConfig);
- provisioner = new MockProvisioner();
+ MockProvisioner provisioner = new MockProvisioner();
tenantRepository = new TestTenantRepository.Builder()
.withClock(clock)
.withConfigserverConfig(configserverConfig)
@@ -220,6 +223,27 @@ public class ApplicationRepositoryTest {
}
@Test
+ public void applicationData() {
+ flagSource = flagSource.withBooleanFlag(Flags.WRITE_APPLICATION_DATA_AS_JSON.id(), true);
+ long firstSessionId = deployApp(testApp).sessionId();
+ assertApplicationData(firstSessionId, firstSessionId);
+ assertEquals(firstSessionId, applicationRepository.getActiveSession(applicationId()).get().getSessionId());
+
+ // Create new session, no changes to application data
+ long secondSessionId = createSession(applicationId(), timeoutBudget, testApp);
+ assertNotEquals(firstSessionId, secondSessionId);
+ assertApplicationData(firstSessionId, firstSessionId);
+
+ // Prepare, last deployed session id should be the new one
+ prepare(testApp, secondSessionId);
+ assertApplicationData(firstSessionId, secondSessionId);
+
+ // Activate, active session id should be the new one
+ activate(applicationId(), secondSessionId, timeoutBudget);
+ assertApplicationData(secondSessionId, secondSessionId);
+ }
+
+ @Test
public void createFromActiveSession() {
long originalSessionId = deployApp(testApp).sessionId();
@@ -326,7 +350,7 @@ public class ApplicationRepositoryTest {
// Delete app and verify that it has been deleted from repos and no application set exists
assertTrue(applicationRepository.delete(applicationId()));
assertTrue(applicationRepository.getActiveSession(applicationId()).isEmpty());
- assertEquals(Optional.empty(), sessionRepository.getRemoteSession(sessionId).applicationSet());
+ assertEquals(Optional.empty(), sessionRepository.getRemoteSession(sessionId).applicationVersions());
assertTrue(curator.exists(sessionNode));
assertEquals(Session.Status.DELETE.name(), Utf8.toString(curator.getData(sessionNode.append("sessionState")).get()));
assertTrue(sessionFile.exists());
@@ -681,6 +705,11 @@ public class ApplicationRepositoryTest {
return applicationRepository.deploy(application, prepareParams());
}
+ private long prepare(File application, long sessionId) {
+ applicationRepository.prepare(sessionId, prepareParams());
+ return sessionId;
+ }
+
private PrepareResult deployApp(File applicationPackage) {
return deployApp(applicationPackage, prepareParams());
}
@@ -809,4 +838,12 @@ public class ApplicationRepositoryTest {
return applicationRepository.createSessionFromExisting(applicationId, false, timeoutBudget, new BaseDeployLogger());
}
+ private void assertApplicationData(long expectedActiveSessionId, long expectedLastDeployedSessionId) {
+ TenantApplications applications = tenantRepository.getTenant(applicationId().tenant()).getApplicationRepo();
+ ApplicationData applicationData = applications.applicationData(applicationId()).get();
+ assertEquals(applicationId(), applicationData.applicationId());
+ assertEquals(expectedActiveSessionId, applicationData.get().activeSession().get().longValue());
+ assertEquals(expectedLastDeployedSessionId, applicationData.lastDeployedSession().get().longValue());
+ }
+
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java
index c3766ad9b83..1b4e9ad1231 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java
@@ -7,7 +7,7 @@ import com.yahoo.config.model.application.provider.FilesApplicationPackage;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.server.application.Application;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.monitoring.MetricUpdater;
import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.flags.InMemoryFlagSource;
@@ -112,8 +112,8 @@ public class SuperModelRequestHandlerTest {
assertTrue(controller.hasApplication(ApplicationId.global(), Optional.empty()));
}
- private ApplicationSet createApp(ApplicationId applicationId, long generation) throws IOException, SAXException {
- return ApplicationSet.from(
+ private ApplicationVersions createApp(ApplicationId applicationId, long generation) throws IOException, SAXException {
+ return ApplicationVersions.from(
new TestApplication(
new VespaModel(FilesApplicationPackage.fromFile(testApp)),
new ServerCache(),
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java
index cbdb462c35e..2f0ac236170 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationCuratorDatabaseTest.java
@@ -3,23 +3,28 @@ package com.yahoo.vespa.config.server.application;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import org.junit.Test;
import java.time.Instant;
import java.util.Optional;
+import java.util.OptionalLong;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* @author jonmv
*/
public class ApplicationCuratorDatabaseTest {
+ private final MockCurator curator = new MockCurator();
+
@Test
public void testReindexingStatusSerialization() {
ApplicationId id = ApplicationId.defaultId();
- ApplicationCuratorDatabase db = new ApplicationCuratorDatabase(id.tenant(), new MockCurator());
+ ApplicationCuratorDatabase db = new ApplicationCuratorDatabase(id.tenant(), curator);
assertEquals(Optional.empty(), db.readReindexingStatus(id));
@@ -35,4 +40,98 @@ public class ApplicationCuratorDatabaseTest {
assertEquals(reindexing, db.readReindexingStatus(id).orElseThrow());
}
+ @Test
+ public void testReadingAndWritingApplicationData() {
+ ApplicationId id = ApplicationId.defaultId();
+ ApplicationCuratorDatabase db = new ApplicationCuratorDatabase(id.tenant(), curator);
+
+ assertEquals(Optional.empty(), db.applicationData(id));
+
+ db.createApplication(id, false);
+ assertEquals(Optional.empty(), db.applicationData(id)); // still empty, as no data has been written to node
+ deleteApplication(db, id);
+
+ db.createApplication(id, true);
+ // Can be read as json, but no active session or last deployed session
+ Optional<ApplicationData> applicationData = db.applicationData(id);
+ assertTrue(applicationData.isPresent());
+ assertEquals(id, applicationData.get().applicationId());
+ assertFalse(applicationData.get().activeSession().isPresent());
+ assertFalse(applicationData.get().lastDeployedSession().isPresent());
+
+ // Prepare session 2, no active session
+ prepareSession(db, id, 2, OptionalLong.empty(), false);
+ // Activate session 2, last deployed session not present (not writing json)
+ activateSession(db, id, 2, false);
+ // Can be read as session id only
+ applicationData = db.applicationData(id);
+ assertTrue(applicationData.isPresent());
+ assertEquals(id, applicationData.get().applicationId());
+ assertTrue(applicationData.get().activeSession().isPresent());
+ assertEquals(2, applicationData.get().activeSession().get().longValue());
+ assertFalse(applicationData.get().lastDeployedSession().isPresent());
+ // Can be read as session data as well
+ applicationData = db.applicationData(id);
+ assertTrue(applicationData.isPresent());
+ assertEquals(id, applicationData.get().applicationId());
+ assertTrue(applicationData.get().activeSession().isPresent());
+ assertEquals(2, applicationData.get().activeSession().get().longValue());
+ assertFalse(applicationData.get().lastDeployedSession().isPresent());
+
+ // Prepare session 3, last deployed session is still 2
+ prepareSession(db, id, 3, OptionalLong.of(2), true);
+ // Can be read as json, active session is still 2 and last deployed session is 3
+ applicationData = db.applicationData(id);
+ assertTrue(applicationData.isPresent());
+ assertEquals(id, applicationData.get().applicationId());
+ assertTrue(applicationData.get().activeSession().isPresent(), applicationData.get().toString());
+ assertEquals(2L, applicationData.get().activeSession().get().longValue());
+ assertTrue(applicationData.get().lastDeployedSession().isPresent());
+ assertEquals(3, applicationData.get().lastDeployedSession().get().longValue());
+
+ activateSession(db, id, 3, true);
+ // Can be read as json, active session and last deployed session present
+ applicationData = db.applicationData(id);
+ assertTrue(applicationData.isPresent());
+ assertEquals(id, applicationData.get().applicationId());
+ assertTrue(applicationData.get().activeSession().isPresent());
+ assertEquals(3, applicationData.get().activeSession().get().longValue());
+ assertTrue(applicationData.get().lastDeployedSession().isPresent());
+ assertEquals(3, applicationData.get().lastDeployedSession().get().longValue());
+
+ // createApplication should not overwrite the node if it already exists
+ db.createApplication(id, true);
+ // Can be read as json, active session and last deployed session present
+ applicationData = db.applicationData(id);
+ assertTrue(applicationData.isPresent());
+ assertEquals(id, applicationData.get().applicationId());
+ assertTrue(applicationData.get().activeSession().isPresent());
+ assertEquals(3, applicationData.get().activeSession().get().longValue());
+ assertTrue(applicationData.get().lastDeployedSession().isPresent());
+ assertEquals(3, applicationData.get().lastDeployedSession().get().longValue());
+ }
+
+
+ private void deleteApplication(ApplicationCuratorDatabase db, ApplicationId applicationId) {
+ try (var t = db.createDeleteTransaction(applicationId)) {
+ t.commit();
+ }
+ }
+
+ private void prepareSession(ApplicationCuratorDatabase db, ApplicationId applicationId, long sessionId, OptionalLong activeSessionId, boolean writeAsJson) {
+ try (var t = db.createWritePrepareTransaction(new CuratorTransaction(curator),
+ applicationId,
+ sessionId,
+ activeSessionId,
+ writeAsJson)) {
+ t.commit();
+ }
+ }
+
+ private void activateSession(ApplicationCuratorDatabase db, ApplicationId applicationId, long sessionId, boolean writeAsJson) {
+ try (var t = db.createWriteActiveTransaction(new CuratorTransaction(curator), applicationId, sessionId, writeAsJson)) {
+ t.commit();
+ }
+ }
+
}
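The assertions in testReadingAndWritingApplicationData boil down to simple bookkeeping: when the data is written as JSON, prepare records the last deployed session and activate records the active session. A hypothetical value type sketching those transitions (this is not the ApplicationData class under test, just an illustration of the expected states):

import java.util.OptionalLong;

record ApplicationStateSketch(OptionalLong activeSession, OptionalLong lastDeployedSession) {

    // Prepare session N: active session unchanged, last deployed becomes N.
    ApplicationStateSketch prepared(long sessionId) {
        return new ApplicationStateSketch(activeSession, OptionalLong.of(sessionId));
    }

    // Activate session N: active session becomes N.
    ApplicationStateSketch activated(long sessionId) {
        return new ApplicationStateSketch(OptionalLong.of(sessionId), lastDeployedSession);
    }

}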
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationMapperTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationMapperTest.java
index 57af219c813..fb5d6537a19 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationMapperTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationMapperTest.java
@@ -38,7 +38,7 @@ public class ApplicationMapperTest {
@Test
public void testGetForVersionReturnsCorrectVersion() {
- applicationMapper.register(appId, ApplicationSet.fromList(applications));
+ applicationMapper.register(appId, ApplicationVersions.fromList(applications));
assertEquals(applicationMapper.getForVersion(appId, Optional.of(vespaVersions.get(0)), Instant.now()), applications.get(0));
assertEquals(applicationMapper.getForVersion(appId, Optional.of(vespaVersions.get(1)), Instant.now()), applications.get(1));
assertEquals(applicationMapper.getForVersion(appId, Optional.of(vespaVersions.get(2)), Instant.now()), applications.get(2));
@@ -46,19 +46,19 @@ public class ApplicationMapperTest {
@Test
public void testGetForVersionReturnsLatestVersion() {
- applicationMapper.register(appId, ApplicationSet.fromList(applications));
+ applicationMapper.register(appId, ApplicationVersions.fromList(applications));
assertEquals(applicationMapper.getForVersion(appId, Optional.empty(), Instant.now()), applications.get(2));
}
@Test (expected = VersionDoesNotExistException.class)
public void testGetForVersionThrows() {
- applicationMapper.register(appId, ApplicationSet.fromList(Arrays.asList(applications.get(0), applications.get(2))));
+ applicationMapper.register(appId, ApplicationVersions.fromList(Arrays.asList(applications.get(0), applications.get(2))));
applicationMapper.getForVersion(appId, Optional.of(vespaVersions.get(1)), Instant.now());
}
@Test (expected = NotFoundException.class)
public void testGetForVersionThrows2() {
- applicationMapper.register(appId, ApplicationSet.from(applications.get(0)));
+ applicationMapper.register(appId, ApplicationVersions.from(applications.get(0)));
applicationMapper.getForVersion(new ApplicationId.Builder()
.tenant("different")
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationReindexingTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationReindexingTest.java
index 80ac28e9dbc..972bd86d752 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationReindexingTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationReindexingTest.java
@@ -41,6 +41,11 @@ public class ApplicationReindexingTest {
assertEquals(Optional.empty(),
reindexing.status("three", "a"));
+ assertEquals(Optional.empty(),
+ reindexing.lastReadiedAt());
+ assertEquals(Optional.of(Instant.ofEpochMilli(3)),
+ reindexing.withoutPending("two", "b").lastReadiedAt());
+
// Remove "a" in "one", and "one" entirely.
assertEquals(Optional.empty(),
reindexing.without("one", "a").status("one", "a"));
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationSetTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationVersionsTest.java
index 7629680b16f..d0a3bf4ec9b 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationSetTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/ApplicationVersionsTest.java
@@ -18,9 +18,9 @@ import static org.junit.Assert.assertEquals;
/**
* @author Vegard Sjonfjell
*/
-public class ApplicationSetTest {
+public class ApplicationVersionsTest {
- private ApplicationSet applicationSet;
+ private ApplicationVersions applicationVersions;
private final List<Version> vespaVersions = new ArrayList<>();
private final List<Application> applications = new ArrayList<>();
@@ -32,29 +32,29 @@ public class ApplicationSetTest {
@Test
public void testGetForVersionOrLatestReturnsCorrectVersion() {
- applicationSet = ApplicationSet.fromList(applications);
- assertEquals(applicationSet.getForVersionOrLatest(Optional.of(vespaVersions.get(0)), Instant.now()), applications.get(0));
- assertEquals(applicationSet.getForVersionOrLatest(Optional.of(vespaVersions.get(1)), Instant.now()), applications.get(1));
- assertEquals(applicationSet.getForVersionOrLatest(Optional.of(vespaVersions.get(2)), Instant.now()), applications.get(2));
+ applicationVersions = ApplicationVersions.fromList(applications);
+ assertEquals(applicationVersions.getForVersionOrLatest(Optional.of(vespaVersions.get(0)), Instant.now()), applications.get(0));
+ assertEquals(applicationVersions.getForVersionOrLatest(Optional.of(vespaVersions.get(1)), Instant.now()), applications.get(1));
+ assertEquals(applicationVersions.getForVersionOrLatest(Optional.of(vespaVersions.get(2)), Instant.now()), applications.get(2));
}
@Test
public void testGetForVersionOrLatestReturnsLatestVersion() {
- applicationSet = ApplicationSet.fromList(applications);
- assertEquals(applicationSet.getForVersionOrLatest(Optional.empty(), Instant.now()), applications.get(2));
+ applicationVersions = ApplicationVersions.fromList(applications);
+ assertEquals(applicationVersions.getForVersionOrLatest(Optional.empty(), Instant.now()), applications.get(2));
}
@Test (expected = VersionDoesNotExistException.class)
public void testGetForVersionOrLatestThrows() {
- applicationSet = ApplicationSet.fromList(Arrays.asList(applications.get(0), applications.get(2)));
- applicationSet.getForVersionOrLatest(Optional.of(vespaVersions.get(1)), Instant.now());
+ applicationVersions = ApplicationVersions.fromList(Arrays.asList(applications.get(0), applications.get(2)));
+ applicationVersions.getForVersionOrLatest(Optional.of(vespaVersions.get(1)), Instant.now());
}
@Test
public void testGetAllVersions() {
- applicationSet = ApplicationSet.fromList(applications);
+ applicationVersions = ApplicationVersions.fromList(applications);
assertEquals(List.of(Version.fromString("1.2.3"), Version.fromString("1.2.4"), Version.fromString("1.2.5")),
- applicationSet.getAllVersions(ApplicationId.defaultId()));
+ applicationVersions.versions(ApplicationId.defaultId()));
}
private Application createApplication(Version version) {
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/application/TenantApplicationsTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/application/TenantApplicationsTest.java
index 2ad04fdd572..81544f4ed61 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/application/TenantApplicationsTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/application/TenantApplicationsTest.java
@@ -26,6 +26,7 @@ import com.yahoo.vespa.config.server.tenant.TestTenantRepository;
import com.yahoo.vespa.curator.CompletionTimeoutException;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.model.VespaModel;
@@ -36,6 +37,7 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.xml.sax.SAXException;
+
import java.io.File;
import java.io.IOException;
import java.time.Clock;
@@ -120,11 +122,11 @@ public class TenantApplicationsTest {
TenantApplications repo = createZKAppRepo();
ApplicationId myapp = createApplicationId("myapp");
repo.createApplication(myapp);
- repo.createPutTransaction(myapp, 3).commit();
+ writeActiveTransaction(repo, myapp, 3);
String path = TenantRepository.getApplicationsPath(tenantName).append(myapp.serializedForm()).getAbsolute();
assertNotNull(curatorFramework.checkExists().forPath(path));
assertEquals("3", Utf8.toString(curatorFramework.getData().forPath(path)));
- repo.createPutTransaction(myapp, 5).commit();
+ writeActiveTransaction(repo, myapp, 5);
assertNotNull(curatorFramework.checkExists().forPath(path));
assertEquals("5", Utf8.toString(curatorFramework.getData().forPath(path)));
}
@@ -136,26 +138,26 @@ public class TenantApplicationsTest {
ApplicationId id2 = createApplicationId("myapp2");
repo.createApplication(id1);
repo.createApplication(id2);
- repo.createPutTransaction(id1, 1).commit();
- repo.createPutTransaction(id2, 1).commit();
+ writeActiveTransaction(repo, id1, 1);
+ writeActiveTransaction(repo, id2, 1);
assertEquals(2, repo.activeApplications().size());
- repo.createDeleteTransaction(id1).commit();
+ deleteApplication(repo, id1);
assertEquals(1, repo.activeApplications().size());
- repo.createDeleteTransaction(id2).commit();
+ deleteApplication(repo, id2);
assertEquals(0, repo.activeApplications().size());
}
- private static ApplicationSet createSet(ApplicationId id, Version version) throws IOException, SAXException {
+ private static ApplicationVersions createApplicationVersions(ApplicationId id, Version version) throws IOException, SAXException {
VespaModel model = new VespaModel(new NullConfigModelRegistry(),
new DeployState.Builder().wantedNodeVespaVersion(version)
.applicationPackage(FilesApplicationPackage.fromFile(new File("src/test/apps/app")))
.build());
- return ApplicationSet.from(new Application(model,
- new ServerCache(),
- 1,
- Version.emptyVersion,
- MetricUpdater.createTestUpdater(),
- id));
+ return ApplicationVersions.from(new Application(model,
+ new ServerCache(),
+ 1,
+ Version.emptyVersion,
+ MetricUpdater.createTestUpdater(),
+ id));
}
@Test
@@ -164,22 +166,22 @@ public class TenantApplicationsTest {
TenantApplications applications = createZKAppRepo(flagSource);
ApplicationId app1 = createApplicationId("myapp");
applications.createApplication(app1);
- applications.createPutTransaction(app1, 1).commit();
+ writeActiveTransaction(applications, app1, 1);
Version deployedVersion0 = Version.fromString("6.1");
- applications.activateApplication(createSet(app1, deployedVersion0), 1);
+ applications.activateApplication(createApplicationVersions(app1, deployedVersion0), 1);
assertTrue("Empty version is compatible", applications.compatibleWith(Optional.empty(), app1));
Version nodeVersion0 = Version.fromString("6.0");
assertTrue("Lower version is compatible", applications.compatibleWith(Optional.of(nodeVersion0), app1));
Version deployedVersion1 = Version.fromString("7.1");
- applications.activateApplication(createSet(app1, deployedVersion1), 1);
+ applications.activateApplication(createApplicationVersions(app1, deployedVersion1), 1);
assertTrue("New major is compatible", applications.compatibleWith(Optional.of(nodeVersion0), app1));
flagSource.withListFlag(PermanentFlags.INCOMPATIBLE_VERSIONS.id(), List.of("8"), String.class);
Version deployedVersion2 = Version.fromString("8.1");
- applications.activateApplication(createSet(app1, deployedVersion2), 1);
+ applications.activateApplication(createApplicationVersions(app1, deployedVersion2), 1);
assertFalse("New major is incompatible", applications.compatibleWith(Optional.of(nodeVersion0), app1));
Version nodeVersion1 = Version.fromString("8.0");
@@ -191,7 +193,7 @@ public class TenantApplicationsTest {
final AtomicInteger removed = new AtomicInteger(0);
@Override
- public void configActivated(ApplicationSet application) {
+ public void configActivated(ApplicationVersions application) {
activated.incrementAndGet();
}
@@ -203,19 +205,19 @@ public class TenantApplicationsTest {
@Test
public void testListConfigs() throws IOException, SAXException {
- TenantApplications applications = createTenantApplications(TenantName.defaultName(), new MockCurator(), configserverConfig, new MockConfigActivationListener(), new InMemoryFlagSource());
+ TenantApplications applications = createTenantApplications(TenantName.defaultName(), curator, configserverConfig, new MockConfigActivationListener(), new InMemoryFlagSource());
assertFalse(applications.hasApplication(ApplicationId.defaultId(), Optional.of(vespaVersion)));
VespaModel model = new VespaModel(FilesApplicationPackage.fromFile(new File("src/test/apps/app")));
ApplicationId applicationId = ApplicationId.defaultId();
applications.createApplication(applicationId);
- applications.createPutTransaction(applicationId, 1).commit();
- applications.activateApplication(ApplicationSet.from(new Application(model,
- new ServerCache(),
- 1,
- vespaVersion,
- MetricUpdater.createTestUpdater(),
- applicationId)),
+ writeActiveTransaction(applications, applicationId, 1);
+ applications.activateApplication(ApplicationVersions.from(new Application(model,
+ new ServerCache(),
+ 1,
+ vespaVersion,
+ MetricUpdater.createTestUpdater(),
+ applicationId)),
1);
Set<ConfigKey<?>> configNames = applications.listConfigs(applicationId, Optional.of(vespaVersion), false);
assertTrue(configNames.contains(new ConfigKey<>("sentinel", "hosts", "cloud.config")));
@@ -325,4 +327,16 @@ public class TenantApplicationsTest {
flagSource);
}
+ private static void deleteApplication(TenantApplications repo, ApplicationId applicationId) {
+ try (var transaction = repo.createDeleteTransaction(applicationId)) {
+ transaction.commit();
+ }
+ }
+
+ private void writeActiveTransaction(TenantApplications repo, ApplicationId applicationId, int sessionId) {
+ try (var transaction = new CuratorTransaction(curator)) {
+ repo.createWriteActiveTransaction(transaction, applicationId, sessionId).commit();
+ }
+ }
+
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java
deleted file mode 100644
index 2d42cb12076..00000000000
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClientTest.java
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.config.server.deploy;
-
-import com.google.common.collect.ImmutableSet;
-import com.yahoo.component.Version;
-import com.yahoo.config.application.api.ApplicationMetaData;
-import com.yahoo.config.application.api.ApplicationPackage;
-import com.yahoo.config.application.api.FileRegistry;
-import com.yahoo.config.model.application.provider.BaseDeployLogger;
-import com.yahoo.config.model.application.provider.DeployData;
-import com.yahoo.config.model.application.provider.FilesApplicationPackage;
-import com.yahoo.config.model.application.provider.MockFileRegistry;
-import com.yahoo.config.provision.AllocatedHosts;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.HostSpec;
-import com.yahoo.path.Path;
-import com.yahoo.text.Utf8;
-import com.yahoo.vespa.config.server.zookeeper.ZKApplicationPackage;
-import com.yahoo.vespa.curator.Curator;
-import com.yahoo.vespa.curator.mock.MockCurator;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-
-import static com.yahoo.config.provision.serialization.AllocatedHostsSerializer.fromJson;
-import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.DEFCONFIGS_ZK_SUBPATH;
-import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.META_ZK_PATH;
-import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USERAPP_ZK_SUBPATH;
-import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USER_DEFCONFIGS_ZK_SUBPATH;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Unit tests for ZooKeeperClient.
- *
- * @author hmusum
- */
-public class ZooKeeperClientTest {
-
- @Rule
- public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
- private Curator zk;
- private final Path appPath = Path.fromString("/1");
-
- @Before
- public void setupZK() throws IOException {
- zk = new MockCurator();
- ZooKeeperClient zkc = new ZooKeeperClient(zk, new BaseDeployLogger(), appPath);
- ApplicationPackage app = FilesApplicationPackage.fromFileWithDeployData(new File("src/test/apps/zkfeed"),
- new DeployData("/bar/baz",
- ApplicationId.from("default", "appName", "default"),
- 1345L,
- true,
- 3L,
- 2L));
- Map<Version, FileRegistry> fileRegistries = createFileRegistries();
- app.writeMetaData();
- zkc.initialize();
- zkc.writeApplicationPackage(app);
- zkc.write(fileRegistries);
- }
-
- private Map<Version, FileRegistry> createFileRegistries() {
- FileRegistry a = new MockFileRegistry();
- a.addFile("fileA");
- FileRegistry b = new MockFileRegistry();
- b.addFile("fileB");
- Map<Version, FileRegistry> registryMap = new HashMap<>();
- registryMap.put(new Version(1, 2, 3), a);
- registryMap.put(new Version(3, 2, 1), b);
- return registryMap;
- }
-
- @Test
- public void testInitZooKeeper() {
- Curator zk = new MockCurator();
- BaseDeployLogger logger = new BaseDeployLogger();
- long generation = 1L;
- ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zk, logger, Path.fromString("/1"));
- zooKeeperClient.initialize();
- Path appPath = Path.fromString("/");
- assertEquals(1, zk.getChildren(appPath).size());
- Path currentAppPath = appPath.append(String.valueOf(generation));
- assertTrue(zk.exists(currentAppPath));
- assertTrue(zk.exists(currentAppPath.append(DEFCONFIGS_ZK_SUBPATH.replaceFirst("/", ""))));
- assertEquals(4, zk.getChildren(currentAppPath).size());
- }
-
- @Test
- public void testFeedDefFilesToZooKeeper() {
- Path defsPath = appPath.append(DEFCONFIGS_ZK_SUBPATH);
- assertTrue(zk.exists(appPath.append(DEFCONFIGS_ZK_SUBPATH.replaceFirst("/", ""))));
- List<String> children = zk.getChildren(defsPath);
- assertEquals(defsPath + " children", 1, children.size());
- Collections.sort(children);
- assertEquals("a.b.test2", children.get(0));
-
- assertTrue(zk.exists(appPath.append(USER_DEFCONFIGS_ZK_SUBPATH.replaceFirst("/", ""))));
- Path userDefsPath = appPath.append(USER_DEFCONFIGS_ZK_SUBPATH);
- children = zk.getChildren(userDefsPath);
- assertEquals(1, children.size());
- Collections.sort(children);
- assertEquals("a.b.test2", children.get(0));
- }
-
- @Test
- public void testFeedAppMetaDataToZooKeeper() {
- assertTrue(zk.exists(appPath.append(META_ZK_PATH)));
- ApplicationMetaData metaData = ApplicationMetaData.fromJsonString(
- Utf8.toString(zk.getData(appPath.append(META_ZK_PATH)).get()));
- assertTrue(metaData.getChecksum().length() > 0);
- assertTrue(metaData.isInternalRedeploy());
- assertEquals("/bar/baz", metaData.getDeployPath());
- assertEquals(1345, metaData.getDeployTimestamp().longValue());
- assertEquals(3, metaData.getGeneration().longValue());
- assertEquals(2, metaData.getPreviousActiveGeneration());
- }
-
- @Test
- public void testVersionedFileRegistry() {
- Path fileRegPath = appPath.append(ZKApplicationPackage.fileRegistryNode);
- assertTrue(zk.exists(fileRegPath));
- assertTrue(zk.exists(fileRegPath.append("/1.2.3")));
- assertTrue(zk.exists(fileRegPath.append("/3.2.1")));
- // assertNull("Data at " + fileRegPath, zk.getData(fileRegPath)); Not null any more .. hm
- }
-
- @Test
- public void include_dirs_are_written_to_ZK() {
- assertTrue(zk.exists(appPath.append(USERAPP_ZK_SUBPATH).append("dir1").append("default.xml")));
- assertTrue(zk.exists(appPath.append(USERAPP_ZK_SUBPATH).append("nested").append("dir2").append("chain2.xml")));
- assertTrue(zk.exists(appPath.append(USERAPP_ZK_SUBPATH).append("nested").append("dir2").append("chain3.xml")));
- }
-
- @Test
- public void search_chain_dir_written_to_ZK() {
- assertTrue(zk.exists(appPath().append("search").append("chains").append("dir1").append("default.xml")));
- assertTrue(zk.exists(appPath().append("search").append("chains").append("dir2").append("chain2.xml")));
- assertTrue(zk.exists(appPath().append("search").append("chains").append("dir2").append("chain3.xml")));
- }
-
- private Path appPath() {
- return appPath.append(USERAPP_ZK_SUBPATH);
- }
-
- @Test
- public void testWritingHostNamesToZooKeeper() throws IOException {
- Curator zk = new MockCurator();
- BaseDeployLogger logger = new BaseDeployLogger();
- Path app = Path.fromString("/1");
- ZooKeeperClient zooKeeperClient = new ZooKeeperClient(zk, logger, app);
- zooKeeperClient.initialize();
- HostSpec host1 = new HostSpec("host1.yahoo.com", Optional.empty());
- HostSpec host2 = new HostSpec("host2.yahoo.com", Optional.empty());
- ImmutableSet<HostSpec> hosts = ImmutableSet.of(host1, host2);
- zooKeeperClient.write(AllocatedHosts.withHosts(hosts));
- Path hostsPath = app.append(ZKApplicationPackage.allocatedHostsNode);
- assertTrue(zk.exists(hostsPath));
-
- AllocatedHosts deserialized = fromJson(zk.getData(hostsPath).get());
- assertEquals(hosts, deserialized.getHosts());
- }
-
-}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployerTest.java
index 1bc980c9099..17344e94c51 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/deploy/ZooKeeperDeployerTest.java
@@ -1,16 +1,26 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.deploy;
+import com.google.common.collect.ImmutableSet;
import com.yahoo.component.Version;
+import com.yahoo.config.application.api.ApplicationMetaData;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.config.application.api.FileRegistry;
+import com.yahoo.config.model.application.provider.BaseDeployLogger;
+import com.yahoo.config.model.application.provider.DeployData;
import com.yahoo.config.model.application.provider.FilesApplicationPackage;
import com.yahoo.config.model.application.provider.MockFileRegistry;
import com.yahoo.config.provision.AllocatedHosts;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.HostSpec;
import com.yahoo.io.IOUtils;
import com.yahoo.path.Path;
+import com.yahoo.text.Utf8;
+import com.yahoo.vespa.config.server.zookeeper.ZKApplicationPackage;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
+import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
@@ -18,8 +28,20 @@ import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
import java.util.logging.Level;
+import static com.yahoo.config.provision.serialization.AllocatedHostsSerializer.fromJson;
+import static com.yahoo.vespa.config.server.session.SessionZooKeeperClient.getSessionPath;
+import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.DEFCONFIGS_ZK_SUBPATH;
+import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.META_ZK_PATH;
+import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USERAPP_ZK_SUBPATH;
+import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USER_DEFCONFIGS_ZK_SUBPATH;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -28,10 +50,31 @@ import static org.junit.Assert.fail;
*/
public class ZooKeeperDeployerTest {
+ private Curator zk;
+ private final Path appPath = Path.fromString("/1");
+
@Rule
public TemporaryFolder folder = new TemporaryFolder();
private static final String defFile = "test2.def";
+ @Before
+ public void setupZK() throws IOException {
+ zk = new MockCurator();
+ ZooKeeperDeployer.Client zkc = new ZooKeeperDeployer.Client(zk, new BaseDeployLogger(), appPath);
+ ApplicationPackage app = FilesApplicationPackage.fromFileWithDeployData(new File("src/test/apps/zkfeed"),
+ new DeployData("/bar/baz",
+ ApplicationId.from("default", "appName", "default"),
+ 1345L,
+ true,
+ 3L,
+ 2L));
+ Map<Version, FileRegistry> fileRegistries = createFileRegistries();
+ app.writeMetaData();
+ zkc.initialize();
+ zkc.writeApplicationPackage(app);
+ zkc.write(fileRegistries);
+ }
+
@Test
public void require_that_deployer_is_initialized() throws IOException {
Curator curator = new MockCurator();
@@ -43,17 +86,117 @@ public class ZooKeeperDeployerTest {
e.printStackTrace();
fail();
}
- deploy(FilesApplicationPackage.fromFile(new File("src/test/apps/content")), curator, Path.fromString("/1"));
- deploy(FilesApplicationPackage.fromFile(new File("src/test/apps/content")), curator, Path.fromString("/2"));
+ deploy(FilesApplicationPackage.fromFile(new File("src/test/apps/content")), curator, 1);
+ deploy(FilesApplicationPackage.fromFile(new File("src/test/apps/content")), curator, 2);
+ }
+
+ private Map<Version, FileRegistry> createFileRegistries() {
+ FileRegistry a = new MockFileRegistry();
+ a.addFile("fileA");
+ FileRegistry b = new MockFileRegistry();
+ b.addFile("fileB");
+ Map<Version, FileRegistry> registryMap = new HashMap<>();
+ registryMap.put(new Version(1, 2, 3), a);
+ registryMap.put(new Version(3, 2, 1), b);
+ return registryMap;
+ }
+
+ @Test
+ public void testInitZooKeeper() {
+ Curator zk = new MockCurator();
+ BaseDeployLogger logger = new BaseDeployLogger();
+ long generation = 1L;
+ ZooKeeperDeployer.Client client = new ZooKeeperDeployer.Client(zk, logger, Path.fromString("/1"));
+ client.initialize();
+ Path appPath = Path.fromString("/");
+ assertEquals(1, zk.getChildren(appPath).size());
+ Path currentAppPath = appPath.append(String.valueOf(generation));
+ assertTrue(zk.exists(currentAppPath));
+ assertTrue(zk.exists(currentAppPath.append(DEFCONFIGS_ZK_SUBPATH.replaceFirst("/", ""))));
+ assertEquals(4, zk.getChildren(currentAppPath).size());
+ }
+
+ @Test
+ public void testFeedDefFilesToZooKeeper() {
+ Path defsPath = appPath.append(DEFCONFIGS_ZK_SUBPATH);
+ assertTrue(zk.exists(appPath.append(DEFCONFIGS_ZK_SUBPATH.replaceFirst("/", ""))));
+ List<String> children = zk.getChildren(defsPath);
+ assertEquals(defsPath + " children", 1, children.size());
+ Collections.sort(children);
+ assertEquals("a.b.test2", children.get(0));
+
+ assertTrue(zk.exists(appPath.append(USER_DEFCONFIGS_ZK_SUBPATH.replaceFirst("/", ""))));
+ Path userDefsPath = appPath.append(USER_DEFCONFIGS_ZK_SUBPATH);
+ children = zk.getChildren(userDefsPath);
+ assertEquals(1, children.size());
+ Collections.sort(children);
+ assertEquals("a.b.test2", children.get(0));
+ }
+
+ @Test
+ public void testFeedAppMetaDataToZooKeeper() {
+ assertTrue(zk.exists(appPath.append(META_ZK_PATH)));
+ ApplicationMetaData metaData = ApplicationMetaData.fromJsonString(
+ Utf8.toString(zk.getData(appPath.append(META_ZK_PATH)).get()));
+ assertTrue(metaData.getChecksum().length() > 0);
+ assertTrue(metaData.isInternalRedeploy());
+ assertEquals("/bar/baz", metaData.getDeployPath());
+ assertEquals(1345, metaData.getDeployTimestamp().longValue());
+ assertEquals(3, metaData.getGeneration().longValue());
+ assertEquals(2, metaData.getPreviousActiveGeneration());
+ }
+
+ @Test
+ public void testVersionedFileRegistry() {
+ Path fileRegPath = appPath.append(ZKApplicationPackage.fileRegistryNode);
+ assertTrue(zk.exists(fileRegPath));
+ assertTrue(zk.exists(fileRegPath.append("/1.2.3")));
+ assertTrue(zk.exists(fileRegPath.append("/3.2.1")));
+ // assertNull("Data at " + fileRegPath, zk.getData(fileRegPath)); Not null any more .. hm
+ }
+
+ @Test
+ public void include_dirs_are_written_to_ZK() {
+ assertTrue(zk.exists(appPath.append(USERAPP_ZK_SUBPATH).append("dir1").append("default.xml")));
+ assertTrue(zk.exists(appPath.append(USERAPP_ZK_SUBPATH).append("nested").append("dir2").append("chain2.xml")));
+ assertTrue(zk.exists(appPath.append(USERAPP_ZK_SUBPATH).append("nested").append("dir2").append("chain3.xml")));
+ }
+
+ @Test
+ public void search_chain_dir_written_to_ZK() {
+ assertTrue(zk.exists(appPath().append("search").append("chains").append("dir1").append("default.xml")));
+ assertTrue(zk.exists(appPath().append("search").append("chains").append("dir2").append("chain2.xml")));
+ assertTrue(zk.exists(appPath().append("search").append("chains").append("dir2").append("chain3.xml")));
+ }
+
+ private Path appPath() {
+ return appPath.append(USERAPP_ZK_SUBPATH);
+ }
+
+ @Test
+ public void testWritingHostNamesToZooKeeper() throws IOException {
+ Curator zk = new MockCurator();
+ BaseDeployLogger logger = new BaseDeployLogger();
+ Path app = Path.fromString("/1");
+ ZooKeeperDeployer.Client client = new ZooKeeperDeployer.Client(zk, logger, app);
+ client.initialize();
+ HostSpec host1 = new HostSpec("host1.yahoo.com", Optional.empty());
+ HostSpec host2 = new HostSpec("host2.yahoo.com", Optional.empty());
+ ImmutableSet<HostSpec> hosts = ImmutableSet.of(host1, host2);
+ client.write(AllocatedHosts.withHosts(hosts));
+ Path hostsPath = app.append(ZKApplicationPackage.allocatedHostsNode);
+ assertTrue(zk.exists(hostsPath));
+
+ AllocatedHosts deserialized = fromJson(zk.getData(hostsPath).get());
+ assertEquals(hosts, deserialized.getHosts());
}
- public void deploy(ApplicationPackage applicationPackage, Curator curator, Path appPath) throws IOException {
- MockDeployLogger logger = new MockDeployLogger();
- ZooKeeperClient client = new ZooKeeperClient(curator, logger, appPath);
- ZooKeeperDeployer deployer = new ZooKeeperDeployer(client);
+ public void deploy(ApplicationPackage applicationPackage, Curator curator, long sessionId) throws IOException {
+ ZooKeeperDeployer deployer = new ZooKeeperDeployer(curator, new MockDeployLogger(), applicationPackage.getApplicationId(), sessionId);
+ deployer.deploy(applicationPackage, Map.of(new Version(1, 0, 0), new MockFileRegistry()), AllocatedHosts.withHosts(Set.of()));
- deployer.deploy(applicationPackage, Collections.singletonMap(new Version(1, 0, 0), new MockFileRegistry()), AllocatedHosts.withHosts(Collections.emptySet()));
- assertTrue(curator.exists(appPath));
+ Path sessionPath = getSessionPath(applicationPackage.getApplicationId().tenant(), sessionId);
+ assertTrue(curator.exists(sessionPath));
}
private static class MockDeployLogger implements DeployLogger {
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java
index 649d382ddb6..040df208323 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java
@@ -37,9 +37,9 @@ public class FileDirectoryTest {
FileReference foo = createFile("foo");
FileReference bar = createFile("bar");
- assertTrue(fileDirectory.getFile(foo).exists());
+ assertTrue(fileDirectory.getFile(foo).get().exists());
assertEquals("ea315b7acac56246", foo.value());
- assertTrue(fileDirectory.getFile(bar).exists());
+ assertTrue(fileDirectory.getFile(bar).get().exists());
assertEquals("2b8e97f15c854e1d", bar.value());
}
@@ -49,7 +49,7 @@ public class FileDirectoryTest {
File subDirectory = new File(temporaryFolder.getRoot(), subdirName);
createFileInSubDir(subDirectory, "foo", "some content");
FileReference fileReference = fileDirectory.addFile(subDirectory);
- File dir = fileDirectory.getFile(fileReference);
+ File dir = fileDirectory.getFile(fileReference).get();
assertTrue(dir.exists());
assertTrue(new File(dir, "foo").exists());
assertFalse(new File(dir, "doesnotexist").exists());
@@ -58,7 +58,7 @@ public class FileDirectoryTest {
// Change contents of a file, file reference value should change
createFileInSubDir(subDirectory, "foo", "new content");
FileReference fileReference2 = fileDirectory.addFile(subDirectory);
- dir = fileDirectory.getFile(fileReference2);
+ dir = fileDirectory.getFile(fileReference2).get();
assertTrue(new File(dir, "foo").exists());
assertNotEquals(fileReference + " should not be equal to " + fileReference2, fileReference, fileReference2);
assertEquals("e5d4b3fe5ee3ede3", fileReference2.value());
@@ -66,7 +66,7 @@ public class FileDirectoryTest {
// Add a file, should be available and file reference should have another value
createFileInSubDir(subDirectory, "bar", "some other content");
FileReference fileReference3 = fileDirectory.addFile(subDirectory);
- dir = fileDirectory.getFile(fileReference3);
+ dir = fileDirectory.getFile(fileReference3).get();
assertTrue(new File(dir, "foo").exists());
assertTrue(new File(dir, "bar").exists());
assertEquals("894bced3fc9d199b", fileReference3.value());
@@ -78,7 +78,7 @@ public class FileDirectoryTest {
File subDirectory = new File(temporaryFolder.getRoot(), subdirName);
createFileInSubDir(subDirectory, "foo", "some content");
FileReference fileReference = fileDirectory.addFile(subDirectory);
- File dir = fileDirectory.getFile(fileReference);
+ File dir = fileDirectory.getFile(fileReference).get();
assertTrue(dir.exists());
File foo = new File(dir, "foo");
assertTrue(foo.exists());
@@ -90,7 +90,7 @@ public class FileDirectoryTest {
try { Thread.sleep(1000);} catch (InterruptedException e) {/*ignore */} // Needed since we have timestamp resolution of 1 second
Files.delete(Paths.get(fileDirectory.getPath(fileReference)).resolve("subdir").resolve("foo"));
fileReference = fileDirectory.addFile(subDirectory);
- dir = fileDirectory.getFile(fileReference);
+ dir = fileDirectory.getFile(fileReference).get();
File foo2 = new File(dir, "foo");
assertTrue(dir.exists());
assertTrue(foo2.exists());
@@ -107,7 +107,7 @@ public class FileDirectoryTest {
File subDirectory = new File(temporaryFolder.getRoot(), subdirName);
createFileInSubDir(subDirectory, "foo", "some content");
FileReference fileReference = fileDirectory.addFile(subDirectory);
- File dir = fileDirectory.getFile(fileReference);
+ File dir = fileDirectory.getFile(fileReference).get();
assertTrue(dir.exists());
File foo = new File(dir, "foo");
assertTrue(foo.exists());
@@ -119,7 +119,7 @@ public class FileDirectoryTest {
// Add a file that already exists, nothing should happen
createFileInSubDir(subDirectory, "foo", "some content"); // same as before, nothing should happen
FileReference fileReference3 = fileDirectory.addFile(subDirectory);
- dir = fileDirectory.getFile(fileReference3);
+ dir = fileDirectory.getFile(fileReference3).get();
assertTrue(new File(dir, "foo").exists());
assertEquals("bebc5a1aee74223d", fileReference3.value()); // same hash
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
index 49458acd60b..373b39c8365 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
@@ -29,6 +29,7 @@ import static com.yahoo.vespa.filedistribution.FileReferenceData.CompressionType
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
public class FileServerTest {
@@ -60,9 +61,9 @@ public class FileServerTest {
String dir = "123";
assertFalse(fileServer.hasFile(dir));
FileReferenceDownload foo = new FileReferenceDownload(new FileReference(dir), "test");
- assertFalse(fileServer.hasFileDownloadIfNeeded(foo));
+ assertFalse(fileServer.getFileDownloadIfNeeded(foo).isPresent());
writeFile(dir);
- assertTrue(fileServer.hasFileDownloadIfNeeded(foo));
+ assertTrue(fileServer.getFileDownloadIfNeeded(foo).isPresent());
}
@Test
@@ -78,7 +79,9 @@ public class FileServerTest {
File dir = getFileServerRootDir();
IOUtils.writeFile(dir + "/12y/f1", "dummy-data", true);
CompletableFuture<byte []> content = new CompletableFuture<>();
- fileServer.startFileServing(new FileReference("12y"), new FileReceiver(content), Set.of(gzip));
+ FileReference fileReference = new FileReference("12y");
+ var file = fileServer.getFileDownloadIfNeeded(new FileReferenceDownload(fileReference, "test"));
+ fileServer.startFileServing(fileReference, file.get(), new FileReceiver(content), Set.of(gzip));
assertEquals(new String(content.get()), "dummy-data");
}
@@ -89,7 +92,9 @@ public class FileServerTest {
File dir = getFileServerRootDir();
IOUtils.writeFile(dir + "/subdir/12z/f1", "dummy-data-2", true);
CompletableFuture<byte []> content = new CompletableFuture<>();
- fileServer.startFileServing(new FileReference("subdir"), new FileReceiver(content), Set.of(gzip, lz4));
+ FileReference fileReference = new FileReference("subdir");
+ var file = fileServer.getFileDownloadIfNeeded(new FileReferenceDownload(fileReference, "test"));
+ fileServer.startFileServing(fileReference, file.get(), new FileReceiver(content), Set.of(gzip, lz4));
// Decompress with lz4 and check contents
var compressor = new FileReferenceCompressor(FileReferenceData.Type.compressed, lz4);
@@ -130,6 +135,27 @@ public class FileServerTest {
assertEquals(1, fileServer.downloader().connectionPool().getSize());
}
+ @Test
+ public void requireThatErrorsAreHandled() throws IOException, ExecutionException, InterruptedException {
+ File dir = getFileServerRootDir();
+ IOUtils.writeFile(dir + "/12y/f1", "dummy-data", true);
+ CompletableFuture<byte []> content = new CompletableFuture<>();
+ FailingFileReceiver fileReceiver = new FailingFileReceiver(content);
+
+ // Should fail the first time, see FailingFileReceiver
+ FileReference reference = new FileReference("12y");
+ var file = fileServer.getFileDownloadIfNeeded(new FileReferenceDownload(reference, "test"));
+ try {
+ fileServer.startFileServing(reference, file.get(), fileReceiver, Set.of(gzip));
+ fail("Should have failed");
+ } catch (RuntimeException e) {
+ // expected
+ }
+
+ fileServer.startFileServing(reference, file.get(), fileReceiver, Set.of(gzip));
+ assertEquals(new String(content.get()), "dummy-data");
+ }
+
private void writeFile(String dir) throws IOException {
File rootDir = getFileServerRootDir();
IOUtils.createDirectory(rootDir + "/" + dir);
@@ -153,6 +179,23 @@ public class FileServerTest {
}
}
+ private static class FailingFileReceiver implements FileServer.Receiver {
+ final CompletableFuture<byte []> content;
+ int counter = 0;
+ FailingFileReceiver(CompletableFuture<byte []> content) {
+ this.content = content;
+ }
+ @Override
+ public void receive(FileReferenceData fileData, FileServer.ReplayStatus status) {
+ counter++;
+ if (counter <= 1) {
+ throw new RuntimeException("Failed to receive file");
+ } else {
+ this.content.complete(fileData.content().array());
+ }
+ }
+ }
+
private File getFileServerRootDir() {
return fileServer.getRootDir().getRoot();
}
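Note: startFileServing no longer looks up the file itself; as the tests above show, callers first resolve the download with getFileDownloadIfNeeded and then hand the result to startFileServing. A minimal sketch of that call sequence, assuming fileServer, receiver and the statically imported gzip are already in scope (names are placeholders):

    FileReference ref = new FileReference("12y");
    var download = fileServer.getFileDownloadIfNeeded(new FileReferenceDownload(ref, "example"));
    // Serve only when the reference resolves to a local file.
    download.ifPresent(file -> fileServer.startFileServing(ref, file, receiver, Set.of(gzip)));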
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/HostHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/HostHandlerTest.java
index ba1d69c13dd..6a4099bc45a 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/HostHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/HostHandlerTest.java
@@ -66,7 +66,7 @@ public class HostHandlerTest {
public void require_correct_tenant_and_application_for_hostname() throws Exception {
ApplicationId applicationId = applicationId();
applicationRepository.deploy(testApp, new PrepareParams.Builder().applicationId(applicationId).build());
- String hostname = applicationRepository.getActiveApplicationSet(applicationId).get().getAllHosts().iterator().next();
+ String hostname = applicationRepository.getActiveApplicationSet(applicationId).get().allHosts().iterator().next();
assertApplicationForHost(hostname, applicationId);
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ListApplicationsHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ListApplicationsHandlerTest.java
index 76790e6264d..841867921da 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ListApplicationsHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/http/v2/ListApplicationsHandlerTest.java
@@ -15,6 +15,7 @@ import com.yahoo.vespa.config.server.application.TenantApplications;
import com.yahoo.vespa.config.server.http.SessionHandlerTest;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.tenant.TestTenantRepository;
+import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -38,6 +39,7 @@ public class ListApplicationsHandlerTest {
private TenantApplications applicationRepo, applicationRepo2;
private ListApplicationsHandler handler;
+ private TenantRepository tenantRepository;
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
@@ -48,7 +50,7 @@ public class ListApplicationsHandlerTest {
.configServerDBDir(temporaryFolder.newFolder().getAbsolutePath())
.configDefinitionsDir(temporaryFolder.newFolder().getAbsolutePath())
.build();
- TenantRepository tenantRepository = new TestTenantRepository.Builder()
+ tenantRepository = new TestTenantRepository.Builder()
.withConfigserverConfig(configserverConfig)
.build();
tenantRepository.addTenant(mytenant);
@@ -67,12 +69,12 @@ public class ListApplicationsHandlerTest {
"[]");
ApplicationId id1 = ApplicationId.from("mytenant", "foo", "quux");
applicationRepo.createApplication(id1);
- applicationRepo.createPutTransaction(id1, 1).commit();
+ writeActiveTransaction(applicationRepo, id1, 1);
assertResponse(url, Response.Status.OK,
"[\"" + url + "foo/environment/dev/region/us-east/instance/quux\"]");
ApplicationId id2 = ApplicationId.from("mytenant", "bali", "quux");
applicationRepo.createApplication(id2);
- applicationRepo.createPutTransaction(id2, 1).commit();
+ writeActiveTransaction(applicationRepo, id2, 1);
assertResponse(url, Response.Status.OK,
"[\"" + url + "bali/environment/dev/region/us-east/instance/quux\"," +
"\"" + url + "foo/environment/dev/region/us-east/instance/quux\"]"
@@ -98,10 +100,10 @@ public class ListApplicationsHandlerTest {
public void require_that_listing_works_with_multiple_tenants() throws Exception {
ApplicationId id1 = ApplicationId.from("mytenant", "foo", "quux");
applicationRepo.createApplication(id1);
- applicationRepo.createPutTransaction(id1, 1).commit();
+ writeActiveTransaction(applicationRepo, id1, 1);
ApplicationId id2 = ApplicationId.from("foobar", "quux", "foo");
applicationRepo2.createApplication(id2);
- applicationRepo2.createPutTransaction(id2, 1).commit();
+ writeActiveTransaction(applicationRepo2, id2, 1);
String url = "http://myhost:14000/application/v2/tenant/mytenant/application/";
assertResponse(url, Response.Status.OK,
"[\"" + url + "foo/environment/dev/region/us-east/instance/quux\"]");
@@ -124,4 +126,11 @@ public class ListApplicationsHandlerTest {
assertEquals(expectedStatus, response.getStatus());
assertEquals(expectedResponse, SessionHandlerTest.getRenderedString(response));
}
+
+ private void writeActiveTransaction(TenantApplications repo, ApplicationId id1, int x) {
+ try (var transaction = new CuratorTransaction(tenantRepository.getCurator())) {
+ repo.createWriteActiveTransaction(transaction, id1, x).commit();
+ }
+ }
+
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcServerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcServerTest.java
index 9190cbc0d8a..8db86aa4dec 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcServerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcServerTest.java
@@ -25,7 +25,7 @@ import com.yahoo.vespa.config.protocol.Trace;
import com.yahoo.vespa.config.server.ApplicationRepository;
import com.yahoo.vespa.config.server.ServerCache;
import com.yahoo.vespa.config.server.application.Application;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.monitoring.MetricUpdater;
import com.yahoo.vespa.config.server.session.PrepareParams;
import com.yahoo.vespa.filedistribution.LazyFileReferenceData;
@@ -142,7 +142,7 @@ public class RpcServerTest {
new Version(1, 2, 3),
MetricUpdater.createTestUpdater(),
applicationId);
- ApplicationSet appSet = ApplicationSet.from(app);
+ ApplicationVersions appSet = ApplicationVersions.from(app);
tester.rpcServer().configActivated(appSet);
ConfigKey<?> key = new ConfigKey<>(LbServicesConfig.class, "*");
JRTClientConfigRequest clientReq = createRequest(new RawConfig(key, LbServicesConfig.getDefMd5()));
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcTester.java b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcTester.java
index 8770970308a..54f6cf73356 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcTester.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/RpcTester.java
@@ -22,12 +22,12 @@ import com.yahoo.vespa.config.server.filedistribution.FileServer;
import com.yahoo.vespa.config.server.host.HostRegistry;
import com.yahoo.vespa.config.server.monitoring.Metrics;
import com.yahoo.vespa.config.server.rpc.security.NoopRpcAuthorizer;
-import com.yahoo.vespa.config.server.tenant.Tenant;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.tenant.TestTenantRepository;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import org.junit.After;
import org.junit.rules.TemporaryFolder;
+
import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
@@ -53,13 +53,11 @@ public class RpcTester implements AutoCloseable {
private Supervisor sup;
private final ApplicationId applicationId;
private final TenantName tenantName;
- private final TenantRepository tenantRepository;
final HostRegistry hostRegistry = new HostRegistry();
private final ApplicationRepository applicationRepository;
private final List<Integer> allocatedPorts = new ArrayList<>();
private final TemporaryFolder temporaryFolder;
- private final ConfigserverConfig configserverConfig;
RpcTester(ApplicationId applicationId, TemporaryFolder temporaryFolder) throws InterruptedException, IOException {
this(applicationId, temporaryFolder, new ConfigserverConfig.Builder());
@@ -69,20 +67,39 @@ public class RpcTester implements AutoCloseable {
this.temporaryFolder = temporaryFolder;
this.applicationId = applicationId;
this.tenantName = applicationId.tenant();
- int port = allocatePort();
- spec = createSpec(port);
- configBuilder.rpcport(port)
- .configServerDBDir(temporaryFolder.newFolder().getAbsolutePath())
- .configDefinitionsDir(temporaryFolder.newFolder().getAbsolutePath())
- .fileReferencesDir(temporaryFolder.newFolder().getAbsolutePath());
- configserverConfig = new ConfigserverConfig(configBuilder);
- rpcServer = createRpcServer(configserverConfig);
- tenantRepository = new TestTenantRepository.Builder()
- .withHostRegistry(hostRegistry)
- .withConfigserverConfig(configserverConfig)
- .build();
- tenantRepository.addTenant(tenantName);
- startRpcServer();
+
+ Spec tempSpec;
+ ConfigserverConfig tempConfig;
+ RpcServer tempRpcServer;
+ TenantRepository tempTenantRepository;
+
+ int iterations = 0;
+ // Need to loop because we might get a port that is in use
+ do {
+ int port = allocatePort();
+ tempSpec = createSpec(port);
+ configBuilder.rpcport(port)
+ .configServerDBDir(temporaryFolder.newFolder().getAbsolutePath())
+ .configDefinitionsDir(temporaryFolder.newFolder().getAbsolutePath())
+ .fileReferencesDir(temporaryFolder.newFolder().getAbsolutePath());
+ tempConfig = new ConfigserverConfig(configBuilder);
+ tempRpcServer = createRpcServer(tempConfig);
+ tempTenantRepository = new TestTenantRepository.Builder()
+ .withHostRegistry(hostRegistry)
+ .withConfigserverConfig(tempConfig)
+ .build();
+ tempTenantRepository.addTenant(tenantName);
+ startRpcServer(tempRpcServer, tempTenantRepository, tempSpec);
+ iterations++;
+ } while (!tempRpcServer.isRunning() && iterations < 10);
+
+ assertTrue("server is not running", tempRpcServer.isRunning());
+
+ spec = tempSpec;
+ ConfigserverConfig configserverConfig = tempConfig;
+ rpcServer = tempRpcServer;
+ TenantRepository tenantRepository = tempTenantRepository;
+
applicationRepository = new ApplicationRepository.Builder()
.withTenantRepository(tenantRepository)
.withConfigserverConfig(configserverConfig)
@@ -90,11 +107,12 @@ public class RpcTester implements AutoCloseable {
.build();
}
- public void close() {
+ public void close() throws InterruptedException {
rpcServer.stop();
for (Integer port : allocatedPorts) {
PortRangeAllocator.releasePort(port);
}
+ t.join();
}
private int allocatePort() throws InterruptedException {
@@ -107,7 +125,7 @@ public class RpcTester implements AutoCloseable {
InMemoryFlagSource flagSource = new InMemoryFlagSource();
RpcServer rpcServer = new RpcServer(config,
new SuperModelRequestHandler(new TestConfigDefinitionRepo(),
- configserverConfig,
+ config,
new SuperModelManager(
config,
Zone.defaultZone(),
@@ -115,37 +133,31 @@ public class RpcTester implements AutoCloseable {
flagSource)),
Metrics.createTestMetrics(),
hostRegistry,
- new FileServer(configserverConfig, new FileDirectory(temporaryFolder.newFolder())),
+ new FileServer(config, new FileDirectory(temporaryFolder.newFolder())),
new NoopRpcAuthorizer(),
new RpcRequestHandlerProvider());
rpcServer.setUpGetConfigHandlers();
return rpcServer;
}
- void startRpcServer() {
+ void startRpcServer(RpcServer rpcServer, TenantRepository tenantRepository, Spec spec) {
hostRegistry.update(applicationId, List.of("localhost"));
rpcServer.onTenantCreate(tenantRepository.getTenant(tenantName));
t = new Thread(rpcServer);
t.start();
sup = new Supervisor(new Transport());
- pingServer();
- }
-
- @After
- public void stopRpc() throws InterruptedException {
- rpcServer.stop();
- t.join();
+ pingServer(spec);
}
private Spec createSpec(int port) {
return new Spec("tcp/localhost:" + port);
}
- private void pingServer() {
+ private void pingServer(Spec spec) {
long endTime = System.currentTimeMillis() + 60_000;
Request req = new Request("ping");
while (System.currentTimeMillis() < endTime) {
- performRequest(req);
+ performRequest(req, spec);
if (!req.isError() && req.returnValues().size() > 0 && req.returnValues().get(0).asInt32() == 0) {
break;
}
@@ -157,6 +169,10 @@ public class RpcTester implements AutoCloseable {
}
void performRequest(Request req) {
+ performRequest(req, spec);
+ }
+
+ void performRequest(Request req, Spec spec) {
clock.advance(Duration.ofMillis(10));
sup.connect(spec).invokeSync(req, Duration.ofSeconds(10));
}
@@ -165,8 +181,6 @@ public class RpcTester implements AutoCloseable {
return rpcServer;
}
- Tenant tenant() { return tenantRepository.getTenant(tenantName); }
-
public ApplicationRepository applicationRepository() { return applicationRepository; }
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
index 52d5ba16562..0158aa1961d 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
@@ -67,8 +67,8 @@ import java.util.OptionalInt;
import java.util.Set;
import java.util.logging.Level;
+import static com.yahoo.vespa.config.server.session.SessionData.APPLICATION_PACKAGE_REFERENCE_PATH;
import static com.yahoo.vespa.config.server.session.SessionPreparer.PrepareResult;
-import static com.yahoo.vespa.config.server.session.SessionZooKeeperClient.APPLICATION_PACKAGE_REFERENCE_PATH;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java
index 07d3aac5a52..bb71cbd35d4 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionRepositoryTest.java
@@ -19,7 +19,7 @@ import com.yahoo.io.reader.NamedReader;
import com.yahoo.path.Path;
import com.yahoo.text.Utf8;
import com.yahoo.vespa.config.server.ApplicationRepository;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.application.OrchestratorMock;
import com.yahoo.vespa.config.server.filedistribution.MockFileDistributionFactory;
import com.yahoo.vespa.config.server.http.InvalidApplicationException;
@@ -115,11 +115,11 @@ public class SessionRepositoryTest {
assertNotNull(sessionRepository.getLocalSession(secondSessionId));
assertNull(sessionRepository.getLocalSession(secondSessionId + 1));
- ApplicationSet applicationSet = sessionRepository.ensureApplicationLoaded(sessionRepository.getRemoteSession(firstSessionId));
- assertNotNull(applicationSet);
- assertEquals(2, applicationSet.getApplicationGeneration());
- assertEquals(applicationId.application(), applicationSet.getForVersionOrLatest(Optional.empty(), Instant.now()).getId().application());
- assertNotNull(applicationSet.getForVersionOrLatest(Optional.empty(), Instant.now()).getModel());
+ ApplicationVersions applicationVersions = sessionRepository.ensureApplicationLoaded(sessionRepository.getRemoteSession(firstSessionId));
+ assertNotNull(applicationVersions);
+ assertEquals(2, applicationVersions.applicationGeneration());
+ assertEquals(applicationId.application(), applicationVersions.getForVersionOrLatest(Optional.empty(), Instant.now()).getId().application());
+ assertNotNull(applicationVersions.getForVersionOrLatest(Optional.empty(), Instant.now()).getModel());
LocalSession session = sessionRepository.getLocalSession(secondSessionId);
Collection<NamedReader> a = session.applicationPackage.get().getSchemas();
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java
index 4a7aeafab7e..5365cbd84f1 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.config.server.session;
import com.yahoo.cloud.config.ConfigserverConfig;
+import com.yahoo.component.Version;
import com.yahoo.config.FileReference;
import com.yahoo.config.model.api.Quota;
import com.yahoo.config.model.api.TenantSecretStore;
@@ -16,10 +17,13 @@ import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
+
import java.time.Instant;
import java.util.List;
import java.util.Optional;
+import static com.yahoo.vespa.config.server.session.SessionData.APPLICATION_ID_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.SESSION_DATA_PATH;
import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.SESSIONSTATE_ZK_SUBPATH;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -87,7 +91,7 @@ public class SessionZooKeeperClientTest {
int sessionId = 3;
SessionZooKeeperClient zkc = createSessionZKClient(sessionId);
zkc.writeApplicationId(id);
- Path path = sessionPath(sessionId).append(SessionZooKeeperClient.APPLICATION_ID_PATH);
+ Path path = sessionPath(sessionId).append(APPLICATION_ID_PATH);
assertTrue(curator.exists(path));
assertEquals(id.serializedForm(), Utf8.toString(curator.getData(path).get()));
}
@@ -135,7 +139,7 @@ public class SessionZooKeeperClientTest {
final FileReference testRef = new FileReference("test-ref");
SessionZooKeeperClient zkc = createSessionZKClient(3);
zkc.writeApplicationPackageReference(Optional.of(testRef));
- assertEquals(testRef, zkc.readApplicationPackageReference());
+ assertEquals(testRef, zkc.readApplicationPackageReference().get());
}
@Test
@@ -157,9 +161,31 @@ public class SessionZooKeeperClientTest {
assertEquals(secretStores, zkc.readTenantSecretStores());
}
+ @Test
+ public void require_that_session_data_is_written_to_zk() {
+ int sessionId = 2;
+ SessionZooKeeperClient zkc = createSessionZKClient(sessionId);
+ zkc.writeSessionData(new SessionData(ApplicationId.defaultId(),
+ Optional.of(new FileReference("foo")),
+ Version.fromString("8.195.1"),
+ Instant.now(),
+ Optional.empty(),
+ Optional.empty(),
+ Optional.empty(),
+ List.of(),
+ List.of(),
+ Optional.empty(),
+ List.of()));
+ Path path = sessionPath(sessionId).append(SESSION_DATA_PATH);
+ assertTrue(curator.exists(path));
+ String data = Utf8.toString(curator.getData(path).get());
+ assertTrue(data.contains("{\"applicationId\":\"default:default:default\",\"applicationPackageReference\":\"foo\",\"version\":\"8.195.1\",\"createTime\":"));
+ assertTrue(data.contains(",\"tenantSecretStores\":[],\"operatorCertificates\":[],\"dataplaneTokens\":[]}"));
+ }
+
private void assertApplicationIdParse(long sessionId, String idString, String expectedIdString) {
SessionZooKeeperClient zkc = createSessionZKClient(sessionId);
- Path path = sessionPath(sessionId).append(SessionZooKeeperClient.APPLICATION_ID_PATH);
+ Path path = sessionPath(sessionId).append(APPLICATION_ID_PATH);
curator.set(path, Utf8.toBytes(idString));
assertEquals(expectedIdString, zkc.readApplicationId().serializedForm());
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java
index 9af1bbb875e..02ee3202475 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/tenant/TenantRepositoryTest.java
@@ -17,7 +17,7 @@ import com.yahoo.vespa.config.server.MockSecretStore;
import com.yahoo.vespa.config.server.ServerCache;
import com.yahoo.vespa.config.server.TestConfigDefinitionRepo;
import com.yahoo.vespa.config.server.application.Application;
-import com.yahoo.vespa.config.server.application.ApplicationSet;
+import com.yahoo.vespa.config.server.application.ApplicationVersions;
import com.yahoo.vespa.config.server.application.TenantApplications;
import com.yahoo.vespa.config.server.application.TenantApplicationsTest;
import com.yahoo.vespa.config.server.filedistribution.FileDirectory;
@@ -29,6 +29,7 @@ import com.yahoo.vespa.config.server.monitoring.Metrics;
import com.yahoo.vespa.config.server.provision.HostProvisionerProvider;
import com.yahoo.vespa.curator.Curator;
import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.model.VespaModel;
@@ -106,13 +107,15 @@ public class TenantRepositoryTest {
TenantApplications applicationRepo = tenantRepository.getTenant(tenant1).getApplicationRepo();
ApplicationId id = ApplicationId.from(tenant1, ApplicationName.defaultName(), InstanceName.defaultName());
applicationRepo.createApplication(id);
- applicationRepo.createPutTransaction(id, 4).commit();
- applicationRepo.activateApplication(ApplicationSet.from(new Application(new VespaModel(MockApplicationPackage.createEmpty()),
- new ServerCache(),
- 4L,
- new Version(1, 2, 3),
- MetricUpdater.createTestUpdater(),
- id)),
+ try (var transaction = new CuratorTransaction(curator)) {
+ applicationRepo.createWriteActiveTransaction(transaction, id, 4).commit();
+ }
+ applicationRepo.activateApplication(ApplicationVersions.from(new Application(new VespaModel(MockApplicationPackage.createEmpty()),
+ new ServerCache(),
+ 4L,
+ new Version(1, 2, 3),
+ MetricUpdater.createTestUpdater(),
+ id)),
4);
assertEquals(1, listener.activated.get());
}
diff --git a/container-core/pom.xml b/container-core/pom.xml
index eec8b60077b..3df232406ca 100644
--- a/container-core/pom.xml
+++ b/container-core/pom.xml
@@ -390,18 +390,8 @@
</exclusions>
</dependency>
<dependency>
- <groupId>org.assertj</groupId>
- <artifactId>assertj-core</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>org.cthul</groupId>
- <artifactId>cthul-matchers</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java b/container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java
index 538397fad24..3da9bfa5008 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/state/HostLifeGatherer.java
@@ -17,29 +17,11 @@ public class HostLifeGatherer {
private static final ObjectMapper jsonMapper = new ObjectMapper();
- private static final Path UPTIME_PATH = Path.of("/proc");
-
- public static JsonNode getHostLifePacket(FileWrapper fileWrapper) {
- long upTime;
- int statusCode = 0;
- String statusMessage = "OK";
-
- try {
- upTime = fileWrapper.getFileAgeInSeconds(UPTIME_PATH);
- } catch (IOException e) {
- upTime = 0;
- statusCode = 1;
- statusMessage = "Unable to read proc folder";
- }
-
-
+ public static JsonNode getHostLifePacket() {
ObjectNode jsonObject = jsonMapper.createObjectNode();
- jsonObject.put("status_code", statusCode);
- jsonObject.put("status_msg", statusMessage);
jsonObject.put("timestamp", Instant.now().getEpochSecond());
jsonObject.put("application", "host_life");
ObjectNode metrics = jsonMapper.createObjectNode();
- metrics.put("uptime", upTime);
metrics.put("alive", 1);
jsonObject.set("metrics", metrics);
ObjectNode dimensions = jsonMapper.createObjectNode();
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/state/MetricGatherer.java b/container-core/src/main/java/com/yahoo/container/jdisc/state/MetricGatherer.java
deleted file mode 100644
index 103a4363ac2..00000000000
--- a/container-core/src/main/java/com/yahoo/container/jdisc/state/MetricGatherer.java
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.container.jdisc.state;
-
-import com.fasterxml.jackson.databind.JsonNode;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Gathers metrics regarding currently processing coredumps and host life.
- *
- * @author olaa
- */
-public class MetricGatherer {
-
- static List<JsonNode> getAdditionalMetrics() {
- FileWrapper fileWrapper = new FileWrapper();
- List<JsonNode> packetList = new ArrayList<>();
- if (System.getProperty("os.name").contains("nux"))
- packetList.add(HostLifeGatherer.getHostLifePacket(fileWrapper));
- return packetList;
- }
-
-}
diff --git a/container-core/src/main/java/com/yahoo/container/jdisc/state/MetricsPacketsHandler.java b/container-core/src/main/java/com/yahoo/container/jdisc/state/MetricsPacketsHandler.java
index c60389fc55e..83136754c85 100644
--- a/container-core/src/main/java/com/yahoo/container/jdisc/state/MetricsPacketsHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/jdisc/state/MetricsPacketsHandler.java
@@ -104,8 +104,7 @@ public class MetricsPacketsHandler extends AbstractRequestHandler {
var metricSetId = queryMap.get("metric-set");
var format = queryMap.get("format");
- // TODO: Remove "array-formatted"
- if ("array".equals(format) || queryMap.containsKey("array-formatted")) {
+ if ("array".equals(format)) {
return getMetricsArray(metricSetId);
}
if ("prometheus".equals(format)) {
@@ -126,7 +125,6 @@ public class MetricsPacketsHandler extends AbstractRequestHandler {
ArrayNode jsonArray = jsonMapper.createArrayNode();
getPacketsForSnapshot(getSnapshot(), metricSetId, applicationName, timer.currentTimeMillis())
.forEach(jsonArray::add);
- MetricGatherer.getAdditionalMetrics().forEach(jsonArray::add);
root.set("metrics", jsonArray);
return jsonToString(root)
.getBytes(StandardCharsets.UTF_8);
@@ -198,6 +196,7 @@ public class MetricsPacketsHandler extends AbstractRequestHandler {
packets.add(packet);
}
}
+ packets.add(HostLifeGatherer.getHostLifePacket());
return packets;
}
@@ -231,7 +230,10 @@ public class MetricsPacketsHandler extends AbstractRequestHandler {
GaugeMetric gauge = (GaugeMetric) value;
metrics.put(name + ".average", sanitizeDouble(gauge.getAverage()))
.put(name + ".last", sanitizeDouble(gauge.getLast()))
- .put(name + ".max", sanitizeDouble(gauge.getMax()));
+ .put(name + ".max", sanitizeDouble(gauge.getMax()))
+ .put(name + ".min", sanitizeDouble(gauge.getMin()))
+ .put(name + ".sum", sanitizeDouble(gauge.getSum()))
+ .put(name + ".count", gauge.getCount());
if (gauge.getPercentiles().isPresent()) {
for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) {
metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue());
@@ -255,6 +257,9 @@ public class MetricsPacketsHandler extends AbstractRequestHandler {
metrics.put(name + ".average", sanitizeDouble(gauge.getAverage()));
metrics.put(name + ".last", sanitizeDouble(gauge.getLast()));
metrics.put(name + ".max", sanitizeDouble(gauge.getMax()));
+ metrics.put(name + ".min", sanitizeDouble(gauge.getMin()));
+ metrics.put(name + ".sum", sanitizeDouble(gauge.getSum()));
+ metrics.put(name + ".count", gauge.getCount());
if (gauge.getPercentiles().isPresent()) {
for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) {
metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue());
diff --git a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottler.java b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottler.java
index 06d1a707be9..06a9986e996 100644
--- a/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottler.java
+++ b/container-core/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottler.java
@@ -46,11 +46,11 @@ class ConnectionThrottler extends ContainerLifeCycle implements SelectorManager.
private boolean isThrottling = false;
ConnectionThrottler(AbstractConnector connector, ConnectorConfig.Throttling config) {
- this(Runtime.getRuntime(), new RateStatistic(1, TimeUnit.SECONDS), connector.getScheduler(), connector, config);
+ this(Jvm.fromRuntime(), new RateStatistic(1, TimeUnit.SECONDS), connector.getScheduler(), connector, config);
}
// Intended for unit testing
- ConnectionThrottler(Runtime runtime,
+ ConnectionThrottler(Jvm runtime,
RateStatistic rateStatistic,
Scheduler scheduler,
AbstractConnector connector,
@@ -150,10 +150,10 @@ class ConnectionThrottler extends ContainerLifeCycle implements SelectorManager.
* Note: implementation inspired by Jetty's {@link LowResourceMonitor}
*/
private static class HeapResourceLimit extends AbstractLifeCycle implements ResourceLimit {
- private final Runtime runtime;
+ private final Jvm runtime;
private final double maxHeapUtilization;
- HeapResourceLimit(Runtime runtime, double maxHeapUtilization) {
+ HeapResourceLimit(Jvm runtime, double maxHeapUtilization) {
this.runtime = runtime;
this.maxHeapUtilization = maxHeapUtilization;
}
@@ -269,4 +269,18 @@ class ConnectionThrottler extends ContainerLifeCycle implements SelectorManager.
}
}
}
+
+ interface Jvm {
+ long maxMemory();
+ long freeMemory();
+
+ static Jvm fromRuntime() {
+ return new Jvm() {
+ final Runtime rt = Runtime.getRuntime();
+
+ @Override public long maxMemory() { return rt.maxMemory(); }
+ @Override public long freeMemory() { return rt.freeMemory(); }
+ };
+ }
+ }
}
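Note: the Jvm interface introduced above exists so that heap-based throttling can be unit tested without the real Runtime; the constructor marked "Intended for unit testing" takes a Jvm directly. A minimal sketch of a stub a test might pass in (the utilization figures are arbitrary):

    // Stub reporting 90% heap utilization (maxMemory 1000, freeMemory 100).
    ConnectionThrottler.Jvm stubJvm = new ConnectionThrottler.Jvm() {
        @Override public long maxMemory() { return 1000; }
        @Override public long freeMemory() { return 100; }
    };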
diff --git a/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java b/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java
index 86b7f545762..8590983d0ad 100644
--- a/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java
+++ b/container-core/src/test/java/com/yahoo/container/jdisc/ThreadedHttpRequestHandlerTest.java
@@ -7,7 +7,8 @@ import org.junit.jupiter.api.Test;
import java.util.concurrent.Executors;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* @author bjorncs
@@ -22,10 +23,10 @@ public class ThreadedHttpRequestHandlerTest {
driver.sendRequest("http://localhost/myhandler").readAll();
String expectedMetricName = "jdisc.http.handler.unhandled_exceptions";
- assertThat(metricMock.innvocations())
- .containsKey(expectedMetricName);
- assertThat(((MetricMock.SimpleMetricContext) metricMock.innvocations().get(expectedMetricName).ctx).dimensions)
- .containsEntry("exception", "DummyException");
+ assertTrue(metricMock.innvocations()
+ .containsKey(expectedMetricName));
+ assertEquals("DummyException",
+ ((MetricMock.SimpleMetricContext) metricMock.innvocations().get(expectedMetricName).ctx).dimensions.get("exception"));
}
diff --git a/container-core/src/test/java/com/yahoo/container/jdisc/state/HostLifeGathererTest.java b/container-core/src/test/java/com/yahoo/container/jdisc/state/HostLifeGathererTest.java
index c394630cb40..28b4aff26fe 100644
--- a/container-core/src/test/java/com/yahoo/container/jdisc/state/HostLifeGathererTest.java
+++ b/container-core/src/test/java/com/yahoo/container/jdisc/state/HostLifeGathererTest.java
@@ -4,9 +4,8 @@ package com.yahoo.container.jdisc.state;
import com.fasterxml.jackson.databind.JsonNode;
import org.junit.jupiter.api.Test;
-import java.nio.file.Path;
-
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
@@ -16,22 +15,10 @@ public class HostLifeGathererTest {
@Test
void host_is_alive() {
- JsonNode packet = HostLifeGatherer.getHostLifePacket(new MockFileWrapper());
+ JsonNode packet = HostLifeGatherer.getHostLifePacket();
JsonNode metrics = packet.get("metrics");
assertEquals("host_life", packet.get("application").textValue());
- assertEquals(0, packet.get("status_code").intValue());
- assertEquals("OK", packet.get("status_msg").textValue());
-
- assertEquals(123L, metrics.get("uptime").longValue());
assertEquals(1, metrics.get("alive").intValue());
-
- }
-
- static class MockFileWrapper extends FileWrapper {
-
- @Override
- long getFileAgeInSeconds(Path path) {
- return 123;
- }
+ assertTrue(packet.get("dimensions").hasNonNull("vespaVersion"));
}
}
diff --git a/container-core/src/test/java/com/yahoo/container/jdisc/state/MetricsPacketsHandlerTest.java b/container-core/src/test/java/com/yahoo/container/jdisc/state/MetricsPacketsHandlerTest.java
index 38c1072c759..ead26a0a898 100644
--- a/container-core/src/test/java/com/yahoo/container/jdisc/state/MetricsPacketsHandlerTest.java
+++ b/container-core/src/test/java/com/yahoo/container/jdisc/state/MetricsPacketsHandlerTest.java
@@ -189,15 +189,21 @@ public class MetricsPacketsHandlerTest extends StateHandlerTestBase {
"gauge.metric.average" : 0.2,
"gauge.metric.last" : 0.2,
"gauge.metric.max" : 0.2,
+ "gauge.metric.min" : 0.2,
+ "gauge.metric.sum" : 0.2,
+ "gauge.metric.count" : 1,
"configserver.requests.count" : 120,
"lockAttempt.lockedLoad.average" : 500.0,
"lockAttempt.lockedLoad.last" : 500.0,
"lockAttempt.lockedLoad.max" : 500.0,
+ "lockAttempt.lockedLoad.min" : 500.0,
+ "lockAttempt.lockedLoad.sum" : 500.0,
+ "lockAttempt.lockedLoad.count" : 1,
"counter.metric.count" : 5
}
}
""";
- assertEquals(expectedResponse, response);
+ assertTrue(response.startsWith(expectedResponse));
// With filtering
response = requestAsString("http://localhost/metrics-packets?metric-set=infrastructure");
@@ -210,13 +216,13 @@ public class MetricsPacketsHandlerTest extends StateHandlerTestBase {
"host" : "some-hostname"
},
"metrics" : {
- "lockAttempt.lockedLoad.max" : 500.0,
"configserver.requests.count" : 120,
- "lockAttempt.lockedLoad.average" : 500.0
+ "lockAttempt.lockedLoad.average" : 500.0,
+ "lockAttempt.lockedLoad.max" : 500.0
}
}
""";
- assertEquals(expectedResponse, response);
+ assertTrue(response.startsWith(expectedResponse));
}
private List<JsonNode> incrementTimeAndGetJsonPackets() throws Exception {
diff --git a/container-core/src/test/java/com/yahoo/container/logging/CircularArrayAccessLogKeeperTest.java b/container-core/src/test/java/com/yahoo/container/logging/CircularArrayAccessLogKeeperTest.java
index f942ee588be..01bbe0d009c 100644
--- a/container-core/src/test/java/com/yahoo/container/logging/CircularArrayAccessLogKeeperTest.java
+++ b/container-core/src/test/java/com/yahoo/container/logging/CircularArrayAccessLogKeeperTest.java
@@ -6,7 +6,7 @@ import org.junit.jupiter.api.Test;
import static org.hamcrest.CoreMatchers.hasItems;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsCollectionContaining.hasItem;
+import static org.hamcrest.core.IsIterableContaining.hasItem;
import static org.hamcrest.core.IsNot.not;
import static org.hamcrest.MatcherAssert.assertThat;
diff --git a/container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java b/container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java
index b5c65733436..220c09481c2 100644
--- a/container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java
+++ b/container-core/src/test/java/com/yahoo/container/logging/LogFileHandlerTestCase.java
@@ -25,8 +25,10 @@ import java.util.logging.LogRecord;
import java.util.zip.GZIPInputStream;
import static com.yahoo.yolean.Exceptions.uncheck;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* @author Bob Travis
@@ -50,8 +52,8 @@ public class LogFileHandlerTestCase {
long tomorrowDays = (now / millisPerDay) + 1;
long tomorrowMillis = tomorrowDays * millisPerDay;
- assertThat(tomorrowMillis + 1000).isEqualTo(h.logThread.getNextRotationTime(tomorrowMillis));
- assertThat(tomorrowMillis + 10000).isEqualTo(h.logThread.getNextRotationTime(tomorrowMillis + 3000));
+ assertEquals(tomorrowMillis + 1000, h.logThread.getNextRotationTime(tomorrowMillis));
+ assertEquals(tomorrowMillis + 10000, h.logThread.getNextRotationTime(tomorrowMillis + 3000));
String message = "test";
h.publish(message);
h.publish("another test");
@@ -118,11 +120,11 @@ public class LogFileHandlerTestCase {
String longMessage = formatter.format(new LogRecord(Level.INFO, "string which is way longer than the word test"));
handler.publish(longMessage);
handler.flush();
- assertThat(Files.size(Paths.get(firstFile))).isEqualTo(31);
+ assertEquals(31, Files.size(Paths.get(firstFile)));
final long expectedSecondFileLength = 72;
long symlinkFileLength = Files.size(root.toPath().resolve("symlink"));
- assertThat(symlinkFileLength).isEqualTo(expectedSecondFileLength);
+ assertEquals(expectedSecondFileLength, symlinkFileLength);
handler.shutdown();
}
@@ -135,8 +137,8 @@ public class LogFileHandlerTestCase {
firstHandler.publishAndWait("test");
firstHandler.shutdown();
- assertThat(Files.size(Paths.get(firstHandler.getFileName()))).isEqualTo(5);
- assertThat(root.toPath().resolve("symlink").toRealPath().toString()).isEqualTo(
+ assertEquals(5, Files.size(Paths.get(firstHandler.getFileName())));
+ assertEquals(root.toPath().resolve("symlink").toRealPath().toString(),
Paths.get(firstHandler.getFileName()).toRealPath().toString());
LogFileHandler<String> secondHandler = new LogFileHandler<>(
@@ -144,11 +146,11 @@ public class LogFileHandlerTestCase {
secondHandler.publishAndWait("test");
secondHandler.rotateNow();
- assertThat(root.toPath().resolve("symlink").toRealPath().toString()).isEqualTo(
+ assertEquals(root.toPath().resolve("symlink").toRealPath().toString(),
Paths.get(secondHandler.getFileName()).toRealPath().toString());
while (Files.exists(root.toPath().resolve(firstHandler.getFileName()))) Thread.sleep(1);
- assertThat(Files.exists(Paths.get(firstHandler.getFileName() + ".zst"))).isTrue();
+ assertTrue(Files.exists(Paths.get(firstHandler.getFileName() + ".zst")));
secondHandler.shutdown();
}
@@ -187,20 +189,20 @@ public class LogFileHandlerTestCase {
}
h.flush();
String f1 = h.getFileName();
- assertThat(f1).startsWith(root.getAbsolutePath() + "/logfilehandlertest.");
+ assertTrue(f1.startsWith(root.getAbsolutePath() + "/logfilehandlertest."));
File uncompressed = new File(f1);
File compressed = new File(f1 + "." + fileExtension);
- assertThat(uncompressed).exists();
- assertThat(compressed).doesNotExist();
+ assertTrue(uncompressed.exists());
+ assertFalse(compressed.exists());
String content = IOUtils.readFile(uncompressed);
- assertThat(content).hasLineCount(logEntries);
+ assertEquals(logEntries, content.lines().count());
h.rotateNow();
while (uncompressed.exists()) {
Thread.sleep(1);
}
- assertThat(compressed).exists();
+ assertTrue(compressed.exists());
String uncompressedContent = decompressor.apply(compressed.toPath(), content.getBytes().length);
- assertThat(uncompressedContent).isEqualTo(content);
+ assertEquals(uncompressedContent, content);
h.shutdown();
}
@@ -211,14 +213,6 @@ public class LogFileHandlerTestCase {
outputStream.write(record.getBytes(StandardCharsets.UTF_8));
}
- private static File newFolder(File root, String... subDirs) throws IOException {
- String subFolder = String.join("/", subDirs);
- File result = new File(root, subFolder);
- if (!result.mkdirs()) {
- throw new IOException("Couldn't create folders " + root);
- }
- return result;
- }
}
private static File newFolder(File root, String... subDirs) throws IOException {
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottlerTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottlerTest.java
index a65231db2b7..cc73ab52aa1 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottlerTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectionThrottlerTest.java
@@ -24,7 +24,7 @@ public class ConnectionThrottlerTest {
@Test
void throttles_when_any_resource_check_exceeds_configured_threshold() {
- Runtime runtime = mock(Runtime.class);
+ var runtime = mock(ConnectionThrottler.Jvm.class);
when(runtime.maxMemory()).thenReturn(100l);
RateStatistic rateStatistic = new RateStatistic(1, TimeUnit.HOURS);
MockScheduler scheduler = new MockScheduler();
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java
index ce49926c58b..cc839768ad5 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/FilterTestCase.java
@@ -28,7 +28,6 @@ import com.yahoo.jdisc.http.filter.ResponseHeaderFilter;
import com.yahoo.jdisc.http.filter.chain.RequestFilterChain;
import com.yahoo.jdisc.http.filter.chain.ResponseFilterChain;
import com.yahoo.jdisc.http.server.jetty.testutils.ConnectorFactoryRegistryModule;
-import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
@@ -44,6 +43,7 @@ import java.util.concurrent.atomic.AtomicReference;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.isNull;
import static org.mockito.Mockito.mock;
@@ -557,7 +557,7 @@ public class FilterTestCase {
.expectStatusCode(is(Response.Status.OK));
ArgumentCaptor<HttpRequest> requestArgumentCaptor = ArgumentCaptor.forClass(HttpRequest.class);
verify(filter).filter(requestArgumentCaptor.capture(), any(ResponseHandler.class));
- Assertions.assertThat(requestArgumentCaptor.getValue().context()).containsKey(RequestHandlerSpec.ATTRIBUTE_NAME);
+ assertTrue(requestArgumentCaptor.getValue().context().containsKey(RequestHandlerSpec.ATTRIBUTE_NAME));
}
private static JettyTestDriver newDriver(MyRequestHandler requestHandler, FilterBindings filterBindings) {
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
index ae1a6494acd..cbe21d5581b 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerConformanceTest.java
@@ -43,13 +43,12 @@ import static com.yahoo.jdisc.Response.Status.NOT_FOUND;
import static com.yahoo.jdisc.Response.Status.OK;
import static org.apache.http.HttpStatus.SC_INTERNAL_SERVER_ERROR;
import static org.apache.http.HttpStatus.SC_NOT_FOUND;
-import static org.cthul.matchers.CthulMatchers.containsPattern;
-import static org.cthul.matchers.CthulMatchers.matchesPattern;
import static org.hamcrest.CoreMatchers.any;
import static org.hamcrest.CoreMatchers.anyOf;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.matchesPattern;
/**
* @author Simon Thoresen Hult
@@ -119,8 +118,8 @@ public class HttpServerConformanceTest extends ServerProviderConformanceTest {
@Override
@Test
public void testBindingNotFoundException() throws Throwable {
- final Pattern contentPattern = Pattern.compile("No binding for URI 'http://.+/status.html'\\.");
- new TestRunner().expect(errorWithReason(is(NOT_FOUND), containsPattern(contentPattern)))
+ final Pattern contentPattern = Pattern.compile(".*No binding for URI 'http://.+/status.html'\\.");
+ new TestRunner().expect(errorWithReason(is(NOT_FOUND), matchesPattern(contentPattern)))
.execute();
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
index 6f9c854be64..c4c9161ccfb 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/HttpServerTest.java
@@ -38,7 +38,6 @@ import org.apache.hc.client5.http.entity.mime.StringBody;
import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
import org.apache.hc.core5.http.ConnectionClosedException;
import org.apache.hc.core5.http.ContentType;
-import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -84,14 +83,12 @@ import static com.yahoo.jdisc.http.server.jetty.SimpleHttpClient.ResponseValidat
import static com.yahoo.jdisc.http.server.jetty.Utils.createHttp2Client;
import static com.yahoo.jdisc.http.server.jetty.Utils.createSslTestDriver;
import static com.yahoo.jdisc.http.server.jetty.Utils.generatePrivateKeyAndCertificate;
-import static org.cthul.matchers.CthulMatchers.containsPattern;
-import static org.cthul.matchers.CthulMatchers.matchesPattern;
import static org.hamcrest.CoreMatchers.containsString;
-import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.startsWith;
-import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.matchesPattern;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
@@ -128,7 +125,7 @@ public class HttpServerTest {
.listenPort(driver.server().getListenPort())
);
} catch (final Throwable t) {
- assertThat(t.getCause(), instanceOf(BindException.class));
+ assertTrue(t.getCause() instanceof BindException);
}
assertTrue(driver.close());
}
@@ -143,7 +140,7 @@ public class HttpServerTest {
newBindingSetSelector("unknown"));
driver.client().get("/status.html")
.expectStatusCode(is(NOT_FOUND))
- .expectContent(containsPattern(Pattern.compile(
+ .expectContent(matchesPattern(Pattern.compile(".*" +
Pattern.quote(BindingSetNotFoundException.class.getName()) +
": No binding set named &apos;unknown&apos;\\.\n\tat .+",
Pattern.DOTALL | Pattern.MULTILINE)));
@@ -192,7 +189,7 @@ public class HttpServerTest {
.expectStatusCode(is(REQUEST_URI_TOO_LONG));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
assertEquals(414, entry.statusCode().getAsInt());
- assertThat(driver.close(), is(true));
+ assertTrue(driver.close());
}
@Test
@@ -474,7 +471,7 @@ public class HttpServerTest {
final JettyTestDriver driver = JettyTestDriver.newInstance(new EchoWithHeaderRequestHandler(CONNECTION, CLOSE));
driver.client().get("/status.html")
.expectHeader(CONNECTION, is(CLOSE));
- assertThat(driver.close(), is(true));
+ assertTrue(driver.close());
}
@Test
@@ -584,7 +581,7 @@ public class HttpServerTest {
driver.client().get("/status.html")
.expectStatusCode(is(OK))
.expectContent(matchesPattern("\\d{13,}"));
- assertThat(driver.close(), is(true));
+ assertTrue(driver.close());
}
@Test
@@ -690,25 +687,25 @@ public class HttpServerTest {
}
assertTrue(driver.close());
List<ConnectionLogEntry> logEntries = connectionLog.logEntries();
- Assertions.assertThat(logEntries).hasSize(1);
+ assertEquals(1, logEntries.size());
ConnectionLogEntry logEntry = logEntries.get(0);
assertEquals(4, UUID.fromString(logEntry.id()).version());
- Assertions.assertThat(logEntry.timestamp()).isAfter(Instant.EPOCH);
- Assertions.assertThat(logEntry.requests()).hasValue(100L);
- Assertions.assertThat(logEntry.responses()).hasValue(100L);
- Assertions.assertThat(logEntry.peerAddress()).hasValue("127.0.0.1");
- Assertions.assertThat(logEntry.localAddress()).hasValue("127.0.0.1");
- Assertions.assertThat(logEntry.localPort()).hasValue(listenPort);
- Assertions.assertThat(logEntry.httpBytesReceived()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(100000L));
- Assertions.assertThat(logEntry.httpBytesSent()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(10000L));
- Assertions.assertThat(logEntry.sslProtocol()).hasValueSatisfying(TlsContext.ALLOWED_PROTOCOLS::contains);
- Assertions.assertThat(logEntry.sslPeerSubject()).hasValue("CN=localhost");
- Assertions.assertThat(logEntry.sslCipherSuite()).hasValueSatisfying(cipher -> Assertions.assertThat(cipher).isNotBlank());
- Assertions.assertThat(logEntry.sslSessionId()).hasValueSatisfying(sessionId -> Assertions.assertThat(sessionId).hasSize(64));
- Assertions.assertThat(logEntry.sslPeerNotBefore()).hasValue(Instant.EPOCH);
- Assertions.assertThat(logEntry.sslPeerNotAfter()).hasValue(Instant.EPOCH.plus(100_000, ChronoUnit.DAYS));
- Assertions.assertThat(logEntry.sslBytesReceived()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(100000L));
- Assertions.assertThat(logEntry.sslBytesSent()).hasValueSatisfying(value -> Assertions.assertThat(value).isGreaterThan(10000L));
+ assertTrue(logEntry.timestamp().isAfter(Instant.EPOCH));
+ assertEquals(100L, logEntry.requests().get());
+ assertEquals(100L, logEntry.responses().get());
+ assertEquals("127.0.0.1", logEntry.peerAddress().get());
+ assertEquals("127.0.0.1", logEntry.localAddress().get());
+ assertEquals(listenPort, logEntry.localPort().get());
+ assertTrue(logEntry.httpBytesReceived().get() > 100000L);
+ assertTrue(logEntry.httpBytesSent().get() > 10000L);
+ assertTrue(TlsContext.ALLOWED_PROTOCOLS.contains(logEntry.sslProtocol().get()));
+ assertEquals("CN=localhost", logEntry.sslPeerSubject().get());
+ assertFalse(logEntry.sslCipherSuite().get().isBlank());
+ assertEquals(64, logEntry.sslSessionId().get().length());
+ assertEquals(Instant.EPOCH, logEntry.sslPeerNotBefore().get());
+ assertEquals(Instant.EPOCH.plus(100_000, ChronoUnit.DAYS), logEntry.sslPeerNotAfter().get());
+ assertTrue(logEntry.sslBytesReceived().get() > 100000L);
+ assertTrue(logEntry.sslBytesSent().get() > 10000L);
}
@Test
@@ -721,9 +718,9 @@ public class HttpServerTest {
binder -> binder.bind(RequestLog.class).toInstance(requestLogMock));
driver.client().newPost("/status.html").setContent("abcdef").execute().expectStatusCode(is(OK));
RequestLogEntry entry = requestLogMock.poll(Duration.ofSeconds(5));
- Assertions.assertThat(entry.statusCode()).hasValue(200);
- Assertions.assertThat(entry.requestSize()).hasValue(6);
- assertThat(driver.close(), is(true));
+ assertEquals(200, entry.statusCode().getAsInt());
+ assertEquals(6, entry.requestSize().getAsLong());
+ assertTrue(driver.close());
}
@Test
@@ -735,7 +732,7 @@ public class HttpServerTest {
InMemoryConnectionLog connectionLog = new InMemoryConnectionLog();
JettyTestDriver driver = createSslTestDriver(certificateFile, privateKeyFile, metricConsumer, connectionLog);
driver.client().get("/").expectStatusCode(is(OK));
- assertThat(driver.close(), is(true));
+ assertTrue(driver.close());
verify(metricConsumer.mockitoMock(), atLeast(1))
.set(MetricDefinitions.REQUESTS_PER_CONNECTION, 1L, MetricConsumerMock.STATIC_CONTEXT);
}
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ProxyProtocolTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ProxyProtocolTest.java
index 246b7875692..811a8006720 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ProxyProtocolTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/ProxyProtocolTest.java
@@ -29,7 +29,6 @@ import java.util.logging.Logger;
import static com.yahoo.jdisc.http.server.jetty.Utils.generatePrivateKeyAndCertificate;
import static com.yahoo.yolean.Exceptions.uncheckInterrupted;
-import static org.assertj.core.api.Assertions.assertThat;
import static org.eclipse.jetty.client.ProxyProtocolClientConnectionFactory.V1;
import static org.eclipse.jetty.client.ProxyProtocolClientConnectionFactory.V2;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -206,14 +205,14 @@ class ProxyProtocolTest {
private static void assertLogEntryHasRemote(ConnectionLogEntry entry, String expectedAddress, int expectedPort) {
if (expectedAddress != null) {
- assertThat(entry.remoteAddress()).hasValue(expectedAddress);
+ assertEquals(expectedAddress, entry.remoteAddress().get());
} else {
- assertThat(entry.remoteAddress()).isEmpty();
+ assertTrue(entry.remoteAddress().isEmpty());
}
if (expectedPort > 0) {
- assertThat(entry.remotePort()).hasValue(expectedPort);
+ assertEquals(expectedPort, entry.remotePort().get());
} else {
- assertThat(entry.remotePort()).isEmpty();
+ assertTrue(entry.remotePort().isEmpty());
}
}
@@ -227,8 +226,8 @@ class ProxyProtocolTest {
await(waitCondition);
assertTrue(driver.close());
if (waitCondition.test(null)) await(waitCondition);
- assertThat(reqLog.entries()).hasSize(expectedReqLogSize);
- assertThat(connLog.logEntries()).hasSize(expectedConnLogSize);
+ assertEquals(expectedReqLogSize, reqLog.entries().size());
+ assertEquals(expectedConnLogSize, connLog.logEntries().size());
}
private static void await(Predicate<Void> waitCondition) {
diff --git a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SslHandshakeMetricsTest.java b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SslHandshakeMetricsTest.java
index 99fa9bb2052..22699efbd46 100644
--- a/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SslHandshakeMetricsTest.java
+++ b/container-core/src/test/java/com/yahoo/jdisc/http/server/jetty/SslHandshakeMetricsTest.java
@@ -21,7 +21,6 @@ import java.util.logging.Logger;
import static com.yahoo.jdisc.http.server.jetty.Utils.createSslTestDriver;
import static com.yahoo.jdisc.http.server.jetty.Utils.generatePrivateKeyAndCertificate;
-import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
@@ -59,7 +58,7 @@ class SslHandshakeMetricsTest {
verify(metricConsumer.mockitoMock(), atLeast(1))
.add(MetricDefinitions.SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT, 1L, MetricConsumerMock.STATIC_CONTEXT);
assertTrue(driver.close());
- assertThat(connectionLog.logEntries()).hasSize(1);
+ assertEquals(1, connectionLog.logEntries().size());
assertSslHandshakeFailurePresent(
connectionLog.logEntries().get(0), SSLHandshakeException.class, SslHandshakeFailure.MISSING_CLIENT_CERT.failureType());
}
@@ -82,7 +81,7 @@ class SslHandshakeMetricsTest {
verify(metricConsumer.mockitoMock(), atLeast(1))
.add(MetricDefinitions.SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS, 1L, MetricConsumerMock.STATIC_CONTEXT);
assertTrue(driver.close());
- assertThat(connectionLog.logEntries()).hasSize(1);
+ assertEquals(1, connectionLog.logEntries().size());
assertSslHandshakeFailurePresent(
connectionLog.logEntries().get(0), SSLHandshakeException.class, SslHandshakeFailure.INCOMPATIBLE_PROTOCOLS.failureType());
}
@@ -103,7 +102,7 @@ class SslHandshakeMetricsTest {
verify(metricConsumer.mockitoMock(), atLeast(1))
.add(MetricDefinitions.SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CIPHERS, 1L, MetricConsumerMock.STATIC_CONTEXT);
assertTrue(driver.close());
- assertThat(connectionLog.logEntries()).hasSize(1);
+ assertEquals(1, connectionLog.logEntries().size());
assertSslHandshakeFailurePresent(
connectionLog.logEntries().get(0), SSLHandshakeException.class, SslHandshakeFailure.INCOMPATIBLE_CIPHERS.failureType());
}
@@ -128,7 +127,7 @@ class SslHandshakeMetricsTest {
verify(metricConsumer.mockitoMock(), atLeast(1))
.add(MetricDefinitions.SSL_HANDSHAKE_FAILURE_INVALID_CLIENT_CERT, 1L, MetricConsumerMock.STATIC_CONTEXT);
assertTrue(driver.close());
- assertThat(connectionLog.logEntries()).hasSize(1);
+ assertEquals(1, connectionLog.logEntries().size());
assertSslHandshakeFailurePresent(
connectionLog.logEntries().get(0), SSLHandshakeException.class, SslHandshakeFailure.INVALID_CLIENT_CERT.failureType());
}
@@ -153,7 +152,7 @@ class SslHandshakeMetricsTest {
verify(metricConsumer.mockitoMock(), atLeast(1))
.add(MetricDefinitions.SSL_HANDSHAKE_FAILURE_EXPIRED_CLIENT_CERT, 1L, MetricConsumerMock.STATIC_CONTEXT);
assertTrue(driver.close());
- assertThat(connectionLog.logEntries()).hasSize(1);
+ assertEquals(1, connectionLog.logEntries().size());
}
@@ -169,7 +168,7 @@ class SslHandshakeMetricsTest {
client.get("/status.html");
fail("SSLHandshakeException expected");
} catch (SSLHandshakeException e) {
- assertThat(e.getMessage()).contains(expectedExceptionSubstring);
+ assertTrue(e.getMessage().contains(expectedExceptionSubstring));
} catch (SocketException | SSLException e) {
// This exception is thrown if Apache httpclient's write thread detects the handshake failure before the read thread.
var msg = e.getMessage();
@@ -182,7 +181,7 @@ class SslHandshakeMetricsTest {
private static void assertSslHandshakeFailurePresent(
ConnectionLogEntry entry, Class<? extends SSLHandshakeException> expectedException, String expectedType) {
- assertThat(entry.sslHandshakeFailure()).isPresent();
+ assertTrue(entry.sslHandshakeFailure().isPresent());
ConnectionLogEntry.SslHandshakeFailure failure = entry.sslHandshakeFailure().get();
assertEquals(expectedType, failure.type());
ConnectionLogEntry.SslHandshakeFailure.ExceptionEntry exceptionEntry = failure.exceptionChain().get(0);
diff --git a/container-dependencies-enforcer/pom.xml b/container-dependencies-enforcer/pom.xml
index 10e0af3ce7a..3c991823d5d 100644
--- a/container-dependencies-enforcer/pom.xml
+++ b/container-dependencies-enforcer/pom.xml
@@ -69,7 +69,7 @@
<!-- Guava with its internal dependencies -->
<include>com.google.guava:guava:${guava.vespa.version}:provided</include>
- <include>com.google.errorprone:error_prone_annotations:[2.18.0, 3):provided</include>
+ <include>com.google.errorprone:error_prone_annotations:[2.21.1, 3):provided</include>
<include>com.google.guava:failureaccess:[1.0.1, 2):provided</include>
<include>com.google.j2objc:j2objc-annotations:[2.8, 3):provided</include>
@@ -160,6 +160,9 @@
<include>io.airlift:airline:${airline.vespa.version}:test</include>
<include>io.prometheus:simpleclient:${prometheus.client.vespa.version}:test</include>
<include>io.prometheus:simpleclient_common:${prometheus.client.vespa.version}:test</include>
+ <include>io.prometheus:simpleclient_tracer_common:${prometheus.client.vespa.version}:test</include>
+ <include>io.prometheus:simpleclient_tracer_otel:${prometheus.client.vespa.version}:test</include>
+ <include>io.prometheus:simpleclient_tracer_otel_agent:${prometheus.client.vespa.version}:test</include>
<include>junit:junit:${junit4.vespa.version}:test</include>
<include>net.java.dev.jna:jna:${jna.vespa.version}:test</include>
<include>net.openhft:zero-allocation-hashing:jar:${zero-allocation-hashing.vespa.version}:test</include>
@@ -195,6 +198,7 @@
<include>org.eclipse.jetty:jetty-server:${jetty.vespa.version}:test</include>
<include>org.eclipse.jetty:jetty-servlet:${jetty.vespa.version}:test</include>
<include>org.eclipse.jetty:jetty-util:${jetty.vespa.version}:test</include>
+ <include>org.hamcrest:hamcrest:${hamcrest.vespa.version}:test</include>
<include>org.hamcrest:hamcrest-core:${hamcrest.vespa.version}:test</include>
<include>org.hdrhistogram:HdrHistogram:${hdrhistogram.vespa.version}:test</include>
<include>org.json:json:${org.json.vespa.version}:test</include> <!-- TODO: Remove on Vespa 9 -->
diff --git a/container-disc/pom.xml b/container-disc/pom.xml
index 4ec141d2758..164e74c5073 100644
--- a/container-disc/pom.xml
+++ b/container-disc/pom.xml
@@ -32,11 +32,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
@@ -46,17 +42,12 @@
<scope>test</scope>
</dependency>
<dependency>
- <groupId>org.assertj</groupId>
- <artifactId>assertj-core</artifactId>
- <scope>test</scope>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>annotations</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
</dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>annotations</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
+ <dependency>
<groupId>com.yahoo.vespa</groupId>
<artifactId>config-lib</artifactId>
<version>${project.version}</version>
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/metric/JrtMetrics.java b/container-disc/src/main/java/com/yahoo/container/jdisc/metric/JrtMetrics.java
index c1b95cce654..ecbbcd8e27a 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/metric/JrtMetrics.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/metric/JrtMetrics.java
@@ -29,7 +29,7 @@ class JrtMetrics {
Snapshot changesSincePrevious = snapshot.changesSince(previousSnapshot);
increment(ContainerMetrics.JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES.baseName(), changesSincePrevious.tlsCertificateVerificationFailures());
increment(ContainerMetrics.JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES.baseName(), changesSincePrevious.peerAuthorizationFailures());
- increment(ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED.baseName(), changesSincePrevious.serverTlsConnectionsEstablished());
+ increment(ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECTIONS_ESTABLISHED.baseName(), changesSincePrevious.serverTlsConnectionsEstablished());
increment(ContainerMetrics.JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED.baseName(), changesSincePrevious.clientTlsConnectionsEstablished());
increment(ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName(), changesSincePrevious.serverUnencryptedConnectionsEstablished());
increment(ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName(), changesSincePrevious.clientUnencryptedConnectionsEstablished());
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index 0f440957dfd..cdb660f294a 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -6981,12 +6981,14 @@
"public java.lang.Integer getMinHitsPerThread()",
"public java.lang.Double getPostFilterThreshold()",
"public java.lang.Double getApproximateThreshold()",
+ "public java.lang.Double getTargetHitsMaxAdjustmentFactor()",
"public void setTermwiselimit(double)",
"public void setNumThreadsPerSearch(int)",
"public void setNumSearchPartitions(int)",
"public void setMinHitsPerThread(int)",
"public void setPostFilterThreshold(double)",
"public void setApproximateThreshold(double)",
+ "public void setTargetHitsMaxAdjustmentFactor(double)",
"public void prepare(com.yahoo.search.query.ranking.RankProperties)",
"public com.yahoo.search.query.ranking.Matching clone()",
"public boolean equals(java.lang.Object)",
@@ -7000,6 +7002,7 @@
"public static final java.lang.String MINHITSPERTHREAD",
"public static final java.lang.String POST_FILTER_THRESHOLD",
"public static final java.lang.String APPROXIMATE_THRESHOLD",
+ "public static final java.lang.String TARGET_HITS_MAX_ADJUSTMENT_FACTOR",
"public java.lang.Double termwiseLimit"
]
},
diff --git a/container-search/pom.xml b/container-search/pom.xml
index cea05826ce0..4c1d337d8ad 100644
--- a/container-search/pom.xml
+++ b/container-search/pom.xml
@@ -157,20 +157,11 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
- <groupId>org.assertj</groupId>
- <artifactId>assertj-core</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter-api</artifactId>
<scope>test</scope>
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java b/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java
index 625a8bcb6da..c86c21d677f 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java
@@ -1,20 +1,17 @@
package com.yahoo.search.dispatch;
import com.yahoo.component.ComponentId;
+import com.yahoo.component.annotation.Inject;
import com.yahoo.config.subscription.ConfigSubscriber;
+import com.yahoo.container.QrConfig;
import com.yahoo.container.handler.VipStatus;
-import com.yahoo.messagebus.network.rpc.SlobrokConfigSubscriber;
import com.yahoo.vespa.config.search.DispatchConfig;
import com.yahoo.vespa.config.search.DispatchNodesConfig;
import com.yahoo.yolean.UncheckedInterruptedException;
-import java.util.Objects;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import static java.util.Objects.requireNonNull;
-
/**
* @author jonmv
*/
@@ -22,10 +19,20 @@ public class ReconfigurableDispatcher extends Dispatcher {
private final ConfigSubscriber subscriber;
- public ReconfigurableDispatcher(ComponentId clusterId, DispatchConfig dispatchConfig, VipStatus vipStatus) {
+ @Inject
+ public ReconfigurableDispatcher(ComponentId clusterId, DispatchConfig dispatchConfig, QrConfig qrConfig, VipStatus vipStatus) {
super(clusterId, dispatchConfig, new DispatchNodesConfig.Builder().build(), vipStatus);
this.subscriber = new ConfigSubscriber();
- this.subscriber.subscribe(this::updateWithNewConfig, DispatchNodesConfig.class, clusterId.stringValue());
+ CountDownLatch configured = new CountDownLatch(1);
+ this.subscriber.subscribe(config -> { updateWithNewConfig(config); configured.countDown(); },
+ DispatchNodesConfig.class, configId(clusterId, qrConfig));
+ try {
+ if ( ! configured.await(1, TimeUnit.MINUTES))
+ throw new IllegalStateException("timed out waiting for initial dispatch nodes config for " + clusterId.getName());
+ }
+ catch (InterruptedException e) {
+ throw new UncheckedInterruptedException("interrupted waiting for initial dispatch nodes config for " + clusterId.getName(), e);
+ }
}
@Override
@@ -34,4 +41,8 @@ public class ReconfigurableDispatcher extends Dispatcher {
super.deconstruct();
}
+ private static String configId(ComponentId clusterId, QrConfig qrConfig) {
+ return qrConfig.clustername() + "/component/" + clusterId.getName();
+ }
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java b/container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java
index c931c6a356f..45a0c606584 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/Json2SingleLevelMap.java
@@ -77,7 +77,7 @@ class Json2SingleLevelMap {
} else if (token == JsonToken.START_ARRAY) {
map.put(fieldName, skipChildren(parser, buf));
} else if (token == JsonToken.START_OBJECT) {
- if (fieldName.equals("select.where") || fieldName.equals("select.grouping")) {
+ if (fieldName.startsWith("input.") || fieldName.equals("select.where") || fieldName.equals("select.grouping")) {
map.put(fieldName, skipChildren(parser, buf));
} else {
parse(map, fieldName + ".");
diff --git a/container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java b/container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java
index 601da11ab33..ec8e7dd3a79 100644
--- a/container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java
+++ b/container-search/src/main/java/com/yahoo/search/logging/AbstractSpoolingLogger.java
@@ -3,7 +3,6 @@ package com.yahoo.search.logging;
import com.yahoo.concurrent.DaemonThreadFactory;
-import java.io.IOException;
import java.time.Clock;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
@@ -54,8 +53,15 @@ public abstract class AbstractSpoolingLogger extends AbstractThreadedLogger impl
return true;
}
- // TODO Call from a component or make this class a component
- public void shutdown() {
+ /**
+  * @deprecated use {@link #deconstruct()} instead
+  */
+ @Deprecated
+ public void shutdown() { deconstruct(); }
+
+ @Override
+ public void deconstruct() {
+ super.deconstruct();
executorService.shutdown();
try {
if ( ! executorService.awaitTermination(10, TimeUnit.SECONDS))
diff --git a/container-search/src/main/java/com/yahoo/search/logging/AbstractThreadedLogger.java b/container-search/src/main/java/com/yahoo/search/logging/AbstractThreadedLogger.java
index ffc916bad65..db5d0494822 100644
--- a/container-search/src/main/java/com/yahoo/search/logging/AbstractThreadedLogger.java
+++ b/container-search/src/main/java/com/yahoo/search/logging/AbstractThreadedLogger.java
@@ -2,6 +2,8 @@
package com.yahoo.search.logging;
+import com.yahoo.component.AbstractComponent;
+
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
@@ -10,7 +12,7 @@ import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
-abstract class AbstractThreadedLogger implements Logger {
+abstract class AbstractThreadedLogger extends AbstractComponent implements Logger {
private final static java.util.logging.Logger log = java.util.logging.Logger.getLogger(AbstractThreadedLogger.class.getName());
@@ -51,10 +53,15 @@ abstract class AbstractThreadedLogger implements Logger {
}
/**
- * Actually transports the entry to it's destination
+ * Actually transports the entry to its destination
*/
public abstract boolean transport(LoggerEntry entry);
+ /** Synchronously shuts down and waits for enqueued entries to be sent. */
+ @Override
+ public void deconstruct() {
+ executor.close();
+ }
private static class WorkerThread extends Thread {
diff --git a/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java b/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java
index f73ed52246c..7b9fe7da7a2 100644
--- a/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java
+++ b/container-search/src/main/java/com/yahoo/search/query/profile/QueryProfileProperties.java
@@ -108,7 +108,7 @@ public class QueryProfileProperties extends Properties {
/**
* Sets a value in this query profile
*
- * @throws IllegalArgumentException if this property cannot be set in the wrapped query profile
+ * @throws IllegalInputException if this property cannot be set in the wrapped query profile
*/
@Override
public void set(CompoundName name, Object value, Map<String, String> context) {
diff --git a/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java b/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java
index 896a916aa50..99d6959441a 100644
--- a/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java
+++ b/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java
@@ -91,6 +91,7 @@ public class QueryProperties extends Properties {
addDualCasedRM(map, Matching.MINHITSPERTHREAD, GetterSetter.of(query -> query.getRanking().getMatching().getMinHitsPerThread(), (query, value) -> query.getRanking().getMatching().setMinHitsPerThread(asInteger(value, 0))));
addDualCasedRM(map, Matching.POST_FILTER_THRESHOLD, GetterSetter.of(query -> query.getRanking().getMatching().getPostFilterThreshold(), (query, value) -> query.getRanking().getMatching().setPostFilterThreshold(asDouble(value, 1.0))));
addDualCasedRM(map, Matching.APPROXIMATE_THRESHOLD, GetterSetter.of(query -> query.getRanking().getMatching().getApproximateThreshold(), (query, value) -> query.getRanking().getMatching().setApproximateThreshold(asDouble(value, 0.05))));
+ addDualCasedRM(map, Matching.TARGET_HITS_MAX_ADJUSTMENT_FACTOR, GetterSetter.of(query -> query.getRanking().getMatching().getTargetHitsMaxAdjustmentFactor(), (query, value) -> query.getRanking().getMatching().setTargetHitsMaxAdjustmentFactor(asDouble(value, 20.0))));
map.put(CompoundName.fromComponents(Ranking.RANKING, Ranking.MATCH_PHASE, MatchPhase.ATTRIBUTE), GetterSetter.of(query -> query.getRanking().getMatchPhase().getAttribute(), (query, value) -> query.getRanking().getMatchPhase().setAttribute(asString(value, null))));
map.put(CompoundName.fromComponents(Ranking.RANKING, Ranking.MATCH_PHASE, MatchPhase.ASCENDING), GetterSetter.of(query -> query.getRanking().getMatchPhase().getAscending(), (query, value) -> query.getRanking().getMatchPhase().setAscending(asBoolean(value, false))));
@@ -169,7 +170,6 @@ public class QueryProperties extends Properties {
return;
}
- //TODO Why is there error handling in set path and not in get path ?
if (key.first().equals(Ranking.RANKING)) {
if (key.size() > 2) {
String restKey = key.rest().rest().toString();
@@ -189,6 +189,7 @@ public class QueryProperties extends Properties {
}
}
if (reservedPrefix.contains(key.first())) {
+ // Setting a property under the reserved paths is illegal, while retrieving (get) one is not.
throwIllegalParameter(key.rest().toString(), key.first());
} else {
super.set(key, value, context);
diff --git a/container-search/src/main/java/com/yahoo/search/query/properties/RankProfileInputProperties.java b/container-search/src/main/java/com/yahoo/search/query/properties/RankProfileInputProperties.java
index 6c65a5e898a..6203eadffad 100644
--- a/container-search/src/main/java/com/yahoo/search/query/properties/RankProfileInputProperties.java
+++ b/container-search/src/main/java/com/yahoo/search/query/properties/RankProfileInputProperties.java
@@ -3,6 +3,7 @@ package com.yahoo.search.query.properties;
import com.yahoo.api.annotations.Beta;
import com.yahoo.language.process.Embedder;
+import com.yahoo.processing.IllegalInputException;
import com.yahoo.processing.request.CompoundName;
import com.yahoo.search.Query;
import com.yahoo.search.schema.SchemaInfo;
@@ -46,7 +47,7 @@ public class RankProfileInputProperties extends Properties {
query.getModel().getLanguage());
}
catch (IllegalArgumentException e) {
- throw new IllegalArgumentException("Could not set '" + name + "' to '" + value + "'", e);
+ throw new IllegalInputException("Could not set '" + name + "' to '" + value + "'", e);
}
}
}
@@ -84,7 +85,7 @@ public class RankProfileInputProperties extends Properties {
}
private void throwIllegalInput(CompoundName name, Object value, TensorType expectedType) {
- throw new IllegalArgumentException("Could not set '" + name + "' to '" + value + "': " +
+ throw new IllegalInputException("Could not set '" + name + "' to '" + value + "': " +
"This input is declared in rank profile '" + query.getRanking().getProfile() +
"' as " + expectedType);
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java b/container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java
index 35fbd52f967..4d21f32d16d 100644
--- a/container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java
+++ b/container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java
@@ -24,6 +24,7 @@ public class Matching implements Cloneable {
public static final String MINHITSPERTHREAD = "minHitsPerThread";
public static final String POST_FILTER_THRESHOLD = "postFilterThreshold";
public static final String APPROXIMATE_THRESHOLD = "approximateThreshold";
+ public static final String TARGET_HITS_MAX_ADJUSTMENT_FACTOR = "targetHitsMaxAdjustmentFactor";
static {
argumentType =new QueryProfileType(Ranking.MATCHING);
@@ -35,6 +36,7 @@ public class Matching implements Cloneable {
argumentType.addField(new FieldDescription(MINHITSPERTHREAD, "integer"));
argumentType.addField(new FieldDescription(POST_FILTER_THRESHOLD, "double"));
argumentType.addField(new FieldDescription(APPROXIMATE_THRESHOLD, "double"));
+ argumentType.addField(new FieldDescription(TARGET_HITS_MAX_ADJUSTMENT_FACTOR, "double"));
argumentType.freeze();
}
@@ -46,6 +48,7 @@ public class Matching implements Cloneable {
private Integer minHitsPerThread = null;
private Double postFilterThreshold = null;
private Double approximateThreshold = null;
+ private Double targetHitsMaxAdjustmentFactor = null;
public Double getTermwiseLimit() { return termwiseLimit; }
public Integer getNumThreadsPerSearch() { return numThreadsPerSearch; }
@@ -53,6 +56,7 @@ public class Matching implements Cloneable {
public Integer getMinHitsPerThread() { return minHitsPerThread; }
public Double getPostFilterThreshold() { return postFilterThreshold; }
public Double getApproximateThreshold() { return approximateThreshold; }
+ public Double getTargetHitsMaxAdjustmentFactor() { return targetHitsMaxAdjustmentFactor; }
public void setTermwiselimit(double value) {
if ((value < 0.0) || (value > 1.0)) {
@@ -75,6 +79,9 @@ public class Matching implements Cloneable {
public void setApproximateThreshold(double threshold) {
approximateThreshold = threshold;
}
+ public void setTargetHitsMaxAdjustmentFactor(double factor) {
+ targetHitsMaxAdjustmentFactor = factor;
+ }
/** Internal operation - DO NOT USE */
public void prepare(RankProperties rankProperties) {
@@ -97,6 +104,9 @@ public class Matching implements Cloneable {
if (approximateThreshold != null) {
rankProperties.put("vespa.matching.global_filter.lower_limit", String.valueOf(approximateThreshold));
}
+ if (targetHitsMaxAdjustmentFactor != null) {
+ rankProperties.put("vespa.matching.nns.target_hits_max_adjustment_factor", String.valueOf(targetHitsMaxAdjustmentFactor));
+ }
}
@Override
@@ -119,12 +129,14 @@ public class Matching implements Cloneable {
Objects.equals(numSearchPartitions, matching.numSearchPartitions) &&
Objects.equals(minHitsPerThread, matching.minHitsPerThread) &&
Objects.equals(postFilterThreshold, matching.postFilterThreshold) &&
- Objects.equals(approximateThreshold, matching.approximateThreshold);
+ Objects.equals(approximateThreshold, matching.approximateThreshold) &&
+ Objects.equals(targetHitsMaxAdjustmentFactor, matching.targetHitsMaxAdjustmentFactor);
}
@Override
public int hashCode() {
- return Objects.hash(termwiseLimit, numThreadsPerSearch, numSearchPartitions, minHitsPerThread, postFilterThreshold, approximateThreshold);
+ return Objects.hash(termwiseLimit, numThreadsPerSearch, numSearchPartitions, minHitsPerThread,
+ postFilterThreshold, approximateThreshold, targetHitsMaxAdjustmentFactor);
}
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/ranking/RankFeatures.java b/container-search/src/main/java/com/yahoo/search/query/ranking/RankFeatures.java
index f288010a633..724325051f9 100644
--- a/container-search/src/main/java/com/yahoo/search/query/ranking/RankFeatures.java
+++ b/container-search/src/main/java/com/yahoo/search/query/ranking/RankFeatures.java
@@ -95,7 +95,9 @@ public class RankFeatures implements Cloneable {
if (feature == null) return Optional.empty();
if (feature instanceof Tensor) return Optional.of((Tensor)feature);
if (feature instanceof Double) return Optional.of(Tensor.from((Double)feature));
- throw new IllegalArgumentException("Expected '" + name + "' to be a tensor, but it is the string '" + feature + "'");
+ throw new IllegalArgumentException("Expected '" + name + "' to be a tensor, but it is the string '" + feature +
+ "'. This usually means that '" + name + "' is not defined in the schema. " +
+ "See https://docs.vespa.ai/en/tensor-user-guide.html#querying-with-tensors");
}
/**
diff --git a/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java b/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java
index 62b56e6e8ff..4956698cc2f 100644
--- a/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java
+++ b/container-search/src/test/java/com/yahoo/search/dispatch/LoadBalancerTest.java
@@ -7,7 +7,6 @@ import com.yahoo.search.dispatch.LoadBalancer.GroupStatus;
import com.yahoo.search.dispatch.searchcluster.Group;
import com.yahoo.search.dispatch.searchcluster.Node;
import org.junit.jupiter.api.Test;
-import org.opentest4j.AssertionFailedError;
import java.time.Duration;
import java.time.Instant;
@@ -34,7 +33,7 @@ public class LoadBalancerTest {
Optional<Group> grp = lb.takeGroup(null);
Group group = grp.orElseThrow(() -> {
- throw new AssertionFailedError("Expected a SearchCluster.Group");
+ throw new IllegalStateException("Expected a SearchCluster.Group");
});
assertEquals(1, group.nodes().size());
}
@@ -47,7 +46,7 @@ public class LoadBalancerTest {
Optional<Group> grp = lb.takeGroup(null);
Group group = grp.orElseThrow(() -> {
- throw new AssertionFailedError("Expected a SearchCluster.Group");
+ throw new IllegalStateException("Expected a SearchCluster.Group");
});
assertEquals(1, group.nodes().size());
}
diff --git a/container-search/src/test/java/com/yahoo/search/grouping/vespa/RequestBuilderTestCase.java b/container-search/src/test/java/com/yahoo/search/grouping/vespa/RequestBuilderTestCase.java
index f8e67a10076..6d02721c15e 100644
--- a/container-search/src/test/java/com/yahoo/search/grouping/vespa/RequestBuilderTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/grouping/vespa/RequestBuilderTestCase.java
@@ -25,10 +25,8 @@ import com.yahoo.searchlib.expression.StrCatFunctionNode;
import com.yahoo.searchlib.expression.StringResultNode;
import com.yahoo.searchlib.expression.TimeStampFunctionNode;
import com.yahoo.searchlib.expression.ToStringFunctionNode;
-import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.Test;
-import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
@@ -468,29 +466,29 @@ public class RequestBuilderTestCase {
String expectedA = "[{ Attribute, result = [Count] }]";
assertLayout("all(group(a) each(output(count())))",
- Arrays.asList(expectedA).toString());
+ List.of(expectedA).toString());
assertLayout("all(group(a) each(output(count()) all()))",
- Arrays.asList(expectedA).toString());
+ List.of(expectedA).toString());
assertLayout("all(group(a) each(output(count()) all(group(b))))",
- Arrays.asList(expectedA).toString());
+ List.of(expectedA).toString());
assertLayout("all(group(a) each(output(count()) all(group(b) each())))",
- Arrays.asList(expectedA).toString());
+ List.of(expectedA).toString());
assertLayout("all(group(a) each(output(count()) all(group(b) each())))",
- Arrays.asList(expectedA).toString());
+ List.of(expectedA).toString());
assertLayout("all(group(a) each(output(count()) all(group(b) each())) as(foo)" +
" each())",
- Arrays.asList(expectedA).toString());
+ List.of(expectedA).toString());
assertLayout("all(group(a) each(output(count()) all(group(b) each())) as(foo)" +
" each(group(b)))",
- Arrays.asList(expectedA).toString());
+ List.of(expectedA).toString());
assertLayout("all(group(a) each(output(count()) all(group(b) each())) as(foo)" +
" each(group(b) each()))",
- Arrays.asList(expectedA).toString());
+ List.of(expectedA).toString());
String expectedB = "[{ Attribute }, { Attribute, result = [Count] }]";
assertLayout("all(group(a) each(output(count()) all(group(b) each())) as(foo)" +
" each(group(b) each(output(count()))))",
- Arrays.asList(expectedB, expectedA).toString());
+ List.of(expectedB, expectedA).toString());
}
@Test
@@ -650,8 +648,8 @@ public class RequestBuilderTestCase {
@Test
void requireThatTimeZoneIsAppliedToTimeFunctions() {
- for (String timePart : Arrays.asList("dayofmonth", "dayofweek", "dayofyear", "hourofday",
- "minuteofhour", "monthofyear", "secondofminute", "year"))
+ for (String timePart : List.of("dayofmonth", "dayofweek", "dayofyear", "hourofday",
+ "minuteofhour", "monthofyear", "secondofminute", "year"))
{
String request = "all(output(avg(time." + timePart + "(foo))))";
assertTimeZone(request, "GMT-2", -7200L);
@@ -686,7 +684,7 @@ public class RequestBuilderTestCase {
test.expectedOutput = Boolean.toString(true);
test.request = "all(output(avg(now() - foo)))";
test.outputWriter = new OutputWriter() {
- long before = System.currentTimeMillis();
+ final long before = System.currentTimeMillis();
@Override
public String write(List<Grouping> groupingList, GroupingTransform transform) {
@@ -836,7 +834,7 @@ public class RequestBuilderTestCase {
builder.build();
fail();
} catch (IllegalInputException e) {
- Assertions.assertThat(e.getMessage()).contains(errorSubstring);
+ assertTrue(e.getMessage().contains(errorSubstring));
}
}
@@ -924,7 +922,7 @@ public class RequestBuilderTestCase {
RequestBuilder builder = new RequestBuilder(0);
builder.setRootOperation(GroupingOperation.fromString(test.request));
builder.setTimeZone(TimeZone.getTimeZone(test.timeZone));
- builder.addContinuations(Arrays.asList(test.continuation));
+ builder.addContinuations(test.continuation != null ? List.of(test.continuation) : List.of());
try {
builder.build();
if (test.expectedException != null) {
diff --git a/container-search/src/test/java/com/yahoo/search/handler/JSONSearchHandlerTestCase.java b/container-search/src/test/java/com/yahoo/search/handler/JSONSearchHandlerTestCase.java
index 83022ccf3ff..7b8015044c6 100644
--- a/container-search/src/test/java/com/yahoo/search/handler/JSONSearchHandlerTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/handler/JSONSearchHandlerTestCase.java
@@ -16,7 +16,6 @@ import com.yahoo.search.searchchain.config.test.SearchChainConfigurerTestCase;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.test.json.JsonTestHelper;
-import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Disabled;
@@ -149,7 +148,7 @@ public class JSONSearchHandlerTestCase {
configurer.reloadConfig();
SearchHandler newSearchHandler = fetchSearchHandler(configurer);
- assertTrue(searchHandler != newSearchHandler, "Do I have a new instance of the search handler?");
+ assertNotSame(searchHandler, newSearchHandler, "Do I have a new instance of the search handler?");
try (RequestHandlerTestDriver newDriver = new RequestHandlerTestDriver(newSearchHandler)) {
ObjectNode json = jsonMapper.createObjectNode();
json.put("yql", "selectz * from foo where bar > 1453501295");
@@ -193,13 +192,15 @@ public class JSONSearchHandlerTestCase {
ObjectNode json = jsonMapper.createObjectNode();
json.put("format", "xml");
- assertEquals("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
- "<result total-hit-count=\"0\">\n" +
- " <hit relevancy=\"1.0\">\n" +
- " <field name=\"relevancy\">1.0</field>\n" +
- " <field name=\"uri\">testHit</field>\n" +
- " </hit>\n" +
- "</result>\n", driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE).readAll());
+ assertEquals("""
+ <?xml version="1.0" encoding="utf-8" ?>
+ <result total-hit-count="0">
+ <hit relevancy="1.0">
+ <field name="relevancy">1.0</field>
+ <field name="uri">testHit</field>
+ </hit>
+ </result>
+ """, driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE).readAll());
}
@Test
@@ -253,13 +254,15 @@ public class JSONSearchHandlerTestCase {
}
private static final String xmlResult =
- "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
- "<result total-hit-count=\"0\">\n" +
- " <hit relevancy=\"1.0\">\n" +
- " <field name=\"relevancy\">1.0</field>\n" +
- " <field name=\"uri\">testHit</field>\n" +
- " </hit>\n" +
- "</result>\n";
+ """
+ <?xml version="1.0" encoding="utf-8" ?>
+ <result total-hit-count="0">
+ <hit relevancy="1.0">
+ <field name="relevancy">1.0</field>
+ <field name="uri">testHit</field>
+ </hit>
+ </result>
+ """;
private void assertXmlResult(JsonNode json, RequestHandlerTestDriver driver) {
assertOkResult(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE), xmlResult);
@@ -276,19 +279,6 @@ public class JSONSearchHandlerTestCase {
}
- private static final String pageResult =
- "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
- "<page version=\"1.0\">\n" +
- "\n" +
- " <content>\n" +
- " <hit relevance=\"1.0\">\n" +
- " <id>testHit</id>\n" +
- " <uri>testHit</uri>\n" +
- " </hit>\n" +
- " </content>\n" +
- "\n" +
- "</page>\n";
-
private void assertOkResult(RequestHandlerTestDriver.MockResponseHandler response, String expected) {
assertEquals(expected, response.readAll());
assertEquals(200, response.getStatus());
@@ -302,11 +292,26 @@ public class JSONSearchHandlerTestCase {
configurer.reloadConfig();
SearchHandler newSearchHandler = fetchSearchHandler(configurer);
- assertTrue(searchHandler != newSearchHandler, "Do I have a new instance of the search handler?");
+ assertNotSame(searchHandler, newSearchHandler, "Do I have a new instance of the search handler?");
return new RequestHandlerTestDriver(newSearchHandler);
}
@Test
+ void testInputParameters() throws IOException {
+ String json = """
+ {
+ "input": {
+ "query(q_category)": { "Tablet Keyboard Cases": 42.5 },
+ "query(q_vector)": [ 1, 2.5, 3 ]
+ }
+ }
+ """;
+ Map<String, String> map = new Json2SingleLevelMap(new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8))).parse();
+ assertEquals("{ \"Tablet Keyboard Cases\": 42.5 }", map.get("input.query(q_category)"));
+ assertEquals("[ 1, 2.5, 3 ]", map.get("input.query(q_vector)"));
+ }
+
+ @Test
void testSelectParameters() throws IOException {
ObjectNode json = jsonMapper.createObjectNode();
@@ -353,23 +358,25 @@ public class JSONSearchHandlerTestCase {
@Test
void testJsonWithWhereAndGroupingUnderSelect() {
- String query = "{\n" +
- " \"select\": {\n" +
- " \"where\": {\n" +
- " \"contains\": [\n" +
- " \"field\",\n" +
- " \"term\"\n" +
- " ]\n" +
- " },\n" +
- " \"grouping\":[\n" +
- " {\n" +
- " \"all\": {\n" +
- " \"output\": \"count()\"\n" +
- " }\n" +
- " }\n" +
- " ]\n" +
- " }\n" +
- "}\n";
+ String query = """
+ {
+ "select": {
+ "where": {
+ "contains": [
+ "field",
+ "term"
+ ]
+ },
+ "grouping":[
+ {
+ "all": {
+ "output": "count()"
+ }
+ }
+ ]
+ }
+ }
+ """;
String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, query, JSON_CONTENT_TYPE).readAll();
String expected = "{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where field contains \\\"term\\\" | all(output(count()))\"}}]}}";
@@ -378,21 +385,23 @@ public class JSONSearchHandlerTestCase {
@Test
void testJsonWithWhereAndGroupingSeparate() {
- String query = "{\n" +
- " \"select.where\": {\n" +
- " \"contains\": [\n" +
- " \"field\",\n" +
- " \"term\"\n" +
- " ]\n" +
- " },\n" +
- " \"select.grouping\":[\n" +
- " {\n" +
- " \"all\": {\n" +
- " \"output\": \"count()\"\n" +
- " }\n" +
- " }\n" +
- " ]\n" +
- "}\n";
+ String query = """
+ {
+ "select.where": {
+ "contains": [
+ "field",
+ "term"
+ ]
+ },
+ "select.grouping":[
+ {
+ "all": {
+ "output": "count()"
+ }
+ }
+ ]
+ }
+ """;
String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, query, JSON_CONTENT_TYPE).readAll();
String expected = "{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where field contains \\\"term\\\" | all(output(count()))\"}}]}}";
@@ -524,7 +533,7 @@ public class JSONSearchHandlerTestCase {
// Get mapping
Map<String, String> propertyMap = request.propertyMap();
- Assertions.assertThat(propertyMap).isEqualTo(map);
+ assertEquals(propertyMap, map);
}
@Test
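The conversions above replace concatenated string literals with Java text blocks. A small standalone sketch, independent of the test fixtures, of how a text block handles indentation and embedded quotes:

    public class TextBlockSketch {
        public static void main(String[] args) {
            String xml = """
                    <result total-hit-count="0">
                      <hit relevancy="1.0"/>
                    </result>
                    """;
            // Indentation common to all lines (including the closing delimiter) is stripped,
            // embedded double quotes need no escaping, and the block ends with a newline.
            System.out.print(xml);
        }
    }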
diff --git a/container-search/src/test/java/com/yahoo/search/logging/LocalDiskLoggerTest.java b/container-search/src/test/java/com/yahoo/search/logging/LocalDiskLoggerTest.java
index 6ed1154e58a..e94068fa988 100644
--- a/container-search/src/test/java/com/yahoo/search/logging/LocalDiskLoggerTest.java
+++ b/container-search/src/test/java/com/yahoo/search/logging/LocalDiskLoggerTest.java
@@ -32,21 +32,11 @@ public class LocalDiskLoggerTest {
.blob("my entry blob content".getBytes())
.track("my-track")
.send();
- waitForFile(logFile);
+ logger.deconstruct();
String test = IOUtils.readAll(new FileReader(logFile));
assertTrue(test.contains(Base64.getEncoder().encodeToString("my entry blob content".getBytes())));
assertTrue(test.contains("my-track"));
}
- private void waitForFile(File file) throws InterruptedException {
- int waitFor = 10;
- while ( ! file.exists() && --waitFor > 0) {
- Thread.sleep(10);
- }
- if ( ! file.exists()) {
- fail("Local disk logger file was not created");
- }
- }
-
}
diff --git a/container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java b/container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java
index e3a1eb18a33..37d0e9e1072 100644
--- a/container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java
@@ -20,6 +20,7 @@ public class MatchingTestCase {
assertNull(query.getRanking().getMatching().getMinHitsPerThread());
assertNull(query.getRanking().getMatching().getPostFilterThreshold());
assertNull(query.getRanking().getMatching().getApproximateThreshold());
+ assertNull(query.getRanking().getMatching().getTargetHitsMaxAdjustmentFactor());
}
@Test
@@ -30,13 +31,15 @@ public class MatchingTestCase {
"&ranking.matching.numSearchPartitions=13" +
"&ranking.matching.minHitsPerThread=3" +
"&ranking.matching.postFilterThreshold=0.8" +
- "&ranking.matching.approximateThreshold=0.3");
+ "&ranking.matching.approximateThreshold=0.3" +
+ "&ranking.matching.targetHitsMaxAdjustmentFactor=2.5");
assertEquals(Double.valueOf(0.7), query.getRanking().getMatching().getTermwiseLimit());
assertEquals(Integer.valueOf(17), query.getRanking().getMatching().getNumThreadsPerSearch());
assertEquals(Integer.valueOf(13), query.getRanking().getMatching().getNumSearchPartitions());
assertEquals(Integer.valueOf(3), query.getRanking().getMatching().getMinHitsPerThread());
assertEquals(Double.valueOf(0.8), query.getRanking().getMatching().getPostFilterThreshold());
assertEquals(Double.valueOf(0.3), query.getRanking().getMatching().getApproximateThreshold());
+ assertEquals(Double.valueOf(2.5), query.getRanking().getMatching().getTargetHitsMaxAdjustmentFactor());
query.prepare();
assertEquals("0.7", query.getRanking().getProperties().get("vespa.matching.termwise_limit").get(0));
@@ -45,6 +48,7 @@ public class MatchingTestCase {
assertEquals("3", query.getRanking().getProperties().get("vespa.matching.minhitsperthread").get(0));
assertEquals("0.8", query.getRanking().getProperties().get("vespa.matching.global_filter.upper_limit").get(0));
assertEquals("0.3", query.getRanking().getProperties().get("vespa.matching.global_filter.lower_limit").get(0));
+ assertEquals("2.5", query.getRanking().getProperties().get("vespa.matching.nns.target_hits_max_adjustment_factor").get(0));
}
@Test
diff --git a/container-search/src/test/java/com/yahoo/search/query/SortingTestCase.java b/container-search/src/test/java/com/yahoo/search/query/SortingTestCase.java
index 8422196638a..b325bde05d9 100644
--- a/container-search/src/test/java/com/yahoo/search/query/SortingTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/query/SortingTestCase.java
@@ -81,8 +81,8 @@ public class SortingTestCase {
private void requireThatChineseHasCorrectRules(Collator col) {
final int reorderCodes [] = {UScript.HAN};
- assertEquals("14.0.0.0", col.getUCAVersion().toString());
- assertEquals("153.112.40.0", col.getVersion().toString());
+ assertEquals("15.0.0.0", col.getUCAVersion().toString());
+ assertEquals("153.120.43.8", col.getVersion().toString());
assertEquals(Arrays.toString(reorderCodes), Arrays.toString(col.getReorderCodes()));
assertNotEquals("", ((RuleBasedCollator) col).getRules());
diff --git a/controller-api/pom.xml b/controller-api/pom.xml
index 73e4522c521..f19349ae801 100644
--- a/controller-api/pom.xml
+++ b/controller-api/pom.xml
@@ -77,11 +77,6 @@
<!-- compile -->
<dependency>
- <groupId>com.intellij</groupId>
- <artifactId>annotations</artifactId>
- <version>9.0.4</version>
- </dependency>
- <dependency>
<artifactId>aws-java-sdk-core</artifactId>
<groupId>com.amazonaws</groupId>
<exclusions>
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java
index f73aeb89f0e..d9384373deb 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentData.java
@@ -8,8 +8,6 @@ import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.athenz.api.AthenzDomain;
import com.yahoo.vespa.hosted.controller.api.integration.billing.Quota;
-import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.dataplanetoken.DataplaneTokenVersions;
import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretStore;
import com.yahoo.yolean.concurrent.Memoized;
@@ -18,7 +16,6 @@ import java.io.InputStream;
import java.security.cert.X509Certificate;
import java.util.List;
import java.util.Optional;
-import java.util.Set;
import java.util.function.Supplier;
import static java.util.Objects.requireNonNull;
@@ -35,8 +32,7 @@ public class DeploymentData {
private final ZoneId zone;
private final Supplier<InputStream> applicationPackage;
private final Version platform;
- private final Set<ContainerEndpoint> containerEndpoints;
- private final Supplier<Optional<EndpointCertificate>> endpointCertificate;
+ private final Supplier<DeploymentEndpoints> endpoints;
private final Optional<DockerImage> dockerImageRepo;
private final Optional<AthenzDomain> athenzDomain;
private final Supplier<Quota> quota;
@@ -47,8 +43,7 @@ public class DeploymentData {
private final boolean dryRun;
public DeploymentData(ApplicationId instance, ZoneId zone, Supplier<InputStream> applicationPackage, Version platform,
- Set<ContainerEndpoint> containerEndpoints,
- Supplier<Optional<EndpointCertificate>> endpointCertificate,
+ Supplier<DeploymentEndpoints> endpoints,
Optional<DockerImage> dockerImageRepo,
Optional<AthenzDomain> athenzDomain,
Supplier<Quota> quota,
@@ -61,8 +56,7 @@ public class DeploymentData {
this.zone = requireNonNull(zone);
this.applicationPackage = requireNonNull(applicationPackage);
this.platform = requireNonNull(platform);
- this.containerEndpoints = Set.copyOf(requireNonNull(containerEndpoints));
- this.endpointCertificate = new Memoized<>(requireNonNull(endpointCertificate));
+ this.endpoints = new Memoized<>(requireNonNull(endpoints));
this.dockerImageRepo = requireNonNull(dockerImageRepo);
this.athenzDomain = athenzDomain;
this.quota = new Memoized<>(requireNonNull(quota));
@@ -89,12 +83,8 @@ public class DeploymentData {
return platform;
}
- public Set<ContainerEndpoint> containerEndpoints() {
- return containerEndpoints;
- }
-
- public Optional<EndpointCertificate> endpointCertificate() {
- return endpointCertificate.get();
+ public Supplier<DeploymentEndpoints> endpoints() {
+ return endpoints;
}
public Optional<DockerImage> dockerImageRepo() {
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentEndpoints.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentEndpoints.java
new file mode 100644
index 00000000000..9ec17571a35
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/DeploymentEndpoints.java
@@ -0,0 +1,25 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.application.v4.model;
+
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
+
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+/**
+ * The endpoints and their certificate (if any) of a deployment.
+ *
+ * @author mpolden
+ */
+public record DeploymentEndpoints(Set<ContainerEndpoint> endpoints, Optional<EndpointCertificate> certificate) {
+
+ public static final DeploymentEndpoints none = new DeploymentEndpoints(Set.of(), Optional.empty());
+
+ public DeploymentEndpoints {
+ Objects.requireNonNull(endpoints);
+ Objects.requireNonNull(certificate);
+ }
+
+}
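A minimal sketch of how the new record might be used: both components must be non-null, and the none constant stands in when a deployment has neither endpoints nor a certificate.

    import java.util.Optional;
    import java.util.Set;

    import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeploymentEndpoints;

    class DeploymentEndpointsSketch {
        public static void main(String[] args) {
            // Explicitly empty endpoints equal the 'none' constant (record equality is structural).
            DeploymentEndpoints empty = new DeploymentEndpoints(Set.of(), Optional.empty());
            System.out.println(empty.equals(DeploymentEndpoints.none)); // true
            // new DeploymentEndpoints(null, Optional.empty()) would throw NullPointerException.
        }
    }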
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java
index 7246903a51b..4746fa2da26 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ContainerEndpoint.java
@@ -1,79 +1,39 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration.configserver;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.config.provision.zone.RoutingMethod;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
import java.util.OptionalInt;
/**
- * This represents a list of one or more names for a container cluster.
+ * The endpoint of a container cluster. This encapsulates the endpoint details passed from controller to the config
+ * server on deploy.
+ *
+ * @param clusterId ID of the cluster to which this points
+ * @param scope Scope of this endpoint
+ * @param names All valid DNS names for this endpoint. This can contain both proper DNS names and synthetic identifiers
+ * used for routing, such as a Host header value that is not necessarily a proper DNS name
+ * @param weight The relative weight of this endpoint
+ * @param routingMethod The routing method used by this endpoint
+ * @param authMethods Supported authentication methods for each endpoint name
*
* @author mpolden
*/
-public class ContainerEndpoint {
-
- private final String clusterId;
- private final String scope;
- private final List<String> names;
- private final OptionalInt weight;
- private final RoutingMethod routingMethod;
+public record ContainerEndpoint(String clusterId, String scope, List<String> names, OptionalInt weight,
+ RoutingMethod routingMethod, Map<String, AuthMethod> authMethods) {
- public ContainerEndpoint(String clusterId, String scope, List<String> names, OptionalInt weight, RoutingMethod routingMethod) {
+ public ContainerEndpoint(String clusterId, String scope, List<String> names, OptionalInt weight,
+ RoutingMethod routingMethod, Map<String, AuthMethod> authMethods) {
this.clusterId = nonEmpty(clusterId, "clusterId must be non-empty");
this.scope = Objects.requireNonNull(scope, "scope must be non-null");
this.names = List.copyOf(Objects.requireNonNull(names, "names must be non-null"));
this.weight = Objects.requireNonNull(weight, "weight must be non-null");
this.routingMethod = Objects.requireNonNull(routingMethod, "routingMethod must be non-null");
- }
-
- /** ID of the cluster to which this points */
- public String clusterId() {
- return clusterId;
- }
-
- /** The scope of this endpoint */
- public String scope() {
- return scope;
- }
-
- /**
- * All valid DNS names for this endpoint. This can contain both proper DNS names and synthetic identifiers used for
- * routing, such as a Host header value that is not necessarily a proper DNS name.
- */
- public List<String> names() {
- return names;
- }
-
- /** The relative weight of this endpoint */
- public OptionalInt weight() {
- return weight;
- }
-
- /** The routing method used by this endpoint */
- public RoutingMethod routingMethod() {
- return routingMethod;
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- ContainerEndpoint that = (ContainerEndpoint) o;
- return clusterId.equals(that.clusterId) && scope.equals(that.scope) && names.equals(that.names) && weight.equals(that.weight) && routingMethod == that.routingMethod;
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(clusterId, scope, names, weight, routingMethod);
- }
-
- @Override
- public String toString() {
- return "container endpoint for cluster " + clusterId + ": " + String.join(", ", names) +
- " [method=" + routingMethod + ",scope=" + scope + ",weight=" +
- weight.stream().boxed().map(Object::toString).findFirst().orElse("<none>") + "]";
+ this.authMethods = Objects.requireNonNull(Map.copyOf(authMethods), "authMethods must be non-null");
}
private static String nonEmpty(String s, String message) {
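A hedged sketch of constructing the record form of ContainerEndpoint. The DNS name is hypothetical, and RoutingMethod.exclusive and AuthMethod.mtls are assumed enum constants that are not shown in this diff:

    import java.util.List;
    import java.util.Map;
    import java.util.OptionalInt;

    import com.yahoo.config.provision.zone.AuthMethod;
    import com.yahoo.config.provision.zone.RoutingMethod;
    import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;

    class ContainerEndpointSketch {
        static ContainerEndpoint example() {
            String name = "app.tenant.us-east-1.example.com"; // hypothetical endpoint name
            return new ContainerEndpoint("default",            // clusterId (must be non-empty)
                                         "zone",               // scope
                                         List.of(name),
                                         OptionalInt.empty(),  // no weight
                                         RoutingMethod.exclusive,        // assumed constant
                                         Map.of(name, AuthMethod.mtls)); // assumed constant
        }
    }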
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
index a26e7cce29a..0b0664cd3bf 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/Node.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.api.integration.configserver;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.NodeResources;
@@ -75,6 +76,7 @@ public class Node {
private final Optional<String> switchHostname;
private final Optional<String> modelName;
private final Environment environment;
+ private final CloudAccount cloudAccount;
private Node(String id, HostName hostname, Optional<HostName> parentHostname, State state, NodeType type,
NodeResources resources, Optional<ApplicationId> owner, Version currentVersion, Version wantedVersion,
@@ -87,7 +89,7 @@ public class Node {
DockerImage wantedDockerImage, DockerImage currentDockerImage, Optional<ClusterType> exclusiveToClusterType, Map<String, String> reports,
List<Event> history, Set<String> ipAddresses, Set<String> additionalIpAddresses,
Set<String> additionalHostnames, Optional<String> switchHostname,
- Optional<String> modelName, Environment environment) {
+ Optional<String> modelName, Environment environment, CloudAccount cloudAccount) {
this.id = Objects.requireNonNull(id, "id must be non-null");
this.hostname = Objects.requireNonNull(hostname, "hostname must be non-null");
this.parentHostname = Objects.requireNonNull(parentHostname, "parentHostname must be non-null");
@@ -133,6 +135,7 @@ public class Node {
this.switchHostname = Objects.requireNonNull(switchHostname, "switchHostname must be non-null");
this.modelName = Objects.requireNonNull(modelName, "modelName must be non-null");
this.environment = Objects.requireNonNull(environment, "environment must be non-null");
+ this.cloudAccount = Objects.requireNonNull(cloudAccount, "cloudAccount must be non-null");
}
/** The cloud provider's unique ID for this */
@@ -344,6 +347,10 @@ public class Node {
return environment;
}
+ public CloudAccount cloudAccount() {
+ return cloudAccount;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
@@ -501,6 +508,7 @@ public class Node {
private Optional<String> switchHostname = Optional.empty();
private Optional<String> modelName = Optional.empty();
private Environment environment = Environment.unknown;
+ private CloudAccount cloudAccount = CloudAccount.empty;
private Builder() {}
@@ -785,6 +793,11 @@ public class Node {
return this;
}
+ public Builder cloudAccount(CloudAccount cloudAccount) {
+ this.cloudAccount = cloudAccount;
+ return this;
+ }
+
public Node build() {
return new Node(id, hostname, parentHostname, state, type, resources, owner, currentVersion, wantedVersion,
currentOsVersion, wantedOsVersion, deferOsUpgrade, currentFirmwareCheck, wantedFirmwareCheck, serviceState,
@@ -792,7 +805,7 @@ public class Node {
wantedRebootGeneration, cost, failCount, flavor, clusterId, clusterType, group, index, retired,
wantToRetire, wantToDeprovision, wantToRebuild, down, reservedTo, exclusiveTo, wantedDockerImage,
currentDockerImage, exclusiveToClusterType, reports, history, ipAddresses, additionalIpAddresses,
- additionalHostnames, switchHostname, modelName, environment);
+ additionalHostnames, switchHostname, modelName, environment, cloudAccount);
}
}
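The builder's new cloudAccount field defaults to CloudAccount.empty. A brief sketch of the values it could carry; the account string is illustrative, and CloudAccount.from is assumed to accept it in this form:

    import com.yahoo.config.provision.CloudAccount;

    class CloudAccountSketch {
        public static void main(String[] args) {
            CloudAccount unset  = CloudAccount.empty;                     // default used by Node.Builder
            CloudAccount custom = CloudAccount.from("aws:123456789012");  // illustrative account value
            System.out.println(unset + " / " + custom);
        }
    }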
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java
index 29e1d494ffc..7303320a4f7 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java
@@ -95,8 +95,8 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
public String stringId() {
return source.map(SourceRevision::commit).map(ApplicationVersion::abbreviateCommit)
.or(this::commit)
- .map(commit -> String.format("%s.%d-%s", majorVersion, buildNumber().getAsLong(), commit))
- .orElseGet(() -> majorVersion + "." + buildNumber().getAsLong());
+ .map(commit -> String.format("%s.%d-%s", majorVersion, buildNumber(), commit))
+ .orElseGet(() -> majorVersion + "." + buildNumber());
}
/**
@@ -105,8 +105,8 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
*/
public Optional<SourceRevision> source() { return source; }
- /** Returns the build number that built this version */
- public OptionalLong buildNumber() { return OptionalLong.of(id.number()); }
+ /** Returns the build number of this version */
+ public long buildNumber() { return id.number(); }
/** Returns the email of the author of commit of this version, if known */
public Optional<String> authorEmail() { return authorEmail; }
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/AccountId.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/AccountId.java
new file mode 100644
index 00000000000..34438c2dd1e
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/AccountId.java
@@ -0,0 +1,12 @@
+package com.yahoo.vespa.hosted.controller.api.integration.organization;
+
+import ai.vespa.validation.StringWrapper;
+
+public class AccountId extends StringWrapper<AccountId> {
+
+ public AccountId(String value) {
+ super(value);
+ if (value.isBlank()) throw new IllegalArgumentException("id must be non-blank");
+ }
+
+}
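A minimal sketch of the new wrapper; the id value is hypothetical, and value() is assumed to be the accessor inherited from StringWrapper:

    import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;

    class AccountIdSketch {
        public static void main(String[] args) {
            AccountId id = new AccountId("a1b2c3d4"); // hypothetical account id
            System.out.println(id.value());
            // new AccountId(" ") throws IllegalArgumentException: id must be non-blank
        }
    }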
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/DeploymentIssues.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/DeploymentIssues.java
index 75866b68ab1..3989d4bbae6 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/DeploymentIssues.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/DeploymentIssues.java
@@ -15,7 +15,7 @@ import java.util.Optional;
*/
public interface DeploymentIssues {
- IssueId fileUnlessOpen(Optional<IssueId> issueId, ApplicationId applicationId, User asignee, Contact contact);
+ IssueId fileUnlessOpen(Optional<IssueId> issueId, ApplicationId applicationId, AccountId assigneeId, User assignee, Contact contact);
IssueId fileUnlessOpen(Collection<ApplicationId> applicationIds, Version version);
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/Issue.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/Issue.java
index 7db85da1dbb..57696e0649d 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/Issue.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/Issue.java
@@ -18,11 +18,13 @@ public class Issue {
private final String description;
private final List<String> labels;
private final User assignee;
+ private final AccountId assigneeId;
private final Type type;
private final String queue;
private final Optional<String> component;
- private Issue(String summary, String description, List<String> labels, User assignee, Type type, String queue, Optional<String> component) {
+ private Issue(String summary, String description, List<String> labels, User assignee,
+ AccountId assigneeId, Type type, String queue, Optional<String> component) {
if (summary.isEmpty()) throw new IllegalArgumentException("Issue summary can not be empty!");
if (description.isEmpty()) throw new IllegalArgumentException("Issue description can not be empty!");
@@ -30,45 +32,50 @@ public class Issue {
this.description = description;
this.labels = List.copyOf(labels);
this.assignee = assignee;
+ this.assigneeId = assigneeId;
this.type = type;
this.queue = queue;
this.component = component;
}
public Issue(String summary, String description, String queue, Optional<String> component) {
- this(summary, description, Collections.emptyList(), null, Type.defect, queue, component);
+ this(summary, description, Collections.emptyList(), null, null, Type.defect, queue, component);
}
public Issue append(String appendage) {
- return new Issue(summary, description + appendage, labels, assignee, type, queue, component);
+ return new Issue(summary, description + appendage, labels, assignee, assigneeId, type, queue, component);
}
public Issue with(String label) {
List<String> labels = new ArrayList<>(this.labels);
labels.add(label);
- return new Issue(summary, description, labels, assignee, type, queue, component);
+ return new Issue(summary, description, labels, assignee, assigneeId, type, queue, component);
}
public Issue with(List<String> labels) {
List<String> newLabels = new ArrayList<>(this.labels);
newLabels.addAll(labels);
- return new Issue(summary, description, newLabels, assignee, type, queue, component);
+ return new Issue(summary, description, newLabels, assignee, assigneeId, type, queue, component);
+ }
+
+ public Issue with(AccountId assigneeId) {
+ return new Issue(summary, description, labels, null, assigneeId, type, queue, component);
}
public Issue with(User assignee) {
- return new Issue(summary, description, labels, assignee, type, queue, component);
+ return new Issue(summary, description, labels, assignee, null, type, queue, component);
}
public Issue with(Type type) {
- return new Issue(summary, description, labels, assignee, type, queue, component);
+ return new Issue(summary, description, labels, assignee, assigneeId, type, queue, component);
}
public Issue in(String queue) {
- return new Issue(summary, description, labels, assignee, type, queue, Optional.empty());
+ return new Issue(summary, description, labels, assignee, assigneeId, type, queue, Optional.empty());
}
public Issue withoutComponent() {
- return new Issue(summary, description, labels, assignee, type, queue, Optional.empty());
+ return new Issue(summary, description, labels, assignee, assigneeId, type, queue, Optional.empty());
}
public String summary() {
@@ -83,8 +90,11 @@ public class Issue {
return labels;
}
- public Optional<User> assignee() {
- return Optional.ofNullable(assignee);
+ public Optional<User> assignee() { return Optional.ofNullable(assignee); }
+
+ public Optional<AccountId> assigneeId() {
+ return Optional.ofNullable(assigneeId);
}
public Type type() {
@@ -98,6 +108,7 @@ public class Issue {
public Optional<String> component() {
return component;
}
+
public enum Type {
defect, // A defect which needs fixing.
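As the constructors above show, an issue carries either a user assignee or an account-id assignee, never both. A small sketch of how with(...) switches between them; the values are hypothetical and User.from is assumed to be the existing factory on User:

    import java.util.Optional;

    import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
    import com.yahoo.vespa.hosted.controller.api.integration.organization.Issue;
    import com.yahoo.vespa.hosted.controller.api.integration.organization.User;

    class IssueAssigneeSketch {
        public static void main(String[] args) {
            Issue issue = new Issue("Confirm ownership", "Please confirm.", "MYQUEUE", Optional.empty());
            Issue byAccount = issue.with(new AccountId("a1b2c3d4")); // clears any user assignee
            Issue byUser = byAccount.with(User.from("jdoe"));        // clears the account id again
            System.out.println(byAccount.assigneeId() + " / " + byUser.assignee());
        }
    }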
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueHandler.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueHandler.java
index 8123b6f2ce6..9b9c3df8104 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueHandler.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueHandler.java
@@ -82,6 +82,14 @@ public interface IssueHandler {
Optional<User> assigneeOf(IssueId issueId);
/**
+ * Returns the account id assigned to the given issue, if any.
+ *
+ * @param issueId ID of the issue for which to find the assignee.
+ * @return The account id of the user responsible for fixing the given issue, if found.
+ */
+ Optional<AccountId> assigneeIdOf(IssueId issueId);
+
+ /**
* Reassign the issue with the given ID to the given user, and returns the outcome of this.
*
* @param issueId ID of the issue to be reassigned.
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueInfo.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueInfo.java
index 52c022bebdf..f71e6b6507d 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueInfo.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/IssueInfo.java
@@ -16,13 +16,13 @@ public class IssueInfo {
private final IssueId id;
private final Instant updated;
private final Status status;
- private final User assignee;
+ private final AccountId assigneeId;
- public IssueInfo(IssueId id, Instant updated, Status status, User assignee) {
+ public IssueInfo(IssueId id, Instant updated, Status status, AccountId assigneeId) {
this.id = id;
this.updated = updated;
this.status = status;
- this.assignee = assignee;
+ this.assigneeId = assigneeId;
}
public IssueId id() {
@@ -37,11 +37,10 @@ public class IssueInfo {
return status;
}
- public Optional<User> assignee() {
- return Optional.ofNullable(assignee);
+ public Optional<AccountId> assigneeId() {
+ return Optional.ofNullable(assigneeId);
}
-
public enum Status {
toDo("To Do"),
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockIssueHandler.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockIssueHandler.java
index d60c4d196e2..fb4986d0061 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockIssueHandler.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/MockIssueHandler.java
@@ -42,7 +42,7 @@ public class MockIssueHandler implements IssueHandler {
@Override
public IssueId file(Issue issue) {
- if (!issue.assignee().isPresent()) throw new RuntimeException();
+ if (issue.assignee().isEmpty() && issue.assigneeId().isEmpty()) throw new RuntimeException();
IssueId issueId = IssueId.from("" + counter.incrementAndGet());
issues.put(issueId, new MockIssue(issue));
return issueId;
@@ -55,7 +55,7 @@ public class MockIssueHandler implements IssueHandler {
.map(entry -> new IssueInfo(entry.getKey(),
entry.getValue().updated,
entry.getValue().isOpen() ? Status.toDo : Status.done,
- entry.getValue().assignee))
+ entry.getValue().assigneeId))
.toList();
}
@@ -85,6 +85,11 @@ public class MockIssueHandler implements IssueHandler {
}
@Override
+ public Optional<AccountId> assigneeIdOf(IssueId issueId) {
+ return Optional.ofNullable(issues.get(issueId).assigneeId);
+ }
+
+ @Override
public boolean reassign(IssueId issueId, User assignee) {
issues.get(issueId).assignee = assignee;
touch(issueId);
@@ -159,21 +164,13 @@ public class MockIssueHandler implements IssueHandler {
projects.put(projectKey, projectInfo);
}
- private static class PropertyInfo {
-
- private List<List<User>> contacts = Collections.emptyList();
- private URI issueUrl = URI.create("issues.tld");
- private URI contactsUrl = URI.create("contacts.tld");
- private URI propertyUrl = URI.create("properties.tld");
-
- }
-
public class MockIssue {
private Issue issue;
private Instant updated;
private boolean open;
private User assignee;
+ private AccountId assigneeId;
private List<String> watchers;
private MockIssue(Issue issue) {
@@ -181,11 +178,13 @@ public class MockIssueHandler implements IssueHandler {
this.updated = clock.instant();
this.open = true;
this.assignee = issue.assignee().orElse(null);
+ this.assigneeId = issue.assigneeId().orElse(null);
this.watchers = new ArrayList<>();
}
public Issue issue() { return issue; }
public User assignee() { return assignee; }
+ public AccountId assigneeId() { return assigneeId; }
public boolean isOpen() { return open; }
public List<String> watchers() { return List.copyOf(watchers); }
public void addWatcher(String watcher) { watchers.add(watcher); }
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/OwnershipIssues.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/OwnershipIssues.java
index df128f18193..6822b4b432f 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/OwnershipIssues.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/organization/OwnershipIssues.java
@@ -20,11 +20,12 @@ public interface OwnershipIssues {
*
* @param issueId ID of the previous ownership issue filed for the given application.
* @param summary Summary of an application for which to file an issue.
+ * @param assigneeId Issue assignee id
* @param assignee Issue assignee
* @param contact Contact info for the application tenant
* @return ID of the created issue, if one was created.
*/
- Optional<IssueId> confirmOwnership(Optional<IssueId> issueId, ApplicationSummary summary, User assignee, Contact contact);
+ Optional<IssueId> confirmOwnership(Optional<IssueId> issueId, ApplicationSummary summary, AccountId assigneeId, User assignee, Contact contact);
/**
* Make sure the given ownership confirmation request is acted upon, unless it is already acknowledged.
@@ -38,6 +39,6 @@ public interface OwnershipIssues {
* @param issueId ID of the ownership issue.
* @return The owner of the application, if it has been confirmed.
*/
- Optional<User> getConfirmedOwner(IssueId issueId);
+ Optional<AccountId> getConfirmedOwner(IssueId issueId);
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/DummyOwnershipIssues.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/DummyOwnershipIssues.java
index d3d5ba96781..caff9460628 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/DummyOwnershipIssues.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/DummyOwnershipIssues.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration.stubs;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.ApplicationSummary;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
@@ -15,7 +16,7 @@ import java.util.Optional;
public class DummyOwnershipIssues implements OwnershipIssues {
@Override
- public Optional<IssueId> confirmOwnership(Optional<IssueId> issueId, ApplicationSummary summary, User assignee, Contact contact) {
+ public Optional<IssueId> confirmOwnership(Optional<IssueId> issueId, ApplicationSummary summary, AccountId assigneeId, User assignee, Contact contact) {
return Optional.empty();
}
@@ -24,7 +25,7 @@ public class DummyOwnershipIssues implements OwnershipIssues {
}
@Override
- public Optional<User> getConfirmedOwner(IssueId issueId) {
+ public Optional<AccountId> getConfirmedOwner(IssueId issueId) {
return Optional.empty();
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/LoggingDeploymentIssues.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/LoggingDeploymentIssues.java
index 20178f300d2..1de3418bd93 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/LoggingDeploymentIssues.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/stubs/LoggingDeploymentIssues.java
@@ -5,6 +5,7 @@ package com.yahoo.vespa.hosted.controller.api.integration.stubs;
import com.yahoo.component.annotation.Inject;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact;
import com.yahoo.vespa.hosted.controller.api.integration.organization.DeploymentIssues;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
@@ -52,7 +53,7 @@ public class LoggingDeploymentIssues implements DeploymentIssues {
}
@Override
- public IssueId fileUnlessOpen(Optional<IssueId> issueId, ApplicationId applicationId, User assignee, Contact contact) {
+ public IssueId fileUnlessOpen(Optional<IssueId> issueId, ApplicationId applicationId, AccountId assigneeId, User assignee, Contact contact) {
return fileUnlessPresent(issueId, applicationId);
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagValidationException.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagValidationException.java
new file mode 100644
index 00000000000..00c88102819
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagValidationException.java
@@ -0,0 +1,11 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.systemflags.v1;
+
+/**
+ * @author hakonhall
+ */
+public class FlagValidationException extends RuntimeException {
+ public FlagValidationException(String message) {
+ super(message);
+ }
+}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java
index bad53620c81..fbf3a5d9a03 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java
@@ -80,6 +80,54 @@ public interface FlagsTarget {
static String zoneFile(SystemName system, ZoneId zone) { return jsonFile(system.value() + "." + zone.environment().value() + "." + zone.region().value()); }
static String controllerFile(SystemName system) { return jsonFile(system.value() + ".controller"); }
+ /** Return true if the filename applies to the system. Throws on invalid filename format. */
+ static boolean filenameForSystem(String filename, SystemName system) throws FlagValidationException {
+ if (filename.equals(defaultFile())) return true;
+
+ String[] parts = filename.split("\\.", -1);
+ if (parts.length < 2) throw new FlagValidationException("Invalid flag filename: " + filename);
+
+ if (!parts[parts.length - 1].equals("json")) throw new FlagValidationException("Invalid flag filename: " + filename);
+
+ SystemName systemFromFile;
+ try {
+ systemFromFile = SystemName.from(parts[0]);
+ } catch (IllegalArgumentException e) {
+ throw new FlagValidationException("First part of flag filename is neither 'default' nor a valid system: " + filename);
+ }
+ if (!SystemName.hostedVespa().contains(systemFromFile))
+ throw new FlagValidationException("Unknown system in flag filename: " + filename);
+ if (!systemFromFile.equals(system)) return false;
+
+ if (parts.length == 2) return true; // systemFile
+
+ if (parts.length == 3) {
+ if (parts[1].equals("controller")) return true; // controllerFile
+ try {
+ Environment.from(parts[1]);
+ } catch (IllegalArgumentException e) {
+ throw new FlagValidationException("Invalid environment in flag filename: " + filename);
+ }
+ return true; // environmentFile
+ }
+
+ if (parts.length == 4) {
+ try {
+ Environment.from(parts[1]);
+ } catch (IllegalArgumentException e) {
+ throw new FlagValidationException("Invalid environment in flag filename: " + filename);
+ }
+ try {
+ RegionName.from(parts[2]);
+ } catch (IllegalArgumentException e) {
+ throw new FlagValidationException("Invalid region in flag filename: " + filename);
+ }
+ return true; // zoneFile
+ }
+
+ throw new FlagValidationException("Invalid flag filename: " + filename);
+ }
+
/** Partially resolve inter-zone dimensions, except those dimensions defined by the flag for a controller zone. */
static FlagData partialResolve(FlagData data, SystemName system, CloudName cloud, ZoneId virtualZoneId) {
Set<FetchVector.Dimension> flagDimensions =
@@ -94,7 +142,7 @@ public interface FlagsTarget {
var fetchVector = new FetchVector();
if (!flagDimensions.contains(CLOUD)) fetchVector = fetchVector.with(CLOUD, cloud.value());
if (!flagDimensions.contains(ENVIRONMENT)) fetchVector = fetchVector.with(ENVIRONMENT, virtualZoneId.environment().value());
- if (!flagDimensions.contains(SYSTEM)) fetchVector = fetchVector.with(SYSTEM, system.value());
+ fetchVector = fetchVector.with(SYSTEM, system.value());
if (!flagDimensions.contains(ZONE_ID)) fetchVector = fetchVector.with(ZONE_ID, virtualZoneId.value());
return fetchVector.isEmpty() ? data : data.partialResolve(fetchVector);
}
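The accepted filename layouts mirror the factory methods above (defaultFile, systemFile, controllerFile, environmentFile, zoneFile). A sketch of how the new helper classifies them; the environment and region values are illustrative:

    import com.yahoo.config.provision.SystemName;
    import com.yahoo.vespa.hosted.controller.api.systemflags.v1.FlagsTarget;

    class FlagFilenameSketch {
        public static void main(String[] args) {
            // Recognized layouts: default.json, <system>.json, <system>.controller.json,
            // <system>.<environment>.json, <system>.<environment>.<region>.json
            System.out.println(FlagsTarget.filenameForSystem("main.prod.us-east-3.json", SystemName.main)); // true
            System.out.println(FlagsTarget.filenameForSystem("cd.prod.us-east-3.json", SystemName.main));   // false: other known system
            // Anything else, e.g. "main.banana.json", throws FlagValidationException.
        }
    }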
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java
index 8ca4c37a85a..fa8a0ddcba1 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java
@@ -1,11 +1,13 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.systemflags.v1;
+import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
@@ -18,11 +20,14 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.text.JSON;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagId;
+import com.yahoo.vespa.flags.json.Condition;
import com.yahoo.vespa.flags.json.DimensionHelper;
import com.yahoo.vespa.flags.json.FlagData;
+import com.yahoo.vespa.flags.json.RelationalCondition;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
import java.io.BufferedInputStream;
+import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -32,11 +37,8 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.Map;
-import java.util.Objects;
-import java.util.Optional;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Consumer;
@@ -49,6 +51,7 @@ import java.util.zip.ZipOutputStream;
import static com.yahoo.config.provision.CloudName.AWS;
import static com.yahoo.config.provision.CloudName.GCP;
import static com.yahoo.config.provision.CloudName.YAHOO;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.SYSTEM;
import static com.yahoo.yolean.Exceptions.uncheck;
/**
@@ -74,7 +77,7 @@ public class SystemFlagsDataArchive {
this.files = files;
}
- public static SystemFlagsDataArchive fromZip(InputStream rawIn) {
+ public static SystemFlagsDataArchive fromZip(InputStream rawIn, ZoneRegistry zoneRegistry) {
Builder builder = new Builder();
try (ZipInputStream zipIn = new ZipInputStream(new BufferedInputStream(rawIn))) {
ZipEntry entry;
@@ -82,8 +85,8 @@ public class SystemFlagsDataArchive {
String name = entry.getName();
if (!entry.isDirectory() && name.startsWith("flags/")) {
Path filePath = Paths.get(name);
- String rawData = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8);
- addFile(builder, rawData, filePath, Set.of(), null);
+ String fileContent = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8);
+ builder.maybeAddFile(filePath, fileContent, zoneRegistry, true);
}
}
return builder.build();
@@ -92,27 +95,19 @@ public class SystemFlagsDataArchive {
}
}
- public static SystemFlagsDataArchive fromDirectoryAndSystem(Path directory, ZoneRegistry systemDefinition) {
- return fromDirectory(directory, systemDefinition);
- }
-
- public static SystemFlagsDataArchive fromDirectory(Path directory) { return fromDirectory(directory, null); }
-
- private static SystemFlagsDataArchive fromDirectory(Path directory, ZoneRegistry systemDefinition) {
- Set<String> filenamesForSystem = getFilenamesForSystem(systemDefinition);
+ public static SystemFlagsDataArchive fromDirectory(Path directory, ZoneRegistry zoneRegistry, boolean simulateInController) {
Path root = directory.toAbsolutePath();
Path flagsDirectory = directory.resolve("flags");
if (!Files.isDirectory(flagsDirectory)) {
- throw new IllegalArgumentException("Sub-directory 'flags' does not exist: " + flagsDirectory);
+ throw new FlagValidationException("Sub-directory 'flags' does not exist: " + flagsDirectory);
}
- try (Stream<Path> directoryStream = Files.walk(root)) {
+ try (Stream<Path> directoryStream = Files.walk(flagsDirectory)) {
Builder builder = new Builder();
- directoryStream.forEach(absolutePath -> {
- Path relativePath = root.relativize(absolutePath);
- if (!Files.isDirectory(absolutePath) &&
- relativePath.startsWith("flags")) {
- String rawData = uncheck(() -> Files.readString(absolutePath, StandardCharsets.UTF_8));
- addFile(builder, rawData, relativePath, filenamesForSystem, systemDefinition);
+ directoryStream.forEach(path -> {
+ Path relativePath = root.relativize(path.toAbsolutePath());
+ if (Files.isRegularFile(path)) {
+ String fileContent = uncheck(() -> Files.readString(path, StandardCharsets.UTF_8));
+ builder.maybeAddFile(relativePath, fileContent, zoneRegistry, simulateInController);
}
});
return builder.build();
@@ -121,6 +116,14 @@ public class SystemFlagsDataArchive {
}
}
+ public byte[] toZipBytes() {
+ try (ByteArrayOutputStream out = new ByteArrayOutputStream()) {
+ toZip(out);
+ return out.toByteArray();
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
public void toZip(OutputStream out) {
ZipOutputStream zipOut = new ZipOutputStream(out);
@@ -153,142 +156,135 @@ public class SystemFlagsDataArchive {
return targetData;
}
- public void validateAllFilesAreForTargets(SystemName currentSystem, Set<FlagsTarget> targets) throws IllegalArgumentException {
+ public void validateAllFilesAreForTargets(Set<FlagsTarget> targets) throws FlagValidationException {
Set<String> validFiles = targets.stream()
- .flatMap(target -> target.flagDataFilesPrioritized().stream())
- .collect(Collectors.toSet());
- Set<SystemName> otherSystems = Arrays.stream(SystemName.values())
- .filter(systemName -> systemName != currentSystem)
- .collect(Collectors.toSet());
- files.forEach((flagId, fileMap) -> {
- for (String filename : fileMap.keySet()) {
- boolean isFileForOtherSystem = otherSystems.stream()
- .anyMatch(system -> filename.startsWith(system.value() + "."));
- boolean isFileForCurrentSystem = validFiles.contains(filename);
- if (!isFileForOtherSystem && !isFileForCurrentSystem) {
- throw new IllegalArgumentException("Unknown flag file: " + toFilePath(flagId, filename));
- }
+ .flatMap(target -> target.flagDataFilesPrioritized().stream())
+ .collect(Collectors.toSet());
+ files.forEach((flagId, fileMap) -> fileMap.keySet().forEach(filename -> {
+ if (!validFiles.contains(filename)) {
+ throw new FlagValidationException("Unknown flag file: " + toFilePath(flagId, filename));
}
- });
+ }));
}
- private static Set<String> getFilenamesForSystem(ZoneRegistry systemDefinition) {
- if (systemDefinition == null) return Set.of();
- return FlagsTarget.getAllTargetsInSystem(systemDefinition, false).stream()
- .flatMap(target -> target.flagDataFilesPrioritized().stream())
- .collect(Collectors.toSet());
+ boolean hasFlagData(FlagId flagId, String filename) {
+ return files.getOrDefault(flagId, Map.of()).containsKey(filename);
}
- private static void addFile(Builder builder, String rawData, Path filePath, Set<String> filenamesForSystem,
- ZoneRegistry systemDefinition) {
- String filename = filePath.getFileName().toString();
- if (filename.startsWith(".")) {
- return; // Ignore files starting with '.'
- }
- if (!filenamesForSystem.isEmpty() && !filenamesForSystem.contains(filename)) {
- if (systemDefinition != null && filename.startsWith(systemDefinition.system().value() + '.')) {
- throw new IllegalArgumentException(String.format(
- "Environment or zone in filename '%s' does not exist", filename));
- }
- return; // Ignore files irrelevant for system
- }
- if (!filename.endsWith(".json")) {
- throw new IllegalArgumentException(String.format("Only JSON files are allowed in 'flags/' directory (found '%s')", filePath.toString()));
- }
- FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString());
- FlagData flagData;
- if (rawData.isBlank()) {
- flagData = new FlagData(directoryDeducedFlagId);
- } else {
- Set<ZoneId> zones = systemDefinition == null ?
- Set.of() :
- systemDefinition.zones().all().zones().stream().map(ZoneApi::getVirtualId).collect(Collectors.toSet());
- String normalizedRawData = normalizeJson(rawData, zones);
- flagData = FlagData.deserialize(normalizedRawData);
- if (!directoryDeducedFlagId.equals(flagData.id())) {
- throw new IllegalArgumentException(
- String.format("Flag data file with flag id '%s' in directory for '%s'",
- flagData.id(), directoryDeducedFlagId.toString()));
- }
-
- String serializedData = flagData.serializeToJson();
- if (!JSON.equals(serializedData, normalizedRawData)) {
- throw new IllegalArgumentException("""
- %s contains unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
- %s
- but deserializing this ended up with:
- %s
- These fields may be spelled wrong, or remove them?
- See https://git.ouroath.com/vespa/hosted-feature-flags for more info on the JSON syntax
- """.formatted(filePath, normalizedRawData, serializedData));
+ private static void validateSystems(FlagData flagData) throws FlagValidationException {
+ flagData.rules().forEach(rule -> rule.conditions().forEach(condition -> {
+ if (condition.dimension() == SYSTEM) {
+ validateConditionValues(condition, system -> {
+ if (!SystemName.hostedVespa().contains(SystemName.from(system)))
+ throw new FlagValidationException("Unknown system: " + system);
+ });
}
- }
-
- if (builder.hasFile(filename, flagData)) {
- throw new IllegalArgumentException(
- String.format("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!",
- filePath, flagData.id()));
- }
-
- builder.addFile(filename, flagData);
+ }));
}
- static String normalizeJson(String json, Set<ZoneId> zones) {
- JsonNode root = uncheck(() -> mapper.readTree(json));
- removeCommentsRecursively(root);
- removeNullRuleValues(root);
- verifyValues(root, zones);
- return root.toString();
+ private static void validateForSystem(FlagData flagData, ZoneRegistry zoneRegistry, boolean inController) throws FlagValidationException {
+ Set<ZoneId> zones = inController ?
+ zoneRegistry.zonesIncludingSystem().all().zones().stream().map(ZoneApi::getVirtualId).collect(Collectors.toSet()) :
+ null;
+
+ flagData.rules().forEach(rule -> rule.conditions().forEach(condition -> {
+ int force_switch_expression_dummy = switch (condition.type()) {
+ case RELATIONAL -> switch (condition.dimension()) {
+ case APPLICATION_ID, CLOUD, CLOUD_ACCOUNT, CLUSTER_ID, CLUSTER_TYPE, CONSOLE_USER_EMAIL,
+ ENVIRONMENT, HOSTNAME, NODE_TYPE, SYSTEM, TENANT_ID, ZONE_ID ->
+ throw new FlagValidationException(condition.type().toWire() + " " +
+ DimensionHelper.toWire(condition.dimension()) +
+ " condition is not supported");
+ case VESPA_VERSION -> {
+ RelationalCondition rCond = RelationalCondition.create(condition.toCreateParams());
+ Version version = Version.fromString(rCond.relationalPredicate().rightOperand());
+ if (version.getMajor() < 8)
+ throw new FlagValidationException("Major Vespa version must be at least 8: " + version);
+ yield 0;
+ }
+ };
+
+ case WHITELIST, BLACKLIST -> switch (condition.dimension()) {
+ case APPLICATION_ID -> validateConditionValues(condition, ApplicationId::fromSerializedForm);
+ case CONSOLE_USER_EMAIL -> validateConditionValues(condition, email -> {
+ if (!email.contains("@"))
+ throw new FlagValidationException("Invalid email address: " + email);
+ });
+ case CLOUD -> validateConditionValues(condition, cloud -> {
+ if (!Set.of(YAHOO, AWS, GCP).contains(CloudName.from(cloud)))
+ throw new FlagValidationException("Unknown cloud: " + cloud);
+ });
+ case CLOUD_ACCOUNT -> validateConditionValues(condition, CloudAccount::from);
+ case CLUSTER_ID -> validateConditionValues(condition, ClusterSpec.Id::from);
+ case CLUSTER_TYPE -> validateConditionValues(condition, ClusterSpec.Type::from);
+ case ENVIRONMENT -> validateConditionValues(condition, Environment::from);
+ case HOSTNAME -> validateConditionValues(condition, HostName::of);
+ case NODE_TYPE -> validateConditionValues(condition, NodeType::valueOf);
+ case SYSTEM -> throw new IllegalStateException("Flag data contains system dimension");
+ case TENANT_ID -> validateConditionValues(condition, TenantName::from);
+ case VESPA_VERSION -> throw new FlagValidationException(condition.type().toWire() + " " +
+ DimensionHelper.toWire(condition.dimension()) +
+ " condition is not supported");
+ case ZONE_ID -> validateConditionValues(condition, zoneIdString -> {
+ ZoneId zoneId = ZoneId.from(zoneIdString);
+ if (inController && !zones.contains(zoneId))
+ throw new FlagValidationException("Unknown zone: " + zoneIdString);
+ });
+ };
+ };
+ }));
}
- private static void verifyValues(JsonNode root, Set<ZoneId> zones) {
- var cursor = new JsonAccessor(root);
- cursor.get("rules").forEachArrayElement(rule -> rule.get("conditions").forEachArrayElement(condition -> {
- FetchVector.Dimension dimension = DimensionHelper
- .fromWire(condition.get("dimension")
- .asString()
- .orElseThrow(() -> new IllegalArgumentException("Invalid dimension in condition: " + condition)));
- switch (dimension) {
- case APPLICATION_ID -> validateStringValues(condition, ApplicationId::fromSerializedForm);
- case CONSOLE_USER_EMAIL -> validateStringValues(condition, email -> {});
- case CLOUD -> validateStringValues(condition, cloud -> {
- if (!Set.of(YAHOO, AWS, GCP).contains(CloudName.from(cloud)))
- throw new IllegalArgumentException("Unknown cloud: " + cloud);
- });
- case CLUSTER_ID -> validateStringValues(condition, ClusterSpec.Id::from);
- case CLUSTER_TYPE -> validateStringValues(condition, ClusterSpec.Type::from);
- case ENVIRONMENT -> validateStringValues(condition, Environment::from);
- case HOSTNAME -> validateStringValues(condition, HostName::of);
- case NODE_TYPE -> validateStringValues(condition, NodeType::valueOf);
- case SYSTEM -> validateStringValues(condition, system -> {
- if (!Set.of(SystemName.cd, SystemName.main, SystemName.PublicCd, SystemName.Public).contains(SystemName.from(system)))
- throw new IllegalArgumentException("Unknown system: " + system);
- });
- case TENANT_ID -> validateStringValues(condition, TenantName::from);
- case VESPA_VERSION -> validateStringValues(condition, versionString -> {
- Version vespaVersion = Version.fromString(versionString);
- if (vespaVersion.getMajor() < 8)
- throw new IllegalArgumentException("Major Vespa version must be at least 8: " + versionString);
- });
- case ZONE_ID -> validateStringValues(condition, zoneId -> {
- if (!zones.contains(ZoneId.from(zoneId)))
- throw new IllegalArgumentException("Unknown zone: " + zoneId);
- });
+ private static int validateConditionValues(Condition condition, Consumer<String> valueValidator) {
+ condition.toCreateParams().values().forEach(value -> {
+ try {
+ valueValidator.accept(value);
+ } catch (IllegalArgumentException e) {
+ String dimension = DimensionHelper.toWire(condition.dimension());
+ String type = condition.type().toWire();
+ throw new FlagValidationException("Invalid %s '%s' in %s condition: %s".formatted(dimension, value, type, e.getMessage()));
}
- }));
+ });
+
+ return 0; // dummy to force switch expression
}
- private static void validateStringValues(JsonAccessor condition, Consumer<String> valueValidator) {
- condition.get("values").forEachArrayElement(conditionValue -> {
- String value = conditionValue.asString()
- .orElseThrow(() -> {
- String dimension = condition.get("dimension").asString().orElseThrow();
- String type = condition.get("type").asString().orElseThrow();
- return new IllegalArgumentException("Non-string value in %s %s condition: %s".formatted(
- dimension, type, conditionValue));
- });
- valueValidator.accept(value);
- });
+ private static FlagData parseFlagData(FlagId flagId, String fileContent, ZoneRegistry zoneRegistry, boolean inController) {
+ if (fileContent.isBlank()) return new FlagData(flagId);
+
+ final JsonNode root;
+ try {
+ root = mapper.readTree(fileContent);
+ } catch (JsonProcessingException e) {
+ throw new FlagValidationException("Invalid JSON: " + e.getMessage());
+ }
+
+ removeCommentsRecursively(root);
+ removeNullRuleValues(root);
+ String normalizedRawData = root.toString();
+ FlagData flagData = FlagData.deserialize(normalizedRawData);
+
+ if (!flagId.equals(flagData.id()))
+ throw new FlagValidationException("Flag ID specified in file (%s) doesn't match the directory name (%s)"
+ .formatted(flagData.id(), flagId.toString()));
+
+ String serializedData = flagData.serializeToJson();
+ if (!JSON.equals(serializedData, normalizedRawData))
+ throw new FlagValidationException("""
+ Unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
+ %s
+ but deserializing this ended up with:
+ %s
+                        These fields may be misspelled, or should perhaps be removed.
+ See https://git.ouroath.com/vespa/hosted-feature-flags for more info on the JSON syntax
+ """.formatted(normalizedRawData, serializedData));
+
+ validateSystems(flagData);
+ flagData = flagData.partialResolve(new FetchVector().with(SYSTEM, zoneRegistry.system().value()));
+
+ validateForSystem(flagData, zoneRegistry, inController);
+
+ return flagData;
}
private static void removeCommentsRecursively(JsonNode node) {
@@ -325,56 +321,46 @@ public class SystemFlagsDataArchive {
public Builder() {}
- public Builder addFile(String filename, FlagData data) {
- files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data);
- return this;
- }
+ boolean maybeAddFile(Path filePath, String fileContent, ZoneRegistry zoneRegistry, boolean inController) {
+ String filename = filePath.getFileName().toString();
- public boolean hasFile(String filename, FlagData data) {
- return files.containsKey(data.id()) && files.get(data.id()).containsKey(filename);
- }
+ if (filename.startsWith("."))
+ return false; // Ignore files starting with '.'
- public SystemFlagsDataArchive build() {
- Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>();
- files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map)));
- return new SystemFlagsDataArchive(copy);
- }
-
- }
+ if (!inController && !FlagsTarget.filenameForSystem(filename, zoneRegistry.system()))
+ return false; // Ignore files for other systems
- private static class JsonAccessor {
- private final JsonNode jsonNode;
+ FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString());
- public JsonAccessor(JsonNode jsonNode) {
- this.jsonNode = jsonNode;
- }
+ if (hasFile(filename, directoryDeducedFlagId))
+ throw new FlagValidationException("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!"
+ .formatted(filePath, directoryDeducedFlagId));
- public JsonAccessor get(String fieldName) {
- if (jsonNode == null) {
- return this;
- } else {
- return new JsonAccessor(jsonNode.get(fieldName));
+ final FlagData flagData;
+ try {
+ flagData = parseFlagData(directoryDeducedFlagId, fileContent, zoneRegistry, inController);
+ } catch (FlagValidationException e) {
+ throw new FlagValidationException("In file " + filePath + ": " + e.getMessage());
}
- }
- public Optional<String> asString() {
- return jsonNode != null && jsonNode.isTextual() ? Optional.of(jsonNode.textValue()) : Optional.empty();
+ addFile(filename, flagData);
+ return true;
}
- public void forEachArrayElement(Consumer<JsonAccessor> consumer) {
- if (jsonNode != null && jsonNode.isArray()) {
- jsonNode.forEach(jsonNodeElement -> consumer.accept(new JsonAccessor(jsonNodeElement)));
- }
+ public Builder addFile(String filename, FlagData data) {
+ files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data);
+ return this;
}
- /** Returns true if this (JsonNode) is a string and equal to value. */
- public boolean isEqualTo(String value) {
- return jsonNode != null && jsonNode.isTextual() && Objects.equals(jsonNode.textValue(), value);
+ public boolean hasFile(String filename, FlagId id) {
+ return files.containsKey(id) && files.get(id).containsKey(filename);
}
- @Override
- public String toString() {
- return jsonNode == null ? "undefined" : jsonNode.toString();
+ public SystemFlagsDataArchive build() {
+ Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>();
+ files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map)));
+ return new SystemFlagsDataArchive(copy);
}
+
}
}
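
The otherwise-unused int assignment in validateForSystem above is what turns the switch into an expression: expression form requires the compiler to cover every Condition.Type and every dimension, so adding a new enum constant fails compilation instead of silently skipping validation. A minimal, self-contained sketch of that idiom follows; the enum and method names are illustrative and not taken from this patch.

    // Sketch only: exhaustiveness checking via a switch *expression*.
    enum DimensionSketch { HOSTNAME, ZONE_ID, VESPA_VERSION }

    class ExhaustiveSwitchSketch {
        static int validate(DimensionSketch dimension) {
            // Returning the switch result forces expression form, which must
            // handle every constant of DimensionSketch to compile.
            return switch (dimension) {
                case HOSTNAME, ZONE_ID -> 0; // accepted dimensions
                case VESPA_VERSION -> throw new IllegalArgumentException(
                        "vespa-version condition is not supported here");
            };
        }
    }
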
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTargetTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTargetTest.java
new file mode 100644
index 00000000000..9177813e38f
--- /dev/null
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTargetTest.java
@@ -0,0 +1,41 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.api.systemflags.v1;
+
+import com.yahoo.config.provision.SystemName;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+/**
+ * @author hakonhall
+ */
+class FlagsTargetTest {
+ @Test
+ void sanityCheckFilename() {
+ assertTrue(FlagsTarget.filenameForSystem("default.json", SystemName.main));
+ assertTrue(FlagsTarget.filenameForSystem("main.json", SystemName.main));
+ assertTrue(FlagsTarget.filenameForSystem("main.controller.json", SystemName.main));
+ assertTrue(FlagsTarget.filenameForSystem("main.prod.json", SystemName.main));
+ assertTrue(FlagsTarget.filenameForSystem("main.prod.us-west-1.json", SystemName.main));
+ assertTrue(FlagsTarget.filenameForSystem("main.prod.abc-foo-3.json", SystemName.main));
+
+ assertFalse(FlagsTarget.filenameForSystem("public.json", SystemName.main));
+ assertFalse(FlagsTarget.filenameForSystem("public.controller.json", SystemName.main));
+ assertFalse(FlagsTarget.filenameForSystem("public.prod.json", SystemName.main));
+ assertFalse(FlagsTarget.filenameForSystem("public.prod.us-west-1.json", SystemName.main));
+ assertFalse(FlagsTarget.filenameForSystem("public.prod.abc-foo-3.json", SystemName.main));
+
+ assertFlagValidationException("First part of flag filename is neither 'default' nor a valid system: defaults.json", "defaults.json");
+ assertFlagValidationException("Invalid flag filename: default", "default");
+ assertFlagValidationException("Invalid flag filename: README", "README");
+ assertFlagValidationException("First part of flag filename is neither 'default' nor a valid system: nosystem.json", "nosystem.json");
+ assertFlagValidationException("Invalid environment in flag filename: main.noenv.json", "main.noenv.json");
+ assertFlagValidationException("Invalid region in flag filename: main.prod.%badregion.json", "main.prod.%badregion.json");
+ }
+
+ private void assertFlagValidationException(String expectedMessage, String filename) {
+ FlagValidationException e = assertThrows(FlagValidationException.class, () -> FlagsTarget.filenameForSystem(filename, SystemName.main));
+ assertEquals(expectedMessage, e.getMessage());
+ }
+
+}
\ No newline at end of file
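
The assertions above pin down the flag filename convention, <system|default>[.<environment>[.<region>]].json, without showing FlagsTarget.filenameForSystem itself. The following is a rough, hypothetical sketch of such a check; unlike the real method it returns false rather than validating system, environment and region names, and none of these names come from the patch.

    // Hypothetical sketch; FlagsTarget.filenameForSystem is not shown in this diff.
    class FlagFilenameSketch {
        static boolean appliesToSystem(String filename, String system) {
            if (!filename.contains(".") || !filename.endsWith(".json"))
                throw new IllegalArgumentException("Invalid flag filename: " + filename);
            String firstPart = filename.substring(0, filename.indexOf('.'));
            // "default.json" applies to every system; otherwise the first
            // filename part must name the system being deployed to.
            return firstPart.equals("default") || firstPart.equals(system);
        }
    }
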
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java
index 3417dc85224..759f21579d4 100644
--- a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java
@@ -14,6 +14,7 @@ import com.yahoo.vespa.athenz.api.AthenzService;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagId;
import com.yahoo.vespa.flags.RawFlag;
+import com.yahoo.vespa.flags.json.Condition;
import com.yahoo.vespa.flags.json.FlagData;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
import org.junit.jupiter.api.Test;
@@ -31,9 +32,9 @@ import java.net.URI;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
-import java.util.Map;
import java.util.Optional;
import java.util.Set;
+import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -76,40 +77,73 @@ public class SystemFlagsDataArchiveTest {
@Test
void can_serialize_and_deserialize_archive() throws IOException {
+ can_serialize_and_deserialize_archive(false);
+ can_serialize_and_deserialize_archive(true);
+ }
+
+ private void can_serialize_and_deserialize_archive(boolean simulateInController) throws IOException {
File tempFile = File.createTempFile("serialized-flags-archive", null, temporaryFolder);
try (OutputStream out = new BufferedOutputStream(new FileOutputStream(tempFile))) {
- var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/"));
+ var archive = fromDirectory("system-flags", simulateInController);
+ if (simulateInController)
+ archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
archive.toZip(out);
}
try (InputStream in = new BufferedInputStream(new FileInputStream(tempFile))) {
- SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromZip(in);
+ SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromZip(in, createZoneRegistryMock());
assertArchiveReturnsCorrectTestFlagDataForTarget(archive);
}
}
@Test
void retrieves_correct_flag_data_for_target() {
- var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/"));
+ retrieves_correct_flag_data_for_target(false);
+ retrieves_correct_flag_data_for_target(true);
+ }
+
+ private void retrieves_correct_flag_data_for_target(boolean simulateInController) {
+ var archive = fromDirectory("system-flags", simulateInController);
+ if (simulateInController)
+ archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
assertArchiveReturnsCorrectTestFlagDataForTarget(archive);
}
@Test
void supports_multi_level_flags_directory() {
- var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level/"));
+ supports_multi_level_flags_directory(false);
+ supports_multi_level_flags_directory(true);
+ }
+
+ private void supports_multi_level_flags_directory(boolean simulateInController) {
+ var archive = fromDirectory("system-flags-multi-level", simulateInController);
+ if (simulateInController)
+ archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default");
}
@Test
void duplicated_flagdata_is_detected() {
- Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
- var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-multi-level-with-duplicated-flagdata/"));
- });
+ duplicated_flagdata_is_detected(false);
+ duplicated_flagdata_is_detected(true);
+ }
+
+ private void duplicated_flagdata_is_detected(boolean simulateInController) {
+ Throwable exception = assertThrows(FlagValidationException.class, () -> {
+ fromDirectory("system-flags-multi-level-with-duplicated-flagdata", simulateInController);
+ });
assertTrue(exception.getMessage().contains("contains redundant flag data for id 'my-test-flag' already set in another directory!"));
}
@Test
void empty_files_are_handled_as_no_flag_data_for_target() {
- var archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags/"));
+ empty_files_are_handled_as_no_flag_data_for_target(false);
+ empty_files_are_handled_as_no_flag_data_for_target(true);
+ }
+
+ private void empty_files_are_handled_as_no_flag_data_for_target(boolean simulateInController) {
+ var archive = fromDirectory("system-flags", simulateInController);
+ if (simulateInController)
+ archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, mainControllerTarget);
assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, prodUsWestCfgTarget, "main.prod.us-west-1");
assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, prodUsEast3CfgTarget);
@@ -117,38 +151,46 @@ public class SystemFlagsDataArchiveTest {
}
@Test
- void throws_exception_on_non_json_file() {
- Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
- SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-invalid-file-name/"));
+ void hv_throws_exception_on_non_json_file() {
+ Throwable exception = assertThrows(FlagValidationException.class, () -> {
+ fromDirectory("system-flags-with-invalid-file-name", false);
});
- assertTrue(exception.getMessage().contains("Only JSON files are allowed in 'flags/' directory (found 'flags/my-test-flag/file-name-without-dot-json')"));
+ assertEquals("Invalid flag filename: file-name-without-dot-json",
+ exception.getMessage());
}
@Test
void throws_exception_on_unknown_file() {
- Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
- SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-file-name/"));
- archive.validateAllFilesAreForTargets(SystemName.main, Set.of(mainControllerTarget, prodUsWestCfgTarget));
+ Throwable exception = assertThrows(FlagValidationException.class, () -> {
+ SystemFlagsDataArchive archive = fromDirectory("system-flags-with-unknown-file-name", true);
+ archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
});
- assertTrue(exception.getMessage().contains("Unknown flag file: flags/my-test-flag/main.prod.unknown-region.json"));
+ assertEquals("Unknown flag file: flags/my-test-flag/main.prod.unknown-region.json", exception.getMessage());
+ }
+
+ @Test
+ void unknown_region_is_still_zipped() {
+ // This is useful when the program zipping the files is on a different version than the controller
+ var archive = fromDirectory("system-flags-with-unknown-file-name", false);
+ assertTrue(archive.hasFlagData(MY_TEST_FLAG, "main.prod.unknown-region.json"));
}
@Test
void throws_exception_on_unknown_region() {
- Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
- Path directory = Paths.get("src/test/resources/system-flags-with-unknown-file-name/");
- SystemFlagsDataArchive.fromDirectoryAndSystem(directory, createZoneRegistryMock());
+ Throwable exception = assertThrows(FlagValidationException.class, () -> {
+ var archive = fromDirectory("system-flags-with-unknown-file-name", true);
+ archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
});
- assertTrue(exception.getMessage().contains("Environment or zone in filename 'main.prod.unknown-region.json' does not exist"));
+ assertEquals("Unknown flag file: flags/my-test-flag/main.prod.unknown-region.json", exception.getMessage());
}
@Test
void throws_on_unknown_field() {
- Throwable exception = assertThrows(IllegalArgumentException.class, () -> {
- SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-unknown-field-name/"));
+ Throwable exception = assertThrows(FlagValidationException.class, () -> {
+ fromDirectory("system-flags-with-unknown-field-name", true);
});
assertEquals("""
- flags/my-test-flag/main.prod.us-west-1.json contains unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
+ In file flags/my-test-flag/main.prod.us-west-1.json: Unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
{"id":"my-test-flag","rules":[{"condition":[{"type":"whitelist","dimension":"hostname","values":["foo.com"]}],"value":"default"}]}
but deserializing this ended up with:
{"id":"my-test-flag","rules":[{"value":"default"}]}
@@ -160,7 +202,7 @@ public class SystemFlagsDataArchiveTest {
@Test
void handles_absent_rule_value() {
- SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/system-flags-with-null-value/"));
+ SystemFlagsDataArchive archive = fromDirectory("system-flags-with-null-value", true);
// west has null value on first rule
List<FlagData> westFlagData = archive.flagData(prodUsWestCfgTarget);
@@ -179,6 +221,7 @@ public class SystemFlagsDataArchiveTest {
void remove_comments_and_null_value_in_rules() {
assertTrue(JSON.equals("""
{
+ "id": "foo",
"rules": [
{
"conditions": [
@@ -210,8 +253,9 @@ public class SystemFlagsDataArchiveTest {
}
]
}""",
- SystemFlagsDataArchive.normalizeJson("""
+ normalizeJson("""
{
+ "id": "foo",
"comment": "bar",
"rules": [
{
@@ -250,82 +294,91 @@ public class SystemFlagsDataArchiveTest {
"value": true
}
]
- }""", Set.of(ZoneId.from("prod.us-west-1")))));
+ }""")));
+ }
+
+ private static String normalizeJson(String json) {
+ SystemFlagsDataArchive.Builder builder = new SystemFlagsDataArchive.Builder();
+ assertTrue(builder.maybeAddFile(Path.of("flags/temporary/foo/default.json"), json, createZoneRegistryMock(), true));
+ List<FlagData> flagData = builder.build().flagData(prodUsWestCfgTarget);
+ assertEquals(1, flagData.size());
+ return JSON.canonical(flagData.get(0).serializeToJson());
}
@Test
void normalize_json_succeed_on_valid_values() {
- normalizeJson("application", "\"a:b:c\"");
- normalizeJson("cloud", "\"yahoo\"");
- normalizeJson("cloud", "\"aws\"");
- normalizeJson("cloud", "\"gcp\"");
- normalizeJson("cluster-id", "\"some-id\"");
- normalizeJson("cluster-type", "\"admin\"");
- normalizeJson("cluster-type", "\"container\"");
- normalizeJson("cluster-type", "\"content\"");
- normalizeJson("console-user-email", "\"name@domain.com\"");
- normalizeJson("environment", "\"prod\"");
- normalizeJson("environment", "\"staging\"");
- normalizeJson("environment", "\"test\"");
- normalizeJson("hostname", "\"2080046-v6-11.ostk.bm2.prod.gq1.yahoo.com\"");
- normalizeJson("node-type", "\"tenant\"");
- normalizeJson("node-type", "\"host\"");
- normalizeJson("node-type", "\"config\"");
- normalizeJson("node-type", "\"host\"");
- normalizeJson("system", "\"main\"");
- normalizeJson("system", "\"public\"");
- normalizeJson("tenant", "\"vespa\"");
- normalizeJson("vespa-version", "\"8.201.13\"");
- normalizeJson("zone", "\"prod.us-west-1\"", Set.of(ZoneId.from("prod.us-west-1")));
- }
-
- private void normalizeJson(String dimension, String jsonValue) {
- normalizeJson(dimension, jsonValue, Set.of());
- }
-
- private void normalizeJson(String dimension, String jsonValue, Set<ZoneId> zones) {
- SystemFlagsDataArchive.normalizeJson("""
+ addFile(Condition.Type.WHITELIST, "application", "a:b:c");
+ addFile(Condition.Type.WHITELIST, "cloud", "yahoo");
+ addFile(Condition.Type.WHITELIST, "cloud", "aws");
+ addFile(Condition.Type.WHITELIST, "cloud", "gcp");
+ addFile(Condition.Type.WHITELIST, "cluster-id", "some-id");
+ addFile(Condition.Type.WHITELIST, "cluster-type", "admin");
+ addFile(Condition.Type.WHITELIST, "cluster-type", "container");
+ addFile(Condition.Type.WHITELIST, "cluster-type", "content");
+ addFile(Condition.Type.WHITELIST, "console-user-email", "name@domain.com");
+ addFile(Condition.Type.WHITELIST, "environment", "prod");
+ addFile(Condition.Type.WHITELIST, "environment", "staging");
+ addFile(Condition.Type.WHITELIST, "environment", "test");
+ addFile(Condition.Type.WHITELIST, "hostname", "2080046-v6-11.ostk.bm2.prod.gq1.yahoo.com");
+ addFile(Condition.Type.WHITELIST, "node-type", "tenant");
+ addFile(Condition.Type.WHITELIST, "node-type", "host");
+ addFile(Condition.Type.WHITELIST, "node-type", "config");
+ addFile(Condition.Type.WHITELIST, "node-type", "host");
+ addFile(Condition.Type.WHITELIST, "system", "main");
+ addFile(Condition.Type.WHITELIST, "system", "public");
+ addFile(Condition.Type.WHITELIST, "tenant", "vespa");
+ addFile(Condition.Type.RELATIONAL, "vespa-version", ">=8.201.13");
+ addFile(Condition.Type.WHITELIST, "zone", "prod.us-west-1");
+ }
+
+ private void addFile(Condition.Type type, String dimension, String jsonValue) {
+ SystemFlagsDataArchive.Builder builder = new SystemFlagsDataArchive.Builder();
+
+ String valuesField = type == Condition.Type.RELATIONAL ?
+ "\"predicate\": \"%s\"".formatted(jsonValue) :
+ "\"values\": [ \"%s\" ]".formatted(jsonValue);
+
+ assertTrue(builder.maybeAddFile(Path.of("flags/temporary/foo/default.json"), """
{
"id": "foo",
"rules": [
{
"conditions": [
{
- "type": "whitelist",
+ "type": "%s",
"dimension": "%s",
- "values": [ %s ]
+ %s
}
],
"value": true
}
]
}
- """.formatted(dimension, jsonValue), zones);
+ """.formatted(type.toWire(), dimension, valuesField),
+ createZoneRegistryMock(),
+ true));
}
@Test
void normalize_json_fail_on_invalid_values() {
- failNormalizeJson("application", "\"a.b.c\"", "Application ids must be on the form tenant:application:instance, but was a.b.c");
- failNormalizeJson("cloud", "\"foo\"", "Unknown cloud: foo");
- // failNormalizeJson("cluster-id", ... any String is valid
- failNormalizeJson("cluster-type", "\"foo\"", "Illegal cluster type 'foo'");
- failNormalizeJson("console-user-email", "123", "Non-string value in console-user-email whitelist condition: 123");
- failNormalizeJson("environment", "\"foo\"", "'foo' is not a valid environment identifier");
- failNormalizeJson("hostname", "\"not:a:hostname\"", "hostname must match '(([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.?', but got: 'not:a:hostname'");
- failNormalizeJson("node-type", "\"footype\"", "No enum constant com.yahoo.config.provision.NodeType.footype");
- failNormalizeJson("system", "\"bar\"", "'bar' is not a valid system");
- failNormalizeJson("tenant", "123", "Non-string value in tenant whitelist condition: 123");
- failNormalizeJson("vespa-version", "\"not-a-version\"", "Invalid version component in 'not-a-version'");
- failNormalizeJson("zone", "\"dev.non-existing-zone\"", Set.of(ZoneId.from("prod.example-region")), "Unknown zone: dev.non-existing-zone");
- }
-
- private void failNormalizeJson(String dimension, String jsonValue, String expectedExceptionMessage) {
- failNormalizeJson(dimension, jsonValue, Set.of(), expectedExceptionMessage);
+ failAddFile(Condition.Type.WHITELIST, "application", "a.b.c", "In file flags/temporary/foo/default.json: Invalid application 'a.b.c' in whitelist condition: Application ids must be on the form tenant:application:instance, but was a.b.c");
+ failAddFile(Condition.Type.WHITELIST, "cloud", "foo", "In file flags/temporary/foo/default.json: Unknown cloud: foo");
+ // cluster-id: any String is valid
+ failAddFile(Condition.Type.WHITELIST, "cluster-type", "foo", "In file flags/temporary/foo/default.json: Invalid cluster-type 'foo' in whitelist condition: Illegal cluster type 'foo'");
+ failAddFile(Condition.Type.WHITELIST, "console-user-email", "not-valid-email-address", "In file flags/temporary/foo/default.json: Invalid email address: not-valid-email-address");
+ failAddFile(Condition.Type.WHITELIST, "environment", "foo", "In file flags/temporary/foo/default.json: Invalid environment 'foo' in whitelist condition: 'foo' is not a valid environment identifier");
+ failAddFile(Condition.Type.WHITELIST, "hostname", "not:a:hostname", "In file flags/temporary/foo/default.json: Invalid hostname 'not:a:hostname' in whitelist condition: hostname must match '(([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.?', but got: 'not:a:hostname'");
+ failAddFile(Condition.Type.WHITELIST, "node-type", "footype", "In file flags/temporary/foo/default.json: Invalid node-type 'footype' in whitelist condition: No enum constant com.yahoo.config.provision.NodeType.footype");
+ failAddFile(Condition.Type.WHITELIST, "system", "bar", "In file flags/temporary/foo/default.json: Invalid system 'bar' in whitelist condition: 'bar' is not a valid system");
+ failAddFile(Condition.Type.WHITELIST, "tenant", "a tenant", "In file flags/temporary/foo/default.json: Invalid tenant 'a tenant' in whitelist condition: tenant name must match '[a-zA-Z0-9_-]{1,256}', but got: 'a tenant'");
+ failAddFile(Condition.Type.WHITELIST, "vespa-version", "not-a-version", "In file flags/temporary/foo/default.json: whitelist vespa-version condition is not supported");
+ failAddFile(Condition.Type.RELATIONAL, "vespa-version", ">7.1.2", "In file flags/temporary/foo/default.json: Major Vespa version must be at least 8: 7.1.2");
+ failAddFile(Condition.Type.WHITELIST, "zone", "dev.%illegal", "In file flags/temporary/foo/default.json: Invalid zone 'dev.%illegal' in whitelist condition: region name must match '[a-z]([a-z0-9-]*[a-z0-9])*', but got: '%illegal'");
}
- private void failNormalizeJson(String dimension, String jsonValue, Set<ZoneId> zones, String expectedExceptionMessage) {
+ private void failAddFile(Condition.Type type, String dimension, String jsonValue, String expectedExceptionMessage) {
try {
- normalizeJson(dimension, jsonValue, zones);
+ addFile(type, dimension, jsonValue);
fail();
} catch (RuntimeException e) {
assertEquals(expectedExceptionMessage, e.getMessage());
@@ -334,14 +387,16 @@ public class SystemFlagsDataArchiveTest {
@Test
void ignores_files_not_related_to_specified_system_definition() {
- ZoneRegistry registry = createZoneRegistryMock();
- Path testDirectory = Paths.get("src/test/resources/system-flags-for-multiple-systems/");
- var archive = SystemFlagsDataArchive.fromDirectoryAndSystem(testDirectory, registry);
+ var archive = fromDirectory("system-flags-for-multiple-systems", false);
assertFlagDataHasValue(archive, MY_TEST_FLAG, cdControllerTarget, "default"); // Would be 'cd.controller' if files for CD system were included
assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default");
assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1");
}
+ private SystemFlagsDataArchive fromDirectory(String testDirectory, boolean simulateInController) {
+ return SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/" + testDirectory), createZoneRegistryMock(), simulateInController);
+ }
+
@SuppressWarnings("unchecked") // workaround for mocking a method for generic return type
private static ZoneRegistry createZoneRegistryMock() {
// Cannot use the standard registry mock as it's located in controller-server module
@@ -354,12 +409,21 @@ public class SystemFlagsDataArchiveTest {
when(registryMock.systemZone()).thenReturn(zoneApi);
when(registryMock.getConfigServerVipUri(any())).thenReturn(URI.create("http://localhost:8080/"));
when(registryMock.getConfigServerHttpsIdentity(any())).thenReturn(new AthenzService("domain", "servicename"));
+ ZoneList zones = mockZoneList("prod.us-west-1", "prod.us-east-3");
+ when(registryMock.zones()).thenReturn(zones);
+ ZoneList zonesIncludingSystem = mockZoneList("prod.us-west-1", "prod.us-east-3", "prod.controller");
+ when(registryMock.zonesIncludingSystem()).thenReturn(zonesIncludingSystem);
+ return registryMock;
+ }
+
+    @SuppressWarnings("unchecked") // workaround for mocking a method with a generic return type
+ private static ZoneList mockZoneList(String... zones) {
ZoneList zoneListMock = mock(ZoneList.class);
when(zoneListMock.reachable()).thenReturn(zoneListMock);
when(zoneListMock.all()).thenReturn(zoneListMock);
- when(zoneListMock.zones()).thenReturn((List)List.of(new SimpleZone("prod.us-west-1"), new SimpleZone("prod.us-east-3")));
- when(registryMock.zones()).thenReturn(zoneListMock);
- return registryMock;
+ List<? extends ZoneApi> zoneList = Stream.of(zones).map(SimpleZone::new).toList();
+ when(zoneListMock.zones()).thenReturn((List) zoneList);
+ return zoneListMock;
}
private static void assertArchiveReturnsCorrectTestFlagDataForTarget(SystemFlagsDataArchive archive) {
@@ -373,7 +437,7 @@ public class SystemFlagsDataArchiveTest {
List<FlagData> data = getData(archive, flagId, target);
assertEquals(1, data.size());
FlagData flagData = data.get(0);
- RawFlag rawFlag = flagData.resolve(FetchVector.fromMap(Map.of())).get();
+ RawFlag rawFlag = flagData.resolve(new FetchVector()).get();
assertEquals(String.format("\"%s\"", value), rawFlag.asJson());
}
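
The mockZoneList helper above needs @SuppressWarnings("unchecked") and a raw (List) cast because the stubbed method returns a bounded wildcard type, which when(...).thenReturn(...) cannot accept directly. If the cast were to be avoided, Mockito's doReturn form is one possible alternative; the sketch below uses stand-in interfaces so it compiles on its own, since the real ZoneApi/ZoneList types live elsewhere in the code base.

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    import java.util.List;

    // Stand-ins for ZoneApi/ZoneList, only so this sketch is self-contained.
    interface ZoneApiStub { String value(); }
    interface ZoneListStub {
        ZoneListStub reachable();
        ZoneListStub all();
        List<? extends ZoneApiStub> zones();
    }

    class ZoneListMockSketch {
        static ZoneListStub mockZoneList(ZoneApiStub... zones) {
            ZoneListStub listMock = mock(ZoneListStub.class);
            doReturn(listMock).when(listMock).reachable();
            doReturn(listMock).when(listMock).all();
            doReturn(List.of(zones)).when(listMock).zones(); // doReturn is untyped, so no raw cast
            return listMock;
        }
    }
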
diff --git a/controller-server/pom.xml b/controller-server/pom.xml
index 64cc89c3321..5e738a2dd4a 100644
--- a/controller-server/pom.xml
+++ b/controller-server/pom.xml
@@ -184,11 +184,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
index 66e62ff7b95..f7eb7cdde0d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
@@ -8,6 +8,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.vespa.hosted.controller.application.ApplicationActivity;
@@ -52,7 +53,8 @@ public class Application {
private final OptionalLong projectId;
private final Optional<IssueId> deploymentIssueId;
private final Optional<IssueId> ownershipIssueId;
- private final Optional<User> owner;
+ private final Optional<User> userOwner;
+ private final Optional<AccountId> issueOwner;
private final OptionalInt majorVersion;
private final ApplicationMetrics metrics;
private final Set<PublicKey> deployKeys;
@@ -60,14 +62,14 @@ public class Application {
/** Creates an empty application. */
public Application(TenantAndApplicationId id, Instant now) {
- this(id, now, DeploymentSpec.empty, ValidationOverrides.empty, Optional.empty(),
+ this(id, now, DeploymentSpec.empty, ValidationOverrides.empty, Optional.empty(), Optional.empty(),
Optional.empty(), Optional.empty(), OptionalInt.empty(), new ApplicationMetrics(0, 0),
Set.of(), OptionalLong.empty(), RevisionHistory.empty(), List.of());
}
// Do not use directly - edit through LockedApplication.
public Application(TenantAndApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec, ValidationOverrides validationOverrides,
- Optional<IssueId> deploymentIssueId, Optional<IssueId> ownershipIssueId, Optional<User> owner,
+ Optional<IssueId> deploymentIssueId, Optional<IssueId> ownershipIssueId, Optional<User> userOwner, Optional<AccountId> issueOwner,
OptionalInt majorVersion, ApplicationMetrics metrics, Set<PublicKey> deployKeys, OptionalLong projectId,
RevisionHistory revisions, Collection<Instance> instances) {
this.id = Objects.requireNonNull(id, "id cannot be null");
@@ -76,7 +78,8 @@ public class Application {
this.validationOverrides = Objects.requireNonNull(validationOverrides, "validationOverrides cannot be null");
this.deploymentIssueId = Objects.requireNonNull(deploymentIssueId, "deploymentIssueId cannot be null");
this.ownershipIssueId = Objects.requireNonNull(ownershipIssueId, "ownershipIssueId cannot be null");
- this.owner = Objects.requireNonNull(owner, "owner cannot be null");
+        this.userOwner = Objects.requireNonNull(userOwner, "userOwner cannot be null");
+ this.issueOwner = Objects.requireNonNull(issueOwner, "issueOwner cannot be null");
this.majorVersion = Objects.requireNonNull(majorVersion, "majorVersion cannot be null");
this.metrics = Objects.requireNonNull(metrics, "metrics cannot be null");
this.deployKeys = Objects.requireNonNull(deployKeys, "deployKeys cannot be null");
@@ -143,8 +146,12 @@ public class Application {
return ownershipIssueId;
}
- public Optional<User> owner() {
- return owner;
+ public Optional<User> userOwner() {
+ return userOwner;
+ }
+
+ public Optional<AccountId> issueOwner() {
+ return issueOwner;
}
/**
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index bac2c0ab9d7..de3e29386c9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -13,6 +13,7 @@ import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.Tags;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.text.Text;
@@ -28,6 +29,7 @@ import com.yahoo.vespa.flags.ListFlag;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeploymentData;
+import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeploymentEndpoints;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.identifiers.InstanceId;
import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingController;
@@ -36,7 +38,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.billing.Quota;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ApplicationReindexing;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.DeploymentResult;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.DeploymentResult.LogEntry;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
@@ -55,13 +56,13 @@ import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics.Warning;
import com.yahoo.vespa.hosted.controller.application.DeploymentQuotaCalculator;
-import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint;
import com.yahoo.vespa.hosted.controller.application.QuotaUsage;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageStream;
import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageValidator;
+import com.yahoo.vespa.hosted.controller.application.pkg.BasicServicesXml;
import com.yahoo.vespa.hosted.controller.athenz.impl.AthenzFacade;
import com.yahoo.vespa.hosted.controller.certificate.EndpointCertificates;
import com.yahoo.vespa.hosted.controller.concurrent.Once;
@@ -72,6 +73,8 @@ import com.yahoo.vespa.hosted.controller.deployment.Run;
import com.yahoo.vespa.hosted.controller.notification.Notification;
import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
+import com.yahoo.vespa.hosted.controller.routing.GeneratedEndpoints;
+import com.yahoo.vespa.hosted.controller.routing.PreparedEndpoints;
import com.yahoo.vespa.hosted.controller.security.AccessControl;
import com.yahoo.vespa.hosted.controller.security.Credentials;
import com.yahoo.vespa.hosted.controller.support.access.SupportAccessGrant;
@@ -89,7 +92,6 @@ import java.security.cert.X509Certificate;
import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
-import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
@@ -105,6 +107,7 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.function.Supplier;
+import java.util.function.UnaryOperator;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -496,7 +499,7 @@ public class ApplicationController {
}
/** Deploys an application package for an existing application instance. */
- public DeploymentResult deploy(JobId job, boolean deploySourceVersions, Consumer<String> deployLogger) {
+ public DeploymentResult deploy(JobId job, boolean deploySourceVersions, Consumer<String> deployLogger, UnaryOperator<Optional<CloudAccount>> cloudAccountOverride) {
if (job.application().instance().isTester())
throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
@@ -515,28 +518,18 @@ public class ApplicationController {
RevisionId revision = run.versions().sourceRevision().filter(__ -> deploySourceVersions).orElse(run.versions().targetRevision());
ApplicationPackageStream applicationPackage = new ApplicationPackageStream(() -> applicationStore.stream(deployment, revision));
AtomicReference<RevisionId> lastRevision = new AtomicReference<>();
- Instance instance;
- Set<ContainerEndpoint> containerEndpoints;
- try (Mutex lock = lock(applicationId)) {
- LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
- application.get().revisions().last().map(ApplicationVersion::id).ifPresent(lastRevision::set);
- instance = application.get().require(job.application().instance());
-
- containerEndpoints = controller.routing().of(deployment).prepare(application);
- } // Release application lock while doing the deployment, which is a lengthy task.
-
- Supplier<Optional<EndpointCertificate>> endpointCertificate = () -> {
+ // Prepare endpoints lazily
+ Supplier<PreparedEndpoints> preparedEndpoints = () -> {
try (Mutex lock = lock(applicationId)) {
- Optional<EndpointCertificate> data = endpointCertificates.get(instance, zone, applicationPackage.truncatedPackage().deploymentSpec());
- data.ifPresent(e -> deployLogger.accept("Using CA signed certificate version %s".formatted(e.version())));
- return data;
+ LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
+ application.get().revisions().last().map(ApplicationVersion::id).ifPresent(lastRevision::set);
+ return prepareEndpoints(deployment, job, application, applicationPackage, deployLogger);
}
};
// Carry out deployment without holding the application lock.
- DeploymentDataAndResult dataAndResult = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints,
- endpointCertificate, run.isDryRun(), run.testerCertificate());
-
+ DeploymentDataAndResult dataAndResult = deploy(job.application(), applicationPackage, zone, platform, preparedEndpoints,
+ run.isDryRun(), run.testerCertificate(), cloudAccountOverride);
// Record the quota usage for this application
var quotaUsage = deploymentQuotaUsage(zone, job.application());
@@ -572,6 +565,28 @@ public class ApplicationController {
}
}
+ private PreparedEndpoints prepareEndpoints(DeploymentId deployment, JobId job, LockedApplication application,
+ ApplicationPackageStream applicationPackage, Consumer<String> deployLogger) {
+ Instance instance = application.get().require(job.application().instance());
+ Tags tags = applicationPackage.truncatedPackage().deploymentSpec().instance(instance.name())
+ .map(DeploymentInstanceSpec::tags)
+ .orElseGet(Tags::empty);
+ Optional<EndpointCertificate> certificate = endpointCertificates.get(instance, deployment.zoneId(), applicationPackage.truncatedPackage().deploymentSpec());
+ certificate.ifPresent(e -> deployLogger.accept("Using CA signed certificate version %s".formatted(e.version())));
+ BasicServicesXml services;
+ try {
+ services = applicationPackage.truncatedPackage().services(deployment, tags);
+ } catch (Exception e) {
+ // If the basic parsing done by the controller fails, we ignore the exception here so that
+ // complete parsing errors are propagated from the config server. Otherwise, throwing here
+ // will interrupt the request while it's being streamed to the config server
+ log.warning("Ignoring failure to parse services.xml for deployment " + deployment +
+ " while streaming application package: " + Exceptions.toMessageString(e));
+ services = BasicServicesXml.empty;
+ }
+ return controller.routing().of(deployment).prepare(services, certificate, application);
+ }
+
/** Stores the deployment spec and validation overrides from the application package, and runs cleanup. Returns new instances. */
public List<InstanceName> storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
validatePackage(applicationPackage, application.get());
@@ -635,26 +650,27 @@ public class ApplicationController {
ApplicationPackageStream applicationPackage = new ApplicationPackageStream(
() -> new ByteArrayInputStream(artifactRepository.getSystemApplicationPackage(application.id(), zone, version))
);
- return deploy(application.id(), applicationPackage, zone, version, Set.of(), Optional::empty, false, Optional.empty()).result();
+ return deploy(application.id(), applicationPackage, zone, version, null, false, Optional.empty(), UnaryOperator.identity()).result();
} else {
throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
}
}
/** Deploys the given tester application to the given zone. */
- public DeploymentResult deployTester(TesterId tester, ApplicationPackageStream applicationPackage, ZoneId zone, Version platform) {
- return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), Optional::empty, false, Optional.empty()).result();
+ public DeploymentResult deployTester(TesterId tester, ApplicationPackageStream applicationPackage, ZoneId zone, Version platform, UnaryOperator<Optional<CloudAccount>> cloudAccountOverride) {
+ return deploy(tester.id(), applicationPackage, zone, platform, null, false, Optional.empty(), cloudAccountOverride).result();
}
private record DeploymentDataAndResult(DeploymentData data, DeploymentResult result) {}
+
private DeploymentDataAndResult deploy(ApplicationId application, ApplicationPackageStream applicationPackage,
- ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
- Supplier<Optional<EndpointCertificate>> endpointCertificate,
- boolean dryRun, Optional<X509Certificate> testerCertificate) {
+ ZoneId zone, Version platform, Supplier<PreparedEndpoints> preparedEndpoints,
+ boolean dryRun, Optional<X509Certificate> testerCertificate,
+ UnaryOperator<Optional<CloudAccount>> cloudAccountOverride) {
DeploymentId deployment = new DeploymentId(application, zone);
// Routing and metadata may have changed, so we need to refresh state after deployment, even if deployment fails.
interface CleanCloseable extends AutoCloseable { void close(); }
- List<GeneratedEndpoint> generatedEndpoints = new ArrayList<>();
+ AtomicReference<GeneratedEndpoints> generatedEndpoints = new AtomicReference<>(GeneratedEndpoints.empty);
try (CleanCloseable postDeployment = () -> updateRoutingAndMeta(deployment, applicationPackage, generatedEndpoints)) {
Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
dockerImageRepoFlag
@@ -682,28 +698,25 @@ public class ApplicationController {
if (testerCertificate.isPresent()) {
operatorCertificates = Stream.concat(operatorCertificates.stream(), testerCertificate.stream()).toList();
}
- Supplier<Optional<CloudAccount>> cloudAccount = () -> decideCloudAccountOf(deployment, applicationPackage.truncatedPackage().deploymentSpec());
+ Supplier<Optional<CloudAccount>> cloudAccount = () -> cloudAccountOverride.apply(decideCloudAccountOf(deployment, applicationPackage.truncatedPackage().deploymentSpec()));
List<DataplaneTokenVersions> dataplaneTokenVersions = controller.dataplaneTokenService().listTokens(application.tenant());
- Supplier<Optional<EndpointCertificate>> endpointCertificateWrapper = () -> {
- Optional<EndpointCertificate> data = endpointCertificate.get();
- // TODO(mpolden): Pass these endpoints to config server as part of the deploy call. This will let the
- // application know which endpoints are mTLS and which are token-based
- data.flatMap(EndpointCertificate::randomizedId)
- .ifPresent(applicationPart -> generatedEndpoints.addAll(controller.routing().generateEndpoints(applicationPart, deployment.applicationId())));
- return data;
+ Supplier<DeploymentEndpoints> endpoints = () -> {
+ if (preparedEndpoints == null) return DeploymentEndpoints.none;
+ PreparedEndpoints prepared = preparedEndpoints.get();
+ generatedEndpoints.set(prepared.generatedEndpoints());
+ return new DeploymentEndpoints(prepared.containerEndpoints(), prepared.certificate());
};
DeploymentData deploymentData = new DeploymentData(application, zone, applicationPackage::zipStream, platform,
- endpoints, endpointCertificateWrapper, dockerImageRepo, domain,
- deploymentQuota, tenantSecretStores, operatorCertificates, cloudAccount, dataplaneTokenVersions, dryRun);
+ endpoints, dockerImageRepo, domain, deploymentQuota, tenantSecretStores, operatorCertificates, cloudAccount, dataplaneTokenVersions, dryRun);
ConfigServer.PreparedApplication preparedApplication = configServer.deploy(deploymentData);
return new DeploymentDataAndResult(deploymentData, preparedApplication.deploymentResult());
}
}
- private void updateRoutingAndMeta(DeploymentId id, ApplicationPackageStream data, List<GeneratedEndpoint> generatedEndpoints) {
+ private void updateRoutingAndMeta(DeploymentId id, ApplicationPackageStream data, AtomicReference<GeneratedEndpoints> generatedEndpoints) {
if (id.applicationId().instance().isTester()) return;
- controller.routing().of(id).configure(data.truncatedPackage().deploymentSpec(), generatedEndpoints);
+ controller.routing().of(id).activate(data.truncatedPackage().deploymentSpec(), generatedEndpoints.get());
if ( ! id.zoneId().environment().isManuallyDeployed()) return;
controller.applications().applicationStore().putMeta(id, clock.instant(), data.truncatedPackage().metaDataZip());
}
@@ -764,7 +777,7 @@ public class ApplicationController {
}
/**
- * Deletes the the given application. All known instances of the applications will be deleted.
+     * Deletes the given application. All known instances of the application will be deleted.
*
* @throws IllegalArgumentException if the application has deployments or the caller is not authorized
*/
@@ -784,7 +797,7 @@ public class ApplicationController {
throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);
for (Instance instance : application.get().instances().values()) {
- controller.routing().removeEndpointsInDns(application.get(), instance.name());
+ controller.routing().removeRotationEndpointsFromDns(application.get(), instance.name());
application = application.without(instance.name());
}
@@ -820,7 +833,7 @@ public class ApplicationController {
&& application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");
- controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
+ controller.routing().removeRotationEndpointsFromDns(application.get(), instanceId.instance());
curator.writeApplication(application.without(instanceId.instance()).get());
controller.jobController().collectGarbage();
controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
@@ -873,7 +886,7 @@ public class ApplicationController {
/**
* Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
- * If this cannot be ascertained, we must assumed it is not.
+ * If this cannot be ascertained, we must assume it is not.
*/
public boolean isHealthy(DeploymentId deploymentId) {
try {
@@ -918,7 +931,7 @@ public class ApplicationController {
DeploymentId id = new DeploymentId(instanceId, zone);
interface CleanCloseable extends AutoCloseable { void close(); }
try (CleanCloseable postDeactivation = () -> {
- application.ifPresent(app -> controller.routing().of(id).configure(app.get().deploymentSpec(), List.of()));
+ application.ifPresent(app -> controller.routing().of(id).activate(app.get().deploymentSpec(), GeneratedEndpoints.empty));
if (id.zoneId().environment().isManuallyDeployed())
applicationStore.putMetaTombstone(id, clock.instant());
if ( ! id.zoneId().environment().isTest())
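
The deploy refactoring above moves endpoint preparation behind a Supplier so the application lock is taken only if and when endpoints are actually needed, and records whatever the supplier produced in an AtomicReference so the try-with-resources cleanup can still refresh routing when the deployment throws. A small self-contained sketch of that shape, with made-up names, is:

    import java.util.List;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.function.Supplier;

    class LazyPrepareSketch {
        // Narrows AutoCloseable so the try-with-resources block needs no catch clause.
        interface CleanCloseable extends AutoCloseable { @Override void close(); }

        static String deploy(Supplier<List<String>> prepareEndpoints) {
            AtomicReference<List<String>> generated = new AtomicReference<>(List.of());
            // The cleanup runs whether or not the body below throws, and sees 'generated'.
            try (CleanCloseable postDeployment = () -> System.out.println("activate " + generated.get())) {
                List<String> prepared = prepareEndpoints.get(); // evaluated lazily, only here
                generated.set(prepared);
                return "deployed with " + prepared.size() + " endpoints";
            }
        }

        public static void main(String[] args) {
            System.out.println(deploy(() -> List.of("endpoint-1", "endpoint-2")));
        }
    }
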
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
index b99c52d1533..066d10041c2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/LockedApplication.java
@@ -5,6 +5,7 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.transaction.Mutex;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
@@ -37,7 +38,8 @@ public class LockedApplication {
private final ValidationOverrides validationOverrides;
private final Optional<IssueId> deploymentIssueId;
private final Optional<IssueId> ownershipIssueId;
- private final Optional<User> owner;
+ private final Optional<User> userOwner;
+ private final Optional<AccountId> issueOwner;
private final OptionalInt majorVersion;
private final ApplicationMetrics metrics;
private final Set<PublicKey> deployKeys;
@@ -53,15 +55,14 @@ public class LockedApplication {
*/
LockedApplication(Application application, Mutex lock) {
this(Objects.requireNonNull(lock, "lock cannot be null"), application.id(), application.createdAt(),
- application.deploymentSpec(), application.validationOverrides(),
- application.deploymentIssueId(), application.ownershipIssueId(),
- application.owner(), application.majorVersion(), application.metrics(), application.deployKeys(),
+ application.deploymentSpec(), application.validationOverrides(), application.deploymentIssueId(), application.ownershipIssueId(),
+ application.userOwner(), application.issueOwner(), application.majorVersion(), application.metrics(), application.deployKeys(),
application.projectId(), application.instances(), application.revisions());
}
private LockedApplication(Mutex lock, TenantAndApplicationId id, Instant createdAt, DeploymentSpec deploymentSpec,
- ValidationOverrides validationOverrides,
- Optional<IssueId> deploymentIssueId, Optional<IssueId> ownershipIssueId, Optional<User> owner,
+ ValidationOverrides validationOverrides, Optional<IssueId> deploymentIssueId,
+ Optional<IssueId> ownershipIssueId, Optional<User> userOwner, Optional<AccountId> issueOwner,
OptionalInt majorVersion, ApplicationMetrics metrics, Set<PublicKey> deployKeys,
OptionalLong projectId, Map<InstanceName, Instance> instances, RevisionHistory revisions) {
this.lock = lock;
@@ -71,7 +72,8 @@ public class LockedApplication {
this.validationOverrides = validationOverrides;
this.deploymentIssueId = deploymentIssueId;
this.ownershipIssueId = ownershipIssueId;
- this.owner = owner;
+ this.userOwner = userOwner;
+ this.issueOwner = issueOwner;
this.majorVersion = majorVersion;
this.metrics = metrics;
this.deployKeys = deployKeys;
@@ -83,7 +85,7 @@ public class LockedApplication {
/** Returns a read-only copy of this */
public Application get() {
return new Application(id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, deployKeys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, revisions, instances.values());
}
@@ -91,7 +93,7 @@ public class LockedApplication {
var instances = new HashMap<>(this.instances);
instances.put(instance, new Instance(id.instance(instance)));
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, deployKeys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
@@ -99,7 +101,7 @@ public class LockedApplication {
var instances = new HashMap<>(this.instances);
instances.put(instance, modification.apply(instances.get(instance)));
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, deployKeys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
@@ -107,57 +109,57 @@ public class LockedApplication {
var instances = new HashMap<>(this.instances);
instances.remove(instance);
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, deployKeys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
public LockedApplication withProjectId(OptionalLong projectId) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, deployKeys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
public LockedApplication withDeploymentIssueId(IssueId issueId) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- Optional.ofNullable(issueId), ownershipIssueId, owner, majorVersion, metrics, deployKeys,
+ Optional.ofNullable(issueId), ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
public LockedApplication with(DeploymentSpec deploymentSpec) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, deployKeys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
public LockedApplication with(ValidationOverrides validationOverrides) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, deployKeys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
public LockedApplication withOwnershipIssueId(IssueId issueId) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, Optional.of(issueId), owner, majorVersion, metrics, deployKeys,
+ deploymentIssueId, Optional.of(issueId), userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
- public LockedApplication withOwner(User owner) {
+ public LockedApplication withOwner(AccountId issueOwner) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, Optional.of(owner), majorVersion, metrics, deployKeys,
+ deploymentIssueId, ownershipIssueId, userOwner, Optional.of(issueOwner), majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
/** Set a major version for this, or set to null to remove any major version override */
public LockedApplication withMajorVersion(Integer majorVersion) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner,
- majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion),
+ deploymentIssueId, ownershipIssueId, userOwner,
+ issueOwner, majorVersion == null ? OptionalInt.empty() : OptionalInt.of(majorVersion),
metrics, deployKeys, projectId, instances, revisions);
}
public LockedApplication with(ApplicationMetrics metrics) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, deployKeys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, deployKeys,
projectId, instances, revisions);
}
@@ -165,7 +167,7 @@ public class LockedApplication {
Set<PublicKey> keys = new LinkedHashSet<>(deployKeys);
keys.add(pemDeployKey);
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, keys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, keys,
projectId, instances, revisions);
}
@@ -173,13 +175,13 @@ public class LockedApplication {
Set<PublicKey> keys = new LinkedHashSet<>(deployKeys);
keys.remove(pemDeployKey);
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, keys,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics, keys,
projectId, instances, revisions);
}
public LockedApplication withRevisions(UnaryOperator<RevisionHistory> change) {
return new LockedApplication(lock, id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics,
deployKeys, projectId, instances, change.apply(revisions));
}
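
Most of the LockedApplication hunk above is mechanical: the class is immutable and every with* method rebuilds it via one copy constructor, so splitting owner into userOwner and issueOwner touches every copy site. A minimal sketch of that copy-on-write pattern, with hypothetical simplified names (not the real API):

    import java.util.Optional;

    // Hypothetical stand-in for LockedApplication's immutable "wither" pattern:
    // adding issueOwner means every copy site must thread both owner fields through.
    final class OwnerSketch {
        final Optional<String> userOwner;   // legacy User-based owner
        final Optional<String> issueOwner;  // new AccountId-based owner

        OwnerSketch(Optional<String> userOwner, Optional<String> issueOwner) {
            this.userOwner = userOwner;
            this.issueOwner = issueOwner;
        }

        OwnerSketch withOwner(String issueOwner) {                  // mirrors withOwner(AccountId)
            return new OwnerSketch(userOwner, Optional.of(issueOwner));
        }

        public static void main(String[] args) {
            OwnerSketch s = new OwnerSketch(Optional.empty(), Optional.empty()).withOwner("account-123");
            System.out.println(s.userOwner + " / " + s.issueOwner); // Optional.empty / Optional[account-123]
        }
    }
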
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java
index c426c27418d..58c3b4da5e4 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java
@@ -11,6 +11,7 @@ import com.yahoo.vespa.hosted.controller.versions.OsVersionStatus;
import com.yahoo.vespa.hosted.controller.versions.OsVersionTarget;
import java.time.Instant;
+import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
@@ -161,9 +162,12 @@ public record OsController(Controller controller) {
/** Remove certifications for non-existent OS versions */
public void removeStaleCertifications(OsVersionStatus currentStatus) {
try (Mutex lock = curator().lockCertifiedOsVersions()) {
- Set<OsVersion> knownVersions = currentStatus.versions().keySet();
+ Optional<OsVersion> minKnownVersion = currentStatus.versions().keySet().stream()
+ .filter(v -> !v.version().isEmpty())
+ .min(Comparator.naturalOrder());
+ if (minKnownVersion.isEmpty()) return;
Set<CertifiedOsVersion> certifiedVersions = new HashSet<>(readCertified());
- if (certifiedVersions.removeIf(cv -> !knownVersions.contains(cv.osVersion()))) {
+ if (certifiedVersions.removeIf(cv -> cv.osVersion().version().isBefore(minKnownVersion.get().version()))) {
curator().writeCertifiedOsVersions(certifiedVersions);
}
}
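
The cleanup rule changes from "drop certifications whose version is not in the currently known set" to "drop certifications older than the oldest known, non-empty version", so certifications for newer, not-yet-known versions survive. A self-contained sketch of the new predicate, using simplified stand-in types rather than the real OsVersion/CertifiedOsVersion classes:

    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;
    import java.util.Set;

    // Sketch of the new stale-certification rule: keep anything at or above the oldest
    // known version instead of requiring an exact match with a known version.
    public class StaleCertificationSketch {

        record Version(int major, int minor) implements Comparable<Version> {
            @Override public int compareTo(Version other) {
                return Comparator.comparingInt(Version::major)
                                 .thenComparingInt(Version::minor)
                                 .compare(this, other);
            }
            boolean isBefore(Version other) { return compareTo(other) < 0; }
        }

        public static void main(String[] args) {
            Set<Version> known = Set.of(new Version(8, 2), new Version(8, 3));
            List<Version> certified = List.of(new Version(8, 1), new Version(8, 2), new Version(8, 4));

            Optional<Version> minKnown = known.stream().min(Comparator.naturalOrder());
            // 8.4 is not known yet but is kept, since it is not before the minimum (8.2);
            // only 8.1 is removed.
            List<Version> kept = certified.stream()
                                          .filter(v -> minKnown.isPresent() && !v.isBefore(minKnown.get()))
                                          .toList();
            System.out.println(kept); // [Version[major=8, minor=2], Version[major=8, minor=4]]
        }
    }
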
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
index bceef3fd96f..cc6195c075d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/RoutingController.java
@@ -10,6 +10,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.flags.BooleanFlag;
@@ -17,7 +18,6 @@ import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordData;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
@@ -29,8 +29,10 @@ import com.yahoo.vespa.hosted.controller.application.EndpointList;
import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
-import com.yahoo.vespa.hosted.controller.certificate.AssignedCertificate;
+import com.yahoo.vespa.hosted.controller.application.pkg.BasicServicesXml;
import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue.Priority;
+import com.yahoo.vespa.hosted.controller.routing.GeneratedEndpoints;
+import com.yahoo.vespa.hosted.controller.routing.PreparedEndpoints;
import com.yahoo.vespa.hosted.controller.routing.RoutingId;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicies;
import com.yahoo.vespa.hosted.controller.routing.context.DeploymentRoutingContext;
@@ -51,13 +53,11 @@ import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
-import java.util.OptionalInt;
import java.util.Set;
import java.util.TreeMap;
import java.util.stream.Collectors;
@@ -117,22 +117,95 @@ public class RoutingController {
return rotationRepository;
}
- /** Read and return zone-scoped endpoints for given deployment */
+ /** Prepares and returns the endpoints relevant for given deployment */
+ public PreparedEndpoints prepare(DeploymentId deployment, BasicServicesXml services, Optional<EndpointCertificate> certificate, LockedApplication application) {
+ EndpointList endpoints = EndpointList.EMPTY;
+
+ // Assign rotations to application
+ for (var deploymentInstanceSpec : application.get().deploymentSpec().instances()) {
+ if (deploymentInstanceSpec.concerns(Environment.prod)) {
+ application = controller.routing().assignRotations(application, deploymentInstanceSpec.name());
+ }
+ }
+
+ // Add zone-scoped endpoints
+ final GeneratedEndpoints generatedEndpoints;
+ if (!usesSharedRouting(deployment.zoneId())) { // TODO(mpolden): Remove this check when config models < 8.230 are gone
+ boolean includeTokenEndpoint = tokenEndpointEnabled(deployment.applicationId());
+ Map<ClusterSpec.Id, List<GeneratedEndpoint>> generatedEndpointsByCluster = new HashMap<>();
+ for (var container : services.containers()) {
+ ClusterSpec.Id clusterId = ClusterSpec.Id.from(container.id());
+ boolean tokenSupported = includeTokenEndpoint && container.authMethods().contains(BasicServicesXml.Container.AuthMethod.token);
+ List<GeneratedEndpoint> generatedForCluster = certificate.flatMap(EndpointCertificate::randomizedId)
+ .map(id -> generateEndpoints(id, deployment.applicationId(), tokenSupported))
+ .orElseGet(List::of);
+ if (!generatedForCluster.isEmpty()) {
+ generatedEndpointsByCluster.put(clusterId, generatedForCluster);
+ }
+ endpoints = endpoints.and(endpointsOf(deployment, clusterId, generatedForCluster).scope(Scope.zone));
+ }
+ generatedEndpoints = new GeneratedEndpoints(generatedEndpointsByCluster);
+
+ } else {
+ generatedEndpoints = GeneratedEndpoints.empty;
+ }
+
+ // Add global- and application-scoped endpoints
+ endpoints = endpoints.and(declaredEndpointsOf(application.get().id(), application.get().deploymentSpec(), generatedEndpoints).targets(deployment));
+ PreparedEndpoints prepared = new PreparedEndpoints(deployment,
+ endpoints,
+ application.get().require(deployment.applicationId().instance()).rotations(),
+ certificate);
+
+ // Register rotation-backed endpoints in DNS
+ registerRotationEndpointsInDns(prepared);
+
+ return prepared;
+ }
+
+ /** Read and return zone- and region-scoped endpoints for given deployment */
public EndpointList readEndpointsOf(DeploymentId deployment) {
- boolean addTokenEndpoint = tokenEndpointEnabled(deployment.applicationId());
Set<Endpoint> endpoints = new LinkedHashSet<>();
- // To discover the cluster name for a zone-scoped endpoint, we need to read the routing policy
for (var policy : routingPolicies.read(deployment)) {
- RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(policy.id().zone());
- endpoints.addAll(policy.zoneEndpointsIn(controller.system(), routingMethod, addTokenEndpoint));
- endpoints.add(policy.regionEndpointIn(controller.system(), routingMethod, Optional.empty()));
- for (var ge : policy.generatedEndpoints()) {
- boolean include = switch (ge.authMethod()) {
- case token -> addTokenEndpoint;
- case mtls -> true;
- };
- if (include) {
- endpoints.add(policy.regionEndpointIn(controller.system(), routingMethod, Optional.of(ge)));
+ endpoints.addAll(endpointsOf(deployment, policy.id().cluster(), policy.generatedEndpoints()).asList());
+ }
+ return EndpointList.copyOf(endpoints);
+ }
+
+ /** Returns the zone- and region-scoped endpoints of given deployment */
+ public EndpointList endpointsOf(DeploymentId deployment, ClusterSpec.Id cluster, List<GeneratedEndpoint> generatedEndpoints) {
+ // TODO(mpolden): Support tokens only when generated endpoints are available
+ boolean tokenSupported = tokenEndpointEnabled(deployment.applicationId()) &&
+ (generatedEndpoints.isEmpty() || generatedEndpoints.stream().anyMatch(ge -> ge.authMethod() == AuthMethod.token));
+ RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(deployment.zoneId());
+ boolean isProduction = deployment.zoneId().environment().isProduction();
+ List<Endpoint> endpoints = new ArrayList<>();
+ Endpoint.EndpointBuilder zoneEndpoint = Endpoint.of(deployment.applicationId())
+ .routingMethod(routingMethod)
+ .on(Port.fromRoutingMethod(routingMethod))
+ .target(cluster, deployment);
+ endpoints.add(zoneEndpoint.in(controller.system()));
+ if (tokenSupported) {
+ endpoints.add(zoneEndpoint.authMethod(AuthMethod.token).in(controller.system()));
+ }
+ Endpoint.EndpointBuilder regionEndpoint = Endpoint.of(deployment.applicationId())
+ .routingMethod(routingMethod)
+ .on(Port.fromRoutingMethod(routingMethod))
+ .targetRegion(cluster, deployment.zoneId());
+ // Region endpoints are only used by global- and application-endpoints and are thus only needed in
+ // production environments
+ if (isProduction) {
+ endpoints.add(regionEndpoint.in(controller.system()));
+ }
+ for (var generatedEndpoint : generatedEndpoints) {
+ boolean include = switch (generatedEndpoint.authMethod()) {
+ case token -> tokenSupported;
+ case mtls -> true;
+ };
+ if (include) {
+ endpoints.add(zoneEndpoint.generatedFrom(generatedEndpoint).in(controller.system()));
+ if (isProduction) {
+ endpoints.add(regionEndpoint.generatedFrom(generatedEndpoint).in(controller.system()));
}
}
}
@@ -147,43 +220,47 @@ public class RoutingController {
/** Read application and return declared endpoints for given application */
public EndpointList readDeclaredEndpointsOf(TenantAndApplicationId application) {
- return declaredEndpointsOf(controller.applications().requireApplication(application));
+ return readDeclaredEndpointsOf(controller.applications().requireApplication(application));
+ }
+
+ public EndpointList readDeclaredEndpointsOf(Application application) {
+ return declaredEndpointsOf(application.id(), application.deploymentSpec(), readMultiDeploymentGeneratedEndpoints(application.id()));
}
/** Returns endpoints declared in {@link DeploymentSpec} for given application */
- public EndpointList declaredEndpointsOf(Application application) {
- List<GeneratedEndpoint> generatedEndpoints = readGeneratedEndpoints(application);
+ private EndpointList declaredEndpointsOf(TenantAndApplicationId application, DeploymentSpec deploymentSpec, GeneratedEndpoints generatedEndpoints) {
Set<Endpoint> endpoints = new LinkedHashSet<>();
- DeploymentSpec deploymentSpec = application.deploymentSpec();
+ // Global endpoints
for (var spec : deploymentSpec.instances()) {
- ApplicationId instance = application.id().instance(spec.name());
- // Add endpoints declared with current syntax
+ ApplicationId instance = application.instance(spec.name());
spec.endpoints().forEach(declaredEndpoint -> {
RoutingId routingId = RoutingId.of(instance, EndpointId.of(declaredEndpoint.endpointId()));
List<DeploymentId> deployments = declaredEndpoint.regions().stream()
.map(region -> new DeploymentId(instance,
ZoneId.from(Environment.prod, region)))
.toList();
- endpoints.addAll(computeGlobalEndpoints(routingId, ClusterSpec.Id.from(declaredEndpoint.containerId()), deployments, generatedEndpoints));
+ ClusterSpec.Id cluster = ClusterSpec.Id.from(declaredEndpoint.containerId());
+ endpoints.addAll(computeGlobalEndpoints(routingId, cluster, deployments, generatedEndpoints));
});
}
- // Add application endpoints
+ // Application endpoints
for (var declaredEndpoint : deploymentSpec.endpoints()) {
Map<DeploymentId, Integer> deployments = declaredEndpoint.targets().stream()
- .collect(toMap(t -> new DeploymentId(application.id().instance(t.instance()),
+ .collect(toMap(t -> new DeploymentId(application.instance(t.instance()),
ZoneId.from(Environment.prod, t.region())),
t -> t.weight()));
ZoneId zone = deployments.keySet().iterator().next().zoneId(); // Where multiple zones are possible, they all have the same routing method.
RoutingMethod routingMethod = usesSharedRouting(zone) ? RoutingMethod.sharedLayer4 : RoutingMethod.exclusive;
- Endpoint.EndpointBuilder builder = Endpoint.of(application.id())
+ ClusterSpec.Id cluster = ClusterSpec.Id.from(declaredEndpoint.containerId());
+ Endpoint.EndpointBuilder builder = Endpoint.of(application)
.targetApplication(EndpointId.of(declaredEndpoint.endpointId()),
- ClusterSpec.Id.from(declaredEndpoint.containerId()),
+ cluster,
deployments)
.routingMethod(routingMethod)
.on(Port.fromRoutingMethod(routingMethod));
endpoints.add(builder.in(controller.system()));
- for (var ge : generatedEndpoints) {
+ for (var ge : generatedEndpoints.cluster(cluster)) {
endpoints.add(builder.generatedFrom(ge).in(controller.system()));
}
}
@@ -195,6 +272,7 @@ public class RoutingController {
TreeMap<ZoneId, List<Endpoint>> endpoints = new TreeMap<>(Comparator.comparing(ZoneId::value));
for (var deployment : deployments) {
EndpointList zoneEndpoints = readEndpointsOf(deployment).scope(Endpoint.Scope.zone)
+ .authMethod(AuthMethod.mtls)
.not().legacy();
EndpointList directEndpoints = zoneEndpoints.direct();
if (!directEndpoints.isEmpty()) {
@@ -255,62 +333,47 @@ public class RoutingController {
return Collections.unmodifiableList(endpointDnsNames);
}
- /** Returns the global and application-level endpoints for given deployment, as container endpoints */
- public Set<ContainerEndpoint> containerEndpointsOf(LockedApplication application, InstanceName instanceName, ZoneId zone) {
- // Assign rotations to application
- for (var deploymentInstanceSpec : application.get().deploymentSpec().instances()) {
- if (deploymentInstanceSpec.concerns(Environment.prod)) {
- application = controller.routing().assignRotations(application, deploymentInstanceSpec.name());
- }
+ /** Remove endpoints in DNS for all rotations assigned to given instance */
+ public void removeRotationEndpointsFromDns(Application application, InstanceName instanceName) {
+ Set<Endpoint> endpointsToRemove = new LinkedHashSet<>();
+ Instance instance = application.require(instanceName);
+ // Compute endpoints from rotations. When removing DNS records for rotation-based endpoints we cannot use the
+ // deployment spec, because submitting an empty deployment spec is the first step of removing an application
+ for (var rotation : instance.rotations()) {
+ var deployments = rotation.regions().stream()
+ .map(region -> new DeploymentId(instance.id(), ZoneId.from(Environment.prod, region)))
+ .toList();
+ endpointsToRemove.addAll(computeGlobalEndpoints(RoutingId.of(instance.id(), rotation.endpointId()),
+ rotation.clusterId(), deployments, readMultiDeploymentGeneratedEndpoints(application.id())));
}
+ endpointsToRemove.forEach(endpoint -> controller.nameServiceForwarder()
+ .removeRecords(Record.Type.CNAME,
+ RecordName.from(endpoint.dnsName()),
+ Priority.normal,
+ Optional.of(application.id())));
+ }
- // Add endpoints backed by a rotation, and register them in DNS if necessary
- Instance instance = application.get().require(instanceName);
- Set<ContainerEndpoint> containerEndpoints = new HashSet<>();
- DeploymentId deployment = new DeploymentId(instance.id(), zone);
- EndpointList endpoints = declaredEndpointsOf(application.get()).targets(deployment);
- EndpointList globalEndpoints = endpoints.scope(Endpoint.Scope.global);
- for (var assignedRotation : instance.rotations()) {
+ private void registerRotationEndpointsInDns(PreparedEndpoints prepared) {
+ TenantAndApplicationId owner = TenantAndApplicationId.from(prepared.deployment().applicationId());
+ EndpointList globalEndpoints = prepared.endpoints().scope(Scope.global);
+ for (var assignedRotation : prepared.rotations()) {
EndpointList rotationEndpoints = globalEndpoints.named(assignedRotation.endpointId(), Scope.global)
.requiresRotation();
-
// Skip rotations which do not apply to this zone
- if (!assignedRotation.regions().contains(zone.region())) {
+ if (!assignedRotation.regions().contains(prepared.deployment().zoneId().region())) {
continue;
}
-
// Register names in DNS
Rotation rotation = rotationRepository.requireRotation(assignedRotation.rotationId());
for (var endpoint : rotationEndpoints) {
controller.nameServiceForwarder().createRecord(
new Record(Record.Type.CNAME, RecordName.from(endpoint.dnsName()), RecordData.fqdn(rotation.name())),
Priority.normal,
- Optional.of(application.get().id()));
- List<String> names = List.of(endpoint.dnsName(),
- // Include rotation ID as a valid name of this container endpoint
- // (required by global routing health checks)
- assignedRotation.rotationId().asString());
- containerEndpoints.add(new ContainerEndpoint(assignedRotation.clusterId().value(),
- asString(Endpoint.Scope.global),
- names,
- OptionalInt.empty(),
- endpoint.routingMethod()));
+ Optional.of(owner)
+ );
}
}
- // Add endpoints not backed by a rotation (i.e. other routing methods so that the config server always knows
- // about global names, even when not using rotations)
- globalEndpoints.not().requiresRotation()
- .groupingBy(Endpoint::cluster)
- .forEach((clusterId, clusterEndpoints) -> {
- containerEndpoints.add(new ContainerEndpoint(clusterId.value(),
- asString(Endpoint.Scope.global),
- clusterEndpoints.mapToList(Endpoint::dnsName),
- OptionalInt.empty(),
- RoutingMethod.exclusive));
- });
- // Add application endpoints
- EndpointList applicationEndpoints = endpoints.scope(Endpoint.Scope.application);
- for (var endpoint : applicationEndpoints.shared()) { // DNS for non-shared endpoints is handled by RoutingPolicies
+ for (var endpoint : prepared.endpoints().scope(Scope.application).shared()) { // DNS for non-shared application endpoints is handled by RoutingPolicies
Set<ZoneId> targetZones = endpoint.targets().stream()
.map(t -> t.deployment().zoneId())
.collect(Collectors.toUnmodifiableSet());
@@ -323,79 +386,31 @@ public class RoutingController {
controller.nameServiceForwarder().createRecord(
new Record(Record.Type.CNAME, RecordName.from(endpoint.dnsName()), RecordData.fqdn(vipHostname)),
Priority.normal,
- Optional.of(application.get().id()));
- }
- Map<ClusterSpec.Id, EndpointList> applicationEndpointsByCluster = applicationEndpoints.groupingBy(Endpoint::cluster);
- for (var kv : applicationEndpointsByCluster.entrySet()) {
- ClusterSpec.Id clusterId = kv.getKey();
- EndpointList clusterEndpoints = kv.getValue();
- for (var endpoint : clusterEndpoints) {
- Optional<Endpoint.Target> matchingTarget = endpoint.targets().stream()
- .filter(t -> t.routesTo(deployment))
- .findFirst();
- if (matchingTarget.isEmpty()) throw new IllegalStateException("No target found routing to " + deployment + " in " + endpoint);
- containerEndpoints.add(new ContainerEndpoint(clusterId.value(),
- asString(Endpoint.Scope.application),
- List.of(endpoint.dnsName()),
- OptionalInt.of(matchingTarget.get().weight()),
- endpoint.routingMethod()));
- }
+ Optional.of(owner));
}
- return Collections.unmodifiableSet(containerEndpoints);
- }
-
- /** Remove endpoints in DNS for all rotations assigned to given instance */
- public void removeEndpointsInDns(Application application, InstanceName instanceName) {
- Set<Endpoint> endpointsToRemove = new LinkedHashSet<>();
- Instance instance = application.require(instanceName);
- // Compute endpoints from rotations. When removing DNS records for rotation-based endpoints we cannot use the
- // deployment spec, because submitting an empty deployment spec is the first step of removing an application
- for (var rotation : instance.rotations()) {
- var deployments = rotation.regions().stream()
- .map(region -> new DeploymentId(instance.id(), ZoneId.from(Environment.prod, region)))
- .toList();
- endpointsToRemove.addAll(computeGlobalEndpoints(RoutingId.of(instance.id(), rotation.endpointId()),
- rotation.clusterId(), deployments, readGeneratedEndpoints(application)));
- }
- endpointsToRemove.forEach(endpoint -> controller.nameServiceForwarder()
- .removeRecords(Record.Type.CNAME,
- RecordName.from(endpoint.dnsName()),
- Priority.normal,
- Optional.of(application.id())));
}
/** Generate endpoints for all authentication methods, using given application part */
- public List<GeneratedEndpoint> generateEndpoints(String applicationPart, ApplicationId instance) {
+ private List<GeneratedEndpoint> generateEndpoints(String applicationPart, ApplicationId instance, boolean token) {
if (!randomizedEndpointsEnabled(instance)) {
return List.of();
}
- return generateEndpoints(applicationPart);
- }
-
-
- private List<GeneratedEndpoint> generateEndpoints(String applicationPart) {
- return Arrays.stream(Endpoint.AuthMethod.values())
+ return Arrays.stream(AuthMethod.values())
+ .filter(method -> method != AuthMethod.token || token)
.map(method -> new GeneratedEndpoint(GeneratedEndpoint.createPart(controller.random(true)),
applicationPart,
method))
.toList();
}
- /** This is only suitable for use in declared endpoints, which ignore the randomly generated cluster part */
- private List<GeneratedEndpoint> readGeneratedEndpoints(Application application) {
- boolean includeTokenEndpoint = application.productionInstances().values().stream()
- .map(Instance::id)
- .anyMatch(this::tokenEndpointEnabled);
- Optional<String> randomizedId = controller.curator().readAssignedCertificate(application.id(), Optional.empty())
- .map(AssignedCertificate::certificate)
- .flatMap(EndpointCertificate::randomizedId);
- if (randomizedId.isEmpty()) {
- return List.of();
+    /** Returns the generated endpoints suitable for use in endpoints whose scope is {@link Scope#multiDeployment()} */
+ private GeneratedEndpoints readMultiDeploymentGeneratedEndpoints(TenantAndApplicationId application) {
+ Map<ClusterSpec.Id, List<GeneratedEndpoint>> endpoints = new HashMap<>();
+ for (var policy : policies().read(application)) {
+ // The cluster part is not used in this context because multi-deployment endpoints have a user-controlled name
+ endpoints.putIfAbsent(policy.id().cluster(), policy.generatedEndpoints().stream().toList());
}
- return generateEndpoints(randomizedId.get()).stream().filter(endpoint -> switch (endpoint.authMethod()) {
- case token -> includeTokenEndpoint;
- case mtls -> true;
- }).toList();
+ return new GeneratedEndpoints(endpoints);
}
/**
@@ -435,7 +450,7 @@ public class RoutingController {
}
/** Compute global endpoints for given routing ID, application and deployments */
- private List<Endpoint> computeGlobalEndpoints(RoutingId routingId, ClusterSpec.Id cluster, List<DeploymentId> deployments, List<GeneratedEndpoint> generatedEndpoints) {
+ private List<Endpoint> computeGlobalEndpoints(RoutingId routingId, ClusterSpec.Id cluster, List<DeploymentId> deployments, GeneratedEndpoints generatedEndpoints) {
var endpoints = new ArrayList<Endpoint>();
var directMethods = 0;
var availableRoutingMethods = routingMethodsOfAll(deployments);
@@ -449,14 +464,15 @@ public class RoutingController {
.on(Port.fromRoutingMethod(method))
.routingMethod(method);
endpoints.add(builder.in(controller.system()));
- for (var ge : generatedEndpoints) {
+ for (var ge : generatedEndpoints.cluster(cluster)) {
endpoints.add(builder.generatedFrom(ge).in(controller.system()));
}
}
return endpoints;
}
- public boolean tokenEndpointEnabled(ApplicationId instance) {
+
+ private boolean tokenEndpointEnabled(ApplicationId instance) {
return createTokenEndpoint.with(FetchVector.Dimension.APPLICATION_ID, instance.serializedForm()).value();
}
@@ -472,13 +488,5 @@ public class RoutingController {
return 'v' + base32 + Endpoint.internalDnsSuffix(system);
}
- private static String asString(Endpoint.Scope scope) {
- return switch (scope) {
- case application -> "application";
- case global -> "global";
- case weighted -> "weighted";
- case zone -> "zone";
- };
- }
}
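
The new RoutingController.endpointsOf centralizes which endpoints a cluster gets: a zone endpoint always, a token zone endpoint when tokens are enabled, a region endpoint only in production (it only backs global/application endpoints), and generated variants of each. A minimal decision-table sketch in plain Java that enumerates labels instead of building real Endpoint objects:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch of the endpoint-selection rules in endpointsOf; labels only, no EndpointBuilder.
    public class EndpointRulesSketch {

        enum Auth { mtls, token }

        static List<String> endpointKinds(boolean production, boolean tokenSupported, List<Auth> generated) {
            List<String> kinds = new ArrayList<>();
            kinds.add("zone/mtls");                                  // always present
            if (tokenSupported) kinds.add("zone/token");             // only when tokens are enabled
            if (production) kinds.add("region/mtls");                // region endpoints back global/application endpoints
            for (Auth auth : generated) {
                if (auth == Auth.token && !tokenSupported) continue; // skip generated token endpoints when unsupported
                kinds.add("zone/generated-" + auth);
                if (production) kinds.add("region/generated-" + auth);
            }
            return kinds;
        }

        public static void main(String[] args) {
            System.out.println(endpointKinds(true, true, List.of(Auth.mtls, Auth.token)));
            // [zone/mtls, zone/token, region/mtls, zone/generated-mtls, region/generated-mtls, zone/generated-token, region/generated-token]
            System.out.println(endpointKinds(false, false, List.of(Auth.mtls)));
            // [zone/mtls, zone/generated-mtls]
        }
    }
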
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
index 1a4095001ff..010bc023dad 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Endpoint.java
@@ -6,6 +6,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.text.Text;
@@ -165,7 +166,7 @@ public class Endpoint {
@Override
public String toString() {
- return Text.format("endpoint %s [scope=%s, legacy=%s, routingMethod=%s, authMethod=%s]", url, scope, legacy, routingMethod, authMethod);
+ return Text.format("endpoint %s [scope=%s, legacy=%s, routingMethod=%s, authMethod=%s, name=%s]", url, scope, legacy, routingMethod, authMethod, name());
}
private static String endpointOrClusterAsString(EndpointId id, ClusterSpec.Id cluster) {
@@ -405,12 +406,6 @@ public class Endpoint {
}
- /** An endpoint's authentication method */
- public enum AuthMethod {
- mtls,
- token,
- }
-
/** Represents an endpoint's HTTP port */
public record Port(int port) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java
index dcc3e229f92..310a78e45f0 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/EndpointList.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.controller.application;
import com.yahoo.collections.AbstractFilteringList;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import java.util.Collection;
@@ -94,10 +95,19 @@ public class EndpointList extends AbstractFilteringList<Endpoint, EndpointList>
return matching(endpoint -> endpoint.routingMethod().isShared());
}
+ /** Returns the subset of endpoints supporting given authentication method */
+ public EndpointList authMethod(AuthMethod authMethod) {
+ return matching(endpoint -> endpoint.authMethod() == authMethod);
+ }
+
public static EndpointList copyOf(Collection<Endpoint> endpoints) {
return new EndpointList(endpoints, false);
}
+ public static EndpointList of(Endpoint ...endpoint) {
+ return copyOf(List.of(endpoint));
+ }
+
@Override
public String toString() {
return asList().toString();
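
The new authMethod(...) filter follows the same idiom as the existing EndpointList filters: each filter returns a fresh list, so calls chain like scope(zone).authMethod(mtls).not().legacy() in the ConfigServer hunk above. A simplified, hypothetical stand-in (the real class extends AbstractFilteringList):

    import java.util.List;
    import java.util.function.Predicate;

    // Sketch of the filtering-list idiom behind EndpointList.authMethod; stand-in types only.
    record MiniEndpoint(String scope, String auth) { }

    class MiniEndpointList {
        private final List<MiniEndpoint> items;
        MiniEndpointList(List<MiniEndpoint> items) { this.items = List.copyOf(items); }

        MiniEndpointList matching(Predicate<MiniEndpoint> p) {
            return new MiniEndpointList(items.stream().filter(p).toList());
        }
        MiniEndpointList scope(String scope) { return matching(e -> e.scope().equals(scope)); }
        MiniEndpointList authMethod(String auth) { return matching(e -> e.auth().equals(auth)); }
        List<MiniEndpoint> asList() { return items; }

        public static void main(String[] args) {
            var all = new MiniEndpointList(List.of(new MiniEndpoint("zone", "mtls"),
                                                   new MiniEndpoint("zone", "token"),
                                                   new MiniEndpoint("global", "mtls")));
            System.out.println(all.scope("zone").authMethod("mtls").asList()); // [MiniEndpoint[scope=zone, auth=mtls]]
        }
    }
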
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/GeneratedEndpoint.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/GeneratedEndpoint.java
index dd6f4e5111d..a9d6dcb08f9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/GeneratedEndpoint.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/GeneratedEndpoint.java
@@ -1,6 +1,7 @@
package com.yahoo.vespa.hosted.controller.application;
import ai.vespa.validation.Validation;
+import com.yahoo.config.provision.zone.AuthMethod;
import java.util.random.RandomGenerator;
import java.util.regex.Pattern;
@@ -11,7 +12,7 @@ import java.util.regex.Pattern;
*
* @author mpolden
*/
-public record GeneratedEndpoint(String clusterPart, String applicationPart, Endpoint.AuthMethod authMethod) {
+public record GeneratedEndpoint(String clusterPart, String applicationPart, AuthMethod authMethod) {
private static final Pattern PART_PATTERN = Pattern.compile("^[a-f][a-f0-9]{7}$");
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java
index 56b7ffd01f9..9d909cb5ebf 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java
@@ -29,7 +29,7 @@ public enum SystemApplication {
configServerHost(InfrastructureApplication.CONFIG_SERVER_HOST),
configServer(InfrastructureApplication.CONFIG_SERVER),
proxyHost(InfrastructureApplication.PROXY_HOST),
- proxy(InfrastructureApplication.PROXY, proxyHost, configServer),
+ proxy(InfrastructureApplication.PROXY, configServer),
tenantHost(InfrastructureApplication.TENANT_HOST);
/** The tenant owning all system applications */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java
index 3ec79b03ee8..3ec7f120726 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java
@@ -22,8 +22,11 @@ import com.yahoo.vespa.archive.ArchiveStreamReader;
import com.yahoo.vespa.archive.ArchiveStreamReader.ArchiveFile;
import com.yahoo.vespa.archive.ArchiveStreamReader.Options;
import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.deployment.ZipBuilder;
import com.yahoo.yolean.Exceptions;
+import org.w3c.dom.Document;
+
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
@@ -136,6 +139,25 @@ public class ApplicationPackage {
*/
public ValidationOverrides validationOverrides() { return validationOverrides; }
+ /** Returns a basic variant of services.xml contained in this package, pre-processed according to given deployment and tags */
+ public BasicServicesXml services(DeploymentId deployment, Tags tags) {
+ FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile));
+ if (!servicesXml.exists()) return BasicServicesXml.empty;
+ try {
+ Document document = new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")),
+ new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8),
+ deployment.applicationId().instance(),
+ deployment.zoneId().environment(),
+ deployment.zoneId().region(),
+ tags).run();
+ return BasicServicesXml.parse(document);
+ } catch (IllegalArgumentException e) {
+ throw e;
+ } catch (Exception e) {
+ throw new IllegalArgumentException(e);
+ }
+ }
+
/** Returns the platform version which package was compiled against, if known. */
public Optional<Version> compileVersion() { return compileVersion; }
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/BasicServicesXml.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/BasicServicesXml.java
new file mode 100644
index 00000000000..9eb10857526
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/BasicServicesXml.java
@@ -0,0 +1,92 @@
+package com.yahoo.vespa.hosted.controller.application.pkg;
+
+import com.yahoo.text.XML;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+/**
+ * A partially parsed variant of services.xml, for use by the {@link com.yahoo.vespa.hosted.controller.Controller}.
+ *
+ * @author mpolden
+ */
+public record BasicServicesXml(List<Container> containers) {
+
+ public static final BasicServicesXml empty = new BasicServicesXml(List.of());
+
+ private static final String SERVICES_TAG = "services";
+ private static final String CONTAINER_TAG = "container";
+ private static final String CLIENTS_TAG = "clients";
+ private static final String CLIENT_TAG = "client";
+ private static final String TOKEN_TAG = "token";
+
+ public BasicServicesXml(List<Container> containers) {
+ this.containers = List.copyOf(Objects.requireNonNull(containers));
+ }
+
+ /** Parse a services.xml from given document */
+ public static BasicServicesXml parse(Document document) {
+ Element root = document.getDocumentElement();
+ if (!root.getTagName().equals("services")) {
+ throw new IllegalArgumentException("Root tag must be <" + SERVICES_TAG + ">");
+ }
+ List<BasicServicesXml.Container> containers = new ArrayList<>();
+ for (var childNode : XML.getChildren(root)) {
+ if (childNode.getTagName().equals(CONTAINER_TAG)) {
+ String id = childNode.getAttribute("id");
+ if (id.isEmpty()) throw new IllegalArgumentException(CONTAINER_TAG + " tag requires 'id' attribute");
+ List<Container.AuthMethod> methods = parseAuthMethods(childNode);
+ containers.add(new Container(id, methods));
+ }
+ }
+ return new BasicServicesXml(containers);
+ }
+
+ private static List<BasicServicesXml.Container.AuthMethod> parseAuthMethods(Element containerNode) {
+ List<BasicServicesXml.Container.AuthMethod> methods = new ArrayList<>();
+ for (var node : XML.getChildren(containerNode)) {
+ if (node.getTagName().equals(CLIENTS_TAG)) {
+ for (var clientNode : XML.getChildren(node)) {
+ if (clientNode.getTagName().equals(CLIENT_TAG)) {
+ boolean tokenEnabled = XML.getChildren(clientNode).stream()
+ .anyMatch(n -> n.getTagName().equals(TOKEN_TAG));
+ methods.add(tokenEnabled ? Container.AuthMethod.token : Container.AuthMethod.mtls);
+ }
+ }
+ }
+ }
+ if (methods.isEmpty()) {
+ methods.add(Container.AuthMethod.mtls);
+ }
+ return methods;
+ }
+
+ /**
+ * A Vespa container service.
+ *
+ * @param id ID of container
+ * @param authMethods Authentication methods supported by this container
+ */
+ public record Container(String id, List<AuthMethod> authMethods) {
+
+ public Container(String id, List<AuthMethod> authMethods) {
+ this.id = Objects.requireNonNull(id);
+ this.authMethods = Objects.requireNonNull(authMethods).stream()
+ .distinct()
+ .sorted()
+ .collect(Collectors.toList());
+ if (authMethods.isEmpty()) throw new IllegalArgumentException("Container must have at least one auth method");
+ }
+
+ public enum AuthMethod {
+ mtls,
+ token,
+ }
+
+ }
+
+}
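
A usage sketch for the new parser, assuming the BasicServicesXml class added above is on the classpath: a container whose <clients> declare a <token> element maps to AuthMethod.token, while a container without clients falls back to AuthMethod.mtls. The XML content here is illustrative.

    import com.yahoo.vespa.hosted.controller.application.pkg.BasicServicesXml;
    import org.w3c.dom.Document;

    import javax.xml.parsers.DocumentBuilderFactory;
    import java.io.ByteArrayInputStream;
    import java.nio.charset.StandardCharsets;

    // Builds a DOM document from an example services.xml and parses it with BasicServicesXml.
    public class BasicServicesXmlExample {

        public static void main(String[] args) throws Exception {
            String servicesXml = """
                    <services version="1.0">
                      <container id="default" version="1.0">
                        <clients>
                          <client id="token-client">
                            <token id="my-token"/>
                          </client>
                        </clients>
                      </container>
                      <container id="plain" version="1.0"/>
                    </services>
                    """;
            Document document = DocumentBuilderFactory.newInstance()
                    .newDocumentBuilder()
                    .parse(new ByteArrayInputStream(servicesXml.getBytes(StandardCharsets.UTF_8)));
            BasicServicesXml parsed = BasicServicesXml.parse(document);
            // Expected: container "default" with [token], container "plain" with [mtls]
            parsed.containers().forEach(c -> System.out.println(c.id() + " -> " + c.authMethods()));
        }
    }
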
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java
index b4c9b2ebd57..120c0a89f45 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java
@@ -97,7 +97,9 @@ public class TestPackage {
keyPair = null;
this.certificate = null;
}
- this.applicationPackageStream = new ApplicationPackageStream(inZip, () -> __ -> false, () -> new Replacer() {
+ boolean isEnclave = isPublicSystem &&
+ !spec.cloudAccount(cloud, id.application().instance(), id.type().zone()).isUnspecified();
+ this.applicationPackageStream = new ApplicationPackageStream(inZip, () -> name -> name.endsWith(".xml"), () -> new Replacer() {
// Initially skips all declared entries, ensuring they're generated and appended after all input entries.
final Map<String, UnaryOperator<InputStream>> entries = new HashMap<>();
@@ -127,7 +129,7 @@ public class TestPackage {
__ -> new ByteArrayInputStream(servicesXml( ! isPublicSystem,
certificateValidFrom != null,
hasLegacyTests,
- testerResourcesFor(id.type().zone(), spec.requireInstance(id.application().instance())),
+ testerResourcesFor(id.type().zone(), spec.requireInstance(id.application().instance()), isEnclave),
testerApp)));
entries.put(deploymentFile,
@@ -225,7 +227,7 @@ public class TestPackage {
return new TestSummary(problems, suites);
}
- static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
+ static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec, boolean isEnclave) {
NodeResources nodeResources = spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
@@ -233,6 +235,7 @@ public class TestPackage {
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().matches("^(aws|gcp)-.*") ? DEFAULT_TESTER_RESOURCES_CLOUD
: DEFAULT_TESTER_RESOURCES);
+ if (isEnclave) nodeResources = nodeResources.with(NodeResources.Architecture.x86_64);
return nodeResources.with(NodeResources.DiskSpeed.any);
}
@@ -245,8 +248,8 @@ public class TestPackage {
// Of the remaining memory, split 50/50 between Surefire running the tests and the rest
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
- String resourceString = Text.format("<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
- resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
+ String resourceString = Text.format("<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\" architecture=\"%s\"/>",
+ resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name(), resources.architecture().name());
String runtimeProviderClass = config.runtimeProviderClass();
String tenantCdBundle = config.tenantCdBundle();
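
The tester <resources/> element now also carries the node architecture, and enclave deployments pin tester nodes to x86_64. A small sketch of the extended formatting, using String.format with Locale.US as a stand-in for Text.format; the values are illustrative:

    import java.util.Locale;

    // Sketch of the extended tester <resources/> element, now including architecture.
    public class TesterResourcesSketch {
        public static void main(String[] args) {
            double vcpu = 2, memoryGb = 12, diskGb = 75;
            String diskSpeed = "any", storageType = "any";
            String architecture = "x86_64";                         // forced for enclave deployments
            String resources = String.format(Locale.US,
                    "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\" architecture=\"%s\"/>",
                    vcpu, memoryGb, diskGb, diskSpeed, storageType, architecture);
            System.out.println(resources);
            // <resources vcpu="2.00" memory="12.00Gb" disk="75.00Gb" disk-speed="any" storage-type="any" architecture="x86_64"/>
        }
    }
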
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index 07695c17042..919facee0c1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -7,10 +7,12 @@ import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.Notifications;
import com.yahoo.config.application.api.Notifications.When;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.EndpointsChecker;
import com.yahoo.config.provision.EndpointsChecker.Availability;
import com.yahoo.config.provision.EndpointsChecker.Status;
+import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.RoutingMethod;
@@ -60,6 +62,7 @@ import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Consumer;
+import java.util.function.Predicate;
import java.util.function.Supplier;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -83,6 +86,7 @@ import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.running;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.success;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.testFailure;
import static com.yahoo.vespa.hosted.controller.deployment.Step.Status.succeeded;
+import static com.yahoo.vespa.hosted.controller.deployment.Step.Status.unfinished;
import static com.yahoo.vespa.hosted.controller.deployment.Step.copyVespaLogs;
import static com.yahoo.vespa.hosted.controller.deployment.Step.deactivateReal;
import static com.yahoo.vespa.hosted.controller.deployment.Step.deactivateTester;
@@ -92,7 +96,10 @@ import static com.yahoo.vespa.hosted.controller.deployment.Step.deployTester;
import static com.yahoo.vespa.hosted.controller.deployment.Step.installTester;
import static com.yahoo.vespa.hosted.controller.deployment.Step.report;
import static com.yahoo.yolean.Exceptions.uncheck;
+import static com.yahoo.yolean.Exceptions.uncheckInterruptedAndRestoreFlag;
+import static java.lang.Math.min;
import static java.util.Objects.requireNonNull;
+import static java.util.function.Predicate.not;
import static java.util.logging.Level.FINE;
import static java.util.logging.Level.INFO;
import static java.util.logging.Level.WARNING;
@@ -131,15 +138,15 @@ public class InternalStepRunner implements StepRunner {
try {
return switch (step.get()) {
case deployTester -> deployTester(id, logger);
+ case installTester -> installTester(id, logger);
case deployInitialReal -> deployInitialReal(id, logger);
case installInitialReal -> installInitialReal(id, logger);
case deployReal -> deployReal(id, logger);
- case installTester -> installTester(id, logger);
case installReal -> installReal(id, logger);
case startStagingSetup -> startTests(id, true, logger);
case endStagingSetup -> endTests(id, true, logger);
- case endTests -> endTests(id, false, logger);
case startTests -> startTests(id, false, logger);
+ case endTests -> endTests(id, false, logger);
case copyVespaLogs -> copyVespaLogs(id, logger);
case deactivateReal -> deactivateReal(id, logger);
case deactivateTester -> deactivateTester(id, logger);
@@ -148,9 +155,9 @@ public class InternalStepRunner implements StepRunner {
} catch (UncheckedIOException e) {
logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e);
return Optional.empty();
- } catch (RuntimeException|LinkageError e) {
+ } catch (RuntimeException | LinkageError e) {
logger.log(WARNING, "Unexpected exception running " + id, e);
- if (step.get().alwaysRun() && !(e instanceof LinkageError)) {
+ if (step.get().alwaysRun() && ! (e instanceof LinkageError)) {
logger.log("Will keep trying, as this is a cleanup step.");
return Optional.empty();
}
@@ -176,7 +183,10 @@ public class InternalStepRunner implements StepRunner {
private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) {
Optional<X509Certificate> testerCertificate = controller.jobController().run(id).testerCertificate();
- return deploy(() -> controller.applications().deploy(id.job(), setTheStage, logger::log),
+ return deploy(() -> controller.applications().deploy(id.job(),
+ setTheStage,
+ logger::log,
+ account -> getCloudAccountWithOverrideForStaging(id, account)),
controller.jobController().run(id)
.stepInfo(setTheStage ? deployInitialReal : deployReal).get()
.startTime().get(),
@@ -198,7 +208,8 @@ public class InternalStepRunner implements StepRunner {
return deploy(() -> controller.applications().deployTester(id.tester(),
testerPackage(id),
id.type().zone(),
- platform),
+ platform,
+ cloudAccount -> setCloudAccountForStaging(id, cloudAccount)),
controller.jobController().run(id)
.stepInfo(deployTester).get()
.startTime().get(),
@@ -206,6 +217,36 @@ public class InternalStepRunner implements StepRunner {
logger);
}
+ private Optional<CloudAccount> setCloudAccountForStaging(RunId id, Optional<CloudAccount> account) {
+ if (id.type().environment() == Environment.staging) {
+ controller.jobController().locked(id, run -> run.with(account.orElse(CloudAccount.empty)));
+ }
+ return account;
+ }
+
+ private Optional<CloudAccount> getCloudAccountWithOverrideForStaging(RunId id, Optional<CloudAccount> account) {
+ if (id.type().environment() == Environment.staging) {
+ Instant doom = controller.clock().instant().plusSeconds(60); // Sleeping is bad, but we're already in a sleepy code path: deployment.
+ while (true) {
+ Run run = controller.jobController().run(id);
+ Optional<CloudAccount> stored = run.cloudAccount();
+ if (stored.isPresent())
+ return stored.filter(not(CloudAccount.empty::equals));
+
+ // TODO jonmv: remove with next release
+ if (run.stepStatus(deployTester).get() != unfinished)
+ return account; // Use original value for runs which started prior to this code change, and resumed after. Extremely unlikely :>
+
+ long millisToDoom = Duration.between(controller.clock().instant(), doom).toMillis();
+ if (millisToDoom > 0)
+ uncheckInterruptedAndRestoreFlag(() -> Thread.sleep(min(millisToDoom, 5000)));
+ else
+ throw new CloudAccountNotSetException("Cloud account not yet set; must deploy tests first");
+ }
+ }
+ return account;
+ }
+
private Optional<RunStatus> deploy(Supplier<DeploymentResult> deployment, Instant startTime, RunId id, DualLogger logger) {
try {
DeploymentResult result = deployment.get();
@@ -276,6 +317,10 @@ public class InternalStepRunner implements StepRunner {
throw e;
}
+ catch (CloudAccountNotSetException e) {
+ logger.log(INFO, "Timed out waiting for cloud account to be set for " + id + ": " + e.getMessage());
+ return Optional.empty();
+ }
catch (IllegalArgumentException e) {
logger.log(WARNING, e.getMessage());
return Optional.of(deploymentFailed);
@@ -1006,4 +1051,8 @@ public class InternalStepRunner implements StepRunner {
}
+ private static class CloudAccountNotSetException extends RuntimeException {
+ CloudAccountNotSetException(String message) { super(message); }
+ }
+
}
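
For staging runs, the tester deployment records the cloud account on the Run and the real deployment then polls for it with a one-minute deadline before giving up. A self-contained sketch of that bounded polling pattern in plain Java; the AtomicReference is a hypothetical stand-in for the persisted Run, not the real JobController API:

    import java.time.Clock;
    import java.time.Duration;
    import java.time.Instant;
    import java.util.Optional;
    import java.util.concurrent.atomic.AtomicReference;

    // Bounded wait for a value another step is expected to write shortly.
    public class CloudAccountWaitSketch {

        static Optional<String> awaitAccount(AtomicReference<Optional<String>> store, Clock clock) throws InterruptedException {
            Instant doom = clock.instant().plusSeconds(60);                        // give up after one minute
            while (true) {
                Optional<String> stored = store.get();
                if (stored.isPresent()) return stored.filter(a -> !a.isEmpty());   // empty string means "no override"
                long millisToDoom = Duration.between(clock.instant(), doom).toMillis();
                if (millisToDoom <= 0) throw new IllegalStateException("Cloud account not yet set; must deploy tests first");
                Thread.sleep(Math.min(millisToDoom, 5000));                        // poll at most every 5 seconds
            }
        }

        public static void main(String[] args) throws Exception {
            AtomicReference<Optional<String>> store = new AtomicReference<>(Optional.empty());
            new Thread(() -> {
                try { Thread.sleep(200); } catch (InterruptedException ignored) { }
                store.set(Optional.of("aws:123456789012"));                        // written by the "tester" step
            }).start();
            System.out.println(awaitAccount(store, Clock.systemUTC()));            // Optional[aws:123456789012]
        }
    }
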
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
index 6287448c08e..f2357a49952 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
@@ -566,9 +566,9 @@ public class JobController {
AtomicReference<ApplicationVersion> version = new AtomicReference<>();
applications.lockApplicationOrThrow(id, application -> {
Optional<ApplicationVersion> previousVersion = application.get().revisions().last();
- Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong()))
+ Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber()))
.map(ApplicationPackage::new);
- long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L);
+ long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber()).orElse(0L);
version.set(submission.toApplicationVersion(1 + previousBuild));
byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage()))
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java
index 36df2aeda10..0c5fb3fb3cb 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/Run.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.deployment;
+import com.yahoo.config.provision.CloudAccount;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import java.security.cert.X509Certificate;
@@ -15,7 +16,6 @@ import java.util.stream.Collectors;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.aborted;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.cancelled;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.noTests;
-import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.reset;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.running;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.success;
import static com.yahoo.vespa.hosted.controller.deployment.Step.Status.succeeded;
@@ -43,13 +43,14 @@ public class Run {
private final Optional<ConvergenceSummary> convergenceSummary;
private final Optional<X509Certificate> testerCertificate;
private final boolean dryRun;
+ private final Optional<CloudAccount> cloudAccount;
private final Optional<String> reason;
// For deserialisation only -- do not use!
public Run(RunId id, Map<Step, StepInfo> steps, Versions versions, boolean isRedeployment, Instant start, Optional<Instant> end,
Optional<Instant> sleepUntil, RunStatus status, long lastTestRecord, Instant lastVespaLogTimestamp,
Optional<Instant> noNodesDownSince, Optional<ConvergenceSummary> convergenceSummary,
- Optional<X509Certificate> testerCertificate, boolean dryRun, Optional<String> reason) {
+ Optional<X509Certificate> testerCertificate, boolean dryRun, Optional<CloudAccount> cloudAccount, Optional<String> reason) {
this.id = id;
this.steps = Collections.unmodifiableMap(new EnumMap<>(steps));
this.versions = versions;
@@ -64,6 +65,7 @@ public class Run {
this.convergenceSummary = convergenceSummary;
this.testerCertificate = testerCertificate;
this.dryRun = dryRun;
+ this.cloudAccount = cloudAccount;
this.reason = reason;
}
@@ -72,7 +74,7 @@ public class Run {
profile.steps().forEach(step -> steps.put(step, StepInfo.initial(step)));
return new Run(id, steps, requireNonNull(versions), isRedeployment, requireNonNull(now), Optional.empty(),
Optional.empty(), running, -1, Instant.EPOCH, Optional.empty(), Optional.empty(),
- Optional.empty(), profile == JobProfile.developmentDryRun, triggeredBy);
+ Optional.empty(), profile == JobProfile.developmentDryRun, Optional.empty(), triggeredBy);
}
/** Returns a new Run with the status of the given completed step set accordingly. */
@@ -87,7 +89,7 @@ public class Run {
steps.put(step.get(), stepInfo.with(Step.Status.of(status)));
RunStatus newStatus = hasFailed() || status == running ? this.status : status;
return new Run(id, steps, versions, isRedeployment, start, end, sleepUntil, newStatus, lastTestRecord,
- lastVespaLogTimestamp, noNodesDownSince, convergenceSummary, testerCertificate, dryRun, reason);
+ lastVespaLogTimestamp, noNodesDownSince, convergenceSummary, testerCertificate, dryRun, cloudAccount, reason);
}
/** Returns a new Run with a new start time*/
@@ -102,13 +104,13 @@ public class Run {
steps.put(step.get(), stepInfo.with(startTime));
return new Run(id, steps, versions, isRedeployment, start, end, sleepUntil, status, lastTestRecord, lastVespaLogTimestamp,
- noNodesDownSince, convergenceSummary, testerCertificate, dryRun, reason);
+ noNodesDownSince, convergenceSummary, testerCertificate, dryRun, cloudAccount, reason);
}
public Run finished(Instant now) {
requireActive();
return new Run(id, steps, versions, isRedeployment, start, Optional.of(now), sleepUntil, status == running ? success : status,
- lastTestRecord, lastVespaLogTimestamp, noNodesDownSince, convergenceSummary, Optional.empty(), dryRun, reason);
+ lastTestRecord, lastVespaLogTimestamp, noNodesDownSince, convergenceSummary, Optional.empty(), dryRun, cloudAccount, reason);
}
public Run aborted(boolean cancelledByHumans) {
@@ -116,7 +118,7 @@ public class Run {
return new Run(id, steps, versions, isRedeployment, start, end, sleepUntil,
cancelledByHumans ? cancelled : aborted,
lastTestRecord, lastVespaLogTimestamp, noNodesDownSince,
- convergenceSummary, testerCertificate, dryRun, reason);
+ convergenceSummary, testerCertificate, dryRun, cloudAccount, reason);
}
public Run reset() {
@@ -124,43 +126,49 @@ public class Run {
Map<Step, StepInfo> reset = new EnumMap<>(steps);
reset.replaceAll((step, __) -> StepInfo.initial(step));
return new Run(id, reset, versions, isRedeployment, start, end, sleepUntil, running, -1, lastVespaLogTimestamp,
- Optional.empty(), Optional.empty(), testerCertificate, dryRun, reason);
+ Optional.empty(), Optional.empty(), testerCertificate, dryRun, cloudAccount, reason);
}
public Run with(long lastTestRecord) {
requireActive();
return new Run(id, steps, versions, isRedeployment, start, end, sleepUntil, status, lastTestRecord, lastVespaLogTimestamp,
- noNodesDownSince, convergenceSummary, testerCertificate, dryRun, reason);
+ noNodesDownSince, convergenceSummary, testerCertificate, dryRun, cloudAccount, reason);
}
public Run with(Instant lastVespaLogTimestamp) {
requireActive();
return new Run(id, steps, versions, isRedeployment, start, end, sleepUntil, status, lastTestRecord, lastVespaLogTimestamp,
- noNodesDownSince, convergenceSummary, testerCertificate, dryRun, reason);
+ noNodesDownSince, convergenceSummary, testerCertificate, dryRun, cloudAccount, reason);
}
public Run noNodesDownSince(Instant noNodesDownSince) {
requireActive();
return new Run(id, steps, versions, isRedeployment, start, end, sleepUntil, status, lastTestRecord, lastVespaLogTimestamp,
- Optional.ofNullable(noNodesDownSince), convergenceSummary, testerCertificate, dryRun, reason);
+ Optional.ofNullable(noNodesDownSince), convergenceSummary, testerCertificate, dryRun, cloudAccount, reason);
}
public Run withSummary(ConvergenceSummary convergenceSummary) {
requireActive();
return new Run(id, steps, versions, isRedeployment, start, end, sleepUntil, status, lastTestRecord, lastVespaLogTimestamp,
- noNodesDownSince, Optional.ofNullable(convergenceSummary), testerCertificate, dryRun, reason);
+ noNodesDownSince, Optional.ofNullable(convergenceSummary), testerCertificate, dryRun, cloudAccount, reason);
}
public Run with(X509Certificate testerCertificate) {
requireActive();
return new Run(id, steps, versions, isRedeployment, start, end, sleepUntil, status, lastTestRecord, lastVespaLogTimestamp,
- noNodesDownSince, convergenceSummary, Optional.of(testerCertificate), dryRun, reason);
+ noNodesDownSince, convergenceSummary, Optional.of(testerCertificate), dryRun, cloudAccount, reason);
}
public Run sleepingUntil(Instant instant) {
requireActive();
return new Run(id, steps, versions, isRedeployment, start, end, Optional.of(instant), status, lastTestRecord, lastVespaLogTimestamp,
- noNodesDownSince, convergenceSummary, testerCertificate, dryRun, reason);
+ noNodesDownSince, convergenceSummary, testerCertificate, dryRun, cloudAccount, reason);
+ }
+
+ public Run with(CloudAccount account) {
+ requireActive();
+ return new Run(id, steps, versions, isRedeployment, start, end, sleepUntil, status, lastTestRecord, lastVespaLogTimestamp,
+ noNodesDownSince, convergenceSummary, testerCertificate, dryRun, Optional.of(account), reason);
}
/** Returns the id of this run. */
@@ -266,6 +274,9 @@ public class Run {
/** Whether this is a dry run deployment. */
public boolean isDryRun() { return dryRun; }
+ /** Cloud account override to use for this run, if set. This should only be used by staging tests. */
+ public Optional<CloudAccount> cloudAccount() { return cloudAccount; }
+
/** The specific reason for triggering this run, if any. This should be empty for jobs triggered by deployment orchestration. */
public Optional<String> reason() {
return reason;
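
The Run changes above follow the class's copy-on-write style: every with(...) method rebuilds the full constructor call, so a new field such as cloudAccount must be threaded through each of them. A minimal, self-contained sketch of that pattern (MiniRun and its fields are illustrative stand-ins, not the controller's types):

import java.util.Optional;

// Illustration only: a tiny immutable type in the same "wither" style as Run.
// Adding a field (here cloudAccount) means carrying it through every copy.
record MiniRun(String id, Optional<String> cloudAccount, Optional<String> reason) {

    MiniRun with(String account) {
        return new MiniRun(id, Optional.of(account), reason);
    }

    MiniRun withReason(String newReason) {
        // Every other "wither" must pass cloudAccount along unchanged.
        return new MiniRun(id, cloudAccount, Optional.of(newReason));
    }
}
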
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
index cbdfcf70123..1a944cfd5d7 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmer.java
@@ -6,6 +6,7 @@ import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.ApplicationSummary;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.OwnershipIssues;
@@ -70,7 +71,8 @@ public class ApplicationOwnershipConfirmer extends ControllerMaintainer {
tenantOf(application.id()).contact().flatMap(contact -> {
return ownershipIssues.confirmOwnership(application.ownershipIssueId(),
summaryOf(application.id()),
- determineAssignee(application),
+ application.issueOwner().orElse(null),
+ application.userOwner().orElse(null),
contact);
}).ifPresent(newIssueId -> store(newIssueId, application.id()));
}
@@ -156,8 +158,12 @@ public class ApplicationOwnershipConfirmer extends ControllerMaintainer {
return ApplicationList.from(controller().applications().readable());
}
- private User determineAssignee(Application application) {
- return application.owner().orElse(null);
+ private AccountId determineAssignee(Application application) {
+ return application.issueOwner().orElse(null);
+ }
+
+ private User determineLegacyAssignee(Application application) {
+ return application.userOwner().orElse(null);
}
private Tenant tenantOf(TenantAndApplicationId applicationId) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java
index 8f9b9b70639..e77bf52f567 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainer.java
@@ -6,7 +6,6 @@ import com.yahoo.container.jdisc.secretstore.SecretStore;
import com.yahoo.jdisc.Metric;
import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.flags.BooleanFlag;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.IntFlag;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.StringFlag;
@@ -52,7 +51,7 @@ public class CertificatePoolMaintainer extends ControllerMaintainer {
super(controller, interval);
this.controller = controller;
this.secretStore = controller.secretStore();
- this.certPoolSize = Flags.CERT_POOL_SIZE.bindTo(controller.flagSource());
+ this.certPoolSize = PermanentFlags.CERT_POOL_SIZE.bindTo(controller.flagSource());
this.useAlternateCertProvider = PermanentFlags.USE_ALTERNATIVE_ENDPOINT_CERTIFICATE_PROVIDER.bindTo(controller.flagSource());
this.endpointCertificateAlgo = PermanentFlags.ENDPOINT_CERTIFICATE_ALGORITHM.bindTo(controller.flagSource());
this.curator = controller.curator();
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
index c352fb053dc..cd0f4be7a48 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporter.java
@@ -117,9 +117,10 @@ public class DeploymentIssueReporter extends ControllerMaintainer {
try {
Tenant tenant = ownerOf(application.id());
tenant.contact().ifPresent(contact -> {
- User assignee = application.owner().orElse(null);
Optional<IssueId> ourIssueId = application.deploymentIssueId();
- IssueId issueId = deploymentIssues.fileUnlessOpen(ourIssueId, application.id().defaultInstance(), assignee, contact);
+ IssueId issueId = deploymentIssues.fileUnlessOpen(ourIssueId, application.id().defaultInstance(),
+ application.issueOwner().orElse(null), application.userOwner().orElse(null),
+ contact);
store(application.id(), issueId);
});
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
index c751c0a130b..8d48576044b 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeScheduler.java
@@ -160,17 +160,12 @@ public class OsUpgradeScheduler extends ControllerMaintainer {
@Override
public Change change(Version currentVersion, Instant instant) {
- OsRelease release = artifactRepository.osRelease(currentVersion.getMajor(), tag());
+ OsRelease release = artifactRepository.osRelease(currentVersion.getMajor(), OsRelease.Tag.latest);
Duration cooldown = remainingCooldownOf(cooldown(), release.age(instant));
Instant scheduleAt = schedulingInstant(instant.plus(cooldown), system);
return new Change(new OsVersion(release.version(), cloud), scheduleAt);
}
- /** Returns the release tag tracked by this system */
- private OsRelease.Tag tag() {
- return system.isCd() ? OsRelease.Tag.latest : OsRelease.Tag.stable;
- }
-
/** The cool-down period that must pass before a release can be used */
private Duration cooldown() {
return system.isCd() ? Duration.ofDays(1) : Duration.ZERO;
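
For context, the cool-down handling in change(...) amounts to clamping the remaining wait at zero once the release is old enough. A rough sketch under that reading; the method name comes from the diff, but this simplified body is an assumption:

import java.time.Duration;

class CooldownMath {

    // Once a release has aged past the cool-down, no further wait is needed;
    // otherwise wait out the remainder. Assumed, simplified behaviour of the
    // remainingCooldownOf(...) helper referenced in the diff.
    static Duration remainingCooldownOf(Duration cooldown, Duration releaseAge) {
        return releaseAge.compareTo(cooldown) >= 0 ? Duration.ZERO
                                                   : cooldown.minus(releaseAge);
    }
}
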
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
index 52206d41c00..00c9bd165a9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
@@ -1,7 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
-import com.yahoo.component.Version;
+import ai.vespa.metrics.ControllerMetrics;
import com.yahoo.concurrent.UncheckedTimeoutException;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterResources;
@@ -71,8 +71,8 @@ public class ResourceMeterMaintainer extends ControllerMaintainer {
private final Metric metric;
private final Clock clock;
- private static final String METERING_LAST_REPORTED = "metering_last_reported";
- private static final String METERING_TOTAL_REPORTED = "metering_total_reported";
+ private static final String METERING_LAST_REPORTED = ControllerMetrics.METERING_LAST_REPORTED.baseName();
+ private static final String METERING_TOTAL_REPORTED = ControllerMetrics.METERING_TOTAL_REPORTED.baseName();
private static final int METERING_REFRESH_INTERVAL_SECONDS = 1800;
@SuppressWarnings("WeakerAccess")
@@ -271,7 +271,7 @@ public class ResourceMeterMaintainer extends ControllerMaintainer {
private Metric.Context getMetricContext(ApplicationId applicationId, ZoneId zoneId) {
return metric.createContext(Map.of(
- "tenant", applicationId.tenant().value(),
+ "tenantName", applicationId.tenant().value(),
"applicationId", applicationId.toFullString(),
"zoneId", zoneId.value()
));
@@ -279,7 +279,7 @@ public class ResourceMeterMaintainer extends ControllerMaintainer {
private Metric.Context getMetricContext(ResourceSnapshot snapshot) {
return metric.createContext(Map.of(
- "tenant", snapshot.getApplicationId().tenant().value(),
+ "tenantName", snapshot.getApplicationId().tenant().value(),
"applicationId", snapshot.getApplicationId().toFullString(),
"zoneId", snapshot.getZoneId().value(),
"architecture", snapshot.resources().architecture()
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgrader.java
index effcc4dd4df..40b51e692f3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgrader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgrader.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
+import com.yahoo.config.provision.CloudName;
import com.yahoo.config.provision.zone.NodeSlice;
import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.text.Text;
@@ -50,7 +51,7 @@ public class SystemUpgrader extends InfrastructureUpgrader<VespaVersionTarget> {
@Override
protected boolean expectUpgradeOf(Node node, SystemApplication application, ZoneApi zone) {
- return eligibleForUpgrade(node);
+ return eligibleForUpgrade(node, zone);
}
@Override
@@ -90,7 +91,11 @@ public class SystemUpgrader extends InfrastructureUpgrader<VespaVersionTarget> {
}
/** Returns whether the given node in the application should be upgraded by this */
- public static boolean eligibleForUpgrade(Node node) {
+ public static boolean eligibleForUpgrade(Node node, ZoneApi zone) {
+ // Temporary hack until GCP enclave works again
+ if (zone.getCloudName().equals(CloudName.GCP) && node.hostname().value().startsWith("e"))
+ return false;
+
return upgradableNodeStates.contains(node.state());
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
index 9890a5b361b..e6b3dd74abc 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
@@ -24,6 +24,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
@@ -86,7 +87,8 @@ public class ApplicationSerializer {
private static final String revisionPinnedField = "revisionPinned";
private static final String deploymentIssueField = "deploymentIssueId";
private static final String ownershipIssueIdField = "ownershipIssueId";
- private static final String ownerField = "confirmedOwner";
+ private static final String userOwnerField = "confirmedOwner";
+ private static final String issueOwnerField = "confirmedOwnerId";
private static final String majorVersionField = "majorVersion";
private static final String writeQualityField = "writeQuality";
private static final String queryQualityField = "queryQuality";
@@ -174,7 +176,8 @@ public class ApplicationSerializer {
application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
- application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
+ application.userOwner().ifPresent(owner -> root.setString(userOwnerField, owner.username()));
+ application.issueOwner().ifPresent(owner -> root.setString(issueOwnerField, owner.value()));
application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
@@ -259,7 +262,7 @@ public class ApplicationSerializer {
}
private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
- applicationVersion.buildNumber().ifPresent(number -> object.setLong(applicationBuildNumberField, number));
+ object.setLong(applicationBuildNumberField, applicationVersion.buildNumber());
applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
@@ -349,7 +352,8 @@ public class ApplicationSerializer {
ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
Optional<IssueId> deploymentIssueId = SlimeUtils.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
Optional<IssueId> ownershipIssueId = SlimeUtils.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
- Optional<User> owner = SlimeUtils.optionalString(root.field(ownerField)).map(User::from);
+ Optional<User> userOwner = SlimeUtils.optionalString(root.field(userOwnerField)).map(User::from);
+ Optional<AccountId> issueOwner = SlimeUtils.optionalString(root.field(issueOwnerField)).map(AccountId::new);
OptionalInt majorVersion = SlimeUtils.optionalInteger(root.field(majorVersionField));
ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
root.field(writeQualityField).asDouble());
@@ -359,7 +363,7 @@ public class ApplicationSerializer {
RevisionHistory revisions = revisionsFromSlime(root.field(prodVersionsField), root.field(devVersionsField), id);
return new Application(id, createdAt, deploymentSpec, validationOverrides,
- deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
+ deploymentIssueId, ownershipIssueId, userOwner, issueOwner, majorVersion, metrics,
deployKeys, projectId, revisions, instances);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
index 5770649c8b7..9bc9403b9d6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializer.java
@@ -4,13 +4,13 @@ package com.yahoo.vespa.hosted.controller.persistence;
import ai.vespa.http.DomainName;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
-import com.yahoo.vespa.hosted.controller.application.Endpoint;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy;
@@ -132,17 +132,17 @@ public class RoutingPolicySerializer {
return new RoutingStatus(status, agent, changedAt);
}
- private String authMethod(Endpoint.AuthMethod authMethod) {
+ private String authMethod(AuthMethod authMethod) {
return switch (authMethod) {
case token -> "token";
case mtls -> "mtls";
};
}
- private Endpoint.AuthMethod authMethodFromSlime(Inspector field) {
+ private AuthMethod authMethodFromSlime(Inspector field) {
return switch (field.asString()) {
- case "token" -> Endpoint.AuthMethod.token;
- case "mtls" -> Endpoint.AuthMethod.mtls;
+ case "token" -> AuthMethod.token;
+ case "mtls" -> AuthMethod.mtls;
default -> throw new IllegalArgumentException("Unknown auth method '" + field.asString() + "'");
};
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
index 4547eed24c8..73d0bf6cad6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.persistence;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.CloudAccount;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.ArrayTraverser;
import com.yahoo.slime.Cursor;
@@ -98,6 +99,7 @@ class RunSerializer {
private static final String convergenceSummaryField = "convergenceSummaryV2";
private static final String testerCertificateField = "testerCertificate";
private static final String isDryRunField = "isDryRun";
+ private static final String cloudAccountField = "account";
private static final String reasonField = "reason";
Run runFromSlime(Slime slime) {
@@ -142,10 +144,9 @@ class RunSerializer {
Instant.EPOCH.plus(runObject.field(lastVespaLogTimestampField).asLong(), ChronoUnit.MICROS),
SlimeUtils.optionalInstant(runObject.field(noNodesDownSinceField)),
convergenceSummaryFrom(runObject.field(convergenceSummaryField)),
- Optional.of(runObject.field(testerCertificateField))
- .filter(Inspector::valid)
- .map(certificate -> X509CertificateUtils.fromPem(certificate.asString())),
+ SlimeUtils.optionalString(runObject.field(testerCertificateField)).map(X509CertificateUtils::fromPem),
runObject.field(isDryRunField).valid() && runObject.field(isDryRunField).asBool(),
+ SlimeUtils.optionalString(runObject.field(cloudAccountField)).map(CloudAccount::from),
SlimeUtils.optionalString(runObject.field(reasonField)));
}
@@ -239,6 +240,7 @@ class RunSerializer {
versionsObject.setObject(sourceField));
});
runObject.setBool(isDryRunField, run.isDryRun());
+ run.cloudAccount().ifPresent(account -> runObject.setString(cloudAccountField, account.value()));
run.reason().ifPresent(reason -> runObject.setString(reasonField, reason));
}
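
The serializer additions follow the usual optional-field round trip: write the field only when present, and map it back through SlimeUtils.optionalString on read, so older runs without a stored account still deserialize cleanly. A minimal sketch of that pattern (the helper class is illustrative; the "account" field name mirrors cloudAccountField above):

import com.yahoo.slime.Cursor;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;

import java.util.Optional;

class OptionalFieldRoundTrip {

    // Present values produce a field; absent values leave no trace, and a
    // missing field reads back as Optional.empty().
    static Optional<String> roundTrip(Optional<String> account) {
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        account.ifPresent(a -> root.setString("account", a));
        return SlimeUtils.optionalString(slime.get().field("account"));
    }
}
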
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 94c7829a851..a0e8b1c5610 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -333,7 +333,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
List<EndpointTarget> targets = new ArrayList<>();
out:
for (var app : applications) {
- Optional<Endpoint> declaredEndpoint = controller.routing().declaredEndpointsOf(app).dnsName(endpoint);
+ Optional<Endpoint> declaredEndpoint = controller.routing().readDeclaredEndpointsOf(app).dnsName(endpoint);
if (declaredEndpoint.isPresent()) {
for (var target : declaredEndpoint.get().targets()) {
targets.add(new EndpointTarget(target.deployment(), declaredEndpoint.get().cluster()));
@@ -1739,7 +1739,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
- application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
+ application.issueOwner().ifPresent(owner -> object.setString("owner", owner.value()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
@@ -1931,7 +1931,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
application.activity().lastWritesPerSecond().ifPresent(value -> activity.setDouble("lastWritesPerSecond", value));
application.ownershipIssueId().ifPresent(issueId -> object.setString("ownershipIssueId", issueId.value()));
- application.owner().ifPresent(owner -> object.setString("owner", owner.username()));
+ application.issueOwner().ifPresent(owner -> object.setString("owner", owner.value()));
application.deploymentIssueId().ifPresent(issueId -> object.setString("deploymentIssueId", issueId.value()));
}
@@ -2061,7 +2061,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
if (!legacyEndpoints) {
zoneEndpoints = zoneEndpoints.not().legacy().direct();
}
- EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application).targets(deploymentId);
+ EndpointList declaredEndpoints = controller.routing().readDeclaredEndpointsOf(application).targets(deploymentId);
if (!legacyEndpoints) {
declaredEndpoints = declaredEndpoints.not().legacy().direct();
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
index 43c8e7c9469..d290e52034f 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
@@ -23,7 +23,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.application.Change;
-import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.ConvergenceSummary;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus;
@@ -53,7 +52,6 @@ import java.util.Locale;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Optional;
-import java.util.SortedMap;
import java.util.stream.Stream;
import static com.yahoo.config.application.api.DeploymentSpec.UpgradePolicy.canary;
@@ -109,10 +107,10 @@ class JobControllerApiHandlerHelper {
int limit = limitStr.map(Integer::parseInt).orElse(Integer.MAX_VALUE);
toSlime(cursor.setArray("runs"), runs.values(), application, limit, baseUriForJobType);
- controller.applications().decideCloudAccountOf(new DeploymentId(id.application(),
- runs.lastEntry().getValue().id().job().type().zone()), // Urgh, must use a job with actual zone.
- application.deploymentSpec())
- .ifPresent(cloudAccount -> cursor.setObject("enclave").setString("cloudAccount", cloudAccount.value()));
+ Optional.ofNullable(runs.lastEntry())
+ .map(entry -> new DeploymentId(id.application(), entry.getValue().id().job().type().zone())) // Urgh, must use a job with actual zone.
+ .flatMap(deployment -> controller.applications().decideCloudAccountOf(deployment, application.deploymentSpec()))
+ .ifPresent(cloudAccount -> cursor.setObject("enclave").setString("cloudAccount", cloudAccount.value()));
return new SlimeJsonResponse(slime);
}
@@ -220,7 +218,12 @@ class JobControllerApiHandlerHelper {
* @return Response with the new application version
*/
static HttpResponse submitResponse(JobController jobController, TenantAndApplicationId id, Submission submission, long projectId) {
- return new MessageResponse("application " + jobController.submit(id, submission, projectId));
+ Slime slime = new Slime();
+ Cursor root = slime.setObject();
+ ApplicationVersion submitted = jobController.submit(id, submission, projectId);
+ root.setString("message", "application " + submitted);
+ root.setLong("build", submitted.buildNumber());
+ return new SlimeJsonResponse(slime);
}
/** Aborts any job of the given type. */
@@ -438,7 +441,7 @@ class JobControllerApiHandlerHelper {
}
static void toSlime(Cursor versionObject, ApplicationVersion version) {
- version.buildNumber().ifPresent(id -> versionObject.setLong("build", id));
+ versionObject.setLong("build", version.buildNumber());
version.compileVersion().ifPresent(platform -> versionObject.setString("compileVersion", platform.toFullString()));
version.sourceUrl().ifPresent(url -> versionObject.setString("sourceUrl", url));
version.commit().ifPresent(commit -> versionObject.setString("commit", commit));
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java
index 232f25f5674..bc83eeb73c1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/routing/RoutingApiHandler.java
@@ -253,7 +253,7 @@ public class RoutingApiHandler extends AuditLoggingRequestHandler {
var instances = instanceId == null
? application.instances().values()
: List.of(application.require(instanceId.instance()));
- EndpointList declaredEndpoints = controller.routing().declaredEndpointsOf(application);
+ EndpointList declaredEndpoints = controller.routing().readDeclaredEndpointsOf(application);
for (var instance : instances) {
var zones = zoneId == null
? instance.deployments().keySet().stream().sorted(Comparator.comparing(ZoneId::value)).toList()
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployer.java
index 355f06fc753..2c38066eddd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployer.java
@@ -8,6 +8,7 @@ import com.yahoo.vespa.flags.FlagId;
import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.json.FlagData;
import com.yahoo.vespa.hosted.controller.api.integration.ControllerIdentityProvider;
+import com.yahoo.vespa.hosted.controller.api.systemflags.v1.FlagValidationException;
import com.yahoo.vespa.hosted.controller.api.systemflags.v1.FlagsTarget;
import com.yahoo.vespa.hosted.controller.api.systemflags.v1.SystemFlagsDataArchive;
import com.yahoo.vespa.hosted.controller.restapi.systemflags.SystemFlagsDeployResult.OperationError;
@@ -57,6 +58,12 @@ class SystemFlagsDeployer {
}
SystemFlagsDeployResult deployFlags(SystemFlagsDataArchive archive, boolean dryRun) {
+ try {
+ archive.validateAllFilesAreForTargets(targets);
+ } catch (FlagValidationException e) {
+ return new SystemFlagsDeployResult(List.of(OperationError.archiveValidationFailed(e.getMessage())));
+ }
+
Map<FlagsTarget, Future<SystemFlagsDeployResult>> futures = new HashMap<>();
for (FlagsTarget target : targets) {
futures.put(target, executor.submit(() -> deployFlags(target, archive.flagData(target), dryRun)));
@@ -70,11 +77,6 @@ class SystemFlagsDeployer {
throw new RuntimeException(e);
}
});
- try {
- archive.validateAllFilesAreForTargets(system, targets);
- } catch (IllegalArgumentException e) {
- results.add(new SystemFlagsDeployResult(List.of(OperationError.archiveValidationFailed(e.getMessage()))));
- }
return SystemFlagsDeployResult.merge(results);
}
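
The net effect of this change is ordering: the archive is now validated before any per-target deployment is submitted, so a malformed archive produces a single error result instead of being reported only after work has been fanned out. A generic sketch of that fail-fast shape (names and types are simplified stand-ins, not the controller's signatures):

import java.util.List;
import java.util.function.Function;

class FailFastDeploy {

    // Validate the whole input up front; only fan out per-target work once it
    // is known to be well-formed.
    static List<String> deployAll(List<String> targets, Runnable validateArchive,
                                  Function<String, String> deployToTarget) {
        try {
            validateArchive.run();
        } catch (IllegalArgumentException e) {
            return List.of("archive validation failed: " + e.getMessage());
        }
        return targets.stream().map(deployToTarget).toList();
    }
}
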
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsHandler.java
index e9b087690ff..bb285b8b742 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsHandler.java
@@ -10,10 +10,13 @@ import com.yahoo.restapi.JacksonJsonResponse;
import com.yahoo.restapi.Path;
import com.yahoo.vespa.hosted.controller.api.integration.ControllerIdentityProvider;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
+import com.yahoo.vespa.hosted.controller.api.systemflags.v1.FlagValidationException;
import com.yahoo.vespa.hosted.controller.api.systemflags.v1.FlagsTarget;
import com.yahoo.vespa.hosted.controller.api.systemflags.v1.SystemFlagsDataArchive;
import com.yahoo.vespa.hosted.controller.restapi.ErrorResponses;
+import java.io.InputStream;
+import java.util.List;
import java.util.concurrent.Executor;
/**
@@ -27,12 +30,14 @@ public class SystemFlagsHandler extends ThreadedHttpRequestHandler {
private static final String API_PREFIX = "/system-flags/v1";
private final SystemFlagsDeployer deployer;
+ private final ZoneRegistry zoneRegistry;
@Inject
public SystemFlagsHandler(ZoneRegistry zoneRegistry,
ControllerIdentityProvider identityProvider,
Executor executor) {
super(executor);
+ this.zoneRegistry = zoneRegistry;
this.deployer = new SystemFlagsDeployer(identityProvider, zoneRegistry.system(), FlagsTarget.getAllTargetsInSystem(zoneRegistry, true));
}
@@ -57,12 +62,22 @@ public class SystemFlagsHandler extends ThreadedHttpRequestHandler {
if (!contentType.equalsIgnoreCase("application/zip")) {
return ErrorResponse.badRequest("Invalid content type: " + contentType);
}
- SystemFlagsDataArchive archive = SystemFlagsDataArchive.fromZip(request.getData());
- SystemFlagsDeployResult result = deployer.deployFlags(archive, dryRun);
+ SystemFlagsDeployResult result = deploy(request.getData(), dryRun);
return new JacksonJsonResponse<>(200, result.toWire());
} catch (Exception e) {
return ErrorResponses.logThrowing(request, log, e);
}
}
+ private SystemFlagsDeployResult deploy(InputStream zipStream, boolean dryRun) {
+ SystemFlagsDataArchive archive;
+ try {
+ archive = SystemFlagsDataArchive.fromZip(zipStream, zoneRegistry);
+ } catch (FlagValidationException e) {
+ return new SystemFlagsDeployResult(List.of(SystemFlagsDeployResult.OperationError.archiveValidationFailed(e.getMessage())));
+ }
+
+ return deployer.deployFlags(archive, dryRun);
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/GeneratedEndpoints.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/GeneratedEndpoints.java
new file mode 100644
index 00000000000..3adbb43a7b5
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/GeneratedEndpoints.java
@@ -0,0 +1,32 @@
+package com.yahoo.vespa.hosted.controller.routing;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * This represents endpoints generated by the controller for a deployment, grouped by their assigned cluster.
+ *
+ * @author mpolden
+ */
+public record GeneratedEndpoints(Map<ClusterSpec.Id, List<GeneratedEndpoint>> endpoints) {
+
+ public static final GeneratedEndpoints empty = new GeneratedEndpoints(Map.of());
+
+ public GeneratedEndpoints(Map<ClusterSpec.Id, List<GeneratedEndpoint>> endpoints) {
+ this.endpoints = Map.copyOf(Objects.requireNonNull(endpoints));
+ endpoints.forEach((cluster, generatedEndpoints) -> {
+ if (generatedEndpoints.stream().distinct().count() != generatedEndpoints.size()) {
+ throw new IllegalStateException("Endpoints for " + cluster + " must be distinct, got " + generatedEndpoints);
+ }
+ });
+ }
+
+ public List<GeneratedEndpoint> cluster(ClusterSpec.Id cluster) {
+ return endpoints.getOrDefault(cluster, List.of());
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/PreparedEndpoints.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/PreparedEndpoints.java
new file mode 100644
index 00000000000..c67d88fa81f
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/PreparedEndpoints.java
@@ -0,0 +1,121 @@
+package com.yahoo.vespa.hosted.controller.routing;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.zone.AuthMethod;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
+import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
+import com.yahoo.vespa.hosted.controller.application.Endpoint;
+import com.yahoo.vespa.hosted.controller.application.EndpointId;
+import com.yahoo.vespa.hosted.controller.application.EndpointList;
+import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.OptionalInt;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * This represents the endpoints, and associated resources, that have been prepared for a deployment.
+ *
+ * @author mpolden
+ */
+public record PreparedEndpoints(DeploymentId deployment,
+ EndpointList endpoints,
+ List<AssignedRotation> rotations,
+ Optional<EndpointCertificate> certificate) {
+
+ public PreparedEndpoints(DeploymentId deployment, EndpointList endpoints, List<AssignedRotation> rotations, Optional<EndpointCertificate> certificate) {
+ this.deployment = Objects.requireNonNull(deployment);
+ this.endpoints = Objects.requireNonNull(endpoints);
+ this.rotations = List.copyOf(Objects.requireNonNull(rotations));
+ this.certificate = Objects.requireNonNull(certificate);
+ }
+
+ /** Returns the endpoints generated by this prepare */
+ public GeneratedEndpoints generatedEndpoints() {
+ Map<ClusterSpec.Id, List<GeneratedEndpoint>> generated = new HashMap<>();
+ for (var endpoint : endpoints.generated()) {
+ List<GeneratedEndpoint> clusterGenerated = generated.computeIfAbsent(endpoint.cluster(), (k) -> new ArrayList<>());
+ if (!clusterGenerated.contains(endpoint.generated().get())) {
+ clusterGenerated.add(endpoint.generated().get());
+ }
+ }
+ return new GeneratedEndpoints(generated);
+ }
+
+ /** Returns the endpoints contained in this as {@link com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint} */
+ public Set<ContainerEndpoint> containerEndpoints() {
+ Map<EndpointId, AssignedRotation> rotationsByEndpointId = rotations.stream()
+ .collect(Collectors.toMap(AssignedRotation::endpointId,
+ Function.identity()));
+ Set<ContainerEndpoint> containerEndpoints = new HashSet<>();
+ endpoints.scope(Endpoint.Scope.zone).groupingBy(Endpoint::cluster).forEach((clusterId, clusterEndpoints) -> {
+ containerEndpoints.add(new ContainerEndpoint(clusterId.value(),
+ asString(Endpoint.Scope.zone),
+ clusterEndpoints.mapToList(Endpoint::dnsName),
+ OptionalInt.empty(),
+ clusterEndpoints.first().get().routingMethod(),
+ authMethodsByDnsName(clusterEndpoints)));
+ });
+ endpoints.scope(Endpoint.Scope.global).groupingBy(Endpoint::cluster).forEach((clusterId, clusterEndpoints) -> {
+ for (var endpoint : clusterEndpoints) {
+ List<String> names = new ArrayList<>(2);
+ names.add(endpoint.dnsName());
+ if (endpoint.requiresRotation()) {
+ EndpointId endpointId = EndpointId.of(endpoint.name());
+ AssignedRotation rotation = rotationsByEndpointId.get(endpointId);
+ if (rotation == null) {
+ throw new IllegalArgumentException(endpoint + " requires a rotation, but no rotation has been assigned to " + endpointId);
+ }
+ // Include the rotation ID as a valid name of this container endpoint
+ // (required by global routing health checks)
+ names.add(rotation.rotationId().asString());
+ }
+ containerEndpoints.add(new ContainerEndpoint(clusterId.value(),
+ asString(Endpoint.Scope.global),
+ names,
+ OptionalInt.empty(),
+ endpoint.routingMethod(),
+ authMethodsByDnsName(EndpointList.of(endpoint))));
+ }
+ });
+ endpoints.scope(Endpoint.Scope.application).groupingBy(Endpoint::cluster).forEach((clusterId, clusterEndpoints) -> {
+ for (var endpoint : clusterEndpoints) {
+ Optional<Endpoint.Target> matchingTarget = endpoint.targets().stream()
+ .filter(t -> t.routesTo(deployment))
+ .findFirst();
+ if (matchingTarget.isEmpty()) throw new IllegalStateException("No target found routing to " + deployment + " in " + endpoint);
+ containerEndpoints.add(new ContainerEndpoint(clusterId.value(),
+ asString(Endpoint.Scope.application),
+ List.of(endpoint.dnsName()),
+ OptionalInt.of(matchingTarget.get().weight()),
+ endpoint.routingMethod(),
+ authMethodsByDnsName(EndpointList.of(endpoint))));
+ }
+ });
+ return containerEndpoints;
+ }
+
+ private static Map<String, AuthMethod> authMethodsByDnsName(EndpointList endpoints) {
+ return endpoints.asList().stream().collect(Collectors.toMap(Endpoint::dnsName, Endpoint::authMethod));
+ }
+
+ private static String asString(Endpoint.Scope scope) {
+ return switch (scope) {
+ case application -> "application";
+ case global -> "global";
+ case weighted -> "weighted";
+ case zone -> "zone";
+ };
+ }
+
+}
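
One detail worth noting in authMethodsByDnsName above: Collectors.toMap without a merge function throws IllegalStateException on duplicate keys, so distinct DNS names within each endpoint list are an implicit precondition. A standalone illustration of that behaviour, with plain strings standing in for endpoints:

import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;

class ToMapPrecondition {

    // Throws IllegalStateException if dnsNames contains duplicates, mirroring
    // the implicit precondition on authMethodsByDnsName.
    static Map<String, Integer> lengthsByName(List<String> dnsNames) {
        return dnsNames.stream().collect(Collectors.toMap(Function.identity(), String::length));
    }
}
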
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
index c8c3d057ee3..eb881519589 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicies.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.controller.routing;
import ai.vespa.http.DomainName;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.transaction.Mutex;
@@ -43,6 +44,7 @@ import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
+import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -107,7 +109,7 @@ public class RoutingPolicies {
* Refresh routing policies for the instance in the given zone. This is idempotent, and changes are only performed if
* the routing configuration affecting the given deployment has changed.
*/
- public void refresh(DeploymentId deployment, DeploymentSpec deploymentSpec, List<GeneratedEndpoint> generatedEndpoints) {
+ public void refresh(DeploymentId deployment, DeploymentSpec deploymentSpec, GeneratedEndpoints generatedEndpoints) {
ApplicationId instance = deployment.applicationId();
List<LoadBalancer> loadBalancers = controller.serviceRegistry().configServer()
.getLoadBalancers(instance, deployment.zoneId());
@@ -243,14 +245,25 @@ public class RoutingPolicies {
for (var policy : policies) {
if (policy.dnsZone().isEmpty() && policy.canonicalName().isPresent()) continue;
if (controller.zoneRegistry().routingMethod(policy.id().zone()) != RoutingMethod.exclusive) continue;
- Endpoint endpoint = policy.regionEndpointIn(controller.system(), RoutingMethod.exclusive, parent.generated());
var zonePolicy = db.readZoneRoutingPolicy(policy.id().zone());
- long weight = 1;
- if (isConfiguredOut(zonePolicy, policy)) {
- weight = 0; // A record with 0 weight will not receive traffic. If all records within a group have 0
- // weight, traffic is routed to all records with equal probability.
+ // A record with 0 weight will not receive traffic. If all records within a group have 0
+ // weight, traffic is routed to all records with equal probability
+ long weight = isConfiguredOut(zonePolicy, policy) ? 0 : 1;
+ boolean generated = parent.generated().isPresent();
+ EndpointList weightedEndpoints = controller.routing()
+ .endpointsOf(policy.id().deployment(),
+ policy.id().cluster(),
+ parent.generated().stream().toList())
+ .scope(Endpoint.Scope.weighted);
+ if (generated) {
+ weightedEndpoints = weightedEndpoints.generated();
+ } else {
+ weightedEndpoints = weightedEndpoints.not().generated();
}
-
+ if (weightedEndpoints.size() != 1) {
+ throw new IllegalStateException("Expected to compute exactly one region endpoint for " + policy.id() + " with parent " + parent);
+ }
+ Endpoint endpoint = weightedEndpoints.first().get();
RegionEndpoint regionEndpoint = endpoints.computeIfAbsent(endpoint, (k) -> new RegionEndpoint(
new LatencyAliasTarget(DomainName.of(endpoint.dnsName()), policy.dnsZone().get(), policy.id().zone())));
@@ -282,7 +295,7 @@ public class RoutingPolicies {
Map<Endpoint, Set<Target>> inactiveTargetsByEndpoint = new LinkedHashMap<>();
for (Map.Entry<RoutingId, List<RoutingPolicy>> routeEntry : routingTable.entrySet()) {
RoutingId routingId = routeEntry.getKey();
- EndpointList endpoints = controller.routing().declaredEndpointsOf(application)
+ EndpointList endpoints = controller.routing().readDeclaredEndpointsOf(application)
.named(routingId.endpointId(), Endpoint.Scope.application);
for (Endpoint endpoint : endpoints) {
for (var policy : routeEntry.getValue()) {
@@ -355,22 +368,23 @@ public class RoutingPolicies {
*
* @return the updated policies
*/
- private RoutingPolicyList storePoliciesOf(LoadBalancerAllocation allocation, RoutingPolicyList applicationPolicies, List<GeneratedEndpoint> generatedEndpoints, @SuppressWarnings("unused") Mutex lock) {
+ private RoutingPolicyList storePoliciesOf(LoadBalancerAllocation allocation, RoutingPolicyList applicationPolicies, GeneratedEndpoints generatedEndpoints, @SuppressWarnings("unused") Mutex lock) {
Map<RoutingPolicyId, RoutingPolicy> policies = new LinkedHashMap<>(applicationPolicies.instance(allocation.deployment.applicationId()).asMap());
for (LoadBalancer loadBalancer : allocation.loadBalancers) {
if (loadBalancer.hostname().isEmpty() && loadBalancer.ipAddress().isEmpty()) continue;
var policyId = new RoutingPolicyId(loadBalancer.application(), loadBalancer.cluster(), allocation.deployment.zoneId());
var existingPolicy = policies.get(policyId);
var dnsZone = loadBalancer.ipAddress().isPresent() ? Optional.of("ignored") : loadBalancer.dnsZone();
+ var clusterGeneratedEndpoints = generatedEndpoints.cluster(loadBalancer.cluster());
var newPolicy = new RoutingPolicy(policyId, loadBalancer.hostname(), loadBalancer.ipAddress(), dnsZone,
allocation.instanceEndpointsOf(loadBalancer),
allocation.applicationEndpointsOf(loadBalancer),
RoutingStatus.DEFAULT,
loadBalancer.isPublic(),
- generatedEndpoints);
- boolean addingGeneratedEndpoints = !generatedEndpoints.isEmpty() && (existingPolicy == null || existingPolicy.generatedEndpoints().isEmpty());
+ clusterGeneratedEndpoints);
+ boolean addingGeneratedEndpoints = !clusterGeneratedEndpoints.isEmpty() && (existingPolicy == null || existingPolicy.generatedEndpoints().isEmpty());
if (addingGeneratedEndpoints) {
- generatedEndpoints.forEach(ge -> requireNonClashing(ge, applicationPolicies));
+ clusterGeneratedEndpoints.forEach(ge -> requireNonClashing(ge, applicationPolicies));
}
if (existingPolicy != null) {
newPolicy = newPolicy.with(existingPolicy.routingStatus()); // Always preserve routing status
@@ -386,11 +400,17 @@ public class RoutingPolicies {
return updated;
}
+ private static Map<AuthMethod, GeneratedEndpoint> asMap(List<GeneratedEndpoint> generatedEndpoints) {
+ return generatedEndpoints.stream().collect(Collectors.toMap(GeneratedEndpoint::authMethod, Function.identity()));
+ }
+
/** Update zone DNS record for given policy */
private void updateZoneDnsOf(RoutingPolicy policy, LoadBalancer loadBalancer, DeploymentId deploymentId) {
- RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(deploymentId.zoneId());
- boolean addTokenEndpoint = controller.routing().tokenEndpointEnabled(deploymentId.applicationId());
- for (var endpoint : policy.zoneEndpointsIn(controller.system(), routingMethod, addTokenEndpoint)) {
+ EndpointList zoneEndpoints = controller.routing().endpointsOf(deploymentId,
+ policy.id().cluster(),
+ policy.generatedEndpoints())
+ .scope(Endpoint.Scope.zone);
+ for (var endpoint : zoneEndpoints) {
RecordName name = RecordName.from(endpoint.dnsName());
Record record = policy.canonicalName().isPresent() ?
new Record(Record.Type.CNAME, name, RecordData.fqdn(policy.canonicalName().get().value())) :
@@ -464,14 +484,16 @@ public class RoutingPolicies {
* @return the updated policies
*/
private RoutingPolicyList removePoliciesUnreferencedBy(LoadBalancerAllocation allocation, RoutingPolicyList instancePolicies, @SuppressWarnings("unused") Mutex lock) {
- RoutingMethod routingMethod = controller.zoneRegistry().routingMethod(allocation.deployment.zoneId());
- boolean addTokenEndpoint = controller.routing().tokenEndpointEnabled(allocation.deployment.applicationId());
Map<RoutingPolicyId, RoutingPolicy> newPolicies = new LinkedHashMap<>(instancePolicies.asMap());
Set<RoutingPolicyId> activeIds = allocation.asPolicyIds();
RoutingPolicyList removable = instancePolicies.deployment(allocation.deployment)
.not().matching(policy -> activeIds.contains(policy.id()));
for (var policy : removable) {
- for (var endpoint : policy.zoneEndpointsIn(controller.system(), routingMethod, addTokenEndpoint)) {
+ EndpointList zoneEndpoints = controller.routing().endpointsOf(allocation.deployment,
+ policy.id().cluster(),
+ policy.generatedEndpoints())
+ .scope(Endpoint.Scope.zone);
+ for (var endpoint : zoneEndpoints) {
Record.Type type = policy.canonicalName().isPresent() ? Record.Type.CNAME : Record.Type.A;
nameServiceForwarder(endpoint).removeRecords(type,
RecordName.from(endpoint.dnsName()),
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
index 0233e7502ef..2363524e306 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/RoutingPolicy.java
@@ -3,15 +3,10 @@ package com.yahoo.vespa.hosted.controller.routing;
import ai.vespa.http.DomainName;
import com.google.common.collect.ImmutableSortedSet;
-import com.yahoo.config.provision.SystemName;
-import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
-import com.yahoo.vespa.hosted.controller.application.Endpoint;
-import com.yahoo.vespa.hosted.controller.application.Endpoint.Port;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint;
-import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
@@ -116,36 +111,6 @@ public record RoutingPolicy(RoutingPolicyId id,
return new RoutingPolicy(id, canonicalName, ipAddress, dnsZone, instanceEndpoints, applicationEndpoints, routingStatus, isPublic, generatedEndpoints);
}
- /** Returns the zone endpoints of this */
- public List<Endpoint> zoneEndpointsIn(SystemName system, RoutingMethod routingMethod, boolean includeTokenEndpoint) {
- DeploymentId deployment = new DeploymentId(id.owner(), id.zone());
- Endpoint.EndpointBuilder builder = endpoint(routingMethod).target(id.cluster(), deployment);
- Endpoint zoneEndpoint = builder.in(system);
- List<Endpoint> endpoints = new ArrayList<>();
- endpoints.add(zoneEndpoint);
- if (includeTokenEndpoint) {
- Endpoint tokenEndpoint = builder.authMethod(Endpoint.AuthMethod.token).in(system);
- endpoints.add(tokenEndpoint);
- }
- for (var generatedEndpoint : generatedEndpoints) {
- boolean include = switch (generatedEndpoint.authMethod()) {
- case token -> includeTokenEndpoint;
- case mtls -> true;
- };
- if (include) {
- endpoints.add(builder.generatedFrom(generatedEndpoint).in(system));
- }
- }
- return endpoints;
- }
-
- /** Returns the region endpoint of this */
- public Endpoint regionEndpointIn(SystemName system, RoutingMethod routingMethod, Optional<GeneratedEndpoint> generated) {
- Endpoint.EndpointBuilder builder = endpoint(routingMethod).targetRegion(id.cluster(), id.zone());
- generated.ifPresent(builder::generatedFrom);
- return builder.in(system);
- }
-
@Override
public boolean equals(Object o) {
if (this == o) return true;
@@ -159,10 +124,4 @@ public record RoutingPolicy(RoutingPolicyId id,
return Objects.hash(id);
}
- private Endpoint.EndpointBuilder endpoint(RoutingMethod routingMethod) {
- return Endpoint.of(id.owner())
- .on(Port.fromRoutingMethod(routingMethod))
- .routingMethod(routingMethod);
- }
-
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
index 2e11a156dce..64a969a9c9d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/routing/context/DeploymentRoutingContext.java
@@ -8,10 +8,12 @@ import com.yahoo.vespa.hosted.controller.LockedApplication;
import com.yahoo.vespa.hosted.controller.RoutingController;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServer;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.ContainerEndpoint;
import com.yahoo.vespa.hosted.controller.application.Endpoint;
-import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint;
+import com.yahoo.vespa.hosted.controller.application.pkg.BasicServicesXml;
+import com.yahoo.vespa.hosted.controller.routing.GeneratedEndpoints;
+import com.yahoo.vespa.hosted.controller.routing.PreparedEndpoints;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicyId;
import com.yahoo.vespa.hosted.controller.routing.RoutingStatus;
@@ -20,22 +22,21 @@ import java.time.Clock;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
-import java.util.Set;
/**
- * A deployment routing context, which extends {@link RoutingContext} to support routing configuration of a deployment.
+ * A deployment routing context. This extends {@link RoutingContext} to support configuration of routing for a deployment.
*
* @author mpolden
*/
public abstract class DeploymentRoutingContext implements RoutingContext {
final DeploymentId deployment;
- final RoutingController controller;
+ final RoutingController routing;
final RoutingMethod method;
- public DeploymentRoutingContext(DeploymentId deployment, RoutingMethod method, RoutingController controller) {
+ public DeploymentRoutingContext(DeploymentId deployment, RoutingMethod method, RoutingController routing) {
this.deployment = Objects.requireNonNull(deployment);
- this.controller = Objects.requireNonNull(controller);
+ this.routing = Objects.requireNonNull(routing);
this.method = Objects.requireNonNull(method);
}
@@ -44,13 +45,13 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
*
* @return the container endpoints relevant for this deployment, as declared in deployment spec
*/
- public final Set<ContainerEndpoint> prepare(LockedApplication application) {
- return controller.containerEndpointsOf(application, deployment.applicationId().instance(), deployment.zoneId());
+ public final PreparedEndpoints prepare(BasicServicesXml services, Optional<EndpointCertificate> certificate, LockedApplication application) {
+ return routing.prepare(deployment, services, certificate, application);
}
- /** Configure routing for the deployment in this context, using given deployment spec */
- public final void configure(DeploymentSpec deploymentSpec, List<GeneratedEndpoint> generatedEndpoints) {
- controller.policies().refresh(deployment, deploymentSpec, generatedEndpoints);
+ /** Finalize routing configuration for the deployment in this context, using given deployment spec */
+ public final void activate(DeploymentSpec deploymentSpec, GeneratedEndpoints generatedEndpoints) {
+ routing.policies().refresh(deployment, deploymentSpec, generatedEndpoints);
}
/** Routing method of this context */
@@ -61,7 +62,7 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
/** Read the routing policy for given cluster in this deployment */
public final Optional<RoutingPolicy> routingPolicy(ClusterSpec.Id cluster) {
RoutingPolicyId id = new RoutingPolicyId(deployment.applicationId(), cluster, deployment.zoneId());
- return controller.policies().read(deployment).of(id);
+ return routing.policies().read(deployment).of(id);
}
/** Extension of a {@link DeploymentRoutingContext} for deployments using {@link RoutingMethod#sharedLayer4} routing */
@@ -110,13 +111,13 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
}
private List<String> upstreamNames() {
- List<String> upstreamNames = controller.readEndpointsOf(deployment)
- .scope(Endpoint.Scope.zone)
- .shared()
- .asList().stream()
- .map(endpoint -> endpoint.upstreamName(deployment))
- .distinct()
- .toList();
+ List<String> upstreamNames = routing.readEndpointsOf(deployment)
+ .scope(Endpoint.Scope.zone)
+ .shared()
+ .asList().stream()
+ .map(endpoint -> endpoint.upstreamName(deployment))
+ .distinct()
+ .toList();
if (upstreamNames.isEmpty()) {
throw new IllegalArgumentException("No upstream names found for " + deployment);
}
@@ -137,17 +138,17 @@ public abstract class DeploymentRoutingContext implements RoutingContext {
@Override
public void setRoutingStatus(RoutingStatus.Value value, RoutingStatus.Agent agent) {
- controller.policies().setRoutingStatus(deployment, value, agent);
+ routing.policies().setRoutingStatus(deployment, value, agent);
}
@Override
public RoutingStatus routingStatus() {
// Status for a deployment applies to all clusters within the deployment, so we use the status from the
// first matching policy here
- return controller.policies().read(deployment)
- .first()
- .map(RoutingPolicy::routingStatus)
- .orElse(RoutingStatus.DEFAULT);
+ return routing.policies().read(deployment)
+ .first()
+ .map(RoutingPolicy::routingStatus)
+ .orElse(RoutingStatus.DEFAULT);
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
index 9fb87735b42..a93c0dfda76 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
@@ -185,7 +185,7 @@ public record VersionStatus(List<VespaVersion> versions, int currentMajor) {
for (var application : SystemApplication.notController()) {
var nodes = controller.serviceRegistry().configServer().nodeRepository()
.list(zone.getId(), NodeFilter.all().applications(application.id())).stream()
- .filter(SystemUpgrader::eligibleForUpgrade)
+ .filter(node -> SystemUpgrader.eligibleForUpgrade(node, zone))
.toList();
if (nodes.isEmpty()) continue;
boolean configConverged = application.configConvergedIn(zone.getId(), controller, Optional.empty());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
index c46a28c4567..1ac811f0b4f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
@@ -16,11 +16,13 @@ import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.path.Path;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeploymentData;
+import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeploymentEndpoints;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanRegistryMock;
import com.yahoo.vespa.hosted.controller.api.integration.billing.Quota;
@@ -332,7 +334,8 @@ public class ControllerTest {
List.of("beta.app1.tenant1.global.vespa.oath.cloud",
"rotation-id-01"),
OptionalInt.empty(),
- RoutingMethod.sharedLayer4));
+ RoutingMethod.sharedLayer4,
+ Map.of("beta.app1.tenant1.global.vespa.oath.cloud", AuthMethod.mtls)));
for (Deployment deployment : betaDeployments) {
assertEquals(containerEndpoints,
@@ -350,7 +353,8 @@ public class ControllerTest {
List.of("app1.tenant1.global.vespa.oath.cloud",
"rotation-id-02"),
OptionalInt.empty(),
- RoutingMethod.sharedLayer4));
+ RoutingMethod.sharedLayer4,
+ Map.of("app1.tenant1.global.vespa.oath.cloud", AuthMethod.mtls)));
for (Deployment deployment : defaultDeployments) {
assertEquals(containerEndpoints,
tester.configServer().containerEndpoints().get(defaultContext.deploymentIdIn(deployment.zone())));
@@ -740,10 +744,14 @@ public class ControllerTest {
);
deploymentEndpoints.forEach((deployment, endpoints) -> {
Set<ContainerEndpoint> expected = endpoints.entrySet().stream()
- .map(kv -> new ContainerEndpoint("default", "application",
+ .map(kv -> {
+ Map<String, AuthMethod> authMethods = kv.getKey().stream().collect(Collectors.toMap(Function.identity(), (v) -> AuthMethod.mtls));
+ return new ContainerEndpoint("default", "application",
kv.getKey(),
OptionalInt.of(kv.getValue()),
- tester.controller().zoneRegistry().routingMethod(deployment.zoneId())))
+ tester.controller().zoneRegistry().routingMethod(deployment.zoneId()),
+ authMethods);
+ })
.collect(Collectors.toSet());
assertEquals(expected,
tester.configServer().containerEndpoints().get(deployment),
@@ -790,7 +798,7 @@ public class ControllerTest {
RecordName.from("e.app1.tenant1.a.vespa.oath.cloud"),
RecordData.from("vip.prod.us-east-3.")))),
new TreeSet<>(records));
- List<String> endpointDnsNames = tester.controller().routing().declaredEndpointsOf(context.application())
+ List<String> endpointDnsNames = tester.controller().routing().readDeclaredEndpointsOf(context.application())
.scope(Endpoint.Scope.application)
.sortedBy(comparing(Endpoint::dnsName))
.mapToList(Endpoint::dnsName);
@@ -1536,8 +1544,8 @@ public class ControllerTest {
DeploymentContext context = tester.newDeploymentContext();
DeploymentId deployment = context.deploymentIdIn(ZoneId.from("prod", "us-west-1"));
DeploymentData deploymentData = new DeploymentData(deployment.applicationId(), deployment.zoneId(), InputStream::nullInputStream, Version.fromString("6.1"),
- Set.of(), Optional::empty, Optional.empty(), Optional.empty(),
- Quota::unlimited, List.of(), List.of(), Optional::empty, List.of(),false);
+ () -> DeploymentEndpoints.none, Optional.empty(), Optional.empty(),
+ Quota::unlimited, List.of(), List.of(), Optional::empty, List.of(), false);
tester.configServer().deploy(deploymentData);
assertTrue(tester.configServer().application(deployment.applicationId(), deployment.zoneId()).isPresent());
tester.controller().applications().deactivate(deployment.applicationId(), deployment.zoneId());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java
index cb9c1c2fa13..f2897c14ffe 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/DeploymentQuotaCalculatorTest.java
@@ -63,7 +63,7 @@ public class DeploymentQuotaCalculatorTest {
void quota_is_divided_among_prod_and_manual_instances() {
var existing_dev_deployment = new Application(TenantAndApplicationId.from(ApplicationId.defaultId()), Instant.EPOCH, DeploymentSpec.empty, ValidationOverrides.empty, Optional.empty(),
- Optional.empty(), Optional.empty(), OptionalInt.empty(), new ApplicationMetrics(1, 1), Set.of(), OptionalLong.empty(), RevisionHistory.empty(),
+ Optional.empty(), Optional.empty(), Optional.empty(), OptionalInt.empty(), new ApplicationMetrics(1, 1), Set.of(), OptionalLong.empty(), RevisionHistory.empty(),
List.of(new Instance(ApplicationId.defaultId()).withNewDeployment(ZoneId.from(Environment.dev, RegionName.defaultName()),
RevisionId.forProduction(1), Version.emptyVersion, Instant.EPOCH, Map.of(), QuotaUsage.create(0.53d), CloudAccount.empty)));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java
index 477aca86b9c..cc7a001b0b4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/EndpointTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.controller.application;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
@@ -269,7 +270,7 @@ public class EndpointTest {
Endpoint.of(instance1)
.targetRegion(ClusterSpec.Id.from("c1"), prodZone)
.routingMethod(RoutingMethod.exclusive)
- .generatedFrom(new GeneratedEndpoint("deadbeef", "cafed00d", Endpoint.AuthMethod.mtls))
+ .generatedFrom(new GeneratedEndpoint("deadbeef", "cafed00d", AuthMethod.mtls))
.on(Port.tls())
.in(SystemName.Public)
);
@@ -353,7 +354,7 @@ public class EndpointTest {
@Test
public void generated_id() {
- GeneratedEndpoint ge = new GeneratedEndpoint("cafed00d", "deadbeef", Endpoint.AuthMethod.mtls);
+ GeneratedEndpoint ge = new GeneratedEndpoint("cafed00d", "deadbeef", AuthMethod.mtls);
var deployment = new DeploymentId(instance1, ZoneId.from("prod", "us-north-1"));
var tests = Map.of(
// Zone endpoint in main, unlike named endpoints, this includes the scope symbol 'z'
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/BasicServicesXmlTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/BasicServicesXmlTest.java
new file mode 100644
index 00000000000..7d377ef6361
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/BasicServicesXmlTest.java
@@ -0,0 +1,54 @@
+package com.yahoo.vespa.hosted.controller.application.pkg;
+
+import com.yahoo.text.XML;
+import com.yahoo.vespa.hosted.controller.application.pkg.BasicServicesXml.Container;
+import org.junit.jupiter.api.Test;
+
+import java.util.List;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * @author mpolden
+ */
+class BasicServicesXmlTest {
+
+ @Test
+ public void parse() {
+ assertServices(new BasicServicesXml(List.of()), "<services/>");
+ assertServices(new BasicServicesXml(List.of(new Container("foo", List.of(Container.AuthMethod.mtls)),
+ new Container("bar", List.of(Container.AuthMethod.mtls)))),
+ """
+ <services>
+ <container id="foo"/>
+ <container id="bar"/>
+ </services>
+ """);
+ assertServices(new BasicServicesXml(List.of(
+ new Container("foo",
+ List.of(Container.AuthMethod.mtls,
+ Container.AuthMethod.token)),
+ new Container("bar", List.of(Container.AuthMethod.mtls)))),
+ """
+ <services>
+ <container id="foo">
+ <clients>
+ <client id="mtls"/>
+ <client id="token">
+ <token id="my-token"/>
+ </client>
+ <client id="token2">
+ <token id="other-token"/>
+ </client>
+ </clients>
+ </container>
+ <container id="bar"/>
+ </services>
+ """);
+ }
+
+ private void assertServices(BasicServicesXml expected, String xmlForm) {
+ assertEquals(expected, BasicServicesXml.parse(XML.getDocument(xmlForm)));
+ }
+
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java
index f529d81bf32..4c61fe7c77d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java
@@ -148,8 +148,8 @@ public class TestPackageTest {
"components/foo-tests.jar",
"artifacts/key"),
bundlePackage.keySet());
- assertEquals(Map.of(),
- unzip(bundleTests.asApplicationPackage().truncatedPackage().zippedContent()));
+ assertEquals(Set.of("deployment.xml", "services.xml"),
+ unzip(bundleTests.asApplicationPackage().truncatedPackage().zippedContent()).keySet());
}
@Test
@@ -221,10 +221,10 @@ public class TestPackageTest {
</deployment>
""");
- NodeResources firstResources = TestPackage.testerResourcesFor(ZoneId.from("prod", "gcp-us-west-1"), spec.requireInstance("first"));
- assertEquals(TestPackage.DEFAULT_TESTER_RESOURCES_CLOUD, firstResources);
+ NodeResources firstResources = TestPackage.testerResourcesFor(ZoneId.from("prod", "gcp-us-west-1"), spec.requireInstance("first"), true);
+ assertEquals(TestPackage.DEFAULT_TESTER_RESOURCES_CLOUD.with(NodeResources.Architecture.x86_64), firstResources);
- NodeResources secondResources = TestPackage.testerResourcesFor(ZoneId.from("prod", "us-west-1"), spec.requireInstance("second"));
+ NodeResources secondResources = TestPackage.testerResourcesFor(ZoneId.from("prod", "us-west-1"), spec.requireInstance("second"), false);
assertEquals(6, secondResources.vcpu(), 1e-9);
assertEquals(16, secondResources.memoryGb(), 1e-9);
assertEquals(100, secondResources.diskGb(), 1e-9);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
index fb3026e1d80..f417e3d52fb 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
@@ -8,6 +8,7 @@ import com.yahoo.config.provision.AthenzService;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.security.X509CertificateUtils;
@@ -56,6 +57,7 @@ public class ApplicationPackageBuilder {
"/>\n</notifications>\n").setEmptyValue("");
private final StringBuilder endpointsBody = new StringBuilder();
private final StringBuilder applicationEndpointsBody = new StringBuilder();
+ private final StringBuilder servicesBody = new StringBuilder();
private final List<X509Certificate> trustedCertificates = new ArrayList<>();
private final Map<Environment, Map<String, String>> nonProductionEnvironments = new LinkedHashMap<>();
@@ -112,6 +114,28 @@ public class ApplicationPackageBuilder {
return this;
}
+ public ApplicationPackageBuilder container(String id, AuthMethod... authMethod) {
+ servicesBody.append(" <container id='")
+ .append(id)
+ .append("'>\n")
+ .append(" <clients>\n");
+ for (int i = 0; i < authMethod.length; i++) {
+ AuthMethod m = authMethod[i];
+ servicesBody.append(" <client id='")
+ .append("client-").append(m.name()).append("-").append(i)
+ .append("'>\n");
+ if (m == AuthMethod.token) {
+ servicesBody.append(" <token id='")
+ .append(m.name()).append("-").append(i)
+ .append("'/>\n");
+ }
+ servicesBody.append(" </client>\n");
+ }
+ servicesBody.append(" </clients>\n")
+ .append(" </container>\n");
+ return this;
+ }
+
public ApplicationPackageBuilder applicationEndpoint(String id, String containerId, String region,
Map<InstanceName, Integer> instanceWeights) {
return applicationEndpoint(id, containerId, Map.of(region, instanceWeights));
@@ -350,6 +374,10 @@ public class ApplicationPackageBuilder {
return searchDefinition.getBytes(UTF_8);
}
+ private byte[] services() {
+ return ("<services version='1.0'>\n" + servicesBody + "</services>\n").getBytes(UTF_8);
+ }
+
private static byte[] buildMeta(Version compileVersion) {
return compileVersion == null ? new byte[0]
: ("{\"compileVersion\":\"" + compileVersion.toFullString() +
@@ -362,6 +390,7 @@ public class ApplicationPackageBuilder {
try (ZipOutputStream out = new ZipOutputStream(zip)) {
out.setLevel(Deflater.NO_COMPRESSION); // This is for testing purposes so we skip compression for performance
writeZipEntry(out, "deployment.xml", deploymentSpec());
+ writeZipEntry(out, "services.xml", services());
writeZipEntry(out, "validation-overrides.xml", validationOverrides());
writeZipEntry(out, "schemas/test.sd", searchDefinition());
writeZipEntry(out, "build-meta.json", buildMeta(compileVersion));
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index 3fb4a040e0d..62bdf95515d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -29,6 +29,7 @@ import com.yahoo.vespa.hosted.controller.integration.ZoneRegistryMock;
import com.yahoo.vespa.hosted.controller.maintenance.DeploymentUpgrader;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion.Confidence;
+import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import java.time.Duration;
@@ -2912,6 +2913,7 @@ public class DeploymentTriggerTest {
}
@Test
+ @Disabled // For benchmarking, not a test
void miniBenchmark() {
String spec = """
<deployment version="1.0">
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index 0862496275a..ed5226ebc8b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -400,6 +400,7 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
catch (IOException e) {
throw new UncheckedIOException(e);
}
+ deployment.cloudAccount(); // Supplier with side effects >_<
lastPrepareVersion = deployment.platform();
if (prepareException != null)
prepareException.accept(ApplicationId.from(deployment.instance().tenant(),
@@ -409,12 +410,12 @@ public class ConfigServerMock extends AbstractComponent implements ConfigServer
applications.put(id, new Application(id.applicationId(), lastPrepareVersion, appPackage));
ClusterSpec.Id cluster = ClusterSpec.Id.from("default");
- deployment.endpointCertificate(); // Supplier with side effects >_<
+ deployment.endpoints(); // Supplier with side effects >_<
if (nodeRepository().list(id.zoneId(), NodeFilter.all().applications(id.applicationId())).isEmpty())
provision(id.zoneId(), id.applicationId(), cluster);
- this.containerEndpoints.put(id, deployment.containerEndpoints());
+ this.containerEndpoints.put(id, deployment.endpoints().get().endpoints());
deployment.cloudAccount().ifPresent(account -> this.cloudAccounts.put(id, account));
if (!deferLoadBalancerProvisioning.contains(id.zoneId().environment())) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
index 63d479d4c6c..dbb7f80df0e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
@@ -61,8 +61,8 @@ public class ZoneRegistryMock extends AbstractComponent implements ZoneRegistry
public ZoneRegistryMock(SystemName system) {
this.system = system;
if (system.isPublic()) {
- this.zones = List.of(ZoneApiMock.fromId("test.us-east-1"),
- ZoneApiMock.fromId("staging.us-east-3"),
+ this.zones = List.of(ZoneApiMock.newBuilder().withId("test.us-east-1").withCloud("aws").withCloudNativeAvailabilityZone("use1-az4").build(),
+ ZoneApiMock.newBuilder().withId("staging.us-east-3").withCloud("aws").withCloudNativeAvailabilityZone("use3-az1").build(),
ZoneApiMock.newBuilder().withId("prod.aws-us-east-1c").withCloud("aws").withCloudNativeAvailabilityZone("use1-az2").build(),
ZoneApiMock.newBuilder().withId("prod.aws-eu-west-1a").withCloud("aws").withCloudNativeAvailabilityZone("euw1-az3").build(),
ZoneApiMock.newBuilder().withId("dev.aws-us-east-1c").withCloud("aws").withCloudNativeAvailabilityZone("use1-az2").build());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java
index b643d3e90d2..142210843ff 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ApplicationOwnershipConfirmerTest.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.vespa.hosted.controller.LockedTenant;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.ApplicationSummary;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
@@ -80,10 +81,10 @@ public class ApplicationOwnershipConfirmerTest {
assertEquals(issueId2, app.application().ownershipIssueId(), "A new confirmation issue id is stored when something is returned to the maintainer.");
- assertFalse(app.application().owner().isPresent(), "No owner is stored for application");
- issues.owner = Optional.of(User.from("username"));
+ assertFalse(app.application().issueOwner().isPresent(), "No owner is stored for application");
+ issues.owner = Optional.of(new AccountId("username"));
confirmer.maintain();
- assertEquals(app.application().owner().get().username(), "username", "Owner has been added to application");
+ assertEquals(app.application().issueOwner().get().value(), "username", "Owner has been added to application");
// The app deletes all production deployments — see that the issue is forgotten.
assertEquals(issueId2, app.application().ownershipIssueId(), "Confirmation issue for application is still open.");
@@ -103,10 +104,10 @@ public class ApplicationOwnershipConfirmerTest {
private Optional<IssueId> response;
private boolean escalated = false;
- private Optional<User> owner = Optional.empty();
+ private Optional<AccountId> owner = Optional.empty();
@Override
- public Optional<IssueId> confirmOwnership(Optional<IssueId> issueId, ApplicationSummary summary, User assignee, Contact contact) {
+ public Optional<IssueId> confirmOwnership(Optional<IssueId> issueId, ApplicationSummary summary, AccountId assigneeId, User assignee, Contact contact) {
return response;
}
@@ -116,7 +117,7 @@ public class ApplicationOwnershipConfirmerTest {
}
@Override
- public Optional<User> getConfirmedOwner(IssueId issueId) {
+ public Optional<AccountId> getConfirmedOwner(IssueId issueId) {
return owner;
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainerTest.java
index ca4ee5cd4d2..777594c487b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/CertificatePoolMaintainerTest.java
@@ -2,7 +2,7 @@
package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.jdisc.test.MockMetric;
-import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateProviderMock;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateRequest.DnsNameStatus;
@@ -23,7 +23,7 @@ public class CertificatePoolMaintainerTest {
@Test
void new_certs_are_requested_until_limit() {
- tester.flagSource().withIntFlag(Flags.CERT_POOL_SIZE.id(), 3);
+ tester.flagSource().withIntFlag(PermanentFlags.CERT_POOL_SIZE.id(), 3);
assertNumCerts(1);
assertNumCerts(2);
assertNumCerts(3);
@@ -32,7 +32,7 @@ public class CertificatePoolMaintainerTest {
@Test
void cert_contains_expected_names() {
- tester.flagSource().withIntFlag(Flags.CERT_POOL_SIZE.id(), 1);
+ tester.flagSource().withIntFlag(PermanentFlags.CERT_POOL_SIZE.id(), 1);
assertNumCerts(1);
EndpointCertificateProviderMock endpointCertificateProvider = (EndpointCertificateProviderMock) tester.controller().serviceRegistry().endpointCertificateProvider();
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java
index 3f26b0c7b1f..422df76f1fb 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/EndpointCertificateMaintainerTest.java
@@ -6,7 +6,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.CloudAccount;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.jdisc.test.MockMetric;
-import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificate;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateProviderMock;
@@ -22,7 +22,6 @@ import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.integration.SecretStoreMock;
-import org.jetbrains.annotations.NotNull;
import org.junit.jupiter.api.Test;
import java.time.Duration;
@@ -146,7 +145,6 @@ public class EndpointCertificateMaintainerTest {
Stream.of(makeDeploymentAtAge(2), oldestDeployment, makeDeploymentAtAge(4)).min(maintainer.oldestFirst).get());
}
- @NotNull
private EndpointCertificateMaintainer.EligibleJob makeDeploymentAtAge(int ageInDays) {
var deployment = new Deployment(ZoneId.defaultId(), CloudAccount.empty, RevisionId.forProduction(1), Version.emptyVersion,
Instant.now().minus(ageInDays, ChronoUnit.DAYS), DeploymentMetrics.none, DeploymentActivity.none, QuotaUsage.none, OptionalDouble.empty());
@@ -169,7 +167,7 @@ public class EndpointCertificateMaintainerTest {
void cert_pool_is_not_deleted() {
EndpointCertificateProviderMock endpointCertificateProvider = (EndpointCertificateProviderMock) tester.controller().serviceRegistry().endpointCertificateProvider();
- tester.flagSource().withIntFlag(Flags.CERT_POOL_SIZE.id(), 3);
+ tester.flagSource().withIntFlag(PermanentFlags.CERT_POOL_SIZE.id(), 3);
assertEquals(0.0, certificatePoolMaintainer.maintain(), 0.0000001);
assertEquals(0.0, maintainer.maintain(), 0.0000001);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
index 178e8f18489..8fe2ac914ef 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsUpgradeSchedulerTest.java
@@ -148,7 +148,7 @@ public class OsUpgradeSchedulerTest {
}
@Test
- void schedule_stable_release() {
+ void schedule_latest_release() {
ControllerTester tester = new ControllerTester();
OsUpgradeScheduler scheduler = new OsUpgradeScheduler(tester.controller(), Duration.ofDays(1));
Instant t0 = Instant.parse("2021-06-22T00:42:12.00Z"); // Outside trigger period
@@ -161,7 +161,7 @@ public class OsUpgradeSchedulerTest {
// Stable release (tagged outside trigger period) is scheduled once trigger period opens
Version version1 = Version.fromString("8.1");
- tester.serviceRegistry().artifactRepository().addRelease(new OsRelease(version1, OsRelease.Tag.stable,
+ tester.serviceRegistry().artifactRepository().addRelease(new OsRelease(version1, OsRelease.Tag.latest,
Instant.parse("2021-06-21T23:59:00.00Z")));
scheduleUpgradeAfter(Duration.ZERO, version0, scheduler, tester);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java
index af535abce26..6f4052bf0ef 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java
@@ -68,12 +68,14 @@ public class OsVersionStatusUpdaterTest {
.filter(osVersion -> !osVersion.version().isEmpty())
.collect(Collectors.toSet());
List<OsVersion> versionsToCertify = new ArrayList<>(knownVersions);
- versionsToCertify.addAll(List.of(new OsVersion(Version.fromString("95.0.1"), cloud),
- new OsVersion(Version.fromString("98.0.2"), cloud)));
+ OsVersion futureVersion = new OsVersion(Version.fromString("98.0.2"), cloud); // Keep future version
+ versionsToCertify.addAll(List.of(new OsVersion(Version.fromString("3.11"), cloud),
+ futureVersion));
for (OsVersion version : versionsToCertify) {
tester.controller().os().certify(version.version(), version.cloud(), Version.fromString("1.2.3"));
}
- assertEquals(knownVersions.size() + 2, certifiedOsVersions(tester).size());
+ knownVersions.add(futureVersion);
+ assertEquals(knownVersions.size() + 1, certifiedOsVersions(tester).size());
statusUpdater.maintain();
assertEquals(knownVersions, certifiedOsVersions(tester));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
index 8196aa48197..fac05fc125f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
@@ -81,7 +81,7 @@ public class ResourceMeterMaintainerTest {
assertEquals(1.72,
(Double) metrics.getMetric(context ->
z1.value().equals(context.get("zoneId")) &&
- app1.tenant().value().equals(context.get("tenant")),
+ app1.tenant().value().equals(context.get("tenantName")),
"metering.cost.hourly").get(),
Double.MIN_VALUE);
}
@@ -109,8 +109,8 @@ public class ResourceMeterMaintainerTest {
assertEquals(tester.clock().millis() / 1000, metrics.getMetric("metering_last_reported"));
assertEquals(2224.0d, (Double) metrics.getMetric("metering_total_reported"), Double.MIN_VALUE);
- assertEquals(24d, (Double) metrics.getMetric(context -> "tenant1".equals(context.get("tenant")), "metering.vcpu").get(), Double.MIN_VALUE);
- assertEquals(40d, (Double) metrics.getMetric(context -> "tenant2".equals(context.get("tenant")), "metering.vcpu").get(), Double.MIN_VALUE);
+ assertEquals(24d, (Double) metrics.getMetric(context -> "tenant1".equals(context.get("tenantName")), "metering.vcpu").get(), Double.MIN_VALUE);
+ assertEquals(40d, (Double) metrics.getMetric(context -> "tenant2".equals(context.get("tenantName")), "metering.vcpu").get(), Double.MIN_VALUE);
// Metering is not refreshed
assertFalse(resourceClient.hasRefreshedMaterializedView());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java
index a7360f3d2d8..a3595ca587c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java
@@ -16,7 +16,6 @@ import org.junit.jupiter.api.Test;
import java.time.Duration;
import java.util.List;
import java.util.function.Function;
-import java.util.stream.Collectors;
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -324,9 +323,9 @@ public class SystemUpgraderTest {
for (var zone : List.of(zone1, zone2)) {
systemUpgrader.maintain();
completeUpgrade(List.of(SystemApplication.tenantHost,
- SystemApplication.proxyHost,
- SystemApplication.configServerHost),
- version2, zone);
+ SystemApplication.proxyHost,
+ SystemApplication.configServerHost),
+ version2, zone);
completeUpgrade(SystemApplication.configServer, version2, zone);
systemUpgrader.maintain();
completeUpgrade(SystemApplication.proxy, version2, zone);
@@ -341,12 +340,12 @@ public class SystemUpgraderTest {
for (var zone : List.of(zone2, zone1)) {
systemUpgrader.maintain();
completeUpgrade(List.of(SystemApplication.tenantHost,
- SystemApplication.configServerHost,
- SystemApplication.proxy),
- version1, zone);
+ SystemApplication.configServerHost,
+ SystemApplication.proxyHost,
+ SystemApplication.proxy),
+ version1, zone);
convergeServices(SystemApplication.proxy, zone);
- List<SystemApplication> lastToDowngrade = List.of(SystemApplication.configServer,
- SystemApplication.proxyHost);
+ List<SystemApplication> lastToDowngrade = List.of(SystemApplication.configServer);
assertWantedVersion(lastToDowngrade, version2, zone);
// ... and then configserver and proxyhost
@@ -463,7 +462,7 @@ public class SystemUpgraderTest {
private List<Node> listNodes(ZoneApi zone, SystemApplication application) {
return nodeRepository().list(zone.getId(), NodeFilter.all().applications(application.id())).stream()
- .filter(SystemUpgrader::eligibleForUpgrade)
+ .filter(node -> SystemUpgrader.eligibleForUpgrade(node, zone))
.toList();
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
index f287bc52604..13f7ec2a4ec 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
@@ -17,6 +17,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationV
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
@@ -64,25 +65,30 @@ public class ApplicationSerializerTest {
private static final Path testData = Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/");
private static final ZoneId zone1 = ZoneId.from("prod", "us-west-1");
private static final ZoneId zone2 = ZoneId.from("prod", "us-east-3");
- private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" +
- "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9\n" +
- "z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==\n" +
- "-----END PUBLIC KEY-----\n");
- private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("-----BEGIN PUBLIC KEY-----\n" +
- "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE\n" +
- "pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==\n" +
- "-----END PUBLIC KEY-----\n");
+ private static final PublicKey publicKey = KeyUtils.fromPemEncodedPublicKey("""
+ -----BEGIN PUBLIC KEY-----
+ MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuKVFA8dXk43kVfYKzkUqhEY2rDT9
+ z/4jKSTHwbYR8wdsOSrJGVEUPbS2nguIJ64OJH7gFnxM6sxUVj+Nm2HlXw==
+ -----END PUBLIC KEY-----
+ """);
+ private static final PublicKey otherPublicKey = KeyUtils.fromPemEncodedPublicKey("""
+ -----BEGIN PUBLIC KEY-----
+ MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEFELzPyinTfQ/sZnTmRp5E4Ve/sbE
+ pDhJeqczkyFcT2PysJ5sZwm7rKPEeXDOhzTPCyRvbUqc2SGdWbKUGGa/Yw==
+ -----END PUBLIC KEY-----
+ """);
@Test
void testSerialization() throws Exception {
- DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("<deployment version='1.0'>\n" +
- " <staging/>\n" +
- " <instance id=\"i1\">\n" +
- " <prod>\n" +
- " <region>us-west-1</region>\n" +
- " </prod>\n" +
- " </instance>\n" +
- "</deployment>");
+ DeploymentSpec deploymentSpec = DeploymentSpec.fromXml("""
+ <deployment version='1.0'>
+ <staging/>
+ <instance id="i1">
+ <prod>
+ <region>us-west-1</region>
+ </prod>
+ </instance>
+ </deployment>""");
ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" +
" <allow until='2017-06-15'>deployment-removal</allow>" +
"</validation-overrides>");
@@ -154,6 +160,7 @@ public class ApplicationSerializerTest {
Optional.of(IssueId.from("4321")),
Optional.of(IssueId.from("1234")),
Optional.of(User.from("by-username")),
+ Optional.of(new AccountId("foo8ar")),
OptionalInt.of(7),
new ApplicationMetrics(0.5, 0.9),
Set.of(publicKey, otherPublicKey),
@@ -206,7 +213,8 @@ public class ApplicationSerializerTest {
serialized.require(id1.instance()).jobPause(DeploymentContext.stagingTest));
assertEquals(original.ownershipIssueId(), serialized.ownershipIssueId());
- assertEquals(original.owner(), serialized.owner());
+ assertEquals(original.userOwner(), serialized.userOwner());
+ assertEquals(original.issueOwner(), serialized.issueOwner());
assertEquals(original.majorVersion(), serialized.majorVersion());
assertEquals(original.deployKeys(), serialized.deployKeys());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java
index f685c75bbe3..d9007910541 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RoutingPolicySerializerTest.java
@@ -4,8 +4,8 @@ package com.yahoo.vespa.hosted.controller.persistence;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.hosted.controller.application.Endpoint;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.GeneratedEndpoint;
import com.yahoo.vespa.hosted.controller.routing.RoutingPolicy;
@@ -47,7 +47,7 @@ public class RoutingPolicySerializerTest {
Set.of(),
RoutingStatus.DEFAULT,
false,
- List.of(new GeneratedEndpoint("deadbeef", "cafed00d", Endpoint.AuthMethod.mtls))),
+ List.of(new GeneratedEndpoint("deadbeef", "cafed00d", AuthMethod.mtls))),
new RoutingPolicy(id2,
Optional.of(HostName.of("long-and-ugly-name-2")),
Optional.empty(),
@@ -58,7 +58,7 @@ public class RoutingPolicySerializerTest {
RoutingStatus.Agent.tenant,
Instant.ofEpochSecond(123)),
true,
- List.of(new GeneratedEndpoint("cafed00d", "deadbeef", Endpoint.AuthMethod.token))),
+ List.of(new GeneratedEndpoint("cafed00d", "deadbeef", AuthMethod.token))),
new RoutingPolicy(id1,
Optional.empty(),
Optional.of("127.0.0.1"),
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java
index 3aac3e2f757..cae5037ab6f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.controller.persistence;
import com.google.common.collect.ImmutableMap;
import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.CloudAccount;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
@@ -99,6 +100,7 @@ public class RunSerializerTest {
"5MyyPSoCIBltOcmaPfdN03L3zqbqZ6PgUBWsvAHgiBzL3hrtJ+iy\n" +
"-----END CERTIFICATE-----"),
run.testerCertificate().get());
+ assertEquals(Optional.empty(), run.cloudAccount());
assertEquals(ImmutableMap.<Step, StepInfo>builder()
.put(deployInitialReal, new StepInfo(deployInitialReal, unfinished, Optional.empty()))
.put(installInitialReal, new StepInfo(installInitialReal, failed, Optional.of(Instant.ofEpochMilli(1196676940000L))))
@@ -118,10 +120,11 @@ public class RunSerializerTest {
run.steps());
run = run.with(1L << 50)
- .with(Instant.now().truncatedTo(MILLIS))
- .noNodesDownSince(Instant.now().truncatedTo(MILLIS))
- .aborted(false)
- .finished(Instant.now().truncatedTo(MILLIS));
+ .with(Instant.now().truncatedTo(MILLIS))
+ .noNodesDownSince(Instant.now().truncatedTo(MILLIS))
+ .aborted(false)
+ .with(CloudAccount.from("gcp:foobar"))
+ .finished(Instant.now().truncatedTo(MILLIS));
assertEquals(aborted, run.status());
assertTrue(run.hasEnded());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java
index 8aa53fb5cd4..70be0734ed9 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/SupportAccessSerializerTest.java
@@ -10,7 +10,6 @@ import com.yahoo.slime.JsonFormat;
import com.yahoo.slime.Slime;
import com.yahoo.vespa.hosted.controller.support.access.SupportAccess;
import com.yahoo.vespa.hosted.controller.support.access.SupportAccessGrant;
-import org.intellij.lang.annotations.Language;
import org.junit.jupiter.api.Test;
import javax.security.auth.x500.X500Principal;
@@ -36,7 +35,6 @@ public class SupportAccessSerializerTest {
.withDisallowed("andreer", hour(22))
.withAllowedUntil(hour(36), "andreer", hour(30));
- @Language("JSON")
private final String expectedWithCertificates = "{\n"
+ " \"history\": [\n"
+ " {\n"
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
index 915466dac26..4eb6e080737 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiCloudTest.java
@@ -27,7 +27,6 @@ import com.yahoo.vespa.hosted.controller.security.Auth0Credentials;
import com.yahoo.vespa.hosted.controller.security.CloudTenantSpec;
import com.yahoo.vespa.hosted.controller.security.Credentials;
import com.yahoo.vespa.hosted.controller.tenant.CloudTenant;
-import org.assertj.core.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -35,7 +34,6 @@ import java.io.File;
import java.util.Collections;
import java.util.Optional;
import java.util.Set;
-import java.util.regex.Pattern;
import static com.yahoo.application.container.handler.Request.Method.DELETE;
import static com.yahoo.application.container.handler.Request.Method.GET;
@@ -458,7 +456,7 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
request("/application/v4/tenant/scoober/application/unique/submit", POST)
.data(data)
.roles(Set.of(Role.developer(tenantName))),
- "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
+ "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\",\"build\":1}");
assertTrue(tester.controller().applications().getApplication(TenantAndApplicationId.from(tenantName, application)).isPresent());
}
@@ -471,13 +469,13 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
String regexGenerateToken = "\\{\"id\":\"myTokenId\",\"token\":\"vespa_cloud_.*\",\"fingerprint\":\".*\"}";
tester.assertResponse(request("/application/v4/tenant/scoober/token/myTokenId", POST).roles(Role.developer(tenantName)),
- (response) -> Assertions.assertThat(new String(response.getBody(), UTF_8)).matches(Pattern.compile(regexGenerateToken)),
+ (response) -> assertTrue(new String(response.getBody(), UTF_8).matches(regexGenerateToken)),
200);
String regexListTokens = "\\{\"tokens\":\\[\\{\"id\":\"myTokenId\",\"versions\":\\[\\{\"fingerprint\":\".*\",\"created\":\".*\",\"author\":\"user@test\",\"expiration\":\".*\"}]}]}";
tester.assertResponse(request("/application/v4/tenant/scoober/token", GET)
.roles(Role.developer(tenantName)),
- (response) -> Assertions.assertThat(new String(response.getBody(), UTF_8)).matches(Pattern.compile(regexListTokens)),
+ (response) -> assertTrue(new String(response.getBody(), UTF_8).matches(regexListTokens)),
200);
// Rejects invalid tokenIds on create
@@ -567,7 +565,7 @@ public class ApplicationApiCloudTest extends ControllerContainerCloudTest {
"\"deployDirectly\":" + deployDirectly +
applicationVersion.map(version ->
"," +
- "\"buildNumber\":" + version.buildNumber().getAsLong() + "," +
+ "\"buildNumber\":" + version.buildNumber() + "," +
"\"sourceRevision\":{" +
"\"repository\":\"" + version.source().get().repository() + "\"," +
"\"branch\":\"" + version.source().get().branch() + "\"," +
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 98775ea214d..345f164f8da 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -44,6 +44,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServ
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
+import com.yahoo.vespa.hosted.controller.api.integration.organization.AccountId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.User;
@@ -336,7 +337,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 123)),
- "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
+ "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\",\"build\":1}");
app1.runJob(DeploymentContext.systemTest).runJob(DeploymentContext.stagingTest).runJob(DeploymentContext.productionUsCentral1);
@@ -365,7 +366,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant2/application/application2/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackage, 1000)),
- "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
+ "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\",\"build\":1}");
deploymentTester.triggerJobs();
@@ -873,7 +874,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(packageWithService, 123)),
- "{\"message\":\"application build 2, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
+ "{\"message\":\"application build 2, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\",\"build\":2}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> assertTrue(response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
@@ -918,7 +919,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
.screwdriverIdentity(SCREWDRIVER_ID)
.header("X-Content-Hash", Base64.getEncoder().encodeToString(Signatures.sha256Digest(streamer::data)))
.data(streamer),
- "{\"message\":\"application build 3, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
+ "{\"message\":\"application build 3, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\",\"build\":3}");
// Sixth attempt has a multi-instance deployment spec, and is accepted.
ApplicationPackage multiInstanceSpec = new ApplicationPackageBuilder()
@@ -931,7 +932,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/instance1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(multiInstanceSpec, 123)),
- "{\"message\":\"application build 4, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
+ "{\"message\":\"application build 4, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\",\"build\":4}");
// DELETE submitted build, to mark it as non-deployable
@@ -1032,7 +1033,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.applications().lockApplicationOrThrow(id, application ->
tester.controller().applications().store(application.withDeploymentIssueId(IssueId.from("123"))
.withOwnershipIssueId(IssueId.from("321"))
- .withOwner(User.from("owner-username"))));
+ .withOwner(new AccountId("owner-account-id"))));
}
@Test
@@ -1305,7 +1306,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit", POST)
.screwdriverIdentity(SCREWDRIVER_ID)
.data(createApplicationSubmissionData(applicationPackageInstance1, 1000)),
- "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
+ "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\",\"build\":1}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET)
.properties(Map.of("build", "42"))
.userIdentity(HOSTED_VESPA_OPERATOR),
@@ -1521,7 +1522,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackage, 123))
.screwdriverIdentity(screwdriverId),
- "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
+ "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\",\"build\":1}");
}
@Test
@@ -1804,7 +1805,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/submit/", POST)
.data(createApplicationSubmissionData(applicationPackageDefault, SCREWDRIVER_ID.value()))
.screwdriverIdentity(SCREWDRIVER_ID),
- "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}",
+ "{\"message\":\"application build 1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\",\"build\":1}",
200);
}
@@ -1849,7 +1850,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
"\"ignoreValidationErrors\":false" +
applicationVersion.map(version ->
"," +
- "\"buildNumber\":" + version.buildNumber().getAsLong() + "," +
+ "\"buildNumber\":" + version.buildNumber() + "," +
"\"sourceRevision\":{" +
"\"repository\":\"" + version.source().get().repository() + "\"," +
"\"branch\":\"" + version.source().get().branch() + "\"," +
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
index 93937bdc4af..905330c6daf 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
@@ -14,13 +14,10 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TestReport;
-import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
-import com.yahoo.vespa.hosted.controller.notification.Notification.Type;
-import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
@@ -34,6 +31,7 @@ import java.util.List;
import java.util.Optional;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE;
+import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.applicationPackage;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.devAwsUsEast2a;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.devUsEast1;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.productionUsCentral1;
@@ -42,8 +40,6 @@ import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.pro
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.stagingTest;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.systemTest;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.testUsCentral1;
-import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.applicationPackage;
-import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.deploymentFailed;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.installationFailed;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.invalidApplication;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.running;
@@ -208,16 +204,18 @@ public class JobControllerApiHandlerHelperTest {
void testEnclave() {
var cloudAccount = CloudAccount.from("aws:123456789012");
var applicationPackage = new ApplicationPackageBuilder()
+ .cloudAccount(cloudAccount.value())
.stagingTest()
.systemTest()
- .region("aws-us-east-1c", cloudAccount.value())
+ .region("aws-us-east-1c")
.build();
var tester = new DeploymentTester(new ControllerTester(SystemName.Public));
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount.value()), String.class);
- tester.controllerTester().zoneRegistry().configureCloudAccount(cloudAccount, ZoneId.from("prod.aws-us-east-1c"));
+ tester.controllerTester().zoneRegistry().configureCloudAccount(cloudAccount, systemTest.zone(), stagingTest.zone(), ZoneId.from("prod.aws-us-east-1c"));
var app = tester.newDeploymentContext();
app.submit(applicationPackage).deploy();
+ assertEquals(Optional.of(cloudAccount), tester.controllerTester().configServer().cloudAccount(app.deploymentIdIn(systemTest.zone())));
assertResponse(JobControllerApiHandlerHelper.overviewResponse(tester.controller(), app.application().id(), URI.create("https://some.url:43/root/")), "overview-enclave.json");
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/instance1-recursive.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/instance1-recursive.json
index 32b091a92ca..e5ee9157792 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/instance1-recursive.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/instance1-recursive.json
@@ -182,6 +182,6 @@
"lastWritesPerSecond": 2.0
},
"ownershipIssueId": "321",
- "owner": "owner-username",
+ "owner": "owner-account-id",
"deploymentIssueId": "123"
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json
index 3673c1bdf07..9d82ed97849 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json
@@ -5,11 +5,11 @@
"steps": [
{
"type": "instance",
- "dependencies": [ ],
+ "dependencies": [],
"declared": true,
"instance": "default",
"readyAt": 0,
- "deploying": { },
+ "deploying": {},
"latestVersions": {
"platform": {
"platform": "6.1.0",
@@ -21,7 +21,7 @@
"upgrade": false
}
],
- "blockers": [ ]
+ "blockers": []
},
"application": {
"application": {
@@ -42,21 +42,24 @@
}
}
],
- "blockers": [ ]
+ "blockers": []
}
},
"delayCause": null
},
{
"type": "test",
- "dependencies": [ ],
+ "dependencies": [],
"declared": true,
"instance": "default",
"readyAt": 0,
"jobName": "staging-test",
"url": "https://some.url:43/instance/default/job/staging-test",
"environment": "staging",
- "toRun": [ ],
+ "toRun": [],
+ "enclave": {
+ "cloudAccount": "aws:123456789012"
+ },
"runs": [
{
"id": 1,
@@ -137,14 +140,17 @@
},
{
"type": "test",
- "dependencies": [ ],
+ "dependencies": [],
"declared": true,
"instance": "default",
"readyAt": 0,
"jobName": "system-test",
"url": "https://some.url:43/instance/default/job/system-test",
"environment": "test",
- "toRun": [ ],
+ "toRun": [],
+ "enclave": {
+ "cloudAccount": "aws:123456789012"
+ },
"runs": [
{
"id": 1,
@@ -209,11 +215,7 @@
},
{
"type": "deployment",
- "dependencies": [
- 0,
- 1,
- 2
- ],
+ "dependencies": [0, 1, 2],
"declared": true,
"instance": "default",
"readyAt": 1600000000000,
@@ -228,7 +230,7 @@
"sourceUrl": "repository1/tree/commit1",
"commit": "commit1"
},
- "toRun": [ ],
+ "toRun": [],
"enclave": {
"cloudAccount": "aws:123456789012"
},
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/recursive-root.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/recursive-root.json
index 6dc58cc2800..f1aba622fcf 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/recursive-root.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/recursive-root.json
@@ -189,7 +189,7 @@
"lastWritesPerSecond": 2.0
},
"ownershipIssueId": "321",
- "owner": "owner-username",
+ "owner": "owner-account-id",
"deploymentIssueId": "123"
}
],
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/tenant1-recursive.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/tenant1-recursive.json
index 210a637ece8..e252e042e43 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/tenant1-recursive.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/tenant1-recursive.json
@@ -188,7 +188,7 @@
"lastWritesPerSecond": 2.0
},
"ownershipIssueId": "321",
- "owner": "owner-username",
+ "owner": "owner-account-id",
"deploymentIssueId": "123"
}
],
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenServiceTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenServiceTest.java
index e148eac7365..acfba03a700 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenServiceTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/dataplanetoken/DataplaneTokenServiceTest.java
@@ -15,8 +15,9 @@ import java.security.Principal;
import java.time.Duration;
import java.util.Collection;
import java.util.List;
+import java.util.Set;
-import static org.assertj.core.api.Assertions.assertThat;
+import static java.util.stream.Collectors.toSet;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -43,13 +44,13 @@ public class DataplaneTokenServiceTest {
assertNotEquals(dataplaneToken1.fingerPrint(), dataplaneToken2.fingerPrint());
List<DataplaneTokenVersions> dataplaneTokenVersions = dataplaneTokenService.listTokens(tenantName);
- List<FingerPrint> tokenFingerprints = dataplaneTokenVersions.stream()
+ Set<FingerPrint> tokenFingerprints = dataplaneTokenVersions.stream()
.filter(token -> token.tokenId().equals(tokenId))
.map(DataplaneTokenVersions::tokenVersions)
.flatMap(Collection::stream)
.map(DataplaneTokenVersions.Version::fingerPrint)
- .toList();
- assertThat(tokenFingerprints).containsExactlyInAnyOrder(dataplaneToken1.fingerPrint(), dataplaneToken2.fingerPrint());
+ .collect(toSet());
+ assertEquals(tokenFingerprints, Set.of(dataplaneToken1.fingerPrint(), dataplaneToken2.fingerPrint()));
}
@Test
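
The change above swaps AssertJ's `containsExactlyInAnyOrder` for a plain JUnit 5 assertion over a `Set`. A minimal, standalone sketch of that pattern (class and method names here are illustrative, not taken from the test):

```java
import static java.util.stream.Collectors.toSet;
import static org.junit.jupiter.api.Assertions.assertEquals;

import java.util.List;
import java.util.Set;

class OrderInsensitiveAssertionSketch {

    // Compare element sets instead of ordered lists. Unlike containsExactlyInAnyOrder,
    // this ignores duplicates, which is acceptable when elements are unique
    // (as the token fingerprints in the test above are).
    static void assertSameElements(Set<String> expected, List<String> actual) {
        assertEquals(expected, actual.stream().collect(toSet()));
    }
}
```
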
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java
index f1a40307804..acb07102008 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/os/OsApiTest.java
@@ -21,7 +21,6 @@ import com.yahoo.vespa.hosted.controller.maintenance.OsUpgrader;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest;
import com.yahoo.vespa.hosted.controller.versions.OsVersionStatus;
-import org.intellij.lang.annotations.Language;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -202,7 +201,7 @@ public class OsApiTest extends ControllerContainerTest {
return tester.serviceRegistry().configServerMock().nodeRepository();
}
- private void assertResponse(Request request, @Language("JSON") String body, int statusCode) {
+ private void assertResponse(Request request, String body, int statusCode) {
addIdentityToRequest(request, operator);
tester.assertResponse(request, body, statusCode);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployResultTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployResultTest.java
index d0d362abcfc..8d643534e0c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployResultTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployResultTest.java
@@ -10,11 +10,12 @@ import com.yahoo.vespa.hosted.controller.integration.ZoneRegistryMock;
import org.junit.jupiter.api.Test;
import java.util.List;
+import java.util.Set;
import static com.yahoo.vespa.hosted.controller.restapi.systemflags.SystemFlagsDeployResult.FlagDataChange;
import static com.yahoo.vespa.hosted.controller.restapi.systemflags.SystemFlagsDeployResult.OperationError;
import static com.yahoo.vespa.hosted.controller.restapi.systemflags.SystemFlagsDeployResult.merge;
-import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
/**
* @author bjorncs
@@ -29,17 +30,15 @@ public class SystemFlagsDeployResultTest {
FlagId flagOne = new FlagId("flagone");
FlagId flagTwo = new FlagId("flagtwo");
SystemFlagsDeployResult result = new SystemFlagsDeployResult(
- List.of(
- FlagDataChange.deleted(flagOne, controllerTarget)),
- List.of(
- OperationError.deleteFailed("delete failed", controllerTarget, flagTwo)),
+ List.of(FlagDataChange.deleted(flagOne, controllerTarget)),
+ List.of(OperationError.deleteFailed("delete failed", controllerTarget, flagTwo)),
List.of());
WireSystemFlagsDeployResult wire = result.toWire();
- assertThat(wire.changes).hasSize(1);
- assertThat(wire.changes.get(0).flagId).isEqualTo(flagOne.toString());
- assertThat(wire.errors).hasSize(1);
- assertThat(wire.errors.get(0).flagId).isEqualTo(flagTwo.toString());
+ assertEquals(1, wire.changes.size());
+ assertEquals(wire.changes.get(0).flagId, flagOne.toString());
+ assertEquals(1, wire.errors.size());
+ assertEquals(wire.errors.get(0).flagId, flagTwo.toString());
}
@Test
@@ -65,13 +64,13 @@ public class SystemFlagsDeployResultTest {
SystemFlagsDeployResult mergedResult = merge(results);
List<FlagDataChange> changes = mergedResult.flagChanges();
- assertThat(changes).hasSize(1);
+ assertEquals(1, changes.size());
FlagDataChange change = changes.get(0);
- assertThat(change.targets()).containsOnly(controllerTarget, prodUsWest1Target);
+ assertEquals(change.targets(), Set.of(controllerTarget, prodUsWest1Target));
List<OperationError> errors = mergedResult.errors();
- assertThat(errors).hasSize(1);
+ assertEquals(1, errors.size());
OperationError error = errors.get(0);
- assertThat(error.targets()).containsOnly(controllerTarget, prodUsWest1Target);
+ assertEquals(error.targets(), Set.of(controllerTarget, prodUsWest1Target));
}
}
\ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployerTest.java
index 8ad64a08244..cb330d28d22 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/systemflags/SystemFlagsDeployerTest.java
@@ -25,6 +25,7 @@ import static com.yahoo.vespa.hosted.controller.restapi.systemflags.SystemFlagsD
import static com.yahoo.yolean.Exceptions.uncheck;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
@@ -174,7 +175,7 @@ public class SystemFlagsDeployerTest {
assertThat(result.flagChanges()).containsOnly(
FlagDataChange.created(FLAG_ID, controllerTarget, defaultData));
- assertThat(result.errors()).isEmpty();
+ assertTrue(result.errors().isEmpty());
}
@Test
@@ -209,8 +210,7 @@ public class SystemFlagsDeployerTest {
.build();
SystemFlagsDeployer deployer = new SystemFlagsDeployer(flagsClient, SYSTEM, Set.of(controllerTarget));
SystemFlagsDeployResult result = deployer.deployFlags(archive, false);
- assertThat(result.flagChanges())
- .isEmpty();
+ assertTrue(result.flagChanges().isEmpty());
assertThat(result.errors())
.containsOnly(OperationError.archiveValidationFailed("Unknown flag file: flags/my-flag/main.prod.unknown-region.json"));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index 46ec42cab8f..1dfaf2109c7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -14,6 +14,7 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.zone.AuthMethod;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.flags.Flags;
@@ -41,6 +42,7 @@ import com.yahoo.vespa.hosted.controller.dns.NameServiceQueue;
import com.yahoo.vespa.hosted.controller.dns.RemoveRecords;
import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
import com.yahoo.vespa.hosted.rotation.config.RotationsConfig;
+import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
@@ -334,6 +336,10 @@ public class RoutingPoliciesTest {
var context1 = tester.newDeploymentContext("tenant1", "app1", "default");
// Deploy application
+ ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region())
+ .region(zone2.region())
+ .container("c0", AuthMethod.mtls, AuthMethod.token)
+ .build();
tester.provisionLoadBalancers(1, context1.instanceId(), false, zone1, zone2);
context1.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
@@ -358,12 +364,23 @@ public class RoutingPoliciesTest {
// Ordinary endpoints are not created in DNS
assertEquals(List.of(), tester.recordNames());
assertEquals(2, tester.policiesOf(context.instanceId()).size());
- // Generated endpoints are created in DNS
+ }
+
+ @Test
+ @Disabled // TODO(mpolden): Enable this test when we start creating generated endpoints for shared routing
+ void zone_routing_policies_with_shared_routing_and_generated_endpoint() {
+ var tester = new RoutingPoliciesTester(new DeploymentTester(), false);
+ var context = tester.newDeploymentContext("tenant1", "app1", "default");
+ tester.provisionLoadBalancers(1, context.instanceId(), true, zone1, zone2);
tester.controllerTester().flagSource().withBooleanFlag(Flags.RANDOMIZED_ENDPOINT_NAMES.id(), true);
addCertificateToPool("cafed00d", UnassignedCertificate.State.ready, tester);
+ ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region())
+ .region(zone2.region())
+ .container("c0", AuthMethod.mtls, AuthMethod.token)
+ .build();
context.submit(applicationPackage).deferLoadBalancerProvisioningIn(Environment.prod).deploy();
- assertEquals(List.of("b22ab332.cafed00d.z.vespa.oath.cloud",
- "d71005bf.cafed00d.z.vespa.oath.cloud"),
+ assertEquals(List.of("c0a25b7c.cafed00d.z.vespa.oath.cloud",
+ "dc5e383c.cafed00d.z.vespa.oath.cloud"),
tester.recordNames());
}
@@ -757,7 +774,7 @@ public class RoutingPoliciesTest {
tester.routingPolicies().setRoutingStatus(context.deploymentIdIn(zone2), RoutingStatus.Value.out,
RoutingStatus.Agent.tenant);
} catch (IllegalArgumentException e) {
- assertEquals("Cannot deactivate routing for tenant1.app1 in prod.us-central-1 as it's the last remaining active deployment in endpoint https://r0.app1.tenant1.global.vespa.oath.cloud/ [scope=global, legacy=false, routingMethod=exclusive, authMethod=mtls]", e.getMessage());
+ assertEquals("Cannot deactivate routing for tenant1.app1 in prod.us-central-1 as it's the last remaining active deployment in endpoint https://r0.app1.tenant1.global.vespa.oath.cloud/ [scope=global, legacy=false, routingMethod=exclusive, authMethod=mtls, name=r0]", e.getMessage());
}
context.flushDnsUpdates();
tester.assertTargets(context.instanceId(), EndpointId.of("r0"), 0, zone2);
@@ -942,7 +959,7 @@ public class RoutingPoliciesTest {
tester.routingPolicies().setRoutingStatus(mainZone2, RoutingStatus.Value.out, RoutingStatus.Agent.tenant);
fail("Expected exception");
} catch (IllegalArgumentException e) {
- assertEquals("Cannot deactivate routing for tenant1.app1.main in prod.south as it's the last remaining active deployment in endpoint https://a0.app1.tenant1.a.vespa.oath.cloud/ [scope=application, legacy=false, routingMethod=exclusive, authMethod=mtls]",
+ assertEquals("Cannot deactivate routing for tenant1.app1.main in prod.south as it's the last remaining active deployment in endpoint https://a0.app1.tenant1.a.vespa.oath.cloud/ [scope=application, legacy=false, routingMethod=exclusive, authMethod=mtls, name=a0]",
e.getMessage());
}
@@ -993,14 +1010,17 @@ public class RoutingPoliciesTest {
var tester = new RoutingPoliciesTester(SystemName.Public);
var context = tester.newDeploymentContext("tenant1", "app1", "default");
tester.controllerTester().flagSource().withBooleanFlag(Flags.RANDOMIZED_ENDPOINT_NAMES.id(), true);
+ tester.enableTokenEndpoint(true);
addCertificateToPool("cafed00d", UnassignedCertificate.State.ready, tester);
// Deploy application
- int clustersPerZone = 1;
+ int clustersPerZone = 2;
var zone1 = ZoneId.from("prod", "aws-us-east-1c");
var zone2 = ZoneId.from("prod", "aws-eu-west-1a");
ApplicationPackage applicationPackage = applicationPackageBuilder().region(zone1.region())
.region(zone2.region())
+ .container("c0", AuthMethod.mtls)
+ .container("c1", AuthMethod.mtls, AuthMethod.token)
.endpoint("foo", "c0")
.applicationEndpoint("bar", "c0", Map.of(zone1.region().value(), Map.of(InstanceName.defaultName(), 1)))
.build();
@@ -1011,6 +1031,8 @@ public class RoutingPoliciesTest {
List<String> expectedRecords = List.of(
// save me, jebus!
"b22ab332.cafed00d.z.vespa-app.cloud",
+ "b7e79800.cafed00d.z.vespa-app.cloud",
+ "b8ee0967.cafed00d.z.vespa-app.cloud",
"bar.app1.tenant1.a.vespa-app.cloud",
"bar.cafed00d.a.vespa-app.cloud",
"c0.app1.tenant1.aws-eu-west-1.w.vespa-app.cloud",
@@ -1019,26 +1041,42 @@ public class RoutingPoliciesTest {
"c0.app1.tenant1.aws-us-east-1c.z.vespa-app.cloud",
"c0.cafed00d.aws-eu-west-1.w.vespa-app.cloud",
"c0.cafed00d.aws-us-east-1.w.vespa-app.cloud",
- "dd0971b4.cafed00d.z.vespa-app.cloud",
+ "c1.app1.tenant1.aws-eu-west-1a.z.vespa-app.cloud",
+ "c1.app1.tenant1.aws-us-east-1c.z.vespa-app.cloud",
+ "c60d3149.cafed00d.z.vespa-app.cloud",
+ "cbff1506.cafed00d.z.vespa-app.cloud",
+ "d151139b.cafed00d.z.vespa-app.cloud",
"foo.app1.tenant1.g.vespa-app.cloud",
- "foo.cafed00d.g.vespa-app.cloud"
+ "foo.cafed00d.g.vespa-app.cloud",
+ "token-c1.app1.tenant1.aws-eu-west-1a.z.vespa-app.cloud",
+ "token-c1.app1.tenant1.aws-us-east-1c.z.vespa-app.cloud"
);
assertEquals(expectedRecords, tester.recordNames());
- assertEquals(2, tester.policiesOf(context.instanceId()).size());
+ assertEquals(4, tester.policiesOf(context.instanceId()).size());
+ ClusterSpec.Id cluster0 = ClusterSpec.Id.from("c0");
+ ClusterSpec.Id cluster1 = ClusterSpec.Id.from("c1");
for (var zone : List.of(zone1, zone2)) {
- EndpointList endpoints = tester.controllerTester().controller().routing().readEndpointsOf(context.deploymentIdIn(zone)).scope(Endpoint.Scope.zone);
- assertEquals(1, endpoints.generated().size());
+ EndpointList generated = tester.controllerTester().controller().routing()
+ .readEndpointsOf(context.deploymentIdIn(zone))
+ .scope(Endpoint.Scope.zone)
+ .generated();
+ assertEquals(1, generated.cluster(cluster0).size());
+ assertEquals(0, generated.cluster(cluster0).authMethod(AuthMethod.token).size());
+ assertEquals(2, generated.cluster(cluster1).size());
+ assertEquals(1, generated.cluster(cluster1).authMethod(AuthMethod.token).size());
}
+
// Ordinary endpoints point to expected targets
- tester.assertTargets(context.instanceId(), EndpointId.of("foo"), ClusterSpec.Id.from("c0"), 0,
+ tester.assertTargets(context.instanceId(), EndpointId.of("foo"), cluster0, 0,
Map.of(zone1, 1L, zone2, 1L));
- tester.assertTargets(context.application().id(), EndpointId.of("bar"), ClusterSpec.Id.from("c0"), 0,
+ tester.assertTargets(context.application().id(), EndpointId.of("bar"), cluster0, 0,
Map.of(context.deploymentIdIn(zone1), 1));
+
// Generated endpoints point to expected targets
- tester.assertTargets(context.instanceId(), EndpointId.of("foo"), ClusterSpec.Id.from("c0"), 0,
+ tester.assertTargets(context.instanceId(), EndpointId.of("foo"), cluster0, 0,
Map.of(zone1, 1L, zone2, 1L),
true);
- tester.assertTargets(context.application().id(), EndpointId.of("bar"), ClusterSpec.Id.from("c0"), 0,
+ tester.assertTargets(context.application().id(), EndpointId.of("bar"), cluster0, 0,
Map.of(context.deploymentIdIn(zone1), 1),
true);
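
For readers following the new expected record names above, a sketch of the builder calls involved, reusing the test DSL that already appears in this diff (`applicationPackageBuilder()`, `AuthMethod`); it is a fragment of the test class, not standalone code:

```java
import com.yahoo.config.provision.zone.AuthMethod;

// Sketch only: a token-enabled container ("c1") gets both an mTLS and a token zone endpoint
// per zone (hence the extra "token-c1.*" record names and two generated endpoints asserted
// above), while an mTLS-only container ("c0") gets a single generated endpoint per zone.
ApplicationPackage pkg = applicationPackageBuilder()
        .region("aws-us-east-1c")
        .region("aws-eu-west-1a")
        .container("c0", AuthMethod.mtls)                    // one generated (mTLS) endpoint per zone
        .container("c1", AuthMethod.mtls, AuthMethod.token)  // mTLS + token endpoints per zone
        .build();
```
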
diff --git a/controller-server/src/test/resources/test_runner_services.xml-cd b/controller-server/src/test/resources/test_runner_services.xml-cd
index 4bf3a78801d..35ad0d31577 100644
--- a/controller-server/src/test/resources/test_runner_services.xml-cd
+++ b/controller-server/src/test/resources/test_runner_services.xml-cd
@@ -33,7 +33,7 @@
</component>
<nodes count="1">
- <resources vcpu="2.00" memory="12.00Gb" disk="75.00Gb" disk-speed="fast" storage-type="local"/>
+ <resources vcpu="2.00" memory="12.00Gb" disk="75.00Gb" disk-speed="fast" storage-type="local" architecture="any"/>
</nodes>
</container>
</services>
diff --git a/controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd b/controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd
index 526fd12965b..91317f1490c 100644
--- a/controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd
+++ b/controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd
@@ -34,7 +34,7 @@
<nodes count="1">
<jvm allocated-memory="17%"/>
- <resources vcpu="2.00" memory="12.00Gb" disk="75.00Gb" disk-speed="fast" storage-type="local"/>
+ <resources vcpu="2.00" memory="12.00Gb" disk="75.00Gb" disk-speed="fast" storage-type="local" architecture="any"/>
</nodes>
</container>
</services>
diff --git a/dependency-versions/pom.xml b/dependency-versions/pom.xml
index 5f75b042722..4a84798f4ac 100644
--- a/dependency-versions/pom.xml
+++ b/dependency-versions/pom.xml
@@ -34,16 +34,16 @@
<!-- DO NOT UPGRADE THESE TO A NEW MAJOR VERSION WITHOUT CHECKING FOR BINARY COMPATIBILITY -->
<aopalliance.vespa.version>1.0</aopalliance.vespa.version>
<commons-logging.vespa.version>1.2</commons-logging.vespa.version> <!-- This version is exported by jdisc via jcl-over-slf4j. -->
- <error-prone-annotations.vespa.version>2.18.0</error-prone-annotations.vespa.version>
- <guava.vespa.version>32.1.1-jre</guava.vespa.version>
+ <error-prone-annotations.vespa.version>2.21.1</error-prone-annotations.vespa.version>
+ <guava.vespa.version>32.1.2-jre</guava.vespa.version>
<guice.vespa.version>4.2.3</guice.vespa.version>
<jackson2.vespa.version>2.15.2</jackson2.vespa.version>
<jackson-databind.vespa.version>2.15.2</jackson-databind.vespa.version>
<javax.inject.vespa.version>1</javax.inject.vespa.version>
<javax.servlet-api.vespa.version>3.1.0</javax.servlet-api.vespa.version>
- <javax.ws.rs-api.vespa.version>2.0.1</javax.ws.rs-api.vespa.version>
+ <javax.ws.rs-api.vespa.version>2.1.1</javax.ws.rs-api.vespa.version>
<jaxb.vespa.version>2.3.0</jaxb.vespa.version>
- <slf4j.vespa.version>1.7.32</slf4j.vespa.version>
+ <slf4j.vespa.version>1.7.36</slf4j.vespa.version>
<xml-apis.vespa.version>1.4.01</xml-apis.vespa.version>
<!-- END Dependencies available from the Jdisc container -->
@@ -53,17 +53,18 @@
<airline.vespa.version>0.9</airline.vespa.version>
<antlr.vespa.version>3.5.3</antlr.vespa.version>
- <antlr4.vespa.version>4.11.1</antlr4.vespa.version>
+ <antlr4.vespa.version>4.13.0</antlr4.vespa.version>
<apache.httpclient.vespa.version>4.5.14</apache.httpclient.vespa.version>
<apache.httpcore.vespa.version>4.4.16</apache.httpcore.vespa.version>
<apache.httpclient5.vespa.version>5.2.1</apache.httpclient5.vespa.version>
<apache.httpcore5.vespa.version>5.2.2</apache.httpcore5.vespa.version>
<apiguardian.vespa.version>1.1.2</apiguardian.vespa.version>
- <asm.vespa.version>9.3</asm.vespa.version>
+ <asm.vespa.version>9.5</asm.vespa.version>
+ <assertj.vespa.version>3.24.2</assertj.vespa.version>
<!-- Athenz dependencies. Make sure these dependencies match those in Vespa's internal repositories -->
- <athenz.vespa.version>1.11.34</athenz.vespa.version>
- <aws-sdk.vespa.version>1.12.460</aws-sdk.vespa.version>
+ <athenz.vespa.version>1.11.40</athenz.vespa.version>
+ <aws-sdk.vespa.version>1.12.540</aws-sdk.vespa.version>
<!-- Athenz END -->
<!-- WARNING: If you change curator version, you also need to update
@@ -72,47 +73,56 @@
find zkfacade/src/main/java/org/apache/curator -name package-info.java | \
xargs perl -pi -e 's/major = [0-9]+, minor = [0-9]+, micro = [0-9]+/major = 5, minor = 3, micro = 0/g'
-->
- <bouncycastle.vespa.version>1.74</bouncycastle.vespa.version>
- <curator.vespa.version>5.4.0</curator.vespa.version>
- <commons-codec.vespa.version>1.15</commons-codec.vespa.version>
+ <bouncycastle.vespa.version>1.76</bouncycastle.vespa.version>
+ <checker-qual.vespa.version>3.37.0</checker-qual.vespa.version>
+ <commons-codec.vespa.version>1.16.0</commons-codec.vespa.version>
+ <commons-csv.vespa.version>1.10.0</commons-csv.vespa.version>
<commons-exec.vespa.version>1.3</commons-exec.vespa.version>
- <commons-io.vespa.version>2.11.0</commons-io.vespa.version>
+ <commons-io.vespa.version>2.13.0</commons-io.vespa.version>
+ <commons-lang3.vespa.version>3.13.0</commons-lang3.vespa.version>
<commons.math3.vespa.version>3.6.1</commons.math3.vespa.version>
<commons-compress.vespa.version>1.23.0</commons-compress.vespa.version>
- <eclipse-collections.vespa.version>11.0.0</eclipse-collections.vespa.version>
+ <curator.vespa.version>5.5.0</curator.vespa.version>
+ <dropwizard.metrics.vespa.version>3.2.6</dropwizard.metrics.vespa.version>
+ <eclipse-collections.vespa.version>11.1.0</eclipse-collections.vespa.version>
<felix.vespa.version>7.0.5</felix.vespa.version>
<felix.log.vespa.version>1.0.1</felix.log.vespa.version>
<findbugs.vespa.version>3.0.2</findbugs.vespa.version> <!-- Should be kept in sync with guava -->
- <hamcrest.vespa.version>1.3</hamcrest.vespa.version>
+ <hamcrest.vespa.version>2.2</hamcrest.vespa.version>
<hdrhistogram.vespa.version>2.1.12</hdrhistogram.vespa.version>
- <icu4j.vespa.version>70.1</icu4j.vespa.version>
+ <icu4j.vespa.version>73.2</icu4j.vespa.version>
<java-jjwt.vespa.version>0.11.5</java-jjwt.vespa.version>
- <java-jwt.vespa.version>3.10.0</java-jwt.vespa.version>
- <jaxb.runtime.vespa.version>2.3.2</jaxb.runtime.vespa.version> <!-- 2.3.3 has a BROKEN manifest -->
- <jersey.vespa.version>2.25</jersey.vespa.version>
+ <java-jwt.vespa.version>3.19.4</java-jwt.vespa.version>
+ <jaxb.runtime.vespa.version>2.3.8</jaxb.runtime.vespa.version>
+ <jersey.vespa.version>2.40</jersey.vespa.version>
<jetty.vespa.version>11.0.15</jetty.vespa.version>
<jetty-servlet-api.vespa.version>5.0.2</jetty-servlet-api.vespa.version>
- <jna.vespa.version>5.11.0</jna.vespa.version>
- <joda-time.vespa.version>2.12.2</joda-time.vespa.version>
+ <jimfs.vespa.version>1.3.0</jimfs.vespa.version>
+ <jna.vespa.version>5.13.0</jna.vespa.version>
+ <joda-time.vespa.version>2.12.5</joda-time.vespa.version>
<junit.vespa.version>5.8.1</junit.vespa.version>
<junit.platform.vespa.version>1.8.1</junit.platform.vespa.version>
<junit4.vespa.version>4.13.2</junit4.vespa.version>
+ <luben.zstd.vespa.version>1.5.5-5</luben.zstd.vespa.version>
<lucene.vespa.version>9.7.0</lucene.vespa.version>
- <maven-archiver.vespa.version>3.6.0</maven-archiver.vespa.version>
- <maven-wagon.vespa.version>2.10</maven-wagon.vespa.version>
- <mimepull.vespa.version>1.9.6</mimepull.vespa.version>
- <mockito.vespa.version>4.0.0</mockito.vespa.version>
- <netty.vespa.version>4.1.94.Final</netty.vespa.version>
+ <maven-archiver.vespa.version>3.6.1</maven-archiver.vespa.version>
+ <maven-wagon.vespa.version>3.5.3</maven-wagon.vespa.version>
+ <mimepull.vespa.version>1.10.0</mimepull.vespa.version>
+ <mockito.vespa.version>5.5.0</mockito.vespa.version>
+ <mojo-executor.vespa.version>2.4.0</mojo-executor.vespa.version>
+ <netty.vespa.version>4.1.97.Final</netty.vespa.version>
<netty-tcnative.vespa.version>2.0.61.Final</netty-tcnative.vespa.version>
- <onnxruntime.vespa.version>1.13.1</onnxruntime.vespa.version>
- <opennlp.vespa.version>1.9.3</opennlp.vespa.version>
- <opentest4j.vespa.version>1.2.0</opentest4j.vespa.version>
- <org.json.vespa.version>20230227</org.json.vespa.version>
+ <onnxruntime.vespa.version>1.15.1</onnxruntime.vespa.version>
+ <opennlp.vespa.version>1.9.4</opennlp.vespa.version>
+ <opentest4j.vespa.version>1.3.0</opentest4j.vespa.version>
+ <org.json.vespa.version>20230618</org.json.vespa.version>
<org.lz4.vespa.version>1.8.0</org.lz4.vespa.version>
- <prometheus.client.vespa.version>0.6.0</prometheus.client.vespa.version>
- <protobuf.vespa.version>3.21.7</protobuf.vespa.version>
+ <prometheus.client.vespa.version>0.16.0</prometheus.client.vespa.version>
+ <protobuf.vespa.version>3.24.2</protobuf.vespa.version>
+ <questdb.vespa.version>6.2</questdb.vespa.version>
<spifly.vespa.version>1.3.6</spifly.vespa.version>
- <surefire.vespa.version>3.0.0-M9</surefire.vespa.version>
+ <snappy.vespa.version>1.1.10.3</snappy.vespa.version>
+ <surefire.vespa.version>3.1.2</surefire.vespa.version>
<wiremock.vespa.version>2.35.0</wiremock.vespa.version>
<xerces.vespa.version>2.12.2</xerces.vespa.version>
<zero-allocation-hashing.vespa.version>0.16</zero-allocation-hashing.vespa.version>
@@ -120,24 +130,27 @@
<!-- Maven plugins -->
- <maven-assembly-plugin.vespa.version>3.3.0</maven-assembly-plugin.vespa.version>
+ <clover-maven-plugin.vespa.version>4.4.1</clover-maven-plugin.vespa.version>
+ <maven-antrun-plugin.vespa.version>3.1.0</maven-antrun-plugin.vespa.version>
+ <maven-assembly-plugin.vespa.version>3.6.0</maven-assembly-plugin.vespa.version>
<maven-bundle-plugin.vespa.version>5.1.9</maven-bundle-plugin.vespa.version>
- <maven-compiler-plugin.vespa.version>3.10.1</maven-compiler-plugin.vespa.version>
- <maven-core.vespa.version>3.8.7</maven-core.vespa.version>
+ <maven-compiler-plugin.vespa.version>3.11.0</maven-compiler-plugin.vespa.version>
+ <maven-core.vespa.version>3.9.4</maven-core.vespa.version>
<maven-dependency-plugin.vespa.version>3.6.0</maven-dependency-plugin.vespa.version>
- <maven-deploy-plugin.vespa.version>2.8.2</maven-deploy-plugin.vespa.version>
- <maven-enforcer-plugin.vespa.version>3.3.0</maven-enforcer-plugin.vespa.version>
- <maven-failsafe-plugin.vespa.version>3.0.0-M6</maven-failsafe-plugin.vespa.version>
+ <maven-deploy-plugin.vespa.version>3.1.1</maven-deploy-plugin.vespa.version>
+ <maven-enforcer-plugin.vespa.version>3.4.0</maven-enforcer-plugin.vespa.version>
+ <maven-failsafe-plugin.vespa.version>3.1.2</maven-failsafe-plugin.vespa.version>
<maven-install-plugin.vespa.version>3.1.1</maven-install-plugin.vespa.version>
- <maven-jar-plugin.vespa.version>3.2.0</maven-jar-plugin.vespa.version>
- <maven-javadoc-plugin.vespa.version>3.3.1</maven-javadoc-plugin.vespa.version>
+ <maven-jar-plugin.vespa.version>3.3.0</maven-jar-plugin.vespa.version>
+ <maven-javadoc-plugin.vespa.version>3.5.0</maven-javadoc-plugin.vespa.version>
<maven-plugin-api.vespa.version>${maven-core.vespa.version}</maven-plugin-api.vespa.version>
- <maven-plugin-tools.vespa.version>3.6.4</maven-plugin-tools.vespa.version>
- <maven-resources-plugin.vespa.version>3.2.0</maven-resources-plugin.vespa.version>
- <maven-shade-plugin.vespa.version>3.4.1</maven-shade-plugin.vespa.version>
- <maven-site-plugin.vespa.version>3.9.1</maven-site-plugin.vespa.version>
- <maven-source-plugin.vespa.version>3.2.1</maven-source-plugin.vespa.version>
- <versions-maven-plugin.vespa.version>2.8.1</versions-maven-plugin.vespa.version>
+ <maven-plugin-tools.vespa.version>3.9.0</maven-plugin-tools.vespa.version>
+ <maven-resources-plugin.vespa.version>3.3.1</maven-resources-plugin.vespa.version>
+ <maven-shade-plugin.vespa.version>3.5.0</maven-shade-plugin.vespa.version>
+ <maven-site-plugin.vespa.version>3.12.1</maven-site-plugin.vespa.version>
+ <maven-source-plugin.vespa.version>3.3.0</maven-source-plugin.vespa.version>
+ <properties-maven-plugin.vespa.version>1.2.0</properties-maven-plugin.vespa.version>
+ <versions-maven-plugin.vespa.version>2.16.0</versions-maven-plugin.vespa.version>
</properties>
<profiles>
diff --git a/dist/vespa.spec b/dist/vespa.spec
index c1a409e057f..7351ed3a74b 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -93,7 +93,7 @@ BuildRequires: vespa-openssl-devel >= 1.1.1o-1
BuildRequires: vespa-gtest = 1.13.0
%define _use_vespa_gtest 1
BuildRequires: vespa-lz4-devel >= 1.9.4-1
-BuildRequires: vespa-onnxruntime-devel = 1.13.1
+BuildRequires: vespa-onnxruntime-devel = 1.15.1
BuildRequires: vespa-protobuf-devel = 3.21.12
%define _use_vespa_protobuf 1
BuildRequires: vespa-libzstd-devel >= 1.5.4-1
@@ -104,7 +104,7 @@ BuildRequires: maven
BuildRequires: maven-openjdk17
BuildRequires: openssl-devel
BuildRequires: vespa-lz4-devel >= 1.9.4-1
-BuildRequires: vespa-onnxruntime-devel = 1.13.1
+BuildRequires: vespa-onnxruntime-devel = 1.15.1
BuildRequires: vespa-libzstd-devel >= 1.5.4-1
BuildRequires: vespa-protobuf-devel = 3.21.12
%define _use_vespa_protobuf 1
@@ -126,7 +126,7 @@ BuildRequires: maven-openjdk17
%endif
BuildRequires: openssl-devel
BuildRequires: vespa-lz4-devel >= 1.9.4-1
-BuildRequires: vespa-onnxruntime-devel = 1.13.1
+BuildRequires: vespa-onnxruntime-devel = 1.15.1
BuildRequires: vespa-libzstd-devel >= 1.5.4-1
BuildRequires: protobuf-devel
BuildRequires: llvm-devel
@@ -141,7 +141,7 @@ BuildRequires: vespa-xxhash-devel >= 0.8.1
BuildRequires: xxhash-devel >= 0.8.1
%endif
%if 0%{?el8}
-BuildRequires: vespa-openblas-devel = 0.3.21
+BuildRequires: vespa-openblas-devel >= 0.3.21
%define _use_vespa_openblas 1
%else
BuildRequires: openblas-devel
@@ -257,7 +257,7 @@ Requires: openssl-libs
Requires: vespa-lz4 >= 1.9.4-1
Requires: vespa-libzstd >= 1.5.4-1
%if 0%{?el8}
-Requires: vespa-openblas = 0.3.21
+Requires: vespa-openblas >= 0.3.21
%else
Requires: openblas-serial
%endif
@@ -297,7 +297,7 @@ Requires: vespa-protobuf = 3.21.12
Requires: protobuf
Requires: llvm-libs
%endif
-Requires: vespa-onnxruntime = 1.13.1
+Requires: vespa-onnxruntime = 1.15.1
%description libs
diff --git a/document/abi-spec.json b/document/abi-spec.json
index 6129ea991d5..899c107a242 100644
--- a/document/abi-spec.json
+++ b/document/abi-spec.json
@@ -3442,9 +3442,12 @@
],
"methods" : [
"public void <init>(com.yahoo.document.update.TensorModifyUpdate$Operation, com.yahoo.document.datatypes.TensorFieldValue)",
+ "public void <init>(com.yahoo.document.update.TensorModifyUpdate$Operation, com.yahoo.document.datatypes.TensorFieldValue, boolean)",
"public static com.yahoo.tensor.TensorType convertDimensionsToMapped(com.yahoo.tensor.TensorType)",
"public com.yahoo.document.update.TensorModifyUpdate$Operation getOperation()",
"public com.yahoo.document.datatypes.TensorFieldValue getValue()",
+ "public boolean getCreateNonExistingCells()",
+ "public double getDefaultCellValue()",
"public void setValue(com.yahoo.document.datatypes.TensorFieldValue)",
"public com.yahoo.document.datatypes.FieldValue applyTo(com.yahoo.document.datatypes.FieldValue)",
"protected void checkCompatibility(com.yahoo.document.DataType)",
@@ -3457,7 +3460,8 @@
],
"fields" : [
"protected com.yahoo.document.update.TensorModifyUpdate$Operation operation",
- "protected com.yahoo.document.datatypes.TensorFieldValue tensor"
+ "protected com.yahoo.document.datatypes.TensorFieldValue tensor",
+ "protected boolean createNonExistingCells"
]
},
"com.yahoo.document.update.TensorRemoveUpdate" : {
diff --git a/document/src/main/java/com/yahoo/document/json/readers/TensorModifyUpdateReader.java b/document/src/main/java/com/yahoo/document/json/readers/TensorModifyUpdateReader.java
index 21fa51d5b88..92ede0fbe99 100644
--- a/document/src/main/java/com/yahoo/document/json/readers/TensorModifyUpdateReader.java
+++ b/document/src/main/java/com/yahoo/document/json/readers/TensorModifyUpdateReader.java
@@ -2,6 +2,7 @@
package com.yahoo.document.json.readers;
+import com.fasterxml.jackson.core.JsonToken;
import com.yahoo.document.Field;
import com.yahoo.document.TensorDataType;
import com.yahoo.document.datatypes.TensorFieldValue;
@@ -29,6 +30,7 @@ public class TensorModifyUpdateReader {
private static final String MODIFY_REPLACE = "replace";
private static final String MODIFY_ADD = "add";
private static final String MODIFY_MULTIPLY = "multiply";
+ private static final String MODIFY_CREATE = "create";
public static TensorModifyUpdate createModifyUpdate(TokenBuffer buffer, Field field) {
expectFieldIsOfTypeTensor(field);
@@ -39,7 +41,7 @@ public class TensorModifyUpdateReader {
expectOperationSpecified(result.operation, field.getName());
expectTensorSpecified(result.tensor, field.getName());
- return new TensorModifyUpdate(result.operation, result.tensor);
+ return new TensorModifyUpdate(result.operation, result.tensor, result.createNonExistingCells);
}
private static void expectFieldIsOfTypeTensor(Field field) {
@@ -73,6 +75,7 @@ public class TensorModifyUpdateReader {
private static class ModifyUpdateResult {
TensorModifyUpdate.Operation operation = null;
+ boolean createNonExistingCells = false;
TensorFieldValue tensor = null;
}
@@ -85,6 +88,9 @@ public class TensorModifyUpdateReader {
case MODIFY_OPERATION:
result.operation = createOperation(buffer, field.getName());
break;
+ case MODIFY_CREATE:
+ result.createNonExistingCells = createNonExistingCells(buffer, field.getName());
+ break;
case TENSOR_CELLS:
result.tensor = createTensorFromCells(buffer, field);
break;
@@ -112,6 +118,16 @@ public class TensorModifyUpdateReader {
}
}
+ private static Boolean createNonExistingCells(TokenBuffer buffer, String fieldName) {
+ if (buffer.current() == JsonToken.VALUE_TRUE) {
+ return true;
+ } else if (buffer.current() == JsonToken.VALUE_FALSE) {
+ return false;
+ } else {
+ throw new IllegalArgumentException("Unknown value '" + buffer.currentText() + "' for '" + MODIFY_CREATE + "' in modify update for field '" + fieldName + "'");
+ }
+ }
+
private static TensorFieldValue createTensorFromCells(TokenBuffer buffer, Field field) {
TensorDataType tensorDataType = (TensorDataType)field.getDataType();
TensorType originalType = tensorDataType.getTensorType();
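
For reference, a hedged sketch of the document-update JSON this reader now accepts: the optional `create` field (parsed via `MODIFY_CREATE`) maps to `createNonExistingCells`. Only the `operation`/`create`/`cells` structure is taken directly from this diff (and the JsonReaderTestCase additions later in it); the surrounding `update`/`fields`/`modify` wrapper and the document and field names are illustrative assumptions:

```java
// Illustrative payload only; document id and field name are made up.
String modifyUpdateJson = """
        {
            "update": "id:mynamespace:mydoctype::1",
            "fields": {
                "sparse_tensor": {
                    "modify": {
                        "operation": "add",
                        "create": true,
                        "cells": [
                            { "address": { "x": "a", "y": "b" }, "value": 2.0 }
                        ]
                    }
                }
            }
        }
        """;
```
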
diff --git a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializerHead.java b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializerHead.java
index c6fdc915401..765d999dbc9 100644
--- a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializerHead.java
+++ b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializerHead.java
@@ -26,20 +26,35 @@ public class VespaDocumentDeserializerHead extends VespaDocumentDeserializer6 {
@Override
protected ValueUpdate readTensorModifyUpdate(DataType type) {
byte operationId = getByte(null);
- TensorModifyUpdate.Operation operation = TensorModifyUpdate.Operation.getOperation(operationId);
+ var operation = decodeOperation(operationId);
if (operation == null) {
throw new DeserializationException("Unknown operation id " + operationId + " for tensor modify update");
}
if (!(type instanceof TensorDataType)) {
throw new DeserializationException("Expected tensor data type, got " + type);
}
+ var createNonExistingCells = decodeCreateNonExistingCells(operationId);
+ if (createNonExistingCells) {
+ // Read the default cell value (but it is not used by TensorModifyUpdate).
+ getDouble(null);
+ }
TensorDataType tensorDataType = (TensorDataType)type;
TensorType tensorType = tensorDataType.getTensorType();
TensorType convertedType = TensorModifyUpdate.convertDimensionsToMapped(tensorType);
TensorFieldValue tensor = new TensorFieldValue(convertedType);
tensor.deserialize(this);
- return new TensorModifyUpdate(operation, tensor);
+ return new TensorModifyUpdate(operation, tensor, createNonExistingCells);
+ }
+
+ private TensorModifyUpdate.Operation decodeOperation(byte operationId) {
+ byte OP_MASK = 0b01111111;
+ return TensorModifyUpdate.Operation.getOperation(operationId & OP_MASK);
+ }
+
+ private boolean decodeCreateNonExistingCells(byte operationId) {
+ byte CREATE_FLAG = -0b10000000;
+ return (operationId & CREATE_FLAG) != 0;
}
@Override
diff --git a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentSerializerHead.java b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentSerializerHead.java
index 66bc8cbb4d5..b2c3cdc09de 100644
--- a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentSerializerHead.java
+++ b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentSerializerHead.java
@@ -19,10 +19,22 @@ public class VespaDocumentSerializerHead extends VespaDocumentSerializer6 {
@Override
public void write(TensorModifyUpdate update) {
- putByte(null, (byte) update.getOperation().id);
+ putByte(null, (byte) encodeOperationId(update));
+ if (update.getCreateNonExistingCells()) {
+ putDouble(null, update.getDefaultCellValue());
+ }
update.getValue().serialize(this);
}
+ private int encodeOperationId(TensorModifyUpdate update) {
+ int operationId = update.getOperation().id;
+ byte CREATE_FLAG = -0b10000000;
+ if (update.getCreateNonExistingCells()) {
+ operationId |= CREATE_FLAG;
+ }
+ return operationId;
+ }
+
@Override
public void write(TensorAddUpdate update) {
update.getValue().serialize(this);
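
Taken together, the (de)serializer changes above encode the create-non-existing-cells flag in the high bit of the existing operation byte and, when the flag is set, write the default cell value as a double before the tensor payload; the Java deserializer reads that double but discards it, since `getDefaultCellValue()` is derived from the operation. A minimal, self-contained sketch of that header layout (independent of the Vespa serializer classes):

```java
// Sketch of the operation-byte layout used above: low 7 bits = operation id,
// high bit (0x80) = create-non-existing-cells flag; when set, a default cell
// value (double) follows before the serialized tensor.
final class TensorModifyHeaderSketch {
    static final int CREATE_FLAG = 0x80;
    static final int OP_MASK = 0x7F;

    static byte encode(int operationId, boolean createNonExistingCells) {
        int b = operationId & OP_MASK;
        if (createNonExistingCells) b |= CREATE_FLAG;
        return (byte) b;
    }

    static int operationId(byte header) { return header & OP_MASK; }

    static boolean createNonExistingCells(byte header) { return (header & CREATE_FLAG) != 0; }
}
```

Since the existing operation ids fit in the low seven bits and the extra double is only written when the flag is set, updates serialized without the flag decode exactly as before.
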
diff --git a/document/src/main/java/com/yahoo/document/update/TensorModifyUpdate.java b/document/src/main/java/com/yahoo/document/update/TensorModifyUpdate.java
index d9521ee0e1c..8a14bd21443 100644
--- a/document/src/main/java/com/yahoo/document/update/TensorModifyUpdate.java
+++ b/document/src/main/java/com/yahoo/document/update/TensorModifyUpdate.java
@@ -6,10 +6,15 @@ import com.yahoo.document.TensorDataType;
import com.yahoo.document.datatypes.FieldValue;
import com.yahoo.document.datatypes.TensorFieldValue;
import com.yahoo.document.serialization.DocumentUpdateWriter;
+import com.yahoo.tensor.MixedTensor;
import com.yahoo.tensor.Tensor;
+import com.yahoo.tensor.TensorAddress;
import com.yahoo.tensor.TensorType;
+import java.util.Arrays;
+import java.util.HashSet;
import java.util.Objects;
+import java.util.Optional;
import java.util.function.DoubleBinaryOperator;
/*
@@ -21,11 +26,17 @@ public class TensorModifyUpdate extends ValueUpdate<TensorFieldValue> {
protected Operation operation;
protected TensorFieldValue tensor;
+ protected boolean createNonExistingCells;
public TensorModifyUpdate(Operation operation, TensorFieldValue tensor) {
+ this(operation, tensor, false);
+ }
+
+ public TensorModifyUpdate(Operation operation, TensorFieldValue tensor, boolean createNonExistingCells) {
super(ValueUpdateClassID.TENSORMODIFY);
this.operation = operation;
this.tensor = tensor;
+ this.createNonExistingCells = createNonExistingCells;
verifyCompatibleType(tensor.getDataType().getTensorType());
}
@@ -45,8 +56,11 @@ public class TensorModifyUpdate extends ValueUpdate<TensorFieldValue> {
}
public Operation getOperation() { return operation; }
-
public TensorFieldValue getValue() { return tensor; }
+ public boolean getCreateNonExistingCells() { return createNonExistingCells; }
+ public double getDefaultCellValue() {
+ return (operation == Operation.MULTIPLY) ? 1.0 : 0.0;
+ }
public void setValue(TensorFieldValue value) { tensor = value; }
@Override
@@ -63,6 +77,12 @@ public class TensorModifyUpdate extends ValueUpdate<TensorFieldValue> {
default:
throw new UnsupportedOperationException("Unknown operation: " + operation);
}
+ if (createNonExistingCells && hasMappedSubtype(oldTensor.type())) {
+ var subspaces = findSubspacesNotInInput(oldTensor, tensor.getTensor().get());
+ if (!subspaces.isEmpty()) {
+ oldTensor = insertSubspaces(oldTensor, subspaces, getDefaultCellValue());
+ }
+ }
Tensor modified = oldTensor.modify(modifier, tensor.getTensor().get().cells());
return new TensorFieldValue(modified);
}
@@ -72,6 +92,63 @@ public class TensorModifyUpdate extends ValueUpdate<TensorFieldValue> {
return oldValue;
}
+ private static boolean hasMappedSubtype(TensorType type) {
+ return !type.mappedSubtype().equals(TensorType.empty);
+ }
+
+ private static boolean hasIndexedSubtype(TensorType type) {
+ return !type.indexedSubtype().equals(TensorType.empty);
+ }
+
+ private static HashSet<TensorAddress> findSubspacesNotInInput(Tensor input, Tensor modifier) {
+ var subspaces = new HashSet<TensorAddress>();
+ var inputCells = input.cells();
+ var type = input.type();
+ for (var itr = modifier.cellIterator(); itr.hasNext(); ) {
+ Tensor.Cell cell = itr.next();
+ TensorAddress address = cell.getKey();
+ if (!inputCells.containsKey(address)) {
+ subspaces.add(createSparsePartAddress(address, type));
+ }
+ }
+ return subspaces;
+ }
+
+ private static TensorAddress createSparsePartAddress(TensorAddress address, TensorType type) {
+ var builder = new TensorAddress.Builder(type.mappedSubtype());
+ for (int i = 0; i < type.dimensions().size(); ++i) {
+ var dim = type.dimensions().get(i);
+ if (dim.isMapped()) {
+ builder.add(dim.name(), address.label(i));
+ }
+ }
+ return builder.build();
+ }
+
+ private static Tensor insertSubspaces(Tensor input, HashSet<TensorAddress> subspaces, double defaultCellValue) {
+ var type = input.type();
+ boolean mixed = hasMappedSubtype(type) && hasIndexedSubtype(type);
+ Tensor.Builder builder;
+ if (mixed) {
+ var boundBuilder = MixedTensor.BoundBuilder.of(type);
+ var values = new double[(int) boundBuilder.denseSubspaceSize()];
+ Arrays.fill(values, defaultCellValue);
+ for (var subspace : subspaces) {
+ boundBuilder.block(subspace, values);
+ }
+ builder = boundBuilder;
+ } else {
+ builder = Tensor.Builder.of(type);
+ for (var subspace : subspaces) {
+ builder.cell(subspace, defaultCellValue);
+ }
+ }
+ for (var itr = input.cellIterator(); itr.hasNext(); ) {
+ builder.cell(itr.next());
+ }
+ return builder.build();
+ }
+
@Override
protected void checkCompatibility(DataType fieldType) {
if (!(fieldType instanceof TensorDataType)) {
@@ -91,17 +168,18 @@ public class TensorModifyUpdate extends ValueUpdate<TensorFieldValue> {
if (!super.equals(o)) return false;
TensorModifyUpdate that = (TensorModifyUpdate) o;
return operation == that.operation &&
- tensor.equals(that.tensor);
+ tensor.equals(that.tensor) &&
+ createNonExistingCells == that.createNonExistingCells;
}
@Override
public int hashCode() {
- return Objects.hash(super.hashCode(), operation, tensor);
+ return Objects.hash(super.hashCode(), operation, tensor, createNonExistingCells);
}
@Override
public String toString() {
- return super.toString() + " " + operation.name + " " + tensor;
+ return super.toString() + " " + operation.name + " " + tensor + " " + createNonExistingCells;
}
/**
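
A usage sketch of the new behaviour, mirroring one of the `assertApplyTo` cases added in TensorModifyUpdateTest later in this diff: when `createNonExistingCells` is true, missing sparse cells are first inserted with the default cell value (1.0 for MULTIPLY, 0.0 otherwise) and then modified as usual. The wrapper class is illustrative; the API calls are those shown in this diff:

```java
import com.yahoo.document.datatypes.TensorFieldValue;
import com.yahoo.document.update.TensorModifyUpdate;
import com.yahoo.tensor.Tensor;

class CreateNonExistingCellsSketch {

    static TensorFieldValue example() {
        var existing = new TensorFieldValue(Tensor.from("tensor(x{})", "{{x:a}:1,{x:b}:2}"));
        var update = new TensorModifyUpdate(
                TensorModifyUpdate.Operation.MULTIPLY,
                new TensorFieldValue(Tensor.from("tensor(x{})", "{{x:b}:3,{x:c}:4}")),
                true); // create non-existing cells, using the MULTIPLY default cell value 1.0
        // Result: {{x:a}:1, {x:b}:6, {x:c}:4} -- cell x:c is created as 1.0, then multiplied by 4.
        return (TensorFieldValue) update.applyTo(existing);
    }
}
```
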
diff --git a/document/src/test/java/com/yahoo/document/DocumentTestCase.java b/document/src/test/java/com/yahoo/document/DocumentTestCase.java
index 33b77cb1878..e5f6453c581 100644
--- a/document/src/test/java/com/yahoo/document/DocumentTestCase.java
+++ b/document/src/test/java/com/yahoo/document/DocumentTestCase.java
@@ -42,7 +42,7 @@ import static org.junit.Assert.fail;
/**
* Test for Document and all its features, including (de)serialization.
*
- * @author <a href="thomasg@yahoo-inc.com>Thomas Gundersen</a>
+ * @author Thomas Gundersen
* @author bratseth
*/
public class DocumentTestCase extends DocumentTestCaseBase {
diff --git a/document/src/test/java/com/yahoo/document/DocumentUpdateTestCase.java b/document/src/test/java/com/yahoo/document/DocumentUpdateTestCase.java
index 9733cd41a88..9d4d1e8f3aa 100644
--- a/document/src/test/java/com/yahoo/document/DocumentUpdateTestCase.java
+++ b/document/src/test/java/com/yahoo/document/DocumentUpdateTestCase.java
@@ -822,7 +822,10 @@ public class DocumentUpdateTestCase {
result.addFieldUpdate(FieldUpdate.create(getField("dense_tensor"))
.addValueUpdate(new TensorModifyUpdate(TensorModifyUpdate.Operation.REPLACE, createTensor()))
.addValueUpdate(new TensorModifyUpdate(TensorModifyUpdate.Operation.ADD, createTensor()))
- .addValueUpdate(new TensorModifyUpdate(TensorModifyUpdate.Operation.MULTIPLY, createTensor())));
+ .addValueUpdate(new TensorModifyUpdate(TensorModifyUpdate.Operation.MULTIPLY, createTensor()))
+ .addValueUpdate(new TensorModifyUpdate(TensorModifyUpdate.Operation.REPLACE, createTensor(), true))
+ .addValueUpdate(new TensorModifyUpdate(TensorModifyUpdate.Operation.ADD, createTensor(), true))
+ .addValueUpdate(new TensorModifyUpdate(TensorModifyUpdate.Operation.MULTIPLY, createTensor(), true)));
return result;
}
diff --git a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java
index 96b5d2c1fb5..4140a9eee02 100644
--- a/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java
+++ b/document/src/test/java/com/yahoo/document/json/JsonReaderTestCase.java
@@ -1681,6 +1681,26 @@ public class JsonReaderTestCase {
}
@Test
+ public void tensor_modify_update_with_create_non_existing_cells_true() {
+ assertTensorModifyUpdate("{{x:a,y:b}:2.0}", TensorModifyUpdate.Operation.ADD, true, "sparse_tensor",
+ inputJson("{",
+ " 'operation': 'add',",
+ " 'create': true,",
+ " 'cells': [",
+ " { 'address': { 'x': 'a', 'y': 'b' }, 'value': 2.0 } ]}"));
+ }
+
+ @Test
+ public void tensor_modify_update_with_create_non_existing_cells_false() {
+ assertTensorModifyUpdate("{{x:a,y:b}:2.0}", TensorModifyUpdate.Operation.ADD, false, "sparse_tensor",
+ inputJson("{",
+ " 'operation': 'add',",
+ " 'create': false,",
+ " 'cells': [",
+ " { 'address': { 'x': 'a', 'y': 'b' }, 'value': 2.0 } ]}"));
+ }
+
+ @Test
public void tensor_modify_update_treats_the_input_tensor_as_sparse() {
// Note that the type of the tensor in the modify update is sparse (it only has mapped dimensions).
assertTensorModifyUpdate("tensor(x{},y{}):{{x:0,y:0}:2.0, {x:1,y:2}:3.0}",
@@ -2155,16 +2175,25 @@ public class JsonReaderTestCase {
private void assertTensorModifyUpdate(String expectedTensor, TensorModifyUpdate.Operation expectedOperation,
String tensorFieldName, String modifyJson) {
- assertTensorModifyUpdate(expectedTensor, expectedOperation, tensorFieldName,
+ assertTensorModifyUpdate(expectedTensor, expectedOperation, false, tensorFieldName,
+ createTensorModifyUpdate(modifyJson, tensorFieldName));
+ }
+
+ private void assertTensorModifyUpdate(String expectedTensor, TensorModifyUpdate.Operation expectedOperation,
+ boolean expectedCreateNonExistingCells,
+ String tensorFieldName, String modifyJson) {
+ assertTensorModifyUpdate(expectedTensor, expectedOperation, expectedCreateNonExistingCells, tensorFieldName,
createTensorModifyUpdate(modifyJson, tensorFieldName));
}
private static void assertTensorModifyUpdate(String expectedTensor, TensorModifyUpdate.Operation expectedOperation,
+ boolean expectedCreateNonExistingCells,
String tensorFieldName, DocumentUpdate update) {
assertTensorFieldUpdate(update, tensorFieldName);
TensorModifyUpdate modifyUpdate = (TensorModifyUpdate) update.getFieldUpdate(tensorFieldName).getValueUpdate(0);
assertEquals(expectedOperation, modifyUpdate.getOperation());
assertEquals(Tensor.from(expectedTensor), modifyUpdate.getValue().getTensor().get());
+ assertEquals(expectedCreateNonExistingCells, modifyUpdate.getCreateNonExistingCells());
}
private DocumentUpdate createTensorModifyUpdate(String modifyJson, String tensorFieldName) {
diff --git a/document/src/test/java/com/yahoo/document/update/TensorModifyUpdateTest.java b/document/src/test/java/com/yahoo/document/update/TensorModifyUpdateTest.java
index 60dd5ad1d0d..d0b04cf5449 100644
--- a/document/src/test/java/com/yahoo/document/update/TensorModifyUpdateTest.java
+++ b/document/src/test/java/com/yahoo/document/update/TensorModifyUpdateTest.java
@@ -7,6 +7,8 @@ import com.yahoo.tensor.Tensor;
import com.yahoo.tensor.TensorType;
import org.junit.Test;
+import java.util.Optional;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
@@ -47,11 +49,35 @@ public class TensorModifyUpdateTest {
"{{x:0,y:0}:3, {x:0,y:1}:2}", "{{x:0,y:1}:3}", "{{x:0,y:0}:3,{x:0,y:1}:6}");
}
- private void assertApplyTo(String spec, Operation op, String init, String update, String expected) {
- TensorFieldValue initialFieldValue = new TensorFieldValue(Tensor.from(spec, init));
- TensorModifyUpdate modifyUpdate = new TensorModifyUpdate(op, new TensorFieldValue(Tensor.from("tensor(x{},y{})", update)));
- TensorFieldValue updatedFieldValue = (TensorFieldValue) modifyUpdate.applyTo(initialFieldValue);
- assertEquals(Tensor.from(spec, expected), updatedFieldValue.getTensor().get());
+ @Test
+ public void apply_modify_update_operations_with_default_cell_value() {
+ assertApplyTo("tensor(x{})", "tensor(x{})", Operation.MULTIPLY, true,
+ "{{x:a}:1,{x:b}:2}", "{{x:b}:3}", "{{x:a}:1,{x:b}:6}");
+
+ assertApplyTo("tensor(x{})", "tensor(x{})", Operation.MULTIPLY, true,
+ "{{x:a}:1,{x:b}:2}", "{{x:b}:3,{x:c}:4}", "{{x:a}:1,{x:b}:6,{x:c}:4}");
+
+ assertApplyTo("tensor(x{},y[3])", "tensor(x{},y{})", Operation.ADD, true,
+ "{{x:a,y:0}:3,{x:a,y:1}:4,{x:a,y:2}:5}",
+ "{{x:a,y:0}:6,{x:b,y:1}:7,{x:b,y:2}:8,{x:c,y:0}:9}",
+ "{{x:a,y:0}:9,{x:a,y:1}:4,{x:a,y:2}:5," +
+ "{x:b,y:0}:0,{x:b,y:1}:7,{x:b,y:2}:8," +
+ "{x:c,y:0}:9,{x:c,y:1}:0,{x:c,y:2}:0}");
+
+ // NOTE: The default cell value (1.0) used for MULTIPLY operation doesn't have any effect for tensors
+ // with only indexed dimensions, as the dense subspace is always represented (with default cell value 0.0).
+ assertApplyTo("tensor(x[3])", "tensor(x{})", Operation.MULTIPLY, true,
+ "{{x:0}:2}", "{{x:1}:3}", "{{x:0}:2,{x:1}:0,{x:2}:0}");
+ }
+
+ private void assertApplyTo(String spec, Operation op, String input, String update, String expected) {
+ assertApplyTo(spec, "tensor(x{},y{})", op, false, input, update, expected);
}
+ private void assertApplyTo(String inputSpec, String updateSpec, Operation op, boolean createNonExistingCells, String input, String update, String expected) {
+ TensorFieldValue inputFieldValue = new TensorFieldValue(Tensor.from(inputSpec, input));
+ TensorModifyUpdate modifyUpdate = new TensorModifyUpdate(op, new TensorFieldValue(Tensor.from(updateSpec, update)), createNonExistingCells);
+ TensorFieldValue updatedFieldValue = (TensorFieldValue) modifyUpdate.applyTo(inputFieldValue);
+ assertEquals(Tensor.from(inputSpec, expected), updatedFieldValue.getTensor().get());
+ }
}
diff --git a/document/src/tests/data/serialize-tensor-update-cpp.dat b/document/src/tests/data/serialize-tensor-update-cpp.dat
index ad0e9d706b0..d6b6b5e2506 100644
--- a/document/src/tests/data/serialize-tensor-update-cpp.dat
+++ b/document/src/tests/data/serialize-tensor-update-cpp.dat
Binary files differ
diff --git a/document/src/tests/data/serialize-tensor-update-java.dat b/document/src/tests/data/serialize-tensor-update-java.dat
index ad0e9d706b0..d6b6b5e2506 100644
--- a/document/src/tests/data/serialize-tensor-update-java.dat
+++ b/document/src/tests/data/serialize-tensor-update-java.dat
Binary files differ
diff --git a/document/src/tests/documentupdatetestcase.cpp b/document/src/tests/documentupdatetestcase.cpp
index 3fbccaa155f..25815684b7e 100644
--- a/document/src/tests/documentupdatetestcase.cpp
+++ b/document/src/tests/documentupdatetestcase.cpp
@@ -1024,13 +1024,37 @@ TEST(DocumentUpdateTest, tensor_modify_update_can_be_applied)
.add({{"x", "b"}}, 15));
}
-TEST(DocumentUpdateTest, tensor_modify_update_can_be_applied_to_nonexisting_tensor)
+TEST(DocumentUpdateTest, tensor_modify_update_with_create_non_existing_cells_can_be_applied)
+{
+ TensorUpdateFixture f;
+ auto baseLine = f.spec().add({{"x", "a"}}, 2)
+ .add({{"x", "b"}}, 3);
+
+ f.assertApplyUpdate(baseLine,
+ std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::ADD,
+ f.makeTensor(f.spec().add({{"x", "b"}}, 5).add({{"x", "c"}}, 6)), 0.0),
+ f.spec().add({{"x", "a"}}, 2)
+ .add({{"x", "b"}}, 8)
+ .add({{"x", "c"}}, 6));
+}
+
+TEST(DocumentUpdateTest, tensor_modify_update_is_ignored_when_applied_to_nonexisting_tensor)
{
TensorUpdateFixture f;
f.assertApplyUpdateNonExisting(std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::ADD,
f.makeTensor(f.spec().add({{"x", "b"}}, 5))));
}
+TEST(DocumentUpdateTest, tensor_modify_update_with_create_non_existing_cells_is_applied_to_nonexisting_tensor)
+{
+ TensorUpdateFixture f;
+ f.assertApplyUpdateNonExisting(std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::ADD,
+ f.makeTensor(f.spec().add({{"x", "b"}}, 5)
+ .add({{"x", "c"}}, 6)), 0.0),
+ f.spec().add({{"x", "b"}}, 5)
+ .add({{"x", "c"}}, 6));
+}
+
TEST(DocumentUpdateTest, tensor_assign_update_can_be_roundtrip_serialized)
{
TensorUpdateFixture f;
@@ -1069,6 +1093,9 @@ TEST(DocumentUpdateTest, tensor_modify_update_can_be_roundtrip_serialized)
f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::REPLACE, f.makeBaselineTensor()));
f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::ADD, f.makeBaselineTensor()));
f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::MULTIPLY, f.makeBaselineTensor()));
+ f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::REPLACE, f.makeBaselineTensor(), 0.0));
+ f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::ADD, f.makeBaselineTensor(), 0.0));
+ f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::MULTIPLY, f.makeBaselineTensor(), 1.0));
}
TEST(DocumentUpdateTest, tensor_modify_update_on_float_tensor_can_be_roundtrip_serialized)
@@ -1077,6 +1104,9 @@ TEST(DocumentUpdateTest, tensor_modify_update_on_float_tensor_can_be_roundtrip_s
f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::REPLACE, f.makeBaselineTensor()));
f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::ADD, f.makeBaselineTensor()));
f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::MULTIPLY, f.makeBaselineTensor()));
+ f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::REPLACE, f.makeBaselineTensor(), 0.0));
+ f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::ADD, f.makeBaselineTensor(), 0.0));
+ f.assertRoundtripSerialize(TensorModifyUpdate(TensorModifyUpdate::Operation::MULTIPLY, f.makeBaselineTensor(), 1.0));
}
TEST(DocumentUpdateTest, tensor_modify_update_on_dense_tensor_can_be_roundtrip_serialized)
@@ -1170,7 +1200,10 @@ struct TensorUpdateSerializeFixture {
result->addUpdate(FieldUpdate(getField("dense_tensor"))
.addUpdate(std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::REPLACE, makeTensor()))
.addUpdate(std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::ADD, makeTensor()))
- .addUpdate(std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::MULTIPLY, makeTensor())));
+ .addUpdate(std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::MULTIPLY, makeTensor()))
+ .addUpdate(std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::REPLACE, makeTensor(), 0.0))
+ .addUpdate(std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::ADD, makeTensor(), 0.0))
+ .addUpdate(std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::MULTIPLY, makeTensor(), 1.0)));
return result;
}
diff --git a/document/src/tests/serialization/vespadocumentserializer_test.cpp b/document/src/tests/serialization/vespadocumentserializer_test.cpp
index 1839005d720..03878f43e4b 100644
--- a/document/src/tests/serialization/vespadocumentserializer_test.cpp
+++ b/document/src/tests/serialization/vespadocumentserializer_test.cpp
@@ -686,7 +686,7 @@ void deserializeAndCheck(const string &file_name, FieldValueT &value,
const string &field_name) {
File file(file_name);
file.open(File::READONLY);
- vector<char> content(file.stat()._size);
+ vector<char> content(file.getFileSize());
size_t r = file.read(&content[0], content.size(), 0);
ASSERT_EQUAL(content.size(), r);
diff --git a/document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp b/document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp
index 47ef9f21a27..bf780dba5d3 100644
--- a/document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp
+++ b/document/src/tests/tensor_fieldvalue/partial_modify/partial_modify_test.cpp
@@ -60,12 +60,35 @@ Value::UP try_partial_modify(const TensorSpec &a, const TensorSpec &b, join_fun_
return TensorPartialUpdate::modify(*lhs, fun, *rhs, factory);
}
+Value::UP try_partial_modify_with_defaults(const TensorSpec &a, const TensorSpec &b, join_fun_t fun, double default_cell_value) {
+ const auto &factory = SimpleValueBuilderFactory::get();
+ auto lhs = value_from_spec(a, factory);
+ auto rhs = value_from_spec(b, factory);
+ return TensorPartialUpdate::modify_with_defaults(*lhs, fun, *rhs, default_cell_value, factory);
+}
+
TensorSpec perform_partial_modify(const TensorSpec &a, const TensorSpec &b, join_fun_t fun) {
auto up = try_partial_modify(a, b, fun);
EXPECT_TRUE(up);
return spec_from_value(*up);
}
+TensorSpec perform_partial_modify_with_defaults(const TensorSpec &a, const TensorSpec &b, join_fun_t fun, double default_cell_value) {
+ auto up = try_partial_modify_with_defaults(a, b, fun, default_cell_value);
+ EXPECT_TRUE(up);
+ return spec_from_value(*up);
+}
+
+void expect_modify_with_defaults(const vespalib::string& lhs_expr, const vespalib::string& rhs_expr,
+ join_fun_t fun, double default_cell_value, const vespalib::string& exp_expr) {
+ auto lhs = TensorSpec::from_expr(lhs_expr);
+ auto rhs = TensorSpec::from_expr(rhs_expr);
+ auto exp = TensorSpec::from_expr(exp_expr);
+ auto act = perform_partial_modify_with_defaults(lhs, rhs, fun, default_cell_value);
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ EXPECT_EQ(exp, act);
+}
+
TEST(PartialModifyTest, partial_modify_works_for_simple_values) {
for (const auto &layouts: modify_layouts) {
for (auto lhs_ct: CellTypeUtils::list_types()) {
@@ -87,6 +110,33 @@ TEST(PartialModifyTest, partial_modify_works_for_simple_values) {
}
}
+TEST(PartialModifyTest, partial_modify_with_defaults) {
+ expect_modify_with_defaults("tensor(x{}):{{x:\"a\"}:1,{x:\"b\"}:2}",
+ "tensor(x{}):{{x:\"b\"}:3}",
+ operation::Add::f, 0.0,
+ "tensor(x{}):{{x:\"a\"}:1,{x:\"b\"}:5}");
+
+ expect_modify_with_defaults("tensor(x{}):{{x:\"a\"}:1,{x:\"b\"}:2}",
+ "tensor(x{}):{{x:\"b\"}:3,{x:\"c\"}:4}",
+ operation::Add::f, 0.0,
+ "tensor(x{}):{{x:\"a\"}:1,{x:\"b\"}:5,{x:\"c\"}:4}");
+
+ expect_modify_with_defaults("tensor(x{},y[3]):{{x:\"a\",y:0}:3,{x:\"a\",y:1}:4,{x:\"a\",y:2}:5}",
+ "tensor(x{},y{}):{{x:\"a\",y:\"0\"}:6,"
+ "{x:\"b\",y:\"1\"}:7,{x:\"b\",y:\"2\"}:8,"
+ "{x:\"c\",y:\"0\"}:9}",
+ operation::Add::f, 1.0,
+ "tensor(x{},y[3]):{{x:\"a\",y:0}:9,{x:\"a\",y:1}:4,{x:\"a\",y:2}:5,"
+ "{x:\"b\",y:0}:1,{x:\"b\",y:1}:8,{x:\"b\",y:2}:9,"
+ "{x:\"c\",y:0}:10,{x:\"c\",y:1}:1,{x:\"c\",y:2}:1}");
+
+ // NOTE: The specified default cell value doesn't have any effect for tensors with only indexed dimensions,
+ // as the dense subspace is always represented (with default cell value 0.0).
+ expect_modify_with_defaults("tensor(x[3]):{{x:0}:2}", "tensor(x{}):{{x:\"1\"}:3}",
+ operation::Add::f, 2.0,
+ "tensor(x[3]):{{x:0}:2,{x:1}:3,{x:2}:0}");
+}
+
std::vector<std::pair<vespalib::string,vespalib::string>> bad_layouts = {
{ "x3", "x3" },
{ "x3y4_1", "x3y4_1" },
@@ -108,4 +158,17 @@ TEST(PartialModifyTest, partial_modify_returns_nullptr_on_invalid_inputs) {
}
}
+TEST(PartialModifyTest, partial_modify_with_defaults_returns_nullptr_on_invalid_inputs) {
+ for (const auto &layouts: bad_layouts) {
+ TensorSpec lhs = GenSpec::from_desc(layouts.first).seq(N());
+ TensorSpec rhs = GenSpec::from_desc(layouts.second).seq(Div16(N()));
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.to_string().c_str(), rhs.to_string().c_str()));
+ for (auto fun: {operation::Add::f}) {
+ auto actual = try_partial_modify_with_defaults(lhs, rhs, fun, 0.0);
+ auto expect = Value::UP();
+ EXPECT_EQ(actual, expect);
+ }
+ }
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/document/src/vespa/document/fieldvalue/tensorfieldvalue.h b/document/src/vespa/document/fieldvalue/tensorfieldvalue.h
index 52b27346ff8..7b025ea21a9 100644
--- a/document/src/vespa/document/fieldvalue/tensorfieldvalue.h
+++ b/document/src/vespa/document/fieldvalue/tensorfieldvalue.h
@@ -32,6 +32,7 @@ public:
void accept(FieldValueVisitor &visitor) override;
void accept(ConstFieldValueVisitor &visitor) const override;
const DataType *getDataType() const override;
+ const TensorDataType& get_tensor_data_type() const { return _dataType; }
TensorFieldValue* clone() const override;
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
void printXml(XmlOutputStream& out) const override;
diff --git a/document/src/vespa/document/serialization/vespadocumentserializer.cpp b/document/src/vespa/document/serialization/vespadocumentserializer.cpp
index 2a2b642dd48..c0b56150c04 100644
--- a/document/src/vespa/document/serialization/vespadocumentserializer.cpp
+++ b/document/src/vespa/document/serialization/vespadocumentserializer.cpp
@@ -476,11 +476,29 @@ VespaDocumentSerializer::write(const RemoveFieldPathUpdate &value)
writeFieldPath(_stream, value);
}
+namespace {
+
+uint8_t
+encode_operation_id(const TensorModifyUpdate& update)
+{
+ uint8_t op = static_cast<uint8_t>(update.getOperation());
+ uint8_t CREATE_FLAG = 0b10000000;
+ if (update.get_default_cell_value().has_value()) {
+ op |= CREATE_FLAG;
+ }
+ return op;
+}
+
+}
+
void
VespaDocumentSerializer::write(const TensorModifyUpdate &value)
{
_stream << uint32_t(ValueUpdate::TensorModify);
- _stream << static_cast<uint8_t>(value.getOperation());
+ _stream << encode_operation_id(value);
+ if (value.get_default_cell_value().has_value()) {
+ _stream << value.get_default_cell_value().value();
+ }
write(value.getTensor());
}
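
A minimal standalone sketch (not part of the patch) of the wire format change above: the high bit of the operation byte acts as a create-non-existing-cells flag, and when it is set the serializer writes the default cell value as a double right after the byte, before the tensor payload. It mirrors encode_operation_id() here and the decode_operation()/decode_create_non_existing_cells() pair added further down in tensor_modify_update.cpp; the numeric operation value used in main() is assumed for illustration only.

#include <cassert>
#include <cstdint>
#include <optional>

constexpr uint8_t CREATE_FLAG = 0b10000000;
constexpr uint8_t OP_MASK     = 0b01111111;

// encode: set the high bit when a default cell value is present
uint8_t encode_op(uint8_t op, const std::optional<double>& default_cell_value) {
    return default_cell_value.has_value() ? uint8_t(op | CREATE_FLAG) : op;
}

// decode: split the byte back into operation id and create flag
struct DecodedOp { uint8_t op; bool create_non_existing_cells; };
DecodedOp decode_op(uint8_t byte) {
    return { uint8_t(byte & OP_MASK), (byte & CREATE_FLAG) != 0 };
}

int main() {
    uint8_t add_op = 1; // hypothetical numeric value of Operation::ADD, for illustration
    auto decoded = decode_op(encode_op(add_op, 0.5));
    assert(decoded.op == add_op && decoded.create_non_existing_cells);
}
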
diff --git a/document/src/vespa/document/update/tensor_modify_update.cpp b/document/src/vespa/document/update/tensor_modify_update.cpp
index 92b2a8672c3..198ee1c67c3 100644
--- a/document/src/vespa/document/update/tensor_modify_update.cpp
+++ b/document/src/vespa/document/update/tensor_modify_update.cpp
@@ -9,9 +9,11 @@
#include <vespa/document/fieldvalue/tensorfieldvalue.h>
#include <vespa/document/serialization/vespadocumentdeserializer.h>
#include <vespa/document/util/serializableexceptions.h>
+#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/value.h>
-#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/value_codec.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/xmlstream.h>
@@ -19,10 +21,11 @@
using vespalib::IllegalArgumentException;
using vespalib::IllegalStateException;
-using vespalib::make_string;
-using vespalib::eval::ValueType;
using vespalib::eval::CellType;
using vespalib::eval::FastValueBuilderFactory;
+using vespalib::eval::Value;
+using vespalib::eval::ValueType;
+using vespalib::make_string;
using join_fun_t = double (*)(double, double);
@@ -87,7 +90,8 @@ TensorModifyUpdate::TensorModifyUpdate()
TensorUpdate(),
_operation(Operation::MAX_NUM_OPERATIONS),
_tensorType(),
- _tensor()
+ _tensor(),
+ _default_cell_value()
{
}
@@ -96,7 +100,19 @@ TensorModifyUpdate::TensorModifyUpdate(Operation operation, std::unique_ptr<Tens
TensorUpdate(),
_operation(operation),
_tensorType(std::make_unique<TensorDataType>(dynamic_cast<const TensorDataType &>(*tensor->getDataType()))),
- _tensor(static_cast<TensorFieldValue *>(_tensorType->createFieldValue().release()))
+ _tensor(static_cast<TensorFieldValue *>(_tensorType->createFieldValue().release())),
+ _default_cell_value()
+{
+ *_tensor = *tensor;
+}
+
+TensorModifyUpdate::TensorModifyUpdate(Operation operation, std::unique_ptr<TensorFieldValue> tensor, double default_cell_value)
+ : ValueUpdate(TensorModify),
+ TensorUpdate(),
+ _operation(operation),
+ _tensorType(std::make_unique<TensorDataType>(dynamic_cast<const TensorDataType &>(*tensor->getDataType()))),
+ _tensor(static_cast<TensorFieldValue *>(_tensorType->createFieldValue().release())),
+ _default_cell_value(default_cell_value)
{
*_tensor = *tensor;
}
@@ -116,6 +132,9 @@ TensorModifyUpdate::operator==(const ValueUpdate &other) const
if (*_tensor != *o._tensor) {
return false;
}
+ if (_default_cell_value != o._default_cell_value) {
+ return false;
+ }
return true;
}
@@ -129,34 +148,54 @@ TensorModifyUpdate::checkCompatibility(const Field& field) const
}
}
-std::unique_ptr<vespalib::eval::Value>
-TensorModifyUpdate::applyTo(const vespalib::eval::Value &tensor) const
+std::unique_ptr<Value>
+TensorModifyUpdate::applyTo(const Value &tensor) const
{
return apply_to(tensor, FastValueBuilderFactory::get());
}
-std::unique_ptr<vespalib::eval::Value>
+std::unique_ptr<Value>
TensorModifyUpdate::apply_to(const Value &old_tensor,
const ValueBuilderFactory &factory) const
{
if (auto cellsTensor = _tensor->getAsTensorPtr()) {
auto op = getJoinFunction(_operation);
- return TensorPartialUpdate::modify(old_tensor, op, *cellsTensor, factory);
+ if (_default_cell_value.has_value()) {
+ return TensorPartialUpdate::modify_with_defaults(old_tensor, op, *cellsTensor, _default_cell_value.value(), factory);
+ } else {
+ return TensorPartialUpdate::modify(old_tensor, op, *cellsTensor, factory);
+ }
}
return {};
}
+namespace {
+
+std::unique_ptr<Value>
+create_empty_tensor(const ValueType& type)
+{
+ const auto& factory = FastValueBuilderFactory::get();
+ vespalib::eval::TensorSpec empty_spec(type.to_spec());
+ return vespalib::eval::value_from_spec(empty_spec, factory);
+}
+
+}
+
bool
TensorModifyUpdate::applyTo(FieldValue& value) const
{
if (value.isA(FieldValue::Type::TENSOR)) {
TensorFieldValue &tensorFieldValue = static_cast<TensorFieldValue &>(value);
- auto oldTensor = tensorFieldValue.getAsTensorPtr();
- if (oldTensor) {
- auto newTensor = applyTo(*oldTensor);
- if (newTensor) {
- tensorFieldValue = std::move(newTensor);
- }
+ auto old_tensor = tensorFieldValue.getAsTensorPtr();
+ std::unique_ptr<Value> new_tensor;
+ if (old_tensor) {
+ new_tensor = applyTo(*old_tensor);
+ } else if (_default_cell_value.has_value()) {
+ auto empty_tensor = create_empty_tensor(tensorFieldValue.get_tensor_data_type().getTensorType());
+ new_tensor = applyTo(*empty_tensor);
+ }
+ if (new_tensor) {
+ tensorFieldValue = std::move(new_tensor);
}
} else {
vespalib::string err = make_string("Unable to perform a tensor modify update on a '%s' field value",
@@ -179,6 +218,9 @@ TensorModifyUpdate::print(std::ostream& out, bool verbose, const std::string& in
if (_tensor) {
_tensor->print(out, verbose, indent);
}
+ if (_default_cell_value.has_value()) {
+ out << "," << _default_cell_value.value();
+ }
out << ")";
}
@@ -198,6 +240,26 @@ verifyCellsTensorIsSparse(const vespalib::eval::Value *cellsTensor)
throw IllegalStateException(err, VESPA_STRLOC);
}
+TensorModifyUpdate::Operation
+decode_operation(uint8_t encoded_op)
+{
+ uint8_t OP_MASK = 0b01111111;
+ uint8_t op = encoded_op & OP_MASK;
+ if (op >= static_cast<uint8_t>(TensorModifyUpdate::Operation::MAX_NUM_OPERATIONS)) {
+ vespalib::asciistream msg;
+ msg << "Unrecognized tensor modify update operation " << static_cast<uint32_t>(op);
+ throw DeserializeException(msg.str(), VESPA_STRLOC);
+ }
+ return static_cast<TensorModifyUpdate::Operation>(op);
+}
+
+bool
+decode_create_non_existing_cells(uint8_t encoded_op)
+{
+ uint8_t CREATE_FLAG = 0b10000000;
+ return (encoded_op & CREATE_FLAG) != 0;
+}
+
}
void
@@ -205,12 +267,12 @@ TensorModifyUpdate::deserialize(const DocumentTypeRepo &repo, const DataType &ty
{
uint8_t op;
stream >> op;
- if (op >= static_cast<uint8_t>(Operation::MAX_NUM_OPERATIONS)) {
- vespalib::asciistream msg;
- msg << "Unrecognized tensor modify update operation " << static_cast<uint32_t>(op);
- throw DeserializeException(msg.str(), VESPA_STRLOC);
+ _operation = decode_operation(op);
+ if (decode_create_non_existing_cells(op)) {
+ double value;
+ stream >> value;
+ _default_cell_value = value;
}
- _operation = static_cast<Operation>(op);
_tensorType = convertToCompatibleType(dynamic_cast<const TensorDataType &>(type));
auto tensor = _tensorType->createFieldValue();
if (tensor->isA(FieldValue::Type::TENSOR)) {
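
A small sketch (not part of the patch) of the empty-tensor seeding done by create_empty_tensor() in tensor_modify_update.cpp above: when the field has no tensor value and a default cell value is present, an empty value of the field's tensor type is built from an empty TensorSpec and the update is applied to that. The include paths mirror the ones added to this file.

#include <vespa/eval/eval/fast_value.h>
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/value.h>
#include <vespa/eval/eval/value_codec.h>
#include <cassert>
#include <memory>

using namespace vespalib::eval;

// build an empty tensor value of the given type (no mapped subspaces yet)
std::unique_ptr<Value> make_empty(const char *type_spec) {
    const auto &factory = FastValueBuilderFactory::get();
    TensorSpec empty_spec(ValueType::from_spec(type_spec).to_spec());
    return value_from_spec(empty_spec, factory);
}

int main() {
    auto empty = make_empty("tensor(x{},y[3])");
    assert(empty && empty->index().size() == 0);
}
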
diff --git a/document/src/vespa/document/update/tensor_modify_update.h b/document/src/vespa/document/update/tensor_modify_update.h
index 9386b4f8a1b..931d5102c4f 100644
--- a/document/src/vespa/document/update/tensor_modify_update.h
+++ b/document/src/vespa/document/update/tensor_modify_update.h
@@ -2,6 +2,7 @@
#include "tensor_update.h"
#include "valueupdate.h"
+#include <optional>
namespace vespalib::eval { struct Value; }
@@ -29,12 +30,15 @@ private:
Operation _operation;
std::unique_ptr<const TensorDataType> _tensorType;
std::unique_ptr<TensorFieldValue> _tensor;
+ // When this is set, non-existing cells are created in the input tensor before applying the update.
+ std::optional<double> _default_cell_value;
friend ValueUpdate;
TensorModifyUpdate();
ACCEPT_UPDATE_VISITOR;
public:
TensorModifyUpdate(Operation operation, std::unique_ptr<TensorFieldValue> tensor);
+ TensorModifyUpdate(Operation operation, std::unique_ptr<TensorFieldValue> tensor, double default_cell_value);
TensorModifyUpdate(const TensorModifyUpdate &rhs) = delete;
TensorModifyUpdate &operator=(const TensorModifyUpdate &rhs) = delete;
~TensorModifyUpdate() override;
@@ -42,6 +46,7 @@ public:
bool operator==(const ValueUpdate &other) const override;
Operation getOperation() const { return _operation; }
const TensorFieldValue &getTensor() const { return *_tensor; }
+ const std::optional<double>& get_default_cell_value() const { return _default_cell_value; }
void checkCompatibility(const Field &field) const override;
std::unique_ptr<vespalib::eval::Value> applyTo(const vespalib::eval::Value &tensor) const;
std::unique_ptr<Value> apply_to(const Value &tensor,
diff --git a/document/src/vespa/document/update/tensor_partial_update.cpp b/document/src/vespa/document/update/tensor_partial_update.cpp
index 9c3db0edb5f..e37e5750384 100644
--- a/document/src/vespa/document/update/tensor_partial_update.cpp
+++ b/document/src/vespa/document/update/tensor_partial_update.cpp
@@ -1,11 +1,12 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "tensor_partial_update.h"
+#include <vespa/eval/eval/array_array_map.h>
#include <vespa/eval/eval/operation.h>
#include <vespa/vespalib/util/overload.h>
+#include <vespa/vespalib/util/shared_string_repo.h>
#include <vespa/vespalib/util/typify.h>
#include <vespa/vespalib/util/visit_ranges.h>
-#include <vespa/vespalib/util/shared_string_repo.h>
#include <cassert>
#include <set>
@@ -203,27 +204,27 @@ struct PerformModify {
static Value::UP invoke(const Value &input,
join_fun_t function,
const Value &modifier,
- const ValueBuilderFactory &factory);
+ const ValueBuilderFactory &factory,
+ AddressHandler& handler,
+ Value::UP output);
};
template <typename ICT, typename MCT>
Value::UP
-PerformModify::invoke(const Value &input, join_fun_t function, const Value &modifier, const ValueBuilderFactory &factory)
+PerformModify::invoke(const Value &input, join_fun_t function, const Value &modifier, const ValueBuilderFactory &factory,
+ AddressHandler& handler, Value::UP output)
{
const ValueType &input_type = input.type();
const size_t dsss = input_type.dense_subspace_size();
- const ValueType &modifier_type = modifier.type();
- AddressHandler handler(input_type, modifier_type);
- if (! handler.valid) {
- return {};
+ if (!output) {
+ // copy input to output
+ output = copy_tensor<ICT>(input, input_type, handler.for_output, factory);
}
- // copy input to output
- auto out = copy_tensor<ICT>(input, input_type, handler.for_output, factory);
// need to overwrite some cells
- auto output_cells = unconstify(out->cells().template typify<ICT>());
+ auto output_cells = unconstify(output->cells().template typify<ICT>());
const auto modifier_cells = modifier.cells().typify<MCT>();
auto modifier_view = modifier.index().create_view({});
- auto lookup_view = out->index().create_view(handler.for_output.lookup_view_dims);
+ auto lookup_view = output->index().create_view(handler.for_output.lookup_view_dims);
modifier_view->lookup({});
size_t modifier_subspace_index;
while (modifier_view->next_result(handler.from_modifier.next_result_refs, modifier_subspace_index)) {
@@ -242,7 +243,70 @@ PerformModify::invoke(const Value &input, join_fun_t function, const Value &modi
dst[dense_idx] = function(lhs, rhs);
}
}
- return out;
+ return output;
+}
+
+void
+find_sub_spaces_not_in_input(const Value& input, const Value& modifier, double default_cell_value,
+ AddressHandler& handler, ArrayArrayMap<string_id, double>& sub_spaces_result)
+{
+ auto lookup_view = input.index().create_view(handler.for_output.lookup_view_dims);
+ auto modifier_view = modifier.index().create_view({});
+ modifier_view->lookup({});
+ size_t modifier_subspace_index;
+ while (modifier_view->next_result(handler.from_modifier.next_result_refs, modifier_subspace_index)) {
+ handler.handle_address();
+ size_t dense_idx = handler.dense_converter.get_dense_index();
+ if (dense_idx == npos()) {
+ continue;
+ }
+ lookup_view->lookup(handler.for_output.lookup_refs);
+ size_t output_subspace_index;
+ if (!lookup_view->next_result({}, output_subspace_index)) {
+ ConstArrayRef<string_id> addr(handler.for_output.addr);
+ auto [tag, inserted] = sub_spaces_result.lookup_or_add_entry(addr);
+ if (inserted) {
+ auto values = sub_spaces_result.get_values(tag);
+ for (size_t i = 0; i < values.size(); ++i) {
+ values[i] = default_cell_value;
+ }
+ }
+ }
+ }
+}
+
+struct PerformInsertSubspaces {
+ template<typename ICT>
+ static Value::UP invoke(const Value& input,
+ SparseCoords& output_addrs,
+ const ArrayArrayMap<string_id, double>& sub_spaces,
+ const ValueBuilderFactory& factory);
+};
+
+template <typename ICT>
+Value::UP
+PerformInsertSubspaces::invoke(const Value& input,
+ SparseCoords& output_addrs,
+ const ArrayArrayMap<string_id, double>& sub_spaces,
+ const ValueBuilderFactory& factory)
+{
+ const auto& input_type = input.type();
+ const size_t num_mapped_in_input = input_type.count_mapped_dimensions();
+ const size_t dsss = input_type.dense_subspace_size();
+ const size_t expected_subspaces = input.index().size() + sub_spaces.size();
+ auto builder = factory.create_value_builder<ICT>(input_type, num_mapped_in_input, dsss, expected_subspaces);
+ auto no_filter = [] (const auto&, size_t) {
+ return true;
+ };
+ copy_tensor_with_filter<ICT>(input, dsss, output_addrs, *builder, no_filter);
+ sub_spaces.each_entry([&](vespalib::ConstArrayRef<string_id> keys, vespalib::ConstArrayRef<double> values) {
+ auto dst = builder->add_subspace(keys).begin();
+ assert(dsss == values.size());
+ for (size_t i = 0; i < dsss; ++i) {
+ dst[i] = values[i];
+ }
+ });
+ return builder->build(std::move(builder));
}
//-----------------------------------------------------------------------------
@@ -398,9 +462,37 @@ Value::UP
TensorPartialUpdate::modify(const Value &input, join_fun_t function,
const Value &modifier, const ValueBuilderFactory &factory)
{
+ AddressHandler handler(input.type(), modifier.type());
+ if (!handler.valid) {
+ return {};
+ }
+ return typify_invoke<2, TypifyCellType, PerformModify>(
+ input.cells().type, modifier.cells().type,
+ input, function, modifier, factory, handler, Value::UP());
+}
+
+Value::UP
+TensorPartialUpdate::modify_with_defaults(const Value& input, join_fun_t function,
+ const Value& modifier, double default_cell_value, const ValueBuilderFactory& factory)
+{
+ const auto& input_type = input.type();
+ AddressHandler handler(input_type, modifier.type());
+ if (!handler.valid) {
+ return {};
+ }
+ Value::UP output;
+ if (!input_type.is_dense()) {
+ const size_t dsss = input_type.dense_subspace_size();
+ ArrayArrayMap<string_id, double> sub_spaces(handler.for_output.addr.size(), dsss, modifier.index().size());
+ find_sub_spaces_not_in_input(input, modifier, default_cell_value, handler, sub_spaces);
+ if (sub_spaces.size() > 0) {
+ output = typify_invoke<1, TypifyCellType, PerformInsertSubspaces>(
+ input.cells().type, input, handler.for_output, sub_spaces, factory);
+ }
+ }
return typify_invoke<2, TypifyCellType, PerformModify>(
input.cells().type, modifier.cells().type,
- input, function, modifier, factory);
+ input, function, modifier, factory, handler, std::move(output));
}
Value::UP
diff --git a/document/src/vespa/document/update/tensor_partial_update.h b/document/src/vespa/document/update/tensor_partial_update.h
index 196b14f6f5c..f3069d59a9b 100644
--- a/document/src/vespa/document/update/tensor_partial_update.h
+++ b/document/src/vespa/document/update/tensor_partial_update.h
@@ -24,6 +24,15 @@ struct TensorPartialUpdate {
const Value &modifier, const ValueBuilderFactory &factory);
/**
+     * Make a copy of the input and add all dense sub-spaces (with default cell value) existing only in the modifier.
+ * Then apply function(oldvalue, modifier.cellvalue) to the cells that exist in the modifier.
+ * The modifier type must be sparse with exactly the same dimension names as the input type.
+ * Returns null pointer if this constraint is violated.
+ **/
+ static Value::UP modify_with_defaults(const Value& input, join_fun_t function,
+ const Value& modifier, double default_cell_value, const ValueBuilderFactory& factory);
+
+ /**
* Make a copy of the input, but add or overwrite cells from add_cells.
* Requires same type for input and add_cells.
* Returns null pointer if this constraint is violated.
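
A sketch of the documented modify_with_defaults semantics (not part of the patch), using the same helpers as partial_modify_test.cpp; the include path and document namespace are assumed from this header's location. Cells addressed only by the sparse modifier are first materialized with the default cell value, then joined with the modifier cells.

#include <vespa/document/update/tensor_partial_update.h>
#include <vespa/eval/eval/operation.h>
#include <vespa/eval/eval/simple_value.h>
#include <vespa/eval/eval/tensor_spec.h>
#include <vespa/eval/eval/value_codec.h>
#include <cassert>

using namespace vespalib::eval;

int main() {
    const auto &factory = SimpleValueBuilderFactory::get();
    auto input    = value_from_spec(TensorSpec::from_expr("tensor(x{}):{{x:\"a\"}:1,{x:\"b\"}:2}"), factory);
    auto modifier = value_from_spec(TensorSpec::from_expr("tensor(x{}):{{x:\"b\"}:3,{x:\"c\"}:4}"), factory);
    auto result = document::TensorPartialUpdate::modify_with_defaults(*input, operation::Add::f,
                                                                      *modifier, 0.0, factory);
    // {x:b} becomes 2+3=5; {x:c} is created with the default 0.0 and becomes 4
    assert(result && spec_from_value(*result) ==
           TensorSpec::from_expr("tensor(x{}):{{x:\"a\"}:1,{x:\"b\"}:5,{x:\"c\"}:4}"));
}
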
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index 4e9fd1b27be..34f1cc6788d 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -50,6 +50,7 @@ vespa_define_module(
src/tests/instruction/dense_dot_product_function
src/tests/instruction/dense_hamming_distance
src/tests/instruction/dense_inplace_join_function
+ src/tests/instruction/dense_join_reduce_plan
src/tests/instruction/dense_matmul_function
src/tests/instruction/dense_multi_matmul_function
src/tests/instruction/dense_replace_type_function
@@ -83,10 +84,12 @@ vespa_define_module(
src/tests/instruction/sparse_112_dot_product
src/tests/instruction/sparse_dot_product_function
src/tests/instruction/sparse_full_overlap_join_function
+ src/tests/instruction/sparse_join_reduce_plan
src/tests/instruction/sparse_merge_function
src/tests/instruction/sparse_no_overlap_join_function
src/tests/instruction/sparse_singledim_lookup
src/tests/instruction/sum_max_dot_product_function
+ src/tests/instruction/universal_dot_product
src/tests/instruction/unpack_bits_function
src/tests/instruction/vector_from_doubles_function
src/tests/streamed/value
diff --git a/eval/src/tests/eval/fast_value/fast_value_test.cpp b/eval/src/tests/eval/fast_value/fast_value_test.cpp
index 412d8bad200..6eed2168d45 100644
--- a/eval/src/tests/eval/fast_value/fast_value_test.cpp
+++ b/eval/src/tests/eval/fast_value/fast_value_test.cpp
@@ -62,6 +62,81 @@ TEST(FastCellsTest, add_cells_works) {
EXPECT_EQ(*cells.get(5), 6.0);
}
+TEST(FastValueTest, insert_subspace) {
+ Handle foo("foo");
+ Handle bar("bar");
+ string_id foo_id = foo.id();
+ string_id bar_id = bar.id();
+ auto addr = [](string_id &ref){ return ConstArrayRef<string_id>(&ref, 1); };
+ auto type = ValueType::from_spec("tensor<float>(x{},y[2])");
+ auto value = std::make_unique<FastValue<float,true>>(type, 1, 2, 5);
+ EXPECT_EQ(value->index().size(), 0);
+ {
+ auto [cells, added] = value->insert_subspace(addr(foo_id));
+ EXPECT_TRUE(added);
+ EXPECT_EQ(value->index().size(), 1);
+ ASSERT_EQ(cells.size(), 2);
+ cells[0] = 10.0;
+ cells[1] = 20.0;
+ }{
+ auto [cells, added] = value->insert_subspace(addr(bar_id));
+ EXPECT_TRUE(added);
+ EXPECT_EQ(value->index().size(), 2);
+ ASSERT_EQ(cells.size(), 2);
+ cells[0] = 30.0;
+ cells[1] = 40.0;
+ }{
+ auto [cells, added] = value->insert_subspace(addr(foo_id));
+ EXPECT_FALSE(added);
+ EXPECT_EQ(value->index().size(), 2);
+ ASSERT_EQ(cells.size(), 2);
+ EXPECT_EQ(cells[0], 10.0);
+ EXPECT_EQ(cells[1], 20.0);
+ cells[0] = 11.0;
+ cells[1] = 22.0;
+ }{
+ auto [cells, added] = value->insert_subspace(addr(bar_id));
+ EXPECT_FALSE(added);
+ EXPECT_EQ(value->index().size(), 2);
+ ASSERT_EQ(cells.size(), 2);
+ EXPECT_EQ(cells[0], 30.0);
+ EXPECT_EQ(cells[1], 40.0);
+ cells[0] = 33.0;
+ cells[1] = 44.0;
+ }
+ auto actual = spec_from_value(*value);
+ auto expected = TensorSpec("tensor<float>(x{},y[2])")
+ .add({{"x", "foo"}, {"y", 0}}, 11.0)
+ .add({{"x", "foo"}, {"y", 1}}, 22.0)
+ .add({{"x", "bar"}, {"y", 0}}, 33.0)
+ .add({{"x", "bar"}, {"y", 1}}, 44.0);
+ EXPECT_EQ(actual, expected);
+}
+
+TEST(FastValueTest, insert_empty_subspace) {
+ auto addr = [](){ return ConstArrayRef<string_id>(); };
+ auto type = ValueType::from_spec("double");
+ auto value = std::make_unique<FastValue<double,true>>(type, 0, 1, 1);
+ EXPECT_EQ(value->index().size(), 0);
+ {
+ auto [cells, added] = value->insert_subspace(addr());
+ EXPECT_TRUE(added);
+ EXPECT_EQ(value->index().size(), 1);
+ ASSERT_EQ(cells.size(), 1);
+ cells[0] = 10.0;
+ }{
+ auto [cells, added] = value->insert_subspace(addr());
+ EXPECT_FALSE(added);
+ EXPECT_EQ(value->index().size(), 1);
+ ASSERT_EQ(cells.size(), 1);
+ EXPECT_EQ(cells[0], 10.0);
+ cells[0] = 11.0;
+ }
+ auto actual = spec_from_value(*value);
+ auto expected = TensorSpec("double").add({}, 11.0);
+ EXPECT_EQ(actual, expected);
+}
+
using SA = std::vector<vespalib::stringref>;
TEST(FastValueBuilderTest, scalar_add_subspace_robustness) {
diff --git a/eval/src/tests/eval/nested_loop/nested_loop_test.cpp b/eval/src/tests/eval/nested_loop/nested_loop_test.cpp
index caa76816599..5c5d4b219c5 100644
--- a/eval/src/tests/eval/nested_loop/nested_loop_test.cpp
+++ b/eval/src/tests/eval/nested_loop/nested_loop_test.cpp
@@ -5,7 +5,7 @@
using namespace vespalib::eval;
-std::vector<size_t> run_single(size_t idx_in, const std::vector<size_t> &loop, const std::vector<size_t> &stride) {
+std::vector<size_t> run_loop(size_t idx_in, const std::vector<size_t> &loop, const std::vector<size_t> &stride) {
std::vector<size_t> result;
auto capture = [&](size_t idx_out) { result.push_back(idx_out); };
assert(loop.size() == stride.size());
@@ -13,8 +13,8 @@ std::vector<size_t> run_single(size_t idx_in, const std::vector<size_t> &loop, c
return result;
}
-std::vector<std::pair<size_t,size_t>> run_double(size_t idx1_in, size_t idx2_in, const std::vector<size_t> &loop,
- const std::vector<size_t> &stride1, const std::vector<size_t> &stride2)
+std::vector<std::pair<size_t,size_t>> run_two_loops(size_t idx1_in, size_t idx2_in, const std::vector<size_t> &loop,
+ const std::vector<size_t> &stride1, const std::vector<size_t> &stride2)
{
std::vector<std::pair<size_t,size_t>> result;
auto capture = [&](size_t idx1_out, size_t idx2_out) { result.emplace_back(idx1_out, idx2_out); };
@@ -24,37 +24,77 @@ std::vector<std::pair<size_t,size_t>> run_double(size_t idx1_in, size_t idx2_in,
return result;
}
-void verify_double(size_t idx1_in, size_t idx2_in, const std::vector<size_t> &loop,
- const std::vector<size_t> &stride1, const std::vector<size_t> &stride2)
+void add_entry(std::vector<std::vector<size_t>> &result, std::vector<size_t> value) {
+ result.push_back(std::move(value));
+}
+
+std::vector<std::vector<size_t>> run_three_loops(size_t idx1_in, size_t idx2_in, size_t idx3_in, const std::vector<size_t> &loop,
+ const std::vector<size_t> &stride1, const std::vector<size_t> &stride2, const std::vector<size_t> &stride3)
{
- auto res1 = run_single(idx1_in, loop, stride1);
- auto res2 = run_single(idx2_in, loop, stride2);
+ std::vector<std::vector<size_t>> result;
+ auto capture = [&](size_t idx1_out, size_t idx2_out, size_t idx3_out) { add_entry(result, {idx1_out, idx2_out, idx3_out}); };
+ assert(loop.size() == stride1.size());
+ assert(loop.size() == stride2.size());
+ assert(loop.size() == stride3.size());
+ run_nested_loop(idx1_in, idx2_in, idx3_in, loop, stride1, stride2, stride3, capture);
+ return result;
+}
+
+void verify_two(size_t idx1_in, size_t idx2_in, const std::vector<size_t> &loop,
+ const std::vector<size_t> &stride1, const std::vector<size_t> &stride2)
+{
+ auto res1 = run_loop(idx1_in, loop, stride1);
+ auto res2 = run_loop(idx2_in, loop, stride2);
ASSERT_EQ(res1.size(), res2.size());
std::vector<std::pair<size_t,size_t>> expect;
for (size_t i = 0; i < res1.size(); ++i) {
expect.emplace_back(res1[i], res2[i]);
}
- auto actual = run_double(idx1_in, idx2_in, loop, stride1, stride2);
+ auto actual = run_two_loops(idx1_in, idx2_in, loop, stride1, stride2);
+ EXPECT_EQ(actual, expect);
+}
+
+void verify_three(size_t idx1_in, size_t idx2_in, size_t idx3_in, const std::vector<size_t> &loop,
+ const std::vector<size_t> &stride1, const std::vector<size_t> &stride2, const std::vector<size_t> &stride3)
+{
+ auto res1 = run_loop(idx1_in, loop, stride1);
+ auto res2 = run_loop(idx2_in, loop, stride2);
+ auto res3 = run_loop(idx3_in, loop, stride3);
+ ASSERT_EQ(res1.size(), res2.size());
+ ASSERT_EQ(res1.size(), res3.size());
+ std::vector<std::vector<size_t>> expect;
+ for (size_t i = 0; i < res1.size(); ++i) {
+ add_entry(expect, {res1[i], res2[i], res3[i]});
+ }
+ auto actual = run_three_loops(idx1_in, idx2_in, idx3_in, loop, stride1, stride2, stride3);
EXPECT_EQ(actual, expect);
}
std::vector<size_t> v(std::vector<size_t> vec) { return vec; }
-TEST(NestedLoopTest, single_nested_loop_can_be_executed) {
- EXPECT_EQ(v({123}), run_single(123, {}, {}));
- EXPECT_EQ(v({10,11}), run_single(10, {2}, {1}));
- EXPECT_EQ(v({100,110,101,111}), run_single(100, {2,2}, {1,10}));
- EXPECT_EQ(v({100,110,100,110,101,111,101,111}), run_single(100, {2,2,2}, {1,0,10}));
+TEST(NestedLoopTest, nested_loop_can_be_executed) {
+ EXPECT_EQ(v({123}), run_loop(123, {}, {}));
+ EXPECT_EQ(v({10,11}), run_loop(10, {2}, {1}));
+ EXPECT_EQ(v({100,110,101,111}), run_loop(100, {2,2}, {1,10}));
+ EXPECT_EQ(v({100,110,100,110,101,111,101,111}), run_loop(100, {2,2,2}, {1,0,10}));
EXPECT_EQ(v({100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115}),
- run_single(100, {2,2,2,2}, {8,4,2,1}));
+ run_loop(100, {2,2,2,2}, {8,4,2,1}));
+}
+
+TEST(NestedLoopTest, two_parallel_nested_loops_can_be_executed) {
+ verify_two(10, 20, {}, {}, {});
+ verify_two(10, 20, {3}, {5}, {7});
+ verify_two(10, 20, {3,3}, {2,3}, {7,5});
+ verify_two(10, 20, {3,3,2}, {2,0,3}, {0,7,5});
+ verify_two(10, 20, {2,3,2,3}, {7,2,1,3}, {3,7,5,1});
}
-TEST(NestedLoopTest, double_nested_loop_can_be_executed) {
- verify_double(10, 20, {}, {}, {});
- verify_double(10, 20, {3}, {5}, {7});
- verify_double(10, 20, {3,3}, {2,3}, {7,5});
- verify_double(10, 20, {3,3,2}, {2,0,3}, {0,7,5});
- verify_double(10, 20, {2,3,2,3}, {7,2,1,3}, {3,7,5,1});
+TEST(NestedLoopTest, three_parallel_nested_loops_can_be_executed) {
+ verify_three(10, 20, 30, {}, {}, {}, {});
+ verify_three(10, 20, 30, {3}, {5}, {7}, {3});
+ verify_three(10, 20, 30, {3,3}, {2,3}, {7,5}, {5, 3});
+ verify_three(10, 20, 30, {3,3,2}, {2,0,3}, {0,7,5}, {5, 3, 0});
+ verify_three(10, 20, 30, {2,3,2,3}, {7,2,1,3}, {3,7,5,1}, {1,5,7,3});
}
GTEST_MAIN_RUN_ALL_TESTS()
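
A minimal sketch (not part of the patch) of driving the new three-index overload of run_nested_loop() directly, without the verify_three()/run_three_loops() helpers above; the shared loop counts apply to all three index streams, while each stream has its own strides.

#include <vespa/eval/eval/nested_loop.h>
#include <cstdio>
#include <vector>

using namespace vespalib::eval;

int main() {
    std::vector<size_t> loop    = {2, 2};
    std::vector<size_t> stride1 = {1, 10};
    std::vector<size_t> stride2 = {10, 1};
    std::vector<size_t> stride3 = {0, 1};
    // prints one line per inner iteration, with the three generated indexes
    run_nested_loop(100, 200, 300, loop, stride1, stride2, stride3,
                    [](size_t i1, size_t i2, size_t i3) { printf("%zu %zu %zu\n", i1, i2, i3); });
    return 0;
}
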
diff --git a/eval/src/tests/eval/value_type/value_type_test.cpp b/eval/src/tests/eval/value_type/value_type_test.cpp
index feb6fb5c368..2e505a9d16d 100644
--- a/eval/src/tests/eval/value_type/value_type_test.cpp
+++ b/eval/src/tests/eval/value_type/value_type_test.cpp
@@ -363,6 +363,20 @@ TEST("require that dimension index can be obtained") {
EXPECT_EQUAL(type("tensor(y[10],x{},z[5])").dimension_index("w"), ValueType::Dimension::npos);
}
+TEST("require that dimension stride can be calculated") {
+ EXPECT_EQUAL(type("error").stride_of("x"), 0u);
+ EXPECT_EQUAL(type("double").stride_of("x"), 0u);
+ EXPECT_EQUAL(type("tensor()").stride_of("x"), 0u);
+ EXPECT_EQUAL(type("tensor(x{})").stride_of("x"), 0u);
+ EXPECT_EQUAL(type("tensor(x[10])").stride_of("x"), 1u);
+ EXPECT_EQUAL(type("tensor(x[10])").stride_of("y"), 0u);
+ EXPECT_EQUAL(type("tensor(x[10],y[5])").stride_of("x"), 5u);
+ EXPECT_EQUAL(type("tensor(x[10],y[5],z[3])").stride_of("x"), 15u);
+ EXPECT_EQUAL(type("tensor(x[10],y[5],z[3])").stride_of("y"), 3u);
+ EXPECT_EQUAL(type("tensor(x[10],y[5],z[3])").stride_of("z"), 1u);
+ EXPECT_EQUAL(type("tensor(x[10],y{},z[3])").stride_of("x"), 3u);
+}
+
void verify_predicates(const ValueType &type,
bool expect_error, bool expect_double, bool expect_tensor,
bool expect_sparse, bool expect_dense, bool expect_mixed)
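
For reference, the stride expectations above are consistent with stride_of(dim) being the product of the sizes of the indexed dimensions that come after dim in the type, with mapped dimensions contributing nothing and non-indexed or unknown dimensions yielding 0: in tensor(x[10],y[5],z[3]) the stride of x is 5 * 3 = 15, of y is 3, and of z is 1; in tensor(x[10],y{},z[3]) the stride of x is just 3, because the mapped y dimension has no dense extent.
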
diff --git a/eval/src/tests/instruction/dense_join_reduce_plan/CMakeLists.txt b/eval/src/tests/instruction/dense_join_reduce_plan/CMakeLists.txt
new file mode 100644
index 00000000000..9de33bcf8a0
--- /dev/null
+++ b/eval/src/tests/instruction/dense_join_reduce_plan/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_dense_join_reduce_plan_test_app TEST
+ SOURCES
+ dense_join_reduce_plan_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_dense_join_reduce_plan_test_app COMMAND eval_dense_join_reduce_plan_test_app)
diff --git a/eval/src/tests/instruction/dense_join_reduce_plan/dense_join_reduce_plan_test.cpp b/eval/src/tests/instruction/dense_join_reduce_plan/dense_join_reduce_plan_test.cpp
new file mode 100644
index 00000000000..9851e209ba5
--- /dev/null
+++ b/eval/src/tests/instruction/dense_join_reduce_plan/dense_join_reduce_plan_test.cpp
@@ -0,0 +1,101 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/instruction/dense_join_reduce_plan.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::instruction;
+
+ValueType type(const vespalib::string &type_spec) {
+ return ValueType::from_spec(type_spec);
+}
+
+TEST(DenseJoinReducePlanTest, make_trivial_plan) {
+ auto plan = DenseJoinReducePlan(type("double"), type("double"), type("double"));
+ EXPECT_TRUE(plan.distinct_result());
+ EXPECT_EQ(plan.lhs_size, 1);
+ EXPECT_EQ(plan.rhs_size, 1);
+ EXPECT_EQ(plan.res_size, 1);
+ EXPECT_TRUE(plan.loop_cnt.empty());
+ EXPECT_TRUE(plan.lhs_stride.empty());
+ EXPECT_TRUE(plan.rhs_stride.empty());
+ EXPECT_TRUE(plan.res_stride.empty());
+}
+
+TEST(DenseJoinReducePlanTest, execute_trivial_plan) {
+ auto plan = DenseJoinReducePlan(type("double"), type("double"), type("double"));
+ size_t res = 0;
+ auto join_reduce = [&](size_t a_idx, size_t b_idx, size_t c_idx) {
+ res += (12 + a_idx + b_idx + c_idx);
+ };
+ plan.execute(5, 10, 15, join_reduce);
+ EXPECT_EQ(res, 42);
+}
+
+TEST(DenseJoinReducePlanTest, make_simple_plan) {
+ auto plan = DenseJoinReducePlan(type("tensor(a[2])"), type("tensor(b[3])"), type("tensor(a[2])"));
+ SmallVector<size_t> expect_loop = {2,3};
+ SmallVector<size_t> expect_lhs_stride = {1,0};
+ SmallVector<size_t> expect_rhs_stride = {0,1};
+ SmallVector<size_t> expect_res_stride = {1,0};
+ EXPECT_FALSE(plan.distinct_result());
+ EXPECT_EQ(plan.lhs_size, 2);
+ EXPECT_EQ(plan.rhs_size, 3);
+ EXPECT_EQ(plan.res_size, 2);
+ EXPECT_EQ(plan.loop_cnt, expect_loop);
+ EXPECT_EQ(plan.lhs_stride, expect_lhs_stride);
+ EXPECT_EQ(plan.rhs_stride, expect_rhs_stride);
+ EXPECT_EQ(plan.res_stride, expect_res_stride);
+}
+
+TEST(DenseJoinReducePlanTest, execute_simple_plan) {
+ auto plan = DenseJoinReducePlan(type("tensor(a[2])"), type("tensor(b[3])"), type("tensor(a[2])"));
+ std::vector<int> a({1, 2});
+ std::vector<int> b({3, 4, 5});
+ std::vector<int> c(2, 0);
+ std::vector<int> expect = {12, 24};
+ ASSERT_EQ(plan.res_size, 2);
+ auto join_reduce = [&](size_t a_idx, size_t b_idx, size_t c_idx) { c[c_idx] += (a[a_idx] * b[b_idx]); };
+ plan.execute(0, 0, 0, join_reduce);
+ EXPECT_EQ(c, expect);
+}
+
+TEST(DenseJoinReducePlanTest, make_distinct_plan) {
+ auto plan = DenseJoinReducePlan(type("tensor(a[2])"),
+ type("tensor(b[3])"),
+ type("tensor(a[2],b[3])"));
+ SmallVector<size_t> expect_loop = {2,3};
+ SmallVector<size_t> expect_lhs_stride = {1,0};
+ SmallVector<size_t> expect_rhs_stride = {0,1};
+ SmallVector<size_t> expect_res_stride = {3,1};
+ EXPECT_TRUE(plan.distinct_result());
+ EXPECT_EQ(plan.lhs_size, 2);
+ EXPECT_EQ(plan.rhs_size, 3);
+ EXPECT_EQ(plan.res_size, 6);
+ EXPECT_EQ(plan.loop_cnt, expect_loop);
+ EXPECT_EQ(plan.lhs_stride, expect_lhs_stride);
+ EXPECT_EQ(plan.rhs_stride, expect_rhs_stride);
+ EXPECT_EQ(plan.res_stride, expect_res_stride);
+}
+
+TEST(DenseJoinReducePlanTest, make_complex_plan) {
+ auto lhs = type("tensor(a{},b[6],c[5],e[3],f[2],g{})");
+ auto rhs = type("tensor(a{},b[6],c[5],d[4],h{})");
+ auto res = type("tensor(a{},b[6],c[5],d[4],e[3])");
+ auto plan = DenseJoinReducePlan(lhs, rhs, res);
+ SmallVector<size_t> expect_loop = {30,4,3,2};
+ SmallVector<size_t> expect_lhs_stride = {6,0,2,1};
+ SmallVector<size_t> expect_rhs_stride = {4,1,0,0};
+ SmallVector<size_t> expect_res_stride = {12,3,1,0};
+ EXPECT_FALSE(plan.distinct_result());
+ EXPECT_EQ(plan.lhs_size, 180);
+ EXPECT_EQ(plan.rhs_size, 120);
+ EXPECT_EQ(plan.res_size, 360);
+ EXPECT_EQ(plan.loop_cnt, expect_loop);
+ EXPECT_EQ(plan.lhs_stride, expect_lhs_stride);
+ EXPECT_EQ(plan.rhs_stride, expect_rhs_stride);
+ EXPECT_EQ(plan.res_stride, expect_res_stride);
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/instruction/sparse_join_reduce_plan/CMakeLists.txt b/eval/src/tests/instruction/sparse_join_reduce_plan/CMakeLists.txt
new file mode 100644
index 00000000000..a333a5e9638
--- /dev/null
+++ b/eval/src/tests/instruction/sparse_join_reduce_plan/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_sparse_join_reduce_plan_test_app TEST
+ SOURCES
+ sparse_join_reduce_plan_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_sparse_join_reduce_plan_test_app COMMAND eval_sparse_join_reduce_plan_test_app)
diff --git a/eval/src/tests/instruction/sparse_join_reduce_plan/sparse_join_reduce_plan_test.cpp b/eval/src/tests/instruction/sparse_join_reduce_plan/sparse_join_reduce_plan_test.cpp
new file mode 100644
index 00000000000..e101487ff59
--- /dev/null
+++ b/eval/src/tests/instruction/sparse_join_reduce_plan/sparse_join_reduce_plan_test.cpp
@@ -0,0 +1,205 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/value.h>
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/eval/test/gen_spec.h>
+#include <vespa/eval/instruction/sparse_join_reduce_plan.h>
+#include <vespa/vespalib/util/shared_string_repo.h>
+#include <vespa/vespalib/gtest/gtest.h>
+#include <algorithm>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+using namespace vespalib::eval::instruction;
+
+using Handle = vespalib::SharedStringRepo::Handle;
+
+Value::UP val(const vespalib::string &value_desc) {
+ return value_from_spec(GenSpec::from_desc(value_desc), FastValueBuilderFactory::get());
+}
+
+Handle make_handle(string_id id) {
+ return Handle::handle_from_id(id);
+}
+
+Handle make_handle(const vespalib::string &str) {
+ return Handle(str);
+}
+
+struct Event {
+ size_t lhs_idx;
+ size_t rhs_idx;
+ std::vector<Handle> res_addr;
+ template <typename ADDR>
+ Event(size_t a, size_t b, ADDR addr)
+ : lhs_idx(a), rhs_idx(b), res_addr()
+ {
+ for (auto label: addr) {
+ res_addr.push_back(make_handle(label));
+ }
+ }
+ auto operator<=>(const Event &rhs) const = default;
+};
+
+struct Trace {
+ size_t estimate;
+ std::vector<Event> events;
+ Trace(size_t estimate_in)
+ : estimate(estimate_in), events() {}
+ void add_raw(size_t lhs_idx, size_t rhs_idx, ConstArrayRef<string_id> res_addr) {
+ events.emplace_back(lhs_idx, rhs_idx, res_addr);
+ }
+ Trace &add(size_t lhs_idx, size_t rhs_idx, std::vector<vespalib::string> res_addr) {
+ events.emplace_back(lhs_idx, rhs_idx, res_addr);
+ return *this;
+ }
+ auto operator<=>(const Trace &rhs) const = default;
+};
+
+std::ostream &
+operator<<(std::ostream &os, const Event &event) {
+ os << "{ lhs: " << event.lhs_idx << ", rhs: " << event.rhs_idx << ", addr: [";
+ for (size_t i = 0; i < event.res_addr.size(); ++i) {
+ if (i > 0) {
+ os << ", ";
+ }
+ os << event.res_addr[i].as_string();
+ }
+ os << "] }";
+ return os;
+}
+
+std::ostream &
+operator<<(std::ostream &os, const Trace &trace) {
+ os << "estimate: " << trace.estimate << "\n";
+ for (const Event &event: trace.events) {
+ os << " " << event << "\n";
+ }
+ return os;
+}
+
+Trace trace(size_t est) { return Trace(est); }
+
+Trace trace(const vespalib::string &a_desc, const vespalib::string &b_desc,
+ const std::vector<vespalib::string> &reduce_dims)
+{
+ auto a = val(a_desc);
+ auto b = val(b_desc);
+ auto res_type = ValueType::join(a->type(), b->type());
+ if (!reduce_dims.empty()) {
+ res_type = res_type.reduce(reduce_dims);
+ }
+ SparseJoinReducePlan plan(a->type(), b->type(), res_type);
+ Trace trace(plan.estimate_result_size(a->index(), b->index()));
+ plan.execute(a->index(), b->index(),
+ [&trace](size_t lhs_idx, size_t rhs_idx, ConstArrayRef<string_id> res_addr) {
+ trace.add_raw(lhs_idx, rhs_idx, res_addr);
+ });
+ return trace;
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(SparseJoinReducePlanTest, simple_dense) {
+ EXPECT_EQ(trace("x10", "x10", {}), trace(1).add(0, 0, {}));
+ EXPECT_EQ(trace("x10", "x10", {"x"}), trace(1).add(0, 0, {}));
+}
+
+TEST(SparseJoinReducePlanTest, many_dimensions) {
+ EXPECT_EQ(trace("a1_1b1_2c1_3d1_4", "c1_3d1_4e1_5f1_6", {"b","d","f"}), trace(1).add(0, 0, {"1", "3", "5"}));
+ EXPECT_EQ(trace("c1_3d1_4e1_5f1_6", "a1_1b1_2c1_3d1_4", {"b","d","f"}), trace(1).add(0, 0, {"1", "3", "5"}));
+}
+
+TEST(SparseJoinReducePlanTest, traverse_order_can_be_swapped) {
+ EXPECT_EQ(trace("x2_4", "y3_1", {}), trace(6).add(0, 0, {"4", "1"}).add(0, 1, {"4", "2"}).add(0, 2, {"4", "3"})
+ .add(1, 0, {"8", "1"}).add(1, 1, {"8", "2"}).add(1, 2, {"8", "3"}));
+ EXPECT_EQ(trace("y3_1", "x2_4", {}), trace(6).add(0, 0, {"4", "1"}).add(1, 0, {"4", "2"}).add(2, 0, {"4", "3"})
+ .add(0, 1, {"8", "1"}).add(1, 1, {"8", "2"}).add(2, 1, {"8", "3"}));
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(SparseJoinReducePlanTest, full_overlap_no_reduce) {
+ EXPECT_EQ(trace("x4_1", "x2_2", {}), trace(2).add(1, 0, {"2"}).add(3, 1, {"4"}));
+ EXPECT_EQ(trace("x1_1", "x0_0", {}), trace(0));
+ EXPECT_EQ(trace("x0_0", "x1_1", {}), trace(0));
+}
+
+TEST(SparseJoinReducePlanTest, full_overlap_reduce_all) {
+ EXPECT_EQ(trace("x4_1", "x2_2", {"x"}), trace(1).add(1, 0, {}).add(3, 1, {}));
+ EXPECT_EQ(trace("x1_1", "x0_0", {"x"}), trace(1));
+ EXPECT_EQ(trace("x0_0", "x1_1", {"x"}), trace(1));
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(SparseJoinReducePlanTest, no_overlap_no_reduce) {
+ EXPECT_EQ(trace("x2_1", "y3_1", {}), trace(6).add(0, 0, {"1", "1"}).add(0, 1, {"1", "2"}).add(0, 2, {"1", "3"})
+ .add(1, 0, {"2", "1"}).add(1, 1, {"2", "2"}).add(1, 2, {"2", "3"}));
+ EXPECT_EQ(trace("x1_1", "y0_0", {}), trace(0));
+ EXPECT_EQ(trace("y0_0", "x1_1", {}), trace(0));
+}
+
+TEST(SparseJoinReducePlanTest, no_overlap_reduce_last) {
+ EXPECT_EQ(trace("x2_1", "y3_1", {"y"}), trace(2).add(0, 0, {"1"}).add(0, 1, {"1"}).add(0, 2, {"1"})
+ .add(1, 0, {"2"}).add(1, 1, {"2"}).add(1, 2, {"2"}));
+ EXPECT_EQ(trace("x1_1", "y0_0", {"y"}), trace(0));
+ EXPECT_EQ(trace("y0_0", "x1_1", {"y"}), trace(0));
+}
+
+TEST(SparseJoinReducePlanTest, no_overlap_reduce_first) {
+ EXPECT_EQ(trace("x2_1", "y3_1", {"x"}), trace(3).add(0, 0, {"1"}).add(0, 1, {"2"}).add(0, 2, {"3"})
+ .add(1, 0, {"1"}).add(1, 1, {"2"}).add(1, 2, {"3"}));
+ EXPECT_EQ(trace("x0_0", "y1_1", {"x"}), trace(0));
+ EXPECT_EQ(trace("y1_1", "x0_0", {"x"}), trace(0));
+}
+
+TEST(SparseJoinReducePlanTest, no_overlap_reduce_all) {
+ EXPECT_EQ(trace("x2_1", "y3_1", {"x", "y"}), trace(1).add(0, 0, {}).add(0, 1, {}).add(0, 2, {})
+ .add(1, 0, {}).add(1, 1, {}).add(1, 2, {}));
+ EXPECT_EQ(trace("x0_0", "y1_1", {"x", "y"}), trace(1));
+ EXPECT_EQ(trace("y1_1", "x0_0", {"x", "y"}), trace(1));
+}
+
+//-----------------------------------------------------------------------------
+
+TEST(SparseJoinReducePlanTest, partial_overlap_no_reduce) {
+ EXPECT_EQ(trace("x2_1y1_1", "y1_1z2_3", {}), trace(2).add(0, 0, {"1", "1", "3"}).add(0, 1, {"1", "1", "6"})
+ .add(1, 0, {"2", "1", "3"}).add(1, 1, {"2", "1", "6"}));
+ EXPECT_EQ(trace("x2_1y1_1", "y1_2z3_1", {}), trace(2));
+ EXPECT_EQ(trace("x2_1y1_1", "y0_0z2_3", {}), trace(0));
+}
+
+TEST(SparseJoinReducePlanTest, partial_overlap_reduce_first) {
+ EXPECT_EQ(trace("x2_1y1_1", "y1_1z2_3", {"x"}), trace(2).add(0, 0, {"1", "3"}).add(0, 1, {"1", "6"})
+ .add(1, 0, {"1", "3"}).add(1, 1, {"1", "6"}));
+ EXPECT_EQ(trace("x2_1y1_1", "y1_2z3_1", {"x"}), trace(2));
+ EXPECT_EQ(trace("x2_1y1_1", "y0_0z2_3", {"x"}), trace(0));
+}
+
+TEST(SparseJoinReducePlanTest, partial_overlap_reduce_middle) {
+ EXPECT_EQ(trace("x2_1y1_1", "y1_1z2_3", {"y"}), trace(2).add(0, 0, {"1", "3"}).add(0, 1, {"1", "6"})
+ .add(1, 0, {"2", "3"}).add(1, 1, {"2", "6"}));
+ EXPECT_EQ(trace("x2_1y1_1", "y1_2z3_1", {"y"}), trace(2));
+ EXPECT_EQ(trace("x2_1y1_1", "y0_0z2_3", {"y"}), trace(0));
+}
+
+TEST(SparseJoinReducePlanTest, partial_overlap_reduce_last) {
+ EXPECT_EQ(trace("x2_1y1_1", "y1_1z2_3", {"z"}), trace(2).add(0, 0, {"1", "1"}).add(0, 1, {"1", "1"})
+ .add(1, 0, {"2", "1"}).add(1, 1, {"2", "1"}));
+ EXPECT_EQ(trace("x2_1y1_1", "y1_2z3_1", {"z"}), trace(2));
+ EXPECT_EQ(trace("x2_1y1_1", "y0_0z2_3", {"z"}), trace(0));
+}
+
+TEST(SparseJoinReducePlanTest, partial_overlap_reduce_all) {
+ EXPECT_EQ(trace("x2_1y1_1", "y1_1z2_3", {"x", "y", "z"}), trace(1).add(0, 0, {}).add(0, 1, {})
+ .add(1, 0, {}).add(1, 1, {}));
+ EXPECT_EQ(trace("x2_1y1_1", "y1_2z3_1", {"x", "y", "z"}), trace(1));
+ EXPECT_EQ(trace("x2_1y1_1", "y0_0z2_3", {"x", "y", "z"}), trace(1));
+}
+
+//-----------------------------------------------------------------------------
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/tests/instruction/universal_dot_product/CMakeLists.txt b/eval/src/tests/instruction/universal_dot_product/CMakeLists.txt
new file mode 100644
index 00000000000..19023b48d04
--- /dev/null
+++ b/eval/src/tests/instruction/universal_dot_product/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(eval_universal_dot_product_test_app TEST
+ SOURCES
+ universal_dot_product_test.cpp
+ DEPENDS
+ vespaeval
+ GTest::GTest
+)
+vespa_add_test(NAME eval_universal_dot_product_test_app COMMAND eval_universal_dot_product_test_app)
diff --git a/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp b/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp
new file mode 100644
index 00000000000..3f60ad69b86
--- /dev/null
+++ b/eval/src/tests/instruction/universal_dot_product/universal_dot_product_test.cpp
@@ -0,0 +1,89 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/eval/eval/fast_value.h>
+#include <vespa/eval/eval/value_codec.h>
+#include <vespa/eval/eval/interpreted_function.h>
+#include <vespa/eval/eval/tensor_function.h>
+#include <vespa/eval/instruction/universal_dot_product.h>
+#include <vespa/eval/eval/test/reference_operations.h>
+#include <vespa/eval/eval/test/gen_spec.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/gtest/gtest.h>
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::eval::test;
+
+using vespalib::make_string_short::fmt;
+
+const ValueBuilderFactory &prod_factory = FastValueBuilderFactory::get();
+
+GenSpec::seq_t N_16ths = [] (size_t i) noexcept { return (i + 33.0) / 16.0; };
+
+GenSpec G() { return GenSpec().seq(N_16ths); }
+
+const std::vector<GenSpec> layouts = {
+ G(), G(),
+ G().idx("x", 5), G().idx("x", 5),
+ G().idx("x", 5), G().idx("y", 5),
+ G().idx("x", 5), G().idx("x", 5).idx("y", 5),
+ G().idx("y", 3), G().idx("x", 2).idx("z", 3),
+ G().idx("x", 3).idx("y", 5), G().idx("y", 5).idx("z", 7),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b"}),
+ G().map("x", {"a","b","c"}), G().map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b","c"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar","baz"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("x", {"a","b","c"}).map("y", {"foo","bar"}),
+ G().map("x", {"a","b"}).map("y", {"foo","bar","baz"}), G().map("y", {"foo","bar"}).map("z", {"i","j","k","l"}),
+ G().idx("x", 3).map("y", {"foo", "bar"}), G().map("y", {"foo", "bar"}).idx("z", 7),
+ G().map("x", {"a","b","c"}).idx("y", 5), G().idx("y", 5).map("z", {"i","j","k","l"})
+};
+
+const std::vector<std::vector<vespalib::string>> reductions = {
+ {}, {"x"}, {"y"}, {"z"}, {"x", "y"}, {"x", "z"}, {"y", "z"}
+};
+
+TensorSpec perform_dot_product(const TensorSpec &a, const TensorSpec &b, const std::vector<vespalib::string> &dims)
+{
+ Stash stash;
+ auto lhs = value_from_spec(a, prod_factory);
+ auto rhs = value_from_spec(b, prod_factory);
+ auto res_type = ValueType::join(lhs->type(), rhs->type()).reduce(dims);
+ EXPECT_FALSE(res_type.is_error());
+ UniversalDotProduct dot_product(res_type,
+ tensor_function::inject(lhs->type(), 0, stash),
+ tensor_function::inject(rhs->type(), 1, stash));
+ auto my_op = dot_product.compile_self(prod_factory, stash);
+ InterpretedFunction::EvalSingle single(prod_factory, my_op);
+ return spec_from_value(single.eval(std::vector<Value::CREF>({*lhs,*rhs})));
+}
+
+TEST(UniversalDotProductTest, generic_dot_product_works_for_various_cases) {
+ size_t test_cases = 0;
+ ASSERT_TRUE((layouts.size() % 2) == 0);
+ for (size_t i = 0; i < layouts.size(); i += 2) {
+ const auto &l = layouts[i];
+ const auto &r = layouts[i+1];
+ for (CellType lct : CellTypeUtils::list_types()) {
+ auto lhs = l.cpy().cells(lct);
+ if (lhs.bad_scalar()) continue;
+ for (CellType rct : CellTypeUtils::list_types()) {
+ auto rhs = r.cpy().cells(rct);
+ if (rhs.bad_scalar()) continue;
+ for (const std::vector<vespalib::string> &dims: reductions) {
+ if (ValueType::join(lhs.type(), rhs.type()).reduce(dims).is_error()) continue;
+ ++test_cases;
+ SCOPED_TRACE(fmt("\n===\nLHS: %s\nRHS: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str()));
+ auto expect = ReferenceOperations::reduce(ReferenceOperations::join(lhs, rhs, operation::Mul::f), Aggr::SUM, dims);
+ auto actual = perform_dot_product(lhs, rhs, dims);
+ // fprintf(stderr, "\n===\nLHS: %s\nRHS: %s\n===\nRESULT: %s\n===\n", lhs.gen().to_string().c_str(), rhs.gen().to_string().c_str(), actual.to_string().c_str());
+ EXPECT_EQ(actual, expect);
+ }
+ }
+ }
+ }
+ EXPECT_GT(test_cases, 500);
+ fprintf(stderr, "total test cases run: %zu\n", test_cases);
+}
+
+GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/eval/src/vespa/eval/eval/fast_value.hpp b/eval/src/vespa/eval/eval/fast_value.hpp
index 47f99d19055..561b8ee8102 100644
--- a/eval/src/vespa/eval/eval/fast_value.hpp
+++ b/eval/src/vespa/eval/eval/fast_value.hpp
@@ -162,6 +162,13 @@ struct FastValue final : Value, ValueBuilder<T> {
ArrayRef<T> get_subspace(size_t subspace) {
return {my_cells.get(subspace * my_subspace_size), my_subspace_size};
}
+ std::pair<ArrayRef<T>,bool> insert_subspace(ConstArrayRef<string_id> addr) {
+ if (size_t subspace = my_index.map.lookup(addr); subspace != FastAddrMap::npos()) {
+ return {get_subspace(subspace), false};
+ } else {
+ return {add_subspace(addr), true};
+ }
+ }
ConstArrayRef<T> get_raw_cells() const {
return {my_cells.get(0), my_cells.size};
}
diff --git a/eval/src/vespa/eval/eval/inline_operation.h b/eval/src/vespa/eval/eval/inline_operation.h
index 9b862b59e37..910fa9cffaa 100644
--- a/eval/src/vespa/eval/eval/inline_operation.h
+++ b/eval/src/vespa/eval/eval/inline_operation.h
@@ -4,6 +4,7 @@
#include "operation.h"
#include <vespa/vespalib/util/typify.h>
+#include <cblas.h>
#include <cmath>
namespace vespalib::eval::operation {
@@ -148,4 +149,31 @@ void apply_op2_vec_vec(D *dst, const A *a, const B *b, size_t n, OP2 &&f) {
//-----------------------------------------------------------------------------
+template <typename LCT, typename RCT>
+struct DotProduct {
+ static double apply(const LCT * lhs, const RCT * rhs, size_t count) {
+ double result = 0.0;
+ for (size_t i = 0; i < count; ++i) {
+ result += lhs[i] * rhs[i];
+ }
+ return result;
+ }
+};
+
+template <>
+struct DotProduct<float,float> {
+ static float apply(const float * lhs, const float * rhs, size_t count) {
+ return cblas_sdot(count, lhs, 1, rhs, 1);
+ }
+};
+
+template <>
+struct DotProduct<double,double> {
+ static double apply(const double * lhs, const double * rhs, size_t count) {
+ return cblas_ddot(count, lhs, 1, rhs, 1);
+ }
+};
+
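+// Illustrative use (hypothetical values): mismatched cell types fall back to
+// the templated loop, while float/float and double/double dispatch to BLAS:
+//
+//   float a[3] = {1.0f, 2.0f, 3.0f};
+//   float b[3] = {4.0f, 5.0f, 6.0f};
+//   double dp = DotProduct<float,float>::apply(a, b, 3); // cblas_sdot -> 32.0
+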
+//-----------------------------------------------------------------------------
+
}
diff --git a/eval/src/vespa/eval/eval/nested_loop.h b/eval/src/vespa/eval/eval/nested_loop.h
index 92e84f5c052..695b212dd90 100644
--- a/eval/src/vespa/eval/eval/nested_loop.h
+++ b/eval/src/vespa/eval/eval/nested_loop.h
@@ -65,6 +65,28 @@ template <typename F> void execute_many(size_t idx1, size_t idx2, const size_t *
//-----------------------------------------------------------------------------
+template <typename F, size_t N> void execute_few(size_t idx1, size_t idx2, size_t idx3, const size_t *loop, const size_t *stride1, const size_t *stride2, const size_t *stride3, const F &f) {
+ if constexpr (N == 0) {
+ f(idx1, idx2, idx3);
+ } else {
+ for (size_t i = 0; i < *loop; ++i, idx1 += *stride1, idx2 += *stride2, idx3 += *stride3) {
+ execute_few<F, N - 1>(idx1, idx2, idx3, loop + 1, stride1 + 1, stride2 + 1, stride3 + 1, f);
+ }
+ }
+}
+
+template <typename F> void execute_many(size_t idx1, size_t idx2, size_t idx3, const size_t *loop, const size_t *stride1, const size_t *stride2, const size_t *stride3, size_t levels, const F &f) {
+ for (size_t i = 0; i < *loop; ++i, idx1 += *stride1, idx2 += *stride2, idx3 += *stride3) {
+ if ((levels - 1) == 3) {
+ execute_few<F, 3>(idx1, idx2, idx3, loop + 1, stride1 + 1, stride2 + 1, stride3 + 1, f);
+ } else {
+ execute_many<F>(idx1, idx2, idx3, loop + 1, stride1 + 1, stride2 + 1, stride3 + 1, levels - 1, f);
+ }
+ }
+}
+
+//-----------------------------------------------------------------------------
+
} // implementation details
// Run a nested loop and pass indexes to 'f'
@@ -95,4 +117,19 @@ void run_nested_loop(size_t idx1, size_t idx2, const V &loop, const V &stride1,
}
}
+// Run three nested loops in parallel and pass all three indexes to
+// 'f'. Note that 'loop' is shared, which means that only individual
+// strides may differ between the three loops.
+template <typename F, typename V>
+void run_nested_loop(size_t idx1, size_t idx2, size_t idx3, const V &loop, const V &stride1, const V &stride2, const V &stride3, const F &f) {
+ size_t levels = loop.size();
+ switch(levels) {
+ case 0: return f(idx1, idx2, idx3);
+ case 1: return nested_loop::execute_few<F, 1>(idx1, idx2, idx3, &loop[0], &stride1[0], &stride2[0], &stride3[0], f);
+ case 2: return nested_loop::execute_few<F, 2>(idx1, idx2, idx3, &loop[0], &stride1[0], &stride2[0], &stride3[0], f);
+ case 3: return nested_loop::execute_few<F, 3>(idx1, idx2, idx3, &loop[0], &stride1[0], &stride2[0], &stride3[0], f);
+ default: return nested_loop::execute_many<F>(idx1, idx2, idx3, &loop[0], &stride1[0], &stride2[0], &stride3[0], levels, f);
+ }
+}
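+// Illustrative sketch (the spans and stride vectors below are hypothetical):
+// accumulate element-wise products of two dense value spans into a result
+// span, with all three walks driven by the same loop counts:
+//
+//   auto f = [&](size_t l, size_t r, size_t d) { res[d] += lhs[l] * rhs[r]; };
+//   run_nested_loop(0, 0, 0, loop_cnt, lhs_stride, rhs_stride, res_stride, f);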
+
} // namespace
diff --git a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
index 1d0be47a309..3d9152d6b80 100644
--- a/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
+++ b/eval/src/vespa/eval/eval/optimize_tensor_function.cpp
@@ -37,6 +37,7 @@
#include <vespa/eval/instruction/mixed_l2_distance.h>
#include <vespa/eval/instruction/simple_join_count.h>
#include <vespa/eval/instruction/mapped_lookup.h>
+#include <vespa/eval/instruction/universal_dot_product.h>
#include <vespa/log/log.h>
LOG_SETUP(".eval.eval.optimize_tensor_function");
@@ -88,6 +89,7 @@ const TensorFunction &optimize_for_factory(const ValueBuilderFactory &, const Te
child.set(DenseHammingDistance::optimize(child.get(), stash));
child.set(SimpleJoinCount::optimize(child.get(), stash));
child.set(MappedLookup::optimize(child.get(), stash));
+ // child.set(UniversalDotProduct::optimize(child.get(), stash));
});
run_optimize_pass(root, [&stash](const Child &child)
{
diff --git a/eval/src/vespa/eval/eval/value_codec.cpp b/eval/src/vespa/eval/eval/value_codec.cpp
index d5fc2eb7c16..19cf2012bcb 100644
--- a/eval/src/vespa/eval/eval/value_codec.cpp
+++ b/eval/src/vespa/eval/eval/value_codec.cpp
@@ -335,7 +335,9 @@ std::unique_ptr<Value> decode_value(nbostream &input, const ValueBuilderFactory
std::unique_ptr<Value> value_from_spec(const TensorSpec &spec, const ValueBuilderFactory &factory) {
ValueType type = ValueType::from_spec(spec.type());
- assert(!type.is_error());
+ if (type.is_error()) {
+ throw IllegalArgumentException(fmt("Failed decoding value type from tensorspec(%s)", spec.type().c_str()), VESPA_STRLOC);
+ }
return typify_invoke<1,TypifyCellType,CreateValueFromTensorSpec>(type.cell_type(), type, spec, factory);
}
diff --git a/eval/src/vespa/eval/eval/value_type.cpp b/eval/src/vespa/eval/eval/value_type.cpp
index 7d088b22e06..ae0ea1a0cd6 100644
--- a/eval/src/vespa/eval/eval/value_type.cpp
+++ b/eval/src/vespa/eval/eval/value_type.cpp
@@ -155,7 +155,8 @@ ValueType::error_if(bool has_error, ValueType else_type)
ValueType::~ValueType() = default;
bool
-ValueType::is_double() const {
+ValueType::is_double() const
+{
if (!_error && _dimensions.empty()) {
assert(_cell_type == CellType::DOUBLE);
return true;
@@ -240,7 +241,8 @@ ValueType::dense_subspace_size() const
}
std::vector<ValueType::Dimension>
-ValueType::nontrivial_indexed_dimensions() const {
+ValueType::nontrivial_indexed_dimensions() const
+{
std::vector<ValueType::Dimension> result;
for (const auto &dim: dimensions()) {
if (dim.is_indexed() && !dim.is_trivial()) {
@@ -251,7 +253,8 @@ ValueType::nontrivial_indexed_dimensions() const {
}
std::vector<ValueType::Dimension>
-ValueType::indexed_dimensions() const {
+ValueType::indexed_dimensions() const
+{
std::vector<ValueType::Dimension> result;
for (const auto &dim: dimensions()) {
if (dim.is_indexed()) {
@@ -262,7 +265,8 @@ ValueType::indexed_dimensions() const {
}
std::vector<ValueType::Dimension>
-ValueType::mapped_dimensions() const {
+ValueType::mapped_dimensions() const
+{
std::vector<ValueType::Dimension> result;
for (const auto &dim: dimensions()) {
if (dim.is_mapped()) {
@@ -273,10 +277,27 @@ ValueType::mapped_dimensions() const {
}
size_t
-ValueType::dimension_index(const vespalib::string &name) const {
+ValueType::dimension_index(const vespalib::string &name) const
+{
return my_dimension_index(_dimensions, name);
}
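+// Return the distance, in cells, between neighboring values along the named
+// indexed dimension within a dense subspace (0 if this type has no indexed
+// dimension with that name).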
+size_t
+ValueType::stride_of(const vespalib::string &name) const
+{
+ size_t stride = 0;
+ for (const auto &dim: dimensions()) {
+ if (dim.is_indexed()) {
+ if (stride == 0 && dim.name == name) {
+ stride = 1;
+ } else {
+ stride *= dim.size;
+ }
+ }
+ }
+ return stride;
+}
+
std::vector<vespalib::string>
ValueType::dimension_names() const
{
diff --git a/eval/src/vespa/eval/eval/value_type.h b/eval/src/vespa/eval/eval/value_type.h
index 5c0d9e3317d..3bdfcbdd980 100644
--- a/eval/src/vespa/eval/eval/value_type.h
+++ b/eval/src/vespa/eval/eval/value_type.h
@@ -69,6 +69,10 @@ public:
std::vector<Dimension> indexed_dimensions() const;
std::vector<Dimension> mapped_dimensions() const;
size_t dimension_index(const vespalib::string &name) const;
+ size_t stride_of(const vespalib::string &name) const;
+ bool has_dimension(const vespalib::string &name) const {
+ return (dimension_index(name) != Dimension::npos);
+ }
std::vector<vespalib::string> dimension_names() const;
bool operator==(const ValueType &rhs) const noexcept {
return ((_error == rhs._error) &&
diff --git a/eval/src/vespa/eval/instruction/CMakeLists.txt b/eval/src/vespa/eval/instruction/CMakeLists.txt
index 02bbfec5dd3..006a363a64f 100644
--- a/eval/src/vespa/eval/instruction/CMakeLists.txt
+++ b/eval/src/vespa/eval/instruction/CMakeLists.txt
@@ -7,6 +7,7 @@ vespa_add_library(eval_instruction OBJECT
dense_cell_range_function.cpp
dense_dot_product_function.cpp
dense_hamming_distance.cpp
+ dense_join_reduce_plan.cpp
dense_lambda_peek_function.cpp
dense_lambda_peek_optimizer.cpp
dense_matmul_function.cpp
@@ -43,10 +44,12 @@ vespa_add_library(eval_instruction OBJECT
sparse_112_dot_product.cpp
sparse_dot_product_function.cpp
sparse_full_overlap_join_function.cpp
+ sparse_join_reduce_plan.cpp
sparse_merge_function.cpp
sparse_no_overlap_join_function.cpp
sparse_singledim_lookup.cpp
sum_max_dot_product_function.cpp
+ universal_dot_product.cpp
unpack_bits_function.cpp
vector_from_doubles_function.cpp
)
diff --git a/eval/src/vespa/eval/instruction/best_similarity_function.cpp b/eval/src/vespa/eval/instruction/best_similarity_function.cpp
index 964f27a4564..415a08d0d93 100644
--- a/eval/src/vespa/eval/instruction/best_similarity_function.cpp
+++ b/eval/src/vespa/eval/instruction/best_similarity_function.cpp
@@ -1,10 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "best_similarity_function.h"
-#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/inline_operation.h>
#include <vespa/eval/eval/value.h>
#include <vespa/vespalib/util/binary_hamming_distance.h>
-#include <cblas.h>
namespace vespalib::eval {
@@ -22,7 +21,7 @@ struct BestSimParam {
struct UseDotProduct {
static float calc(const float *pri, const float *sec, size_t size) {
- return cblas_sdot(size, pri, 1, sec, 1);
+ return DotProduct<float,float>::apply(pri, sec, size);
}
};
diff --git a/eval/src/vespa/eval/instruction/dense_dot_product_function.cpp b/eval/src/vespa/eval/instruction/dense_dot_product_function.cpp
index a2048707685..de9e029f377 100644
--- a/eval/src/vespa/eval/instruction/dense_dot_product_function.cpp
+++ b/eval/src/vespa/eval/instruction/dense_dot_product_function.cpp
@@ -1,9 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "dense_dot_product_function.h"
-#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/inline_operation.h>
#include <vespa/eval/eval/value.h>
-#include <cblas.h>
namespace vespalib::eval {
@@ -16,26 +15,7 @@ template <typename LCT, typename RCT>
void my_dot_product_op(InterpretedFunction::State &state, uint64_t) {
auto lhs_cells = state.peek(1).cells().typify<LCT>();
auto rhs_cells = state.peek(0).cells().typify<RCT>();
- double result = 0.0;
- const LCT *lhs = lhs_cells.cbegin();
- const RCT *rhs = rhs_cells.cbegin();
- for (size_t i = 0; i < lhs_cells.size(); ++i) {
- result += ((*lhs++) * (*rhs++));
- }
- state.pop_pop_push(state.stash.create<DoubleValue>(result));
-}
-
-void my_cblas_double_dot_product_op(InterpretedFunction::State &state, uint64_t) {
- auto lhs_cells = state.peek(1).cells().typify<double>();
- auto rhs_cells = state.peek(0).cells().typify<double>();
- double result = cblas_ddot(lhs_cells.size(), lhs_cells.cbegin(), 1, rhs_cells.cbegin(), 1);
- state.pop_pop_push(state.stash.create<DoubleValue>(result));
-}
-
-void my_cblas_float_dot_product_op(InterpretedFunction::State &state, uint64_t) {
- auto lhs_cells = state.peek(1).cells().typify<float>();
- auto rhs_cells = state.peek(0).cells().typify<float>();
- double result = cblas_sdot(lhs_cells.size(), lhs_cells.cbegin(), 1, rhs_cells.cbegin(), 1);
+ double result = DotProduct<LCT,RCT>::apply(lhs_cells.cbegin(), rhs_cells.cbegin(), lhs_cells.size());
state.pop_pop_push(state.stash.create<DoubleValue>(result));
}
@@ -44,19 +24,6 @@ struct MyDotProductOp {
static auto invoke() { return my_dot_product_op<LCT,RCT>; }
};
-InterpretedFunction::op_function my_select(CellType lct, CellType rct) {
- if (lct == rct) {
- if (lct == CellType::DOUBLE) {
- return my_cblas_double_dot_product_op;
- }
- if (lct == CellType::FLOAT) {
- return my_cblas_float_dot_product_op;
- }
- }
- using MyTypify = TypifyCellType;
- return typify_invoke<2,MyTypify,MyDotProductOp>(lct, rct);
-}
-
} // namespace <unnamed>
DenseDotProductFunction::DenseDotProductFunction(const TensorFunction &lhs_in,
@@ -68,7 +35,8 @@ DenseDotProductFunction::DenseDotProductFunction(const TensorFunction &lhs_in,
InterpretedFunction::Instruction
DenseDotProductFunction::compile_self(const ValueBuilderFactory &, Stash &) const
{
- auto op = my_select(lhs().result_type().cell_type(), rhs().result_type().cell_type());
+ auto op = typify_invoke<2,TypifyCellType,MyDotProductOp>(lhs().result_type().cell_type(),
+ rhs().result_type().cell_type());
return InterpretedFunction::Instruction(op);
}
diff --git a/eval/src/vespa/eval/instruction/dense_join_reduce_plan.cpp b/eval/src/vespa/eval/instruction/dense_join_reduce_plan.cpp
new file mode 100644
index 00000000000..20b7d3364a8
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/dense_join_reduce_plan.cpp
@@ -0,0 +1,95 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "dense_join_reduce_plan.h"
+#include <vespa/vespalib/util/overload.h>
+#include <vespa/vespalib/util/visit_ranges.h>
+#include <cassert>
+
+namespace vespalib::eval::instruction {
+
+namespace {
+
+using Dim = ValueType::Dimension;
+using Dims = std::vector<ValueType::Dimension>;
+
+void visit(auto &v, const Dims &a, const Dims &b) {
+ visit_ranges(v, a.begin(), a.end(), b.begin(), b.end(),
+ [](const auto &x, const auto &y){ return (x.name < y.name); });
+}
+
+Dims merge(const Dims &first, const Dims &second) {
+ Dims result;
+ auto visitor = overload {
+ [&result](visit_ranges_either, const Dim &dim) { result.push_back(dim); },
+ [&result](visit_ranges_both, const Dim &dim, const Dim &) { result.push_back(dim); }
+ };
+ visit(visitor, first, second);
+ return result;
+}
+
+size_t count_only_in_second(const Dims &first, const Dims &second) {
+ size_t result = 0;
+ auto visitor = overload {
+ [](visit_ranges_first, const Dim &) {},
+ [&result](visit_ranges_second, const Dim &) { ++result; },
+ [](visit_ranges_both, const Dim &, const Dim &) {}
+ };
+ visit(visitor, first, second);
+ return result;
+}
+
+struct Strides {
+ size_t lhs;
+ size_t rhs;
+ size_t res;
+ Strides() noexcept : lhs(0), rhs(0), res(0) {}
+ Strides(size_t lhs_in, size_t rhs_in, size_t res_in) noexcept
+ : lhs(lhs_in), rhs(rhs_in), res(res_in) {}
+ bool can_combine_with(const Strides &prev) const noexcept {
+ return ((lhs > 0) == (prev.lhs > 0)) &&
+ ((rhs > 0) == (prev.rhs > 0)) &&
+ ((res > 0) == (prev.res > 0));
+ }
+};
+
+} // <unnamed>
+
+DenseJoinReducePlan::DenseJoinReducePlan(const ValueType &lhs, const ValueType &rhs, const ValueType &res)
+ : lhs_size(lhs.dense_subspace_size()), rhs_size(rhs.dense_subspace_size()), res_size(res.dense_subspace_size()),
+ loop_cnt(), lhs_stride(), rhs_stride(), res_stride()
+{
+ auto dims = merge(lhs.nontrivial_indexed_dimensions(), rhs.nontrivial_indexed_dimensions());
+ assert(count_only_in_second(dims, res.nontrivial_indexed_dimensions()) == 0);
+ Strides prev_strides;
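+ // Adjacent dimensions with the same stride pattern (present or absent in
+ // lhs, rhs and res) are fused into a single loop level below.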
+ for (const auto &dim: dims) {
+ Strides strides(lhs.stride_of(dim.name), rhs.stride_of(dim.name), res.stride_of(dim.name));
+ if (strides.can_combine_with(prev_strides)) {
+ assert(!loop_cnt.empty());
+ loop_cnt.back() *= dim.size;
+ lhs_stride.back() = strides.lhs;
+ rhs_stride.back() = strides.rhs;
+ res_stride.back() = strides.res;
+ } else {
+ loop_cnt.push_back(dim.size);
+ lhs_stride.push_back(strides.lhs);
+ rhs_stride.push_back(strides.rhs);
+ res_stride.push_back(strides.res);
+ }
+ prev_strides = strides;
+ }
+}
+
+DenseJoinReducePlan::~DenseJoinReducePlan() = default;
+
+bool
+DenseJoinReducePlan::distinct_result() const
+{
+ for (size_t stride: res_stride) {
+ if (stride == 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
diff --git a/eval/src/vespa/eval/instruction/dense_join_reduce_plan.h b/eval/src/vespa/eval/instruction/dense_join_reduce_plan.h
new file mode 100644
index 00000000000..8f9d5218630
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/dense_join_reduce_plan.h
@@ -0,0 +1,27 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/value_type.h>
+#include <vespa/eval/eval/nested_loop.h>
+#include <vespa/vespalib/util/small_vector.h>
+
+namespace vespalib::eval::instruction {
+
+struct DenseJoinReducePlan {
+ size_t lhs_size;
+ size_t rhs_size;
+ size_t res_size;
+ SmallVector<size_t> loop_cnt;
+ SmallVector<size_t> lhs_stride;
+ SmallVector<size_t> rhs_stride;
+ SmallVector<size_t> res_stride;
+ DenseJoinReducePlan(const ValueType &lhs, const ValueType &rhs, const ValueType &res);
+ ~DenseJoinReducePlan();
+ template <typename F> void execute(size_t lhs, size_t rhs, size_t res, const F &f) const {
+ run_nested_loop(lhs, rhs, res, loop_cnt, lhs_stride, rhs_stride, res_stride, f);
+ }
+ bool distinct_result() const;
+};
+
+} // namespace
diff --git a/eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp b/eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp
index 8bfa4b07980..47e1dbb58ed 100644
--- a/eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp
+++ b/eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp
@@ -5,7 +5,6 @@
#include <vespa/vespalib/util/typify.h>
#include <vespa/vespalib/util/require.h>
#include <vespa/eval/eval/visit_stuff.h>
-#include <cblas.h>
#include <algorithm>
#include <optional>
@@ -17,14 +16,6 @@ using namespace instruction;
namespace {
-template <typename CT> double my_dot_product(const CT * lhs, const CT * rhs, size_t count);
-template <> double my_dot_product<double>(const double * lhs, const double * rhs, size_t count) {
- return cblas_ddot(count, lhs, 1, rhs, 1);
-}
-template <> double my_dot_product<float>(const float * lhs, const float * rhs, size_t count) {
- return cblas_sdot(count, lhs, 1, rhs, 1);
-}
-
template <typename T, size_t N>
ConstArrayRef<const T *> as_ccar(std::array<T *, N> &array) {
return {array.data(), array.size()};
@@ -54,10 +45,11 @@ double my_mixed_112_dot_product_fallback(const Value::Index &a_idx, const Value:
auto outer = a_idx.create_view({});
auto model = c_idx.create_view({&single_dim[0], 1});
outer->lookup({});
+ using dot_product = DotProduct<CT,CT>;
while (outer->next_result(as_car(c_addr_ref[0]), a_space)) {
model->lookup(as_ccar(c_addr_ref));
if (model->next_result({}, c_space)) {
- result += my_dot_product<CT>(b_cells, c_cells + (c_space * dense_size), dense_size) * a_cells[a_space];
+ result += dot_product::apply(b_cells, c_cells + (c_space * dense_size), dense_size) * a_cells[a_space];
}
}
return result;
@@ -70,11 +62,12 @@ double my_fast_mixed_112_dot_product(const FastAddrMap *a_map, const FastAddrMap
{
double result = 0.0;
const auto &a_labels = a_map->labels();
+ using dot_product = DotProduct<CT,CT>;
for (size_t a_space = 0; a_space < a_labels.size(); ++a_space) {
if (a_cells[a_space] != 0.0) { // handle pseudo-sparse input
auto c_space = c_map->lookup_singledim(a_labels[a_space]);
if (c_space != FastAddrMap::npos()) {
- result += my_dot_product<CT>(b_cells, c_cells + (c_space * dense_size), dense_size) * a_cells[a_space];
+ result += dot_product::apply(b_cells, c_cells + (c_space * dense_size), dense_size) * a_cells[a_space];
}
}
}
diff --git a/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp b/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp
index 248f909fcf5..5880a90a2cd 100644
--- a/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp
+++ b/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp
@@ -1,9 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "mixed_inner_product_function.h"
-#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/inline_operation.h>
#include <vespa/eval/eval/value.h>
-#include <cblas.h>
namespace vespalib::eval {
@@ -12,31 +11,6 @@ using namespace operation;
namespace {
-template <typename LCT, typename RCT>
-struct MyDotProduct {
- static double apply(const LCT * lhs, const RCT * rhs, size_t count) {
- double result = 0.0;
- for (size_t i = 0; i < count; ++i) {
- result += lhs[i] * rhs[i];
- }
- return result;
- }
-};
-
-template <>
-struct MyDotProduct<double,double> {
- static double apply(const double * lhs, const double * rhs, size_t count) {
- return cblas_ddot(count, lhs, 1, rhs, 1);
- }
-};
-
-template <>
-struct MyDotProduct<float,float> {
- static float apply(const float * lhs, const float * rhs, size_t count) {
- return cblas_sdot(count, lhs, 1, rhs, 1);
- }
-};
-
struct MixedInnerProductParam {
ValueType res_type;
size_t vector_size;
@@ -66,8 +40,9 @@ void my_mixed_inner_product_op(InterpretedFunction::State &state, uint64_t param
ArrayRef<OCT> out_cells = state.stash.create_uninitialized_array<OCT>(num_output_cells);
const MCT *m_cp = m_cells.begin();
const VCT *v_cp = v_cells.begin();
+ using dot_product = DotProduct<MCT,VCT>;
for (OCT &out : out_cells) {
- out = MyDotProduct<MCT,VCT>::apply(m_cp, v_cp, param.vector_size);
+ out = dot_product::apply(m_cp, v_cp, param.vector_size);
m_cp += param.vector_size;
}
assert(m_cp == m_cells.end());
diff --git a/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.cpp b/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.cpp
new file mode 100644
index 00000000000..00499e7f997
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.cpp
@@ -0,0 +1,186 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "sparse_join_reduce_plan.h"
+#include <vespa/vespalib/util/overload.h>
+#include <vespa/vespalib/util/visit_ranges.h>
+#include <cassert>
+
+namespace vespalib::eval::instruction {
+
+namespace {
+
+using Dim = ValueType::Dimension;
+using Dims = std::vector<ValueType::Dimension>;
+
+void visit(auto &v, const Dims &a, const Dims &b) {
+ visit_ranges(v, a.begin(), a.end(), b.begin(), b.end(),
+ [](const auto &x, const auto &y){ return (x.name < y.name); });
+}
+
+Dims merge(const Dims &first, const Dims &second) {
+ Dims result;
+ auto visitor = overload {
+ [&result](visit_ranges_either, const Dim &dim) { result.push_back(dim); },
+ [&result](visit_ranges_both, const Dim &dim, const Dim &) { result.push_back(dim); }
+ };
+ visit(visitor, first, second);
+ return result;
+}
+
+size_t count_only_in_second(const Dims &first, const Dims &second) {
+ size_t result = 0;
+ auto visitor = overload {
+ [](visit_ranges_first, const Dim &) {},
+ [&result](visit_ranges_second, const Dim &) { ++result; },
+ [](visit_ranges_both, const Dim &, const Dim &) {}
+ };
+ visit(visitor, first, second);
+ return result;
+}
+
+struct SparseJoinReduceState {
+ SmallVector<string_id,4> addr_space;
+ SmallVector<string_id*,4> a_addr;
+ SmallVector<const string_id*,4> overlap;
+ SmallVector<string_id*,4> b_only;
+ SmallVector<size_t,4> b_view;
+ size_t a_subspace;
+ size_t b_subspace;
+ uint32_t res_dims;
+ SparseJoinReduceState(const bool *in_a, const bool *in_b, const bool *in_res, size_t dims)
+ : addr_space(dims), a_addr(), overlap(), b_only(), b_view(), a_subspace(), b_subspace(), res_dims(0)
+ {
+ size_t b_idx = 0;
+ uint32_t dims_end = addr_space.size();
+ for (size_t i = 0; i < dims; ++i) {
+ string_id *id = in_res[i] ? &addr_space[res_dims++] : &addr_space[--dims_end];
+ if (in_a[i]) {
+ a_addr.push_back(id);
+ if (in_b[i]) {
+ overlap.push_back(id);
+ b_view.push_back(b_idx++);
+ }
+ } else if (in_b[i]) {
+ b_only.push_back(id);
+ ++b_idx;
+ }
+ }
+ // Kept dimensions are allocated from the start and dropped
+ // dimensions are allocated from the end. Make sure they
+ // combine to exactly cover the complete address space.
+ assert(res_dims == dims_end);
+ }
+ ~SparseJoinReduceState();
+};
+SparseJoinReduceState::~SparseJoinReduceState() = default;
+
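+// Iterate all subspaces of 'a'; for each, look up the subspaces of 'b' that
+// match on the overlapping mapped dimensions and hand (a_subspace,
+// b_subspace, result address) to 'f'.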
+void execute_plan(const Value::Index &a, const Value::Index &b,
+ const bool *in_a, const bool *in_b, const bool *in_res,
+ size_t dims, auto &&f)
+{
+ SparseJoinReduceState state(in_a, in_b, in_res, dims);
+ auto outer = a.create_view({});
+ auto inner = b.create_view(state.b_view);
+ outer->lookup({});
+ while (outer->next_result(state.a_addr, state.a_subspace)) {
+ inner->lookup(state.overlap);
+ while (inner->next_result(state.b_only, state.b_subspace)) {
+ f(state.a_subspace, state.b_subspace, ConstArrayRef<string_id>{state.addr_space.begin(), state.res_dims});
+ }
+ }
+}
+
+using est_fun = SparseJoinReducePlan::est_fun_t;
+using est_filter = std::function<bool(bool, bool, bool)>;
+
+struct Est {
+ est_filter filter;
+ est_fun estimate;
+ bool can_use;
+ Est(est_filter filter_in, est_fun estimate_in)
+ : filter(filter_in), estimate(estimate_in), can_use(true) {}
+ ~Est();
+};
+Est::~Est() = default;
+
+size_t est_1(size_t, size_t) noexcept { return 1; }
+size_t est_a_or_0(size_t a, size_t b) noexcept { return (b == 0) ? 0 : a; }
+size_t est_b_or_0(size_t a, size_t b) noexcept { return (a == 0) ? 0 : b; }
+size_t est_min(size_t a, size_t b) noexcept { return std::min(a, b); }
+size_t est_mul(size_t a, size_t b) noexcept { return (a * b); }
+
+bool no_dims(bool, bool, bool) noexcept { return false; }
+bool reduce_all(bool, bool, bool keep) noexcept { return !keep; }
+bool keep_a_reduce_b(bool a, bool b, bool keep) noexcept {
+ if (keep) {
+ return (a && !b);
+ } else {
+ return (!a && b);
+ }
+}
+bool keep_b_reduce_a(bool a, bool b, bool keep) noexcept { return keep_a_reduce_b(b, a, keep); }
+bool full_overlap(bool a, bool b, bool) noexcept { return (a == b); }
+bool no_overlap_keep_all(bool a, bool b, bool keep) noexcept { return keep && (a != b); }
+
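+// The first Est whose filter holds for every dimension (classified by its
+// presence in lhs, rhs and the result) supplies the result-size estimate;
+// est_min is the conservative fallback when none match.
+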
+std::vector<Est> make_est_list() {
+ return {
+ { no_dims, est_1 },
+ { reduce_all, est_1 },
+ { keep_a_reduce_b, est_a_or_0 },
+ { keep_b_reduce_a, est_b_or_0 },
+ { full_overlap, est_min },
+ { no_overlap_keep_all, est_mul }
+ };
+}
+
+void update_est_list(std::vector<Est> &est_list, bool in_lhs, bool in_rhs, bool in_res) {
+ for (Est &est: est_list) {
+ if (est.can_use && !est.filter(in_lhs, in_rhs, in_res)) {
+ est.can_use = false;
+ }
+ }
+}
+
+est_fun select_estimate(const std::vector<Est> &est_list) {
+ for (const Est &est: est_list) {
+ if (est.can_use) {
+ return est.estimate;
+ }
+ }
+ return est_min;
+}
+
+} // <unnamed>
+
+SparseJoinReducePlan::SparseJoinReducePlan(const ValueType &lhs, const ValueType &rhs, const ValueType &res)
+ : _in_lhs(), _in_rhs(), _in_res(), _res_dims(0), _estimate()
+{
+ auto dims = merge(lhs.mapped_dimensions(), rhs.mapped_dimensions());
+ assert(count_only_in_second(dims, res.mapped_dimensions()) == 0);
+ auto est_list = make_est_list();
+ for (const auto &dim: dims) {
+ _in_lhs.push_back(lhs.has_dimension(dim.name));
+ _in_rhs.push_back(rhs.has_dimension(dim.name));
+ _in_res.push_back(res.has_dimension(dim.name));
+ if (_in_res.back()) {
+ ++_res_dims;
+ }
+ update_est_list(est_list, _in_lhs.back(), _in_rhs.back(), _in_res.back());
+ }
+ _estimate = select_estimate(est_list);
+ assert(bool(_estimate));
+}
+
+SparseJoinReducePlan::~SparseJoinReducePlan() = default;
+
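+// Let the value with the fewest subspaces drive the outer loop; the arguments
+// are swapped back so 'f' always sees (lhs_subspace, rhs_subspace, res_addr).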
+void
+SparseJoinReducePlan::execute(const Value::Index &lhs, const Value::Index &rhs, F f) const {
+ if (rhs.size() < lhs.size()) {
+ auto swap = [&](auto a, auto b, auto addr) { f(b, a, addr); };
+ execute_plan(rhs, lhs, _in_rhs.data(), _in_lhs.data(), _in_res.data(), _in_res.size(), swap);
+ } else {
+ execute_plan(lhs, rhs, _in_lhs.data(), _in_rhs.data(), _in_res.data(), _in_res.size(), f);
+ }
+}
+
+} // namespace
diff --git a/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.h b/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.h
new file mode 100644
index 00000000000..c93bf46e2dc
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/sparse_join_reduce_plan.h
@@ -0,0 +1,38 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/value.h>
+#include <vespa/vespalib/util/small_vector.h>
+#include <functional>
+
+namespace vespalib::eval::instruction {
+
+class SparseJoinReducePlan
+{
+public:
+ friend class SparseJoinReducePlanTest;
+
+ using BitList = SmallVector<bool,8>;
+ using est_fun_t = std::function<size_t(size_t lhs_size, size_t rhs_size)>;
+ using F = std::function<void(size_t lhs_subspace, size_t rhs_subspace, ConstArrayRef<string_id> res_addr)>;
+
+private:
+ BitList _in_lhs;
+ BitList _in_rhs;
+ BitList _in_res;
+ size_t _res_dims;
+ est_fun_t _estimate;
+
+public:
+ SparseJoinReducePlan(const ValueType &lhs, const ValueType &rhs, const ValueType &res);
+ ~SparseJoinReducePlan();
+ size_t res_dims() const { return _res_dims; }
+ bool distinct_result() const { return (_res_dims == _in_res.size()); }
+ size_t estimate_result_size(const Value::Index &lhs, const Value::Index &rhs) const {
+ return _estimate(lhs.size(), rhs.size());
+ }
+ void execute(const Value::Index &lhs, const Value::Index &rhs, F f) const;
+};
+
+} // namespace
diff --git a/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp b/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp
index a76eaa38925..41017bc3687 100644
--- a/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp
+++ b/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp
@@ -1,9 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "sum_max_dot_product_function.h"
-#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/inline_operation.h>
#include <vespa/eval/eval/value.h>
-#include <cblas.h>
namespace vespalib::eval {
@@ -16,11 +15,12 @@ void my_sum_max_dot_product_op(InterpretedFunction::State &state, uint64_t dp_si
double result = 0.0;
auto query_cells = state.peek(1).cells().typify<float>();
auto document_cells = state.peek(0).cells().typify<float>();
+ using dot_product = DotProduct<float,float>;
if ((query_cells.size() > 0) && (document_cells.size() > 0)) {
for (const float *query = query_cells.begin(); query < query_cells.end(); query += dp_size) {
float max_dp = aggr::Max<float>::null_value();
for (const float *document = document_cells.begin(); document < document_cells.end(); document += dp_size) {
- max_dp = aggr::Max<float>::combine(max_dp, cblas_sdot(dp_size, query, 1, document, 1));
+ max_dp = aggr::Max<float>::combine(max_dp, dot_product::apply(query, document, dp_size));
}
result += max_dp;
}
diff --git a/eval/src/vespa/eval/instruction/universal_dot_product.cpp b/eval/src/vespa/eval/instruction/universal_dot_product.cpp
new file mode 100644
index 00000000000..79a94d862bf
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/universal_dot_product.cpp
@@ -0,0 +1,119 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "universal_dot_product.h"
+#include "sparse_join_reduce_plan.h"
+#include "dense_join_reduce_plan.h"
+#include <vespa/eval/eval/inline_operation.h>
+#include <vespa/eval/eval/fast_value.hpp>
+
+namespace vespalib::eval {
+
+using namespace tensor_function;
+using namespace instruction;
+using namespace operation;
+
+namespace {
+
+struct UniversalDotProductParam {
+ ValueType res_type;
+ SparseJoinReducePlan sparse_plan;
+ DenseJoinReducePlan dense_plan;
+ size_t vector_size;
+
+ UniversalDotProductParam(const ValueType &res_type_in,
+ const ValueType &lhs_type,
+ const ValueType &rhs_type)
+ : res_type(res_type_in),
+ sparse_plan(lhs_type, rhs_type, res_type),
+ dense_plan(lhs_type, rhs_type, res_type),
+ vector_size(1)
+ {
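+ // Peel off the innermost dense loop when both inputs step through it
+ // contiguously and it is reduced away in the result; that run of cells is
+ // then handled by a single DotProduct call of length vector_size.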
+ if (!dense_plan.loop_cnt.empty() &&
+ dense_plan.lhs_stride.back() == 1 &&
+ dense_plan.rhs_stride.back() == 1 &&
+ dense_plan.res_stride.back() == 0)
+ {
+ vector_size = dense_plan.loop_cnt.back();
+ dense_plan.loop_cnt.pop_back();
+ dense_plan.lhs_stride.pop_back();
+ dense_plan.rhs_stride.pop_back();
+ dense_plan.res_stride.pop_back();
+ }
+ }
+};
+
+template <typename LCT, typename RCT, typename OCT>
+void my_universal_dot_product_op(InterpretedFunction::State &state, uint64_t param_in) {
+ using dot_product = DotProduct<LCT,RCT>;
+ const auto &param = unwrap_param<UniversalDotProductParam>(param_in);
+ const auto &lhs = state.peek(1);
+ const auto &rhs = state.peek(0);
+ const auto &lhs_index = lhs.index();
+ const auto &rhs_index = rhs.index();
+ const auto lhs_cells = lhs.cells().typify<LCT>();
+ const auto rhs_cells = rhs.cells().typify<RCT>();
+ auto &stored_result = state.stash.create<std::unique_ptr<FastValue<OCT,true>>>(
+ std::make_unique<FastValue<OCT,true>>(param.res_type, param.sparse_plan.res_dims(), param.dense_plan.res_size,
+ param.sparse_plan.estimate_result_size(lhs_index, rhs_index)));
+ auto &result = *(stored_result.get());
+ ArrayRef<OCT> dst;
+ auto dense_fun = [&](size_t lhs_idx, size_t rhs_idx, size_t dst_idx) {
+ dst[dst_idx] += dot_product::apply(&lhs_cells[lhs_idx], &rhs_cells[rhs_idx], param.vector_size);
+ };
+ auto sparse_fun = [&](size_t lhs_subspace, size_t rhs_subspace, ConstArrayRef<string_id> res_addr) {
+ bool first;
+ std::tie(dst, first) = result.insert_subspace(res_addr);
+ if (first) {
+ std::fill(dst.begin(), dst.end(), OCT{});
+ }
+ param.dense_plan.execute(lhs_subspace * param.dense_plan.lhs_size,
+ rhs_subspace * param.dense_plan.rhs_size,
+ 0, dense_fun);
+ };
+ param.sparse_plan.execute(lhs_index, rhs_index, sparse_fun);
+ state.pop_pop_push(result);
+}
+
+struct SelectUniversalDotProduct {
+ template <typename LCM, typename RCM, typename SCALAR> static auto invoke(const UniversalDotProductParam &) {
+ constexpr CellMeta ocm = CellMeta::join(LCM::value, RCM::value).reduce(SCALAR::value);
+ using LCT = CellValueType<LCM::value.cell_type>;
+ using RCT = CellValueType<RCM::value.cell_type>;
+ using OCT = CellValueType<ocm.cell_type>;
+ return my_universal_dot_product_op<LCT,RCT,OCT>;
+ }
+};
+
+} // namespace <unnamed>
+
+UniversalDotProduct::UniversalDotProduct(const ValueType &res_type_in,
+ const TensorFunction &lhs_in,
+ const TensorFunction &rhs_in)
+ : tensor_function::Op2(res_type_in, lhs_in, rhs_in)
+{
+}
+
+InterpretedFunction::Instruction
+UniversalDotProduct::compile_self(const ValueBuilderFactory &, Stash &stash) const
+{
+ auto &param = stash.create<UniversalDotProductParam>(result_type(), lhs().result_type(), rhs().result_type());
+ using MyTypify = TypifyValue<TypifyCellMeta,TypifyBool>;
+ auto op = typify_invoke<3,MyTypify,SelectUniversalDotProduct>(lhs().result_type().cell_meta(),
+ rhs().result_type().cell_meta(),
+ result_type().cell_meta().is_scalar,
+ param);
+ return InterpretedFunction::Instruction(op, wrap_param<UniversalDotProductParam>(param));
+}
+
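+// Replace reduce(join(a, b, mul), sum, dims...) with a UniversalDotProduct
+// node; all other expressions are returned unchanged.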
+const TensorFunction &
+UniversalDotProduct::optimize(const TensorFunction &expr, Stash &stash)
+{
+ if (auto reduce = as<Reduce>(expr); reduce && (reduce->aggr() == Aggr::SUM)) {
+ if (auto join = as<Join>(reduce->child()); join && (join->function() == Mul::f)) {
+ return stash.create<UniversalDotProduct>(expr.result_type(), join->lhs(), join->rhs());
+ }
+ }
+ return expr;
+}
+
+} // namespace
diff --git a/eval/src/vespa/eval/instruction/universal_dot_product.h b/eval/src/vespa/eval/instruction/universal_dot_product.h
new file mode 100644
index 00000000000..ac5aa157f17
--- /dev/null
+++ b/eval/src/vespa/eval/instruction/universal_dot_product.h
@@ -0,0 +1,22 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/tensor_function.h>
+
+namespace vespalib::eval {
+
+/**
+ * Tensor function performing dot product compatible operations
+ * (join:mul, reduce:sum) on values of arbitrary complexity.
+ **/
+class UniversalDotProduct : public tensor_function::Op2
+{
+public:
+ UniversalDotProduct(const ValueType &res_type, const TensorFunction &lhs, const TensorFunction &rhs);
+ InterpretedFunction::Instruction compile_self(const ValueBuilderFactory &factory, Stash &stash) const override;
+ bool result_is_mutable() const override { return true; }
+ static const TensorFunction &optimize(const TensorFunction &expr, Stash &stash);
+};
+
+} // namespace
diff --git a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
index 7b2de4dfaa1..8f9450c2660 100644
--- a/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
+++ b/eval/src/vespa/eval/onnx/onnx_wrapper.cpp
@@ -184,7 +184,7 @@ public:
};
Ort::AllocatorWithDefaultOptions OnnxString::_alloc;
-std::vector<Onnx::DimSize> make_dimensions(const Ort::TensorTypeAndShapeInfo &tensor_info) {
+std::vector<Onnx::DimSize> make_dimensions(const Ort::ConstTensorTypeAndShapeInfo &tensor_info) {
std::vector<const char *> symbolic_sizes(tensor_info.GetDimensionsCount(), nullptr);
tensor_info.GetSymbolicDimensions(symbolic_sizes.data(), symbolic_sizes.size());
auto shape = tensor_info.GetShape();
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/EmptyFileReferenceData.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/EmptyFileReferenceData.java
deleted file mode 100644
index ea8461b42f3..00000000000
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/EmptyFileReferenceData.java
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.filedistribution;
-
-import com.yahoo.config.FileReference;
-
-import java.nio.ByteBuffer;
-
-public class EmptyFileReferenceData extends FileReferenceData {
-
- private final byte[] content;
- private final long xxhash;
- private int contentRead = 0;
-
- private EmptyFileReferenceData(FileReference fileReference, String filename, Type type, byte[] content, long xxhash) {
- super(fileReference, filename, type, CompressionType.gzip);
- this.content = content;
- this.xxhash = xxhash;
- }
-
- public static FileReferenceData empty(FileReference fileReference, String filename) {
- return new EmptyFileReferenceData(fileReference, filename, FileReferenceData.Type.file, new byte[0], 0);
- }
-
- public ByteBuffer content() {
- return ByteBuffer.wrap(content);
- }
-
- @Override
- public int nextContent(ByteBuffer bb) {
- if (contentRead >= content.length) {
- return -1;
- } else {
- int left = content.length - contentRead;
- int size = Math.min(bb.remaining(), left);
- bb.put(content, contentRead, size);
- contentRead += size;
- return size;
- }
- }
-
- @Override
- public long xxhash() {
- return xxhash;
- }
-
- @Override
- public long size() {
- return content.length;
- }
-
- @Override
- public void close() {
- // no-op
- }
-}
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
index b37fe02226b..a567a3bc4b3 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
@@ -243,7 +243,7 @@ public class FileReceiver {
synchronized (sessions) {
if (sessions.containsKey(sessionId)) {
retval = 1;
- log.severe("Session id " + sessionId + " already exist, impossible. Request from(" + req.target() + ")");
+ log.severe("Session id " + sessionId + " already exist, impossible. Request from " + req.target());
} else {
try {
sessions.put(sessionId, new Session(downloadDirectory, sessionId, reference,
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java
index 3f83cbea506..87f45db5221 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java
@@ -10,7 +10,7 @@ import java.nio.ByteBuffer;
*
* @author hmusum
*/
-public abstract class FileReferenceData {
+public abstract class FileReferenceData implements AutoCloseable {
public enum Type { file, compressed }
public enum CompressionType { gzip, lz4, zstd }
diff --git a/flags/pom.xml b/flags/pom.xml
index 2c736d04ccb..0bfb02b1f32 100644
--- a/flags/pom.xml
+++ b/flags/pom.xml
@@ -62,7 +62,7 @@
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>test</scope>
</dependency>
<dependency>
@@ -77,11 +77,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java b/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java
index 5bcc1e67547..7af1661cf0c 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/FetchVector.java
@@ -9,7 +9,6 @@ import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
-import java.util.function.BiConsumer;
import java.util.function.Consumer;
/**
@@ -34,6 +33,11 @@ public class FetchVector {
*/
CLOUD,
+ /**
+ * Cloud account ID from com.yahoo.config.provision.CloudAccount::value, e.g. aws:123456789012
+ */
+ CLOUD_ACCOUNT,
+
/** Cluster ID from com.yahoo.config.provision.ClusterSpec.Id::value, e.g. cluster-controllers, logserver. */
CLUSTER_ID,
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index f677f9d2bda..ea716556210 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -14,8 +14,7 @@ import java.util.TreeMap;
import java.util.function.Predicate;
import static com.yahoo.vespa.flags.FetchVector.Dimension.APPLICATION_ID;
-import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_ID;
-import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_TYPE;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.CLOUD_ACCOUNT;
import static com.yahoo.vespa.flags.FetchVector.Dimension.CONSOLE_USER_EMAIL;
import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME;
import static com.yahoo.vespa.flags.FetchVector.Dimension.NODE_TYPE;
@@ -47,15 +46,6 @@ public class Flags {
private static volatile TreeMap<FlagId, FlagDefinition> flags = new TreeMap<>();
- public static final UnboundBooleanFlag DROP_CACHES = defineFeatureFlag(
- "drop-caches", false,
- List.of("hakonhall", "baldersheim"), "2023-03-06", "2023-08-05",
- "Drop caches on tenant hosts",
- "Takes effect on next tick",
- // The application ID is the exclusive application ID associated with the host,
- // if any, or otherwise hosted-vespa:tenant-host:default.
- APPLICATION_ID, TENANT_ID, CLUSTER_ID, CLUSTER_TYPE);
-
public static final UnboundDoubleFlag DEFAULT_TERM_WISE_LIMIT = defineDoubleFlag(
"default-term-wise-limit", 1.0,
List.of("baldersheim"), "2020-12-02", "2023-12-31",
@@ -223,14 +213,18 @@ public class Flags {
public static final UnboundStringFlag SYSTEM_MEMORY_HIGH = defineStringFlag(
"system-memory-high", "",
List.of("baldersheim"), "2023-02-14", "2023-12-31",
- "The value to write to /sys/fs/cgroup/system.slice/memory.high, if non-empty.",
+ "The value to write to /sys/fs/cgroup/system.slice/memory.high, if non-empty. " +
+ "You may want lower memory.high before lowering memory.max, " +
+ "and raise memory.high after raising memory.max.",
"Takes effect on next tick.",
NODE_TYPE);
public static final UnboundStringFlag SYSTEM_MEMORY_MAX = defineStringFlag(
"system-memory-max", "",
List.of("baldersheim"), "2023-02-14", "2023-12-31",
- "The value to write to /sys/fs/cgroup/system.slice/memory.max, if non-empty.",
+ "The value to write to /sys/fs/cgroup/system.slice/memory.max, if non-empty. " +
+ "You may want lower memory.high before lowering memory.max, " +
+ "and raise memory.high after raising memory.max.",
"Takes effect on next tick.",
NODE_TYPE);
@@ -285,13 +279,6 @@ public class Flags {
APPLICATION_ID,HOSTNAME,NODE_TYPE,TENANT_ID,VESPA_VERSION
);
- public static final UnboundBooleanFlag RESTRICT_DATA_PLANE_BINDINGS = defineFeatureFlag(
- "restrict-data-plane-bindings", false,
- List.of("mortent"), "2022-09-08", "2023-09-01",
- "Use restricted data plane bindings",
- "Takes effect at redeployment",
- APPLICATION_ID);
-
public static final UnboundBooleanFlag ENABLE_OTELCOL = defineFeatureFlag(
"enable-otel-collector", false,
List.of("olaa"), "2022-09-23", "2023-09-01",
@@ -313,39 +300,19 @@ public class Flags {
"Takes effect at redeployment",
APPLICATION_ID);
- public static final UnboundBooleanFlag NODE_ADMIN_TENANT_SERVICE_REGISTRY = defineFeatureFlag(
- "node-admin-tenant-service-registry", true,
- List.of("olaa"), "2023-04-12", "2023-08-07",
- "Whether AthenzCredentialsMaintainer in node-admin should create tenant service identity certificate",
- "Takes effect on next tick",
- HOSTNAME, VESPA_VERSION, APPLICATION_ID
- );
-
public static final UnboundBooleanFlag ENABLE_CROWDSTRIKE = defineFeatureFlag(
- "enable-crowdstrike", true, List.of("andreer"), "2023-04-13", "2023-08-31",
+ "enable-crowdstrike", true, List.of("andreer"), "2023-04-13", "2023-09-14",
"Whether to enable CrowdStrike.", "Takes effect on next host admin tick",
HOSTNAME);
- public static final UnboundBooleanFlag ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN = defineFeatureFlag(
- "allow-more-than-one-content-group-down", false, List.of("hmusum"), "2023-04-14", "2023-08-15",
- "Whether to enable possible configuration of letting more than one content group down",
- "Takes effect at redeployment",
- APPLICATION_ID);
-
public static final UnboundBooleanFlag RANDOMIZED_ENDPOINT_NAMES = defineFeatureFlag(
- "randomized-endpoint-names", false, List.of("andreer"), "2023-04-26", "2023-08-30",
+ "randomized-endpoint-names", false, List.of("andreer"), "2023-04-26", "2023-09-14",
"Whether to use randomized endpoint names",
"Takes effect on application deployment",
APPLICATION_ID);
- public static final UnboundIntFlag CERT_POOL_SIZE = defineIntFlag(
- "cert-pool-size", 0, List.of("andreer"), "2023-06-19", "2023-08-25",
- "Target number of preprovisioned endpoints certificates to maintain",
- "Takes effect on next run of CertPoolMaintainer"
- );
-
public static final UnboundBooleanFlag ENABLE_THE_ONE_THAT_SHOULD_NOT_BE_NAMED = defineFeatureFlag(
- "enable-the-one-that-should-not-be-named", false, List.of("hmusum"), "2023-05-08", "2023-08-15",
+ "enable-the-one-that-should-not-be-named", false, List.of("hmusum"), "2023-05-08", "2023-09-15",
"Whether to enable the one program that should not be named",
"Takes effect at next host-admin tick");
@@ -378,20 +345,34 @@ public class Flags {
public static final UnboundBooleanFlag WRITE_CONFIG_SERVER_SESSION_DATA_AS_ONE_BLOB = defineFeatureFlag(
"write-config-server-session-data-as-blob", false,
- List.of("hmuusm"), "2023-07-19", "2023-09-01",
+ List.of("hmusum"), "2023-07-19", "2023-10-01",
"Whether to write config server session data in one blob or as individual paths",
"Takes effect immediately");
public static final UnboundBooleanFlag READ_CONFIG_SERVER_SESSION_DATA_AS_ONE_BLOB = defineFeatureFlag(
"read-config-server-session-data-as-blob", false,
- List.of("hmuusm"), "2023-07-19", "2023-09-01",
+ List.of("hmusum"), "2023-07-19", "2023-10-01",
"Whether to read config server session data from sesion data blob or from individual paths",
"Takes effect immediately");
- public static final UnboundBooleanFlag USE_VESPA_USER_EVERYWHERE = defineFeatureFlag(
- "use-vespa-user-everywhere", false,
- List.of("aressem"), "2023-07-28", "2023-09-01",
- "Use the vespa user for running Vespa everywhere",
+ public static final UnboundBooleanFlag MORE_WIREGUARD = defineFeatureFlag(
+ "more-wireguard", false,
+ List.of("andreer"), "2023-08-21", "2023-09-21",
+ "Use wireguard in INternal enCLAVES",
+ "Takes effect on next host-admin run",
+ HOSTNAME, CLOUD_ACCOUNT);
+
+ public static final UnboundBooleanFlag IPV6_AWS_TARGET_GROUPS = defineFeatureFlag(
+ "ipv6-aws-target-groups", false,
+ List.of("andreer"), "2023-08-28", "2023-09-29",
+ "Always use IPv6 target groups for load balancers in aws",
+ "Takes effect on next load-balancer provisioning",
+ HOSTNAME, CLOUD_ACCOUNT);
+
+ public static final UnboundBooleanFlag WRITE_APPLICATION_DATA_AS_JSON = defineFeatureFlag(
+ "write-application-data-as-json", false,
+ List.of("hmusum"), "2023-08-27", "2023-10-01",
+ "Whether to write application data (active session id, last deployed session id etc. ) as json",
"Takes effect immediately");
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index 18f5f5f860d..f856ebeb456 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -21,6 +21,7 @@ import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME;
import static com.yahoo.vespa.flags.FetchVector.Dimension.NODE_TYPE;
import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID;
import static com.yahoo.vespa.flags.FetchVector.Dimension.VESPA_VERSION;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.ZONE_ID;
/**
* Definition for permanent feature flags
@@ -146,6 +147,12 @@ public class PermanentFlags {
"Takes effect on next deployment from controller",
APPLICATION_ID);
+ public static final UnboundBooleanFlag SEND_LIMITED_METRIC_SET = defineFeatureFlag(
+ "send-limited-metric-set", true,
+ "Whether a limited metric set should be fetched from metrics-proxy (CD systems only)",
+ "Takes effect on next host admin tick",
+ APPLICATION_ID);
+
private static final String VERSION_QUALIFIER_REGEX = "[a-zA-Z0-9_-]+";
private static final Pattern QUALIFIER_PATTERN = Pattern.compile("^" + VERSION_QUALIFIER_REGEX + "$");
private static final Pattern VERSION_PATTERN = Pattern.compile("^\\d\\.\\d\\.\\d(\\." + VERSION_QUALIFIER_REGEX + ")?$");
@@ -346,13 +353,20 @@ public class PermanentFlags {
"Takes effect immediately",
TENANT_ID);
- public static final UnboundIntFlag KEEP_FILE_REFERENCES_ON_TENANT_NODES = defineIntFlag(
- "keep-file-references-on-tenant-nodes", 30,
+ public static final UnboundIntFlag KEEP_FILE_REFERENCES_DAYS = defineIntFlag(
+ "keep-file-references-days", 30,
"How many days to keep file references on tenant nodes (based on last modification time)",
"Takes effect on restart of Docker container",
APPLICATION_ID
);
+ public static final UnboundIntFlag KEEP_FILE_REFERENCES_COUNT = defineIntFlag(
+ "keep-file-references-count", 20,
+ "How many file references to keep on tenant nodes (no matter what last modification time is)",
+ "Takes effect on restart of Docker container",
+ ZONE_ID, APPLICATION_ID
+ );
+
public static final UnboundIntFlag ENDPOINT_CONNECTION_TTL = defineIntFlag(
"endpoint-connection-ttl", 45,
"Time to live for connections to endpoints in seconds",
@@ -371,6 +385,30 @@ public class PermanentFlags {
"triggered",
"Takes effect immediately");
+ public static final UnboundBooleanFlag DROP_CACHES = defineFeatureFlag(
+ "drop-caches", true,
+ "Drop pagecache. " +
+ "This is combined with the drop-dentries-and-inodes flag for a single write to /proc/sys/vm/drop_caches.",
+ "Takes effect on next tick",
+ // The application ID is the exclusive application ID associated with the host,
+ // if any, or otherwise hosted-vespa:tenant-host:default.
+ APPLICATION_ID, TENANT_ID, CLUSTER_ID, CLUSTER_TYPE);
+
+ public static final UnboundIntFlag DROP_DENTRIES = defineIntFlag(
+ "drop-dentries", -1,
+ "Drop dentries and inodes every N minutes. 0 means every tick. -1 means disabled. " +
+ "This is combined with the drop-caches flag for a single write to /proc/sys/vm/drop_caches.",
+ "Takes effect on next tick",
+ // The application ID is the exclusive application ID associated with the host,
+ // if any, or otherwise hosted-vespa:tenant-host:default.
+ APPLICATION_ID, TENANT_ID, CLUSTER_ID, CLUSTER_TYPE);
+
+ public static final UnboundIntFlag CERT_POOL_SIZE = defineIntFlag(
+ "cert-pool-size", 0,
+ "Target number of preprovisioned endpoints certificates to maintain",
+ "Takes effect on next run of CertPoolMaintainer"
+ );
+
private PermanentFlags() {}
private static UnboundBooleanFlag defineFeatureFlag(
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/DimensionHelper.java b/flags/src/main/java/com/yahoo/vespa/flags/json/DimensionHelper.java
index 5e5506b616b..2193d70ec47 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/json/DimensionHelper.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/json/DimensionHelper.java
@@ -17,6 +17,7 @@ public class DimensionHelper {
static {
serializedDimensions.put(FetchVector.Dimension.APPLICATION_ID, "application");
serializedDimensions.put(FetchVector.Dimension.CLOUD, "cloud");
+ serializedDimensions.put(FetchVector.Dimension.CLOUD_ACCOUNT, "cloud-account");
serializedDimensions.put(FetchVector.Dimension.CLUSTER_ID, "cluster-id");
serializedDimensions.put(FetchVector.Dimension.CLUSTER_TYPE, "cluster-type");
serializedDimensions.put(FetchVector.Dimension.CONSOLE_USER_EMAIL, "console-user-email");
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
index 749f6830870..031b61c8e7e 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
@@ -68,6 +68,10 @@ public class RelationalCondition implements Condition {
return fetchVector.getValue(dimension).map(predicate::test).orElse(false);
}
+ public RelationalPredicate relationalPredicate() {
+ return relationalPredicate;
+ }
+
@Override
public WireCondition toWire() {
var condition = new WireCondition();
diff --git a/fnet/src/tests/sync_execute/sync_execute.cpp b/fnet/src/tests/sync_execute/sync_execute.cpp
index 5d2f4097ab4..0dd65b08874 100644
--- a/fnet/src/tests/sync_execute/sync_execute.cpp
+++ b/fnet/src/tests/sync_execute/sync_execute.cpp
@@ -17,6 +17,8 @@ TEST("sync execute") {
DoIt exe2;
DoIt exe3;
DoIt exe4;
+ DoIt exe5;
+ DoIt exe6;
FNET_Transport transport;
ASSERT_TRUE(transport.execute(&exe1));
ASSERT_TRUE(transport.Start());
@@ -26,14 +28,16 @@ TEST("sync execute") {
ASSERT_TRUE(exe2.gate.getCount() == 0u);
ASSERT_TRUE(transport.execute(&exe3));
transport.ShutDown(false);
- ASSERT_TRUE(!transport.execute(&exe4));
+ uint32_t expect_cnt_4 = transport.execute(&exe4) ? 0 : 1;
transport.sync();
transport.WaitFinished();
+ ASSERT_TRUE(!transport.execute(&exe5));
transport.sync();
ASSERT_TRUE(exe1.gate.getCount() == 0u);
ASSERT_TRUE(exe2.gate.getCount() == 0u);
ASSERT_TRUE(exe3.gate.getCount() == 0u);
- ASSERT_TRUE(exe4.gate.getCount() == 1u);
+ ASSERT_TRUE(exe4.gate.getCount() == expect_cnt_4);
+ ASSERT_TRUE(exe5.gate.getCount() == 1u);
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/fnet/src/vespa/fnet/transport_thread.cpp b/fnet/src/vespa/fnet/transport_thread.cpp
index 0b0df02c04c..217738b7364 100644
--- a/fnet/src/vespa/fnet/transport_thread.cpp
+++ b/fnet/src/vespa/fnet/transport_thread.cpp
@@ -119,7 +119,7 @@ FNET_TransportThread::PostEvent(FNET_ControlPacket *cpacket,
size_t qLen;
{
std::unique_lock<std::mutex> guard(_lock);
- if (IsShutDown()) {
+ if (_reject_events) {
guard.unlock();
DiscardEvent(cpacket, context);
return false;
@@ -243,7 +243,8 @@ FNET_TransportThread::FNET_TransportThread(FNET_Transport &owner_in)
_started(false),
_shutdown(false),
_finished(false),
- _detaching()
+ _detaching(),
+ _reject_events(false)
{
trapsigpipe();
}
@@ -384,9 +385,9 @@ FNET_TransportThread::ShutDown(bool waitFinished)
bool wasEmpty = false;
{
std::lock_guard<std::mutex> guard(_lock);
- if (!IsShutDown()) {
+ if (!should_shut_down()) {
_shutdown.store(true, std::memory_order_relaxed);
- wasEmpty = _queue.IsEmpty_NoLock();
+ wasEmpty = _queue.IsEmpty_NoLock();
}
}
if (wasEmpty) {
@@ -503,7 +504,7 @@ FNET_TransportThread::handle_event(FNET_IOComponent &ctx, bool read, bool write)
bool
FNET_TransportThread::EventLoopIteration() {
- if (!IsShutDown()) {
+ if (!should_shut_down()) {
int msTimeout = vespalib::count_ms(time_tools().event_timeout());
// obtain I/O events
_selector.poll(msTimeout);
@@ -530,7 +531,7 @@ FNET_TransportThread::EventLoopIteration() {
FlushDeleteList();
} // -- END OF MAIN EVENT LOOP --
- if (!IsShutDown())
+ if (!should_shut_down())
return true;
if (is_finished())
return false;
@@ -552,10 +553,22 @@ FNET_TransportThread::checkTimedoutComponents(vespalib::duration timeout) {
void
FNET_TransportThread::endEventLoop() {
+ // close and remove all I/O Components
+ FNET_IOComponent *component = _componentsHead;
+ while (component != nullptr) {
+ assert(component == _componentsHead);
+ FNET_IOComponent *tmp = component;
+ component = component->_ioc_next;
+ RemoveComponent(tmp);
+ tmp->Close();
+ tmp->internal_subref();
+ }
+
// flush event queue
{
std::lock_guard<std::mutex> guard(_lock);
_queue.FlushPackets_NoLock(&_myQueue);
+ _reject_events = true;
}
// discard remaining events
@@ -569,16 +582,6 @@ FNET_TransportThread::endEventLoop() {
}
}
- // close and remove all I/O Components
- FNET_IOComponent *component = _componentsHead;
- while (component != nullptr) {
- assert(component == _componentsHead);
- FNET_IOComponent *tmp = component;
- component = component->_ioc_next;
- RemoveComponent(tmp);
- tmp->Close();
- tmp->internal_subref();
- }
assert(_componentsHead == nullptr &&
_componentsTail == nullptr &&
_timeOutHead == nullptr &&
@@ -588,7 +591,7 @@ FNET_TransportThread::endEventLoop() {
{
std::lock_guard<std::mutex> guard(_shutdownLock);
- _finished.store(true, std::memory_order_relaxed);
+ _finished.store(true, std::memory_order_release);
_shutdownCond.notify_all();
}
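
The reordering above closes the I/O components first, then flushes the event queue and, under the same lock, flips the new _reject_events flag; since PostEvent now checks _reject_events instead of the shutdown flag, events keep being accepted until that final flush, after which no new event can slip into the queue. A minimal Java sketch of that "accept until drained under the lock, then reject" hand-off, with all names invented for illustration:

    import java.util.ArrayDeque;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Queue;

    // Illustrative only: the accept-until-drained hand-off, not the fnet implementation.
    class EventQueue {
        private final Object lock = new Object();
        private final Queue<Runnable> queue = new ArrayDeque<>();
        private boolean rejectEvents = false;

        boolean post(Runnable event) {
            synchronized (lock) {
                if (rejectEvents) return false;  // caller must discard the event
                queue.add(event);
                return true;
            }
        }

        void drainAndReject() {
            List<Runnable> drained = new ArrayList<>();
            synchronized (lock) {
                drained.addAll(queue);           // flush everything already posted
                queue.clear();
                rejectEvents = true;             // from now on, post() fails
            }
            // deal with the flushed events outside the lock (fnet discards them)
            drained.forEach(Runnable::run);
        }
    }
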
diff --git a/fnet/src/vespa/fnet/transport_thread.h b/fnet/src/vespa/fnet/transport_thread.h
index 6047d4e3482..c7ada472501 100644
--- a/fnet/src/vespa/fnet/transport_thread.h
+++ b/fnet/src/vespa/fnet/transport_thread.h
@@ -52,6 +52,7 @@ private:
std::atomic<bool> _shutdown; // should stop event loop ?
std::atomic<bool> _finished; // event loop stopped ?
std::set<FNET_IServerAdapter*> _detaching; // server adapters being detached
+ bool _reject_events; // the transport thread does not want any more events
/**
* Add an IOComponent to the list of components. This operation is
@@ -169,12 +170,12 @@ private:
**/
bool EventLoopIteration();
- bool IsShutDown() const noexcept {
+ [[nodiscard]] bool should_shut_down() const noexcept {
return _shutdown.load(std::memory_order_relaxed);
}
[[nodiscard]] bool is_finished() const noexcept {
- return _finished.load(std::memory_order_relaxed);
+ return _finished.load(std::memory_order_acquire);
}
public:
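
The store to _finished is upgraded from relaxed to release, and the matching load in is_finished() to acquire, so a thread that observes finished == true is also guaranteed to see everything the transport thread wrote before setting the flag. The same pairing, sketched with Java's AtomicBoolean (setRelease/getAcquire) purely for illustration:

    import java.util.concurrent.atomic.AtomicBoolean;

    // Illustrative only: release/acquire publication of a "finished" flag in Java.
    class FinishedFlag {
        private final AtomicBoolean finished = new AtomicBoolean(false);
        private long eventsHandled;              // written before the flag is set

        void finish(long handled) {
            eventsHandled = handled;             // plain write ...
            finished.setRelease(true);           // ... published by the release store
        }

        Long pollResult() {
            // once the acquire load sees true, the write to eventsHandled is visible too
            return finished.getAcquire() ? eventsHandled : null;
        }
    }
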
diff --git a/hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java b/hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java
index 87bf2057bc6..7768c1a1712 100644
--- a/hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java
+++ b/hosted-api/src/main/java/ai/vespa/hosted/api/ControllerHttpClient.java
@@ -68,6 +68,8 @@ public abstract class ControllerHttpClient {
private final HttpClient client;
private final URI endpoint;
+ public record SubmitResult(String message, long id) { }
+
/** Creates an HTTP client against the given endpoint, using the given HTTP client builder to create a client. */
protected ControllerHttpClient(URI endpoint, SSLContext sslContext) {
if (sslContext == null) {
@@ -113,14 +115,14 @@ public abstract class ControllerHttpClient {
}
/** Sends the given submission to the remote controller and returns the version of the accepted package, or throws if this fails. */
- public String submit(Submission submission, TenantName tenant, ApplicationName application) {
- return toMessage(send(request(HttpRequest.newBuilder(applicationPath(tenant, application).resolve("submit"))
- .timeout(Duration.ofMinutes(30)),
- POST,
- new MultiPartStreamer().addJson("submitOptions", metaToJson(submission))
- .addFile("applicationZip", submission.applicationZip())
- .addFile("applicationTestZip", submission.applicationTestZip())),
- 1));
+ public SubmitResult submit(Submission submission, TenantName tenant, ApplicationName application) {
+ return toSubmitResult(send(request(HttpRequest.newBuilder(applicationPath(tenant, application).resolve("submit"))
+ .timeout(Duration.ofMinutes(30)),
+ POST,
+ new MultiPartStreamer().addJson("submitOptions", metaToJson(submission))
+ .addFile("applicationZip", submission.applicationZip())
+ .addFile("applicationTestZip", submission.applicationTestZip())),
+ 1));
}
/** Sends the given deployment to the given application in the given zone, or throws if this fails. */
@@ -462,6 +464,12 @@ public abstract class ControllerHttpClient {
return toInspector(response).field("message").asString();
}
+ private static SubmitResult toSubmitResult(HttpResponse<byte[]> response) {
+ Inspector rootObject = toInspector(response);
+ return new SubmitResult(rootObject.field("message").asString(),
+ rootObject.field("build").asLong());
+ }
+
private static DeploymentResult toDeploymentResult(HttpResponse<byte[]> response) {
Inspector rootObject = toInspector(response);
return new DeploymentResult(rootObject.field("message").asString(),
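
submit() above now returns a SubmitResult record carrying both the controller's message and the build number parsed from the "build" field of the response, instead of only the message string. A self-contained sketch of consuming such a result; the demo class and literal values are invented, only the record shape comes from the diff:

    // Illustrative only: the shape of the new return value and a typical use of it.
    public class SubmitResultDemo {
        record SubmitResult(String message, long id) { }

        public static void main(String[] args) {
            SubmitResult result = new SubmitResult("Application package submitted", 42L);
            System.out.printf("%s (build %d)%n", result.message(), result.id());
        }
    }
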
diff --git a/hosted-tenant-base/pom.xml b/hosted-tenant-base/pom.xml
index 9e3c468aaf5..ef1d3978a81 100644
--- a/hosted-tenant-base/pom.xml
+++ b/hosted-tenant-base/pom.xml
@@ -116,6 +116,13 @@
<artifactId>junit-vintage-engine</artifactId>
<version>${junit.vespa.tenant.version}</version>
</dependency>
+
+ <!-- Transitive dependency of junit4 -->
+ <dependency>
+ <groupId>org.hamcrest</groupId>
+ <artifactId>hamcrest-core</artifactId>
+ <version>${hamcrest.vespa.version}</version>
+ </dependency>
</dependencies>
</dependencyManagement>
@@ -199,7 +206,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
- <version>3.3.0</version>
+ <version>${maven-enforcer-plugin.vespa.version}</version>
<executions>
<execution>
<id>enforce-java</id>
@@ -333,7 +340,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
- <version>1.3</version>
+ <version>${maven-antrun-plugin.vespa.version}</version>
<executions>
<execution>
<id>attach-artifact</id>
@@ -342,7 +349,7 @@
<goal>run</goal>
</goals>
<configuration>
- <tasks>
+ <target>
<!-- Workaround to copy src/test/application/tests only when its parents exists:
Copy in two steps, eliminating the parents in the helper step-->
@@ -364,7 +371,7 @@
</copy>
<zip destfile="target/application-test.zip" basedir="target/application-test/" />
- </tasks>
+ </target>
</configuration>
</execution>
</executions>
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpression.java
new file mode 100644
index 00000000000..c8106148630
--- /dev/null
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpression.java
@@ -0,0 +1,51 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.indexinglanguage.expressions;
+
+import com.yahoo.document.DataType;
+import com.yahoo.document.datatypes.LongFieldValue;
+import java.time.Instant;
+
+/**
+ * Converts an ISO-8601 formatted date string to Unix epoch time in seconds.
+ *
+ * @author bergum
+ */
+
+public class ToEpochSecondExpression extends Expression {
+ public ToEpochSecondExpression() {
+ super(DataType.STRING); //only accept string input
+ }
+
+ @Override
+ protected void doExecute(ExecutionContext context) {
+ String inputString = String.valueOf(context.getValue());
+ long epochTime = Instant.parse(inputString).getEpochSecond();
+ context.setValue(new LongFieldValue(epochTime));
+ }
+
+ @Override
+ protected void doVerify(VerificationContext context) {
+ context.setValueType(createdOutputType());
+ }
+
+ @Override
+ public DataType createdOutputType() {
+ return DataType.LONG;
+ }
+
+ @Override
+ public String toString() {
+ return "to_epoch_second";
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj instanceof ToEpochSecondExpression;
+ }
+
+ @Override
+ public int hashCode() {
+ return getClass().hashCode();
+ }
+
+}
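
A small, compile-ready sketch of driving the new expression directly, mirroring the test case further down in this diff (ExecutionContext, SimpleTestAdapter and the field-value types are used exactly as there; the wrapping demo class is invented):

    import com.yahoo.document.datatypes.LongFieldValue;
    import com.yahoo.document.datatypes.StringFieldValue;
    import com.yahoo.vespa.indexinglanguage.SimpleTestAdapter;
    import com.yahoo.vespa.indexinglanguage.expressions.ExecutionContext;
    import com.yahoo.vespa.indexinglanguage.expressions.ToEpochSecondExpression;

    // Illustrative only: executes to_epoch_second on an ISO-8601 string, as in the test below.
    public class ToEpochSecondDemo {
        public static void main(String[] args) {
            ExecutionContext ctx = new ExecutionContext(new SimpleTestAdapter());
            ctx.setValue(new StringFieldValue("2023-12-24T17:00:43Z"))
               .execute(new ToEpochSecondExpression());
            long seconds = ((LongFieldValue) ctx.getValue()).getLong();
            System.out.println(seconds);  // 1703437243
        }
    }
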
diff --git a/indexinglanguage/src/main/javacc/IndexingParser.jj b/indexinglanguage/src/main/javacc/IndexingParser.jj
index a039ad137ee..d559d9b7260 100644
--- a/indexinglanguage/src/main/javacc/IndexingParser.jj
+++ b/indexinglanguage/src/main/javacc/IndexingParser.jj
@@ -198,6 +198,7 @@ TOKEN :
<TO_INT: "to_int"> |
<TO_LONG: "to_long"> |
<TO_POS: "to_pos"> |
+ <TO_EPOCH_SECOND: "to_epoch_second"> |
<TO_STRING: "to_string"> |
<TO_WSET: "to_wset"> |
<TO_BOOL: "to_bool"> |
@@ -338,6 +339,7 @@ Expression value() :
val = toIntExp() |
val = toLongExp() |
val = toPosExp() |
+ val = toEpochSecondExp() |
val = toStringExp() |
val = toWsetExp() |
val = toBoolExp() |
@@ -713,6 +715,12 @@ Expression toPosExp() : { }
{ return new ToPositionExpression(); }
}
+Expression toEpochSecondExp() : { }
+{
+ ( <TO_EPOCH_SECOND> )
+ { return new ToEpochSecondExpression(); }
+}
+
Expression toStringExp() : { }
{
( <TO_STRING> )
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpressionTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpressionTestCase.java
new file mode 100644
index 00000000000..7203afcc1a0
--- /dev/null
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpressionTestCase.java
@@ -0,0 +1,51 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.indexinglanguage.expressions;
+
+import com.yahoo.document.DataType;
+import com.yahoo.document.datatypes.FieldValue;
+import com.yahoo.document.datatypes.LongFieldValue;
+import com.yahoo.document.datatypes.StringFieldValue;
+import com.yahoo.vespa.indexinglanguage.SimpleTestAdapter;
+import org.junit.Test;
+
+import static com.yahoo.vespa.indexinglanguage.expressions.ExpressionAssert.assertVerify;
+import static com.yahoo.vespa.indexinglanguage.expressions.ExpressionAssert.assertVerifyThrows;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+
+public class ToEpochSecondExpressionTestCase {
+ @Test
+ public void requireThatHashCodeAndEqualsAreImplemented() {
+ Expression exp = new ToEpochSecondExpression();
+ assertFalse(exp.equals(new Object()));
+ assertEquals(exp, new ToEpochSecondExpression());
+ assertEquals(exp.hashCode(), new ToEpochSecondExpression().hashCode());
+ }
+
+ @Test
+ public void requireThatExpressionCanBeVerified() {
+ Expression exp = new ToEpochSecondExpression();
+ assertVerify(DataType.STRING, exp, DataType.LONG);
+ assertVerifyThrows(DataType.INT, exp, "Expected string input, got int.");
+ assertVerifyThrows(null, exp, "Expected string input, got null.");
+ }
+
+ @Test
+ public void requireThatValueIsConvertedWithMs() {
+ ExecutionContext ctx = new ExecutionContext(new SimpleTestAdapter());
+ ctx.setValue(new StringFieldValue("2023-12-24T17:00:43.000Z")).execute(new ToEpochSecondExpression());
+ FieldValue val = ctx.getValue();
+ assertTrue(val instanceof LongFieldValue);
+ assertEquals(1703437243L, ((LongFieldValue)val).getLong());
+ }
+
+ @Test
+ public void requireThatValueIsConverted() {
+ ExecutionContext ctx = new ExecutionContext(new SimpleTestAdapter());
+ ctx.setValue(new StringFieldValue("2023-12-24T17:00:43Z")).execute(new ToEpochSecondExpression());
+ FieldValue val = ctx.getValue();
+ assertTrue(val instanceof LongFieldValue);
+ assertEquals(1703437243L, ((LongFieldValue)val).getLong());
+ }
+}
diff --git a/integration/intellij/build.gradle.kts b/integration/intellij/build.gradle.kts
index a3ec76e2f1e..b1e5180d02f 100644
--- a/integration/intellij/build.gradle.kts
+++ b/integration/intellij/build.gradle.kts
@@ -4,7 +4,7 @@ import org.jetbrains.grammarkit.tasks.GenerateParserTask
plugins {
id("java-library")
- id("org.jetbrains.intellij") version "1.13.3"
+ id("org.jetbrains.intellij") version "1.15.0"
id("org.jetbrains.grammarkit") version "2022.3.1"
id("maven-publish") // to deploy the plugin into a Maven repo
}
diff --git a/jdisc_core/pom.xml b/jdisc_core/pom.xml
index fa9bf05fad3..4471269358a 100644
--- a/jdisc_core/pom.xml
+++ b/jdisc_core/pom.xml
@@ -57,11 +57,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
diff --git a/jdisc_core/src/test/resources/exportPackages.properties b/jdisc_core/src/test/resources/exportPackages.properties
index 388d1510a10..5726cf8d924 100644
--- a/jdisc_core/src/test/resources/exportPackages.properties
+++ b/jdisc_core/src/test/resources/exportPackages.properties
@@ -1,3 +1,3 @@
#generated by com.yahoo.jdisc.core.ExportPackages
#Fri Jul 07 16:04:11 CEST 2023
-exportPackages=org.osgi.framework; version\="1.10.0", org.osgi.framework.connect; version\="1.0.0", org.osgi.framework.dto; uses\:\="org.osgi.dto"; version\="1.8.0", org.osgi.framework.hooks.bundle; uses\:\="org.osgi.framework"; version\="1.1.0", org.osgi.framework.hooks.resolver; uses\:\="org.osgi.framework.wiring"; version\="1.0.0", org.osgi.framework.hooks.service; uses\:\="org.osgi.framework"; version\="1.1.0", org.osgi.framework.hooks.weaving; uses\:\="org.osgi.framework.wiring"; version\="1.1.0", org.osgi.framework.launch; uses\:\="org.osgi.framework"; version\="1.2.0", org.osgi.framework.namespace; uses\:\="org.osgi.resource"; version\="1.2.0", org.osgi.framework.startlevel; uses\:\="org.osgi.framework"; version\="1.0.0", org.osgi.framework.startlevel.dto; uses\:\="org.osgi.dto"; version\="1.0.0", org.osgi.framework.wiring; uses\:\="org.osgi.framework,org.osgi.resource"; version\="1.2.0", org.osgi.framework.wiring.dto; uses\:\="org.osgi.dto,org.osgi.resource.dto"; version\="1.3.0", org.osgi.resource; version\="1.0.1", org.osgi.resource.dto; uses\:\="org.osgi.dto"; version\="1.0.1", org.osgi.service.packageadmin; uses\:\="org.osgi.framework"; version\="1.2.1", org.osgi.service.startlevel; uses\:\="org.osgi.framework"; version\="1.1.1", org.osgi.service.url; version\="1.0.1", org.osgi.service.resolver; uses\:\="org.osgi.resource"; version\="1.1.1", org.osgi.util.tracker; uses\:\="org.osgi.framework"; version\="1.5.3", org.osgi.dto; version\="1.1.1", org.osgi.service.condition; version\="1.0.0", java.util.jar; version\="0.0.0.JavaSE_017", java.nio; version\="0.0.0.JavaSE_017", java.nio.file.spi; version\="0.0.0.JavaSE_017", java.security; version\="0.0.0.JavaSE_017", java.util; version\="0.0.0.JavaSE_017", javax.crypto.interfaces; version\="0.0.0.JavaSE_017", java.nio.charset.spi; version\="0.0.0.JavaSE_017", java.util.concurrent; version\="0.0.0.JavaSE_017", javax.security.auth.spi; version\="0.0.0.JavaSE_017", java.lang.annotation; version\="0.0.0.JavaSE_017", javax.security.cert; version\="0.0.0.JavaSE_017", java.net; version\="0.0.0.JavaSE_017", java.util.spi; version\="0.0.0.JavaSE_017", java.io; version\="0.0.0.JavaSE_017", java.nio.charset; version\="0.0.0.JavaSE_017", java.time.zone; version\="0.0.0.JavaSE_017", javax.crypto; version\="0.0.0.JavaSE_017", java.time.chrono; version\="0.0.0.JavaSE_017", java.nio.channels; version\="0.0.0.JavaSE_017", java.security.spec; version\="0.0.0.JavaSE_017", java.security.cert; version\="0.0.0.JavaSE_017", java.util.concurrent.atomic; version\="0.0.0.JavaSE_017", java.nio.file; version\="0.0.0.JavaSE_017", java.math; version\="0.0.0.JavaSE_017", java.nio.channels.spi; version\="0.0.0.JavaSE_017", java.text.spi; version\="0.0.0.JavaSE_017", java.security.interfaces; version\="0.0.0.JavaSE_017", java.lang.constant; version\="0.0.0.JavaSE_017", javax.net.ssl; version\="0.0.0.JavaSE_017", javax.security.auth.login; version\="0.0.0.JavaSE_017", javax.security.auth.callback; version\="0.0.0.JavaSE_017", java.lang.reflect; version\="0.0.0.JavaSE_017", javax.security.auth.x500; version\="0.0.0.JavaSE_017", javax.net; version\="0.0.0.JavaSE_017", java.util.function; version\="0.0.0.JavaSE_017", java.lang.runtime; version\="0.0.0.JavaSE_017", java.lang; version\="0.0.0.JavaSE_017", java.time; version\="0.0.0.JavaSE_017", java.util.stream; version\="0.0.0.JavaSE_017", javax.crypto.spec; version\="0.0.0.JavaSE_017", java.text; version\="0.0.0.JavaSE_017", java.util.random; version\="0.0.0.JavaSE_017", java.nio.file.attribute; 
version\="0.0.0.JavaSE_017", java.util.zip; version\="0.0.0.JavaSE_017", java.time.temporal; version\="0.0.0.JavaSE_017", java.util.concurrent.locks; version\="0.0.0.JavaSE_017", java.time.format; version\="0.0.0.JavaSE_017", java.lang.invoke; version\="0.0.0.JavaSE_017", java.lang.module; version\="0.0.0.JavaSE_017", java.net.spi; version\="0.0.0.JavaSE_017", java.util.regex; version\="0.0.0.JavaSE_017", java.lang.ref; version\="0.0.0.JavaSE_017", javax.security.auth; version\="0.0.0.JavaSE_017", javax.lang.model.element; version\="0.0.0.JavaSE_017", javax.annotation.processing; version\="0.0.0.JavaSE_017", javax.lang.model; version\="0.0.0.JavaSE_017", javax.lang.model.util; version\="0.0.0.JavaSE_017", javax.lang.model.type; version\="0.0.0.JavaSE_017", javax.tools; version\="0.0.0.JavaSE_017", java.awt.datatransfer; version\="0.0.0.JavaSE_017", java.awt.event; version\="0.0.0.JavaSE_017", javax.accessibility; version\="0.0.0.JavaSE_017", javax.swing.plaf.nimbus; version\="0.0.0.JavaSE_017", javax.print; version\="0.0.0.JavaSE_017", javax.print.attribute; version\="0.0.0.JavaSE_017", javax.sound.sampled; version\="0.0.0.JavaSE_017", javax.imageio.event; version\="0.0.0.JavaSE_017", javax.swing.filechooser; version\="0.0.0.JavaSE_017", javax.swing.plaf; version\="0.0.0.JavaSE_017", javax.swing.undo; version\="0.0.0.JavaSE_017", javax.swing.plaf.basic; version\="0.0.0.JavaSE_017", javax.swing.text; version\="0.0.0.JavaSE_017", java.awt.dnd; version\="0.0.0.JavaSE_017", javax.sound.midi; version\="0.0.0.JavaSE_017", java.applet; version\="0.0.0.JavaSE_017", java.awt.im.spi; version\="0.0.0.JavaSE_017", javax.imageio; version\="0.0.0.JavaSE_017", java.awt.font; version\="0.0.0.JavaSE_017", javax.swing.text.rtf; version\="0.0.0.JavaSE_017", javax.swing.text.html.parser; version\="0.0.0.JavaSE_017", java.beans; version\="0.0.0.JavaSE_017", javax.swing.plaf.synth; version\="0.0.0.JavaSE_017", java.awt.desktop; version\="0.0.0.JavaSE_017", javax.swing.event; version\="0.0.0.JavaSE_017", javax.imageio.stream; version\="0.0.0.JavaSE_017", java.awt; version\="0.0.0.JavaSE_017", java.beans.beancontext; version\="0.0.0.JavaSE_017", javax.swing.plaf.metal; version\="0.0.0.JavaSE_017", javax.print.event; version\="0.0.0.JavaSE_017", java.awt.im; version\="0.0.0.JavaSE_017", javax.swing.plaf.multi; version\="0.0.0.JavaSE_017", java.awt.image.renderable; version\="0.0.0.JavaSE_017", javax.swing; version\="0.0.0.JavaSE_017", javax.swing.colorchooser; version\="0.0.0.JavaSE_017", javax.print.attribute.standard; version\="0.0.0.JavaSE_017", javax.sound.midi.spi; version\="0.0.0.JavaSE_017", javax.swing.table; version\="0.0.0.JavaSE_017", javax.imageio.metadata; version\="0.0.0.JavaSE_017", java.awt.image; version\="0.0.0.JavaSE_017", java.awt.print; version\="0.0.0.JavaSE_017", javax.imageio.plugins.tiff; version\="0.0.0.JavaSE_017", javax.swing.tree; version\="0.0.0.JavaSE_017", javax.imageio.plugins.jpeg; version\="0.0.0.JavaSE_017", java.awt.geom; version\="0.0.0.JavaSE_017", java.awt.color; version\="0.0.0.JavaSE_017", javax.imageio.plugins.bmp; version\="0.0.0.JavaSE_017", javax.sound.sampled.spi; version\="0.0.0.JavaSE_017", javax.swing.border; version\="0.0.0.JavaSE_017", javax.imageio.spi; version\="0.0.0.JavaSE_017", javax.swing.text.html; version\="0.0.0.JavaSE_017", java.lang.instrument; version\="0.0.0.JavaSE_017", java.util.logging; version\="0.0.0.JavaSE_017", java.lang.management; version\="0.0.0.JavaSE_017", javax.management.openmbean; version\="0.0.0.JavaSE_017", javax.management.loading; 
version\="0.0.0.JavaSE_017", javax.management.relation; version\="0.0.0.JavaSE_017", javax.management; version\="0.0.0.JavaSE_017", javax.management.timer; version\="0.0.0.JavaSE_017", javax.management.modelmbean; version\="0.0.0.JavaSE_017", javax.management.monitor; version\="0.0.0.JavaSE_017", javax.management.remote; version\="0.0.0.JavaSE_017", javax.management.remote.rmi; version\="0.0.0.JavaSE_017", javax.naming; version\="0.0.0.JavaSE_017", javax.naming.ldap.spi; version\="0.0.0.JavaSE_017", javax.naming.event; version\="0.0.0.JavaSE_017", javax.naming.directory; version\="0.0.0.JavaSE_017", javax.naming.ldap; version\="0.0.0.JavaSE_017", javax.naming.spi; version\="0.0.0.JavaSE_017", java.net.http; version\="0.0.0.JavaSE_017", java.util.prefs; version\="0.0.0.JavaSE_017", java.rmi.registry; version\="0.0.0.JavaSE_017", java.rmi.server; version\="0.0.0.JavaSE_017", java.rmi; version\="0.0.0.JavaSE_017", java.rmi.dgc; version\="0.0.0.JavaSE_017", javax.rmi.ssl; version\="0.0.0.JavaSE_017", javax.script; version\="0.0.0.JavaSE_017", org.ietf.jgss; version\="0.0.0.JavaSE_017", javax.security.auth.kerberos; version\="0.0.0.JavaSE_017", javax.security.sasl; version\="0.0.0.JavaSE_017", javax.smartcardio; version\="0.0.0.JavaSE_017", javax.sql; version\="0.0.0.JavaSE_017", java.sql; version\="0.0.0.JavaSE_017", javax.sql.rowset; version\="0.0.0.JavaSE_017", javax.sql.rowset.serial; version\="0.0.0.JavaSE_017", javax.sql.rowset.spi; version\="0.0.0.JavaSE_017", javax.transaction.xa; version\="0.0.0.JavaSE_017", javax.xml.xpath; version\="0.0.0.JavaSE_017", javax.xml.transform; version\="0.0.0.JavaSE_017", org.xml.sax; version\="0.0.0.JavaSE_017", javax.xml.stream; version\="0.0.0.JavaSE_017", javax.xml.stream.events; version\="0.0.0.JavaSE_017", org.w3c.dom.traversal; version\="0.0.0.JavaSE_017", javax.xml.catalog; version\="0.0.0.JavaSE_017", javax.xml.datatype; version\="0.0.0.JavaSE_017", javax.xml.transform.sax; version\="0.0.0.JavaSE_017", javax.xml; version\="0.0.0.JavaSE_017", org.xml.sax.ext; version\="0.0.0.JavaSE_017", javax.xml.parsers; version\="0.0.0.JavaSE_017", javax.xml.validation; version\="0.0.0.JavaSE_017", javax.xml.transform.dom; version\="0.0.0.JavaSE_017", javax.xml.transform.stream; version\="0.0.0.JavaSE_017", org.w3c.dom; version\="0.0.0.JavaSE_017", org.w3c.dom.bootstrap; version\="0.0.0.JavaSE_017", org.w3c.dom.views; version\="0.0.0.JavaSE_017", org.xml.sax.helpers; version\="0.0.0.JavaSE_017", javax.xml.transform.stax; version\="0.0.0.JavaSE_017", javax.xml.namespace; version\="0.0.0.JavaSE_017", javax.xml.stream.util; version\="0.0.0.JavaSE_017", org.w3c.dom.ls; version\="0.0.0.JavaSE_017", org.w3c.dom.ranges; version\="0.0.0.JavaSE_017", org.w3c.dom.events; version\="0.0.0.JavaSE_017", javax.xml.crypto.dom; version\="0.0.0.JavaSE_017", javax.xml.crypto.dsig.dom; version\="0.0.0.JavaSE_017", javax.xml.crypto.dsig.keyinfo; version\="0.0.0.JavaSE_017", javax.xml.crypto.dsig.spec; version\="0.0.0.JavaSE_017", javax.xml.crypto.dsig; version\="0.0.0.JavaSE_017", javax.xml.crypto; version\="0.0.0.JavaSE_017", com.sun.java.accessibility.util; version\="0.0.0.JavaSE_017", com.sun.tools.attach.spi; version\="0.0.0.JavaSE_017", com.sun.tools.attach; version\="0.0.0.JavaSE_017", com.sun.source.doctree; version\="0.0.0.JavaSE_017", com.sun.tools.javac; version\="0.0.0.JavaSE_017", com.sun.source.util; version\="0.0.0.JavaSE_017", com.sun.source.tree; version\="0.0.0.JavaSE_017", jdk.dynalink.linker.support; version\="0.0.0.JavaSE_017", jdk.dynalink.beans; 
version\="0.0.0.JavaSE_017", jdk.dynalink.linker; version\="0.0.0.JavaSE_017", jdk.dynalink; version\="0.0.0.JavaSE_017", jdk.dynalink.support; version\="0.0.0.JavaSE_017", com.sun.net.httpserver.spi; version\="0.0.0.JavaSE_017", com.sun.net.httpserver; version\="0.0.0.JavaSE_017", jdk.security.jarsigner; version\="0.0.0.JavaSE_017", com.sun.jarsigner; version\="0.0.0.JavaSE_017", jdk.javadoc.doclet; version\="0.0.0.JavaSE_017", com.sun.tools.jconsole; version\="0.0.0.JavaSE_017", com.sun.jdi.event; version\="0.0.0.JavaSE_017", com.sun.jdi.connect; version\="0.0.0.JavaSE_017", com.sun.jdi.request; version\="0.0.0.JavaSE_017", com.sun.jdi; version\="0.0.0.JavaSE_017", com.sun.jdi.connect.spi; version\="0.0.0.JavaSE_017", jdk.jfr; version\="0.0.0.JavaSE_017", jdk.jfr.consumer; version\="0.0.0.JavaSE_017", jdk.jshell.execution; version\="0.0.0.JavaSE_017", jdk.jshell; version\="0.0.0.JavaSE_017", jdk.jshell.tool; version\="0.0.0.JavaSE_017", jdk.jshell.spi; version\="0.0.0.JavaSE_017", netscape.javascript; version\="0.0.0.JavaSE_017", com.sun.management; version\="0.0.0.JavaSE_017", jdk.management.jfr; version\="0.0.0.JavaSE_017", jdk.nio; version\="0.0.0.JavaSE_017", jdk.net; version\="0.0.0.JavaSE_017", jdk.nio.mapmode; version\="0.0.0.JavaSE_017", com.sun.nio.sctp; version\="0.0.0.JavaSE_017", com.sun.security.auth.module; version\="0.0.0.JavaSE_017", com.sun.security.auth.callback; version\="0.0.0.JavaSE_017", com.sun.security.auth; version\="0.0.0.JavaSE_017", com.sun.security.auth.login; version\="0.0.0.JavaSE_017", com.sun.security.jgss; version\="0.0.0.JavaSE_017", sun.misc; version\="0.0.0.JavaSE_017", sun.reflect; version\="0.0.0.JavaSE_017", com.sun.nio.file; version\="0.0.0.JavaSE_017", jdk.swing.interop; version\="0.0.0.JavaSE_017", org.w3c.dom.html; version\="0.0.0.JavaSE_017", org.w3c.dom.stylesheets; version\="0.0.0.JavaSE_017", org.w3c.dom.css; version\="0.0.0.JavaSE_017", org.w3c.dom.xpath; version\="0.0.0.JavaSE_017", com.yahoo.jdisc, com.yahoo.jdisc.application, com.yahoo.jdisc.handler, com.yahoo.jdisc.service, com.yahoo.jdisc.statistics, com.yahoo.jdisc.refcount, javax.inject;version\=1.0.0, org.aopalliance.intercept, org.aopalliance.aop, 
com.google.common.annotations;version\="32.1.1",com.google.common.base;version\="32.1.1";uses\:\="javax.annotation",com.google.common.cache;version\="32.1.1";uses\:\="com.google.common.base,com.google.common.collect,com.google.common.util.concurrent,javax.annotation",com.google.common.collect;version\="32.1.1";uses\:\="com.google.common.base,javax.annotation",com.google.common.escape;version\="32.1.1";uses\:\="com.google.common.base,javax.annotation",com.google.common.eventbus;version\="32.1.1",com.google.common.graph;version\="32.1.1";uses\:\="com.google.common.collect,javax.annotation",com.google.common.hash;version\="32.1.1";uses\:\="com.google.common.base,javax.annotation",com.google.common.html;version\="32.1.1";uses\:\="com.google.common.escape",com.google.common.io;version\="32.1.1";uses\:\="com.google.common.base,com.google.common.collect,com.google.common.graph,com.google.common.hash,javax.annotation",com.google.common.math;version\="32.1.1";uses\:\="javax.annotation",com.google.common.net;version\="32.1.1";uses\:\="com.google.common.base,com.google.common.collect,com.google.common.escape,javax.annotation",com.google.common.primitives;version\="32.1.1";uses\:\="com.google.common.base,javax.annotation",com.google.common.reflect;version\="32.1.1";uses\:\="com.google.common.collect,com.google.common.io,javax.annotation",com.google.common.util.concurrent;version\="32.1.1";uses\:\="com.google.common.base,com.google.common.collect,com.google.common.util.concurrent.internal,javax.annotation",com.google.common.xml;version\="32.1.1";uses\:\="com.google.common.escape", com.google.inject;version\="1.4",com.google.inject.binder;version\="1.4",com.google.inject.matcher;version\="1.4",com.google.inject.multibindings;version\="1.4",com.google.inject.name;version\="1.4",com.google.inject.spi;version\="1.4",com.google.inject.util;version\="1.4", org.slf4j;version\=1.7.32, org.slf4j.spi;version\=1.7.32, org.slf4j.helpers;version\=1.7.32, org.slf4j.event;version\=1.7.32, org.slf4j.impl;version\=1.7.32, org.apache.commons.logging;version\=1.2, org.apache.commons.logging.impl;version\=1.2, com.sun.jna;version\=5.11.0, com.sun.jna.ptr;version\=5.11.0, com.sun.jna.win32;version\=5.11.0, org.apache.log4j;version\=1.2.17,org.apache.log4j.helpers;version\=1.2.17,org.apache.log4j.spi;version\=1.2.17,org.apache.log4j.xml;version\=1.2.17, com.yahoo.component.annotation;version\="1.0.0", com.yahoo.config;version\=1.0.0, com.yahoo.vespa.defaults;version\=1.0.0, 
ai.vespa.http;version\=1.0.0,ai.vespa.llm.client.openai;version\=1.0.0,ai.vespa.llm.completion;version\=1.0.0,ai.vespa.llm.test;version\=1.0.0,ai.vespa.llm;version\=1.0.0,ai.vespa.net;version\=1.0.0,ai.vespa.validation;version\=1.0.0,com.yahoo.binaryprefix;version\=1.0.0,com.yahoo.collections;version\=1.0.0,com.yahoo.compress;version\=1.0.0,com.yahoo.concurrent.classlock;version\=1.0.0,com.yahoo.concurrent.maintenance;version\=1.0.0,com.yahoo.concurrent;version\=1.0.0,com.yahoo.data.access.helpers;version\=1.0.0,com.yahoo.data.access.simple;version\=1.0.0,com.yahoo.data.access.slime;version\=1.0.0,com.yahoo.data.access;version\=1.0.0,com.yahoo.errorhandling;version\=1.0.0,com.yahoo.exception;version\=1.0.0,com.yahoo.geo;version\=1.0.0,com.yahoo.io.reader;version\=1.0.0,com.yahoo.io;version\=1.0.0,com.yahoo.javacc;version\=1.0.0,com.yahoo.lang;version\=1.0.0,com.yahoo.nativec;version\=1.0.0,com.yahoo.net;version\=1.0.0,com.yahoo.path;version\=1.0.0,com.yahoo.protect;version\=1.0.0,com.yahoo.reflection;version\=1.0.0,com.yahoo.slime;version\=1.0.0,com.yahoo.stream;version\=1.0.0,com.yahoo.system.execution;version\=1.0.0,com.yahoo.system;version\=1.0.0,com.yahoo.tensor.evaluation;version\=1.0.0,com.yahoo.tensor.functions;version\=1.0.0,com.yahoo.tensor.serialization;version\=1.0.0,com.yahoo.tensor;version\=1.0.0,com.yahoo.text.internal;version\=1.0.0,com.yahoo.text;version\=1.0.0,com.yahoo.time;version\=1.0.0,com.yahoo.transaction;version\=1.0.0,com.yahoo.vespa.objects;version\=1.0.0,com.yahoo.yolean.chain;version\=1.0.0,com.yahoo.yolean.concurrent;version\=1.0.0,com.yahoo.yolean.function;version\=1.0.0,com.yahoo.yolean.system;version\=1.0.0,com.yahoo.yolean.trace;version\=1.0.0,com.yahoo.yolean;version\=1.0.0, com.yahoo.log.event;version\=1.0.0,com.yahoo.log.impl;version\=1.0.0,com.yahoo.log;version\=1.0.0, javax.xml.bind;version\="2.3";uses\:\="javax.xml.bind.annotation.adapters,javax.xml.bind.attachment,javax.xml.namespace,javax.xml.stream,javax.xml.transform,javax.xml.validation,org.w3c.dom,org.xml.sax",javax.xml.bind.annotation;version\="2.3";uses\:\="javax.xml.bind,javax.xml.parsers,javax.xml.transform,javax.xml.transform.dom,org.w3c.dom",javax.xml.bind.annotation.adapters;version\="2.3",javax.xml.bind.attachment;version\="2.3";uses\:\="javax.activation",javax.xml.bind.helpers;version\="2.3";uses\:\="javax.xml.bind,javax.xml.bind.annotation.adapters,javax.xml.bind.attachment,javax.xml.stream,javax.xml.transform,javax.xml.validation,org.w3c.dom,org.xml.sax",javax.xml.bind.util;version\="2.3";uses\:\="javax.xml.bind,javax.xml.transform.sax", 
com.sun.istack;version\="3.0.5";uses\:\="javax.activation,javax.xml.stream,org.xml.sax,org.xml.sax.helpers",com.sun.istack.localization;version\="3.0.5",com.sun.istack.logging;version\="3.0.5",com.sun.xml.bind;uses\:\="org.xml.sax";version\="2.3.0",com.sun.xml.bind.annotation;version\="2.3.0",com.sun.xml.bind.api;uses\:\="org.xml.sax";version\="2.3.0",com.sun.xml.bind.api.impl;version\="2.3.0",com.sun.xml.bind.marshaller;uses\:\="javax.xml.parsers,org.w3c.dom,org.xml.sax,org.xml.sax.helpers";version\="2.3.0",com.sun.xml.bind.unmarshaller;uses\:\="com.sun.xml.bind.v2.runtime.unmarshaller,javax.xml.bind,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.util;version\="2.3.0",com.sun.xml.bind.v2;version\="2.3.0",com.sun.xml.bind.v2.model.annotation;uses\:\="com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.runtime";version\="2.3.0",com.sun.xml.bind.v2.model.core;uses\:\="com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.impl,com.sun.xml.bind.v2.model.nav,com.sun.xml.bind.v2.runtime,javax.activation,javax.xml.bind,javax.xml.bind.annotation,javax.xml.bind.annotation.adapters,javax.xml.namespace,javax.xml.transform";version\="2.3.0",com.sun.xml.bind.v2.model.impl;uses\:\="com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.nav";version\="2.3.0",com.sun.xml.bind.v2.model.nav;uses\:\="com.sun.xml.bind.v2.runtime";version\="2.3.0",com.sun.xml.bind.v2.model.util;uses\:\="javax.xml.namespace";version\="2.3.0",com.sun.xml.bind.v2.runtime;uses\:\="com.sun.xml.bind.v2.model.annotation,javax.activation,javax.xml.bind,javax.xml.bind.annotation.adapters";version\="2.3.0",com.sun.xml.bind.v2.runtime.unmarshaller;uses\:\="javax.xml.bind,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.schemagen.episode;uses\:\="com.sun.xml.txw2,com.sun.xml.txw2.annotation";version\="2.3.0",com.sun.xml.bind.v2.util;uses\:\="javax.xml.parsers,javax.xml.transform,javax.xml.validation,javax.xml.xpath";version\="2.3.0",com.sun.xml.txw2;uses\:\="com.sun.xml.txw2.output,javax.xml.namespace";version\="2.3.0",com.sun.xml.txw2.annotation;version\="2.3.0",com.sun.xml.txw2.output;uses\:\="com.sun.xml.txw2,javax.xml.namespace,javax.xml.stream,javax.xml.transform,javax.xml.transform.dom,javax.xml.transform.sax,javax.xml.transform.stream,org.w3c.dom,org.xml.sax,org.xml.sax.ext,org.xml.sax.helpers";version\="2.3.0", 
com.sun.xml.bind;uses\:\="com.sun.xml.bind.v2.runtime.reflect,javax.xml.bind,javax.xml.bind.annotation.adapters,javax.xml.datatype,javax.xml.namespace,javax.xml.stream,org.xml.sax";version\="2.3.0",com.sun.xml.bind.api;uses\:\="com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,javax.xml.bind,javax.xml.bind.attachment,javax.xml.namespace,javax.xml.stream,javax.xml.transform,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.marshaller;version\="2.3.0",com.sun.xml.bind.unmarshaller;uses\:\="org.xml.sax";version\="2.3.0",com.sun.xml.bind.util;uses\:\="com.sun.xml.bind,javax.xml.bind.helpers,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.annotation,javax.xml.bind";version\="2.3.0",com.sun.xml.bind.v2.bytecode;version\="2.3.0",com.sun.xml.bind.v2.model.annotation;uses\:\="com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.nav,com.sun.xml.bind.v2.runtime";version\="2.3.0",com.sun.xml.bind.v2.model.impl;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.nav,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,javax.activation,javax.xml.namespace";version\="2.3.0",com.sun.xml.bind.v2.model.runtime;uses\:\="com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.reflect,javax.xml.bind,javax.xml.namespace,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime;uses\:\="com.sun.istack,com.sun.xml.bind.api,com.sun.xml.bind.marshaller,com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime.output,com.sun.xml.bind.v2.runtime.property,com.sun.xml.bind.v2.runtime.unmarshaller,javax.activation,javax.xml.bind,javax.xml.bind.annotation,javax.xml.bind.annotation.adapters,javax.xml.bind.attachment,javax.xml.bind.helpers,javax.xml.namespace,javax.xml.stream,javax.xml.transform,javax.xml.transform.sax,javax.xml.validation,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.output;uses\:\="com.sun.xml.bind.marshaller,com.sun.xml.bind.v2.runtime,com.sun.xml.fastinfoset.stax,javax.xml.stream,org.jvnet.staxex,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.property;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.reflect,com.sun.xml.bind.v2.runtime.unmarshaller,com.sun.xml.bind.v2.util,javax.xml.namespace,javax.xml.stream,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.reflect;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.unmarshaller,javax.xml.bind,javax.xml.bind.annotation.adapters,javax.xml.stream,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.reflect.opt;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.reflect,javax.xml.stream,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.unmarshaller;uses\:\="com.sun.xml.bind,com.sun.xml.bind.api,com.sun.xml.bind.unmarshaller,com.sun.xml.bind.util,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.output,com.sun.xml.bind.v2.runtime.reflect,javax.activation,javax.xml.bind,javax.xml.bind.annotation,javax.xml.bind.annotation.adapters,javax.xml.bind.attachment,javax.xml.bind.helpers,javax.xml.names
pace,javax.xml.stream,javax.xml.transform,javax.xml.transform.sax,javax.xml.validation,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.schemagen;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.nav,com.sun.xml.txw2.output,javax.xml.bind,javax.xml.namespace";version\="2.3.0",com.sun.xml.bind.v2.schemagen.xmlschema;uses\:\="com.sun.xml.txw2,com.sun.xml.txw2.annotation,javax.xml.namespace";version\="2.3.0",com.sun.xml.bind.v2.util;uses\:\="com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.unmarshaller,javax.activation,javax.xml.namespace,javax.xml.transform.stream,org.xml.sax";version\="2.3.0", javax.activation;uses\:\="com.sun.activation.registries";version\="1.2",com.sun.activation.viewers;uses\:\="javax.activation";version\="1.2.0",com.sun.activation.registries;version\="1.2.0"
+exportPackages=org.osgi.framework; version\="1.10.0", org.osgi.framework.connect; version\="1.0.0", org.osgi.framework.dto; uses\:\="org.osgi.dto"; version\="1.8.0", org.osgi.framework.hooks.bundle; uses\:\="org.osgi.framework"; version\="1.1.0", org.osgi.framework.hooks.resolver; uses\:\="org.osgi.framework.wiring"; version\="1.0.0", org.osgi.framework.hooks.service; uses\:\="org.osgi.framework"; version\="1.1.0", org.osgi.framework.hooks.weaving; uses\:\="org.osgi.framework.wiring"; version\="1.1.0", org.osgi.framework.launch; uses\:\="org.osgi.framework"; version\="1.2.0", org.osgi.framework.namespace; uses\:\="org.osgi.resource"; version\="1.2.0", org.osgi.framework.startlevel; uses\:\="org.osgi.framework"; version\="1.0.0", org.osgi.framework.startlevel.dto; uses\:\="org.osgi.dto"; version\="1.0.0", org.osgi.framework.wiring; uses\:\="org.osgi.framework,org.osgi.resource"; version\="1.2.0", org.osgi.framework.wiring.dto; uses\:\="org.osgi.dto,org.osgi.resource.dto"; version\="1.3.0", org.osgi.resource; version\="1.0.1", org.osgi.resource.dto; uses\:\="org.osgi.dto"; version\="1.0.1", org.osgi.service.packageadmin; uses\:\="org.osgi.framework"; version\="1.2.1", org.osgi.service.startlevel; uses\:\="org.osgi.framework"; version\="1.1.1", org.osgi.service.url; version\="1.0.1", org.osgi.service.resolver; uses\:\="org.osgi.resource"; version\="1.1.1", org.osgi.util.tracker; uses\:\="org.osgi.framework"; version\="1.5.3", org.osgi.dto; version\="1.1.1", org.osgi.service.condition; version\="1.0.0", java.util.jar; version\="0.0.0.JavaSE_017", java.nio; version\="0.0.0.JavaSE_017", java.nio.file.spi; version\="0.0.0.JavaSE_017", java.security; version\="0.0.0.JavaSE_017", java.util; version\="0.0.0.JavaSE_017", javax.crypto.interfaces; version\="0.0.0.JavaSE_017", java.nio.charset.spi; version\="0.0.0.JavaSE_017", java.util.concurrent; version\="0.0.0.JavaSE_017", javax.security.auth.spi; version\="0.0.0.JavaSE_017", java.lang.annotation; version\="0.0.0.JavaSE_017", javax.security.cert; version\="0.0.0.JavaSE_017", java.net; version\="0.0.0.JavaSE_017", java.util.spi; version\="0.0.0.JavaSE_017", java.io; version\="0.0.0.JavaSE_017", java.nio.charset; version\="0.0.0.JavaSE_017", java.time.zone; version\="0.0.0.JavaSE_017", javax.crypto; version\="0.0.0.JavaSE_017", java.time.chrono; version\="0.0.0.JavaSE_017", java.nio.channels; version\="0.0.0.JavaSE_017", java.security.spec; version\="0.0.0.JavaSE_017", java.security.cert; version\="0.0.0.JavaSE_017", java.util.concurrent.atomic; version\="0.0.0.JavaSE_017", java.nio.file; version\="0.0.0.JavaSE_017", java.math; version\="0.0.0.JavaSE_017", java.nio.channels.spi; version\="0.0.0.JavaSE_017", java.text.spi; version\="0.0.0.JavaSE_017", java.security.interfaces; version\="0.0.0.JavaSE_017", java.lang.constant; version\="0.0.0.JavaSE_017", javax.net.ssl; version\="0.0.0.JavaSE_017", javax.security.auth.login; version\="0.0.0.JavaSE_017", javax.security.auth.callback; version\="0.0.0.JavaSE_017", java.lang.reflect; version\="0.0.0.JavaSE_017", javax.security.auth.x500; version\="0.0.0.JavaSE_017", javax.net; version\="0.0.0.JavaSE_017", java.util.function; version\="0.0.0.JavaSE_017", java.lang.runtime; version\="0.0.0.JavaSE_017", java.lang; version\="0.0.0.JavaSE_017", java.time; version\="0.0.0.JavaSE_017", java.util.stream; version\="0.0.0.JavaSE_017", javax.crypto.spec; version\="0.0.0.JavaSE_017", java.text; version\="0.0.0.JavaSE_017", java.util.random; version\="0.0.0.JavaSE_017", java.nio.file.attribute; 
version\="0.0.0.JavaSE_017", java.util.zip; version\="0.0.0.JavaSE_017", java.time.temporal; version\="0.0.0.JavaSE_017", java.util.concurrent.locks; version\="0.0.0.JavaSE_017", java.time.format; version\="0.0.0.JavaSE_017", java.lang.invoke; version\="0.0.0.JavaSE_017", java.lang.module; version\="0.0.0.JavaSE_017", java.net.spi; version\="0.0.0.JavaSE_017", java.util.regex; version\="0.0.0.JavaSE_017", java.lang.ref; version\="0.0.0.JavaSE_017", javax.security.auth; version\="0.0.0.JavaSE_017", javax.lang.model.element; version\="0.0.0.JavaSE_017", javax.annotation.processing; version\="0.0.0.JavaSE_017", javax.lang.model; version\="0.0.0.JavaSE_017", javax.lang.model.util; version\="0.0.0.JavaSE_017", javax.lang.model.type; version\="0.0.0.JavaSE_017", javax.tools; version\="0.0.0.JavaSE_017", java.awt.datatransfer; version\="0.0.0.JavaSE_017", java.awt.event; version\="0.0.0.JavaSE_017", javax.accessibility; version\="0.0.0.JavaSE_017", javax.swing.plaf.nimbus; version\="0.0.0.JavaSE_017", javax.print; version\="0.0.0.JavaSE_017", javax.print.attribute; version\="0.0.0.JavaSE_017", javax.sound.sampled; version\="0.0.0.JavaSE_017", javax.imageio.event; version\="0.0.0.JavaSE_017", javax.swing.filechooser; version\="0.0.0.JavaSE_017", javax.swing.plaf; version\="0.0.0.JavaSE_017", javax.swing.undo; version\="0.0.0.JavaSE_017", javax.swing.plaf.basic; version\="0.0.0.JavaSE_017", javax.swing.text; version\="0.0.0.JavaSE_017", java.awt.dnd; version\="0.0.0.JavaSE_017", javax.sound.midi; version\="0.0.0.JavaSE_017", java.applet; version\="0.0.0.JavaSE_017", java.awt.im.spi; version\="0.0.0.JavaSE_017", javax.imageio; version\="0.0.0.JavaSE_017", java.awt.font; version\="0.0.0.JavaSE_017", javax.swing.text.rtf; version\="0.0.0.JavaSE_017", javax.swing.text.html.parser; version\="0.0.0.JavaSE_017", java.beans; version\="0.0.0.JavaSE_017", javax.swing.plaf.synth; version\="0.0.0.JavaSE_017", java.awt.desktop; version\="0.0.0.JavaSE_017", javax.swing.event; version\="0.0.0.JavaSE_017", javax.imageio.stream; version\="0.0.0.JavaSE_017", java.awt; version\="0.0.0.JavaSE_017", java.beans.beancontext; version\="0.0.0.JavaSE_017", javax.swing.plaf.metal; version\="0.0.0.JavaSE_017", javax.print.event; version\="0.0.0.JavaSE_017", java.awt.im; version\="0.0.0.JavaSE_017", javax.swing.plaf.multi; version\="0.0.0.JavaSE_017", java.awt.image.renderable; version\="0.0.0.JavaSE_017", javax.swing; version\="0.0.0.JavaSE_017", javax.swing.colorchooser; version\="0.0.0.JavaSE_017", javax.print.attribute.standard; version\="0.0.0.JavaSE_017", javax.sound.midi.spi; version\="0.0.0.JavaSE_017", javax.swing.table; version\="0.0.0.JavaSE_017", javax.imageio.metadata; version\="0.0.0.JavaSE_017", java.awt.image; version\="0.0.0.JavaSE_017", java.awt.print; version\="0.0.0.JavaSE_017", javax.imageio.plugins.tiff; version\="0.0.0.JavaSE_017", javax.swing.tree; version\="0.0.0.JavaSE_017", javax.imageio.plugins.jpeg; version\="0.0.0.JavaSE_017", java.awt.geom; version\="0.0.0.JavaSE_017", java.awt.color; version\="0.0.0.JavaSE_017", javax.imageio.plugins.bmp; version\="0.0.0.JavaSE_017", javax.sound.sampled.spi; version\="0.0.0.JavaSE_017", javax.swing.border; version\="0.0.0.JavaSE_017", javax.imageio.spi; version\="0.0.0.JavaSE_017", javax.swing.text.html; version\="0.0.0.JavaSE_017", java.lang.instrument; version\="0.0.0.JavaSE_017", java.util.logging; version\="0.0.0.JavaSE_017", java.lang.management; version\="0.0.0.JavaSE_017", javax.management.openmbean; version\="0.0.0.JavaSE_017", javax.management.loading; 
version\="0.0.0.JavaSE_017", javax.management.relation; version\="0.0.0.JavaSE_017", javax.management; version\="0.0.0.JavaSE_017", javax.management.timer; version\="0.0.0.JavaSE_017", javax.management.modelmbean; version\="0.0.0.JavaSE_017", javax.management.monitor; version\="0.0.0.JavaSE_017", javax.management.remote; version\="0.0.0.JavaSE_017", javax.management.remote.rmi; version\="0.0.0.JavaSE_017", javax.naming; version\="0.0.0.JavaSE_017", javax.naming.ldap.spi; version\="0.0.0.JavaSE_017", javax.naming.event; version\="0.0.0.JavaSE_017", javax.naming.directory; version\="0.0.0.JavaSE_017", javax.naming.ldap; version\="0.0.0.JavaSE_017", javax.naming.spi; version\="0.0.0.JavaSE_017", java.net.http; version\="0.0.0.JavaSE_017", java.util.prefs; version\="0.0.0.JavaSE_017", java.rmi.registry; version\="0.0.0.JavaSE_017", java.rmi.server; version\="0.0.0.JavaSE_017", java.rmi; version\="0.0.0.JavaSE_017", java.rmi.dgc; version\="0.0.0.JavaSE_017", javax.rmi.ssl; version\="0.0.0.JavaSE_017", javax.script; version\="0.0.0.JavaSE_017", org.ietf.jgss; version\="0.0.0.JavaSE_017", javax.security.auth.kerberos; version\="0.0.0.JavaSE_017", javax.security.sasl; version\="0.0.0.JavaSE_017", javax.smartcardio; version\="0.0.0.JavaSE_017", javax.sql; version\="0.0.0.JavaSE_017", java.sql; version\="0.0.0.JavaSE_017", javax.sql.rowset; version\="0.0.0.JavaSE_017", javax.sql.rowset.serial; version\="0.0.0.JavaSE_017", javax.sql.rowset.spi; version\="0.0.0.JavaSE_017", javax.transaction.xa; version\="0.0.0.JavaSE_017", javax.xml.xpath; version\="0.0.0.JavaSE_017", javax.xml.transform; version\="0.0.0.JavaSE_017", org.xml.sax; version\="0.0.0.JavaSE_017", javax.xml.stream; version\="0.0.0.JavaSE_017", javax.xml.stream.events; version\="0.0.0.JavaSE_017", org.w3c.dom.traversal; version\="0.0.0.JavaSE_017", javax.xml.catalog; version\="0.0.0.JavaSE_017", javax.xml.datatype; version\="0.0.0.JavaSE_017", javax.xml.transform.sax; version\="0.0.0.JavaSE_017", javax.xml; version\="0.0.0.JavaSE_017", org.xml.sax.ext; version\="0.0.0.JavaSE_017", javax.xml.parsers; version\="0.0.0.JavaSE_017", javax.xml.validation; version\="0.0.0.JavaSE_017", javax.xml.transform.dom; version\="0.0.0.JavaSE_017", javax.xml.transform.stream; version\="0.0.0.JavaSE_017", org.w3c.dom; version\="0.0.0.JavaSE_017", org.w3c.dom.bootstrap; version\="0.0.0.JavaSE_017", org.w3c.dom.views; version\="0.0.0.JavaSE_017", org.xml.sax.helpers; version\="0.0.0.JavaSE_017", javax.xml.transform.stax; version\="0.0.0.JavaSE_017", javax.xml.namespace; version\="0.0.0.JavaSE_017", javax.xml.stream.util; version\="0.0.0.JavaSE_017", org.w3c.dom.ls; version\="0.0.0.JavaSE_017", org.w3c.dom.ranges; version\="0.0.0.JavaSE_017", org.w3c.dom.events; version\="0.0.0.JavaSE_017", javax.xml.crypto.dom; version\="0.0.0.JavaSE_017", javax.xml.crypto.dsig.dom; version\="0.0.0.JavaSE_017", javax.xml.crypto.dsig.keyinfo; version\="0.0.0.JavaSE_017", javax.xml.crypto.dsig.spec; version\="0.0.0.JavaSE_017", javax.xml.crypto.dsig; version\="0.0.0.JavaSE_017", javax.xml.crypto; version\="0.0.0.JavaSE_017", com.sun.java.accessibility.util; version\="0.0.0.JavaSE_017", com.sun.tools.attach.spi; version\="0.0.0.JavaSE_017", com.sun.tools.attach; version\="0.0.0.JavaSE_017", com.sun.source.doctree; version\="0.0.0.JavaSE_017", com.sun.tools.javac; version\="0.0.0.JavaSE_017", com.sun.source.util; version\="0.0.0.JavaSE_017", com.sun.source.tree; version\="0.0.0.JavaSE_017", jdk.dynalink.linker.support; version\="0.0.0.JavaSE_017", jdk.dynalink.beans; 
version\="0.0.0.JavaSE_017", jdk.dynalink.linker; version\="0.0.0.JavaSE_017", jdk.dynalink; version\="0.0.0.JavaSE_017", jdk.dynalink.support; version\="0.0.0.JavaSE_017", com.sun.net.httpserver.spi; version\="0.0.0.JavaSE_017", com.sun.net.httpserver; version\="0.0.0.JavaSE_017", jdk.security.jarsigner; version\="0.0.0.JavaSE_017", com.sun.jarsigner; version\="0.0.0.JavaSE_017", jdk.javadoc.doclet; version\="0.0.0.JavaSE_017", com.sun.tools.jconsole; version\="0.0.0.JavaSE_017", com.sun.jdi.event; version\="0.0.0.JavaSE_017", com.sun.jdi.connect; version\="0.0.0.JavaSE_017", com.sun.jdi.request; version\="0.0.0.JavaSE_017", com.sun.jdi; version\="0.0.0.JavaSE_017", com.sun.jdi.connect.spi; version\="0.0.0.JavaSE_017", jdk.jfr; version\="0.0.0.JavaSE_017", jdk.jfr.consumer; version\="0.0.0.JavaSE_017", jdk.jshell.execution; version\="0.0.0.JavaSE_017", jdk.jshell; version\="0.0.0.JavaSE_017", jdk.jshell.tool; version\="0.0.0.JavaSE_017", jdk.jshell.spi; version\="0.0.0.JavaSE_017", netscape.javascript; version\="0.0.0.JavaSE_017", com.sun.management; version\="0.0.0.JavaSE_017", jdk.management.jfr; version\="0.0.0.JavaSE_017", jdk.nio; version\="0.0.0.JavaSE_017", jdk.net; version\="0.0.0.JavaSE_017", jdk.nio.mapmode; version\="0.0.0.JavaSE_017", com.sun.nio.sctp; version\="0.0.0.JavaSE_017", com.sun.security.auth.module; version\="0.0.0.JavaSE_017", com.sun.security.auth.callback; version\="0.0.0.JavaSE_017", com.sun.security.auth; version\="0.0.0.JavaSE_017", com.sun.security.auth.login; version\="0.0.0.JavaSE_017", com.sun.security.jgss; version\="0.0.0.JavaSE_017", sun.misc; version\="0.0.0.JavaSE_017", sun.reflect; version\="0.0.0.JavaSE_017", com.sun.nio.file; version\="0.0.0.JavaSE_017", jdk.swing.interop; version\="0.0.0.JavaSE_017", org.w3c.dom.html; version\="0.0.0.JavaSE_017", org.w3c.dom.stylesheets; version\="0.0.0.JavaSE_017", org.w3c.dom.css; version\="0.0.0.JavaSE_017", org.w3c.dom.xpath; version\="0.0.0.JavaSE_017", com.yahoo.jdisc, com.yahoo.jdisc.application, com.yahoo.jdisc.handler, com.yahoo.jdisc.service, com.yahoo.jdisc.statistics, com.yahoo.jdisc.refcount, javax.inject;version\=1.0.0, org.aopalliance.intercept, org.aopalliance.aop, 
com.google.common.annotations;version\="32.1.2",com.google.common.base;version\="32.1.2";uses\:\="javax.annotation",com.google.common.cache;version\="32.1.2";uses\:\="com.google.common.base,com.google.common.collect,com.google.common.util.concurrent,javax.annotation",com.google.common.collect;version\="32.1.2";uses\:\="com.google.common.base,javax.annotation",com.google.common.escape;version\="32.1.2";uses\:\="com.google.common.base,javax.annotation",com.google.common.eventbus;version\="32.1.2",com.google.common.graph;version\="32.1.2";uses\:\="com.google.common.collect,javax.annotation",com.google.common.hash;version\="32.1.2";uses\:\="com.google.common.base,javax.annotation",com.google.common.html;version\="32.1.2";uses\:\="com.google.common.escape",com.google.common.io;version\="32.1.2";uses\:\="com.google.common.base,com.google.common.collect,com.google.common.graph,com.google.common.hash,javax.annotation",com.google.common.math;version\="32.1.2";uses\:\="javax.annotation",com.google.common.net;version\="32.1.2";uses\:\="com.google.common.base,com.google.common.collect,com.google.common.escape,javax.annotation",com.google.common.primitives;version\="32.1.2";uses\:\="com.google.common.base,javax.annotation",com.google.common.reflect;version\="32.1.2";uses\:\="com.google.common.collect,com.google.common.io,javax.annotation",com.google.common.util.concurrent;version\="32.1.2";uses\:\="com.google.common.base,com.google.common.collect,com.google.common.util.concurrent.internal,javax.annotation",com.google.common.xml;version\="32.1.2";uses\:\="com.google.common.escape", com.google.inject;version\="1.4",com.google.inject.binder;version\="1.4",com.google.inject.matcher;version\="1.4",com.google.inject.multibindings;version\="1.4",com.google.inject.name;version\="1.4",com.google.inject.spi;version\="1.4",com.google.inject.util;version\="1.4", org.slf4j;version\=1.7.32, org.slf4j.spi;version\=1.7.32, org.slf4j.helpers;version\=1.7.32, org.slf4j.event;version\=1.7.32, org.slf4j.impl;version\=1.7.32, org.apache.commons.logging;version\=1.2, org.apache.commons.logging.impl;version\=1.2, com.sun.jna;version\=5.11.0, com.sun.jna.ptr;version\=5.11.0, com.sun.jna.win32;version\=5.11.0, org.apache.log4j;version\=1.2.17,org.apache.log4j.helpers;version\=1.2.17,org.apache.log4j.spi;version\=1.2.17,org.apache.log4j.xml;version\=1.2.17, com.yahoo.component.annotation;version\="1.0.0", com.yahoo.config;version\=1.0.0, com.yahoo.vespa.defaults;version\=1.0.0, 
ai.vespa.http;version\=1.0.0,ai.vespa.llm.client.openai;version\=1.0.0,ai.vespa.llm.completion;version\=1.0.0,ai.vespa.llm.test;version\=1.0.0,ai.vespa.llm;version\=1.0.0,ai.vespa.net;version\=1.0.0,ai.vespa.validation;version\=1.0.0,com.yahoo.binaryprefix;version\=1.0.0,com.yahoo.collections;version\=1.0.0,com.yahoo.compress;version\=1.0.0,com.yahoo.concurrent.classlock;version\=1.0.0,com.yahoo.concurrent.maintenance;version\=1.0.0,com.yahoo.concurrent;version\=1.0.0,com.yahoo.data.access.helpers;version\=1.0.0,com.yahoo.data.access.simple;version\=1.0.0,com.yahoo.data.access.slime;version\=1.0.0,com.yahoo.data.access;version\=1.0.0,com.yahoo.errorhandling;version\=1.0.0,com.yahoo.exception;version\=1.0.0,com.yahoo.geo;version\=1.0.0,com.yahoo.io.reader;version\=1.0.0,com.yahoo.io;version\=1.0.0,com.yahoo.javacc;version\=1.0.0,com.yahoo.lang;version\=1.0.0,com.yahoo.nativec;version\=1.0.0,com.yahoo.net;version\=1.0.0,com.yahoo.path;version\=1.0.0,com.yahoo.protect;version\=1.0.0,com.yahoo.reflection;version\=1.0.0,com.yahoo.slime;version\=1.0.0,com.yahoo.stream;version\=1.0.0,com.yahoo.system.execution;version\=1.0.0,com.yahoo.system;version\=1.0.0,com.yahoo.tensor.evaluation;version\=1.0.0,com.yahoo.tensor.functions;version\=1.0.0,com.yahoo.tensor.serialization;version\=1.0.0,com.yahoo.tensor;version\=1.0.0,com.yahoo.text.internal;version\=1.0.0,com.yahoo.text;version\=1.0.0,com.yahoo.time;version\=1.0.0,com.yahoo.transaction;version\=1.0.0,com.yahoo.vespa.objects;version\=1.0.0,com.yahoo.yolean.chain;version\=1.0.0,com.yahoo.yolean.concurrent;version\=1.0.0,com.yahoo.yolean.function;version\=1.0.0,com.yahoo.yolean.system;version\=1.0.0,com.yahoo.yolean.trace;version\=1.0.0,com.yahoo.yolean;version\=1.0.0, com.yahoo.log.event;version\=1.0.0,com.yahoo.log.impl;version\=1.0.0,com.yahoo.log;version\=1.0.0, javax.xml.bind;version\="2.3";uses\:\="javax.xml.bind.annotation.adapters,javax.xml.bind.attachment,javax.xml.namespace,javax.xml.stream,javax.xml.transform,javax.xml.validation,org.w3c.dom,org.xml.sax",javax.xml.bind.annotation;version\="2.3";uses\:\="javax.xml.bind,javax.xml.parsers,javax.xml.transform,javax.xml.transform.dom,org.w3c.dom",javax.xml.bind.annotation.adapters;version\="2.3",javax.xml.bind.attachment;version\="2.3";uses\:\="javax.activation",javax.xml.bind.helpers;version\="2.3";uses\:\="javax.xml.bind,javax.xml.bind.annotation.adapters,javax.xml.bind.attachment,javax.xml.stream,javax.xml.transform,javax.xml.validation,org.w3c.dom,org.xml.sax",javax.xml.bind.util;version\="2.3";uses\:\="javax.xml.bind,javax.xml.transform.sax", 
com.sun.istack;version\="3.0.5";uses\:\="javax.activation,javax.xml.stream,org.xml.sax,org.xml.sax.helpers",com.sun.istack.localization;version\="3.0.5",com.sun.istack.logging;version\="3.0.5",com.sun.xml.bind;uses\:\="org.xml.sax";version\="2.3.0",com.sun.xml.bind.annotation;version\="2.3.0",com.sun.xml.bind.api;uses\:\="org.xml.sax";version\="2.3.0",com.sun.xml.bind.api.impl;version\="2.3.0",com.sun.xml.bind.marshaller;uses\:\="javax.xml.parsers,org.w3c.dom,org.xml.sax,org.xml.sax.helpers";version\="2.3.0",com.sun.xml.bind.unmarshaller;uses\:\="com.sun.xml.bind.v2.runtime.unmarshaller,javax.xml.bind,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.util;version\="2.3.0",com.sun.xml.bind.v2;version\="2.3.0",com.sun.xml.bind.v2.model.annotation;uses\:\="com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.runtime";version\="2.3.0",com.sun.xml.bind.v2.model.core;uses\:\="com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.impl,com.sun.xml.bind.v2.model.nav,com.sun.xml.bind.v2.runtime,javax.activation,javax.xml.bind,javax.xml.bind.annotation,javax.xml.bind.annotation.adapters,javax.xml.namespace,javax.xml.transform";version\="2.3.0",com.sun.xml.bind.v2.model.impl;uses\:\="com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.nav";version\="2.3.0",com.sun.xml.bind.v2.model.nav;uses\:\="com.sun.xml.bind.v2.runtime";version\="2.3.0",com.sun.xml.bind.v2.model.util;uses\:\="javax.xml.namespace";version\="2.3.0",com.sun.xml.bind.v2.runtime;uses\:\="com.sun.xml.bind.v2.model.annotation,javax.activation,javax.xml.bind,javax.xml.bind.annotation.adapters";version\="2.3.0",com.sun.xml.bind.v2.runtime.unmarshaller;uses\:\="javax.xml.bind,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.schemagen.episode;uses\:\="com.sun.xml.txw2,com.sun.xml.txw2.annotation";version\="2.3.0",com.sun.xml.bind.v2.util;uses\:\="javax.xml.parsers,javax.xml.transform,javax.xml.validation,javax.xml.xpath";version\="2.3.0",com.sun.xml.txw2;uses\:\="com.sun.xml.txw2.output,javax.xml.namespace";version\="2.3.0",com.sun.xml.txw2.annotation;version\="2.3.0",com.sun.xml.txw2.output;uses\:\="com.sun.xml.txw2,javax.xml.namespace,javax.xml.stream,javax.xml.transform,javax.xml.transform.dom,javax.xml.transform.sax,javax.xml.transform.stream,org.w3c.dom,org.xml.sax,org.xml.sax.ext,org.xml.sax.helpers";version\="2.3.0", 
com.sun.xml.bind;uses\:\="com.sun.xml.bind.v2.runtime.reflect,javax.xml.bind,javax.xml.bind.annotation.adapters,javax.xml.datatype,javax.xml.namespace,javax.xml.stream,org.xml.sax";version\="2.3.0",com.sun.xml.bind.api;uses\:\="com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,javax.xml.bind,javax.xml.bind.attachment,javax.xml.namespace,javax.xml.stream,javax.xml.transform,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.marshaller;version\="2.3.0",com.sun.xml.bind.unmarshaller;uses\:\="org.xml.sax";version\="2.3.0",com.sun.xml.bind.util;uses\:\="com.sun.xml.bind,javax.xml.bind.helpers,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.annotation,javax.xml.bind";version\="2.3.0",com.sun.xml.bind.v2.bytecode;version\="2.3.0",com.sun.xml.bind.v2.model.annotation;uses\:\="com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.nav,com.sun.xml.bind.v2.runtime";version\="2.3.0",com.sun.xml.bind.v2.model.impl;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.nav,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,javax.activation,javax.xml.namespace";version\="2.3.0",com.sun.xml.bind.v2.model.runtime;uses\:\="com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.reflect,javax.xml.bind,javax.xml.namespace,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime;uses\:\="com.sun.istack,com.sun.xml.bind.api,com.sun.xml.bind.marshaller,com.sun.xml.bind.v2.model.annotation,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime.output,com.sun.xml.bind.v2.runtime.property,com.sun.xml.bind.v2.runtime.unmarshaller,javax.activation,javax.xml.bind,javax.xml.bind.annotation,javax.xml.bind.annotation.adapters,javax.xml.bind.attachment,javax.xml.bind.helpers,javax.xml.namespace,javax.xml.stream,javax.xml.transform,javax.xml.transform.sax,javax.xml.validation,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.output;uses\:\="com.sun.xml.bind.marshaller,com.sun.xml.bind.v2.runtime,com.sun.xml.fastinfoset.stax,javax.xml.stream,org.jvnet.staxex,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.property;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.reflect,com.sun.xml.bind.v2.runtime.unmarshaller,com.sun.xml.bind.v2.util,javax.xml.namespace,javax.xml.stream,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.reflect;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.unmarshaller,javax.xml.bind,javax.xml.bind.annotation.adapters,javax.xml.stream,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.reflect.opt;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.runtime,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.reflect,javax.xml.stream,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.runtime.unmarshaller;uses\:\="com.sun.xml.bind,com.sun.xml.bind.api,com.sun.xml.bind.unmarshaller,com.sun.xml.bind.util,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.output,com.sun.xml.bind.v2.runtime.reflect,javax.activation,javax.xml.bind,javax.xml.bind.annotation,javax.xml.bind.annotation.adapters,javax.xml.bind.attachment,javax.xml.bind.helpers,javax.xml.names
pace,javax.xml.stream,javax.xml.transform,javax.xml.transform.sax,javax.xml.validation,org.w3c.dom,org.xml.sax";version\="2.3.0",com.sun.xml.bind.v2.schemagen;uses\:\="com.sun.xml.bind.api,com.sun.xml.bind.v2.model.core,com.sun.xml.bind.v2.model.nav,com.sun.xml.txw2.output,javax.xml.bind,javax.xml.namespace";version\="2.3.0",com.sun.xml.bind.v2.schemagen.xmlschema;uses\:\="com.sun.xml.txw2,com.sun.xml.txw2.annotation,javax.xml.namespace";version\="2.3.0",com.sun.xml.bind.v2.util;uses\:\="com.sun.xml.bind.v2.runtime,com.sun.xml.bind.v2.runtime.unmarshaller,javax.activation,javax.xml.namespace,javax.xml.transform.stream,org.xml.sax";version\="2.3.0", javax.activation;uses\:\="com.sun.activation.registries";version\="1.2",com.sun.activation.viewers;uses\:\="javax.activation";version\="1.2.0",com.sun.activation.registries;version\="1.2.0"
diff --git a/maven-plugins/allowed-maven-dependencies.txt b/maven-plugins/allowed-maven-dependencies.txt
index 1996320bec5..7ec1f645ad9 100644
--- a/maven-plugins/allowed-maven-dependencies.txt
+++ b/maven-plugins/allowed-maven-dependencies.txt
@@ -6,50 +6,49 @@ aopalliance:aopalliance:1.0
com.fasterxml.jackson.core:jackson-annotations:2.15.2
com.fasterxml.jackson.core:jackson-core:2.15.2
com.fasterxml.jackson.core:jackson-databind:2.15.2
-com.google.errorprone:error_prone_annotations:2.18.0
+com.github.luben:zstd-jni:1.5.5-5
+com.google.errorprone:error_prone_annotations:2.21.1
com.google.guava:failureaccess:1.0.1
-com.google.guava:guava:32.1.1-jre
-com.google.inject:guice:4.2.3:no_aop
+com.google.guava:guava:32.1.2-jre
+com.google.inject:guice:4.2.3
com.google.j2objc:j2objc-annotations:2.8
-commons-codec:commons-codec:1.15
-commons-io:commons-io:2.11.0
+commons-codec:commons-codec:1.16.0
+commons-io:commons-io:2.13.0
javax.annotation:javax.annotation-api:1.2
javax.inject:javax.inject:1
org.apache-extras.beanshell:bsh:2.0b6
-org.apache.commons:commons-collections4:4.2
+org.apache.commons:commons-collections4:4.4
org.apache.commons:commons-compress:1.23.0
-org.apache.commons:commons-lang3:3.12.0
-org.apache.maven.enforcer:enforcer-rules:3.3.0
-org.apache.maven:maven-archiver:3.6.0
-org.apache.maven:maven-artifact:3.8.7
-org.apache.maven:maven-builder-support:3.8.7
-org.apache.maven:maven-core:3.8.7
-org.apache.maven:maven-model:3.8.7
-org.apache.maven:maven-model-builder:3.8.7
-org.apache.maven:maven-plugin-api:3.8.7
-org.apache.maven:maven-repository-metadata:3.8.7
-org.apache.maven:maven-resolver-provider:3.8.7
-org.apache.maven:maven-settings:3.8.7
-org.apache.maven:maven-settings-builder:3.8.7
-org.apache.maven.enforcer:enforcer-api:3.3.0
-org.apache.maven.plugin-tools:maven-plugin-annotations:3.6.4
-org.apache.maven.plugins:maven-shade-plugin:3.4.1
-org.apache.maven.resolver:maven-resolver-api:1.6.3
-org.apache.maven.resolver:maven-resolver-impl:1.6.3
-org.apache.maven.resolver:maven-resolver-spi:1.6.3
-org.apache.maven.resolver:maven-resolver-util:1.6.3
-org.apache.maven.shared:maven-artifact-transfer:0.13.1
-org.apache.maven.shared:maven-common-artifact-filters:3.1.0
-org.apache.maven.shared:maven-dependency-tree:3.2.0
+org.apache.commons:commons-lang3:3.13.0
+org.apache.maven:maven-archiver:3.6.1
+org.apache.maven:maven-artifact:3.9.4
+org.apache.maven:maven-builder-support:3.9.4
+org.apache.maven:maven-core:3.9.4
+org.apache.maven:maven-model:3.9.4
+org.apache.maven:maven-model-builder:3.9.4
+org.apache.maven:maven-plugin-api:3.9.4
+org.apache.maven:maven-repository-metadata:3.9.4
+org.apache.maven:maven-resolver-provider:3.9.4
+org.apache.maven:maven-settings:3.9.4
+org.apache.maven:maven-settings-builder:3.9.4
+org.apache.maven.enforcer:enforcer-api:3.4.0
+org.apache.maven.enforcer:enforcer-rules:3.4.0
+org.apache.maven.plugin-tools:maven-plugin-annotations:3.9.0
+org.apache.maven.plugins:maven-shade-plugin:3.5.0
+org.apache.maven.resolver:maven-resolver-api:1.9.14
+org.apache.maven.resolver:maven-resolver-impl:1.9.14
+org.apache.maven.resolver:maven-resolver-named-locks:1.9.14
+org.apache.maven.resolver:maven-resolver-spi:1.9.14
+org.apache.maven.resolver:maven-resolver-util:1.9.14
+org.apache.maven.shared:maven-dependency-tree:3.2.1
org.apache.maven.shared:maven-shared-utils:3.3.4
-org.codehaus.plexus:plexus-archiver:4.4.0
+org.codehaus.plexus:plexus-archiver:4.8.0
org.codehaus.plexus:plexus-cipher:2.0
-org.codehaus.plexus:plexus-classworlds:2.6.0
+org.codehaus.plexus:plexus-classworlds:2.7.0
org.codehaus.plexus:plexus-component-annotations:2.1.0
org.codehaus.plexus:plexus-interpolation:1.26
-org.codehaus.plexus:plexus-io:3.4.0
+org.codehaus.plexus:plexus-io:3.4.1
org.codehaus.plexus:plexus-sec-dispatcher:2.0
-org.codehaus.plexus:plexus-utils:3.3.1
org.codehaus.plexus:plexus-utils:3.5.1
org.eclipse.aether:aether-api:1.0.0.v20140518
org.eclipse.aether:aether-util:1.0.0.v20140518
@@ -57,30 +56,28 @@ org.eclipse.sisu:org.eclipse.sisu.inject:0.3.5
org.eclipse.sisu:org.eclipse.sisu.plexus:0.3.5
org.iq80.snappy:snappy:0.4
org.jdom:jdom2:2.0.6.1
-org.ow2.asm:asm:9.3
-org.ow2.asm:asm-analysis:9.3
-org.ow2.asm:asm-commons:9.3
-org.ow2.asm:asm-tree:9.3
-org.slf4j:slf4j-api:1.7.32
-org.slf4j:slf4j-simple:1.7.22
+org.ow2.asm:asm:9.5
+org.ow2.asm:asm-commons:9.5
+org.ow2.asm:asm-tree:9.5
+org.slf4j:slf4j-api:1.7.36
org.tukaani:xz:1.9
-org.twdata.maven:mojo-executor:2.3.0
+org.twdata.maven:mojo-executor:2.4.0
org.vafer:jdependency:2.8.0
#[test-only]
# Contains dependencies that are used exclusively in 'test' scope
junit:junit:4.13.2
-net.bytebuddy:byte-buddy:1.11.19
-net.bytebuddy:byte-buddy-agent:1.11.19
+net.bytebuddy:byte-buddy:1.14.6
+net.bytebuddy:byte-buddy-agent:1.14.6
org.apiguardian:apiguardian-api:1.1.2
-org.hamcrest:hamcrest-all:1.3
-org.hamcrest:hamcrest-core:1.3
+org.hamcrest:hamcrest:2.2
+org.hamcrest:hamcrest-core:2.2
org.junit.jupiter:junit-jupiter:5.8.1
org.junit.jupiter:junit-jupiter-api:5.8.1
org.junit.jupiter:junit-jupiter-engine:5.8.1
org.junit.jupiter:junit-jupiter-params:5.8.1
org.junit.platform:junit-platform-commons:1.8.1
org.junit.platform:junit-platform-engine:1.8.1
-org.mockito:mockito-core:4.0.0
-org.objenesis:objenesis:3.2
-org.opentest4j:opentest4j:1.2.0
+org.mockito:mockito-core:5.5.0
+org.objenesis:objenesis:3.3
+org.opentest4j:opentest4j:1.3.0
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java b/messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java
index ad06aed43ec..53fd49dcf58 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java
@@ -34,7 +34,7 @@ public final class SourceSession implements ReplyHandler, MessageBus.SendBlocked
void dec() { count --; }
boolean enough() { return count > 5; }
}
- private static ThreadLocal<Counter> sendBlockedRecurseLevel = ThreadLocal.withInitial(Counter::new);
+ private static final ThreadLocal<Counter> sendBlockedRecurseLevel = ThreadLocal.withInitial(Counter::new);
/**
* The default constructor requires values for all final member variables
@@ -139,13 +139,15 @@ public final class SourceSession implements ReplyHandler, MessageBus.SendBlocked
if (closed) {
return new Result(ErrorCode.SEND_QUEUE_CLOSED, "Source session is closed.");
}
- if (throttlePolicy != null && ! throttlePolicy.canSend(message, pendingCount)) {
- return new Result(ErrorCode.SEND_QUEUE_FULL,
- "Too much pending data (" + pendingCount + " messages).");
- }
- message.pushHandler(replyHandler);
if (throttlePolicy != null) {
+ if (! throttlePolicy.canSend(message, pendingCount)) {
+ return new Result(ErrorCode.SEND_QUEUE_FULL,
+ "Too much pending data (" + pendingCount + " messages).");
+ }
+ message.pushHandler(replyHandler);
throttlePolicy.processMessage(message);
+ } else {
+ message.pushHandler(replyHandler);
}
++pendingCount;
}
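
For readability, a minimal sketch of the control flow this hunk establishes: the reply handler is now pushed onto the message only after the throttle policy has accepted it, so a send rejected with SEND_QUEUE_FULL no longer mutates the message. ThrottlePolicy, Message and Result below are simplified stand-ins, not the real com.yahoo.messagebus types.

// Simplified stand-in types illustrating the reordered send path.
final class ThrottledSendSketch {

    interface ThrottlePolicy {
        boolean canSend(Message message, int pendingCount);
        void processMessage(Message message);
    }

    static final class Message {
        Object handler;
        void pushHandler(Object replyHandler) { this.handler = replyHandler; }
    }

    record Result(int errorCode, String description) {
        static final Result ACCEPTED = new Result(0, "accepted");
    }

    private final ThrottlePolicy throttlePolicy;
    private final Object replyHandler = new Object();
    private int pendingCount = 0;

    ThrottledSendSketch(ThrottlePolicy throttlePolicy) { this.throttlePolicy = throttlePolicy; }

    Result send(Message message) {
        if (throttlePolicy != null) {
            if ( ! throttlePolicy.canSend(message, pendingCount))   // reject before touching the message
                return new Result(/* SEND_QUEUE_FULL */ 1, "Too much pending data (" + pendingCount + " messages).");
            message.pushHandler(replyHandler);
            throttlePolicy.processMessage(message);
        } else {
            message.pushHandler(replyHandler);
        }
        ++pendingCount;
        return Result.ACCEPTED;
    }
}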
diff --git a/messagebus/src/vespa/messagebus/network/rpctarget.cpp b/messagebus/src/vespa/messagebus/network/rpctarget.cpp
index 9c6ca9dff69..d7f3e77c6fd 100644
--- a/messagebus/src/vespa/messagebus/network/rpctarget.cpp
+++ b/messagebus/src/vespa/messagebus/network/rpctarget.cpp
@@ -5,8 +5,8 @@
namespace mbus {
-RPCTarget::RPCTarget(const string &spec, FRT_Supervisor &orb) :
- _lock(),
+RPCTarget::RPCTarget(const string &spec, FRT_Supervisor &orb, ctor_tag)
+ : _lock(),
_orb(orb),
_name(spec),
_target(*_orb.GetTarget(spec.c_str())),
@@ -48,6 +48,7 @@ RPCTarget::resolveVersion(duration timeout, RPCTarget::IVersionHandler &handler)
handler.handleVersion(_version.get());
} else if (shouldInvoke) {
FRT_RPCRequest *req = _orb.AllocRPCRequest();
+ req->getStash().create<SP>(shared_from_this());
req->SetMethodName("mbus.getVersion");
_target.InvokeAsync(req, vespalib::to_s(timeout), this);
}
@@ -67,8 +68,9 @@ RPCTarget::isValid() const
}
void
-RPCTarget::RequestDone(FRT_RPCRequest *req)
+RPCTarget::RequestDone(FRT_RPCRequest *raw_req)
{
+ auto req = vespalib::ref_counted<FRT_RPCRequest>::internal_attach(raw_req);
HandlerList handlers;
{
std::lock_guard guard(_lock);
@@ -94,7 +96,6 @@ RPCTarget::RequestDone(FRT_RPCRequest *req)
_state = (_version.get() ? VERSION_RESOLVED : VERSION_NOT_RESOLVED);
}
_cond.notify_all();
- req->internal_subref();
}
} // namespace mbus
diff --git a/messagebus/src/vespa/messagebus/network/rpctarget.h b/messagebus/src/vespa/messagebus/network/rpctarget.h
index fffffae64f7..77fcef5f48f 100644
--- a/messagebus/src/vespa/messagebus/network/rpctarget.h
+++ b/messagebus/src/vespa/messagebus/network/rpctarget.h
@@ -13,7 +13,7 @@ namespace mbus {
* target. Instances of this class are returned by {@link RPCService}, and
* cached by {@link RPCTargetPool}.
*/
-class RPCTarget : public FRT_IRequestWait {
+class RPCTarget : public FRT_IRequestWait, public std::enable_shared_from_this<RPCTarget> {
public:
/**
* Declares a version handler used when resolving the version of a target.
@@ -58,6 +58,7 @@ private:
Version_UP _version;
HandlerList _versionHandlers;
+ struct ctor_tag {};
public:
/**
* Convenience typedefs.
@@ -72,7 +73,10 @@ public:
* @param spec The connection spec of this target.
* @param orb The FRT supervisor to use when connecting to target.
*/
- RPCTarget(const string &name, FRT_Supervisor &orb);
+ RPCTarget(const string &name, FRT_Supervisor &orb, ctor_tag);
+ static SP create(const string &name, FRT_Supervisor &orb) {
+ return std::make_shared<RPCTarget>(name, orb, ctor_tag{});
+ }
/**
* Destructor. Subrefs the contained FRT target.
diff --git a/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp b/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp
index b403c65f863..db09b127114 100644
--- a/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp
+++ b/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp
@@ -97,7 +97,7 @@ RPCTargetPool::getTarget(FRT_Supervisor &orb, const RPCServiceAddress &address)
std::vector<RPCTarget::SP> targets;
targets.reserve(_numTargetsPerSpec);
for (size_t i(0); i < _numTargetsPerSpec; i++) {
- targets.push_back(std::make_shared<RPCTarget>(spec, orb));
+ targets.push_back(RPCTarget::create(spec, orb));
}
_targets.insert(TargetMap::value_type(spec, Entry(std::move(targets), currentTime)));
return _targets.find(spec)->second.getTarget(guard, currentTime);
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java
index ef23a5ad070..fc5c790db54 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasHandler.java
@@ -59,18 +59,18 @@ public class YamasHandler extends HttpHandlerBase {
@Override
public Optional<HttpResponse> doHandle(URI requestUri, Path apiPath, String consumer) {
if (apiPath.matches(V1_PATH)) return Optional.of(resourceListResponse(requestUri, List.of(VALUES_PATH, CONSUMERS_PATH)));
- if (apiPath.matches(VALUES_PATH)) return Optional.of(valuesResponse(consumer));
+ if (apiPath.matches(VALUES_PATH)) return Optional.of(valuesResponse(consumer, requestUri.getQuery()));
if (apiPath.matches(CONSUMERS_PATH)) return Optional.of(consumersResponse());
return Optional.empty();
}
- private HttpResponse valuesResponse(String consumer) {
+ private HttpResponse valuesResponse(String consumer, String query) {
try {
List<MetricsPacket> metrics = new ArrayList<>(consumer == null ? valuesFetcher.fetchAllMetrics() : valuesFetcher.fetch(consumer));
if (consumer == null || "Vespa".equalsIgnoreCase(consumer)) {
metrics.addAll(nodeMetricGatherer.gatherMetrics()); // TODO: Currently only add these metrics in this handler. Eventually should be included in all handlers
}
- return new YamasResponse(OK, metrics);
+ return new YamasResponse(OK, metrics, useJsonl(query));
} catch (JsonRenderingException e) {
return new ErrorResponse(INTERNAL_SERVER_ERROR, e.getMessage());
}
@@ -93,4 +93,8 @@ public class YamasHandler extends HttpHandlerBase {
};
}
+ private boolean useJsonl(String query) {
+ return query != null && query.contains("jsonl=true");
+ }
+
} \ No newline at end of file
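
A standalone illustration of the jsonl switch introduced above: the flag is a plain, null-safe substring match on the raw query string, so any query containing "jsonl=true" selects JSON Lines output. The class below is just a demonstration of that check, not part of the handler.

public class UseJsonlCheck {

    // Same check as the useJsonl(String) method added above.
    static boolean useJsonl(String query) {
        return query != null && query.contains("jsonl=true");
    }

    public static void main(String[] args) {
        System.out.println(useJsonl(null));                         // false
        System.out.println(useJsonl("consumer=Vespa"));             // false
        System.out.println(useJsonl("jsonl=true"));                 // true
        System.out.println(useJsonl("consumer=Vespa&jsonl=true"));  // true
    }
}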
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasResponse.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasResponse.java
index 6c94de49140..e838987133f 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasResponse.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/http/yamas/YamasResponse.java
@@ -16,10 +16,12 @@ import java.util.List;
public class YamasResponse extends HttpResponse {
private final List<MetricsPacket> metrics;
+ private boolean useJsonl;
- public YamasResponse(int code, List<MetricsPacket> metrics) {
+ public YamasResponse(int code, List<MetricsPacket> metrics, boolean useJsonl) {
super(code);
this.metrics = metrics;
+ this.useJsonl = useJsonl;
}
@Override
@@ -29,7 +31,10 @@ public class YamasResponse extends HttpResponse {
@Override
public void render(OutputStream outputStream) throws IOException {
- YamasJsonUtil.toJson(metrics, outputStream, false);
+ if (useJsonl)
+ YamasJsonUtil.toJsonl(metrics, outputStream, false);
+ else
+ YamasJsonUtil.toJson(metrics, outputStream, false);
}
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonUtil.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonUtil.java
index 5086846293b..39589e144e6 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonUtil.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonUtil.java
@@ -8,6 +8,7 @@ import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.StreamWriteFeature;
+import com.fasterxml.jackson.core.util.MinimalPrettyPrinter;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.ByteArrayOutputStream;
@@ -113,6 +114,15 @@ public class YamasJsonUtil {
generator.close();
}
+ public static void toJsonl(List<MetricsPacket> metrics, OutputStream outputStream, boolean addStatus) throws IOException {
+ JsonGenerator generator = factory.createGenerator(outputStream)
+ .setPrettyPrinter(new MinimalPrettyPrinter("\n"));
+ for (MetricsPacket metricsPacket : metrics) {
+ toJson(metricsPacket, generator, addStatus);
+ }
+ generator.close();
+ }
+
private static void toJson(MetricsPacket metric, JsonGenerator generator, boolean addStatus) throws IOException {
generator.writeStartObject();
if (addStatus) {
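
The new toJsonl relies on a jackson-core detail: MinimalPrettyPrinter takes a root value separator, so configuring it with "\n" makes consecutive root-level objects come out one per line with no wrapping array. A minimal sketch of just that mechanism, assuming only a jackson-core dependency and no Vespa types:

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.util.MinimalPrettyPrinter;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Demonstrates the JSON Lines trick used in toJsonl(): a newline root value
// separator puts each root-level object on its own line.
public class JsonlSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        JsonGenerator generator = new JsonFactory().createGenerator(out)
                .setPrettyPrinter(new MinimalPrettyPrinter("\n"));
        for (String name : new String[] {"cpu", "memory_rss"}) {
            generator.writeStartObject();
            generator.writeStringField("metric", name);
            generator.writeEndObject();
        }
        generator.close();
        System.out.println(out); // two objects, separated by a newline, no trailing newline
    }
}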
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusUtil.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusUtil.java
index c2d05a8636e..ecfdb978e29 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusUtil.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/metric/model/prometheus/PrometheusUtil.java
@@ -13,9 +13,6 @@ import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
-import static java.util.Collections.emptyList;
-import static java.util.Collections.singletonList;
-
/**
* @author yj-jtakagi
* @author gjoranv
@@ -52,7 +49,7 @@ public class PrometheusUtil {
} else {
sampleList = new ArrayList<>();
samples.put(metricName, sampleList);
- metricFamilySamples.add(new MetricFamilySamples(metricName, Collector.Type.UNTYPED, "", sampleList));
+ metricFamilySamples.add(new MetricFamilySamples(metricName, Collector.Type.UNKNOWN, "", sampleList));
}
sampleList.add(new Sample(metricName, labels, labelValues, metric.getValue().doubleValue(), packet.timestamp * 1000));
}
diff --git a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java
index 3da7aef0a12..9c8a49cfce8 100644
--- a/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java
+++ b/metrics-proxy/src/main/java/ai/vespa/metricsproxy/node/NodeMetricGatherer.java
@@ -42,13 +42,9 @@ public class NodeMetricGatherer {
}
public List<MetricsPacket> gatherMetrics() {
- FileWrapper fileWrapper = new FileWrapper();
List<MetricsPacket.Builder> metricPacketBuilders = new ArrayList<>();
- if (SystemPollerProvider.runningOnLinux()) {
- JsonNode packet = HostLifeGatherer.getHostLifePacket(fileWrapper);
- addObjectToBuilders(metricPacketBuilders, packet);
- }
+ addObjectToBuilders(metricPacketBuilders, HostLifeGatherer.getHostLifePacket());
return metricPacketBuilders.stream()
.map(metricPacketBuilder ->
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/yamas/YamasHandlerTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/yamas/YamasHandlerTest.java
index 346fc6a462b..1d198a40ba8 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/yamas/YamasHandlerTest.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/http/yamas/YamasHandlerTest.java
@@ -54,4 +54,11 @@ public class YamasHandlerTest extends HttpHandlerTestBase {
assertFalse(valuesResponse.contains("status_msg"));
}
+ @Test
+ public void allows_fetching_jsonl_metrics() {
+ assertTrue(valuesResponse.startsWith("{\"metrics\":[{\"timestamp\":"));
+ valuesResponse = testDriver.sendRequest(VALUES_URI + "?jsonl=true").readAll();
+ assertTrue(valuesResponse.startsWith("{\"timestamp\":"));
+ }
+
}
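
For reference, a consumer could fetch the new format with a plain java.net.http client. The host, port and /yamas/v1/values path below are assumptions for illustration based on the VALUES_PATH used by the handler; they are not stated in this diff and should be adjusted to the actual metrics-proxy endpoint in a given deployment.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Hypothetical client-side usage of the jsonl flag added above.
public class FetchJsonlMetrics {
    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        HttpRequest request = HttpRequest.newBuilder(
                URI.create("http://localhost:19092/yamas/v1/values?jsonl=true")).build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        // One JSON object per line instead of a single {"metrics":[...]} wrapper.
        response.body().lines().forEach(System.out::println);
    }
}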
diff --git a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonModelTest.java b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonModelTest.java
index 3e85166430d..559f6e8457a 100644
--- a/metrics-proxy/src/test/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonModelTest.java
+++ b/metrics-proxy/src/test/java/ai/vespa/metricsproxy/metric/model/json/YamasJsonModelTest.java
@@ -35,7 +35,7 @@ public class YamasJsonModelTest {
YamasJsonModel jsonModel = getYamasJsonModel("yamas-array.json");
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
- YamasResponse response = new YamasResponse(200, List.of(YamasJsonUtil.toMetricsPacketBuilder(jsonModel).build()));
+ YamasResponse response = new YamasResponse(200, List.of(YamasJsonUtil.toMetricsPacketBuilder(jsonModel).build()), false);
response.render(outputStream);
assertEquals(EXPECTED_JSON, outputStream.toString());
}
@@ -52,7 +52,7 @@ public class YamasJsonModelTest {
// Serialize and verify
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
- YamasResponse response = new YamasResponse(200, List.of(YamasJsonUtil.toMetricsPacketBuilder(jsonModel).build()));
+ YamasResponse response = new YamasResponse(200, List.of(YamasJsonUtil.toMetricsPacketBuilder(jsonModel).build()), false);
response.render(outputStream);
assertEquals(EXPECTED_JSON, outputStream.toString());
}
@@ -85,6 +85,22 @@ public class YamasJsonModelTest {
assertNull(jsonModel.routing);
}
+ @Test
+ public void creates_correct_jsonl() throws IOException {
+ YamasJsonModel jsonModel = getYamasJsonModel("yamas-array.json");
+ MetricsPacket packet = YamasJsonUtil.toMetricsPacketBuilder(jsonModel).build();
+ // Add packet twice to verify object delimiter
+ List<MetricsPacket> metricPackets = List.of(packet, packet);
+ try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
+ YamasResponse response = new YamasResponse(200, metricPackets, true);
+ response.render(outputStream);
+ assertEquals("""
+ {"timestamp":1400047900,"application":"vespa.searchnode","metrics":{"cpu":55.5555555555555,"memory_virt":22222222222,"memory_rss":5555555555},"dimensions":{"applicationName":"app","tenantName":"tenant","metrictype":"system","instance":"searchnode","applicationInstance":"default","clustername":"cluster"},"routing":{"yamas":{"namespaces":["Vespa"]}}}
+ {"timestamp":1400047900,"application":"vespa.searchnode","metrics":{"cpu":55.5555555555555,"memory_virt":22222222222,"memory_rss":5555555555},"dimensions":{"applicationName":"app","tenantName":"tenant","metrictype":"system","instance":"searchnode","applicationInstance":"default","clustername":"cluster"},"routing":{"yamas":{"namespaces":["Vespa"]}}}""",
+ outputStream.toString());
+ }
+ }
+
private YamasJsonModel getYamasJsonModel(String testFile) throws IOException {
String filename = getClass().getClassLoader().getResource(testFile).getFile();
BufferedReader reader = Files.newBufferedReader(Paths.get(filename));
diff --git a/metrics/pom.xml b/metrics/pom.xml
index e8303e5a01f..a3c045ec2f9 100644
--- a/metrics/pom.xml
+++ b/metrics/pom.xml
@@ -12,8 +12,14 @@
<packaging>jar</packaging>
<version>8-SNAPSHOT</version>
<name>metrics</name>
+
<dependencies>
<dependency>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>com.yahoo.vespa</groupId>
<artifactId>annotations</artifactId>
<version>${project.version}</version>
@@ -25,7 +31,18 @@
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-api</artifactId>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.junit.jupiter</groupId>
+ <artifactId>junit-jupiter-engine</artifactId>
+ <scope>test</scope>
+ </dependency>
</dependencies>
+
<build>
<plugins>
<plugin>
@@ -86,11 +103,9 @@
<artifactId>maven-install-plugin</artifactId>
</plugin>
<plugin>
- <!-- Remove when v2.1 is the default
- - it is required by maven-project-info-reports-plugin v2.2 -->
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-site-plugin</artifactId>
- <version>2.1</version>
+ <version>${maven-site-plugin.vespa.version}</version>
</plugin>
</plugins>
</build>
diff --git a/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java
index e515bbf9275..ac7ecfa124a 100644
--- a/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/ContainerMetrics.java
@@ -17,6 +17,7 @@ public enum ContainerMetrics implements VespaMetrics {
JDISC_GC_COUNT("jdisc.gc.count", Unit.OPERATION, "Number of JVM garbage collections done"),
JDISC_GC_MS("jdisc.gc.ms", Unit.MILLISECOND, "Time spent in JVM garbage collection"),
JDISC_JVM("jdisc.jvm", Unit.VERSION, "JVM runtime version"),
+ CPU("cpu", Unit.THREAD, "Container service CPU pressure"),
JDISC_MEMORY_MAPPINGS("jdisc.memory_mappings", Unit.OPERATION, "JDISC Memory mappings"),
JDISC_OPEN_FILE_DESCRIPTORS("jdisc.open_file_descriptors", Unit.ITEM, "JDISC Open file descriptors"),
@@ -28,7 +29,7 @@ public enum ContainerMetrics implements VespaMetrics {
JDISC_THREAD_POOL_MAX_ALLOWED_SIZE("jdisc.thread_pool.max_allowed_size", Unit.THREAD, "The maximum allowed number of threads in the pool"),
JDISC_THREAD_POOL_ACTIVE_THREADS("jdisc.thread_pool.active_threads", Unit.THREAD, "Number of threads that are active"),
- JDISC_DEACTIVATED_CONTAINERS("jdisc.deactivated_containers.total", Unit.ITEM, "JDISC Deactivated container instances"),
+ JDISC_DEACTIVATED_CONTAINERS_TOTAL("jdisc.deactivated_containers.total", Unit.ITEM, "JDISC Deactivated container instances"),
JDISC_DEACTIVATED_CONTAINERS_WITH_RETAINED_REFS("jdisc.deactivated_containers.with_retained_refs.last", Unit.ITEM, "JDISC Deactivated container nodes with retained refs"),
JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS("jdisc.application.failed_component_graphs", Unit.ITEM, "JDISC Application failed component graphs"),
JDISC_APPLICATION_COMPONENT_GRAPH_CREATION_TIME_MILLIS("jdisc.application.component_graph.creation_time_millis", Unit.MILLISECOND, "JDISC Application component graph creation time"),
@@ -45,7 +46,7 @@ public enum ContainerMetrics implements VespaMetrics {
JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT("jdisc.http.ssl.handshake.failure.missing_client_cert", Unit.OPERATION, "JDISC HTTP SSL Handshake failures due to missing client certificate"),
JDISC_HTTP_SSL_HANDSHAKE_FAILURE_EXPIRED_CLIENT_CERT("jdisc.http.ssl.handshake.failure.expired_client_cert", Unit.OPERATION, "JDISC HTTP SSL Handshake failures due to expired client certificate"),
JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INVALID_CLIENT_CERT("jdisc.http.ssl.handshake.failure.invalid_client_cert", Unit.OPERATION, "JDISC HTTP SSL Handshake failures due to invalid client certificate"),
- JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS("jdisc.http.ssl.handshake.failure.incompatible_protocols", Unit.OPERATION, "JDISC HTTP SSL Handshake failures due to inincompatible protocols"),
+ JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS("jdisc.http.ssl.handshake.failure.incompatible_protocols", Unit.OPERATION, "JDISC HTTP SSL Handshake failures due to incompatible protocols"),
JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS("jdisc.http.ssl.handshake.failure.incompatible_chifers", Unit.OPERATION, "JDISC HTTP SSL Handshake failures due to incompatible chifers"),
JDISC_HTTP_SSL_HANDSHAKE_FAILURE_CONNECTION_CLOSED("jdisc.http.ssl.handshake.failure.connection_closed", Unit.OPERATION, "JDISC HTTP SSL Handshake failures due to connection closed"),
JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN("jdisc.http.ssl.handshake.failure.unknown", Unit.OPERATION, "JDISC HTTP SSL Handshake failures for unknown reason"),
@@ -117,7 +118,7 @@ public enum ContainerMetrics implements VespaMetrics {
// SearchChain metrics
- PEAK_QPS("peak_qps", Unit.QUERY_PER_SECOND, "The highest number of qps for a second for this metrics shapshot"),
+ PEAK_QPS("peak_qps", Unit.QUERY_PER_SECOND, "The highest number of qps for a second for this metrics snapshot"),
SEARCH_CONNECTIONS("search_connections", Unit.CONNECTION, "Number of search connections"),
FEED_OPERATIONS("feed.operations", Unit.OPERATION, "Number of document feed operations"),
FEED_LATENCY("feed.latency", Unit.MILLISECOND, "Feed latency"),
@@ -125,9 +126,9 @@ public enum ContainerMetrics implements VespaMetrics {
QUERIES("queries", Unit.OPERATION, "Query volume"),
QUERY_CONTAINER_LATENCY("query_container_latency", Unit.MILLISECOND, "The query execution time consumed in the container"),
QUERY_LATENCY("query_latency", Unit.MILLISECOND, "The overall query latency as seen by the container"),
- QUERY_TIMEOUT("query_timeout", Unit.MILLISECOND, "The amount of time allowed for query execytion, from the client"),
+ QUERY_TIMEOUT("query_timeout", Unit.MILLISECOND, "The amount of time allowed for query execution, from the client"),
FAILED_QUERIES("failed_queries", Unit.OPERATION, "The number of failed queries"),
- DEGRADED_QUERIES("degraded_queries", Unit.OPERATION, "The number of degraded queries, e.g. due to some conent nodes not responding in time"),
+ DEGRADED_QUERIES("degraded_queries", Unit.OPERATION, "The number of degraded queries, e.g. due to some content nodes not responding in time"),
HITS_PER_QUERY("hits_per_query", Unit.HIT_PER_QUERY, "The number of hits returned"),
QUERY_HIT_OFFSET("query_hit_offset", Unit.HIT, "The offset for hits returned"),
DOCUMENTS_COVERED("documents_covered", Unit.DOCUMENT, "The combined number of documents considered during query evaluation"),
@@ -169,7 +170,7 @@ public enum ContainerMetrics implements VespaMetrics {
// Java (JRT) TLS metrics
JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES("jrt.transport.tls-certificate-verification-failures", Unit.FAILURE, "TLS certificate verification failures"),
JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES("jrt.transport.peer-authorization-failures", Unit.FAILURE, "TLS peer authorization failures"),
- JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED("jrt.transport.server.tls-connections-established", Unit.CONNECTION, "TLS server connections established"),
+ JRT_TRANSPORT_SERVER_TLS_CONNECTIONS_ESTABLISHED("jrt.transport.server.tls-connections-established", Unit.CONNECTION, "TLS server connections established"),
JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED("jrt.transport.client.tls-connections-established", Unit.CONNECTION, "TLS client connections established"),
JRT_TRANSPORT_SERVER_UNENCRYPTED_CONNECTIONS_ESTABLISHED("jrt.transport.server.unencrypted-connections-established", Unit.CONNECTION, "Unencrypted server connections established"),
JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED("jrt.transport.client.unencrypted-connections-established", Unit.CONNECTION, "Unencrypted client connections established"),
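
Note that the renames above (for example JDISC_DEACTIVATED_CONTAINERS to JDISC_DEACTIVATED_CONTAINERS_TOTAL) change only the Java constants; the emitted metric names stay the same. A small check, assuming the enum's baseName() accessor from the VespaMetrics interface:

import ai.vespa.metrics.ContainerMetrics;

// Illustrative only: prints the unchanged wire name of the renamed constant.
public class RenamedConstantCheck {
    public static void main(String[] args) {
        System.out.println(ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_TOTAL.baseName()); // jdisc.deactivated_containers.total
    }
}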
diff --git a/metrics/src/main/java/ai/vespa/metrics/ControllerMetrics.java b/metrics/src/main/java/ai/vespa/metrics/ControllerMetrics.java
index 4770fe51830..0f200308862 100644
--- a/metrics/src/main/java/ai/vespa/metrics/ControllerMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/ControllerMetrics.java
@@ -37,6 +37,7 @@ public enum ControllerMetrics implements VespaMetrics {
DNS_QUEUED_REQUESTS("dns.queuedRequests", Unit.REQUEST, "Queued DNS requests"),
ZMS_QUOTA_USAGE("zms.quota.usage", Unit.FRACTION, "ZMS Quota usage per resource type"),
COREDUMP_PROCESSED("coredump.processed", Unit.FAILURE,"Controller: Core dumps processed"),
+ AUTH0_EXCEPTIONS("auth0.exceptions", Unit.FAILURE, "Controller: Auth0 exceptions"),
// Metrics per API, metrics names generated in ControllerMaintainer/MetricsReporter
OPERATION_APPLICATION("operation.application", Unit.REQUEST, "Controller: Requests for /application API"),
diff --git a/metrics/src/main/java/ai/vespa/metrics/HostedNodeAdminMetrics.java b/metrics/src/main/java/ai/vespa/metrics/HostedNodeAdminMetrics.java
index 97185e9c703..a5f21eeba44 100644
--- a/metrics/src/main/java/ai/vespa/metrics/HostedNodeAdminMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/HostedNodeAdminMetrics.java
@@ -21,6 +21,10 @@ public enum HostedNodeAdminMetrics implements VespaMetrics {
MEM_UTIL("mem.util", Unit.PERCENTAGE, "Memory utilisation"),
MEM_TOTAL_USED("mem_total.used", Unit.BYTE, "Total amount of memory used by the node, including OS buffer caches"),
MEM_TOTAL_UTIL("mem_total.util", Unit.PERCENTAGE, "Total memory utilisation"),
+ MEM_SOCK("mem.sock", Unit.BYTE, "Amount of memory used in network transmission buffers"),
+ MEM_SLAB_RECLAIMABLE("mem.slab_reclaimable", Unit.BYTE, "Amount of 'slab' that might be reclaimed"),
+ MEM_SLAB("mem.slab", Unit.BYTE, "Amount of memory used for storing in-kernel data structures"),
+ MEM_ANON("mem.anon", Unit.BYTE, "Amount of memory used in anonymous mappings"),
GPU_UTIL("gpu.util", Unit.PERCENTAGE, "GPU utilisation"),
GPU_MEM_USED("gpu.memory.used", Unit.BYTE, "GPU memory used"),
GPU_MEM_TOTAL("gpu.memory.total", Unit.BYTE, "GPU memory available"),
diff --git a/metrics/src/main/java/ai/vespa/metrics/VespaMetrics.java b/metrics/src/main/java/ai/vespa/metrics/VespaMetrics.java
index 3a17d8a3155..9a498abc911 100644
--- a/metrics/src/main/java/ai/vespa/metrics/VespaMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/VespaMetrics.java
@@ -17,6 +17,8 @@ public interface VespaMetrics {
return baseName() + "." + suffix.suffix();
}
+ // TODO: make the below methods return Metric objects instead of Strings.
+
default String ninety_five_percentile() {
return withSuffix(Suffix.ninety_five_percentile);
}
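
A self-contained stand-in showing what these suffix helpers evaluate to: withSuffix() appends "." plus the suffix to the base name, so the default methods currently return plain Strings such as "query_latency.max" (hence the TODO about returning Metric objects instead). The enum below is an illustration, not a real Vespa metric, and the suffix strings are assumptions.

public class SuffixSketch {

    enum ExampleMetrics {
        QUERY_LATENCY("query_latency");

        private final String baseName;
        ExampleMetrics(String baseName) { this.baseName = baseName; }

        String baseName() { return baseName; }
        String withSuffix(String suffix) { return baseName() + "." + suffix; }
        String max() { return withSuffix("max"); }
        String rate() { return withSuffix("rate"); }
    }

    public static void main(String[] args) {
        System.out.println(ExampleMetrics.QUERY_LATENCY.max());  // query_latency.max
        System.out.println(ExampleMetrics.QUERY_LATENCY.rate()); // query_latency.rate
    }
}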
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/BasicMetricSets.java b/metrics/src/main/java/ai/vespa/metrics/set/BasicMetricSets.java
new file mode 100644
index 00000000000..f167e654e6f
--- /dev/null
+++ b/metrics/src/main/java/ai/vespa/metrics/set/BasicMetricSets.java
@@ -0,0 +1,23 @@
+package ai.vespa.metrics.set;
+
+import ai.vespa.metrics.ContainerMetrics;
+
+/**
+ * Defines metric sets that are meant to be used as building blocks for other metric sets.
+ *
+ * @author gjoranv
+ */
+public class BasicMetricSets {
+
+ static MetricSet containerHttpStatusMetrics() {
+ return new MetricSet.Builder("basic-container-http-status")
+ .metric(ContainerMetrics.HTTP_STATUS_1XX.rate())
+
+ .metric(ContainerMetrics.HTTP_STATUS_2XX.rate())
+ .metric(ContainerMetrics.HTTP_STATUS_3XX.rate())
+ .metric(ContainerMetrics.HTTP_STATUS_4XX.rate())
+ .metric(ContainerMetrics.HTTP_STATUS_5XX.rate())
+ .build();
+ }
+
+}
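
The new building block is intended to be folded into other metric sets as a child set. A hypothetical composition, mirroring the (id, metrics, children) constructor shape used in the DefaultMetrics rewrite below; the "my-consumer" id is illustrative only, and since the factory is package-private this sketch would have to live in ai.vespa.metrics.set as well.

package ai.vespa.metrics.set;

import java.util.List;

// Hypothetical consumer-specific set reusing the http-status building block.
public class MyConsumerMetrics {

    public static final MetricSet myConsumerMetricSet =
            new MetricSet("my-consumer",
                          List.of(),                                              // no individual metrics
                          List.of(BasicMetricSets.containerHttpStatusMetrics())); // child set
}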
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java b/metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java
index 9e23a7625cb..d75f94e7c7e 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java
@@ -1,21 +1,19 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// TODO: Keep the set of metrics in this set stable until Vespa 9.
+// TODO: Vespa 9: Let this class be replaced by Vespa9DefaultMetricSet.
package ai.vespa.metrics.set;
+import ai.vespa.metrics.ClusterControllerMetrics;
import ai.vespa.metrics.ContainerMetrics;
-import ai.vespa.metrics.SearchNodeMetrics;
-import ai.vespa.metrics.StorageMetrics;
import ai.vespa.metrics.DistributorMetrics;
-import ai.vespa.metrics.ClusterControllerMetrics;
-import ai.vespa.metrics.SentinelMetrics;
import ai.vespa.metrics.NodeAdminMetrics;
-import ai.vespa.metrics.Suffix;
-import ai.vespa.metrics.VespaMetrics;
+import ai.vespa.metrics.SearchNodeMetrics;
+import ai.vespa.metrics.SentinelMetrics;
+import ai.vespa.metrics.StorageMetrics;
-import java.util.Collections;
import java.util.EnumSet;
-import java.util.LinkedHashSet;
-import java.util.Set;
+import java.util.List;
import static ai.vespa.metrics.Suffix.average;
import static ai.vespa.metrics.Suffix.count;
@@ -41,135 +39,141 @@ public class DefaultMetrics {
private static MetricSet createMetricSet() {
return new MetricSet(defaultMetricSetId,
- getAllMetrics(),
- Set.of(defaultVespaMetricSet));
+ List.of(),
+ List.of(defaultVespaMetricSet,
+ getContainerMetrics(),
+ getSearchChainMetrics(),
+ getDocprocMetrics(),
+ getSearchNodeMetrics(),
+ getContentMetrics(),
+ getStorageMetrics(),
+ getDistributorMetrics(),
+ getClusterControllerMetrics(),
+ getSentinelMetrics(),
+ getOtherMetrics()));
}
- private static Set<Metric> getAllMetrics() {
- Set<Metric> metrics = new LinkedHashSet<>();
-
- addContainerMetrics(metrics);
- addSearchChainMetrics(metrics);
- addDocprocMetrics(metrics);
- addSearchNodeMetrics(metrics);
- addContentMetrics(metrics);
- addStorageMetrics(metrics);
- addDistributorMetrics(metrics);
- addClusterControllerMetrics(metrics);
- addOtherMetrics(metrics);
- return Collections.unmodifiableSet(metrics);
+ private static MetricSet getContainerMetrics() {
+ return new MetricSet.Builder("default-container")
+ .metric(ContainerMetrics.HTTP_STATUS_1XX.rate())
+ .metric(ContainerMetrics.HTTP_STATUS_2XX.rate())
+ .metric(ContainerMetrics.HTTP_STATUS_3XX.rate())
+ .metric(ContainerMetrics.HTTP_STATUS_4XX.rate())
+ .metric(ContainerMetrics.HTTP_STATUS_5XX.rate())
+ .metric(ContainerMetrics.JDISC_GC_MS, EnumSet.of(max, average))
+ .metric(ContainerMetrics.MEM_HEAP_FREE.average())
+ .metric(ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count))
+ // .metric(ContainerMetrics.CPU.baseName()) // TODO: Add to container metrics
+ .metric(ContainerMetrics.JDISC_THREAD_POOL_SIZE.max())
+ .metric(ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, min, max))
+ .metric(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY.max())
+ .metric(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, min, max))
+ .metric(ContainerMetrics.SERVER_ACTIVE_THREADS.average())
+
+ // Metrics needed for alerting
+ .metric(ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE, EnumSet.of(max, last)) // TODO: Vespa 9: Remove last
+ .metric(ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate())
+ .metric(ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate())
+ .metric(ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate())
+ .metric(ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate())
+ .metric(ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate())
+ .metric(ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS, EnumSet.of(min, max, last)) // TODO: Vespa 9: Remove max, last
+ .build();
}
- private static void addContainerMetrics(Set<Metric> metrics) {
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_1XX.rate());
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.rate());
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_3XX.rate());
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.rate());
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.rate());
- addMetric(metrics, ContainerMetrics.JDISC_GC_MS.average());
- addMetric(metrics, ContainerMetrics.MEM_HEAP_FREE.average());
- addMetric(metrics, ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count));
- addMetric(metrics, ContainerMetrics.JDISC_GC_MS.max());
- // addMetric(metrics, ContainerMetrics.CPU.baseName()); // TODO: Add to container metrics
- addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_SIZE.max());
- addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, min, max));
- addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY.max());
- addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, min, max));
- addMetric(metrics, ContainerMetrics.SERVER_ACTIVE_THREADS.average());
-
- // Metrics needed for alerting
- addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.last());
- addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate());
- addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate());
- addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate());
- addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate());
- addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate());
- addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS.last());
- }
-
- private static void addSearchChainMetrics(Set<Metric> metrics) {
- addMetric(metrics, ContainerMetrics.QUERIES.rate());
- addMetric(metrics, ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile, average)); // TODO: Remove average with Vespa 9
- addMetric(metrics, ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max, average)); // TODO: Remove average with Vespa 9
- addMetric(metrics, ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max, average)); // TODO: Remove average with Vespa 9
- addMetric(metrics, ContainerMetrics.DEGRADED_QUERIES.rate());
- addMetric(metrics, ContainerMetrics.FAILED_QUERIES.rate());
+ private static MetricSet getSearchChainMetrics() {
+ return new MetricSet.Builder("default-search-chain")
+ .metric(ContainerMetrics.QUERIES.rate())
+ .metric(ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile, average)) // TODO: Remove average with Vespa 9
+ .metric(ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max, average)) // TODO: Remove average with Vespa 9
+ .metric(ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max, average)) // TODO: Remove average with Vespa 9
+ .metric(ContainerMetrics.DEGRADED_QUERIES.rate())
+ .metric(ContainerMetrics.FAILED_QUERIES.rate())
+ .build();
}
- private static void addDocprocMetrics(Set<Metric> metrics) {
- addMetric(metrics, ContainerMetrics.DOCPROC_DOCUMENTS.sum());
+ private static MetricSet getDocprocMetrics() {
+ return new MetricSet.Builder("default-docproc")
+ .metric(ContainerMetrics.DOCPROC_DOCUMENTS.sum())
+ .build();
}
- private static void addSearchNodeMetrics(Set<Metric> metrics) {
+ private static MetricSet getSearchNodeMetrics() {
// Metrics needed for alerting
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ return new MetricSet.Builder("default-search-node")
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED, EnumSet.of(max, last)) // TODO: Vespa 9: Remove last
+ .build();
}
- private static void addContentMetrics(Set<Metric> metrics) {
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.rate());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(sum, count, max, average)); // TODO: Remove average with Vespa 9
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(sum, count, max, average)); // TODO: Remove average with Vespa 9
-
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DISK_USAGE.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MEMORY_USAGE_ALLOCATED_BYTES.last());
-
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average());
-
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED.rate());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_RERANKED.rate());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(sum, count, max, average)); // TODO: Remove average with Vespa 9
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(sum, count, max, average)); // TODO: Remove average with Vespa 9
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(sum, count, max, average)); // TODO: Remove average with Vespa 9
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.last());
+ private static MetricSet getContentMetrics() {
+ return new MetricSet.Builder("default-content")
+ .metric(SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.rate())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(sum, count, max, average)) // TODO: Remove average with Vespa 9
+ .metric(SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(sum, count, max, average)) // TODO: Remove average with Vespa 9
+
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL, EnumSet.of(max,last)) // TODO: Vespa 9: Remove last
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY, EnumSet.of(max,last)) // TODO: Vespa 9: Remove last
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE, EnumSet.of(max,last)) // TODO: Vespa 9: Remove last
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DISK_USAGE.last())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MEMORY_USAGE_ALLOCATED_BYTES.last())
+
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average())
+
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED.rate())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_RERANKED.rate())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(sum, count, max, average)) // TODO: Remove average with Vespa 9
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(sum, count, max, average)) // TODO: Remove average with Vespa 9
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(sum, count, max, average)) // TODO: Remove average with Vespa 9
+ .metric(SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.last())
+ .build();
}
- private static void addStorageMetrics(Set<Metric> metrics) {
- addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate());
- addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate());
- addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate());
+ private static MetricSet getStorageMetrics() {
+ return new MetricSet.Builder("default-storage")
+ .metric(StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate())
+ .metric(StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate())
+ .metric(StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate())
+ .build();
}
- private static void addDistributorMetrics(Set<Metric> metrics) {
- addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average());
+ private static MetricSet getDistributorMetrics() {
+ return new MetricSet.Builder("default-distributor")
+ .metric(DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average())
- // Metrics needed for alerting
- addMetric(metrics, DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count());
+ // Metrics needed for alerting
+ .metric(DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count())
+ .build();
}
- private static void addClusterControllerMetrics(Set<Metric> metrics) {
+ private static MetricSet getClusterControllerMetrics() {
// Metrics needed for alerting
- addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT.last());
- addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT.last());
- addMetric(metrics, ClusterControllerMetrics.UP_COUNT.last());
- addMetric(metrics, ClusterControllerMetrics.IS_MASTER.last());
- addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
- addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION, EnumSet.of(last, max)); // TODO: Vespa 9: Remove last
- addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION, EnumSet.of(last, max)); // TODO: Vespa 9: Remove last
+ return new MetricSet.Builder("default-cluster-controller")
+ .metric(ClusterControllerMetrics.DOWN_COUNT, EnumSet.of(max, last)) // TODO: Vespa 9: Remove last
+ .metric(ClusterControllerMetrics.MAINTENANCE_COUNT, EnumSet.of(max, last)) // TODO: Vespa 9: Remove last
+ .metric(ClusterControllerMetrics.UP_COUNT, EnumSet.of(max, last)) // TODO: Vespa 9: Remove last
+ .metric(ClusterControllerMetrics.IS_MASTER, EnumSet.of(max, last)) // TODO: Vespa 9: Remove last
+ .metric(ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT, EnumSet.of(max, last)) // TODO: Vespa 9: Remove last
+ .metric(ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION, EnumSet.of(last, max)) // TODO: Vespa 9: Remove last
+ .metric(ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION, EnumSet.of(last, max)) // TODO: Vespa 9: Remove last
+ .build();
}
- private static void addSentinelMetrics(Set<Metric> metrics) {
+ private static MetricSet getSentinelMetrics() {
// Metrics needed for alerting
- addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS.last());
+ return new MetricSet.Builder("default-sentinel")
+ .metric(SentinelMetrics.SENTINEL_TOTAL_RESTARTS, EnumSet.of(max, sum, last)) // TODO: Vespa 9: Remove last, sum?
+ .build();
}
- private static void addOtherMetrics(Set<Metric> metrics) {
+ private static MetricSet getOtherMetrics() {
// Metrics needed for alerting
- addMetric(metrics, NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName());
- addMetric(metrics, NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName());
- }
-
- private static void addMetric(Set<Metric> metrics, String nameWithSuffix) {
- metrics.add(new Metric(nameWithSuffix));
- }
-
- private static void addMetric(Set<Metric> metrics, VespaMetrics metric, EnumSet<Suffix> suffixes) {
- suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." + suffix.suffix())));
+ return new MetricSet.Builder("default-other")
+ .metric(NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName())
+ .metric(NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName())
+ .build();
}
private DefaultMetrics() { }
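
Condensed illustration of the Builder pattern adopted above: pre-suffixed names (e.g. .rate()) are added one at a time, several suffixes can be attached at once via EnumSet, and build() produces the set. The "example-container" id is illustrative; the metric choices are taken from the hunk above.

package ai.vespa.metrics.set;

import ai.vespa.metrics.ContainerMetrics;

import java.util.EnumSet;

import static ai.vespa.metrics.Suffix.average;
import static ai.vespa.metrics.Suffix.max;

// Minimal sketch of the MetricSet.Builder usage introduced in this rewrite.
public class ExampleMetrics {

    static MetricSet exampleContainerMetrics() {
        return new MetricSet.Builder("example-container")
                .metric(ContainerMetrics.HTTP_STATUS_2XX.rate())                // single, pre-suffixed metric name
                .metric(ContainerMetrics.JDISC_GC_MS, EnumSet.of(max, average)) // several suffixes at once
                .build();
    }
}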
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/DefaultVespaMetrics.java b/metrics/src/main/java/ai/vespa/metrics/set/DefaultVespaMetrics.java
index 93b6bfab002..e34c8ee68eb 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/DefaultVespaMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/DefaultVespaMetrics.java
@@ -4,9 +4,6 @@ package ai.vespa.metrics.set;
import ai.vespa.metrics.ContainerMetrics;
import ai.vespa.metrics.SearchNodeMetrics;
-import java.util.LinkedHashSet;
-import java.util.Set;
-
/**
* Encapsulates a minimal set of Vespa metrics to be used as default for all metrics consumers.
*
@@ -19,11 +16,11 @@ public class DefaultVespaMetrics {
public static final MetricSet defaultVespaMetricSet = createDefaultVespaMetricSet();
private static MetricSet createDefaultVespaMetricSet() {
- Set<Metric> metrics = new LinkedHashSet<>();
-
- metrics.add(new Metric(ContainerMetrics.FEED_OPERATIONS.rate()));
- metrics.add(new Metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.last()));
- return new MetricSet("default-vespa", metrics);
+ return new MetricSet.Builder("default-vespa")
+ .metric(ContainerMetrics.FEED_OPERATIONS.rate())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.last())
+ .build();
}
+
}
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/InfrastructureMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/InfrastructureMetricSet.java
index 571d292b54d..b3f27fa6117 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/InfrastructureMetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/InfrastructureMetricSet.java
@@ -17,6 +17,7 @@ import static ai.vespa.metrics.Suffix.average;
import static ai.vespa.metrics.Suffix.count;
import static ai.vespa.metrics.Suffix.last;
import static ai.vespa.metrics.Suffix.max;
+import static ai.vespa.metrics.Suffix.min;
import static ai.vespa.metrics.Suffix.sum;
/**
@@ -51,72 +52,73 @@ public class InfrastructureMetricSet {
addMetric(metrics, ConfigServerMetrics.DELAYED_RESPONSES.count());
addMetric(metrics, ConfigServerMetrics.SESSION_CHANGE_ERRORS.count());
- addMetric(metrics, ConfigServerMetrics.ZK_Z_NODES, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last.
- addMetric(metrics, ConfigServerMetrics.ZK_AVG_LATENCY, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last.
- addMetric(metrics, ConfigServerMetrics.ZK_MAX_LATENCY, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last.
- addMetric(metrics, ConfigServerMetrics.ZK_CONNECTIONS, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last.
+ addMetric(metrics, ConfigServerMetrics.ZK_Z_NODES.max());
+ addMetric(metrics, ConfigServerMetrics.ZK_MAX_LATENCY, EnumSet.of(max, average));
+ addMetric(metrics, ConfigServerMetrics.ZK_CONNECTIONS.max());
addMetric(metrics, ConfigServerMetrics.ZK_CONNECTION_LOST.count());
addMetric(metrics, ConfigServerMetrics.ZK_RECONNECTED.count());
addMetric(metrics, ConfigServerMetrics.ZK_SUSPENDED.count());
- addMetric(metrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last.
+ addMetric(metrics, ConfigServerMetrics.ZK_OUTSTANDING_REQUESTS.max());
// Node repository metrics
- addMetric(metrics, ConfigServerMetrics.NODES_NON_ACTIVE_FRACTION.last());
- addMetric(metrics, ConfigServerMetrics.CLUSTER_COST.last());
- addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.last());
- addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.last());
- addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.last());
+ addMetric(metrics, ConfigServerMetrics.NODES_ACTIVE.max());
+ addMetric(metrics, ConfigServerMetrics.NODES_NON_ACTIVE_FRACTION.max());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_COST.max());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_CPU.max());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_MEMORY.max());
+ addMetric(metrics, ConfigServerMetrics.CLUSTER_LOAD_IDEAL_DISK.max());
addMetric(metrics, ConfigServerMetrics.WANT_TO_REBOOT.max());
addMetric(metrics, ConfigServerMetrics.WANT_TO_RESTART.max());
addMetric(metrics, ConfigServerMetrics.WANT_TO_RETIRE.max());
addMetric(metrics, ConfigServerMetrics.RETIRED.max());
addMetric(metrics, ConfigServerMetrics.WANT_TO_CHANGE_VESPA_VERSION.max());
- addMetric(metrics, ConfigServerMetrics.HAS_WIRE_GUARD_KEY.last());
+ addMetric(metrics, ConfigServerMetrics.HAS_WIRE_GUARD_KEY.max());
addMetric(metrics, ConfigServerMetrics.WANT_TO_DEPROVISION.max());
- addMetric(metrics, ConfigServerMetrics.SUSPENDED, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, ConfigServerMetrics.SUSPENDED.max());
addMetric(metrics, ConfigServerMetrics.SOME_SERVICES_DOWN.max());
- addMetric(metrics, ConfigServerMetrics.NODE_FAILER_BAD_NODE.last());
+ addMetric(metrics, ConfigServerMetrics.NODE_FAILER_BAD_NODE.max());
addMetric(metrics, ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LOAD, EnumSet.of(max,average));
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_CPU, EnumSet.of(average, last)); // TODO: Vespa 9: Remove last?
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_MEM, EnumSet.of(average, last)); // TODO: Vespa 9: Remove last?
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_DISK, EnumSet.of(average, last)); // TODO: Vespa 9: Remove last?
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_CPU.max());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_MEM.max());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_ALLOCATED_CAPACITY_DISK.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_CPU.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_MEM.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_FREE_CAPACITY_DISK.max());
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_CPU, EnumSet.of(max,average));
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_DISK, EnumSet.of(max,average));
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_MEM, EnumSet.of(max,average));
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_SKEW.last());
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PENDING_REDEPLOYMENTS.last());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_CPU.max());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_DISK.max());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_TOTAL_CAPACITY_MEM.max());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DOCKER_SKEW.max());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PENDING_REDEPLOYMENTS.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_ACTIVE_HOSTS.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_DIRTY_HOSTS.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_FAILED_HOSTS.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_INACTIVE_HOSTS.max());
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PROVISIONED_HOSTS.last());
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PROVISIONED_HOSTS.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_READY_HOSTS.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_RESERVED_HOSTS.max());
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PARKED_HOSTS, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PARKED_HOSTS.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_ACTIVE_NODES.max());
addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_FAILED_NODES.max());
- addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PARKED_NODES, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, ConfigServerMetrics.HOSTED_VESPA_PARKED_NODES.max());
addMetric(metrics, ConfigServerMetrics.RPC_SERVER_WORK_QUEUE_SIZE.average());
- addMetric(metrics, ConfigServerMetrics.DEPLOYMENT_ACTIVATE_MILLIS.last());
- addMetric(metrics, ConfigServerMetrics.DEPLOYMENT_PREPARE_MILLIS.last());
+ addMetric(metrics, ConfigServerMetrics.DEPLOYMENT_ACTIVATE_MILLIS.max());
+ addMetric(metrics, ConfigServerMetrics.DEPLOYMENT_PREPARE_MILLIS.max());
addMetric(metrics, ConfigServerMetrics.LOCK_ATTEMPT_LOCKED_LOAD, EnumSet.of(max, average));
- addMetric(metrics, ConfigServerMetrics.MAINTENANCE_SUCCESS_FACTOR_DEVIATION.last());
+ addMetric(metrics, ConfigServerMetrics.MAINTENANCE_SUCCESS_FACTOR_DEVIATION.max());
addMetric(metrics, ConfigServerMetrics.MAINTENANCE_DEPLOYMENT_FAILURE.count());
addMetric(metrics, ConfigServerMetrics.MAINTENANCE_DEPLOYMENT_TRANSIENT_FAILURE.count());
addMetric(metrics, ConfigServerMetrics.OVERCOMMITTED_HOSTS.max());
- addMetric(metrics, ConfigServerMetrics.SPARE_HOST_CAPACITY.last());
- addMetric(metrics, ConfigServerMetrics.THROTTLED_NODE_FAILURES, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
- addMetric(metrics, ConfigServerMetrics.NODE_FAIL_THROTTLING.last());
+ addMetric(metrics, ConfigServerMetrics.SPARE_HOST_CAPACITY, EnumSet.of(min, max, last)); // TODO: Vespa 9: Remove last. WAIT
+ addMetric(metrics, ConfigServerMetrics.THROTTLED_HOST_FAILURES.max());
+ addMetric(metrics, ConfigServerMetrics.THROTTLED_NODE_FAILURES.max());
+ addMetric(metrics, ConfigServerMetrics.NODE_FAIL_THROTTLING.max());
addMetric(metrics, ConfigServerMetrics.ORCHESTRATOR_LOCK_ACQUIRE_SUCCESS.count());
addMetric(metrics, ConfigServerMetrics.ORCHESTRATOR_LOCK_ACQUIRE_TIMEOUT.count());
- addMetric(metrics, ConfigServerMetrics.ZONE_WORKING.last());
+ addMetric(metrics, ConfigServerMetrics.ZONE_WORKING.max());
addMetric(metrics, ConfigServerMetrics.THROTTLED_HOST_PROVISIONING.max());
// Container metrics that should be stored for the config-server
@@ -125,11 +127,11 @@ public class InfrastructureMetricSet {
addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.count());
addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.count());
addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.count());
- addMetric(metrics, ContainerMetrics.JDISC_GC_MS.last());
+ addMetric(metrics, ContainerMetrics.JDISC_GC_MS.max());
addMetric(metrics, ContainerMetrics.MEM_HEAP_USED.average());
addMetric(metrics, ContainerMetrics.SERVER_NUM_REQUESTS.count());
- addMetric(metrics, ContainerMetrics.SERVER_STARTED_MILLIS.last());
- addMetric(metrics, ContainerMetrics.SERVER_TOTAL_SUCCESSFUL_RESPONSE_LATENCY.last());
+ addMetric(metrics, ContainerMetrics.SERVER_STARTED_MILLIS.max());
+ addMetric(metrics, ContainerMetrics.SERVER_TOTAL_SUCCESSFUL_RESPONSE_LATENCY.max());
return metrics;
}
@@ -138,40 +140,42 @@ public class InfrastructureMetricSet {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, ControllerMetrics.ATHENZ_REQUEST_ERROR.count());
- addMetric(metrics, ControllerMetrics.ARCHIVE_BUCKET_COUNT.last());
- addMetric(metrics, ControllerMetrics.BILLING_TENANTS.last());
+ addMetric(metrics, ControllerMetrics.ARCHIVE_BUCKET_COUNT.max());
+ addMetric(metrics, ControllerMetrics.BILLING_TENANTS.max());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_ABORT.count());
- addMetric(metrics, ControllerMetrics.DEPLOYMENT_AVERAGE_DURATION, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last.
+ addMetric(metrics, ControllerMetrics.DEPLOYMENT_AVERAGE_DURATION.max());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_CONVERGENCE_FAILURE.count());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_DEPLOYMENT_FAILURE.count());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_ERROR.count());
- addMetric(metrics, ControllerMetrics.DEPLOYMENT_FAILING_UPGRADES.last());
- addMetric(metrics, ControllerMetrics.DEPLOYMENT_FAILURE_PERCENTAGE.last());
+ addMetric(metrics, ControllerMetrics.DEPLOYMENT_FAILING_UPGRADES.min());
+ addMetric(metrics, ControllerMetrics.DEPLOYMENT_FAILURE_PERCENTAGE.max());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_NODE_COUNT_BY_OS_VERSION.max());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_OS_CHANGE_DURATION.max());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_START.count());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_SUCCESS.count());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_TEST_FAILURE.count());
- addMetric(metrics, ControllerMetrics.DEPLOYMENT_WARNINGS.last());
+ addMetric(metrics, ControllerMetrics.DEPLOYMENT_WARNINGS.max());
addMetric(metrics, ControllerMetrics.DEPLOYMENT_ENDPOINT_CERTIFICATE_TIMEOUT.count());
- addMetric(metrics, ControllerMetrics.DEPLOYMENT_BROKEN_SYSTEM_VERSION.last());
-
- addMetric(metrics, ControllerMetrics.OPERATION_APPLICATION.last());
- addMetric(metrics, ControllerMetrics.OPERATION_CHANGEMANAGEMENT.last());
- addMetric(metrics, ControllerMetrics.OPERATION_CONFIGSERVER.last());
- addMetric(metrics, ControllerMetrics.OPERATION_CONTROLLER.last());
- addMetric(metrics, ControllerMetrics.OPERATION_FLAGS.last());
- addMetric(metrics, ControllerMetrics.OPERATION_OS.last());
- addMetric(metrics, ControllerMetrics.OPERATION_ROUTING.last());
- addMetric(metrics, ControllerMetrics.OPERATION_ZONE.last());
-
- addMetric(metrics, ControllerMetrics.REMAINING_ROTATIONS.last());
- addMetric(metrics, ControllerMetrics.DNS_QUEUED_REQUESTS.last());
- addMetric(metrics, ControllerMetrics.ZMS_QUOTA_USAGE.last());
+ addMetric(metrics, ControllerMetrics.DEPLOYMENT_BROKEN_SYSTEM_VERSION.max());
+
+ addMetric(metrics, ControllerMetrics.OPERATION_APPLICATION.max());
+ addMetric(metrics, ControllerMetrics.OPERATION_CHANGEMANAGEMENT.max());
+ addMetric(metrics, ControllerMetrics.OPERATION_CONFIGSERVER.max());
+ addMetric(metrics, ControllerMetrics.OPERATION_CONTROLLER.max());
+ addMetric(metrics, ControllerMetrics.OPERATION_FLAGS.max());
+ addMetric(metrics, ControllerMetrics.OPERATION_OS.max());
+ addMetric(metrics, ControllerMetrics.OPERATION_ROUTING.max());
+ addMetric(metrics, ControllerMetrics.OPERATION_ZONE.max());
+
+ addMetric(metrics, ControllerMetrics.REMAINING_ROTATIONS, EnumSet.of(min, max, last)); // TODO: Vespa 9: Remove last WAIT
+ addMetric(metrics, ControllerMetrics.DNS_QUEUED_REQUESTS.max());
+ addMetric(metrics, ControllerMetrics.ZMS_QUOTA_USAGE.max());
addMetric(metrics, ControllerMetrics.COREDUMP_PROCESSED.count());
+ addMetric(metrics, ControllerMetrics.AUTH0_EXCEPTIONS.count());
- addMetric(metrics, ControllerMetrics.METERING_AGE_SECONDS.last());
+ addMetric(metrics, ControllerMetrics.METERING_AGE_SECONDS.min());
+ addMetric(metrics, ControllerMetrics.METERING_LAST_REPORTED.max());
return metrics;
}
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/MetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/MetricSet.java
index b8409fb7663..f334690a7ca 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/MetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/MetricSet.java
@@ -1,7 +1,11 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.metrics.set;
+import ai.vespa.metrics.Suffix;
+import ai.vespa.metrics.VespaMetrics;
+
import java.util.Collection;
+import java.util.EnumSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
@@ -88,4 +92,44 @@ public class MetricSet {
return metricMap;
}
+
+ public static class Builder {
+ private final String id;
+ private final Set<Metric> metrics = new LinkedHashSet<>();
+ private final Set<MetricSet> children = new LinkedHashSet<>();
+
+ public Builder(String id) {
+ this.id = id;
+ }
+
+ public Builder metric(String metric) {
+ return metric(new Metric(metric));
+ }
+
+ /** Adds all given suffixes of the given metric to this set. */
+ public Builder metric(VespaMetrics metric, EnumSet<Suffix> suffixes) {
+ suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." + suffix.suffix())));
+ return this;
+ }
+
+ public Builder metric(Metric metric) {
+ metrics.add(metric);
+ return this;
+ }
+
+ public Builder metrics(Collection<Metric> metrics) {
+ this.metrics.addAll(metrics);
+ return this;
+ }
+
+ public Builder metricSet(MetricSet child) {
+ children.add(child);
+ return this;
+ }
+
+ public MetricSet build() {
+ return new MetricSet(id, metrics, children);
+ }
+ }
+
}
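
A minimal usage sketch of the new MetricSet.Builder (the exampleSet() helper and the "example-set" id are hypothetical; the metric references, EnumSet usage, and static Suffix imports mirror those used by the metric-set classes elsewhere in this change):

    // Illustrative sketch; assumes imports of ai.vespa.metrics.ContainerMetrics, java.util.EnumSet,
    // and static imports of Suffix.sum/count/max, as in the metric-set classes in this change.
    private static MetricSet exampleSet() {
        return new MetricSet.Builder("example-set")                                  // hypothetical set id
                .metric(ContainerMetrics.QUERIES.rate())                             // one pre-suffixed name (String overload)
                .metric(ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max)) // several suffixes of one metric
                .metricSet(BasicMetricSets.containerHttpStatusMetrics())             // nest an existing child set
                .build();
    }
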
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/SystemMetrics.java b/metrics/src/main/java/ai/vespa/metrics/set/SystemMetrics.java
index 0560daebc43..a86deb3830b 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/SystemMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/SystemMetrics.java
@@ -30,6 +30,10 @@ public class SystemMetrics {
new Metric(HostedNodeAdminMetrics.MEM_UTIL.baseName()),
new Metric(HostedNodeAdminMetrics.MEM_TOTAL_USED.baseName()),
new Metric(HostedNodeAdminMetrics.MEM_TOTAL_UTIL.baseName()),
+ new Metric(HostedNodeAdminMetrics.MEM_SOCK.baseName()),
+ new Metric(HostedNodeAdminMetrics.MEM_SLAB_RECLAIMABLE.baseName()),
+ new Metric(HostedNodeAdminMetrics.MEM_SLAB.baseName()),
+ new Metric(HostedNodeAdminMetrics.MEM_ANON.baseName()),
new Metric(HostedNodeAdminMetrics.GPU_UTIL.baseName()),
new Metric(HostedNodeAdminMetrics.GPU_MEM_USED.baseName()),
new Metric(HostedNodeAdminMetrics.GPU_MEM_TOTAL.baseName())
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9DefaultMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9DefaultMetricSet.java
new file mode 100644
index 00000000000..a87557981b7
--- /dev/null
+++ b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9DefaultMetricSet.java
@@ -0,0 +1,175 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+// TODO: This class to be used for managed Vespa.
+// TODO: Vespa 9: Let this class replace DefaultMetrics.
+package ai.vespa.metrics.set;
+
+import ai.vespa.metrics.ClusterControllerMetrics;
+import ai.vespa.metrics.ContainerMetrics;
+import ai.vespa.metrics.DistributorMetrics;
+import ai.vespa.metrics.NodeAdminMetrics;
+import ai.vespa.metrics.SearchNodeMetrics;
+import ai.vespa.metrics.SentinelMetrics;
+import ai.vespa.metrics.StorageMetrics;
+
+import java.util.EnumSet;
+import java.util.List;
+
+import static ai.vespa.metrics.Suffix.average;
+import static ai.vespa.metrics.Suffix.count;
+import static ai.vespa.metrics.Suffix.last;
+import static ai.vespa.metrics.Suffix.max;
+import static ai.vespa.metrics.Suffix.min;
+import static ai.vespa.metrics.Suffix.ninety_five_percentile;
+import static ai.vespa.metrics.Suffix.ninety_nine_percentile;
+import static ai.vespa.metrics.Suffix.sum;
+import static ai.vespa.metrics.set.DefaultVespaMetrics.defaultVespaMetricSet;
+
+/**
+ * Metrics for the 'default' consumer, which is used by default for the generic metrics api and
+ * other user facing apis, e.g. 'prometheus/'.
+ *
+ * @author gjoranv
+ * @author yngve
+ */
+public class Vespa9DefaultMetricSet {
+
+ public static final String defaultMetricSetId = "vespa9default";
+
+ public static final MetricSet vespa9defaultMetricSet = createMetricSet();
+
+ private static MetricSet createMetricSet() {
+ return new MetricSet(defaultMetricSetId,
+ List.of(),
+ List.of(defaultVespaMetricSet,
+ BasicMetricSets.containerHttpStatusMetrics(),
+ getContainerMetrics(),
+ getSearchChainMetrics(),
+ getDocprocMetrics(),
+ getSearchNodeMetrics(),
+ getContentMetrics(),
+ getStorageMetrics(),
+ getDistributorMetrics(),
+ getClusterControllerMetrics(),
+ getSentinelMetrics(),
+ getOtherMetrics()));
+ }
+
+ private static MetricSet getContainerMetrics() {
+ return new MetricSet.Builder("default-container")
+ .metric(ContainerMetrics.JDISC_GC_MS, EnumSet.of(max, average))
+ .metric(ContainerMetrics.MEM_HEAP_FREE.average())
+ .metric(ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count))
+ .metric(ContainerMetrics.CPU.baseName())
+ .metric(ContainerMetrics.JDISC_THREAD_POOL_SIZE.max())
+ .metric(ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, min, max))
+ .metric(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY.max())
+ .metric(ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, min, max))
+ .metric(ContainerMetrics.SERVER_ACTIVE_THREADS.average())
+
+ // Metrics needed for alerting
+ .metric(ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.max())
+ .metric(ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate())
+ .metric(ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate())
+ .metric(ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate())
+ .metric(ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate())
+ .metric(ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate())
+ .metric(ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS, EnumSet.of(min, max, last)) // TODO: Vespa 9: Remove max, last
+ .build();
+ }
+
+ private static MetricSet getSearchChainMetrics() {
+ return new MetricSet.Builder("default-search-chain")
+ .metric(ContainerMetrics.QUERIES.rate())
+ .metric(ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile))
+ .metric(ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max))
+ .metric(ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max))
+ .metric(ContainerMetrics.DEGRADED_QUERIES.rate())
+ .metric(ContainerMetrics.FAILED_QUERIES.rate())
+ .build();
+ }
+
+ private static MetricSet getDocprocMetrics() {
+ return new MetricSet.Builder("default-docproc")
+ .metric(ContainerMetrics.DOCPROC_DOCUMENTS.sum())
+ .build();
+ }
+
+ private static MetricSet getSearchNodeMetrics() {
+ // Metrics needed for alerting
+ return new MetricSet.Builder("default-search-node")
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.max())
+ .build();
+ }
+
+ private static MetricSet getContentMetrics() {
+ return new MetricSet.Builder("default-content")
+ .metric(SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.rate())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(sum, count, max))
+ .metric(SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(sum, count, max))
+
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.max())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.max())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.max())
+
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average())
+
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED.rate())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_RERANKED.rate())
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(sum, count, max))
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(sum, count, max))
+ .metric(SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(sum, count, max))
+ .build();
+ }
+
+ private static MetricSet getStorageMetrics() {
+ return new MetricSet.Builder("default-storage")
+ .metric(StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate())
+ .metric(StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate())
+ .metric(StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate())
+ .build();
+ }
+
+ private static MetricSet getDistributorMetrics() {
+ return new MetricSet.Builder("default-distributor")
+ .metric(DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average())
+
+ // Metrics needed for alerting
+ .metric(DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count())
+ .build();
+ }
+
+ private static MetricSet getClusterControllerMetrics() {
+ // Metrics needed for alerting
+ return new MetricSet.Builder("default-cluster-controller")
+ .metric(ClusterControllerMetrics.DOWN_COUNT.max())
+ .metric(ClusterControllerMetrics.MAINTENANCE_COUNT.max())
+ .metric(ClusterControllerMetrics.UP_COUNT, EnumSet.of(max, last)) // TODO: Remove last
+ .metric(ClusterControllerMetrics.IS_MASTER.max())
+ .metric(ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT.max())
+ .metric(ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION.max())
+ .metric(ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION.max())
+ .build();
+ }
+
+ private static MetricSet getSentinelMetrics() {
+ // Metrics needed for alerting
+ return new MetricSet.Builder("default-sentinel")
+ .metric(SentinelMetrics.SENTINEL_TOTAL_RESTARTS.max())
+ .build();
+ }
+
+ private static MetricSet getOtherMetrics() {
+ // Metrics needed for alerting
+ return new MetricSet.Builder("default-other")
+ .metric(NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName())
+ .metric(NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName())
+ .build();
+ }
+
+ private Vespa9DefaultMetricSet() { }
+
+}
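
The per-area helpers above all follow the same shape, so extending the set is mechanical; a hypothetical additional child (the getLoggingMetrics() name and "default-logging" id are illustrative, and an import of ai.vespa.metrics.LogdMetrics is assumed) would be defined like this and registered in the List.of(...) inside createMetricSet():

    private static MetricSet getLoggingMetrics() {
        return new MetricSet.Builder("default-logging")           // hypothetical id
                .metric(LogdMetrics.LOGD_PROCESSED_LINES.count()) // same metric as used in Vespa9VespaMetricSet below
                .build();
    }
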
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
new file mode 100644
index 00000000000..2744f6e4ae0
--- /dev/null
+++ b/metrics/src/main/java/ai/vespa/metrics/set/Vespa9VespaMetricSet.java
@@ -0,0 +1,640 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+// TODO: This class to be used for managed Vespa.
+// TODO: Vespa 9: Let this class replace VespaMetricSet.
+package ai.vespa.metrics.set;
+
+import ai.vespa.metrics.ClusterControllerMetrics;
+import ai.vespa.metrics.ContainerMetrics;
+import ai.vespa.metrics.DistributorMetrics;
+import ai.vespa.metrics.LogdMetrics;
+import ai.vespa.metrics.NodeAdminMetrics;
+import ai.vespa.metrics.RoutingLayerMetrics;
+import ai.vespa.metrics.SearchNodeMetrics;
+import ai.vespa.metrics.SentinelMetrics;
+import ai.vespa.metrics.SlobrokMetrics;
+import ai.vespa.metrics.StorageMetrics;
+import ai.vespa.metrics.Suffix;
+import ai.vespa.metrics.VespaMetrics;
+
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Set;
+
+import static ai.vespa.metrics.Suffix.average;
+import static ai.vespa.metrics.Suffix.count;
+import static ai.vespa.metrics.Suffix.last;
+import static ai.vespa.metrics.Suffix.max;
+import static ai.vespa.metrics.Suffix.min;
+import static ai.vespa.metrics.Suffix.ninety_five_percentile;
+import static ai.vespa.metrics.Suffix.ninety_nine_percentile;
+import static ai.vespa.metrics.Suffix.rate;
+import static ai.vespa.metrics.Suffix.sum;
+import static ai.vespa.metrics.set.DefaultVespaMetrics.defaultVespaMetricSet;
+
+/**
+ * Encapsulates vespa service metrics.
+ *
+ * @author gjoranv
+ * @author yngve
+ */
+public class Vespa9VespaMetricSet {
+
+ public static final MetricSet vespa9vespaMetricSet = createMetricSet();
+
+ private static MetricSet createMetricSet() {
+ return new MetricSet("vespa9vespa",
+ getVespaMetrics(),
+ List.of(defaultVespaMetricSet,
+ BasicMetricSets.containerHttpStatusMetrics()));
+ }
+
+ private static Set<Metric> getVespaMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ metrics.addAll(getSearchNodeMetrics());
+ metrics.addAll(getStorageMetrics());
+ metrics.addAll(getDistributorMetrics());
+ metrics.addAll(getDocprocMetrics());
+ metrics.addAll(getClusterControllerMetrics());
+ metrics.addAll(getSearchChainMetrics());
+ metrics.addAll(getContainerMetrics());
+ metrics.addAll(getSentinelMetrics());
+ metrics.addAll(getOtherMetrics());
+
+ return Collections.unmodifiableSet(metrics);
+ }
+
+ private static Set<Metric> getSentinelMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ addMetric(metrics, SentinelMetrics.SENTINEL_RESTARTS.count());
+ addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS.max());
+
+ return metrics;
+ }
+
+ private static Set<Metric> getOtherMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ addMetric(metrics, SlobrokMetrics.SLOBROK_HEARTBEATS_FAILED.count());
+ addMetric(metrics, SlobrokMetrics.SLOBROK_MISSING_CONSENSUS.count());
+
+ addMetric(metrics, LogdMetrics.LOGD_PROCESSED_LINES.count());
+
+ // Java (JRT) TLS metrics
+ addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES.baseName());
+ addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES.baseName());
+ addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECTIONS_ESTABLISHED.baseName());
+ addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED.baseName());
+ addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName());
+ addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName());
+
+ // C++ TLS metrics
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_TLS_HANDSHAKES_FAILED.count());
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_PEER_AUTHORIZATION_FAILURES.count());
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_TLS_CONNECTIONS_ESTABLISHED.count());
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_TLS_CONNECTIONS_ESTABLISHED.count());
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_CLIENT_INSECURE_CONNECTIONS_ESTABLISHED.count());
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_SERVER_INSECURE_CONNECTIONS_ESTABLISHED.count());
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_TLS_CONNECTIONS_BROKEN.count());
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_FAILED_TLS_CONFIG_RELOADS.count());
+ // C++ capability metrics
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_RPC_CAPABILITY_CHECKS_FAILED.count());
+ addMetric(metrics, StorageMetrics.VDS_SERVER_NETWORK_STATUS_CAPABILITY_CHECKS_FAILED.count());
+
+ // C++ Fnet metrics
+ addMetric(metrics, StorageMetrics.VDS_SERVER_FNET_NUM_CONNECTIONS.count());
+
+ // NodeAdmin certificate
+ addMetric(metrics, NodeAdminMetrics.ENDPOINT_CERTIFICATE_EXPIRY_SECONDS.baseName());
+ addMetric(metrics, NodeAdminMetrics.NODE_CERTIFICATE_EXPIRY_SECONDS.baseName());
+
+ // Routing layer metrics
+ addMetric(metrics, RoutingLayerMetrics.WORKER_CONNECTIONS.max()); // Hosted Vespa only (routing layer)
+
+ return metrics;
+ }
+
+
+ private static Set<Metric> getContainerMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ addMetric(metrics, ContainerMetrics.APPLICATION_GENERATION.baseName());
+
+ addMetric(metrics, ContainerMetrics.HANDLED_REQUESTS.count());
+ addMetric(metrics, ContainerMetrics.HANDLED_LATENCY, EnumSet.of(sum, count, max));
+
+ addMetric(metrics, ContainerMetrics.SERVER_NUM_OPEN_CONNECTIONS, EnumSet.of(max, average));
+ addMetric(metrics, ContainerMetrics.SERVER_NUM_CONNECTIONS, EnumSet.of(max, average));
+
+ addMetric(metrics, ContainerMetrics.SERVER_BYTES_RECEIVED, EnumSet.of(sum, count));
+ addMetric(metrics, ContainerMetrics.SERVER_BYTES_SENT, EnumSet.of(sum, count));
+
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_UNHANDLED_EXCEPTIONS, EnumSet.of(sum, count));
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_CAPACITY, EnumSet.of(max));
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_WORK_QUEUE_SIZE, EnumSet.of(sum, count, min, max));
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_REJECTED_TASKS, EnumSet.of(sum, count));
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_SIZE.max());
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_MAX_ALLOWED_SIZE.max());
+ addMetric(metrics, ContainerMetrics.JDISC_THREAD_POOL_ACTIVE_THREADS, EnumSet.of(sum, count, min, max));
+
+ addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_BUSY_THREADS, EnumSet.of(sum, count, max));
+ addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_TOTAL_THREADS.max());
+ addMetric(metrics, ContainerMetrics.JETTY_THREADPOOL_QUEUE_SIZE.max());
+
+ addMetric(metrics, ContainerMetrics.HTTPAPI_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, ContainerMetrics.HTTPAPI_PENDING, EnumSet.of(max, sum, count));
+ addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_OPERATIONS.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_UPDATES.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_REMOVES.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_NUM_PUTS.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_SUCCEEDED.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_PARSE_ERROR.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_CONDITION_NOT_MET.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_NOT_FOUND.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_UNKNOWN.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_INSUFFICIENT_STORAGE.rate());
+ addMetric(metrics, ContainerMetrics.HTTPAPI_FAILED_TIMEOUT.rate());
+
+ addMetric(metrics, ContainerMetrics.MEM_HEAP_TOTAL.average());
+ addMetric(metrics, ContainerMetrics.MEM_HEAP_FREE.average());
+ addMetric(metrics, ContainerMetrics.MEM_HEAP_USED, EnumSet.of(average, max));
+ addMetric(metrics, ContainerMetrics.MEM_DIRECT_TOTAL.average());
+ addMetric(metrics, ContainerMetrics.MEM_DIRECT_FREE.average());
+ addMetric(metrics, ContainerMetrics.MEM_DIRECT_USED, EnumSet.of(average, max));
+ addMetric(metrics, ContainerMetrics.MEM_DIRECT_COUNT.max());
+ addMetric(metrics, ContainerMetrics.MEM_NATIVE_TOTAL.average());
+ addMetric(metrics, ContainerMetrics.MEM_NATIVE_FREE.average());
+ addMetric(metrics, ContainerMetrics.MEM_NATIVE_USED.average());
+
+ addMetric(metrics, ContainerMetrics.JDISC_MEMORY_MAPPINGS.max());
+ addMetric(metrics, ContainerMetrics.JDISC_OPEN_FILE_DESCRIPTORS.max());
+
+ addMetric(metrics, ContainerMetrics.JDISC_GC_MS.max());
+ addMetric(metrics, ContainerMetrics.CPU.baseName());
+
+ addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_TOTAL.sum());
+
+ addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.max());
+
+ addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS, EnumSet.of(min, max, last)); // TODO: Vespa 9: Remove max, last
+ addMetric(metrics, ContainerMetrics.CONTAINER_IAM_ROLE_EXPIRY_SECONDS.baseName());
+
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_PREMATURELY_CLOSED.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_REQUESTS_PER_CONNECTION, EnumSet.of(sum, count, min, max, average));
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_URI_LENGTH, EnumSet.of(sum, count, max));
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_CONTENT_SIZE, EnumSet.of(sum, count, max));
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUESTS, EnumSet.of(rate, count));
+
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_MISSING_CLIENT_CERT.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_EXPIRED_CLIENT_CERT.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INVALID_CLIENT_CERT.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_PROTOCOLS.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_INCOMPATIBLE_CHIFERS.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_CONNECTION_CLOSED.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_SSL_HANDSHAKE_FAILURE_UNKNOWN.rate());
+
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_BLOCKED_REQUESTS.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTER_RULE_ALLOWED_REQUESTS.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_HANDLED.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_REQUEST_UNHANDLED.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_HANDLED.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_FILTERING_RESPONSE_UNHANDLED.rate());
+
+ addMetric(metrics, ContainerMetrics.JDISC_HTTP_HANDLER_UNHANDLED_EXCEPTIONS.rate());
+
+ addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_FAILED_COMPONENT_GRAPHS.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_APPLICATION_COMPONENT_GRAPH_RECONFIGURATIONS.rate());
+
+ addMetric(metrics, ContainerMetrics.FEED_LATENCY, EnumSet.of(sum, count, max));
+ addMetric(metrics, ContainerMetrics.FEED_HTTP_REQUESTS, EnumSet.of(count, rate));
+
+ addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_SUCCEEDED.rate());
+ addMetric(metrics, ContainerMetrics.JDISC_TLS_CAPABILITY_CHECKS_FAILED.rate());
+
+ return metrics;
+ }
+
+ private static Set<Metric> getClusterControllerMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT.max());
+ addMetric(metrics, ClusterControllerMetrics.INITIALIZING_COUNT.max());
+ addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT.max());
+ addMetric(metrics, ClusterControllerMetrics.RETIRED_COUNT.max());
+ addMetric(metrics, ClusterControllerMetrics.UP_COUNT, EnumSet.of(max, last)); // TODO: Remove last
+ addMetric(metrics, ClusterControllerMetrics.CLUSTER_STATE_CHANGE_COUNT.baseName());
+ addMetric(metrics, ClusterControllerMetrics.BUSY_TICK_TIME_MS, EnumSet.of(max, sum, count));
+ addMetric(metrics, ClusterControllerMetrics.IDLE_TICK_TIME_MS, EnumSet.of(max, sum, count));
+
+ addMetric(metrics, ClusterControllerMetrics.WORK_MS, EnumSet.of(sum, count));
+
+ addMetric(metrics, ClusterControllerMetrics.IS_MASTER.max());
+
+ // TODO(hakonhall): Update this name once persistent "count" metrics has been implemented.
+ // DO NOT RELY ON THIS METRIC YET.
+ addMetric(metrics, ClusterControllerMetrics.NODE_EVENT_COUNT.baseName());
+ addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT.max());
+ addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION.max());
+ addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION.max());
+ addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MEMORY_LIMIT.max());
+ addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_DISK_LIMIT.max());
+ addMetric(metrics, ClusterControllerMetrics.REINDEXING_PROGRESS.max());
+
+ return metrics;
+ }
+
+ private static Set<Metric> getDocprocMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ // per chain
+ metrics.add(new Metric("documents_processed.rate"));
+
+ addMetric(metrics, ContainerMetrics.DOCPROC_PROC_TIME, EnumSet.of(sum, count, max));
+ addMetric(metrics, ContainerMetrics.DOCPROC_DOCUMENTS, EnumSet.of(sum, count, max, min));
+
+ return metrics;
+ }
+
+ private static Set<Metric> getSearchChainMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ addMetric(metrics, ContainerMetrics.PEAK_QPS.max());
+ addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max));
+ addMetric(metrics, ContainerMetrics.QUERIES.rate());
+ addMetric(metrics, ContainerMetrics.QUERY_CONTAINER_LATENCY, EnumSet.of(sum, count, max));
+ addMetric(metrics, ContainerMetrics.QUERY_LATENCY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
+ addMetric(metrics, ContainerMetrics.QUERY_TIMEOUT, EnumSet.of(sum, count, max, min, ninety_five_percentile, ninety_nine_percentile));
+ addMetric(metrics, ContainerMetrics.FAILED_QUERIES.rate());
+ addMetric(metrics, ContainerMetrics.DEGRADED_QUERIES.rate());
+ addMetric(metrics, ContainerMetrics.HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
+ addMetric(metrics, ContainerMetrics.SEARCH_CONNECTIONS, EnumSet.of(sum, count, max));
+ addMetric(metrics, ContainerMetrics.QUERY_HIT_OFFSET, EnumSet.of(sum, count, max));
+ addMetric(metrics, ContainerMetrics.DOCUMENTS_COVERED.count());
+ addMetric(metrics, ContainerMetrics.DOCUMENTS_TOTAL.count());
+ addMetric(metrics, ContainerMetrics.DOCUMENTS_TARGET_TOTAL.count());
+ addMetric(metrics, ContainerMetrics.JDISC_RENDER_LATENCY, EnumSet.of(min, max, count, sum));
+ addMetric(metrics, ContainerMetrics.QUERY_ITEM_COUNT, EnumSet.of(max, sum, count));
+ addMetric(metrics, ContainerMetrics.TOTAL_HITS_PER_QUERY, EnumSet.of(sum, count, max, ninety_five_percentile, ninety_nine_percentile));
+ addMetric(metrics, ContainerMetrics.EMPTY_RESULTS.rate());
+ addMetric(metrics, ContainerMetrics.REQUESTS_OVER_QUOTA, EnumSet.of(rate, count));
+
+ addMetric(metrics, ContainerMetrics.RELEVANCE_AT_1, EnumSet.of(sum, count));
+ addMetric(metrics, ContainerMetrics.RELEVANCE_AT_3, EnumSet.of(sum, count));
+ addMetric(metrics, ContainerMetrics.RELEVANCE_AT_10, EnumSet.of(sum, count));
+
+ // Errors from search container
+ addMetric(metrics, ContainerMetrics.ERROR_TIMEOUT.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_BACKENDS_OOS.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_PLUGIN_FAILURE.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_BACKEND_COMMUNICATION_ERROR.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_EMPTY_DOCUMENT_SUMMARIES.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_PARAMETER.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_INTERNAL_SERVER_ERROR.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_MISCONFIGURED_SERVER.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_INVALID_QUERY_TRANSFORMATION.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_RESULTS_WITH_ERRORS.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_UNSPECIFIED.rate());
+ addMetric(metrics, ContainerMetrics.ERROR_UNHANDLED_EXCEPTION.rate());
+
+ return metrics;
+ }
+
+ private static Set<Metric> getSearchNodeMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_REMOVED.max());
+
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_DOCS_IN_MEMORY.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_HEART_BEAT_AGE.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_DOCS.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_LATENCY, EnumSet.of(max, sum, count));
+
+ // Search protocol
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REQUEST_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_QUERY_REPLY_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_LATENCY, EnumSet.of(max, sum, average));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUEST_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REPLY_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_SEARCH_PROTOCOL_DOCSUM_REQUESTED_DOCUMENTS.count());
+
+ // Executors shared between all document dbs
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_PROTON_UTILIZATION, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FLUSH_UTILIZATION, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_WAKEUPS.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_MATCH_UTILIZATION, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_DOCSUM_UTILIZATION, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_SHARED_UTILIZATION, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_WARMUP_UTILIZATION, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_EXECUTOR_FIELD_WRITER_UTILIZATION, EnumSet.of(max, sum, count));
+
+ // jobs
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_TOTAL.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_ATTRIBUTE_FLUSH.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_MEMORY_INDEX_FLUSH.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DISK_INDEX_FUSION.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_FLUSH.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_DOCUMENT_STORE_COMPACT.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_BUCKET_MOVE.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_LID_SPACE_COMPACT.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_JOB_REMOVED_DOCUMENTS_PRUNE.average());
+
+ // Threading service (per document db)
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_MASTER_UTILIZATION, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_INDEX_UTILIZATION, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_ACCEPTED.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_THREADING_SERVICE_SUMMARY_UTILIZATION, EnumSet.of(max, sum, count));
+
+ // lid space
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_LIMIT.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_USED_LIDS.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_LIMIT.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_USED_LIDS.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_LIMIT.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_USED_LIDS.max());
+
+ // bucket move
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_BUCKET_MOVE_BUCKETS_PENDING.max());
+
+ // resource usage
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TOTAL_UTILIZATION.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK_USAGE_TRANSIENT.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TOTAL_UTILIZATION.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TRANSIENT.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_MAPPINGS.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_OPEN_FILE_DESCRIPTORS.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MALLOC_ARENA.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_ADDRESS_SPACE.max());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_FEEDING_BLOCKED.max());
+
+ // CPU util
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_SETUP, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_READ, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_WRITE, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_COMPACT, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_CPU_UTIL_OTHER, EnumSet.of(max, sum, count));
+
+ // transaction log
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_ENTRIES.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_REPLAY_TIME.max());
+
+ // document store
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_USAGE.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_BLOAT.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_USAGE.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_DISK_BLOAT.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_USAGE.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_DISK_BLOAT.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MAX_BUCKET_SPREAD.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_DOCUMENT_STORE_MEMORY_USAGE_ALLOCATED_BYTES.average());
+
+ // document store cache
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_HIT_RATE.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_LOOKUPS.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_MEMORY_USAGE.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_HIT_RATE.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_LOOKUPS.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_DOCUMENT_STORE_CACHE_INVALIDATIONS.rate());
+
+ // attribute
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_ATTRIBUTE_MEMORY_USAGE_ALLOCATED_BYTES.average());
+
+ // index
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_MEMORY_USAGE_ALLOCATED_BYTES.average());
+
+ // matching
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERIES.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_SOFT_DOOMED_QUERIES.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_QUERY_SETUP_TIME, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_DOCS_MATCHED, EnumSet.of(rate, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERIES.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOMED_QUERIES.rate());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_SOFT_DOOM_FACTOR, EnumSet.of(min, max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_QUERY_SETUP_TIME, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_GROUPING_TIME, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_RERANK_TIME, EnumSet.of(max, sum, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_DOCS_MATCHED, EnumSet.of(rate, count));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MATCHING_RANK_PROFILE_LIMITED_QUERIES.rate());
+
+ // feeding
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_OPERATIONS, EnumSet.of(max, sum, count, rate));
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_FEEDING_COMMIT_LATENCY, EnumSet.of(max, sum, count));
+
+ return metrics;
+ }
+
+ private static Set<Metric> getStorageMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ // TODO - Vespa 9: For the purpose of this file and likely elsewhere, all but the last aggregate specifier,
+ // TODO - Vespa 9: such as 'average' and 'sum' in the metric names below are just confusing and can be mentally
+ // TODO - Vespa 9: disregarded when considering metric names. Clean up for Vespa 9.
+ addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BUCKETS.average());
+ addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_DOCS.average());
+ addMetric(metrics, StorageMetrics.VDS_DATASTORED_ALLDISKS_BYTES.average());
+ addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEVISITORLIFETIME, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_COMPLETED.rate());
+ addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_CREATED.rate());
+ addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_FAILED.rate());
+ addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEMESSAGESENDTIME, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_VISITOR_ALLTHREADS_AVERAGEPROCESSINGTIME, EnumSet.of(max, sum, count));
+
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_AVERAGEQUEUEWAIT, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ACTIVE_OPERATIONS_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WINDOW_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_WAITING_THREADS, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_THROTTLE_ACTIVE_TOKENS, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEMETADATAREADLATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAREADLATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGEDATAWRITELATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_PUT_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_MERGE_REMOVE_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_RPC_DIRECT_DISPATCHES.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_THROTTLED_PERSISTENCE_THREAD_POLLS.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLSTRIPES_TIMEOUTS_WAITING_FOR_THROTTLE_TOKEN.rate());
+
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_FAILED.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_TEST_AND_SET_FAILED.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_PUT_REQUEST_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_FAILED.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_TEST_AND_SET_FAILED.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_REQUEST_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_FAILED.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_GET_REQUEST_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_FAILED.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_TEST_AND_SET_FAILED.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_UPDATE_REQUEST_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_CREATEITERATOR_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_VISIT_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_REMOVE_LOCATION_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SPLITBUCKETS_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_JOINBUCKETS_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_COUNT.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_FAILED.rate());
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_DELETEBUCKETS_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_FILESTOR_ALLTHREADS_SETBUCKETSTATES_COUNT.rate());
+
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_AVERAGEQUEUEWAITINGTIME, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_QUEUESIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_ACTIVE_WINDOW_SIZE, EnumSet.of(max, sum, count));
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_BOUNCED_DUE_TO_BACK_PRESSURE.rate());
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_LOCALLYEXECUTEDMERGES_OK.rate());
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_OK.rate());
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_BUSY.rate());
+ addMetric(metrics, StorageMetrics.VDS_MERGETHROTTLER_MERGECHAINS_FAILURES_TOTAL.rate());
+
+ return metrics;
+ }
+
+ private static Set<Metric> getDistributorMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_RECHECKING.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_IDEALSTATE_DIFF.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOFEWCOPIES.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_TOOMANYCOPIES.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_BUCKETS_NOTRUSTED.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MAX_OBSERVED_TIME_SINCE_LAST_GC_SEC.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_DONE_FAILED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_DELETE_BUCKET_PENDING.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_DONE_FAILED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_MERGE_BUCKET_PENDING.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_DONE_FAILED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_SPLIT_BUCKET_PENDING.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_DONE_FAILED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_JOIN_BUCKET_PENDING.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DONE_FAILED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_PENDING.average());
+ addMetric(metrics, DistributorMetrics.VDS_IDEALSTATE_GARBAGE_COLLECTION_DOCUMENTS_REMOVED, EnumSet.of(count, rate));
+
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TOTAL.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTFOUND.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TEST_AND_SET_FAILED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_CONCURRENT_MUTATIONS.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTCONNECTED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_NOTREADY.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_WRONGDISTRIBUTOR.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_SAFE_TIME_NOT_REACHED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_STORAGEFAILURE.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_TIMEOUT.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_BUSY.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_PUTS_FAILURES_INCONSISTENT_BUCKET.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TOTAL.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_NOTFOUND.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_TEST_AND_SET_FAILED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVES_FAILURES_CONCURRENT_MUTATIONS.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TOTAL.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_NOTFOUND.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_TEST_AND_SET_FAILED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_FAILURES_CONCURRENT_MUTATIONS.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_UPDATES_DIVERGING_TIMESTAMP_UPDATES.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_REMOVELOCATIONS_FAILURES_TOTAL.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_TOTAL.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_GETS_FAILURES_NOTFOUND.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_LATENCY, EnumSet.of(max, sum, count));
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_OK.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TOTAL.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTREADY.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTCONNECTED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_WRONGDISTRIBUTOR.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_SAFE_TIME_NOT_REACHED.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_STORAGEFAILURE.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_TIMEOUT.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_BUSY.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_INCONSISTENT_BUCKET.rate());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_VISITOR_FAILURES_NOTFOUND.rate());
+
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_DOCSSTORED.average());
+ addMetric(metrics, DistributorMetrics.VDS_DISTRIBUTOR_BYTESSTORED.average());
+
+ addMetric(metrics, DistributorMetrics.VDS_BOUNCER_CLOCK_SKEW_ABORTS.count());
+
+ return metrics;
+ }
+
+ private static void addMetric(Set<Metric> metrics, String nameWithSuffix) {
+ metrics.add(new Metric(nameWithSuffix));
+ }
+
+ private static void addMetric(Set<Metric> metrics, VespaMetrics metric, EnumSet<Suffix> suffixes) {
+ suffixes.forEach(suffix -> metrics.add(new Metric(metric.baseName() + "." + suffix.suffix())));
+ }
+
+ private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffixes) {
+ for (String suffix : aggregateSuffixes) {
+ metrics.add(new Metric(metricName + "." + suffix));
+ }
+ }
+
+}
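
Note for readers: the EnumSet-based addMetric overload above is what expands a single metric into several suffixed entries. A minimal, self-contained sketch of that expansion (Suffix and Metric below are hypothetical stand-ins for the ai.vespa.metrics types, not the real classes):

    import java.util.EnumSet;
    import java.util.LinkedHashSet;
    import java.util.Set;

    class SuffixExpansionSketch {
        // Hypothetical stand-ins for ai.vespa.metrics.Suffix and ai.vespa.metrics.set.Metric.
        enum Suffix {
            max, sum, count, rate, last, average, min;
            String suffix() { return name(); }
        }

        record Metric(String name) { }

        // Mirrors the addMetric(Set<Metric>, VespaMetrics, EnumSet<Suffix>) helper above:
        // each suffix yields one Metric named "<baseName>.<suffix>".
        static void addMetric(Set<Metric> metrics, String baseName, EnumSet<Suffix> suffixes) {
            suffixes.forEach(suffix -> metrics.add(new Metric(baseName + "." + suffix.suffix())));
        }

        public static void main(String[] args) {
            Set<Metric> metrics = new LinkedHashSet<>();
            addMetric(metrics, "vds.filestor.allthreads.put.latency", EnumSet.of(Suffix.max, Suffix.sum, Suffix.count));
            metrics.forEach(m -> System.out.println(m.name()));
            // vds.filestor.allthreads.put.latency.max
            // vds.filestor.allthreads.put.latency.sum
            // vds.filestor.allthreads.put.latency.count
        }
    }
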
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
index b7ed7293d6c..6c4626238eb 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
@@ -1,4 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+// TODO: Keep the set of metrics in this set stable until Vespa 9.
+// TODO: Vespa 9: Let this class be replaced by Vespa9VespaMetricSet.
package ai.vespa.metrics.set;
import ai.vespa.metrics.ClusterControllerMetrics;
@@ -17,6 +20,7 @@ import ai.vespa.metrics.VespaMetrics;
import java.util.Collections;
import java.util.EnumSet;
import java.util.LinkedHashSet;
+import java.util.List;
import java.util.Set;
import static ai.vespa.metrics.Suffix.average;
@@ -29,7 +33,6 @@ import static ai.vespa.metrics.Suffix.ninety_nine_percentile;
import static ai.vespa.metrics.Suffix.rate;
import static ai.vespa.metrics.Suffix.sum;
import static ai.vespa.metrics.set.DefaultVespaMetrics.defaultVespaMetricSet;
-import static java.util.Collections.singleton;
/**
* Encapsulates vespa service metrics.
@@ -38,9 +41,14 @@ import static java.util.Collections.singleton;
*/
public class VespaMetricSet {
- public static final MetricSet vespaMetricSet = new MetricSet("vespa",
- getVespaMetrics(),
- singleton(defaultVespaMetricSet));
+ public static final MetricSet vespaMetricSet = createMetricSet();
+
+ private static MetricSet createMetricSet() {
+ return new MetricSet("vespa",
+ getVespaMetrics(),
+ List.of(defaultVespaMetricSet,
+ BasicMetricSets.containerHttpStatusMetrics()));
+ }
private static Set<Metric> getVespaMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
@@ -62,7 +70,7 @@ public class VespaMetricSet {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, SentinelMetrics.SENTINEL_RESTARTS.count());
- addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS.last());
+ addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS, EnumSet.of(max, sum, last)); // TODO: Vespa 9: Remove last, sum?
addMetric(metrics, SentinelMetrics.SENTINEL_UPTIME.last());
addMetric(metrics, SentinelMetrics.SENTINEL_RUNNING, EnumSet.of(count, last));
@@ -80,7 +88,7 @@ public class VespaMetricSet {
// Java (JRT) TLS metrics
addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_TLS_CERTIFICATE_VERIFICATION_FAILURES.baseName());
addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_PEER_AUTHORIZATION_FAILURES.baseName());
- addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECIONTS_ESTABLISHED.baseName());
+ addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_TLS_CONNECTIONS_ESTABLISHED.baseName());
addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_TLS_CONNECTIONS_ESTABLISHED.baseName());
addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_SERVER_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName());
addMetric(metrics, ContainerMetrics.JRT_TRANSPORT_CLIENT_UNENCRYPTED_CONNECTIONS_ESTABLISHED.baseName());
@@ -119,6 +127,12 @@ public class VespaMetricSet {
private static Set<Metric> getContainerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
+ addMetric(metrics, ContainerMetrics.HTTP_STATUS_1XX.rate());
+ addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.rate());
+ addMetric(metrics, ContainerMetrics.HTTP_STATUS_3XX.rate());
+ addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.rate());
+ addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.rate());
+
addMetric(metrics, ContainerMetrics.APPLICATION_GENERATION.baseName());
addMetric(metrics, ContainerMetrics.HANDLED_REQUESTS.count());
@@ -177,10 +191,10 @@ public class VespaMetricSet {
addMetric(metrics, ContainerMetrics.JDISC_GC_COUNT, EnumSet.of(average, max, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, ContainerMetrics.JDISC_GC_MS, EnumSet.of(average, max, last)); // TODO: Vespa 9: Remove last
- addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS.last());
+ addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_TOTAL, EnumSet.of(sum, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, ContainerMetrics.JDISC_DEACTIVATED_CONTAINERS_WITH_RETAINED_REFS.last());
- addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE.last());
+ addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_IS_ACTIVE, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_COUNT.last());
addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_FAILURE_COUNT.last());
addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_ACTIVATION_MILLIS.last());
@@ -188,15 +202,9 @@ public class VespaMetricSet {
addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_FAILURE_COUNT.last());
addMetric(metrics, ContainerMetrics.JDISC_SINGLETON_DEACTIVATION_MILLIS.last());
- addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS.last());
+ addMetric(metrics, ContainerMetrics.ATHENZ_TENANT_CERT_EXPIRY_SECONDS, EnumSet.of(min, max, last)); // TODO: Vespa 9: Remove last, max
addMetric(metrics, ContainerMetrics.CONTAINER_IAM_ROLE_EXPIRY_SECONDS.baseName());
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_1XX.rate());
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_2XX.rate());
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_3XX.rate());
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_4XX.rate());
- addMetric(metrics, ContainerMetrics.HTTP_STATUS_5XX.rate());
-
addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_PREMATURELY_CLOSED.rate());
addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_REQUESTS_PER_CONNECTION, EnumSet.of(sum, count, min, max, average));
addMetric(metrics, ContainerMetrics.JDISC_HTTP_REQUEST_URI_LENGTH, EnumSet.of(sum, count, max));
@@ -243,19 +251,19 @@ public class VespaMetricSet {
private static Set<Metric> getClusterControllerMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
- addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT.last());
- addMetric(metrics, ClusterControllerMetrics.INITIALIZING_COUNT.last());
- addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT.last());
- addMetric(metrics, ClusterControllerMetrics.RETIRED_COUNT.last());
+ addMetric(metrics, ClusterControllerMetrics.DOWN_COUNT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, ClusterControllerMetrics.INITIALIZING_COUNT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, ClusterControllerMetrics.MAINTENANCE_COUNT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, ClusterControllerMetrics.RETIRED_COUNT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, ClusterControllerMetrics.STOPPING_COUNT.last());
- addMetric(metrics, ClusterControllerMetrics.UP_COUNT.last());
+ addMetric(metrics, ClusterControllerMetrics.UP_COUNT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, ClusterControllerMetrics.CLUSTER_STATE_CHANGE_COUNT.baseName());
addMetric(metrics, ClusterControllerMetrics.BUSY_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); // TODO: Vespa 9: Remove last
addMetric(metrics, ClusterControllerMetrics.IDLE_TICK_TIME_MS, EnumSet.of(last, max, sum, count)); // TODO: Vespa 9: Remove last
addMetric(metrics, ClusterControllerMetrics.WORK_MS, EnumSet.of(last, sum, count)); // TODO: Vespa 9: Remove last
- addMetric(metrics, ClusterControllerMetrics.IS_MASTER.last());
+ addMetric(metrics, ClusterControllerMetrics.IS_MASTER, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, ClusterControllerMetrics.REMOTE_TASK_QUEUE_SIZE.last());
// TODO(hakonhall): Update this name once persistent "count" metrics have been implemented.
// DO NOT RELY ON THIS METRIC YET.
@@ -263,9 +271,9 @@ public class VespaMetricSet {
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_NODES_ABOVE_LIMIT, EnumSet.of(last, max)); // TODO: Vespa 9: Remove last
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_MEMORY_UTILIZATION, EnumSet.of(last, max)); // TODO: Vespa 9: Remove last
addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MAX_DISK_UTILIZATION, EnumSet.of(last, max)); // TODO: Vespa 9: Remove last
- addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MEMORY_LIMIT.last());
- addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_DISK_LIMIT.last());
- addMetric(metrics, ClusterControllerMetrics.REINDEXING_PROGRESS.last());
+ addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_MEMORY_LIMIT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, ClusterControllerMetrics.RESOURCE_USAGE_DISK_LIMIT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, ClusterControllerMetrics.REINDEXING_PROGRESS, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
return metrics;
}
@@ -331,15 +339,15 @@ public class VespaMetricSet {
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_CONFIG_GENERATION.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_REMOVED.last());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_TOTAL, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_READY, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_ACTIVE, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DOCUMENTS_REMOVED, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_DOCS_IN_MEMORY.last());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_INDEX_DOCS_IN_MEMORY, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_DISK_USAGE.last());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_MEMORY_USAGE_ALLOCATED_BYTES.max());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_HEART_BEAT_AGE.last());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_HEART_BEAT_AGE, EnumSet.of(min, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_DOCS.rate());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCSUM_LATENCY, EnumSet.of(max, sum, count));
@@ -410,22 +418,22 @@ public class VespaMetricSet {
// lid space
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_BLOAT_FACTOR.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_LIMIT.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_HIGHEST_USED_LID.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_USED_LIDS.last());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_LID_LIMIT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_HIGHEST_USED_LID, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_LID_SPACE_USED_LIDS, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_BLOAT_FACTOR.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_FRAGMENTATION_FACTOR.average());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_LIMIT.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_HIGHEST_USED_LID.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_USED_LIDS.last());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_LID_LIMIT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_HIGHEST_USED_LID, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_NOTREADY_LID_SPACE_USED_LIDS, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_BLOAT_FACTOR.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_FRAGMENTATION_FACTOR.average());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_LIMIT.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_HIGHEST_USED_LID.last());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_USED_LIDS.last());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_LID_LIMIT, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_HIGHEST_USED_LID, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_REMOVED_LID_SPACE_USED_LIDS, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
// bucket move
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_BUCKET_MOVE_BUCKETS_PENDING.last());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_BUCKET_MOVE_BUCKETS_PENDING, EnumSet.of(max, sum, last)); // TODO: Vespa 9: Remove last
// resource usage
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_DISK.average());
@@ -438,7 +446,7 @@ public class VespaMetricSet {
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_USAGE_TRANSIENT.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MEMORY_MAPPINGS.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_OPEN_FILE_DESCRIPTORS.max());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED, EnumSet.of(max,last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_FEEDING_BLOCKED, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_RESOURCE_USAGE_MALLOC_ARENA.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_ADDRESS_SPACE.max());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_ATTRIBUTE_RESOURCE_USAGE_FEEDING_BLOCKED.max());
@@ -453,7 +461,7 @@ public class VespaMetricSet {
// transaction log
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_ENTRIES.average());
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_DISK_USAGE.average());
- addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_REPLAY_TIME.last());
+ addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_TRANSACTIONLOG_REPLAY_TIME, EnumSet.of(max, last)); // TODO: Vespa 9: Remove last
// document store
addMetric(metrics, SearchNodeMetrics.CONTENT_PROTON_DOCUMENTDB_READY_DOCUMENT_STORE_DISK_USAGE.average());
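
For context on the createMetricSet() refactoring above: the "vespa" set now composes child sets (defaultVespaMetricSet plus BasicMetricSets.containerHttpStatusMetrics()) instead of a singleton child. A simplified sketch of how such composition flattens into one name-keyed map; the real MetricSet also carries dimensions and merges duplicates, which this sketch does not attempt:

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    class MetricSetSketch {
        // Simplified stand-ins for ai.vespa.metrics.set.Metric and MetricSet.
        record Metric(String name) { }

        static class MetricSet {
            private final Map<String, Metric> metrics = new LinkedHashMap<>();

            MetricSet(Set<Metric> own, List<MetricSet> children) {
                own.forEach(m -> metrics.put(m.name(), m));
                // Child sets (e.g. defaultVespaMetricSet, containerHttpStatusMetrics) are
                // flattened into the parent so consumers see one name-keyed map.
                children.forEach(child -> child.metrics.forEach(metrics::putIfAbsent));
            }

            Map<String, Metric> getMetrics() { return metrics; }
        }

        public static void main(String[] args) {
            MetricSet httpStatus = new MetricSet(
                    Set.of(new Metric("http.status.2xx.rate"), new Metric("http.status.5xx.rate")),
                    List.of());
            MetricSet vespa = new MetricSet(Set.of(new Metric("jdisc.gc.ms.max")), List.of(httpStatus));
            vespa.getMetrics().keySet().forEach(System.out::println);
        }
    }
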
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/monitoring/MetricSetTest.java b/metrics/src/test/java/ai/vespa/metrics/MetricSetTest.java
index 8235f45aaec..788e9e9836c 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/monitoring/MetricSetTest.java
+++ b/metrics/src/test/java/ai/vespa/metrics/MetricSetTest.java
@@ -1,11 +1,12 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.model.admin.monitoring;
+package ai.vespa.metrics;
import ai.vespa.metrics.set.Metric;
import ai.vespa.metrics.set.MetricSet;
import com.google.common.collect.Sets;
import org.junit.jupiter.api.Test;
+import java.util.EnumSet;
import java.util.List;
import java.util.Map;
@@ -60,4 +61,54 @@ public class MetricSetTest {
assertEquals(3, combinedMetric.dimensions.size());
assertEquals("parentCommonVal", combinedMetric.dimensions.get(COMMON_DIMENSION_KEY));
}
+
+ @Test
+ void it_can_be_generated_from_builder() {
+ MetricSet metricSet = new MetricSet.Builder("test")
+ .metric("metric1")
+ .metric(TestMetrics.ENUM_METRIC1.last())
+ .metric(TestMetrics.ENUM_METRIC2, EnumSet.of(Suffix.sum, Suffix.count))
+ .metric(new Metric("metric2"))
+ .metrics(List.of(new Metric("metric3")))
+ .metricSet(new MetricSet.Builder("child")
+ .metric("child_metric1")
+ .metric("child_metric2")
+ .build())
+ .build();
+
+ Map<String, Metric> metrics = metricSet.getMetrics();
+ assertEquals(8, metrics.size());
+ assertNotNull(metrics.get("metric1"));
+ assertNotNull(metrics.get("emum-metric1.last"));
+ assertNotNull(metrics.get("emum-metric2.sum"));
+ assertNotNull(metrics.get("emum-metric2.count"));
+ assertNotNull(metrics.get("metric2"));
+ assertNotNull(metrics.get("metric3"));
+ assertNotNull(metrics.get("child_metric1"));
+ assertNotNull(metrics.get("child_metric1"));
+ }
+
+ enum TestMetrics implements VespaMetrics {
+ ENUM_METRIC1("emum-metric1"),
+ ENUM_METRIC2("emum-metric2");
+
+ private final String name;
+
+ TestMetrics(String name) {
+ this.name = name;
+ }
+
+ public String baseName() {
+ return name;
+ }
+
+ public Unit unit() {
+ return null;
+ }
+
+ public String description() {
+ return null;
+ }
+
+ }
}
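
The TestMetrics enum above follows the pattern used by the production metric enums (ContainerMetrics, StorageMetrics, ...): each constant carries a base name and the shared interface supplies suffix helpers. A hedged sketch of that pattern, with a hypothetical MetricName interface that returns plain strings rather than Metric objects:

    class MetricEnumSketch {
        // Hypothetical interface in the spirit of ai.vespa.metrics.VespaMetrics: each enum
        // constant carries a base name, and default methods build suffixed metric names.
        interface MetricName {
            String baseName();
            default String last() { return baseName() + ".last"; }
            default String rate() { return baseName() + ".rate"; }
            default String max()  { return baseName() + ".max"; }
        }

        enum TestMetrics implements MetricName {
            ENUM_METRIC1("enum-metric1");

            private final String name;
            TestMetrics(String name) { this.name = name; }
            public String baseName() { return name; }
        }

        public static void main(String[] args) {
            System.out.println(TestMetrics.ENUM_METRIC1.last()); // enum-metric1.last
            System.out.println(TestMetrics.ENUM_METRIC1.rate()); // enum-metric1.rate
        }
    }
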
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/monitoring/MetricTest.java b/metrics/src/test/java/ai/vespa/metrics/MetricTest.java
index f07b8c59322..7a0b85f82cc 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/monitoring/MetricTest.java
+++ b/metrics/src/test/java/ai/vespa/metrics/MetricTest.java
@@ -1,5 +1,5 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.model.admin.monitoring;
+package ai.vespa.metrics;
import ai.vespa.metrics.set.Metric;
import com.google.common.collect.ImmutableMap;
diff --git a/metrics/src/tests/metricmanagertest.cpp b/metrics/src/tests/metricmanagertest.cpp
index 9e6b0f40be3..a6dd141576f 100644
--- a/metrics/src/tests/metricmanagertest.cpp
+++ b/metrics/src/tests/metricmanagertest.cpp
@@ -462,6 +462,7 @@ TEST_F(MetricManagerTest, test_snapshots)
{
MetricLockGuard lockGuard(mm.getMetricLock());
mm.registerMetric(lockGuard, mySet.set);
+ EXPECT_FALSE(mm.any_snapshots_taken(lockGuard)); // well-defined prior to init()
}
mm.init(ConfigUri("raw:"
"consumer[2]\n"
@@ -474,6 +475,7 @@ TEST_F(MetricManagerTest, test_snapshots)
MetricNameVisitor visitor;
{
MetricLockGuard lockGuard(mm.getMetricLock());
+ EXPECT_FALSE(mm.any_snapshots_taken(lockGuard)); // No snapshots yet
mm.visit(lockGuard, mm.getActiveMetrics(lockGuard), visitor, "snapper");
const MetricManager::ConsumerSpec * consumerSpec = mm.getConsumerSpec(lockGuard, "snapper");
EXPECT_EQ(std::string("\n"
@@ -506,6 +508,10 @@ TEST_F(MetricManagerTest, test_snapshots)
ASSERT_VALUES(mm, 5 * 60s, "2,4,4,1,7,9,1,1,8,2,10");
ASSERT_VALUES(mm, 60 * 60s, "");
ASSERT_VALUES(mm, 0 * 60s, "2,4,4,1,7,9,1,1,8,2,10");
+ {
+ auto guard = mm.getMetricLock();
+ EXPECT_TRUE(mm.any_snapshots_taken(guard)); // At least one snapshot has been taken
+ }
// Adding metrics done in second five minute period. Total should
// be updated to account for both
@@ -567,6 +573,14 @@ TEST_F(MetricManagerTest, test_json_output)
"consumer[0].tags[1]\n"
"consumer[0].tags[0] snaptest\n"));
+ {
+ // No snapshots have been taken yet, so the non-total getMetrics call should return
+ // the empty string (i.e. no metrics produced).
+ metrics::StateApiAdapter adapter(mm);
+ auto json_str = adapter.getMetrics("snapper");
+ EXPECT_EQ(json_str, "");
+ }
+
takeSnapshots(mm, 1000);
// Adding metrics to have some values in them
diff --git a/metrics/src/vespa/metrics/countmetric.h b/metrics/src/vespa/metrics/countmetric.h
index 5225cd58f12..fe1f613fbf7 100644
--- a/metrics/src/vespa/metrics/countmetric.h
+++ b/metrics/src/vespa/metrics/countmetric.h
@@ -19,7 +19,7 @@
namespace metrics {
struct AbstractCountMetric : public Metric {
- bool visit(MetricVisitor& visitor, bool tagAsAutoGenerated = false) const override
+ bool visit(MetricVisitor& visitor, bool tagAsAutoGenerated) const override
{
return visitor.visitCountMetric(*this, tagAsAutoGenerated);
}
diff --git a/metrics/src/vespa/metrics/jsonwriter.h b/metrics/src/vespa/metrics/jsonwriter.h
index e4a2e7ca10b..379ed215198 100644
--- a/metrics/src/vespa/metrics/jsonwriter.h
+++ b/metrics/src/vespa/metrics/jsonwriter.h
@@ -18,7 +18,7 @@ class JsonWriter : public MetricVisitor, public vespalib::JsonStreamTypes {
uint64_t _period;
public:
- JsonWriter(vespalib::JsonStream&);
+ explicit JsonWriter(vespalib::JsonStream&);
private:
bool visitSnapshot(const MetricSnapshot&) override;
@@ -29,7 +29,6 @@ private:
bool visitValueMetric(const AbstractValueMetric&, bool autoGenerated) override;
void doneVisiting() override;
- void checkIfArrayNeedsToBeStarted();
void writeCommonPrefix(const Metric& m);
void writeCommonPostfix(const Metric& m);
@@ -37,7 +36,7 @@ private:
void writeInheritedDimensions();
void writeMetricSpecificDimensions(const Metric&);
- bool isLeafMetric(const Metric& m) const { return !m.isMetricSet(); }
+ static bool isLeafMetric(const Metric& m) { return !m.isMetricSet(); }
};
}
diff --git a/metrics/src/vespa/metrics/metricmanager.cpp b/metrics/src/vespa/metrics/metricmanager.cpp
index 2f6fe4c6ba6..fa18ddc383b 100644
--- a/metrics/src/vespa/metrics/metricmanager.cpp
+++ b/metrics/src/vespa/metrics/metricmanager.cpp
@@ -168,6 +168,11 @@ MetricManager::isInitialized() const {
return static_cast<bool>(_configHandle);
}
+bool
+MetricManager::any_snapshots_taken(const MetricLockGuard&) const noexcept {
+ return (!_snapshots.empty() && _snapshots[0]->current_is_assigned());
+}
+
void
MetricManager::init(const config::ConfigUri & uri, bool startThread)
{
@@ -200,7 +205,7 @@ namespace {
struct Path {
vespalib::StringTokenizer _path;
- Path(vespalib::stringref fullpath) : _path(fullpath, ".") { }
+ explicit Path(vespalib::stringref fullpath) : _path(fullpath, ".") { }
vespalib::string toString() const {
vespalib::asciistream ost;
@@ -246,7 +251,7 @@ struct ConsumerMetricBuilder : public MetricVisitor {
};
std::list<Result> result;
- ConsumerMetricBuilder(const Config::Consumer& c) __attribute__((noinline));
+ explicit ConsumerMetricBuilder(const Config::Consumer& c) __attribute__((noinline));
~ConsumerMetricBuilder() __attribute__((noinline));
bool tagAdded(const Metric& metric) {
@@ -486,7 +491,7 @@ MetricManager::configure(const MetricLockGuard & , std::unique_ptr<Config> confi
_totalMetrics = std::make_shared<MetricSnapshot>("All time snapshot", 0s, _activeMetrics.getMetrics(), _snapshotUnsetMetrics);
_totalMetrics->reset(currentTime);
}
- if (_config.get() == 0 || (_config->consumer.size() != config->consumer.size())) {
+ if ( !_config || (_config->consumer.size() != config->consumer.size())) {
_consumerConfigChanged = true;
} else {
for (uint32_t i=0; i<_config->consumer.size(); ++i) {
@@ -553,7 +558,7 @@ MetricManager::visit(const MetricLockGuard & guard, const MetricSnapshot& snapsh
MetricVisitor& visitor, const std::string& consumer) const
{
if (visitor.visitSnapshot(snapshot)) {
- if (consumer == "") {
+ if (consumer.empty()) {
snapshot.getMetrics().visit(visitor);
} else {
const ConsumerSpec * consumerSpec = getConsumerSpec(guard, consumer);
@@ -795,6 +800,7 @@ MetricManager::takeSnapshots(const MetricLockGuard & guard, system_time timeToPr
_activeMetrics.addToSnapshot(firstTarget, false, timeToProcess);
_activeMetrics.addToSnapshot(*_totalMetrics, false, timeToProcess);
_activeMetrics.reset(timeToProcess);
+ _snapshots[0]->tag_current_as_assigned();
LOG(debug, "After snapshotting, active metrics goes from %s to %s, and 5 minute metrics goes from %s to %s.",
to_string(_activeMetrics.getFromTime()).c_str(), to_string(_activeMetrics.getToTime()).c_str(),
to_string(firstTarget.getFromTime()).c_str(), to_string(firstTarget.getToTime()).c_str());
diff --git a/metrics/src/vespa/metrics/metricmanager.h b/metrics/src/vespa/metrics/metricmanager.h
index 6f40e7961f4..cfb3ab2137a 100644
--- a/metrics/src/vespa/metrics/metricmanager.h
+++ b/metrics/src/vespa/metrics/metricmanager.h
@@ -123,7 +123,7 @@ private:
public:
MetricManager();
- MetricManager(std::unique_ptr<Timer> timer);
+ explicit MetricManager(std::unique_ptr<Timer> timer);
~MetricManager();
void stop();
@@ -218,7 +218,7 @@ public:
* snapshots while you are accessing them.
*/
MetricLockGuard getMetricLock() const {
- return MetricLockGuard(_waiter);
+ return {_waiter};
}
/** While accessing the active metrics you should have the metric lock. */
@@ -267,6 +267,8 @@ public:
bool isInitialized() const;
+ [[nodiscard]] bool any_snapshots_taken(const MetricLockGuard&) const noexcept;
+
private:
void takeSnapshots(const MetricLockGuard &, system_time timeToProcess);
diff --git a/metrics/src/vespa/metrics/metricsnapshot.cpp b/metrics/src/vespa/metrics/metricsnapshot.cpp
index 6bcdcc60995..104ad858e43 100644
--- a/metrics/src/vespa/metrics/metricsnapshot.cpp
+++ b/metrics/src/vespa/metrics/metricsnapshot.cpp
@@ -32,7 +32,7 @@ MetricSnapshot::MetricSnapshot(const Metric::String& name, system_time::duration
_snapshot(),
_metrics()
{
- _snapshot.reset(source.clone(_metrics, Metric::INACTIVE, 0, copyUnset));
+ _snapshot.reset(source.clone(_metrics, Metric::INACTIVE, nullptr, copyUnset));
_metrics.shrink_to_fit();
}
@@ -54,7 +54,7 @@ void
MetricSnapshot::recreateSnapshot(const MetricSet& metrics, bool copyUnset)
{
std::vector<Metric::UP> newMetrics;
- Metric* m = metrics.clone(newMetrics, Metric::INACTIVE, 0, copyUnset);
+ Metric* m = metrics.clone(newMetrics, Metric::INACTIVE, nullptr, copyUnset);
assert(m->isMetricSet());
std::unique_ptr<MetricSet> newSnapshot(static_cast<MetricSet*>(m));
newSnapshot->reset();
@@ -78,12 +78,15 @@ MetricSnapshotSet::MetricSnapshotSet(const Metric::String& name, system_time::du
: _count(count),
_builderCount(0),
_current(std::make_unique<MetricSnapshot>(name, period, source, snapshotUnsetMetrics)),
- _building(count == 1 ? nullptr : new MetricSnapshot(name, period, source, snapshotUnsetMetrics))
+ _building(count == 1 ? nullptr : new MetricSnapshot(name, period, source, snapshotUnsetMetrics)),
+ _current_is_assigned(false)
{
_current->reset();
- if (_building.get()) _building->reset();
+ if (_building) _building->reset();
}
+MetricSnapshotSet::~MetricSnapshotSet() = default;
+
MetricSnapshot&
MetricSnapshotSet::getNextTarget()
{
diff --git a/metrics/src/vespa/metrics/metricsnapshot.h b/metrics/src/vespa/metrics/metricsnapshot.h
index 859ee4a4a97..a6a68b43015 100644
--- a/metrics/src/vespa/metrics/metricsnapshot.h
+++ b/metrics/src/vespa/metrics/metricsnapshot.h
@@ -71,15 +71,17 @@ public:
};
class MetricSnapshotSet {
- uint32_t _count; // Number of times we need to add to building period
- // before we have a full time window.
+ const uint32_t _count; // Number of times we need to add to building period
+ // before we have a full time window.
uint32_t _builderCount; // Number of times we've currently added to the
// building instance.
std::unique_ptr<MetricSnapshot> _current; // The last full period
std::unique_ptr<MetricSnapshot> _building; // The building period
+ bool _current_is_assigned;
public:
MetricSnapshotSet(const Metric::String& name, system_time::duration period, uint32_t count,
const MetricSet& source, bool snapshotUnsetMetrics);
+ ~MetricSnapshotSet();
const Metric::String& getName() const { return _current->getName(); }
system_time::duration getPeriod() const { return _current->getPeriod(); }
@@ -94,9 +96,7 @@ public:
MetricSnapshot& getSnapshot(bool temporary) {
return *((temporary && _count > 1) ? _building : _current);
}
- const MetricSnapshot& getSnapshot() const {
- return getSnapshot(false);
- }
+
const MetricSnapshot& getSnapshot(bool temporary) const {
return *((temporary && _count > 1) ? _building : _current);
}
@@ -111,6 +111,13 @@ public:
void recreateSnapshot(const MetricSet& metrics, bool copyUnset);
void addMemoryUsage(MemoryConsumption&) const;
void setFromTime(system_time fromTime);
+
+ [[nodiscard]] bool current_is_assigned() const noexcept {
+ return _current_is_assigned;
+ }
+ void tag_current_as_assigned() noexcept {
+ _current_is_assigned = true;
+ }
};
} // metrics
diff --git a/metrics/src/vespa/metrics/state_api_adapter.cpp b/metrics/src/vespa/metrics/state_api_adapter.cpp
index 136ccf6e06a..6c0cb9a6013 100644
--- a/metrics/src/vespa/metrics/state_api_adapter.cpp
+++ b/metrics/src/vespa/metrics/state_api_adapter.cpp
@@ -11,8 +11,8 @@ StateApiAdapter::getMetrics(const vespalib::string &consumer)
{
MetricLockGuard guard(_manager.getMetricLock());
auto periods = _manager.getSnapshotPeriods(guard);
- if (periods.empty()) {
- return ""; // no configuration yet
+ if (periods.empty() || !_manager.any_snapshots_taken(guard)) {
+ return ""; // no configuration or snapshots yet
}
const MetricSnapshot &snapshot(_manager.getMetricSnapshot(guard, periods[0]));
vespalib::asciistream json;
diff --git a/metrics/src/vespa/metrics/textwriter.h b/metrics/src/vespa/metrics/textwriter.h
index f23d1cf585c..7feffdf22fc 100644
--- a/metrics/src/vespa/metrics/textwriter.h
+++ b/metrics/src/vespa/metrics/textwriter.h
@@ -19,7 +19,7 @@ class TextWriter : public MetricVisitor {
public:
TextWriter(std::ostream& out, vespalib::duration period,
const std::string& regex, bool verbose);
- ~TextWriter();
+ ~TextWriter() override;
bool visitSnapshot(const MetricSnapshot&) override;
void doneVisitingSnapshot(const MetricSnapshot&) override;
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java
index 05ff1aba877..f0cd0b01fa5 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java
@@ -15,6 +15,7 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
+import java.util.regex.Pattern;
/**
* All models imported from the models/ directory in the application package.
@@ -86,6 +87,7 @@ public class ImportedMlModels {
Arrays.stream(dir.listFiles()).sorted().forEach(child -> {
Optional<MlModelImporter> importer = findImporterOf(child, importers);
if (importer.isPresent()) {
+ validateModelPath(child);
String name = toName(child);
Future<ImportedMlModel> existing = models.get(name);
if (existing != null) {
@@ -139,4 +141,27 @@ public class ImportedMlModels {
return result.substring(0, result.length()-1);
}
+ private static void validateModelPath(File modelFile) {
+ Pattern nameRegexp = Pattern.compile("[A-Za-z0-9_.]*");
+
+ Path path = Path.fromString(modelFile.toString());
+ if (modelFile.isFile())
+ path = stripFileEnding(path);
+
+ boolean afterModels = false;
+ for (String element : path.elements()) {
+ if (afterModels) {
+ if ( ! nameRegexp.matcher(element).matches()) {
+ throw new IllegalArgumentException("When Vespa imports a model from the 'models' directory, it " +
+ "uses the directory structure under 'models' to determine the " +
+ "name of the model. The directory or file name '" + element + "' " +
+ "is not valid. Please rename this to only contain letters, " +
+ "numbers or underscores.");
+ }
+ } else if (element.equals("models")) {
+ afterModels = true;
+ }
+ }
+ }
+
}
diff --git a/node-admin/pom.xml b/node-admin/pom.xml
index 0f153663e3d..cf45e010d14 100644
--- a/node-admin/pom.xml
+++ b/node-admin/pom.xml
@@ -142,11 +142,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Cgroup.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Cgroup.java
index e40e3c6c003..9079aa6fc3f 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Cgroup.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Cgroup.java
@@ -24,7 +24,7 @@ import java.util.logging.Logger;
public class Cgroup {
private static final Logger logger = Logger.getLogger(Cgroup.class.getName());
- private static Map<String, Consumer<UnixPath>> cgroupDirectoryCallbacks = new HashMap<>();
+ private static final Map<String, Consumer<UnixPath>> cgroupDirectoryCallbacks = new HashMap<>();
private final Path root;
private final Path relativePath;
@@ -135,6 +135,9 @@ public class Cgroup {
/** Returns the memory controller of this cgroup (memory.* files). */
public MemoryController memory() { return new MemoryController(this); }
+ /** Returns the IO controller of this cgroup (io.* files). */
+ public IoController io() { return new IoController(this); }
+
/**
* Wraps {@code command} to ensure it is executed in this cgroup.
*
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/IoController.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/IoController.java
new file mode 100644
index 00000000000..5bbdd5c3b70
--- /dev/null
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/IoController.java
@@ -0,0 +1,111 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.cgroup;
+
+import ai.vespa.validation.Validation;
+import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
+import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixPath;
+
+import java.util.Map;
+import java.util.Optional;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+import static java.lang.Integer.parseInt;
+
+/**
+ * Represents a cgroup v2 IO controller, i.e. all io.* files.
+ *
+ * @author freva
+ */
+public class IoController {
+ private static final Logger logger = Logger.getLogger(IoController.class.getName());
+ private final Cgroup cgroup;
+
+ IoController(Cgroup cgroup) {
+ this.cgroup = cgroup;
+ }
+
+ public record Device(int major, int minor) implements Comparable<Device> {
+ public Device {
+ // https://www.halolinux.us/kernel-architecture/representation-of-major-and-minor-numbers.html
+ Validation.requireInRange(major, "device major", 0, 0xFFF);
+ Validation.requireInRange(minor, "device minor", 0, 0xFFFFF);
+ }
+
+ private String toFileContent() { return major + ":" + minor; }
+ private static Device fromString(String device) {
+ String[] parts = device.split(":");
+ return new Device(parseInt(parts[0]), parseInt(parts[1]));
+ }
+
+ @Override
+ public int compareTo(Device o) {
+ return major != o.major ? Integer.compare(major, o.major) : Integer.compare(minor, o.minor);
+ }
+ }
+
+ /**
+ * Defines max allowed IO:
+ * <ul>
+ * <li><b>rbps</b>: Read bytes per second</li>
+ * <li><b>riops</b>: Read IO operations per second</li>
+ * <li><b>wbps</b>: Write bytes per second</li>
+ * <li><b>wiops</b>: Write IO operations per second</li>
+ * </ul>
+ */
+ public record Max(Size rbps, Size wbps, Size riops, Size wiops) {
+ public static Max UNLIMITED = new Max(Size.max(), Size.max(), Size.max(), Size.max());
+
+ // Keys can be specified in any order; this is the order in which io.max outputs them
+ // https://github.com/torvalds/linux/blob/c1a515d3c0270628df8ae5f5118ba859b85464a2/block/blk-throttle.c#L1541
+ private String toFileContent() { return "rbps=%s wbps=%s riops=%s wiops=%s".formatted(rbps, wbps, riops, wiops); }
+
+ public static Max fromString(String max) {
+ String[] parts = max.split(" ");
+ Size rbps = Size.max(), riops = Size.max(), wbps = Size.max(), wiops = Size.max();
+ for (String part : parts) {
+ if (part.isEmpty()) continue;
+ String[] kv = part.split("=");
+ if (kv.length != 2) throw new IllegalArgumentException("Invalid io.max format: " + max);
+ switch (kv[0]) {
+ case "rbps" -> rbps = Size.from(kv[1]);
+ case "riops" -> riops = Size.from(kv[1]);
+ case "wbps" -> wbps = Size.from(kv[1]);
+ case "wiops" -> wiops = Size.from(kv[1]);
+ default -> throw new IllegalArgumentException("Unknown key " + kv[0]);
+ }
+ }
+ return new Max(rbps, wbps, riops, wiops);
+ }
+ }
+
+ /**
+ * Returns the maximum allowed IO usage, by device, or empty if cgroup is not found.
+ *
+ * @see Max
+ */
+ public Optional<Map<Device, Max>> readMax() {
+ return cgroup.readIfExists("io.max")
+ .map(content -> content
+ .lines()
+ .map(line -> {
+ String[] parts = line.strip().split(" ", 2);
+ return Map.entry(Device.fromString(parts[0]), Max.fromString(parts[1]));
+ })
+ .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)));
+ }
+
+ public boolean updateMax(TaskContext context, Device device, Max max) {
+ Max prevMax = readMax()
+ .map(maxByDevice -> maxByDevice.get(device))
+ .orElse(Max.UNLIMITED);
+ if (prevMax.equals(max)) return false;
+
+ UnixPath path = cgroup.unixPath().resolve("io.max");
+ context.recordSystemModification(logger, "Updating %s for device %s from '%s' to '%s'",
+ path, device.toFileContent(), prevMax.toFileContent(), max.toFileContent());
+ path.writeUtf8File(device.toFileContent() + ' ' + max.toFileContent() + '\n');
+ return true;
+ }
+
+}
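
For reference, an io.max line looks like "253:0 rbps=max wbps=10485760 riops=max wiops=max", which readMax() above splits into a Device key and a Max value. A self-contained sketch of that parsing using plain longs (-1 standing in for "max") instead of the Size type:

    import java.util.HashMap;
    import java.util.Map;

    class IoMaxParseSketch {
        // Parses one io.max line into device numbers and per-key limits.
        static Map<String, Long> parseLimits(String line) {
            String[] parts = line.strip().split(" ", 2);
            Map<String, Long> limits = new HashMap<>();
            limits.put("device-major", Long.parseLong(parts[0].split(":")[0]));
            limits.put("device-minor", Long.parseLong(parts[0].split(":")[1]));
            for (String kv : parts[1].split(" ")) {
                if (kv.isEmpty()) continue;
                String[] pair = kv.split("=");
                // "max" means unlimited; represented as -1 in this sketch.
                limits.put(pair[0], pair[1].equals("max") ? -1L : Long.parseLong(pair[1]));
            }
            return limits;
        }

        public static void main(String[] args) {
            System.out.println(parseLimits("253:0 rbps=max wbps=10485760 riops=max wiops=max"));
        }
    }
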
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/MemoryController.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/MemoryController.java
index 840cd025917..91806b8fd61 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/MemoryController.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/MemoryController.java
@@ -3,6 +3,9 @@ package com.yahoo.vespa.hosted.node.admin.cgroup;
import java.util.List;
import java.util.Optional;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Stream;
/**
* Represents a cgroup v2 memory controller, i.e. all memory.* files.
@@ -31,9 +34,21 @@ public class MemoryController {
return cgroup.readIfExists("memory.current").map(Size::from);
}
- /** @return Number of bytes used to cache filesystem data, including tmpfs and shared memory. */
- public Size readFileSystemCache() {
- return Size.from(readField(cgroup.readLines("memory.stat"), "file"));
+ public Stats readStat() {
+ var lines = cgroup.readLines("memory.stat");
+ return new Stats(
+ Size.from(readField(lines, "file")), Size.from(readField(lines, "sock")), Size.from(readField(lines, "slab")),
+ Size.from(readField(lines, "slab_reclaimable")), Size.from(readField(lines, "anon")));
+ }
+
+ public Optional<Pressure> readPressureIfExists() {
+ return cgroup.readIfExists("memory.pressure")
+ .map(fileContent ->
+ new Pressure(
+ readPressureField(fileContent, "some"),
+ readPressureField(fileContent, "full")
+ )
+ );
}
private static String readField(List<String> lines, String fieldName) {
@@ -45,4 +60,33 @@ public class MemoryController {
.findFirst()
.orElseThrow(() -> new IllegalArgumentException("No such field: " + fieldName));
}
+
+ /**
+ * Fetches the avg60 value for the specified pressure type, i.e. "some" or "full".
+ */
+ private static Double readPressureField(String fileContent, String type) {
+ var pattern = Pattern.compile(type + ".*avg60=(?<avg60>\\d+\\.\\d+).*");
+ return Stream.of(fileContent.split("\n"))
+ .map(pattern::matcher)
+ .filter(Matcher::matches)
+ .map(matcher -> matcher.group("avg60"))
+ .findFirst()
+ .map(Double::parseDouble)
+ .orElseThrow(() -> new IllegalArgumentException("No such field: " + type));
+ }
+
+ /**
+ * @param file Number of bytes used to cache filesystem data, including tmpfs and shared memory.
+ * @param sock Amount of memory used in network transmission buffers.
+ * @param slab Amount of memory used for storing in-kernel data structures.
+ * @param slabReclaimable Part of "slab" that might be reclaimed, such as dentries and inodes.
+ * @param anon Amount of memory used in anonymous mappings such as brk(), sbrk(), and mmap(MAP_ANONYMOUS).
+ */
+ public record Stats(Size file, Size sock, Size slab, Size slabReclaimable, Size anon) {}
+
+ /**
+ * @param some The avg60 value of the "some" pressure level.
+ * @param full The avg60 value of the "full" pressure level.
+ */
+ public record Pressure(double some, double full) {}
}
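
readPressureIfExists() above pulls the avg60 field out of memory.pressure, whose lines look like "some avg10=0.00 avg60=1.25 avg300=0.40 total=123456". A standalone sketch of the same regex-based extraction (the sample values are made up):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;
    import java.util.stream.Stream;

    class PressureParseSketch {
        // Extracts the avg60 value for the given level ("some" or "full"), mirroring readPressureField.
        static double avg60(String fileContent, String level) {
            Pattern pattern = Pattern.compile(level + ".*avg60=(?<avg60>\\d+\\.\\d+).*");
            return Stream.of(fileContent.split("\n"))
                    .map(pattern::matcher)
                    .filter(Matcher::matches)
                    .map(m -> m.group("avg60"))
                    .findFirst()
                    .map(Double::parseDouble)
                    .orElseThrow(() -> new IllegalArgumentException("No such level: " + level));
        }

        public static void main(String[] args) {
            String content = "some avg10=0.00 avg60=1.25 avg300=0.40 total=123456\n"
                           + "full avg10=0.00 avg60=0.75 avg300=0.10 total=654321";
            System.out.println(avg60(content, "some")); // 1.25
            System.out.println(avg60(content, "full")); // 0.75
        }
    }
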
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Size.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Size.java
index 5e6ca7de8bd..a8cbe2e8afe 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Size.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/cgroup/Size.java
@@ -10,12 +10,13 @@ import java.util.Objects;
*/
public class Size {
private static final String MAX = "max";
+ private static final Size MAX_SIZE = new Size(true, 0);
private final boolean max;
private final long value;
public static Size max() {
- return new Size(true, 0);
+ return MAX_SIZE;
}
public static Size from(long value) {
@@ -23,7 +24,7 @@ public class Size {
}
public static Size from(String value) {
- return value.equals(MAX) ? new Size(true, 0) : new Size(false, Long.parseLong(value));
+ return value.equals(MAX) ? MAX_SIZE : new Size(false, Long.parseLong(value));
}
private Size(boolean max, long value) {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
index 264035b86a1..fa933e9622a 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
@@ -36,7 +36,7 @@ public class ContainerOperations {
public ContainerOperations(ContainerEngine containerEngine, Cgroup cgroup, FileSystem fileSystem, Timer timer) {
this.containerEngine = Objects.requireNonNull(containerEngine);
- this.imageDownloader = new ContainerImageDownloader(containerEngine);
+ this.imageDownloader = new ContainerImageDownloader(containerEngine, timer);
this.imagePruner = new ContainerImagePruner(containerEngine, timer);
this.containerStatsCollector = new ContainerStatsCollector(containerEngine, cgroup, fileSystem);
}
@@ -62,8 +62,8 @@ public class ContainerOperations {
}
/** Pull image asynchronously. Returns true if image is still downloading and false if download is complete */
- public boolean pullImageAsyncIfNeeded(TaskContext context, DockerImage dockerImage, RegistryCredentials registryCredentials) {
- return !imageDownloader.get(context, dockerImage, registryCredentials);
+ public boolean pullImageAsyncIfNeeded(TaskContext context, DockerImage dockerImage, RegistryCredentialsProvider credentialsProvider) {
+ return !imageDownloader.get(context, dockerImage, credentialsProvider);
}
/** Executes a command inside container identified by given context. Does NOT throw on non-zero exit code */
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStats.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStats.java
index a5606784c12..1c02072ed2b 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStats.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStats.java
@@ -44,8 +44,14 @@ public record ContainerStats(Map<String, NetworkStats> networks,
* @param cache memory used by cache in bytes
* @param usage memory usage in bytes
* @param limit memory limit in bytes
+ * @param sock network transmission buffers in bytes
+ * @param slab in-kernel data structures in bytes
+ * @param slabReclaimable part of "slab" that might be reclaimed in bytes
+ * @param anon anonymous mappings in bytes
*/
- public record MemoryStats(long cache, long usage, long limit) {}
+ public record MemoryStats(long cache, long usage, long limit, long sock, long slab, long slabReclaimable, long anon) {
+ public MemoryStats(long cache, long usage, long limit) { this(cache, usage, limit, 0, 0, 0, 0); }
+ }
/**
* Statistics for CPU usage
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollector.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollector.java
index 8244666f9e0..0e16e2cabf6 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollector.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollector.java
@@ -105,8 +105,10 @@ class ContainerStatsCollector {
MemoryController memoryController = rootCgroup.resolveContainer(containerId).memory();
Size max = memoryController.readMax();
long memoryUsageInBytes = memoryController.readCurrent().value();
- long cachedInBytes = memoryController.readFileSystemCache().value();
- return new ContainerStats.MemoryStats(cachedInBytes, memoryUsageInBytes, max.isMax() ? -1 : max.value());
+ var stats = memoryController.readStat();
+ return new ContainerStats.MemoryStats(
+ stats.file().value(), memoryUsageInBytes, max.isMax() ? -1 : max.value(),
+ stats.sock().value(), stats.slab().value(), stats.slabReclaimable().value(), stats.anon().value());
}
private ContainerStats.NetworkStats collectNetworkStats(String iface, int containerPid) throws IOException {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java
index 1e37e080528..d3327bf5148 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java
@@ -3,10 +3,13 @@ package com.yahoo.vespa.hosted.node.admin.container.image;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.config.provision.DockerImage;
+import com.yahoo.jdisc.Timer;
import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
import com.yahoo.vespa.hosted.node.admin.container.ContainerEngine;
-import com.yahoo.vespa.hosted.node.admin.container.RegistryCredentials;
+import com.yahoo.vespa.hosted.node.admin.container.RegistryCredentialsProvider;
+import java.time.Duration;
+import java.time.Instant;
import java.util.Collections;
import java.util.HashSet;
import java.util.Objects;
@@ -26,13 +29,15 @@ public class ContainerImageDownloader {
private static final Logger LOG = Logger.getLogger(ContainerImageDownloader.class.getName());
private final ContainerEngine containerEngine;
+ private final Timer timer;
private final ExecutorService executorService = Executors.newSingleThreadExecutor(
new DaemonThreadFactory("container-image-downloader")); // Download one image at a time
private final Set<DockerImage> pendingDownloads = Collections.synchronizedSet(new HashSet<>());
- public ContainerImageDownloader(ContainerEngine containerEngine) {
+ public ContainerImageDownloader(ContainerEngine containerEngine, Timer timer) {
this.containerEngine = Objects.requireNonNull(containerEngine);
+ this.timer = Objects.requireNonNull(timer);
}
/**
@@ -40,12 +45,14 @@ public class ContainerImageDownloader {
*
* @return true if the image download has completed.
*/
- public boolean get(TaskContext context, DockerImage image, RegistryCredentials registryCredentials) {
+ public boolean get(TaskContext context, DockerImage image, RegistryCredentialsProvider credentialsProvider) {
if (pendingDownloads.contains(image)) return false;
if (containerEngine.hasImage(context, image)) return true;
executorService.submit(() -> {
try {
- containerEngine.pullImage(context, image, registryCredentials);
+ Instant start = timer.currentTime();
+ containerEngine.pullImage(context, image, credentialsProvider.get());
+ LOG.log(Level.INFO, "Downloaded container image " + image + " in " + Duration.between(start, timer.currentTime()));
} catch (RuntimeException e) {
LOG.log(Level.SEVERE, "Failed to download container image " + image, e);
} finally {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java
index 07a8d545178..e9dbfa0c524 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java
@@ -102,6 +102,17 @@ public class Metrics {
}
}
+ public void deleteMetricByName(String application, String metricName, DimensionType type) {
+ synchronized (monitor) {
+ Optional.ofNullable(metrics.get(type))
+ .map(m -> m.get(application))
+ .map(ApplicationMetrics::metricsByDimensions)
+ .ifPresent(dims ->
+ dims.values().forEach(metrics -> metrics.remove(metricName))
+ );
+ }
+ }
+
Map<Dimensions, Map<String, MetricValue>> getOrCreateApplicationMetrics(String application, DimensionType type) {
return metrics.computeIfAbsent(type, m -> new HashMap<>())
.computeIfAbsent(application, app -> new ApplicationMetrics())
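The new deleteMetricByName removes one named metric across every dimension set recorded for an application while leaving the application's other metrics in place. A hypothetical call, with an application name and dimension type that are illustrative only and not taken from this patch:

    // Drop the "mem.cached" samples for every dimension combination of this application.
    metrics.deleteMetricByName("vespa.node", "mem.cached", DimensionType.DEFAULT);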
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
index b6ec0ebbd94..830b7f4ed33 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/identity/AthenzCredentialsMaintainer.java
@@ -80,7 +80,6 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
private final String certificateDnsSuffix;
private final ServiceIdentityProvider hostIdentityProvider;
private final IdentityDocumentClient identityDocumentClient;
- private final BooleanFlag tenantServiceIdentityFlag;
// Used as an optimization to ensure ZTS is not DDoS'ed on continuously failing refresh attempts
private final Map<ContainerName, Instant> lastRefreshAttempt = new ConcurrentHashMap<>();
@@ -89,7 +88,6 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
ConfigServerInfo configServerInfo,
String certificateDnsSuffix,
ServiceIdentityProvider hostIdentityProvider,
- FlagSource flagSource,
Timer timer) {
this.ztsTrustStorePath = ztsTrustStorePath;
this.certificateDnsSuffix = certificateDnsSuffix;
@@ -99,7 +97,6 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
hostIdentityProvider,
new AthenzIdentityVerifier(Set.of(configServerInfo.getConfigServerIdentity())));
this.timer = timer;
- this.tenantServiceIdentityFlag = Flags.NODE_ADMIN_TENANT_SERVICE_REGISTRY.bindTo(flagSource);
}
public boolean converge(NodeAgentContext context) {
@@ -109,11 +106,7 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
if (context.zone().getSystemName().isPublic())
return modified;
- if (shouldWriteTenantServiceIdentity(context)) {
- modified |= maintain(context, TENANT);
- } else {
- modified |= deleteTenantCredentials(context);
- }
+ modified |= maintain(context, TENANT);
return modified;
}
@@ -268,24 +261,6 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
return "node-certificate";
}
- private boolean deleteTenantCredentials(NodeAgentContext context) {
- var siaDirectory = context.paths().of(CONTAINER_SIA_DIRECTORY, context.users().vespa());
- var identityDocumentFile = siaDirectory.resolve(TENANT.getIdentityDocument());
- if (!Files.exists(identityDocumentFile)) return false;
- return getAthenzIdentity(context, TENANT, identityDocumentFile).map(athenzIdentity -> {
- var privateKeyFile = (ContainerPath) SiaUtils.getPrivateKeyFile(siaDirectory, athenzIdentity);
- var certificateFile = (ContainerPath) SiaUtils.getCertificateFile(siaDirectory, athenzIdentity);
- try {
- var modified = Files.deleteIfExists(identityDocumentFile);
- modified |= Files.deleteIfExists(privateKeyFile);
- modified |= Files.deleteIfExists(certificateFile);
- return modified;
- } catch (IOException e) {
- throw new UncheckedIOException(e);
- }
- }).orElse(false);
- }
-
private boolean shouldRefreshCredentials(Duration age) {
return age.compareTo(REFRESH_PERIOD) >= 0;
}
@@ -399,16 +374,6 @@ public class AthenzCredentialsMaintainer implements CredentialsMaintainer {
}
}
- private boolean shouldWriteTenantServiceIdentity(NodeAgentContext context) {
- var version = context.node().currentVespaVersion()
- .orElse(context.node().wantedVespaVersion().orElse(Version.emptyVersion));
- var appId = context.node().owner().orElse(ApplicationId.defaultId());
- return tenantServiceIdentityFlag
- .with(FetchVector.Dimension.VESPA_VERSION, version.toFullString())
- .with(FetchVector.Dimension.APPLICATION_ID, appId.serializedForm())
- .value();
- }
-
private void copyCredsToLegacyPath(NodeAgentContext context, ContainerPath privateKeyFile, ContainerPath certificateFile) throws IOException {
var legacySiaDirectory = context.paths().of(LEGACY_SIA_DIRECTORY, context.users().vespa());
var keysDirectory = legacySiaDirectory.resolve("keys");
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java
index c743f1c8c85..feafe9fddc9 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfo.java
@@ -81,6 +81,9 @@ public class SyncFileInfo {
remoteFilename = rotatedOnly && filename.endsWith(".0.log") ? "zookeeper.log" :
"zookeeper.log-" + DATE_TIME_FORMATTER.format(new UnixPath(logFile).getLastModifiedTime());
minDurationBetweenSync = filename.endsWith(".0.log") ? rotatedOnly ? Duration.ofHours(1) : Duration.ZERO : null;
+ } else if (filename.startsWith("start-services.out-")) {
+ compression = Compression.ZSTD;
+ dir = "logs/start-services/";
} else {
compression = filename.endsWith(".zst") ? Compression.NONE : Compression.ZSTD;
if (rotatedOnly && compression != Compression.NONE)
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java
index 210bdf2fcb3..e6a1e68b12c 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentContextImpl.java
@@ -16,7 +16,6 @@ import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.Acl;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.container.ContainerName;
import com.yahoo.vespa.hosted.node.admin.container.ContainerNetworkMode;
-import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixUser;
import com.yahoo.vespa.hosted.node.admin.task.util.fs.ContainerFileSystem;
import java.nio.file.FileSystem;
@@ -170,7 +169,6 @@ public class NodeAgentContextImpl implements NodeAgentContext {
private ContainerNetworkMode containerNetworkMode;
private ZoneApi zone;
private UserNamespace userNamespace;
- private UnixUser vespaUser;
private Path containerStorage;
private FlagSource flagSource;
private double cpuSpeedUp = 1;
@@ -211,12 +209,6 @@ public class NodeAgentContextImpl implements NodeAgentContext {
return this;
}
- public Builder vespaUser(UnixUser vespaUser) {
- this.vespaUser = vespaUser;
- return this;
- }
-
-
/** Sets the file system to use for paths. */
public Builder fileSystem(FileSystem fileSystem) {
return containerStorage(fileSystem.getPath(DEFAULT_CONTAINER_STORAGE.toString()));
@@ -251,7 +243,6 @@ public class NodeAgentContextImpl implements NodeAgentContext {
Objects.requireNonNull(containerStorage, "Must set one of containerStorage or fileSystem");
UserScope userScope = UserScope.create(
- Optional.ofNullable(vespaUser).orElseGet(() -> new UnixUser("vespa", 1000, "vespa", 100)),
Optional.ofNullable(userNamespace).orElseGet(() -> new UserNamespace(100000, 100000, 100000)));
ContainerFileSystem containerFs = ContainerFileSystem.create(containerStorage
.resolve(nodeSpecBuilder.hostname().split("\\.")[0]), userScope);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index 4c17bfbe039..466ee65fcc1 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -17,11 +17,9 @@ import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.reports.DropDocumentsReport;
import com.yahoo.vespa.hosted.node.admin.configserver.orchestrator.Orchestrator;
-import com.yahoo.vespa.hosted.node.admin.configserver.orchestrator.OrchestratorException;
import com.yahoo.vespa.hosted.node.admin.container.Container;
import com.yahoo.vespa.hosted.node.admin.container.ContainerOperations;
import com.yahoo.vespa.hosted.node.admin.container.ContainerResources;
-import com.yahoo.vespa.hosted.node.admin.container.RegistryCredentials;
import com.yahoo.vespa.hosted.node.admin.container.RegistryCredentialsProvider;
import com.yahoo.vespa.hosted.node.admin.maintenance.ContainerWireguardTask;
import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
@@ -431,9 +429,8 @@ public class NodeAgentImpl implements NodeAgent {
NodeSpec node = context.node();
if (node.wantedDockerImage().equals(container.map(c -> c.image()))) return false;
- RegistryCredentials credentials = registryCredentialsProvider.get();
return node.wantedDockerImage()
- .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials))
+ .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, registryCredentialsProvider))
.orElse(false);
}
@@ -486,18 +483,21 @@ public class NodeAgentImpl implements NodeAgent {
lastNode = node;
}
+ // Run this here and now, even though we may immediately remove the container below.
+ // This ensures these maintainers are run even if something fails or returns early.
+ // These maintainers should also run immediately after starting the container (see below).
+ container.filter(c -> c.state().isRunning())
+ .ifPresent(c -> runImportantContainerMaintainers(context, c));
+
switch (node.state()) {
- case ready:
- case reserved:
- case failed:
- case inactive:
- case parked:
+ case ready, reserved, failed, inactive, parked -> {
storageMaintainer.syncLogs(context, true);
+ if (node.state() == NodeState.reserved) downloadImageIfNeeded(context, container);
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context, Optional.empty());
stopServicesIfNeeded(context);
- break;
- case active:
+ }
+ case active -> {
storageMaintainer.syncLogs(context, true);
storageMaintainer.cleanDiskIfFull(context);
storageMaintainer.handleCoreDumpsForContainer(context, container, false);
@@ -513,13 +513,11 @@ public class NodeAgentImpl implements NodeAgent {
containerState = STARTING;
container = Optional.of(startContainer(context));
containerState = UNKNOWN;
+ runImportantContainerMaintainers(context, container.get());
} else {
container = Optional.of(updateContainerIfNeeded(context, container.get()));
}
- aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
- final Optional<Container> finalContainer = container;
- wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id()));
startServicesIfNeeded(context);
resumeNodeIfNeeded(context);
if (healthChecker.isPresent()) {
@@ -550,11 +548,8 @@ public class NodeAgentImpl implements NodeAgent {
orchestrator.resume(context.hostname().value());
suspendedInOrchestrator = false;
}
- break;
- case provisioned:
- nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
- break;
- case dirty:
+ }
+ case dirty -> {
removeContainerIfNeededUpdateContainerState(context, container);
context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready");
credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context));
@@ -562,12 +557,16 @@ public class NodeAgentImpl implements NodeAgent {
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(context, Optional.empty());
nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
- break;
- default:
- throw ConvergenceException.ofError("UNKNOWN STATE " + node.state().name());
+ }
+ default -> throw ConvergenceException.ofError("Unexpected state " + node.state().name());
}
}
+ private void runImportantContainerMaintainers(NodeAgentContext context, Container runningContainer) {
+ aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
+ wireguardTasks.forEach(task -> task.converge(context, runningContainer.id()));
+ }
+
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
StringBuilder builder = new StringBuilder();
appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state);
@@ -609,23 +608,8 @@ public class NodeAgentImpl implements NodeAgent {
if (context.node().state() != NodeState.active) return;
context.log(logger, "Ask Orchestrator for permission to suspend node");
- try {
- orchestrator.suspend(context.hostname().value());
- suspendedInOrchestrator = true;
- } catch (OrchestratorException e) {
- // Ensure the ACLs are up to date: The reason we're unable to suspend may be because some other
- // node is unable to resume because the ACL rules of SOME Docker container is wrong...
- // Same can happen with stale WireGuard config, so update that too
- try {
- aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
- wireguardTasks.forEach(task -> getContainer(context).ifPresent(c -> task.converge(context, c.id())));
- } catch (RuntimeException suppressed) {
- logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed);
- e.addSuppressed(suppressed);
- }
-
- throw e;
- }
+ orchestrator.suspend(context.hostname().value());
+ suspendedInOrchestrator = true;
}
protected void writeContainerData(NodeAgentContext context, ContainerData containerData) { }
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/UserScope.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/UserScope.java
index 0c4c79172e2..49f249dd2d7 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/UserScope.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/UserScope.java
@@ -45,8 +45,8 @@ public class UserScope {
return Objects.hash(root, vespa, namespace);
}
- /** Creates user scope with default root user */
- public static UserScope create(UnixUser vespaUser, UserNamespace namespace) {
- return new UserScope(UnixUser.ROOT, vespaUser, namespace);
+ /** Creates user scope with default root and vespa user */
+ public static UserScope create(UserNamespace namespace) {
+ return new UserScope(UnixUser.ROOT, UnixUser.VESPA, namespace);
}
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributes.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributes.java
index 332b4e61dc1..c638fe98cdf 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributes.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributes.java
@@ -13,36 +13,13 @@ import java.util.Set;
*
* @author hakonhall
*/
-public class FileAttributes {
-
- private final Instant lastModifiedTime;
- private final int ownerId;
- private final int groupId;
- private final String permissions;
- private final boolean isRegularFile;
- private final boolean isDirectory;
- private final long size;
-
- public FileAttributes(Instant lastModifiedTime, int ownerId, int groupId, String permissions, boolean isRegularFile, boolean isDirectory, long size) {
- this.lastModifiedTime = lastModifiedTime;
- this.ownerId = ownerId;
- this.groupId = groupId;
- this.permissions = permissions;
- this.isRegularFile = isRegularFile;
- this.isDirectory = isDirectory;
- this.size = size;
- }
-
- public Instant lastModifiedTime() { return lastModifiedTime; }
- public int ownerId() { return ownerId; }
- public int groupId() { return groupId; }
- public String permissions() { return permissions; }
- public boolean isRegularFile() { return isRegularFile; }
- public boolean isDirectory() { return isDirectory; }
- public long size() { return size; }
+public record FileAttributes(Instant lastModifiedTime, int ownerId, int groupId, String permissions,
+ boolean isRegularFile, boolean isDirectory, long size, int deviceMajor, int deviceMinor) {
@SuppressWarnings("unchecked")
static FileAttributes fromAttributes(Map<String, Object> attributes) {
+ long dev_t = (long) attributes.get("dev");
+
return new FileAttributes(
((FileTime) attributes.get("lastModifiedTime")).toInstant(),
(int) attributes.get("uid"),
@@ -50,6 +27,11 @@ public class FileAttributes {
PosixFilePermissions.toString(((Set<PosixFilePermission>) attributes.get("permissions"))),
(boolean) attributes.get("isRegularFile"),
(boolean) attributes.get("isDirectory"),
- (long) attributes.get("size"));
+ (long) attributes.get("size"),
+ deviceMajor(dev_t), deviceMinor(dev_t));
}
+
+ // Encoded as MMMM Mmmm mmmM MMmm, where M is a hex digit of the major number and m is a hex digit of the minor number.
+ static int deviceMajor(long dev_t) { return (int) (((dev_t & 0xFFFFF00000000000L) >> 32) | ((dev_t & 0xFFF00) >> 8)); }
+ static int deviceMinor(long dev_t) { return (int) (((dev_t & 0x00000FFFFFF00000L) >> 12) | (dev_t & 0x000FF)); }
}
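The bit twiddling above follows the layout given in the comment: reading dev_t as 16 hex digits, the major number is the top five digits followed by digits 12-14, and the minor number is digits 6-11 followed by the last two. Worked through on the value used by the new FileAttributesTest later in this patch:

    // dev_t = 0x1234567890ABCDEF
    // major = 0x12345  (digits 1-5)  followed by 0xBCD (digits 12-14) -> 0x12345BCD
    // minor = 0x67890A (digits 6-11) followed by 0xEF  (digits 15-16) -> 0x67890AEF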
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java
index 665bb4b8bbc..78fc4b151c7 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java
@@ -11,6 +11,7 @@ import java.util.Objects;
public class UnixUser {
public static final UnixUser ROOT = new UnixUser("root", 0, "root", 0);
+ public static final UnixUser VESPA = new UnixUser("vespa", 1000, "vespa", 1000);
private final String name;
private final int uid;
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/cgroup/CgroupTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/cgroup/CgroupTest.java
index 27580082020..dd81ea8e76a 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/cgroup/CgroupTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/cgroup/CgroupTest.java
@@ -1,6 +1,4 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.cgroup;
import com.yahoo.vespa.hosted.node.admin.container.ContainerId;
@@ -10,7 +8,6 @@ import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixPath;
import com.yahoo.vespa.test.file.TestFileSystem;
import org.junit.jupiter.api.Test;
-import java.io.IOException;
import java.nio.file.FileSystem;
import java.util.Map;
import java.util.Optional;
@@ -23,6 +20,8 @@ import static com.yahoo.vespa.hosted.node.admin.cgroup.CpuController.StatField.T
import static com.yahoo.vespa.hosted.node.admin.cgroup.CpuController.StatField.USER_USAGE_USEC;
import static com.yahoo.vespa.hosted.node.admin.cgroup.CpuController.sharesToWeight;
import static com.yahoo.vespa.hosted.node.admin.cgroup.CpuController.weightToShares;
+import static com.yahoo.vespa.hosted.node.admin.cgroup.IoController.Device;
+import static com.yahoo.vespa.hosted.node.admin.cgroup.IoController.Max;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
@@ -75,36 +74,46 @@ public class CgroupTest {
}
@Test
- public void reads_cpu_stats() throws IOException {
- cgroupRoot.resolve("cpu.stat").writeUtf8File("usage_usec 17794243\n" +
- "user_usec 16099205\n" +
- "system_usec 1695038\n" +
- "nr_periods 12465\n" +
- "nr_throttled 25\n" +
- "throttled_usec 14256\n");
+ public void reads_cpu_stats() {
+ cgroupRoot.resolve("cpu.stat").writeUtf8File("""
+ usage_usec 17794243
+ user_usec 16099205
+ system_usec 1695038
+ nr_periods 12465
+ nr_throttled 25
+ throttled_usec 14256
+ """);
assertEquals(Map.of(TOTAL_USAGE_USEC, 17794243L, USER_USAGE_USEC, 16099205L, SYSTEM_USAGE_USEC, 1695038L,
TOTAL_PERIODS, 12465L, THROTTLED_PERIODS, 25L, THROTTLED_TIME_USEC, 14256L), containerCgroup.cpu().readStats());
}
@Test
- public void reads_memory_metrics() throws IOException {
+ public void reads_memory_metrics() {
cgroupRoot.resolve("memory.current").writeUtf8File("2525093888\n");
assertEquals(2525093888L, containerCgroup.memory().readCurrent().value());
cgroupRoot.resolve("memory.max").writeUtf8File("4322885632\n");
assertEquals(4322885632L, containerCgroup.memory().readMax().value());
- cgroupRoot.resolve("memory.stat").writeUtf8File("anon 3481600\n" +
- "file 69206016\n" +
- "kernel_stack 73728\n" +
- "slab 3552304\n" +
- "percpu 262336\n" +
- "sock 73728\n" +
- "shmem 8380416\n" +
- "file_mapped 1081344\n" +
- "file_dirty 135168\n");
- assertEquals(69206016L, containerCgroup.memory().readFileSystemCache().value());
+ cgroupRoot.resolve("memory.stat").writeUtf8File("""
+ anon 3481600
+ file 69206016
+ kernel_stack 73728
+ slab 3552304
+ percpu 262336
+ sock 73728
+ shmem 8380416
+ file_mapped 1081344
+ file_dirty 135168
+ slab_reclaimable 1424320
+ """);
+ var stats = containerCgroup.memory().readStat();
+ assertEquals(69206016L, stats.file().value());
+ assertEquals(3481600L, stats.anon().value());
+ assertEquals(3552304L, stats.slab().value());
+ assertEquals(73728L, stats.sock().value());
+ assertEquals(1424320L, stats.slabReclaimable().value());
}
@Test
@@ -117,4 +126,37 @@ public class CgroupTest {
() -> "Original shares: " + originalShares + ", round trip shares: " + roundTripShares + ", diff: " + diff);
}
}
+
+ @Test
+ void reads_io_max() {
+ assertEquals(Optional.empty(), containerCgroup.io().readMax());
+
+ cgroupRoot.resolve("io.max").writeUtf8File("");
+ assertEquals(Optional.of(Map.of()), containerCgroup.io().readMax());
+
+ cgroupRoot.resolve("io.max").writeUtf8File("""
+ 253:1 rbps=11 wbps=max riops=22 wiops=33
+ 253:0 rbps=max wbps=44 riops=max wiops=55
+ """);
+ assertEquals(Map.of(new Device(253, 1), new Max(Size.from(11), Size.max(), Size.from(22), Size.from(33)),
+ new Device(253, 0), new Max(Size.max(), Size.from(44), Size.max(), Size.from(55))),
+ containerCgroup.io().readMax().orElseThrow());
+ }
+
+ @Test
+ void writes_io_max() {
+ Device device = new Device(253, 0);
+ Max initial = new Max(Size.max(), Size.from(44), Size.max(), Size.from(55));
+ assertTrue(containerCgroup.io().updateMax(context, device, initial));
+ assertEquals("253:0 rbps=max wbps=44 riops=max wiops=55\n", cgroupRoot.resolve("io.max").readUtf8File());
+
+ cgroupRoot.resolve("io.max").writeUtf8File("""
+ 253:1 rbps=11 wbps=max riops=22 wiops=33
+ 253:0 rbps=max wbps=44 riops=max wiops=55
+ """);
+ assertFalse(containerCgroup.io().updateMax(context, device, initial));
+
+ cgroupRoot.resolve("io.max").writeUtf8File("");
+ assertFalse(containerCgroup.io().updateMax(context, device, Max.UNLIMITED));
+ }
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/cgroup/IoControllerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/cgroup/IoControllerTest.java
new file mode 100644
index 00000000000..71a05eb4571
--- /dev/null
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/cgroup/IoControllerTest.java
@@ -0,0 +1,19 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.cgroup;
+
+import org.junit.jupiter.api.Test;
+
+import static com.yahoo.vespa.hosted.node.admin.cgroup.IoController.Max;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * @author freva
+ */
+class IoControllerTest {
+
+ @Test
+ void parse_io_max() {
+ assertEquals(Max.UNLIMITED, Max.fromString(""));
+ assertEquals(new Max(Size.from(1), Size.max(), Size.max(), Size.max()), Max.fromString("rbps=1 wiops=max"));
+ }
+}
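As exercised here, Max.fromString treats keys omitted from an io.max line as "max" (unlimited): an empty line parses to Max.UNLIMITED, and "rbps=1 wiops=max" caps only read bandwidth while write bandwidth and both iops limits stay at max. This matches the cgroup v2 io.max lines asserted in the CgroupTest above, such as "253:0 rbps=max wbps=44 riops=max wiops=55".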
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollectorTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollectorTest.java
index d4598c8923f..2990e881640 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollectorTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/ContainerStatsCollectorTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.node.admin.container;
import com.yahoo.vespa.hosted.node.admin.cgroup.Cgroup;
+import com.yahoo.vespa.hosted.node.admin.cgroup.MemoryController;
import com.yahoo.vespa.hosted.node.admin.cgroup.Size;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContext;
@@ -97,7 +98,8 @@ public class ContainerStatsCollectorTest {
private void mockMemoryStats(ContainerId containerId) {
when(cgroup.resolveContainer(eq(containerId)).memory().readCurrent()).thenReturn(Size.from(1228017664L));
when(cgroup.resolveContainer(eq(containerId)).memory().readMax()).thenReturn(Size.from(2147483648L));
- when(cgroup.resolveContainer(eq(containerId)).memory().readFileSystemCache()).thenReturn(Size.from(470790144L));
+ when(cgroup.resolveContainer(eq(containerId)).memory().readStat()).thenReturn(
+ new MemoryController.Stats(Size.from(470790144L), Size.from(0), Size.from(0), Size.from(0), Size.from(0)));
}
private void mockCpuStats(ContainerId containerId) throws IOException {
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java
index 9fd14e7e665..7f002eee315 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.node.admin.container.image;
import com.yahoo.config.provision.DockerImage;
+import com.yahoo.jdisc.test.TestTimer;
import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
import com.yahoo.vespa.hosted.node.admin.component.TestTaskContext;
import com.yahoo.vespa.hosted.node.admin.container.ContainerEngineMock;
@@ -21,15 +22,15 @@ public class ContainerImageDownloaderTest {
@Timeout(5_000)
void test_download() {
ContainerEngineMock podman = new ContainerEngineMock().asyncImageDownload(true);
- ContainerImageDownloader downloader = new ContainerImageDownloader(podman);
+ ContainerImageDownloader downloader = new ContainerImageDownloader(podman, new TestTimer());
TaskContext context = new TestTaskContext();
DockerImage image = DockerImage.fromString("registry.example.com/repo/vespa:7.42");
- assertFalse(downloader.get(context, image, RegistryCredentials.none), "Download started");
- assertFalse(downloader.get(context, image, RegistryCredentials.none), "Download pending");
+ assertFalse(downloader.get(context, image, () -> RegistryCredentials.none), "Download started");
+ assertFalse(downloader.get(context, image, () -> RegistryCredentials.none), "Download pending");
podman.completeDownloadOf(image);
boolean downloadCompleted;
- while (!(downloadCompleted = downloader.get(context, image, RegistryCredentials.none))) ;
+ while (!(downloadCompleted = downloader.get(context, image, () -> RegistryCredentials.none))) ;
assertTrue(downloadCompleted, "Download completed");
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java
index b0988bbf53e..d0ddd3755d3 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java
@@ -5,6 +5,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.yahoo.jdisc.test.TestTimer;
+import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState;
import com.yahoo.vespa.hosted.node.admin.container.ContainerOperations;
@@ -12,6 +13,7 @@ import com.yahoo.vespa.hosted.node.admin.integration.NodeRepoMock;
import com.yahoo.vespa.hosted.node.admin.maintenance.sync.SyncClient;
import com.yahoo.vespa.hosted.node.admin.maintenance.sync.SyncFileInfo;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContextImpl;
+import com.yahoo.vespa.hosted.node.admin.task.util.file.UnixUser;
import com.yahoo.vespa.hosted.node.admin.task.util.process.CommandResult;
import com.yahoo.vespa.test.file.TestFileSystem;
import com.yahoo.yolean.concurrent.Sleeper;
@@ -31,6 +33,7 @@ import static com.yahoo.vespa.hosted.node.admin.maintenance.servicedump.ServiceD
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyList;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
@@ -65,7 +68,7 @@ class VespaServiceDumperImplTest {
void invokes_perf_commands_when_generating_perf_report() {
// Setup mocks
ContainerOperations operations = mock(ContainerOperations.class);
- when(operations.executeCommandInContainer(any(), any(), any()))
+ when(operations.executeCommandInContainer(any(NodeAgentContextImpl.class), any(UnixUser.class), any(String[].class)))
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, ""))
.thenReturn(new CommandResult(null, 0, ""));
@@ -106,7 +109,7 @@ class VespaServiceDumperImplTest {
void invokes_jcmd_commands_when_creating_jfr_recording() {
// Setup mocks
ContainerOperations operations = mock(ContainerOperations.class);
- when(operations.executeCommandInContainer(any(), any(), any()))
+ when(operations.executeCommandInContainer(any(NodeAgentContextImpl.class), any(UnixUser.class), any(String[].class)))
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, "ok"))
.thenReturn(new CommandResult(null, 0, "name=host-admin success"));
@@ -144,7 +147,7 @@ class VespaServiceDumperImplTest {
void invokes_zookeeper_backup_command_when_generating_snapshot() {
// Setup mocks
ContainerOperations operations = mock(ContainerOperations.class);
- when(operations.executeCommandInContainer(any(), any(), any()))
+ when(operations.executeCommandInContainer(any(NodeAgentContextImpl.class), any(UnixUser.class), any(String[].class)))
.thenReturn(new CommandResult(null, 0, "12345"));
SyncClient syncClient = createSyncClientMock();
NodeRepoMock nodeRepository = new NodeRepoMock();
@@ -179,7 +182,7 @@ class VespaServiceDumperImplTest {
void invokes_config_proxy_command_whn_invoking_config_dump() {
// Setup mocks
ContainerOperations operations = mock(ContainerOperations.class);
- when(operations.executeCommandInContainer(any(), any(), any()))
+ when(operations.executeCommandInContainer(any(NodeAgentContextImpl.class), any(UnixUser.class), any(String[].class)))
.thenReturn(new CommandResult(null, 0, "12345"));
SyncClient syncClient = createSyncClientMock();
NodeRepoMock nodeRepository = new NodeRepoMock();
@@ -217,7 +220,8 @@ class VespaServiceDumperImplTest {
void handles_multiple_artifact_types() {
// Setup mocks
ContainerOperations operations = mock(ContainerOperations.class);
- when(operations.executeCommandInContainer(any(), any(), any()))
+ when(operations.executeCommandInContainer(
+ any(NodeAgentContextImpl.class), any(UnixUser.class), any(String[].class)))
// For perf report:
.thenReturn(new CommandResult(null, 0, "12345"))
.thenReturn(new CommandResult(null, 0, ""))
@@ -308,7 +312,7 @@ class VespaServiceDumperImplTest {
private SyncClient createSyncClientMock() {
SyncClient client = mock(SyncClient.class);
- when(client.sync(any(), any(), anyInt()))
+ when(client.sync(any(TaskContext.class), anyList(), anyInt()))
.thenReturn(true);
return client;
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java
index b7aee6706b1..f10f3db9b59 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/sync/SyncFileInfoTest.java
@@ -38,6 +38,8 @@ public class SyncFileInfoTest {
private static final Path vespaLogPath2 = fileSystem.getPath("/opt/vespa/logs/vespa.log-2021-02-12");
private static final Path zkLogPath0 = fileSystem.getPath("/opt/vespa/logs/zookeeper.configserver.0.log");
private static final Path zkLogPath1 = fileSystem.getPath("/opt/vespa/logs/zookeeper.configserver.1.log");
+ private static final Path startServicesPath1 = fileSystem.getPath("/opt/vespa/logs/start-services.out");
+ private static final Path startServicesPath2 = fileSystem.getPath("/opt/vespa/logs/start-services.out-20230808100143");
@Test
void access_logs() {
@@ -93,6 +95,12 @@ public class SyncFileInfoTest {
assertForLogFile(zkLogPath1, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/zookeeper/zookeeper.log-2022-05-09.14-22-11.zst", ZSTD, false);
}
+ @Test
+ void start_services() {
+ assertForLogFile(startServicesPath1, null, null, true);
+ assertForLogFile(startServicesPath2, "s3://vespa-data-bucket/vespa/music/main/h432a/logs/start-services/start-services.out-20230808100143.zst", ZSTD, true);
+ }
+
private static void assertForLogFile(Path srcPath, String destination, SyncFileInfo.Compression compression, boolean rotatedOnly) {
assertForLogFile(srcPath, destination, compression, null, rotatedOnly);
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
index 0913e1d040a..ef4d6d849f6 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
@@ -487,20 +487,6 @@ public class NodeAgentImplTest {
}
@Test
- void provisionedNodeIsMarkedAsReady() {
- final NodeSpec node = nodeBuilder(NodeState.provisioned)
- .wantedDockerImage(dockerImage)
- .build();
-
- NodeAgentContext context = createContext(node);
- NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
- when(nodeRepository.getOptionalNode(hostName)).thenReturn(Optional.of(node));
-
- nodeAgent.doConverge(context);
- verify(nodeRepository, times(1)).setNodeState(eq(hostName), eq(NodeState.ready));
- }
-
- @Test
void testRestartDeadContainerAfterNodeAdminRestart() {
final NodeSpec node = nodeBuilder(NodeState.active)
.currentDockerImage(dockerImage).wantedDockerImage(dockerImage)
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributesCacheTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributesCacheTest.java
index 8c9188a9409..1b68d1d10a3 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributesCacheTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributesCacheTest.java
@@ -3,9 +3,12 @@ package com.yahoo.vespa.hosted.node.admin.task.util.file;
import org.junit.jupiter.api.Test;
+import java.time.Instant;
import java.util.Optional;
-import static org.junit.jupiter.api.Assertions.*;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
@@ -23,7 +26,8 @@ public class FileAttributesCacheTest {
verify(unixPath, times(1)).getAttributesIfExists();
verifyNoMoreInteractions(unixPath);
- FileAttributes attributes = mock(FileAttributes.class);
+ FileAttributes attributes = new FileAttributes(Instant.EPOCH, 0, 0, "", false, false, 0, 0, 0);
+ when(unixPath.getAttributesIfExists()).thenReturn(Optional.of(attributes));
when(unixPath.getAttributesIfExists()).thenReturn(Optional.of(attributes));
assertTrue(cache.get().isPresent());
verify(unixPath, times(1 + 1)).getAttributesIfExists();
@@ -32,4 +36,4 @@ public class FileAttributesCacheTest {
assertEquals(attributes, cache.getOrThrow());
verifyNoMoreInteractions(unixPath);
}
-} \ No newline at end of file
+}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributesTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributesTest.java
new file mode 100644
index 00000000000..ddcd225a871
--- /dev/null
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/FileAttributesTest.java
@@ -0,0 +1,20 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.task.util.file;
+
+import org.junit.jupiter.api.Test;
+
+import static com.yahoo.vespa.hosted.node.admin.task.util.file.FileAttributes.deviceMajor;
+import static com.yahoo.vespa.hosted.node.admin.task.util.file.FileAttributes.deviceMinor;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+/**
+ * @author freva
+ */
+class FileAttributesTest {
+
+ @Test
+ void parse_dev_t() {
+ assertEquals(0x12345BCD, deviceMajor(0x1234567890ABCDEFL));
+ assertEquals(0x67890AEF, deviceMinor(0x1234567890ABCDEFL));
+ }
+}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPathTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPathTest.java
index bbe96272b4b..5d96787214a 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPathTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixPathTest.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.hosted.node.admin.task.util.file;
import com.yahoo.vespa.test.file.TestFileSystem;
import org.junit.jupiter.api.Test;
-import org.opentest4j.AssertionFailedError;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystem;
@@ -173,7 +172,7 @@ public class UnixPathTest {
assertEquals("bar", absolutePath.getFilename());
var pathWithoutSlash = new UnixPath("foo");
- assertRuntimeException(IllegalStateException.class, "Path has no parent directory: 'foo'", () -> pathWithoutSlash.getParent());
+ assertRuntimeException(IllegalStateException.class, "Path has no parent directory: 'foo'", pathWithoutSlash::getParent);
assertEquals("foo", pathWithoutSlash.getFilename());
var pathWithSlash = new UnixPath("/foo");
@@ -190,7 +189,7 @@ public class UnixPathTest {
fail("No exception was thrown");
} catch (RuntimeException e) {
if (!baseClass.isInstance(e)) {
- throw new AssertionFailedError("Exception class mismatch", baseClass.getName(), e.getClass().getName());
+ fail("Exception class mismatch " + baseClass.getName() + " != " + e.getClass().getName());
}
assertEquals(message, e.getMessage());
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerFileSystemTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerFileSystemTest.java
index 29a78519724..c456edbbd9a 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerFileSystemTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerFileSystemTest.java
@@ -31,7 +31,7 @@ class ContainerFileSystemTest {
private final FileSystem fileSystem = TestFileSystem.create();
private final UnixPath containerRootOnHost = new UnixPath(fileSystem.getPath("/data/storage/ctr1"));
- private final UserScope userScope = UserScope.create(new UnixUser("vespa", 1000, "users", 100), new UserNamespace(10_000, 11_000, 10000));
+ private final UserScope userScope = UserScope.create(new UserNamespace(10_000, 11_000, 10000));
private final ContainerFileSystem containerFs = ContainerFileSystem.create(containerRootOnHost.createDirectories().toPath(), userScope);
@Test
@@ -67,7 +67,7 @@ class ContainerFileSystemTest {
unixPath.deleteIfExists();
new UnixPath(containerPath.withUser(userScope.vespa())).writeUtf8File("test123");
- assertOwnership(containerPath, 1000, 100, 11000, 11100);
+ assertOwnership(containerPath, 1000, 1000, 11000, 12000);
}
@Test
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerUserPrincipalLookupServiceTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerUserPrincipalLookupServiceTest.java
index 72eec92cf53..41e1667874f 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerUserPrincipalLookupServiceTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/task/util/fs/ContainerUserPrincipalLookupServiceTest.java
@@ -20,7 +20,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows;
*/
class ContainerUserPrincipalLookupServiceTest {
- private final UserScope userScope = UserScope.create(new UnixUser("vespa", 1000, "users", 100), new UserNamespace(10_000, 11_000, 10000));
+ private final UserScope userScope = UserScope.create(new UserNamespace(10_000, 11_000, 10000));
private final ContainerUserPrincipalLookupService userPrincipalLookupService =
new ContainerUserPrincipalLookupService(TestFileSystem.create().getUserPrincipalLookupService(), userScope);
@@ -31,10 +31,10 @@ class ContainerUserPrincipalLookupServiceTest {
assertEquals("11000", user.baseFsPrincipal().getName());
assertEquals(user, userPrincipalLookupService.lookupPrincipalByName("vespa"));
- ContainerGroupPrincipal group = userPrincipalLookupService.lookupPrincipalByGroupName("100");
- assertEquals("users", group.getName());
- assertEquals("11100", group.baseFsPrincipal().getName());
- assertEquals(group, userPrincipalLookupService.lookupPrincipalByGroupName("users"));
+ ContainerGroupPrincipal group = userPrincipalLookupService.lookupPrincipalByGroupName("1000");
+ assertEquals("vespa", group.getName());
+ assertEquals("12000", group.baseFsPrincipal().getName());
+ assertEquals(group, userPrincipalLookupService.lookupPrincipalByGroupName("vespa"));
assertThrows(UserPrincipalNotFoundException.class, () -> userPrincipalLookupService.lookupPrincipalByName("test"));
}
diff --git a/node-repository/pom.xml b/node-repository/pom.xml
index 52be67024d0..ff2ad112628 100644
--- a/node-repository/pom.xml
+++ b/node-repository/pom.xml
@@ -81,7 +81,7 @@
<dependency>
<groupId>org.questdb</groupId>
<artifactId>questdb</artifactId>
- <version>6.2</version>
+ <version>${questdb.vespa.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index 602314bed96..eafaed2a217 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -95,8 +95,7 @@ public class NodeRepository extends AbstractComponent {
metricsDb,
orchestrator,
config.useCuratorClientCache(),
- zone.environment().isProduction() && !zone.cloud().dynamicProvisioning() && !zone.system().isCd() ? 1 : 0,
- config.nodeCacheSize());
+ zone.environment().isProduction() && !zone.cloud().dynamicProvisioning() && !zone.system().isCd() ? 1 : 0);
}
/**
@@ -116,15 +115,14 @@ public class NodeRepository extends AbstractComponent {
MetricsDb metricsDb,
Orchestrator orchestrator,
boolean useCuratorClientCache,
- int spareCount,
- long nodeCacheSize) {
+ int spareCount) {
if (provisionServiceProvider.getHostProvisioner().isPresent() != zone.cloud().dynamicProvisioning())
throw new IllegalArgumentException(String.format(
"dynamicProvisioning property must be 1-to-1 with availability of HostProvisioner, was: dynamicProvisioning=%s, hostProvisioner=%s",
zone.cloud().dynamicProvisioning(), provisionServiceProvider.getHostProvisioner().map(__ -> "present").orElse("empty")));
this.flagSource = flagSource;
- this.db = new CuratorDb(flavors, curator, clock, useCuratorClientCache, nodeCacheSize);
+ this.db = new CuratorDb(flavors, curator, clock, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.applications = new Applications(db);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
index 1ca81df824b..796bc2eeb92 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
@@ -208,6 +208,16 @@ public class Cluster {
return minimum(ClusterModel.minScalingDuration(clusterSpec), totalDuration.dividedBy(completedEventCount));
}
+ /** The predicted time this cluster will stay in each resource configuration (including the scaling duration). */
+ public Duration allocationDuration(ClusterSpec clusterSpec) {
+ if (scalingEvents.size() < 2) return Duration.ofHours(12); // Default
+
+ long totalDurationMs = 0;
+ for (int i = 1; i < scalingEvents().size(); i++)
+ totalDurationMs += scalingEvents().get(i).at().toEpochMilli() - scalingEvents().get(i - 1).at().toEpochMilli();
+ return Duration.ofMillis(totalDurationMs / (scalingEvents.size() - 1));
+ }
+
private static Duration minimum(Duration smallestAllowed, Duration duration) {
if (duration.minus(smallestAllowed).isNegative())
return smallestAllowed;
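allocationDuration above is the mean gap between recorded scaling events. As a worked example: with events at t = 0h, 4h and 10h, the gaps are 4h and 6h, so the method returns (4h + 6h) / 2 = 5 hours; with fewer than two events it falls back to the 12-hour default. The hours here are illustrative; the computation itself is done in milliseconds.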
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
index c19d76efb35..8069c9c089b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
@@ -10,13 +10,14 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import java.time.Duration;
import java.util.List;
import java.util.Optional;
/**
* @author bratseth
*/
-public class AllocatableClusterResources {
+public class AllocatableResources {
/** The node count in the cluster */
private final int nodes;
@@ -32,9 +33,9 @@ public class AllocatableClusterResources {
private final double fulfilment;
/** Fake allocatable resources from requested capacity */
- public AllocatableClusterResources(ClusterResources requested,
- ClusterSpec clusterSpec,
- NodeRepository nodeRepository) {
+ public AllocatableResources(ClusterResources requested,
+ ClusterSpec clusterSpec,
+ NodeRepository nodeRepository) {
this.nodes = requested.nodes();
this.groups = requested.groups();
this.realResources = nodeRepository.resourcesCalculator().requestToReal(requested.nodeResources(), nodeRepository.exclusiveAllocation(clusterSpec), false);
@@ -43,7 +44,7 @@ public class AllocatableClusterResources {
this.fulfilment = 1;
}
- public AllocatableClusterResources(NodeList nodes, NodeRepository nodeRepository) {
+ public AllocatableResources(NodeList nodes, NodeRepository nodeRepository) {
this.nodes = nodes.size();
this.groups = (int)nodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
this.realResources = averageRealResourcesOf(nodes.asList(), nodeRepository); // Average since we average metrics over nodes
@@ -52,10 +53,10 @@ public class AllocatableClusterResources {
this.fulfilment = 1;
}
- public AllocatableClusterResources(ClusterResources realResources,
- NodeResources advertisedResources,
- ClusterResources idealResources,
- ClusterSpec clusterSpec) {
+ public AllocatableResources(ClusterResources realResources,
+ NodeResources advertisedResources,
+ ClusterResources idealResources,
+ ClusterSpec clusterSpec) {
this.nodes = realResources.nodes();
this.groups = realResources.groups();
this.realResources = realResources.nodeResources();
@@ -64,12 +65,12 @@ public class AllocatableClusterResources {
this.fulfilment = fulfilment(realResources, idealResources);
}
- private AllocatableClusterResources(int nodes,
- int groups,
- NodeResources realResources,
- NodeResources advertisedResources,
- ClusterSpec clusterSpec,
- double fulfilment) {
+ private AllocatableResources(int nodes,
+ int groups,
+ NodeResources realResources,
+ NodeResources advertisedResources,
+ ClusterSpec clusterSpec,
+ double fulfilment) {
this.nodes = nodes;
this.groups = groups;
this.realResources = realResources;
@@ -79,16 +80,16 @@ public class AllocatableClusterResources {
}
/** Returns this with the redundant node or group removed from counts. */
- public AllocatableClusterResources withoutRedundancy() {
+ public AllocatableResources withoutRedundancy() {
int groupSize = nodes / groups;
int nodesAdjustedForRedundancy = nodes > 1 ? (groups == 1 ? nodes - 1 : nodes - groupSize) : nodes;
int groupsAdjustedForRedundancy = nodes > 1 ? (groups == 1 ? 1 : groups - 1) : groups;
- return new AllocatableClusterResources(nodesAdjustedForRedundancy,
- groupsAdjustedForRedundancy,
- realResources,
- advertisedResources,
- clusterSpec,
- fulfilment);
+ return new AllocatableResources(nodesAdjustedForRedundancy,
+ groupsAdjustedForRedundancy,
+ realResources,
+ advertisedResources,
+ clusterSpec,
+ fulfilment);
}
/**
@@ -112,6 +113,7 @@ public class AllocatableClusterResources {
public ClusterSpec clusterSpec() { return clusterSpec; }
+ /** Returns the standard cost of these resources, in dollars per hour */
public double cost() { return nodes * advertisedResources.cost(); }
/**
@@ -128,11 +130,22 @@ public class AllocatableClusterResources {
return (vcpuFulfilment + memoryGbFulfilment + diskGbFulfilment) / 3;
}
- public boolean preferableTo(AllocatableClusterResources other) {
- if (this.fulfilment < 1 || other.fulfilment < 1) // always fulfil as much as possible
- return this.fulfilment > other.fulfilment;
+ public boolean preferableTo(AllocatableResources other, ClusterModel model) {
+ if (other.fulfilment() < 1 || this.fulfilment() < 1) // always fulfil as much as possible
+ return this.fulfilment() > other.fulfilment();
- return this.cost() < other.cost(); // otherwise, prefer lower cost
+ return this.cost() * toHours(model.allocationDuration()) + this.costChangingFrom(model)
+ <
+ other.cost() * toHours(model.allocationDuration()) + other.costChangingFrom(model);
+ }
+
+ private double toHours(Duration duration) {
+ return duration.toMillis() / 3600000.0;
+ }
+
+ /** The estimated cost of changing from the given current resources to this. */
+ public double costChangingFrom(ClusterModel model) {
+ return new ResourceChange(model, this).cost();
}
@Override
@@ -154,12 +167,13 @@ public class AllocatableClusterResources {
.withBandwidthGbps(sum.bandwidthGbps() / nodes.size());
}
- public static Optional<AllocatableClusterResources> from(ClusterResources wantedResources,
- ApplicationId applicationId,
- ClusterSpec clusterSpec,
- Limits applicationLimits,
- List<NodeResources> availableRealHostResources,
- NodeRepository nodeRepository) {
+ public static Optional<AllocatableResources> from(ClusterResources wantedResources,
+ ApplicationId applicationId,
+ ClusterSpec clusterSpec,
+ Limits applicationLimits,
+ List<NodeResources> availableRealHostResources,
+ ClusterModel model,
+ NodeRepository nodeRepository) {
var systemLimits = nodeRepository.nodeResourceLimits();
boolean exclusive = nodeRepository.exclusiveAllocation(clusterSpec);
if (! exclusive) {
@@ -193,8 +207,8 @@ public class AllocatableClusterResources {
}
else { // Return the cheapest flavor satisfying the requested resources, if any
NodeResources cappedWantedResources = applicationLimits.cap(wantedResources.nodeResources());
- Optional<AllocatableClusterResources> best = Optional.empty();
- Optional<AllocatableClusterResources> bestDisregardingDiskLimit = Optional.empty();
+ Optional<AllocatableResources> best = Optional.empty();
+ Optional<AllocatableResources> bestDisregardingDiskLimit = Optional.empty();
for (Flavor flavor : nodeRepository.flavors().getFlavors()) {
// Flavor decide resources: Real resources are the worst case real resources we'll get if we ask for these advertised resources
NodeResources advertisedResources = nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor);
@@ -216,18 +230,18 @@ public class AllocatableClusterResources {
if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue;
if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)) continue;
- var candidate = new AllocatableClusterResources(wantedResources.with(realResources),
- advertisedResources,
- wantedResources,
- clusterSpec);
+ var candidate = new AllocatableResources(wantedResources.with(realResources),
+ advertisedResources,
+ wantedResources,
+ clusterSpec);
if ( ! systemLimits.isWithinAdvertisedDiskLimits(advertisedResources, clusterSpec)) { // TODO: Remove when disk limit is enforced
- if (bestDisregardingDiskLimit.isEmpty() || candidate.preferableTo(bestDisregardingDiskLimit.get())) {
+ if (bestDisregardingDiskLimit.isEmpty() || candidate.preferableTo(bestDisregardingDiskLimit.get(), model)) {
bestDisregardingDiskLimit = Optional.of(candidate);
}
continue;
}
- if (best.isEmpty() || candidate.preferableTo(best.get())) {
+ if (best.isEmpty() || candidate.preferableTo(best.get(), model)) {
best = Optional.of(candidate);
}
}
@@ -237,13 +251,13 @@ public class AllocatableClusterResources {
}
}
- private static AllocatableClusterResources calculateAllocatableResources(ClusterResources wantedResources,
- NodeRepository nodeRepository,
- ApplicationId applicationId,
- ClusterSpec clusterSpec,
- Limits applicationLimits,
- boolean exclusive,
- boolean bestCase) {
+ private static AllocatableResources calculateAllocatableResources(ClusterResources wantedResources,
+ NodeRepository nodeRepository,
+ ApplicationId applicationId,
+ ClusterSpec clusterSpec,
+ Limits applicationLimits,
+ boolean exclusive,
+ boolean bestCase) {
var systemLimits = nodeRepository.nodeResourceLimits();
var advertisedResources = nodeRepository.resourcesCalculator().realToRequest(wantedResources.nodeResources(), exclusive, bestCase);
advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive, true); // Ask for something legal
@@ -255,10 +269,10 @@ public class AllocatableClusterResources {
advertisedResources = advertisedResources.with(NodeResources.StorageType.remote);
realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive, bestCase);
}
- return new AllocatableClusterResources(wantedResources.with(realResources),
- advertisedResources,
- wantedResources,
- clusterSpec);
+ return new AllocatableResources(wantedResources.with(realResources),
+ advertisedResources,
+ wantedResources,
+ clusterSpec);
}
/** Returns true if the given resources could be allocated on any of the given host flavors */
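The flavor loop above keeps two running candidates: the preferred allocation overall, and a fallback that ignores the (not yet enforced) disk limit. Below is a minimal, self-contained sketch of that selection pattern using a hypothetical Candidate type rather than the Vespa classes, assuming the fallback is only used when no candidate passes the disk check.

import java.util.Comparator;
import java.util.List;
import java.util.Optional;

// Hypothetical candidate type for illustration only (not a Vespa class).
record Candidate(String flavor, double cost, boolean withinDiskLimit) { }

class CheapestFlavorSketch {

    // Track the best candidate within the disk limit, plus a best-effort fallback that ignores it.
    static Optional<Candidate> pick(List<Candidate> candidates, Comparator<Candidate> preference) {
        Optional<Candidate> best = Optional.empty();
        Optional<Candidate> bestDisregardingDiskLimit = Optional.empty();
        for (Candidate candidate : candidates) {
            if ( ! candidate.withinDiskLimit()) {
                if (bestDisregardingDiskLimit.isEmpty()
                    || preference.compare(candidate, bestDisregardingDiskLimit.get()) < 0)
                    bestDisregardingDiskLimit = Optional.of(candidate);
                continue;
            }
            if (best.isEmpty() || preference.compare(candidate, best.get()) < 0)
                best = Optional.of(candidate);
        }
        return best.isPresent() ? best : bestDisregardingDiskLimit;
    }

}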
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index 42bb16005ee..f650d8ec269 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -5,7 +5,6 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.provisioning.NodeResourceLimits;
import java.util.Optional;
@@ -35,21 +34,20 @@ public class AllocationOptimizer {
* @return the best allocation, if there are any possible legal allocations, fulfilling the target
* fully or partially, within the limits
*/
- public Optional<AllocatableClusterResources> findBestAllocation(Load loadAdjustment,
- AllocatableClusterResources current,
- ClusterModel clusterModel,
- Limits limits) {
+ public Optional<AllocatableResources> findBestAllocation(Load loadAdjustment,
+ ClusterModel model,
+ Limits limits) {
if (limits.isEmpty())
limits = Limits.of(new ClusterResources(minimumNodes, 1, NodeResources.unspecified()),
new ClusterResources(maximumNodes, maximumNodes, NodeResources.unspecified()),
IntRange.empty());
else
- limits = atLeast(minimumNodes, limits).fullySpecified(current.clusterSpec(), nodeRepository, clusterModel.application().id());
- Optional<AllocatableClusterResources> bestAllocation = Optional.empty();
+ limits = atLeast(minimumNodes, limits).fullySpecified(model.current().clusterSpec(), nodeRepository, model.application().id());
+ Optional<AllocatableResources> bestAllocation = Optional.empty();
var availableRealHostResources = nodeRepository.zone().cloud().dynamicProvisioning()
? nodeRepository.flavors().getFlavors().stream().map(flavor -> flavor.resources()).toList()
: nodeRepository.nodes().list().hosts().stream().map(host -> host.flavor().resources())
- .map(hostResources -> maxResourcesOf(hostResources, clusterModel))
+ .map(hostResources -> maxResourcesOf(hostResources, model))
.toList();
for (int groups = limits.min().groups(); groups <= limits.max().groups(); groups++) {
for (int nodes = limits.min().nodes(); nodes <= limits.max().nodes(); nodes++) {
@@ -58,15 +56,16 @@ public class AllocationOptimizer {
var resources = new ClusterResources(nodes,
groups,
nodeResourcesWith(nodes, groups,
- limits, loadAdjustment, current, clusterModel));
- var allocatableResources = AllocatableClusterResources.from(resources,
- clusterModel.application().id(),
- current.clusterSpec(),
- limits,
- availableRealHostResources,
- nodeRepository);
+ limits, loadAdjustment, model));
+ var allocatableResources = AllocatableResources.from(resources,
+ model.application().id(),
+ model.current().clusterSpec(),
+ limits,
+ availableRealHostResources,
+ model,
+ nodeRepository);
if (allocatableResources.isEmpty()) continue;
- if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get()))
+ if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get(), model))
bestAllocation = allocatableResources;
}
}
@@ -74,8 +73,8 @@ public class AllocationOptimizer {
}
/** Returns the max resources of a host one node may allocate. */
- private NodeResources maxResourcesOf(NodeResources hostResources, ClusterModel clusterModel) {
- if (nodeRepository.exclusiveAllocation(clusterModel.clusterSpec())) return hostResources;
+ private NodeResources maxResourcesOf(NodeResources hostResources, ClusterModel model) {
+ if (nodeRepository.exclusiveAllocation(model.clusterSpec())) return hostResources;
// static, shared hosts: Allocate at most half of the host cpu to simplify management
return hostResources.withVcpu(hostResources.vcpu() / 2);
}
@@ -88,9 +87,8 @@ public class AllocationOptimizer {
int groups,
Limits limits,
Load loadAdjustment,
- AllocatableClusterResources current,
- ClusterModel clusterModel) {
- var loadWithTarget = clusterModel.loadAdjustmentWith(nodes, groups, loadAdjustment);
+ ClusterModel model) {
+ var loadWithTarget = model.loadAdjustmentWith(nodes, groups, loadAdjustment);
// Leave some headroom above the ideal allocation to avoid immediately needing to scale back up
if (loadAdjustment.cpu() < 1 && (1.0 - loadWithTarget.cpu()) < headroomRequiredToScaleDown)
@@ -100,11 +98,11 @@ public class AllocationOptimizer {
if (loadAdjustment.disk() < 1 && (1.0 - loadWithTarget.disk()) < headroomRequiredToScaleDown)
loadAdjustment = loadAdjustment.withDisk(Math.min(1.0, loadAdjustment.disk() * (1.0 + headroomRequiredToScaleDown)));
- loadWithTarget = clusterModel.loadAdjustmentWith(nodes, groups, loadAdjustment);
+ loadWithTarget = model.loadAdjustmentWith(nodes, groups, loadAdjustment);
- var scaled = loadWithTarget.scaled(current.realResources().nodeResources());
+ var scaled = loadWithTarget.scaled(model.current().realResources().nodeResources());
var nonScaled = limits.isEmpty() || limits.min().nodeResources().isUnspecified()
- ? current.advertisedResources().nodeResources()
+ ? model.current().advertisedResources().nodeResources()
: limits.min().nodeResources(); // min=max for non-scaled
return nonScaled.withVcpu(scaled.vcpu()).withMemoryGb(scaled.memoryGb()).withDiskGb(scaled.diskGb());
}
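The headroom guard in nodeResourcesWith softens a scale-down when the projected load would leave too little slack. A small numeric sketch of the cpu case, with an assumed headroomRequiredToScaleDown value (the real constant is defined elsewhere in AllocationOptimizer):

class HeadroomSketch {

    static final double headroomRequiredToScaleDown = 0.1; // assumed value, for illustration only

    // Dampen a scale-down adjustment (< 1) when the projected load leaves less headroom than required.
    static double adjustCpu(double cpuAdjustment, double projectedCpuLoad) {
        if (cpuAdjustment < 1 && (1.0 - projectedCpuLoad) < headroomRequiredToScaleDown)
            return Math.min(1.0, cpuAdjustment * (1.0 + headroomRequiredToScaleDown));
        return cpuAdjustment;
    }

    public static void main(String[] args) {
        // Wants to scale cpu down to 0.85 of current, but projected load 0.93 leaves only 7% headroom:
        System.out.println(adjustCpu(0.85, 0.93)); // 0.935: the scale-down is softened
        // Projected load 0.80 leaves 20% headroom, so the adjustment passes through unchanged:
        System.out.println(adjustCpu(0.85, 0.80)); // 0.85
    }

}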
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 32b59319a88..b5f86be68f6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -54,40 +54,40 @@ public class Autoscaler {
}
private Autoscaling autoscale(Application application, Cluster cluster, NodeList clusterNodes, Limits limits) {
- ClusterModel clusterModel = new ClusterModel(nodeRepository,
- application,
- clusterNodes.not().retired().clusterSpec(),
- cluster,
- clusterNodes,
- nodeRepository.metricsDb(),
- nodeRepository.clock());
- if (clusterModel.isEmpty()) return Autoscaling.empty();
+ var model = new ClusterModel(nodeRepository,
+ application,
+ clusterNodes.not().retired().clusterSpec(),
+ cluster,
+ clusterNodes,
+ new AllocatableResources(clusterNodes.not().retired(), nodeRepository),
+ nodeRepository.metricsDb(),
+ nodeRepository.clock());
+ if (model.isEmpty()) return Autoscaling.empty();
if (! limits.isEmpty() && cluster.minResources().equals(cluster.maxResources()))
- return Autoscaling.dontScale(Autoscaling.Status.unavailable, "Autoscaling is not enabled", clusterModel);
+ return Autoscaling.dontScale(Autoscaling.Status.unavailable, "Autoscaling is not enabled", model);
- if ( ! clusterModel.isStable(nodeRepository))
- return Autoscaling.dontScale(Status.waiting, "Cluster change in progress", clusterModel);
+ if ( ! model.isStable(nodeRepository))
+ return Autoscaling.dontScale(Status.waiting, "Cluster change in progress", model);
- var current = new AllocatableClusterResources(clusterNodes.not().retired(), nodeRepository);
- var loadAdjustment = clusterModel.loadAdjustment();
+ var loadAdjustment = model.loadAdjustment();
// Ensure we only scale down if we'll have enough headroom to not scale up again given a small load increase
- var target = allocationOptimizer.findBestAllocation(loadAdjustment, current, clusterModel, limits);
+ var target = allocationOptimizer.findBestAllocation(loadAdjustment, model, limits);
if (target.isEmpty())
- return Autoscaling.dontScale(Status.insufficient, "No allocations are possible within configured limits", clusterModel);
+ return Autoscaling.dontScale(Status.insufficient, "No allocations are possible within configured limits", model);
- if (! worthRescaling(current.realResources(), target.get().realResources())) {
+ if (! worthRescaling(model.current().realResources(), target.get().realResources())) {
if (target.get().fulfilment() < 0.9999999)
- return Autoscaling.dontScale(Status.insufficient, "Configured limits prevents ideal scaling of this cluster", clusterModel);
- else if ( ! clusterModel.safeToScaleDown() && clusterModel.idealLoad().any(v -> v < 1.0))
- return Autoscaling.dontScale(Status.ideal, "Cooling off before considering to scale down", clusterModel);
+ return Autoscaling.dontScale(Status.insufficient, "Configured limits prevents ideal scaling of this cluster", model);
+ else if ( ! model.safeToScaleDown() && model.idealLoad().any(v -> v < 1.0))
+ return Autoscaling.dontScale(Status.ideal, "Cooling off before considering to scale down", model);
else
- return Autoscaling.dontScale(Status.ideal, "Cluster is ideally scaled (within configured limits)", clusterModel);
+ return Autoscaling.dontScale(Status.ideal, "Cluster is ideally scaled (within configured limits)", model);
}
- return Autoscaling.scaleTo(target.get().advertisedResources(), clusterModel);
+ return Autoscaling.scaleTo(target.get().advertisedResources(), model);
}
/** Returns true if it is worthwhile to make the given resource change, false if it is too insignificant */
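The reworked autoscale method above is a cascade of early exits before deciding to rescale. A condensed sketch of that decision order, with the branch conditions collapsed to booleans (hypothetical simplified signature; the strings paraphrase the reasons used in the diff):

class AutoscaleDecisionSketch {

    static String decide(boolean fixedResources, boolean stable, boolean targetFound,
                         boolean worthRescaling, boolean fulfilled, boolean safeToScaleDown) {
        if (fixedResources)    return "unavailable: autoscaling is not enabled (min == max)";
        if ( ! stable)         return "waiting: cluster change in progress";
        if ( ! targetFound)    return "insufficient: no allocations possible within configured limits";
        if ( ! worthRescaling) {
            if ( ! fulfilled)       return "insufficient: configured limits prevent ideal scaling";
            if ( ! safeToScaleDown) return "ideal: cooling off before considering to scale down";
            return "ideal: cluster is ideally scaled within configured limits";
        }
        return "rescaling: scale to the target advertised resources";
    }

}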
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java
index 0c86108b36c..fad280d6c29 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java
@@ -120,25 +120,25 @@ public class Autoscaling {
}
/** Creates an autoscaling conclusion which does not change the current allocation for a specified reason. */
- public static Autoscaling dontScale(Status status, String description, ClusterModel clusterModel) {
+ public static Autoscaling dontScale(Status status, String description, ClusterModel model) {
return new Autoscaling(status,
description,
Optional.empty(),
- clusterModel.at(),
- clusterModel.peakLoad(),
- clusterModel.idealLoad(),
- clusterModel.metrics());
+ model.at(),
+ model.peakLoad(),
+ model.idealLoad(),
+ model.metrics());
}
/** Creates an autoscaling conclusion to scale. */
- public static Autoscaling scaleTo(ClusterResources target, ClusterModel clusterModel) {
+ public static Autoscaling scaleTo(ClusterResources target, ClusterModel model) {
return new Autoscaling(Status.rescaling,
"Rescaling initiated due to load changes",
Optional.of(target),
- clusterModel.at(),
- clusterModel.peakLoad(),
- clusterModel.idealLoad(),
- clusterModel.metrics());
+ model.at(),
+ model.peakLoad(),
+ model.idealLoad(),
+ model.metrics());
}
public enum Status {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 0d64d4fbb10..8976dd9ff08 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -50,6 +50,7 @@ public class ClusterModel {
private final Application application;
private final ClusterSpec clusterSpec;
private final Cluster cluster;
+ private final AllocatableResources current;
private final CpuModel cpu = new CpuModel();
private final MemoryModel memory = new MemoryModel();
@@ -63,6 +64,7 @@ public class ClusterModel {
private final Clock clock;
private final Duration scalingDuration;
+ private final Duration allocationDuration;
private final ClusterTimeseries clusterTimeseries;
private final ClusterNodesTimeseries nodeTimeseries;
private final Instant at;
@@ -77,6 +79,7 @@ public class ClusterModel {
ClusterSpec clusterSpec,
Cluster cluster,
NodeList clusterNodes,
+ AllocatableResources current,
MetricsDb metricsDb,
Clock clock) {
this.nodeRepository = nodeRepository;
@@ -84,8 +87,10 @@ public class ClusterModel {
this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = clusterNodes;
+ this.current = current;
this.clock = clock;
this.scalingDuration = cluster.scalingDuration(clusterSpec);
+ this.allocationDuration = cluster.allocationDuration(clusterSpec);
this.clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id());
this.nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb);
this.at = clock.instant();
@@ -95,8 +100,10 @@ public class ClusterModel {
Application application,
ClusterSpec clusterSpec,
Cluster cluster,
+ AllocatableResources current,
Clock clock,
Duration scalingDuration,
+ Duration allocationDuration,
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
this.nodeRepository = nodeRepository;
@@ -104,9 +111,11 @@ public class ClusterModel {
this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = NodeList.of();
+ this.current = current;
this.clock = clock;
this.scalingDuration = scalingDuration;
+ this.allocationDuration = allocationDuration;
this.clusterTimeseries = clusterTimeseries;
this.nodeTimeseries = nodeTimeseries;
this.at = clock.instant();
@@ -114,6 +123,7 @@ public class ClusterModel {
public Application application() { return application; }
public ClusterSpec clusterSpec() { return clusterSpec; }
+ public AllocatableResources current() { return current; }
private ClusterNodesTimeseries nodeTimeseries() { return nodeTimeseries; }
private ClusterTimeseries clusterTimeseries() { return clusterTimeseries; }
@@ -127,6 +137,27 @@ public class ClusterModel {
/** Returns the predicted duration of a rescaling of this cluster */
public Duration scalingDuration() { return scalingDuration; }
+ /**
+ * Returns the predicted duration of a resource change in this cluster,
+ * until we, or the application, change it again.
+ */
+ public Duration allocationDuration() { return allocationDuration; }
+
+ public boolean isContent() {
+ return clusterSpec.type().isContent();
+ }
+
+ /** Returns the predicted duration of data redistribution in this cluster. */
+ public Duration redistributionDuration() {
+ if (! isContent()) return Duration.ofMinutes(0);
+ return scalingDuration(); // TODO: Estimate separately
+ }
+
+ /** Returns the predicted duration of replacing all the nodes in this cluster. */
+ public Duration nodeReplacementDuration() {
+ return Duration.ofMinutes(5); // TODO: Estimate?
+ }
+
/** Returns the average of the peak load measurement in each dimension, from each node. */
public Load peakLoad() {
return nodeTimeseries().peakLoad();
@@ -137,6 +168,10 @@ public class ClusterModel {
return loadWith(nodeCount(), groupCount());
}
+ public boolean isExclusive() {
+ return nodeRepository.exclusiveAllocation(clusterSpec);
+ }
+
/** Returns the relative load adjustment that should be made to this cluster given available measurements. */
public Load loadAdjustment() {
if (nodeTimeseries().measurementsPerNode() < 0.5) return Load.one(); // Don't change based on very little data
@@ -237,16 +272,15 @@ public class ClusterModel {
private Load adjustQueryDependentIdealLoadByBcpGroupInfo(Load ideal) {
double currentClusterTotalVcpuPerGroup = nodes.not().retired().first().get().resources().vcpu() * groupSize();
-
double targetQueryRateToHandle = ( canRescaleWithinBcpDeadline() ? averageQueryRate().orElse(0)
: cluster.bcpGroupInfo().queryRate() )
* cluster.bcpGroupInfo().growthRateHeadroom() * trafficShiftHeadroom();
- double neededTotalVcpPerGroup = cluster.bcpGroupInfo().cpuCostPerQuery() * targetQueryRateToHandle / groupCount() +
+ double neededTotalVcpuPerGroup = cluster.bcpGroupInfo().cpuCostPerQuery() * targetQueryRateToHandle / groupCount() +
( 1 - cpu.queryFraction()) * cpu.idealLoad() *
(clusterSpec.type().isContainer() ? 1 : groupSize());
-
- double cpuAdjustment = neededTotalVcpPerGroup / currentClusterTotalVcpuPerGroup;
- return ideal.withCpu(peakLoad().cpu() / cpuAdjustment);
+ // Max 1: Only use bcp group info if it indicates that we need to scale *up*
+ double cpuAdjustment = Math.max(1.0, neededTotalVcpuPerGroup / currentClusterTotalVcpuPerGroup);
+ return ideal.withCpu(ideal.cpu() / cpuAdjustment);
}
private boolean hasScaledIn(Duration period) {
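The bcp-group adjustment above now caps cpuAdjustment at 1.0, so group info can only push the ideal cpu load down (asking for more cpu), never relax it. A worked numeric sketch with made-up inputs, assuming a content cluster (so the group-size factor applies):

class BcpAdjustmentSketch {

    public static void main(String[] args) {
        double vcpuPerNode = 4, groupSize = 3, groupCount = 2;           // assumed topology
        double cpuCostPerQuery = 0.05, queryRate = 400;                  // assumed bcp group info
        double growthRateHeadroom = 1.1, trafficShiftHeadroom = 1.2;     // assumed headrooms
        double queryFraction = 0.6, idealCpuLoad = 0.7, idealCpu = 0.7;  // assumed model values

        double currentTotalVcpuPerGroup = vcpuPerNode * groupSize;                       // 12.0
        double targetQueryRate = queryRate * growthRateHeadroom * trafficShiftHeadroom;  // 528.0
        double neededTotalVcpuPerGroup = cpuCostPerQuery * targetQueryRate / groupCount
                                         + (1 - queryFraction) * idealCpuLoad * groupSize; // 13.2 + 0.84 = 14.04
        double cpuAdjustment = Math.max(1.0, neededTotalVcpuPerGroup / currentTotalVcpuPerGroup); // 1.17
        System.out.println(idealCpu / cpuAdjustment); // ~0.598: ideal cpu lowered, forcing a scale-up
        // If neededTotalVcpuPerGroup were below 12.0, the max() would cap the adjustment at 1.0
        // and the ideal load would be left unchanged.
    }

}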
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceChange.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceChange.java
new file mode 100644
index 00000000000..7a26a217e61
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceChange.java
@@ -0,0 +1,94 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.autoscale;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeResources;
+
+import java.time.Duration;
+
+/**
+ * A resource change.
+ *
+ * @author bratseth
+ */
+public class ResourceChange {
+
+ private final AllocatableResources from, to;
+ private final ClusterModel model;
+
+ public ResourceChange(ClusterModel model, AllocatableResources to) {
+ this.from = model.current();
+ this.to = to;
+ this.model = model;
+ }
+
+ /** Returns the estimated total cost of this resource change (in addition to the cost of the "to" resources). */
+ public double cost() {
+ if (model.isContent()) {
+ if (requiresNodeReplacement()) return toHours(model.redistributionDuration()) * from.cost();
+ return toHours(model.redistributionDuration()) * from.advertisedResources().cost() * nodesToRetire();
+ }
+ else {
+ if (requiresNodeReplacement()) return toHours(model.nodeReplacementDuration()) * from.cost();
+ return 0;
+ }
+ }
+
+ private boolean requiresRedistribution() {
+ if ( ! model.clusterSpec().type().isContent()) return false;
+ if (from.nodes() != to.nodes()) return true;
+ if (from.groups() != to.groups()) return true;
+ if (requiresNodeReplacement()) return true;
+ return false;
+ }
+
+ /**
+ * Returns the estimated number of nodes that will be retired by this change,
+ * given that it is a content cluster and no node replacement is necessary.
+ * This is not necessarily exact when the change alters the group layout.
+ */
+ private int nodesToRetire() {
+ return Math.max(0, from.nodes() - to.nodes());
+ }
+
+ /** Returns true if the *existing* nodes of this cluster need to be replaced in this change. */
+ private boolean requiresNodeReplacement() {
+ var fromNodes = from.advertisedResources().nodeResources();
+ var toNodes = to.advertisedResources().nodeResources();
+
+ if (model.isExclusive()) {
+ return ! fromNodes.equals(toNodes);
+ }
+ else {
+ if ( ! fromNodes.justNonNumbers().equalsWhereSpecified(toNodes.justNonNumbers())) return true;
+ if ( ! canInPlaceResize()) return true;
+ return false;
+ }
+ }
+
+ private double toHours(Duration duration) {
+ return duration.toMillis() / 3600000.0;
+ }
+
+ private boolean canInPlaceResize() {
+ return canInPlaceResize(from.nodes(), from.advertisedResources().nodeResources(),
+ to.nodes(), to.advertisedResources().nodeResources(),
+ model.clusterSpec().type(), model.isExclusive(), from.groups() != to.groups());
+ }
+
+ public static boolean canInPlaceResize(int fromCount, NodeResources fromResources,
+ int toCount, NodeResources toResources,
+ ClusterSpec.Type type, boolean exclusive, boolean hasTopologyChange) {
+ if (exclusive) return false; // exclusive resources must match the host
+
+ // Never allow in-place resize when also changing topology or decreasing cluster size
+ if (hasTopologyChange || toCount < fromCount) return false;
+
+ // Do not allow increasing cluster size and decreasing node resources at the same time for content nodes
+ if (type.isContent() && toCount > fromCount && !toResources.satisfies(fromResources.justNumbers()))
+ return false;
+
+ return true;
+ }
+
+}
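For the common content-cluster case with no node replacement, cost() in the new class reduces to redistribution time multiplied by the per-node cost of the retired nodes. A worked numeric sketch with made-up numbers:

class ResourceChangeCostSketch {

    public static void main(String[] args) {
        double redistributionHours = 0.5;  // assumed model.redistributionDuration() of 30 minutes
        double perNodeCostPerHour = 2.0;   // assumed from.advertisedResources().cost()
        int fromNodes = 10, toNodes = 8;   // shrinking a content cluster, no node replacement

        int nodesToRetire = Math.max(0, fromNodes - toNodes);                   // 2
        double cost = redistributionHours * perNodeCostPerHour * nodesToRetire; // 1.0
        System.out.println(cost); // 1.0, added on top of the steady-state cost of the "to" resources
    }

}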
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
index 92f86325cf7..8638087c5cd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
@@ -16,7 +16,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Applications;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
-import com.yahoo.vespa.hosted.provision.autoscale.AllocatableClusterResources;
+import com.yahoo.vespa.hosted.provision.autoscale.AllocatableResources;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.autoscale.NodeMetricSnapshot;
@@ -57,11 +57,13 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
int attempts = 0;
int failures = 0;
+ outer:
for (var applicationNodes : activeNodesByApplication().entrySet()) {
boolean enabled = enabledFlag.with(FetchVector.Dimension.APPLICATION_ID,
applicationNodes.getKey().serializedForm()).value();
if (!enabled) continue;
for (var clusterNodes : nodesByCluster(applicationNodes.getValue()).entrySet()) {
+ if (shuttingDown()) break outer;
attempts++;
if ( ! autoscale(applicationNodes.getKey(), clusterNodes.getKey()))
failures++;
@@ -87,7 +89,7 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
NodeList clusterNodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId);
cluster = updateCompletion(cluster, clusterNodes);
- var current = new AllocatableClusterResources(clusterNodes.not().retired(), nodeRepository()).advertisedResources();
+ var current = new AllocatableResources(clusterNodes.not().retired(), nodeRepository()).advertisedResources();
// Autoscale unless an autoscaling is already in progress
Autoscaling autoscaling = null;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DiskReplacer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DiskReplacer.java
index 5baa4f63867..0c1d6291baa 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DiskReplacer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/DiskReplacer.java
@@ -1,16 +1,21 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
+import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeMutex;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner;
+import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner.RebuildResult;
import com.yahoo.yolean.Exceptions;
import java.time.Duration;
+import java.util.List;
import java.util.Optional;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -22,8 +27,10 @@ import java.util.logging.Logger;
public class DiskReplacer extends NodeRepositoryMaintainer {
private static final Logger log = Logger.getLogger(DiskReplacer.class.getName());
+ private static final int maxBatchSize = 100;
private final HostProvisioner hostProvisioner;
+ private final ExecutorService executor = Executors.newCachedThreadPool(new DaemonThreadFactory("disk-replacer"));
DiskReplacer(NodeRepository nodeRepository, Duration interval, Metric metric, HostProvisioner hostProvisioner) {
super(nodeRepository, interval, metric);
@@ -34,25 +41,28 @@ public class DiskReplacer extends NodeRepositoryMaintainer {
protected double maintain() {
NodeList nodes = nodeRepository().nodes().list().rebuilding(true);
int failures = 0;
- for (var host : nodes) {
- Optional<NodeMutex> optionalMutex = nodeRepository().nodes().lockAndGet(host, Duration.ofSeconds(10));
- if (optionalMutex.isEmpty()) continue;
- try (NodeMutex mutex = optionalMutex.get()) {
- // Re-check flag while holding lock
- host = mutex.node();
- if (!host.status().wantToRebuild()) {
- continue;
- }
- Node updatedNode = hostProvisioner.replaceRootDisk(host);
- if (!updatedNode.status().wantToRebuild()) {
- nodeRepository().nodes().write(updatedNode, mutex);
- }
- } catch (RuntimeException e) {
- failures++;
- log.log(Level.WARNING, "Failed to rebuild " + host.hostname() + ", will retry in " +
- interval() + ": " + Exceptions.toMessageString(e));
+ List<Node> rebuilding;
+ try (var locked = nodeRepository().nodes().lockAndGetAll(nodes.asList(), Optional.of(Duration.ofSeconds(10)))) {
+ rebuilding = locked.nodes().stream().map(NodeMutex::node).toList();
+ RebuildResult result = hostProvisioner.replaceRootDisk(rebuilding);
+
+ for (Node updated : result.rebuilt())
+ if (!updated.status().wantToRebuild())
+ nodeRepository().nodes().write(updated, () -> { });
+
+ for (var entry : result.failed().entrySet()) {
+ ++failures;
+ log.log(Level.WARNING, "Failed to rebuild " + entry.getKey() + ", will retry in " +
+ interval() + ": " + Exceptions.toMessageString(entry.getValue()));
}
}
- return this.asSuccessFactorDeviation(nodes.size(), failures);
+ return this.asSuccessFactorDeviation(rebuilding.size(), failures);
+ }
+
+ @Override
+ public void shutdown() {
+ super.shutdown();
+ executor.shutdown();
}
+
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
index 28679b504aa..14693c75436 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainer.java
@@ -13,10 +13,9 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import java.time.Duration;
-import java.time.Instant;
import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Collectors;
+
+import static java.util.stream.Collectors.toMap;
/**
* The application maintainer regularly redeploys all applications to make sure the node repo and application
@@ -39,31 +38,22 @@ public class PeriodicApplicationMaintainer extends ApplicationMaintainer {
@Override
protected boolean canDeployNow(ApplicationId application) {
- return deployer().activationTime(application)
- // Don't deploy if a regular deploy just happened
- .map(lastDeployTime -> lastDeployTime.isBefore(nodeRepository().clock().instant().minus(minTimeBetweenRedeployments)))
- // We only know last deploy time for applications that were deployed on this config server,
- // the rest will be deployed on another config server
- .orElse(false);
+ return deployer().deployTime(application)
+ .map(lastDeployTime -> lastDeployTime.isBefore(nodeRepository().clock().instant().minus(minTimeBetweenRedeployments))
+ || deployer().readiedReindexingAfter(application, lastDeployTime))
+ .orElse(false);
}
@Override
protected Map<ApplicationId, String> applicationsNeedingMaintenance() {
if (deployer().bootstrapping()) return Map.of();
- // Collect all deployment times before sorting as deployments may happen while we build the set, breaking
- // the comparable contract. Stale times are fine as the time is rechecked in ApplicationMaintainer#deployNow
- Map<ApplicationId, Instant> deploymentTimes = nodesNeedingMaintenance().stream()
- .map(node -> node.allocation().get().owner())
- .distinct()
- .filter(this::canDeployNow)
- .collect(Collectors.toMap(Function.identity(), this::activationTime));
-
- return deploymentTimes.entrySet().stream()
- .sorted(Map.Entry.comparingByValue())
- .map(Map.Entry::getKey)
- .filter(this::shouldMaintain)
- .collect(Collectors.toMap(applicationId -> applicationId, applicationId -> "current deployment being too old"));
+ return nodesNeedingMaintenance().stream()
+ .map(node -> node.allocation().get().owner())
+ .distinct()
+ .filter(this::shouldMaintain)
+ .filter(this::canDeployNow)
+ .collect(toMap(applicationId -> applicationId, applicationId -> "current deployment being too old"));
}
private boolean shouldMaintain(ApplicationId id) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
index 4071559d841..60688e3f460 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainer.java
@@ -41,8 +41,10 @@ public class ScalingSuggestionsMaintainer extends NodeRepositoryMaintainer {
int attempts = 0;
int failures = 0;
+ outer:
for (var application : activeNodesByApplication().entrySet()) {
for (var cluster : nodesByCluster(application.getValue()).entrySet()) {
+ if (shuttingDown()) break outer;
attempts++;
if ( ! suggest(application.getKey(), cluster.getKey(), cluster.getValue()))
failures++;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java
index d0e72cea8fc..d88c9189157 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/NodeAcl.java
@@ -89,7 +89,7 @@ public record NodeAcl(Node node,
// - udp port 51820 from the world
trustedNodes.addAll(trustedNodesForChildrenMatching(node, allNodes, n -> EnumSet.of(NodeType.tenant, NodeType.proxy).contains(n.type()), RPC_PORTS, ipSpace));
trustedPorts.add(4443);
- if (zone.system().isPublic() && zone.cloud().allowEnclave()) {
+ if (zone.cloud().allowEnclave()) {
trustedUdpPorts.add(WIREGUARD_PORT);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
index c388273b1a6..43a135a7e04 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
@@ -88,8 +88,8 @@ public class CuratorDb {
/** Simple cache for deserialized node objects, based on their ZK node version. */
private final Cache<Path, Pair<Integer, Node>> cachedNodes = CacheBuilder.newBuilder().recordStats().build();
- public CuratorDb(NodeFlavors flavors, Curator curator, Clock clock, boolean useCache, long nodeCacheSize) {
- this.nodeSerializer = new NodeSerializer(flavors, nodeCacheSize);
+ public CuratorDb(NodeFlavors flavors, Curator curator, Clock clock, boolean useCache) {
+ this.nodeSerializer = new NodeSerializer(flavors);
this.db = new CachingCurator(curator, root, useCache);
this.clock = clock;
this.provisionIndexCounter = new CuratorCounter(curator, root.append("provisionIndexCounter"));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
index 7e82ef55917..df39a0230b6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
@@ -134,7 +134,7 @@ public class NodeSerializer {
// ---------------- Serialization ----------------------------------------------------
- public NodeSerializer(NodeFlavors flavors, long cacheSize) {
+ public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 8a39f309935..5ce5bc8abd0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -98,10 +98,7 @@ public class CapacityPolicies {
Architecture architecture = adminClusterArchitecture(applicationId);
if (nodeRepository.exclusiveAllocation(clusterSpec)) {
- var resources = legacySmallestExclusiveResources(); //TODO: use 8Gb as default when no apps are using 4Gb
- return versioned(clusterSpec, Map.of(new Version(0), resources,
- new Version(8, 182, 12), resources.with(architecture),
- new Version(8, 187), smallestExclusiveResources().with(architecture)));
+ return smallestExclusiveResources().with(architecture);
}
if (clusterSpec.id().value().equals("cluster-controllers")) {
@@ -131,8 +128,7 @@ public class CapacityPolicies {
// 1.32 fits floor(8/1.32) = 6 cluster controllers on each 8Gb host, and each will have
// 1.32-(0.7+0.6)*(1.32/8) = 1.1 Gb real memory given current taxes.
if (architecture == Architecture.x86_64)
- return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.14, 10, 0.3),
- new Version(8, 129, 4), new NodeResources(0.25, 1.32, 10, 0.3)));
+ return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.32, 10, 0.3)));
else
// arm64 nodes need more memory
return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.50, 10, 0.3)));
@@ -159,13 +155,6 @@ public class CapacityPolicies {
}
// The lowest amount of resources that can be exclusive allocated (i.e. a matching host flavor for this exists)
- private NodeResources legacySmallestExclusiveResources() {
- return (zone.cloud().name().equals(CloudName.GCP))
- ? new NodeResources(1, 4, 50, 0.3)
- : new NodeResources(0.5, 4, 50, 0.3);
- }
-
- // The lowest amount of resources that can be exclusive allocated (i.e. a matching host flavor for this exists)
private NodeResources smallestExclusiveResources() {
return (zone.cloud().name().equals(CloudName.GCP))
? new NodeResources(2, 8, 50, 0.3)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImages.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImages.java
index 59dbb0b3241..583045083a9 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImages.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImages.java
@@ -37,24 +37,17 @@ public class ContainerImages {
Optional<DockerImage> requestedImage = node.allocation()
.flatMap(allocation -> allocation.membership().cluster().dockerImageRepo());
NodeType nodeType = node.type().isHost() ? node.type().childNodeType() : node.type();
- final DockerImage image;
- if (requestedImage.isPresent()) {
- image = requestedImage.get();
- } else if (nodeType == NodeType.tenant) {
- if (!node.resources().gpuResources().isZero()) {
- image = tenantGpuImage.orElseThrow(() -> new IllegalArgumentException(node + " has GPU resources, but there is no GPU container image available"));
- } else {
- image = tenantImage.orElse(defaultImage);
- }
- } else {
- image = defaultImage;
- }
- return rewriteRegistry(image);
- }
-
- /** Rewrite the registry part of given image, using this zone's default image */
- private DockerImage rewriteRegistry(DockerImage image) {
- return image.withRegistry(defaultImage.registry());
+ DockerImage wantedImage =
+ nodeType != NodeType.tenant ?
+ defaultImage :
+ node.resources().gpuResources().isZero() ?
+ tenantImage.orElse(defaultImage) :
+ tenantGpuImage.orElseThrow(() -> new IllegalArgumentException(node + " has GPU resources, but there is no GPU container image available"));
+
+ return requestedImage
+ // Rewrite requested images to make sure they come from a trusted registry
+ .map(image -> image.withRegistry(wantedImage.registry()))
+ .orElse(wantedImage);
}
}
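The rewritten ContainerImages logic picks a zone default, tenant, or GPU image, then forces any requested image onto the registry of the image it would otherwise have used. A minimal sketch with a hypothetical standalone Image type:

import java.util.Optional;

class ContainerImageSketch {

    // Hypothetical image type for illustration only.
    record Image(String registry, String repository) {
        Image withRegistry(String newRegistry) { return new Image(newRegistry, repository); }
    }

    static Image resolve(Optional<Image> requested, boolean tenantNode, boolean hasGpu,
                         Image defaultImage, Optional<Image> tenantImage, Optional<Image> tenantGpuImage) {
        Image wanted = ! tenantNode ? defaultImage
                       : hasGpu ? tenantGpuImage.orElseThrow(() -> new IllegalArgumentException("no GPU image available"))
                                : tenantImage.orElse(defaultImage);
        // Requested images are rewritten so they always come from the trusted registry.
        return requested.map(image -> image.withRegistry(wanted.registry()))
                        .orElse(wanted);
    }

}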
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
index 66d1a4e8bc8..630c8670bdf 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/HostProvisioner.java
@@ -7,7 +7,9 @@ import com.yahoo.config.provision.NodeAllocationException;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.Node;
+import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.function.Consumer;
import java.util.function.Predicate;
@@ -65,11 +67,13 @@ public interface HostProvisioner {
*/
void deprovision(Node host);
- /** Replace the root (OS) disk of host. Implementations of this are expected to be idempotent.
+ /** Replace the root (OS) disk of hosts. Implementations of this are expected to be idempotent.
*
- * @return the updated node object
+ * @return the node objects for which updates were made
*/
- Node replaceRootDisk(Node host);
+ default RebuildResult replaceRootDisk(Collection<Node> hosts) { throw new UnsupportedOperationException(); }
+
+ record RebuildResult(List<Node> rebuilt, Map<Node, Exception> failed) { }
/**
* Returns the maintenance events scheduled for hosts in this zone, in given cloud accounts. Host events in the
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
index 5a35ed1cc42..5506fdf8ea3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/LoadBalancerProvisioner.java
@@ -14,11 +14,13 @@ import com.yahoo.config.provision.exception.LoadBalancerServiceException;
import com.yahoo.transaction.NestedTransaction;
import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancer;
+import com.yahoo.vespa.hosted.provision.lb.LoadBalancer.State;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerId;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerInstance;
import com.yahoo.vespa.hosted.provision.lb.LoadBalancerService;
@@ -62,12 +64,14 @@ public class LoadBalancerProvisioner {
private final CuratorDb db;
private final LoadBalancerService service;
private final BooleanFlag deactivateRouting;
+ private final BooleanFlag ipv6AwsTargetGroups;
public LoadBalancerProvisioner(NodeRepository nodeRepository, LoadBalancerService service) {
this.nodeRepository = nodeRepository;
this.db = nodeRepository.database();
this.service = service;
this.deactivateRouting = PermanentFlags.DEACTIVATE_ROUTING.bindTo(nodeRepository.flagSource());
+ this.ipv6AwsTargetGroups = Flags.IPV6_AWS_TARGET_GROUPS.bindTo(nodeRepository.flagSource());
// Read and write all load balancers to make sure they are stored in the latest version of the serialization format
for (var id : db.readLoadBalancerIds()) {
@@ -124,8 +128,7 @@ public class LoadBalancerProvisioner {
activate(transaction, cluster.getKey(), activatingClusters.get(cluster.getKey()), cluster.getValue());
}
// Deactivate any surplus load balancers, i.e. load balancers for clusters that have been removed
- var surplusLoadBalancers = surplusLoadBalancersOf(transaction.application(), activatingClusters.keySet());
- deactivate(surplusLoadBalancers, transaction.nested());
+ deactivate(surplusLoadBalancersOf(transaction.application(), activatingClusters.keySet()), transaction.nested());
}
/**
@@ -192,22 +195,28 @@ public class LoadBalancerProvisioner {
private void prepare(LoadBalancerId id, ZoneEndpoint zoneEndpoint, CloudAccount cloudAccount) {
Instant now = nodeRepository.clock().instant();
Optional<LoadBalancer> loadBalancer = db.readLoadBalancer(id);
- LoadBalancer newLoadBalancer;
+ LoadBalancer newLoadBalancer = loadBalancer.orElse(new LoadBalancer(id, Optional.empty(), LoadBalancer.State.reserved, now));
LoadBalancer.State fromState = loadBalancer.map(LoadBalancer::state).orElse(null);
- boolean recreateLoadBalancer = loadBalancer.isPresent() && ( ! inAccount(cloudAccount, loadBalancer.get())
- || ! hasCorrectVisibility(loadBalancer.get(), zoneEndpoint));
- if (recreateLoadBalancer) {
- // We have a load balancer, but with the wrong account or visibility.
- // Load balancer must be removed before we can provision a new one with the wanted visibility
- newLoadBalancer = loadBalancer.get().with(LoadBalancer.State.removable, now);
- } else {
- Optional<LoadBalancerInstance> instance = provisionInstance(id, loadBalancer, zoneEndpoint, cloudAccount);
- newLoadBalancer = loadBalancer.isEmpty() ? new LoadBalancer(id, instance, LoadBalancer.State.reserved, now)
- : loadBalancer.get().with(instance);
+ try {
+ if (loadBalancer.isPresent() && ! inAccount(cloudAccount, loadBalancer.get())) {
+ newLoadBalancer = newLoadBalancer.with(State.removable, now);
+ throw new LoadBalancerServiceException("Could not (re)configure " + id + " due to change in cloud account. The operation will be retried on next deployment");
+ }
+ if (loadBalancer.isPresent() && ! hasCorrectVisibility(loadBalancer.get(), zoneEndpoint)) {
+ newLoadBalancer = newLoadBalancer.with(State.removable, now);
+ throw new LoadBalancerServiceException("Could not (re)configure " + id + " due to change in load balancer visibility. The operation will be retried on next deployment");
+ }
+ LoadBalancerInstance instance = provisionInstance(id, loadBalancer, zoneEndpoint, cloudAccount);
+ newLoadBalancer = newLoadBalancer.with(Optional.of(instance));
+ }
+ catch (LoadBalancerServiceException e) {
+ log.log(Level.WARNING, "Failed to provision load balancer", e);
+ newLoadBalancer = newLoadBalancer.with(Optional.empty());
+ throw e;
+ }
+ finally {
+ db.writeLoadBalancer(newLoadBalancer, fromState);
}
- // Always store the load balancer. LoadBalancerExpirer will remove unwanted ones
- db.writeLoadBalancer(newLoadBalancer, fromState);
- requireInstance(id, newLoadBalancer, cloudAccount, zoneEndpoint);
}
private static boolean hasCorrectVisibility(LoadBalancer newLoadBalancer, ZoneEndpoint zoneEndpoint) {
@@ -222,15 +231,20 @@ public class LoadBalancerProvisioner {
if (loadBalancer.isEmpty()) throw new IllegalArgumentException("Could not activate load balancer that was never prepared: " + id);
if (loadBalancer.get().instance().isEmpty()) throw new IllegalArgumentException("Activating " + id + ", but prepare never provisioned a load balancer instance");
- Optional<LoadBalancerInstance> instance = configureInstance(id, nodes, loadBalancer.get(), settings, loadBalancer.get().instance().get().cloudAccount());
- LoadBalancer.State state = instance.isPresent() ? LoadBalancer.State.active : loadBalancer.get().state();
- LoadBalancer newLoadBalancer = loadBalancer.get().with(instance).with(state, now);
- db.writeLoadBalancers(List.of(newLoadBalancer), loadBalancer.get().state(), transaction.nested());
- requireInstance(id, newLoadBalancer, loadBalancer.get().instance().get().cloudAccount(), settings);
+ try {
+ LoadBalancerInstance instance = configureInstance(id, nodes, loadBalancer.get(), settings, loadBalancer.get().instance().get().cloudAccount());
+ db.writeLoadBalancers(List.of(loadBalancer.get().with(Optional.of(instance)).with(State.active, now)),
+ loadBalancer.get().state(), transaction.nested());
+ }
+ catch (LoadBalancerServiceException e) {
+ db.writeLoadBalancers(List.of(loadBalancer.get().with(Optional.empty())),
+ loadBalancer.get().state(), transaction.nested());
+ throw e;
+ }
}
/** Provision a load balancer instance, if necessary */
- private Optional<LoadBalancerInstance> provisionInstance(LoadBalancerId id,
+ private LoadBalancerInstance provisionInstance(LoadBalancerId id,
Optional<LoadBalancer> currentLoadBalancer,
ZoneEndpoint zoneEndpoint,
CloudAccount cloudAccount) {
@@ -246,38 +260,37 @@ public class LoadBalancerProvisioner {
if ( currentLoadBalancer.isPresent()
&& currentLoadBalancer.get().instance().isPresent()
&& currentLoadBalancer.get().instance().get().settings().equals(settings))
- return currentLoadBalancer.get().instance();
+ return currentLoadBalancer.get().instance().get();
log.log(Level.INFO, () -> "Provisioning instance for " + id);
try {
- return Optional.of(service.provision(new LoadBalancerSpec(id.application(), id.cluster(), reals, settings, cloudAccount))
- // Provisioning a private endpoint service requires hard resources to be ready, so we delay it until activation.
- .withServiceIds(currentLoadBalancer.flatMap(LoadBalancer::instance).map(LoadBalancerInstance::serviceIds).orElse(List.of())));
- } catch (Exception e) {
- log.log(Level.WARNING, e, () -> "Could not provision " + id + ". The operation will be retried on next deployment");
+ return service.provision(new LoadBalancerSpec(id.application(), id.cluster(), reals, settings, cloudAccount))
+ // Provisioning a private endpoint service requires hard resources to be ready, so we delay it until activation.
+ .withServiceIds(currentLoadBalancer.flatMap(LoadBalancer::instance).map(LoadBalancerInstance::serviceIds).orElse(List.of()));
+ }
+ catch (Exception e) {
+ throw new LoadBalancerServiceException("Could not provision " + id + ". The operation will be retried on next deployment.", e);
}
- return Optional.empty(); // Will cause activation to fail, but lets us proceed with more preparations.
}
/** Reconfigure a load balancer instance, if necessary */
- private Optional<LoadBalancerInstance> configureInstance(LoadBalancerId id, NodeList nodes,
+ private LoadBalancerInstance configureInstance(LoadBalancerId id, NodeList nodes,
LoadBalancer currentLoadBalancer,
ZoneEndpoint zoneEndpoint,
CloudAccount cloudAccount) {
boolean shouldDeactivateRouting = deactivateRouting.with(FetchVector.Dimension.APPLICATION_ID,
id.application().serializedForm())
.value();
- Set<Real> reals = shouldDeactivateRouting ? Set.of() : realsOf(nodes);
+ Set<Real> reals = shouldDeactivateRouting ? Set.of() : realsOf(nodes, cloudAccount);
log.log(Level.FINE, () -> "Configuring instance for " + id + ", targeting: " + reals);
try {
- return Optional.of(service.configure(currentLoadBalancer.instance().orElseThrow(() -> new IllegalArgumentException("expected existing instance for " + id)),
- new LoadBalancerSpec(id.application(), id.cluster(), reals, zoneEndpoint, cloudAccount),
- shouldDeactivateRouting || currentLoadBalancer.state() != LoadBalancer.State.active));
- } catch (Exception e) {
- log.log(Level.WARNING, e, () -> "Could not (re)configure " + id + ", targeting: " +
- reals + ". The operation will be retried on next deployment");
+ return service.configure(currentLoadBalancer.instance().orElseThrow(() -> new IllegalArgumentException("expected existing instance for " + id)),
+ new LoadBalancerSpec(id.application(), id.cluster(), reals, zoneEndpoint, cloudAccount),
+ shouldDeactivateRouting || currentLoadBalancer.state() != LoadBalancer.State.active);
+ }
+ catch (Exception e) {
+ throw new LoadBalancerServiceException("Could not (re)configure " + id + ", targeting: " + reals, e);
}
- return Optional.empty();
}
/** Returns the load balanced clusters of given application and their nodes */
@@ -293,10 +306,10 @@ public class LoadBalancerProvisioner {
}
/** Returns real servers for given nodes */
- private Set<Real> realsOf(NodeList nodes) {
+ private Set<Real> realsOf(NodeList nodes, CloudAccount cloudAccount) {
Set<Real> reals = new LinkedHashSet<>();
for (var node : nodes) {
- for (var ip : reachableIpAddresses(node)) {
+ for (var ip : reachableIpAddresses(node, cloudAccount)) {
reals.add(new Real(HostName.of(node.hostname()), ip));
}
}
@@ -321,29 +334,19 @@ public class LoadBalancerProvisioner {
}
/** Find IP addresses reachable by the load balancer service */
- private Set<String> reachableIpAddresses(Node node) {
+ private Set<String> reachableIpAddresses(Node node, CloudAccount cloudAccount) {
Set<String> reachable = new LinkedHashSet<>(node.ipConfig().primary());
+ boolean forceIpv6 = ipv6AwsTargetGroups.with(FetchVector.Dimension.CLOUD_ACCOUNT, cloudAccount.account()).value();
+ var protocol = forceIpv6 ? LoadBalancerService.Protocol.ipv6 :
+ service.protocol(node.cloudAccount().isExclave(nodeRepository.zone()));
// Remove addresses unreachable by the load balancer service
- switch (service.protocol(node.cloudAccount().isExclave(nodeRepository.zone()))) {
+ switch (protocol) {
case ipv4 -> reachable.removeIf(IP::isV6);
case ipv6 -> reachable.removeIf(IP::isV4);
}
return reachable;
}
- private void requireInstance(LoadBalancerId id, LoadBalancer loadBalancer, CloudAccount cloudAccount, ZoneEndpoint zoneEndpoint) {
- if (loadBalancer.instance().isEmpty()) {
- // Signal that load balancer is not ready yet
- throw new LoadBalancerServiceException("Could not provision " + id + ". The operation will be retried on next deployment");
- }
- if ( ! inAccount(cloudAccount, loadBalancer)) {
- throw new LoadBalancerServiceException("Could not (re)configure " + id + " due to change in cloud account. The operation will be retried on next deployment");
- }
- if ( ! hasCorrectVisibility(loadBalancer, zoneEndpoint)) {
- throw new LoadBalancerServiceException("Could not (re)configure " + id + " due to change in load balancer visibility. The operation will be retried on next deployment");
- }
- }
-
private static ClusterSpec.Id effectiveId(ClusterSpec cluster) {
return cluster.combinedId().orElse(cluster.id());
}
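The restructured prepare() above decides state transitions inside the try block, turns failures into LoadBalancerServiceException, and always persists whatever state it ended up with before the exception propagates. A minimal sketch of that error-handling shape with hypothetical types:

class PrepareSketch {

    static class ServiceException extends RuntimeException {
        ServiceException(String message) { super(message); }
    }

    interface Store { void write(String state); }

    static void prepare(boolean wrongAccount, boolean wrongVisibility, Store store) {
        String state = "reserved";
        try {
            if (wrongAccount)    { state = "removable"; throw new ServiceException("cloud account changed, will retry on next deployment"); }
            if (wrongVisibility) { state = "removable"; throw new ServiceException("visibility changed, will retry on next deployment"); }
            state = "reserved-with-instance"; // provisioning succeeded
        }
        finally {
            store.write(state); // always persist the load balancer, even when throwing
        }
    }

}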
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
index 4ac90753ed1..24cf86e8a25 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodePrioritizer.java
@@ -128,6 +128,7 @@ public class NodePrioritizer {
if (spareHosts.contains(host) && !canAllocateToSpareHosts) continue;
if ( ! capacity.hasCapacity(host, requested.resources().get())) continue;
if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue;
+ if ( ! requested.cloudAccount().isUnspecified() && ! requested.cloudAccount().equals(host.cloudAccount())) continue;
candidates.add(NodeCandidate.createNewChild(requested.resources().get(),
capacity.availableCapacityOf(host),
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 3d0c1069584..a67a513550a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -23,7 +23,7 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
-import com.yahoo.vespa.hosted.provision.autoscale.AllocatableClusterResources;
+import com.yahoo.vespa.hosted.provision.autoscale.AllocatableResources;
import com.yahoo.vespa.hosted.provision.autoscale.AllocationOptimizer;
import com.yahoo.vespa.hosted.provision.autoscale.ClusterModel;
import com.yahoo.vespa.hosted.provision.autoscale.Limits;
@@ -182,12 +182,12 @@ public class NodeRepositoryProvisioner implements Provisioner {
.not().retired()
.not().removable();
boolean firstDeployment = nodes.isEmpty();
- AllocatableClusterResources currentResources =
+ var current =
firstDeployment // start at min, preserve current resources otherwise
- ? new AllocatableClusterResources(initialResourcesFrom(requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
- : new AllocatableClusterResources(nodes, nodeRepository);
- var clusterModel = new ClusterModel(nodeRepository, application, clusterSpec, cluster, nodes, nodeRepository.metricsDb(), nodeRepository.clock());
- return within(Limits.of(requested), currentResources, firstDeployment, clusterModel);
+ ? new AllocatableResources(initialResourcesFrom(requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
+ : new AllocatableResources(nodes, nodeRepository);
+ var model = new ClusterModel(nodeRepository, application, clusterSpec, cluster, nodes, current, nodeRepository.metricsDb(), nodeRepository.clock());
+ return within(Limits.of(requested), model, firstDeployment);
}
private ClusterResources initialResourcesFrom(Capacity requested, ClusterSpec clusterSpec, ApplicationId applicationId) {
@@ -197,21 +197,19 @@ public class NodeRepositoryProvisioner implements Provisioner {
/** Make the minimal adjustments needed to the current resources to stay within the limits */
private ClusterResources within(Limits limits,
- AllocatableClusterResources current,
- boolean firstDeployment,
- ClusterModel clusterModel) {
+ ClusterModel model,
+ boolean firstDeployment) {
if (limits.min().equals(limits.max())) return limits.min();
// Don't change current deployments that are still legal
- if (! firstDeployment && current.advertisedResources().isWithin(limits.min(), limits.max()))
- return current.advertisedResources();
+ if (! firstDeployment && model.current().advertisedResources().isWithin(limits.min(), limits.max()))
+ return model.current().advertisedResources();
// Otherwise, find an allocation that preserves the current resources as well as possible
return allocationOptimizer.findBestAllocation(Load.one(),
- current,
- clusterModel,
+ model,
limits)
- .orElseThrow(() -> newNoAllocationPossible(current.clusterSpec(), limits))
+ .orElseThrow(() -> newNoAllocationPossible(model.current().clusterSpec(), limits))
.advertisedResources();
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
index cea0608013d..77f37cadc0b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
@@ -6,6 +6,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.autoscale.ResourceChange;
import java.time.Duration;
import java.util.Map;
@@ -162,16 +163,11 @@ public interface NodeSpec {
@Override
public boolean canResize(NodeResources currentNodeResources, NodeResources currentSpareHostResources,
ClusterSpec.Type type, boolean hasTopologyChange, int currentClusterSize) {
- if (exclusive) return false; // exclusive resources must match the host
- // Never allow in-place resize when also changing topology or decreasing cluster size
- if (hasTopologyChange || count < currentClusterSize) return false;
+ return ResourceChange.canInPlaceResize(currentClusterSize, currentNodeResources, count, requestedNodeResources,
+ type, exclusive, hasTopologyChange)
+ &&
+ currentSpareHostResources.add(currentNodeResources.justNumbers()).satisfies(requestedNodeResources);
- // Do not allow increasing cluster size and decreasing node resources at the same time for content nodes
- if (type.isContent() && count > currentClusterSize && !requestedNodeResources.satisfies(currentNodeResources.justNumbers()))
- return false;
-
- // Otherwise, allowed as long as the host can satisfy the new requested resources
- return currentSpareHostResources.add(currentNodeResources.justNumbers()).satisfies(requestedNodeResources);
}
@Override
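
The inline checks removed above are what ResourceChange.canInPlaceResize is expected to cover, while the host headroom check stays in NodeSpec. A self-contained, illustrative predicate (hypothetical names, not the actual ResourceChange implementation) expressing the rules the removed lines encoded:

    // Illustrative only; mirrors the rules the removed lines expressed.
    static boolean canInPlaceResizeSketch(int currentClusterSize, int requestedCount,
                                          boolean exclusive, boolean hasTopologyChange,
                                          boolean isContentCluster, boolean requestedSatisfiesCurrent) {
        if (exclusive) return false;                                                // exclusive resources must match the host
        if (hasTopologyChange || requestedCount < currentClusterSize) return false; // no in-place resize when shrinking or regrouping
        if (isContentCluster && requestedCount > currentClusterSize && ! requestedSatisfiesCurrent)
            return false;                                                           // content: don't grow count while shrinking node resources
        return true;
    }
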
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
index 349be9e4b47..ea0fbcc7108 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/Preparer.java
@@ -16,9 +16,10 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.node.Agent;
import com.yahoo.vespa.hosted.provision.provisioning.HostProvisioner.HostSharing;
-import java.util.ArrayList;
+import java.util.LinkedHashSet;
import java.util.List;
import java.util.Optional;
+import java.util.Set;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.function.Supplier;
@@ -91,10 +92,13 @@ public class Preparer {
HostSharing sharing = hostSharing(cluster, hostType);
Version osVersion = nodeRepository.osVersions().targetFor(hostType).orElse(Version.emptyVersion);
NodeAllocation.HostDeficit deficit = allocation.hostDeficit().get();
- List<Node> hosts = new ArrayList<>();
+ Set<Node> hosts = new LinkedHashSet<>();
Consumer<List<ProvisionedHost>> whenProvisioned = provisionedHosts -> {
- hosts.addAll(provisionedHosts.stream().map(host -> host.generateHost(requested.hostTTL())).toList());
- nodeRepository.nodes().addNodes(hosts, Agent.application);
+ List<Node> newHosts = provisionedHosts.stream()
+ .map(host -> host.generateHost(requested.hostTTL()))
+ .filter(hosts::add)
+ .toList();
+ nodeRepository.nodes().addNodes(newHosts, Agent.application);
// Offer the nodes on the newly provisioned hosts, this should be enough to cover the deficit
List<NodeCandidate> candidates = provisionedHosts.stream()
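
The Preparer change above switches from a List to an insertion-ordered Set so a host provisioned in an earlier callback invocation is not added to the node repository again. A small stand-alone illustration of the filter(hosts::add) idiom, using plain strings instead of Node objects:

    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class DedupSketch {
        public static void main(String[] args) {
            Set<String> hosts = new LinkedHashSet<>();
            List<List<String>> callbacks = List.of(List.of("host1", "host2"), List.of("host2", "host3"));
            for (List<String> provisioned : callbacks) {
                List<String> newHosts = provisioned.stream()
                                                   .filter(hosts::add)  // Set.add returns true only for unseen hosts
                                                   .toList();
                System.out.println("adding " + newHosts);               // second round adds only host3
            }
        }
    }
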
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
index 3ed01e00ee6..0573c6a877e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockDeployer.java
@@ -40,15 +40,17 @@ public class MockDeployer implements Deployer {
// A mock deployer that deploys anything, only changing wantToRetire to retired
private final NodeRepository nodeRepository;
- /** The number of redeployments done to this, which is also the config generation */
- public int redeployments = 0;
+ /** The number of activations done to this, which is also the config generation */
+ public int activations = 0;
- private final Map<ApplicationId, Instant> lastDeployTimes = new HashMap<>();
+ private final Map<ApplicationId, Instant> lastPrepareTimes = new HashMap<>();
+ private final Map<ApplicationId, Instant> lastActivationTimes = new HashMap<>();
private final Clock clock;
private final ReentrantLock lock = new ReentrantLock();
private boolean failActivate = false;
private boolean bootstrapping = true;
+ private Instant readiedReindexingAt = null;
/** Create a mock deployer which returns empty on every deploy request. */
@Inject
@@ -90,6 +92,8 @@ public class MockDeployer implements Deployer {
public void setBootstrapping(boolean bootstrapping) { this.bootstrapping = bootstrapping; }
+ public void setReadiedReindexingAt(Instant readiedReindexingAt) { this.readiedReindexingAt = readiedReindexingAt; }
+
@Override
public Optional<Deployment> deployFromLocalActive(ApplicationId id, boolean bootstrap) {
return deployFromLocalActive(id, Duration.ofSeconds(60));
@@ -121,7 +125,17 @@ public class MockDeployer implements Deployer {
@Override
public Optional<Instant> activationTime(ApplicationId application) {
- return Optional.ofNullable(lastDeployTimes.get(application));
+ return Optional.ofNullable(lastActivationTimes.get(application));
+ }
+
+ @Override
+ public Optional<Instant> deployTime(ApplicationId application) {
+ return Optional.ofNullable(lastPrepareTimes.get(application));
+ }
+
+ @Override
+ public boolean readiedReindexingAfter(ApplicationId application, Instant instant) {
+ return readiedReindexingAt != null && readiedReindexingAt.isAfter(instant);
}
@Override
@@ -136,7 +150,8 @@ public class MockDeployer implements Deployer {
new MockDeployment(provisioner, new ApplicationContext(applicationId, List.of())).activate();
applications.remove(applicationId);
- lastDeployTimes.remove(applicationId);
+ lastPrepareTimes.remove(applicationId);
+ lastActivationTimes.remove(applicationId);
}
public class MockDeployment implements Deployment {
@@ -155,6 +170,7 @@ public class MockDeployer implements Deployer {
@Override
public void prepare() {
preparedHosts = application.prepare(provisioner);
+ lastPrepareTimes.put(application.id, clock.instant());
}
@Override
@@ -164,15 +180,15 @@ public class MockDeployer implements Deployer {
if (failActivate)
throw new IllegalStateException("failActivate is true");
- redeployments++;
+ activations++;
try (var lock = provisioner.lock(application.id)) {
try (NestedTransaction t = new NestedTransaction()) {
- provisioner.activate(preparedHosts, new ActivationContext(redeployments), new ApplicationTransaction(lock, t));
+ provisioner.activate(preparedHosts, new ActivationContext(activations), new ApplicationTransaction(lock, t));
t.commit();
- lastDeployTimes.put(application.id, clock.instant());
+ lastActivationTimes.put(application.id, clock.instant());
}
}
- return redeployments;
+ return activations;
}
@Override
@@ -191,18 +207,20 @@ public class MockDeployer implements Deployer {
}
@Override
- public void prepare() { }
+ public void prepare() {
+ lastPrepareTimes.put(applicationId, clock.instant());
+ }
@Override
public long activate() {
- lastDeployTimes.put(applicationId, clock.instant());
+ lastActivationTimes.put(applicationId, clock.instant());
for (Node node : nodeRepository.nodes().list().owner(applicationId).state(Node.State.active).retiring()) {
try (NodeMutex lock = nodeRepository.nodes().lockAndGetRequired(node)) {
nodeRepository.nodes().write(lock.node().retire(nodeRepository.clock().instant()), lock);
}
}
- return redeployments++;
+ return activations++;
}
@Override
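
The MockDeployer now records two timestamps per application: prepare() stores a deploy time and activate() stores an activation time, so deployTime() and activationTime() can differ when an activation fails after a prepare. A stand-alone sketch of that bookkeeping (hypothetical class and method names, not the mock itself):

    import java.time.Clock;
    import java.time.Instant;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Optional;

    class DeployTimesSketch {
        private final Map<String, Instant> lastPrepare = new HashMap<>();
        private final Map<String, Instant> lastActivation = new HashMap<>();
        private final Clock clock;

        DeployTimesSketch(Clock clock) { this.clock = clock; }

        void prepared(String application)  { lastPrepare.put(application, clock.instant()); }    // recorded by prepare()
        void activated(String application) { lastActivation.put(application, clock.instant()); } // recorded by activate()

        Optional<Instant> deployTime(String application)     { return Optional.ofNullable(lastPrepare.get(application)); }
        Optional<Instant> activationTime(String application) { return Optional.ofNullable(lastActivation.get(application)); }
    }
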
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
index 965611b9a6e..03923853594 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockHostProvisioner.java
@@ -21,9 +21,11 @@ import com.yahoo.vespa.hosted.provision.provisioning.ProvisionedHost;
import java.time.Instant;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
@@ -32,6 +34,8 @@ import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.IntStream;
+import static com.yahoo.config.provision.NodeType.host;
+
/**
* @author mpolden
*/
@@ -82,7 +86,7 @@ public class MockHostProvisioner implements HostProvisioner {
List<ProvisionedHost> hosts = new ArrayList<>();
for (int index : request.indices()) {
- String hostHostname = request.type() == NodeType.host ? "host" + index : request.type().name() + index;
+ String hostHostname = request.type() == host ? "host" + index : request.type().name() + index;
hosts.add(new ProvisionedHost("id-of-" + request.type().name() + index,
hostHostname,
hostFlavor,
@@ -117,13 +121,17 @@ public class MockHostProvisioner implements HostProvisioner {
}
@Override
- public Node replaceRootDisk(Node host) {
- if (!host.type().isHost()) throw new IllegalArgumentException(host + " is not a host");
- if (rebuildsCompleted.remove(host.hostname())) {
- return host.withWantToRetire(host.status().wantToRetire(), host.status().wantToDeprovision(),
- false, false, Agent.system, Instant.ofEpochMilli(123));
+ public RebuildResult replaceRootDisk(Collection<Node> hosts) {
+ List<Node> updated = new ArrayList<>();
+ Map<Node, Exception> failed = new LinkedHashMap<>();
+ for (Node host : hosts) {
+ if ( ! host.type().isHost()) failed.put(host, new IllegalArgumentException(host + " is not a host"));
+ if (rebuildsCompleted.remove(host.hostname())) {
+ updated.add(host.withWantToRetire(host.status().wantToRetire(), host.status().wantToDeprovision(),
+ false, false, Agent.system, Instant.ofEpochMilli(123)));
+ }
}
- return host;
+ return new RebuildResult(updated, failed);
}
@Override
@@ -219,7 +227,7 @@ public class MockHostProvisioner implements HostProvisioner {
long numAddresses = Math.max(2, Math.round(flavor.resources().bandwidthGbps()));
return IntStream.range(1, (int) numAddresses)
.mapToObj(i -> {
- String hostname = hostType == NodeType.host
+ String hostname = hostType == host
? "host" + hostIndex + "-" + i
: hostType.childNodeType().name() + i;
return HostName.of(hostname);
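
replaceRootDisk now takes a collection and returns a RebuildResult instead of handling one host at a time, so failures are collected rather than thrown. An illustrative, self-contained version of that shape (simplified types; not the mock's real signature):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    class RebuildSketch {
        record Result(List<String> updated, Map<String, Exception> failed) {}

        static Result replaceRootDisks(Collection<String> hostnames, Set<String> rebuildsCompleted) {
            List<String> updated = new ArrayList<>();
            Map<String, Exception> failed = new LinkedHashMap<>();
            for (String host : hostnames) {
                if ( ! host.startsWith("host")) {                         // stand-in for "not a host node"
                    failed.put(host, new IllegalArgumentException(host + " is not a host"));
                    continue;
                }
                if (rebuildsCompleted.remove(host)) updated.add(host);    // only finished rebuilds produce an update
            }
            return new Result(updated, failed);
        }
    }
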
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
index e3f67721eb5..90cf37aa876 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
@@ -96,7 +96,7 @@ public class MockNodeRepository extends NodeRepository {
new MemoryMetricsDb(Clock.fixed(Instant.ofEpochMilli(123), ZoneId.of("Z"))),
new OrchestratorMock(),
true,
- 0, 1000);
+ 0);
this.flavors = flavors;
defaultCloudAccount = zone.cloud().account();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
index 49702a7d4c1..bf714cd9df1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
@@ -54,7 +54,7 @@ public class NodeRepositoryTester {
new MemoryMetricsDb(clock),
new OrchestratorMock(),
true,
- 0, 1000);
+ 0);
}
public NodeRepository nodeRepository() { return nodeRepository; }
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
index 1ed3c13cfff..f64e50310bb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
@@ -133,7 +133,7 @@ public class RealDataScenarioTest {
}
private static void initFromZk(NodeRepository nodeRepository, Path pathToZkSnapshot) {
- NodeSerializer nodeSerializer = new NodeSerializer(nodeRepository.flavors(), 1000);
+ NodeSerializer nodeSerializer = new NodeSerializer(nodeRepository.flavors());
AtomicBoolean nodeNext = new AtomicBoolean(false);
Pattern zkNodePathPattern = Pattern.compile(".?/provision/v1/nodes/[a-z0-9.-]+\\.(com|cloud).?");
Consumer<String> consumer = input -> {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index d33857d1a1e..4e19d04ffac 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -6,6 +6,7 @@ import com.yahoo.config.provision.ClusterInfo;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeResources.DiskSpeed;
@@ -18,6 +19,7 @@ import com.yahoo.vespa.hosted.provision.provisioning.DynamicProvisioningTester;
import org.junit.Test;
import java.time.Duration;
+import java.util.List;
import java.util.Optional;
import static com.yahoo.config.provision.NodeResources.DiskSpeed.fast;
@@ -88,7 +90,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(7));
fixture.loader().applyCpuLoad(0.1f, 10);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
- 6, 1, 1.1, 9.8, 390.2,
+ 9, 1, 1.0, 6.5, 243.9,
fixture.autoscale());
}
@@ -173,7 +175,7 @@ public class AutoscalingTest {
fixture.setScalingDuration(Duration.ofHours(12)); // Fixture sets last completion to be 1 day into the past
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling up (only) since resource usage is too high",
- 8, 1, 7.1, 9.3, 75.4,
+ 5, 1, 11.7, 15.4, 132.0,
fixture.autoscale());
}
@@ -185,7 +187,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling cpu and disk up and memory down",
- 7, 1, 8.2, 4.0, 88.0,
+ 5, 1, 11.7, 4.0, 132.0,
fixture.autoscale());
}
@@ -208,7 +210,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 8, 1, 4.3, 7.4, 29.0,
+ 5, 1, 7.1, 12.3, 50.7,
fixture.autoscale());
}
@@ -232,7 +234,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up cpu since peak resource usage is too high",
- 8, 1, 4.3, 7.7, 34.3,
+ 5, 1, 7.1, 12.8, 60.0,
fixture.autoscale());
}
@@ -393,11 +395,10 @@ public class AutoscalingTest {
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
- fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.loader().applyCpuLoad(0.4, 240);
+ fixture.loader().applyCpuLoad(0.5, 240);
fixture.tester().assertResources("Scaling cpu up",
- 6, 6, 5.0, 7.4, 22.3,
+ 6, 6, 4.5, 7.4, 22.3,
fixture.autoscale());
}
@@ -460,7 +461,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
- 8, 1, 6.2, 7.4, 29.0,
+ 5, 1, 10.2, 12.3, 50.7,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@@ -593,13 +594,12 @@ public class AutoscalingTest {
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
- fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timePassed = fixture.loader().addCpuMeasurements(0.25, 120);
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 200.0 : 100.0, t -> 10.0);
- fixture.tester().assertResources("Scaling up cpu, others down, changing to 1 group is cheaper",
- 7, 1, 3.2, 43.3, 129.8,
+ fixture.tester().assertResources("Changing to 1 group is cheaper",
+ 7, 1, 2.5, 43.3, 129.8,
fixture.autoscale());
}
@@ -650,11 +650,10 @@ public class AutoscalingTest {
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
- fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.16, 0.02, 0.5), 120);
fixture.tester().assertResources("Scaling down memory",
- 7, 1, 2.5, 4.0, 80.2,
+ 6, 1, 2.1, 4.0, 96.2,
fixture.autoscale());
}
@@ -710,16 +709,16 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since we assume we need 2x cpu for growth when no scaling time data",
- 8, 1, 1.6, 7.4, 29.0,
+ 5, 1, 2.6, 12.3, 50.7,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofHours(8));
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> 100.0 + (t < 50 ? t : 100 - t), t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.loader().addCpuMeasurements(0.25, 200);
+ fixture.loader().addCpuMeasurements(0.20, 200);
fixture.tester().assertResources("Scale down since observed growth is slower than scaling time",
- 8, 1, 1.2, 7.4, 29.0,
+ 5, 1, 1.6, 12.3, 50.7,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofHours(8));
@@ -730,7 +729,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since observed growth is faster than scaling time",
- 8, 1, 1.5, 7.4, 29.0,
+ 5, 1, 2.4, 12.3, 50.7,
fixture.autoscale());
}
@@ -747,7 +746,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.7, 200);
fixture.tester().assertResources("Scale up slightly since observed growth is faster than scaling time, but we are not confident",
- 8, 1, 1.3, 7.4, 29.0,
+ 5, 1, 2.2, 12.3, 50.7,
fixture.autoscale());
}
@@ -766,16 +765,16 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester.assertResources("Query and write load is equal -> scale up somewhat",
- 8, 1, 1.8, 7.4, 29.0,
+ 5, 1, 2.9, 12.3, 50.7,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 800.0 : 400.0, t -> 100.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
- // TODO: Ackhually, we scale down here - why?
+ // TODO: Ackhually, we scale up less here - why?
fixture.tester().assertResources("Query load is 4x write load -> scale up more",
- 8, 1, 1.4, 7.4, 29.0,
+ 5, 1, 2.2, 12.3, 50.7,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -783,7 +782,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write load is 10x query load -> scale down",
- 6, 1, 1.1, 10.0, 40.5,
+ 5, 1, 1.3, 12.3, 50.7,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -791,7 +790,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Query only -> larger",
- 8, 1, 2.1, 7.4, 29.0,
+ 5, 1, 3.5, 12.3, 50.7,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -954,4 +953,32 @@ public class AutoscalingTest {
.build();
}
+ @Test
+ public void change_not_requiring_node_replacement_is_preferred() {
+ var min = new ClusterResources(5, 1, new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote));
+ var max = new ClusterResources(6, 1, new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote));
+
+ List<Flavor> flavors = List.of(new Flavor("arm_16", new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote, NodeResources.Architecture.arm64)),
+ new Flavor("x86_16", new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote, NodeResources.Architecture.x86_64)));
+ var fixture = DynamicProvisioningTester.fixture()
+ .clusterType(ClusterSpec.Type.container)
+ .hostFlavors(flavors)
+ .awsZone(false, Environment.prod)
+ .capacity(Capacity.from(min, max))
+ .initialResources(Optional.of(min.with(min.nodeResources().with(NodeResources.Architecture.x86_64))))
+ .build();
+ var nodes = fixture.nodes().not().retired().asList();
+ assertEquals(5, nodes.size());
+ assertEquals(NodeResources.Architecture.x86_64, nodes.get(0).resources().architecture());
+
+ fixture.tester().clock().advance(Duration.ofHours(5));
+ fixture.loader().applyCpuLoad(0.27, 10); // trigger rescaling, but don't cause fulfilment < 1
+ var autoscaling = fixture.autoscale();
+ fixture.deploy(Capacity.from(autoscaling.resources().get()));
+ nodes = fixture.nodes().not().retired().asList();
+ assertEquals(6, nodes.size());
+ assertEquals("We stay with x86 even though the first matching flavor is arm",
+ NodeResources.Architecture.x86_64, nodes.get(0).resources().architecture());
+ }
+
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
index 379dbb27d87..be7bc3c44a8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
@@ -32,7 +32,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 4.0, 7.4, 29.0,
+ 8, 1, 3.4, 7.4, 29.0,
fixture.autoscale());
// Higher query rate
@@ -40,7 +40,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 8.0, 7.4, 29.0,
+ 8, 1, 6.8, 7.4, 29.0,
fixture.autoscale());
// Higher headroom
@@ -48,7 +48,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 4.8, 7.4, 29.0,
+ 8, 1, 4.0, 7.4, 29.0,
fixture.autoscale());
// Higher per query cost
@@ -56,7 +56,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 6.0, 7.4, 29.0,
+ 8, 1, 5.1, 7.4, 29.0,
fixture.autoscale());
// Bcp elsewhere is 0 - use local only
@@ -85,7 +85,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 10.5, 43.2, 190.0,
+ 3, 3, 11.7, 43.2, 190.0,
fixture.autoscale());
// Higher query rate
@@ -93,7 +93,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 20.9, 43.2, 190.0,
+ 3, 3, 23.1, 43.2, 190.0,
fixture.autoscale());
// Higher headroom
@@ -101,7 +101,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 12.4, 43.2, 190.0,
+ 3, 3, 13.8, 43.2, 190.0,
fixture.autoscale());
// Higher per query cost
@@ -109,7 +109,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 15.7, 43.2, 190.0,
+ 3, 3, 17.4, 43.2, 190.0,
fixture.autoscale());
}
@@ -127,7 +127,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 4.0, 16.0, 40.8,
+ 4, 1, 8.0, 16.0, 40.8,
fixture.autoscale());
// Higher query rate (mem and disk changes are due to being assigned larger hosts where we get less overhead share)
@@ -135,7 +135,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 8.0, 16.0, 40.8,
+ 7, 1, 8.0, 16.0, 40.8,
fixture.autoscale());
// Higher headroom
@@ -143,7 +143,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 5, 1, 8.0, 16.0, 40.8,
+ 8, 1, 4.0, 16.0, 40.8,
fixture.autoscale());
// Higher per query cost
@@ -151,7 +151,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 6, 1, 8.0, 16.0, 40.8,
+ 10, 1, 4.0, 16.0, 40.8,
fixture.autoscale());
}
@@ -173,7 +173,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("No need for traffic shift headroom",
- 2, 1, 2.0, 16.0, 40.8,
+ 3, 1, 4.0, 16.0, 40.8,
fixture.autoscale());
}
@@ -186,7 +186,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.3, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 14.2, 7.4, 29.0,
+ 8, 1, 11.9, 7.4, 29.0,
fixture.autoscale());
// Some local traffic
@@ -196,7 +196,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration1.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 10.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 6.9, 7.4, 29.0,
+ 8, 1, 6.8, 7.4, 29.0,
fixture.autoscale());
// Enough local traffic to get half the votes
@@ -206,7 +206,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration2.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 50.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 2.9, 7.4, 29.0,
+ 8, 1, 3.0, 7.4, 29.0,
fixture.autoscale());
// Mostly local
@@ -270,6 +270,21 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.autoscale());
}
+ @Test
+ public void test_autoscaling_containers_with_some_local_traffic() {
+ var fixture = DynamicProvisioningTester.fixture().clusterType(ClusterSpec.Type.container).awsProdSetup(true).build();
+
+ // Some local traffic
+ fixture.tester().clock().advance(Duration.ofDays(2));
+ fixture.store(new BcpGroupInfo(200, 1.9, 0.01));
+ Duration duration1 = fixture.loader().addCpuMeasurements(0.58f, 10);
+ fixture.tester().clock().advance(duration1.negated());
+ fixture.loader().addQueryRateMeasurements(10, __ -> 10.0);
+ fixture.tester().assertResources("Not scaling down due to group info, even though it contains much evidence queries are cheap",
+ 3, 1, 4.0, 16.0, 40.8,
+ fixture.autoscale());
+ }
+
/** Tests with varying BCP group info parameters. */
@Test
public void test_autoscaling_metrics() {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index ec084014a6a..f07d52a4a7f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -5,17 +5,12 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.Zone;
import com.yahoo.test.ManualClock;
-import com.yahoo.vespa.curator.mock.MockCurator;
-import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.Status;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
-import com.yahoo.vespa.hosted.provision.testutils.MockNodeRepository;
import org.junit.Test;
import java.time.Duration;
@@ -36,10 +31,10 @@ public class ClusterModelTest {
public void unit_adjustment_should_cause_no_change() {
var model = clusterModelWithNoData(); // 5 nodes, 1 group
assertEquals(Load.one(), model.loadAdjustment());
- var target = model.loadAdjustment().scaled(resources());
+ var target = model.loadAdjustment().scaled(nodeResources());
int testingNodes = 5 - 1;
int currentNodes = 5 - 1;
- assertEquals(resources(), model.loadWith(testingNodes, 1).scaled(Load.one().divide(model.loadWith(currentNodes, 1)).scaled(target)));
+ assertEquals(nodeResources(), model.loadWith(testingNodes, 1).scaled(Load.one().divide(model.loadWith(currentNodes, 1)).scaled(target)));
}
@Test
@@ -91,16 +86,23 @@ public class ClusterModelTest {
ManualClock clock = new ManualClock();
Application application = Application.empty(ApplicationId.from("t1", "a1", "i1"));
ClusterSpec clusterSpec = clusterSpec();
- Cluster cluster = cluster(resources());
+ Cluster cluster = cluster();
application = application.with(cluster);
- return new ClusterModel(new ProvisioningTester.Builder().build().nodeRepository(),
+ var nodeRepository = new ProvisioningTester.Builder().build().nodeRepository();
+ return new ClusterModel(nodeRepository,
application.with(status),
- clusterSpec, cluster, clock, Duration.ofMinutes(10),
+ clusterSpec, cluster,
+ new AllocatableResources(clusterResources(), clusterSpec, nodeRepository),
+ clock, Duration.ofMinutes(10), Duration.ofMinutes(5),
timeseries(cluster,100, queryRate, writeRate, clock),
ClusterNodesTimeseries.empty());
}
- private NodeResources resources() {
+ private ClusterResources clusterResources() {
+ return new ClusterResources(5, 1, nodeResources());
+ }
+
+ private NodeResources nodeResources() {
return new NodeResources(1, 10, 100, 1);
}
@@ -111,10 +113,10 @@ public class ClusterModelTest {
.build();
}
- private Cluster cluster(NodeResources resources) {
+ private Cluster cluster() {
return Cluster.create(ClusterSpec.Id.from("test"),
false,
- Capacity.from(new ClusterResources(5, 1, resources)));
+ Capacity.from(clusterResources()));
}
/** Creates the given number of measurements, spaced 5 minutes between, using the given function */
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
index 33d3d3d50dc..78feba14fbf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
@@ -5,17 +5,14 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.Cloud;
-import com.yahoo.config.provision.ClusterInfo;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
-import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.custom.HostResources;
@@ -29,7 +26,6 @@ import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsHostResourcesCalcu
import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsNodeTypes;
import com.yahoo.vespa.hosted.provision.provisioning.DynamicProvisioningTester;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
-import com.yahoo.vespa.hosted.provision.testutils.MockNodeRepository;
import java.time.Duration;
import java.util.Arrays;
@@ -72,9 +68,9 @@ public class Fixture {
return tester().nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId));
}
- public AllocatableClusterResources currentResources() {
- return new AllocatableClusterResources(tester.nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId()),
- tester.nodeRepository());
+ public AllocatableResources currentResources() {
+ return new AllocatableResources(tester.nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId()),
+ tester.nodeRepository());
}
public Cluster cluster() {
@@ -89,6 +85,7 @@ public class Fixture {
clusterSpec,
cluster(),
nodes(),
+ new AllocatableResources(nodes(), tester.nodeRepository()),
tester.nodeRepository().metricsDb(),
tester.nodeRepository().clock());
}
@@ -180,6 +177,7 @@ public class Fixture {
new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any)));
HostResourcesCalculator resourceCalculator = new DynamicProvisioningTester.MockHostResourcesCalculator(zone);
final InMemoryFlagSource flagSource = new InMemoryFlagSource();
+ boolean reversedFlavorOrder = false;
int hostCount = 0;
public Fixture.Builder zone(Zone zone) {
@@ -228,12 +226,16 @@ public class Fixture {
public Fixture.Builder awsSetup(boolean allowHostSharing, Environment environment) {
return this.awsHostFlavors()
.awsResourceCalculator()
- .zone(new Zone(Cloud.builder().dynamicProvisioning(true)
- .allowHostSharing(allowHostSharing)
- .build(),
- SystemName.Public,
- environment,
- RegionName.from("aws-eu-west-1a")));
+ .awsZone(allowHostSharing, environment);
+ }
+
+ public Fixture.Builder awsZone(boolean allowHostSharing, Environment environment) {
+ return zone(new Zone(Cloud.builder().dynamicProvisioning(true)
+ .allowHostSharing(allowHostSharing)
+ .build(),
+ SystemName.Public,
+ environment,
+ RegionName.from("aws-eu-west-1a")));
}
public Fixture.Builder vespaVersion(Version version) {
@@ -246,6 +248,11 @@ public class Fixture {
return this;
}
+ public Fixture.Builder hostFlavors(List<Flavor> hostFlavors) {
+ this.hostFlavors = hostFlavors;
+ return this;
+ }
+
/** Adds the host resources available on AWS. */
public Fixture.Builder awsHostFlavors() {
this.hostFlavors = AwsNodeTypes.asFlavors();
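
With awsZone() split out of awsSetup() and the new hostFlavors() setter, a test can combine an explicit flavor list with an AWS-like prod zone, as the new AutoscalingTest case earlier in this diff does. Usage sketch (same calls as in that test; min, max and flavors are assumed to be defined as there):

    var fixture = DynamicProvisioningTester.fixture()
                                           .clusterType(ClusterSpec.Type.container)
                                           .hostFlavors(flavors)              // custom flavor list
                                           .awsZone(false, Environment.prod)  // AWS-like zone without host sharing
                                           .capacity(Capacity.from(min, max))
                                           .build();
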
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java
index 523feeeb303..eedf4946e3a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java
@@ -78,8 +78,7 @@ public class CapacityCheckerTester {
new MemoryMetricsDb(clock),
new OrchestratorMock(),
true,
- 0,
- 1000);
+ 0);
}
private void updateCapacityChecker() {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java
index e1e2ff3db15..d1eaac8258e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ExpeditedChangeApplicationMaintainerTest.java
@@ -43,7 +43,7 @@ public class ExpeditedChangeApplicationMaintainerTest {
// Create applications
fixture.activate();
- assertEquals("Initial applications are deployed", 3, fixture.deployer.redeployments);
+ assertEquals("Initial applications are deployed", 3, fixture.deployer.activations);
ExpeditedChangeApplicationMaintainer maintainer = new ExpeditedChangeApplicationMaintainer(fixture.deployer,
new TestMetric(),
nodeRepository,
@@ -51,34 +51,34 @@ public class ExpeditedChangeApplicationMaintainerTest {
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("No changes -> no redeployments", 3, fixture.deployer.redeployments);
+ assertEquals("No changes -> no redeployments", 3, fixture.deployer.activations);
nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app1).asList().get(3).hostname(), Agent.system, "Failing to unit test");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("System change -> no redeployments", 3, fixture.deployer.redeployments);
+ assertEquals("System change -> no redeployments", 3, fixture.deployer.activations);
clock.advance(Duration.ofSeconds(1));
nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app2).asList().get(4).hostname(), Agent.operator, "Manual node failing");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("Operator change -> redeployment", 4, fixture.deployer.redeployments);
+ assertEquals("Operator change -> redeployment", 4, fixture.deployer.activations);
clock.advance(Duration.ofSeconds(1));
nodeRepository.nodes().fail(nodeRepository.nodes().list().owner(fixture.app3).asList().get(1).hostname(), Agent.operator, "Manual node failing");
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("Operator change -> redeployment", 5, fixture.deployer.redeployments);
+ assertEquals("Operator change -> redeployment", 5, fixture.deployer.activations);
clock.advance(Duration.ofSeconds(1));
fixture.tester.makeReadyNodes(1, fixture.nodeResources, NodeType.proxy);
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("Ready proxy node -> redeployment", 6, fixture.deployer.redeployments);
+ assertEquals("Ready proxy node -> redeployment", 6, fixture.deployer.activations);
clock.advance(Duration.ofMinutes(2));
maintainer.maintain();
- assertEquals("No further operator changes -> no (new) redeployments", 6, fixture.deployer.redeployments);
+ assertEquals("No further operator changes -> no (new) redeployments", 6, fixture.deployer.activations);
}
private static class Fixture {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
index c63be6d5dc5..12f10e434e6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
@@ -211,7 +211,7 @@ public class NodeFailerTest {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertFalse(tester.nodeRepository.nodes().node(downHost1).get().isDown());
@@ -224,7 +224,7 @@ public class NodeFailerTest {
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertTrue(tester.nodeRepository.nodes().node(downHost1).get().isDown());
@@ -237,7 +237,7 @@ public class NodeFailerTest {
tester.runMaintainers();
assertFalse(tester.nodeRepository.nodes().node(downHost1).get().isDown());
assertTrue(tester.nodeRepository.nodes().node(downHost1).get().isUp());
- assertEquals(1, tester.deployer.redeployments);
+ assertEquals(1, tester.deployer.activations);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(downHost2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
@@ -253,7 +253,7 @@ public class NodeFailerTest {
// the host is still down and fails
tester.clock.advance(Duration.ofMinutes(5));
tester.runMaintainers();
- assertEquals(2, tester.deployer.redeployments);
+ assertEquals(2, tester.deployer.activations);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
@@ -264,7 +264,7 @@ public class NodeFailerTest {
for (int minutes = 0; minutes < 75; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
- assertEquals(2, tester.deployer.redeployments);
+ assertEquals(2, tester.deployer.activations);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
@@ -274,7 +274,7 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofDays(1));
tester.runMaintainers();
// The node is now failed
- assertEquals(3, tester.deployer.redeployments);
+ assertEquals(3, tester.deployer.activations);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertTrue("The index of the last failed node is not reused",
@@ -330,7 +330,7 @@ public class NodeFailerTest {
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(0, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
}
@@ -338,7 +338,7 @@ public class NodeFailerTest {
// downHost should now be failed and replaced
tester.clock.advance(Duration.ofDays(1));
tester.runMaintainers();
- assertEquals(1, tester.deployer.redeployments);
+ assertEquals(1, tester.deployer.activations);
assertEquals(1, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(downHost, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).asList().get(0).hostname());
@@ -364,7 +364,7 @@ public class NodeFailerTest {
// Node is failed and replaced
tester.runMaintainers();
- assertEquals(1, tester.deployer.redeployments);
+ assertEquals(1, tester.deployer.activations);
NodeList failedOrActive = tester.nodeRepository.nodes().list(Node.State.active, Node.State.failed);
assertEquals(4, failedOrActive.state(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(Set.of(downNode.hostname()), failedOrActive.state(Node.State.failed).nodeType(NodeType.tenant).hostnames());
@@ -392,7 +392,7 @@ public class NodeFailerTest {
for (int minutes = 0; minutes < 45; minutes += 5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
}
@@ -400,7 +400,7 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(30));
tester.runMaintainers();
- assertEquals(2, tester.deployer.redeployments);
+ assertEquals(2, tester.deployer.activations);
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(7, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
@@ -409,7 +409,7 @@ public class NodeFailerTest {
// The failing of the host is deferred to the next maintain
tester.runMaintainers();
- assertEquals(2 + 1, tester.deployer.redeployments);
+ assertEquals(2 + 1, tester.deployer.activations);
assertEquals(2, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
@@ -430,7 +430,7 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofMinutes(30));
tester.runMaintainers();
- assertEquals(3 + 1, tester.deployer.redeployments);
+ assertEquals(3 + 1, tester.deployer.activations);
assertEquals(3, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
@@ -444,7 +444,7 @@ public class NodeFailerTest {
tester.runMaintainers();
tester.runMaintainers(); // The host is failed in the 2. maintain()
- assertEquals(5 + 2, tester.deployer.redeployments);
+ assertEquals(5 + 2, tester.deployer.activations);
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
@@ -458,7 +458,7 @@ public class NodeFailerTest {
tester.clock.advance(Duration.ofDays(1));
tester.runMaintainers();
- assertEquals(6 + 2, tester.deployer.redeployments);
+ assertEquals(6 + 2, tester.deployer.activations);
assertEquals(6, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(NodeType.tenant).size());
assertEquals(8, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.tenant).size());
assertEquals(5, tester.nodeRepository.nodes().list(Node.State.active).nodeType(NodeType.host).size());
@@ -493,7 +493,7 @@ public class NodeFailerTest {
for (int minutes = 0; minutes < 45; minutes +=5 ) {
tester.runMaintainers();
tester.clock.advance(Duration.ofMinutes(5));
- assertEquals( 0, tester.deployer.redeployments);
+ assertEquals( 0, tester.deployer.activations);
assertEquals(count, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
}
@@ -501,7 +501,7 @@ public class NodeFailerTest {
tester.runMaintainers();
// one down host should now be failed, but not two as we are only allowed to fail one proxy
- assertEquals(expectedFailCount, tester.deployer.redeployments);
+ assertEquals(expectedFailCount, tester.deployer.activations);
assertEquals(count - expectedFailCount, tester.nodeRepository.nodes().list(Node.State.active).nodeType(nodeType).size());
assertEquals(expectedFailCount, tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType).size());
tester.nodeRepository.nodes().list(Node.State.failed).nodeType(nodeType)
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
index 900d77fcb26..f92526282d1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/PeriodicApplicationMaintainerTest.java
@@ -124,40 +124,85 @@ public class PeriodicApplicationMaintainerTest {
public void application_deploy_inhibits_redeploy_for_a_while() {
fixture.activate();
- assertEquals("No deployment expected", 2, fixture.deployer.redeployments);
+ assertEquals("No deployment expected", 2, fixture.deployer.activations);
// Holds off on deployments a while after starting
fixture.runApplicationMaintainer();
- assertEquals("No deployment expected", 2, fixture.deployer.redeployments);
+ assertEquals("No deployment expected", 2, fixture.deployer.activations);
// Exhaust initial wait period
clock.advance(Duration.ofMinutes(30).plus(Duration.ofSeconds(1)));
// Will not do any deployments, as bootstrapping is still in progress
fixture.runApplicationMaintainer();
- assertEquals("No deployment expected", 2, fixture.deployer.redeployments);
+ assertEquals("No deployment expected", 2, fixture.deployer.activations);
// First deployment of applications will happen now, as bootstrapping is done
fixture.setBootstrapping(false);
fixture.runApplicationMaintainer();
- assertEquals("No deployment expected", 4, fixture.deployer.redeployments);
+ assertEquals("No deployment expected", 4, fixture.deployer.activations);
Instant firstDeployTime = clock.instant();
+ assertEquals(firstDeployTime, fixture.deployer.deployTime(fixture.app1).get());
+ assertEquals(firstDeployTime, fixture.deployer.deployTime(fixture.app2).get());
assertEquals(firstDeployTime, fixture.deployer.activationTime(fixture.app1).get());
assertEquals(firstDeployTime, fixture.deployer.activationTime(fixture.app2).get());
clock.advance(Duration.ofMinutes(5));
fixture.runApplicationMaintainer();
// Too soon: Not redeployed:
- assertEquals("No deployment expected", 4, fixture.deployer.redeployments);
+ assertEquals("No deployment expected", 4, fixture.deployer.activations);
+ assertEquals(firstDeployTime, fixture.deployer.deployTime(fixture.app1).get());
+ assertEquals(firstDeployTime, fixture.deployer.deployTime(fixture.app2).get());
assertEquals(firstDeployTime, fixture.deployer.activationTime(fixture.app1).get());
assertEquals(firstDeployTime, fixture.deployer.activationTime(fixture.app2).get());
clock.advance(Duration.ofMinutes(30));
+ Instant instant1 = clock.instant();
fixture.runApplicationMaintainer();
// Redeployed:
- assertEquals("No deployment expected", 6, fixture.deployer.redeployments);
- assertEquals(clock.instant(), fixture.deployer.activationTime(fixture.app1).get());
- assertEquals(clock.instant(), fixture.deployer.activationTime(fixture.app2).get());
+ assertEquals("No deployment expected", 6, fixture.deployer.activations);
+ assertEquals(instant1, fixture.deployer.deployTime(fixture.app1).get());
+ assertEquals(instant1, fixture.deployer.deployTime(fixture.app2).get());
+ assertEquals(instant1, fixture.deployer.activationTime(fixture.app1).get());
+ assertEquals(instant1, fixture.deployer.activationTime(fixture.app2).get());
+
+ clock.advance(Duration.ofMinutes(30));
+ // Prepare (simulate that activation failed)
+ fixture.prepare();
+ Instant secondDeployTime = clock.instant();
+ fixture.runApplicationMaintainer();
+ // Too soon: Not redeployed, since a deployment (prepare) was done less than 30 minutes ago:
+ assertEquals("No deployment expected", 6, fixture.deployer.activations);
+ assertEquals(secondDeployTime, fixture.deployer.deployTime(fixture.app1).get());
+ assertEquals(secondDeployTime, fixture.deployer.deployTime(fixture.app2).get());
+ assertEquals(instant1, fixture.deployer.activationTime(fixture.app1).get());
+ assertEquals(instant1, fixture.deployer.activationTime(fixture.app2).get());
+
+ clock.advance(Duration.ofMinutes(30));
+ }
+
+ @Test(timeout = 60_000)
+ public void application_deploy_triggered_by_reindexing_ready() {
+ fixture.activate();
+
+ assertEquals("No deployment expected", 2, fixture.deployer.activations);
+
+ // Holds off on deployments a while after starting
+ fixture.setBootstrapping(false);
+ fixture.runApplicationMaintainer();
+ assertEquals("No deployment expected", 2, fixture.deployer.activations);
+
+ Instant firstDeployTime = clock.instant();
+
+ // Reindexing readied before last deploy time triggers nothing.
+ fixture.deployer.setReadiedReindexingAt(firstDeployTime.minusSeconds(1));
+ fixture.runApplicationMaintainer();
+ assertEquals("No deployment expected", 2, fixture.deployer.activations);
+
+ // Reindexing readied after last deploy time triggers redeployment.
+ fixture.deployer.setReadiedReindexingAt(firstDeployTime.plusSeconds(1));
+ fixture.runApplicationMaintainer();
+ assertEquals("No deployment expected", 4, fixture.deployer.activations);
}
@Test(timeout = 60_000)
@@ -188,8 +233,8 @@ public class PeriodicApplicationMaintainerTest {
fixture.deployer.lock().unlock();
fixture.runApplicationMaintainer();
Instant deployTime = clock.instant();
- assertEquals(deployTime, fixture.deployer.activationTime(fixture.app1).get());
- assertEquals(deployTime, fixture.deployer.activationTime(fixture.app2).get());
+ assertEquals(deployTime, fixture.deployer.deployTime(fixture.app1).get());
+ assertEquals(deployTime, fixture.deployer.deployTime(fixture.app2).get());
// Too soon: Already deployed recently
clock.advance(Duration.ofMinutes(5));
@@ -228,6 +273,11 @@ public class PeriodicApplicationMaintainerTest {
Duration.ofMinutes(30));
}
+ void prepare() {
+ deployer.deployFromLocalActive(app1, false).get().prepare();
+ deployer.deployFromLocalActive(app2, false).get().prepare();
+ }
+
void activate() {
deployer.deployFromLocalActive(app1, false).get().activate();
deployer.deployFromLocalActive(app2, false).get().activate();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
index f42044c2944..d0ac59b1e15 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
@@ -104,7 +104,7 @@ public class RetiredExpirerTest {
createRetiredExpirer(deployer).run();
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(4, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
- assertEquals(1, deployer.redeployments);
+ assertEquals(1, deployer.activations);
// inactivated nodes are not retired
for (Node node : nodeRepository.nodes().list(Node.State.inactive).owner(applicationId))
@@ -147,14 +147,14 @@ public class RetiredExpirerTest {
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.dirty).owner(applicationId).size());
- assertEquals(1, deployer.redeployments);
+ assertEquals(1, deployer.activations);
verify(orchestrator, times(4)).acquirePermissionToRemove(any());
// Running it again has no effect
retiredExpirer.run();
assertEquals(5, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.dirty).owner(applicationId).size());
- assertEquals(1, deployer.redeployments);
+ assertEquals(1, deployer.activations);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
// Running it again deactivates nodes that have exceeded max retirement period
@@ -163,7 +163,7 @@ public class RetiredExpirerTest {
assertEquals(3, nodeRepository.nodes().list(Node.State.active).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.dirty).owner(applicationId).size());
assertEquals(2, nodeRepository.nodes().list(Node.State.inactive).owner(applicationId).size());
- assertEquals(2, deployer.redeployments);
+ assertEquals(2, deployer.activations);
verify(orchestrator, times(6)).acquirePermissionToRemove(any());
// Removed nodes are not retired
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 8aaf0eb20e7..3145675325b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -75,7 +75,7 @@ public class ScalingSuggestionsMaintainerTest {
assertEquals("8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
- assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 14.2 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ assertEquals("7 nodes with [vcpu: 4.1, memory: 5.3 Gb, disk: 16.5 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app2, cluster2, tester).resources().get().toString());
// Utilization goes way down
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java
index a5ac2be72ee..da64fa2fd64 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java
@@ -51,7 +51,7 @@ public class SpareCapacityMaintainerTest {
public void testEmpty() {
var tester = new SpareCapacityMaintainerTester();
tester.maintainer.maintain();
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
}
@@ -61,7 +61,7 @@ public class SpareCapacityMaintainerTest {
tester.addHosts(2, new NodeResources(10, 100, 1000, 1));
tester.addNodes(0, 1, new NodeResources(10, 100, 1000, 1), 0);
tester.maintainer.maintain();
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
assertEquals(1, tester.metric.values.get("spareHostCapacity"));
}
@@ -72,7 +72,7 @@ public class SpareCapacityMaintainerTest {
tester.addHosts(3, new NodeResources(10, 100, 1000, 1));
tester.addNodes(0, 1, new NodeResources(10, 100, 1000, 1), 0);
tester.maintainer.maintain();
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
assertEquals(2, tester.metric.values.get("spareHostCapacity"));
}
@@ -83,7 +83,7 @@ public class SpareCapacityMaintainerTest {
tester.addHosts(2, new NodeResources(10, 100, 1000, 1));
tester.addNodes(0, 2, new NodeResources(10, 100, 1000, 1), 0);
tester.maintainer.maintain();
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
assertEquals(0, tester.metric.values.get("spareHostCapacity"));
}
@@ -95,7 +95,7 @@ public class SpareCapacityMaintainerTest {
tester.addNodes(0, 2, new NodeResources(5, 50, 500, 0.5), 0);
tester.addNodes(1, 2, new NodeResources(5, 50, 500, 0.5), 2);
tester.maintainer.maintain();
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
assertEquals(2, tester.metric.values.get("spareHostCapacity"));
}
@@ -109,13 +109,13 @@ public class SpareCapacityMaintainerTest {
tester.addNodes(1, 2, new NodeResources(5, 50, 500, 0.5), 2);
tester.addNodes(2, 2, new NodeResources(5, 50, 500, 0.5), 4);
tester.maintainer.maintain();
- assertEquals(1, tester.deployer.redeployments);
+ assertEquals(1, tester.deployer.activations);
assertEquals(1, tester.nodeRepository.nodes().list().retired().size());
assertEquals(1, tester.metric.values.get("spareHostCapacity"));
// Maintaining again is a no-op since the node to move is already retired
tester.maintainer.maintain();
- assertEquals(1, tester.deployer.redeployments);
+ assertEquals(1, tester.deployer.activations);
assertEquals(1, tester.nodeRepository.nodes().list().retired().size());
assertEquals(1, tester.metric.values.get("spareHostCapacity"));
}
@@ -132,7 +132,7 @@ public class SpareCapacityMaintainerTest {
tester.addNodes(2, 2, new NodeResources(5, 50, 500, 0.5), 4);
tester.addNodes(3, 2, new NodeResources(5, 50, 500, 0.5), 6);
tester.maintainer.maintain();
- assertEquals(1, tester.deployer.redeployments);
+ assertEquals(1, tester.deployer.activations);
assertEquals(1, tester.nodeRepository.nodes().list().retired().size());
assertEquals(1, tester.metric.values.get("spareHostCapacity"));
}
@@ -145,7 +145,7 @@ public class SpareCapacityMaintainerTest {
setupMultipleHosts(tester, 4);
tester.maintainer.maintain();
- assertEquals(1, tester.deployer.redeployments);
+ assertEquals(1, tester.deployer.activations);
assertEquals(1, tester.nodeRepository.nodes().list().retired().size());
assertEquals(1, tester.metric.values.get("spareHostCapacity"));
}
@@ -156,7 +156,7 @@ public class SpareCapacityMaintainerTest {
setupMultipleHosts(tester, 3);
tester.maintainer.maintain();
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
assertEquals(0, tester.metric.values.get("spareHostCapacity"));
}
@@ -199,7 +199,7 @@ public class SpareCapacityMaintainerTest {
tester.addNodes(6, 1, new NodeResources( 4, 40, 400, 0.4), 6);
tester.maintainer.maintain();
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
assertEquals(0, tester.metric.values.get("spareHostCapacity"));
}
@@ -216,7 +216,7 @@ public class SpareCapacityMaintainerTest {
tester.maintainer.maintain();
assertEquals(2, tester.metric.values.get("overcommittedHosts"));
- assertEquals(1, tester.deployer.redeployments);
+ assertEquals(1, tester.deployer.activations);
assertEquals(List.of(new NodeResources( 1.1, 10, 100, 0.1)), tester.nodeRepository.nodes().list().retired().mapToList(Node::resources));
}
@@ -239,7 +239,7 @@ public class SpareCapacityMaintainerTest {
tester.maintainer.maintain();
long totalTime = System.currentTimeMillis() - startTime;
System.out.println("Complete in " + ( totalTime / 1000) + " seconds");
- assertEquals(0, tester.deployer.redeployments);
+ assertEquals(0, tester.deployer.activations);
assertEquals(0, tester.nodeRepository.nodes().list().retired().size());
assertEquals(0, tester.metric.values.get("spareHostCapacity"));
}
@@ -273,8 +273,7 @@ public class SpareCapacityMaintainerTest {
new MemoryMetricsDb(clock),
new OrchestratorMock(),
true,
- 1,
- 1000);
+ 1);
deployer = new MockDeployer(nodeRepository);
maintainer = new SpareCapacityMaintainer(deployer, nodeRepository, metric, Duration.ofDays(1), maxIterations);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
index c0d6ab90f06..e755f3c3cfc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
@@ -24,7 +24,7 @@ public class CuratorDbTest {
private final Curator curator = new MockCurator();
private final CuratorDb zkClient = new CuratorDb(
- FlavorConfigBuilder.createDummies("default"), curator, Clock.systemUTC(), true, 1000);
+ FlavorConfigBuilder.createDummies("default"), curator, Clock.systemUTC(), true);
@Test
public void can_read_stored_host_information() throws Exception {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
index 56f03423ad2..6e2d1e7fcd6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
@@ -60,7 +60,7 @@ import static org.junit.Assert.assertTrue;
public class NodeSerializerTest {
private final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", "large", "ugccloud-container", "arm64", "gpu");
- private final NodeSerializer nodeSerializer = new NodeSerializer(nodeFlavors, 1000);
+ private final NodeSerializer nodeSerializer = new NodeSerializer(nodeFlavors);
private final ManualClock clock = new ManualClock();
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImagesTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImagesTest.java
index bb7ea52ca0e..4537aaef45b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImagesTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ContainerImagesTest.java
@@ -24,7 +24,7 @@ public class ContainerImagesTest {
@Test
public void image_selection() {
- DockerImage defaultImage = DockerImage.fromString("registry.example.com/vespa/default");
+ DockerImage defaultImage = DockerImage.fromString("different.example.com/vespa/default");
DockerImage tenantImage = DockerImage.fromString("registry.example.com/vespa/tenant");
DockerImage gpuImage = DockerImage.fromString("registry.example.com/vespa/tenant-gpu");
ContainerImages images = new ContainerImages(defaultImage, Optional.of(tenantImage), Optional.of(gpuImage));
@@ -45,6 +45,13 @@ public class ContainerImagesTest {
DockerImage requested = DockerImage.fromString("registry.example.com/vespa/special");
assertEquals(requested, images.get(node(NodeType.tenant, requested)));
+ // Malicious registry is rewritten to the trusted one
+ DockerImage malicious = DockerImage.fromString("malicious.example.com/vespa/special");
+ assertEquals(requested, images.get(node(NodeType.tenant, malicious)));
+
+ // Requested image registry for config is rewritten to the defaultImage registry
+ assertEquals(DockerImage.fromString("different.example.com/vespa/special"), images.get(node(NodeType.config, requested)));
+
// When there is no custom tenant image, the default one is used
images = new ContainerImages(defaultImage, Optional.empty(), Optional.of(gpuImage));
assertEquals(defaultImage, images.get(node(NodeType.host)));
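The two new assertions above describe a registry-rewrite behaviour: the repository and tag of the requested image are kept, while its registry is replaced by a trusted one, and config nodes end up on the default image's registry. The sketch below only illustrates that string-level rewrite under those assumptions; it is not the real ContainerImages implementation, and the helper and its parsing are invented for the example.

// Illustrative sketch only; not the ContainerImages implementation.
final class RegistryRewriteSketch {

    // Replaces the registry part of "registry/repository[:tag]" with the given trusted registry.
    static String rewriteRegistry(String requestedImage, String trustedRegistry) {
        int firstSlash = requestedImage.indexOf('/');
        String repositoryAndTag = firstSlash >= 0 ? requestedImage.substring(firstSlash + 1) : requestedImage;
        return trustedRegistry + "/" + repositoryAndTag;
    }

    public static void main(String[] args) {
        // A malicious registry is replaced by the trusted one, keeping the repository path.
        System.out.println(rewriteRegistry("malicious.example.com/vespa/special", "registry.example.com"));
        // For config nodes the registry of the default image is used instead.
        System.out.println(rewriteRegistry("registry.example.com/vespa/special", "different.example.com"));
    }
}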
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index bca48b19ccf..60dd9ce59ef 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -128,8 +128,7 @@ public class ProvisioningTester {
new MemoryMetricsDb(clock),
orchestrator,
true,
- spareCount,
- 1000);
+ spareCount);
this.provisioner = new NodeRepositoryProvisioner(nodeRepository, zone, provisionServiceProvider, new MockMetric());
this.capacityPolicies = new CapacityPolicies(nodeRepository);
this.provisionLogger = new InMemoryProvisionLogger();
diff --git a/orchestrator/pom.xml b/orchestrator/pom.xml
index 3ec98af3565..00244124add 100644
--- a/orchestrator/pom.xml
+++ b/orchestrator/pom.xml
@@ -129,7 +129,7 @@
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-all</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/parent/pom.xml b/parent/pom.xml
index df20b94ec79..f6a2cba379c 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -45,7 +45,7 @@
<extension>
<groupId>org.apache.maven.archetype</groupId>
<artifactId>archetype-packaging</artifactId>
- <version>2.0</version>
+ <version>3.2.1</version>
</extension>
</extensions>
<pluginManagement>
@@ -58,7 +58,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
- <version>1.7</version>
+ <version>${maven-antrun-plugin.vespa.version}</version>
</plugin>
<plugin>
<groupId>org.apache.felix</groupId>
@@ -227,17 +227,17 @@
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
- <version>1.9.1</version>
+ <version>3.4.0</version>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>exec-maven-plugin</artifactId>
- <version>1.6.0</version>
+ <version>3.1.0</version>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>properties-maven-plugin</artifactId>
- <version>1.0.0</version>
+ <version>${properties-maven-plugin.vespa.version}</version>
</plugin>
<plugin>
<groupId>com.yahoo.vespa</groupId>
@@ -307,7 +307,7 @@
-->
<groupId>org.openrewrite.maven</groupId>
<artifactId>rewrite-maven-plugin</artifactId>
- <version>4.27.0</version>
+ <version>5.4.2</version>
<configuration>
<activeRecipes>
<recipe>org.openrewrite.java.testing.junit5.JUnit5BestPractices</recipe>
@@ -317,7 +317,7 @@
<dependency>
<groupId>org.openrewrite.recipe</groupId>
<artifactId>rewrite-testing-frameworks</artifactId>
- <version>1.24.0</version>
+ <version>2.0.10</version>
</dependency>
</dependencies>
</plugin>
@@ -425,7 +425,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-gpg-plugin</artifactId>
- <version>1.6</version>
+ <version>3.1.0</version>
<executions>
<execution>
<id>sign-artifacts</id>
@@ -493,7 +493,7 @@
<dependency>
<groupId>com.github.luben</groupId>
<artifactId>zstd-jni</artifactId>
- <version>1.5.5-4</version>
+ <version>${luben.zstd.vespa.version}</version>
</dependency>
<dependency>
<!-- Needed by zookeeper, which has an optional dependency.
@@ -548,7 +548,7 @@
<dependency>
<groupId>com.google.jimfs</groupId>
<artifactId>jimfs</artifactId>
- <version>1.2</version>
+ <version>${jimfs.vespa.version}</version>
<scope>test</scope> <!-- TODO: remove scope from parent pom -->
</dependency>
<dependency>
@@ -562,11 +562,6 @@
<version>${icu4j.vespa.version}</version>
</dependency>
<dependency>
- <groupId>com.infradna.tool</groupId>
- <artifactId>bridge-method-annotation</artifactId>
- <version>1.4</version>
- </dependency>
- <dependency>
<groupId>com.microsoft.onnxruntime</groupId>
<artifactId>onnxruntime</artifactId>
<version>${onnxruntime.vespa.version}</version>
@@ -617,7 +612,7 @@
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
- <version>3.12.0</version>
+ <version>${commons-lang3.vespa.version}</version>
</dependency>
<dependency>
<groupId>io.jsonwebtoken</groupId>
@@ -700,11 +695,6 @@
<version>${jna.vespa.version}</version>
</dependency>
<dependency>
- <groupId>net.spy</groupId>
- <artifactId>spymemcached</artifactId>
- <version>2.10.1</version>
- </dependency>
- <dependency>
<groupId>org.antlr</groupId>
<artifactId>antlr-runtime</artifactId>
<version>${antlr.vespa.version}</version>
@@ -747,7 +737,7 @@
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-csv</artifactId>
- <version>1.8</version>
+ <version>${commons-csv.vespa.version}</version>
</dependency>
<dependency>
<groupId>org.apache.felix</groupId>
@@ -858,7 +848,7 @@
<dependency>
<groupId>org.apache.maven.shared</groupId>
<artifactId>maven-dependency-tree</artifactId>
- <version>3.2.0</version>
+ <version>3.2.1</version>
</dependency>
<dependency>
<groupId>org.apache.maven.surefire</groupId>
@@ -909,7 +899,7 @@
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
- <version>3.11.1</version>
+ <version>${assertj.vespa.version}</version>
</dependency>
<dependency>
<groupId>org.codehaus.plexus</groupId>
@@ -932,12 +922,6 @@
<version>${bouncycastle.vespa.version}</version>
</dependency>
<dependency>
- <groupId>org.cthul</groupId>
- <artifactId>cthul-matchers</artifactId>
- <version>1.0</version>
- <scope>test</scope> <!-- TODO: remove scope from parent pom -->
- </dependency>
- <dependency>
<groupId>org.eclipse.collections</groupId>
<artifactId>eclipse-collections</artifactId>
<version>${eclipse-collections.vespa.version}</version>
@@ -1019,21 +1003,18 @@
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-all</artifactId>
+ <artifactId>hamcrest</artifactId>
<version>${hamcrest.vespa.version}</version>
- <scope>test</scope> <!-- TODO: remove scope from parent pom -->
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-core</artifactId>
<version>${hamcrest.vespa.version}</version>
- <scope>test</scope> <!-- TODO: remove scope from parent pom -->
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<version>${hamcrest.vespa.version}</version>
- <scope>test</scope> <!-- TODO: remove scope from parent pom -->
</dependency>
<dependency>
<groupId>org.hdrhistogram</groupId>
@@ -1086,7 +1067,7 @@
See pluginManagement of rewrite-maven-plugin for more details -->
<groupId>org.openrewrite.recipe</groupId>
<artifactId>rewrite-recipe-bom</artifactId>
- <version>1.5.0</version>
+ <version>2.2.1</version>
<type>pom</type>
<scope>import</scope>
</dependency>
@@ -1123,23 +1104,17 @@
<dependency>
<groupId>org.twdata.maven</groupId>
<artifactId>mojo-executor</artifactId>
- <version>2.3.0</version>
+ <version>${mojo-executor.vespa.version}</version>
</dependency>
<dependency>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
- <version>1.1.10.1</version>
+ <version>${snappy.vespa.version}</version>
</dependency>
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
- <version>3.2.5</version>
- </dependency>
- <dependency>
- <groupId>uk.co.datumedge</groupId>
- <artifactId>hamcrest-json</artifactId>
- <version>0.2</version>
- <scope>test</scope> <!-- TODO: remove scope from parent pom -->
+ <version>${dropwizard.metrics.vespa.version}</version>
</dependency>
<dependency>
<groupId>xerces</groupId>
@@ -1154,12 +1129,27 @@
<dependency>
<groupId>com.google.errorprone</groupId>
<artifactId>error_prone_annotations</artifactId>
- <version>2.18.0</version>
+ <version>${error-prone-annotations.vespa.version}</version>
</dependency>
<dependency>
<groupId>org.checkerframework</groupId>
<artifactId>checker-qual</artifactId>
- <version>3.30.0</version>
+ <version>${checker-qual.vespa.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client-apache-v2</artifactId>
+ <version>1.43.3</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client</artifactId>
+ <version>1.43.3</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.auth</groupId>
+ <artifactId>google-auth-library-oauth2-http</artifactId>
+ <version>1.19.0</version>
</dependency>
</dependencies>
diff --git a/persistence/src/vespa/persistence/spi/clusterstate.cpp b/persistence/src/vespa/persistence/spi/clusterstate.cpp
index ad5039fade1..e6708192d47 100644
--- a/persistence/src/vespa/persistence/spi/clusterstate.cpp
+++ b/persistence/src/vespa/persistence/spi/clusterstate.cpp
@@ -97,7 +97,7 @@ void ClusterState::serialize(vespalib::nbostream& o) const {
assert(_distribution);
assert(_state);
vespalib::asciistream tmp;
- _state->serialize(tmp, false);
+ _state->serialize(tmp);
o << tmp.str() << _nodeIndex;
o << _distribution->serialize();
}
diff --git a/provided-dependencies/pom.xml b/provided-dependencies/pom.xml
index da7cc007053..09d76265466 100755
--- a/provided-dependencies/pom.xml
+++ b/provided-dependencies/pom.xml
@@ -67,7 +67,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
- <version>2.4</version>
+ <version>${maven-jar-plugin.vespa.version}</version>
<configuration>
<archive>
<index>true</index>
diff --git a/renovate.json b/renovate.json
index 6b37e2049cb..a87bb9a67c6 100644
--- a/renovate.json
+++ b/renovate.json
@@ -13,6 +13,8 @@
"com.yahoo.vespa.bundle-plugin:test-bundles",
"com.yahoo.vespa.jdisc_core:test_bundles",
"com.yahoo.vespa:cloud-tenant-base",
+ "com.yahoo.vespa:container-dependency-versions",
+ "com.yahoo.vespa:hosted-tenant-base",
"com.yahoo.vespa:parent",
"com.yahoo.vespa:zookeeper-server-parent",
"github.com/go-json-experiment/json",
diff --git a/searchcore/src/tests/grouping/CMakeLists.txt b/searchcore/src/tests/grouping/CMakeLists.txt
index cacdda484be..b127132cbae 100644
--- a/searchcore/src/tests/grouping/CMakeLists.txt
+++ b/searchcore/src/tests/grouping/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchcore_grouping_test_app TEST
SOURCES
- grouping.cpp
+ grouping_test.cpp
DEPENDS
searchcore_grouping
searchcore_matching
diff --git a/searchcore/src/tests/grouping/grouping.cpp b/searchcore/src/tests/grouping/grouping_test.cpp
index 015aec73999..015aec73999 100644
--- a/searchcore/src/tests/grouping/grouping.cpp
+++ b/searchcore/src/tests/grouping/grouping_test.cpp
diff --git a/searchcore/src/tests/proton/common/attribute_updater/attribute_updater_test.cpp b/searchcore/src/tests/proton/common/attribute_updater/attribute_updater_test.cpp
index 892be2c874f..8c3ce4c5031 100644
--- a/searchcore/src/tests/proton/common/attribute_updater/attribute_updater_test.cpp
+++ b/searchcore/src/tests/proton/common/attribute_updater/attribute_updater_test.cpp
@@ -436,6 +436,15 @@ TEST_F("require that tensor modify update is applied",
f.assertTensor(TensorSpec(f.type).add({{"x", 0}}, 7).add({{"x", 1}}, 5));
}
+TEST_F("require that tensor modify update with 'create: true' is applied to non-existing tensor",
+ TensorFixture<DenseTensorAttribute>("tensor(x[2])", "dense_tensor"))
+{
+ f.applyValueUpdate(*f.attribute, 1,
+ std::make_unique<TensorModifyUpdate>(TensorModifyUpdate::Operation::ADD,
+ makeTensorFieldValue(TensorSpec("tensor(x{})").add({{"x", "1"}}, 3)), 0.0));
+ f.assertTensor(TensorSpec(f.type).add({{"x", 0}}, 0).add({{"x", 1}}, 3));
+}
+
TEST_F("require that tensor add update is applied",
TensorFixture<SerializedFastValueAttribute>("tensor(x{})", "sparse_tensor"))
{
diff --git a/searchcore/src/tests/proton/common/cachedselect_test.cpp b/searchcore/src/tests/proton/common/cachedselect_test.cpp
index 9a9d491467c..02c5128a0a7 100644
--- a/searchcore/src/tests/proton/common/cachedselect_test.cpp
+++ b/searchcore/src/tests/proton/common/cachedselect_test.cpp
@@ -289,7 +289,7 @@ MyDB::addDoc(uint32_t lid,
const Document &
MyDB::getDoc(uint32_t lid) const
{
- LidToDocSP::const_iterator it(_lidToDocSP.find(lid));
+ auto it = _lidToDocSP.find(lid);
ASSERT_TRUE(it != _lidToDocSP.end());
return *it->second;
}
diff --git a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
index fdfa7522350..62be83f5f51 100644
--- a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
@@ -239,12 +239,12 @@ struct MyDocumentStore : public test::DummyDocumentStore
{}
~MyDocumentStore() override;
Document::UP read(DocumentIdT lid, const document::DocumentTypeRepo &) const override {
- DocMap::const_iterator itr = _docs.find(lid);
+ auto itr = _docs.find(lid);
if (itr != _docs.end()) {
Document::UP retval(itr->second->clone());
return retval;
}
- return Document::UP();
+ return {};
}
void write(uint64_t syncToken, DocumentIdT lid, const document::Document& doc) override {
_lastSyncToken = syncToken;
@@ -345,7 +345,7 @@ struct MyAttributeWriter : public IAttributeWriter
if (_attrs.count(attrName) == 0) {
return nullptr;
}
- AttrMap::const_iterator itr = _attrMap.find(attrName);
+ auto itr = _attrMap.find(attrName);
return ((itr == _attrMap.end()) ? nullptr : itr->second.get());
}
void put(SerialNum serialNum, const document::Document &doc, DocumentIdT lid, OnWriteDoneType) override {
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
index bf960768c89..aeaa35309b5 100644
--- a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
@@ -193,7 +193,7 @@ struct MyBucketModifiedHandler : public IBucketModifiedHandler
{
BucketIdVector _modified;
void notifyBucketModified(const BucketId &bucket) override {
- BucketIdVector::const_iterator itr = std::find(_modified.begin(), _modified.end(), bucket);
+ auto itr = std::find(_modified.begin(), _modified.end(), bucket);
if (itr == _modified.end()) {
_modified.push_back(bucket);
}
diff --git a/searchcore/src/tests/proton/feed_and_search/CMakeLists.txt b/searchcore/src/tests/proton/feed_and_search/CMakeLists.txt
index 24588d21d99..e7000af18cb 100644
--- a/searchcore/src/tests/proton/feed_and_search/CMakeLists.txt
+++ b/searchcore/src/tests/proton/feed_and_search/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchcore_feed_and_search_test_app TEST
SOURCES
- feed_and_search.cpp
+ feed_and_search_test.cpp
DEPENDS
searchlib_test
)
diff --git a/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp b/searchcore/src/tests/proton/feed_and_search/feed_and_search_test.cpp
index 6838f61967e..6838f61967e 100644
--- a/searchcore/src/tests/proton/feed_and_search/feed_and_search.cpp
+++ b/searchcore/src/tests/proton/feed_and_search/feed_and_search_test.cpp
diff --git a/searchcore/src/tests/proton/feedtoken/CMakeLists.txt b/searchcore/src/tests/proton/feedtoken/CMakeLists.txt
index 8cf242dd3c1..0e4f3a2a08c 100644
--- a/searchcore/src/tests/proton/feedtoken/CMakeLists.txt
+++ b/searchcore/src/tests/proton/feedtoken/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchcore_feedtoken_test_app TEST
SOURCES
- feedtoken.cpp
+ feedtoken_test.cpp
DEPENDS
searchcore_pcommon
searchcore_proton_metrics
diff --git a/searchcore/src/tests/proton/feedtoken/feedtoken.cpp b/searchcore/src/tests/proton/feedtoken/feedtoken_test.cpp
index 19521dfb4f9..19521dfb4f9 100644
--- a/searchcore/src/tests/proton/feedtoken/feedtoken.cpp
+++ b/searchcore/src/tests/proton/feedtoken/feedtoken_test.cpp
diff --git a/searchcore/src/tests/proton/flushengine/flushengine_test.cpp b/searchcore/src/tests/proton/flushengine/flushengine_test.cpp
index 6cdf0c478ad..2c589085a90 100644
--- a/searchcore/src/tests/proton/flushengine/flushengine_test.cpp
+++ b/searchcore/src/tests/proton/flushengine/flushengine_test.cpp
@@ -691,7 +691,7 @@ bool
asserCorrectHandlers(const FlushEngine::FlushMetaSet & current1, const std::vector<const char *> & targets)
{
bool retval(targets.size() == current1.size());
- FlushEngine::FlushMetaSet::const_iterator curr(current1.begin());
+ auto curr = current1.begin();
if (retval) {
for (const char * target : targets) {
if (target != (curr++)->getName()) {
diff --git a/searchcore/src/tests/proton/index/diskindexcleaner_test.cpp b/searchcore/src/tests/proton/index/diskindexcleaner_test.cpp
index e8926a957b7..7ad6d40a30e 100644
--- a/searchcore/src/tests/proton/index/diskindexcleaner_test.cpp
+++ b/searchcore/src/tests/proton/index/diskindexcleaner_test.cpp
@@ -63,13 +63,11 @@ void createIndex(const string &name) {
vector<string> readIndexes() {
vector<string> indexes;
- FastOS_DirectoryScan dir_scan(index_dir.c_str());
- while (dir_scan.ReadNext()) {
- string name = dir_scan.GetName();
- if (!dir_scan.IsDirectory() || name.find("index.") != 0) {
- continue;
+ std::filesystem::directory_iterator dir_scan(index_dir);
+ for (auto& entry : dir_scan) {
+ if (entry.is_directory() && entry.path().filename().string().find("index.") == 0) {
+ indexes.push_back(entry.path().filename().string());
}
- indexes.push_back(name);
}
return indexes;
}
diff --git a/searchcore/src/tests/proton/index/fusionrunner_test.cpp b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
index c529c7b78c7..9052d024871 100644
--- a/searchcore/src/tests/proton/index/fusionrunner_test.cpp
+++ b/searchcore/src/tests/proton/index/fusionrunner_test.cpp
@@ -25,7 +25,6 @@
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/util/size_literals.h>
#include <vespa/vespalib/stllike/asciistream.h>
-#include <vespa/fastos/file.h>
#include <filesystem>
#include <set>
@@ -213,22 +212,16 @@ void Test::createIndex(const string &dir, uint32_t id, bool fusion) {
set<uint32_t> readFusionIds(const string &dir) {
set<uint32_t> ids;
- FastOS_DirectoryScan dir_scan(dir.c_str());
- while (dir_scan.ReadNext()) {
- if (!dir_scan.IsDirectory()) {
- continue;
+ const vespalib::string prefix("index.fusion.");
+ std::filesystem::directory_iterator dir_scan(dir);
+ for (auto& entry : dir_scan) {
+ if (entry.is_directory() && entry.path().filename().string().find(prefix) == 0) {
+ auto idString = entry.path().filename().string().substr(prefix.size());
+ vespalib::asciistream ist(idString);
+ uint32_t id;
+ ist >> id;
+ ids.insert(id);
}
- vespalib::string name = dir_scan.GetName();
- const vespalib::string prefix("index.fusion.");
- vespalib::string::size_type pos = name.find(prefix);
- if (pos != 0) {
- continue;
- }
- vespalib::string idString = name.substr(prefix.size());
- vespalib::asciistream ist(idString);
- uint32_t id;
- ist >> id;
- ids.insert(id);
}
return ids;
}
diff --git a/searchcore/src/tests/proton/index/indexmanager_test.cpp b/searchcore/src/tests/proton/index/indexmanager_test.cpp
index a7209ea8897..91f585a4f45 100644
--- a/searchcore/src/tests/proton/index/indexmanager_test.cpp
+++ b/searchcore/src/tests/proton/index/indexmanager_test.cpp
@@ -267,22 +267,16 @@ set<uint32_t>
readDiskIds(const string &dir, const string &type)
{
set<uint32_t> ids;
- FastOS_DirectoryScan dir_scan(dir.c_str());
- while (dir_scan.ReadNext()) {
- if (!dir_scan.IsDirectory()) {
- continue;
+ const string flush_prefix("index." + type + ".");
+ std::filesystem::directory_iterator dir_scan(dir);
+ for (auto& entry : dir_scan) {
+ if (entry.is_directory() && entry.path().filename().string().find(flush_prefix) == 0) {
+ auto idString = entry.path().filename().string().substr(flush_prefix.size());
+ vespalib::asciistream ist(idString);
+ uint32_t id;
+ ist >> id;
+ ids.insert(id);
}
- string name = dir_scan.GetName();
- const string flush_prefix("index." + type + ".");
- string::size_type pos = name.find(flush_prefix);
- if (pos != 0) {
- continue;
- }
- vespalib::string idString(name.substr(flush_prefix.size()));
- vespalib::asciistream ist(idString);
- uint32_t id;
- ist >> id;
- ids.insert(id);
}
return ids;
}
diff --git a/searchcore/src/tests/proton/matchengine/CMakeLists.txt b/searchcore/src/tests/proton/matchengine/CMakeLists.txt
index 1a91fd2a799..1452dc20737 100644
--- a/searchcore/src/tests/proton/matchengine/CMakeLists.txt
+++ b/searchcore/src/tests/proton/matchengine/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchcore_matchengine_test_app TEST
SOURCES
- matchengine.cpp
+ matchengine_test.cpp
DEPENDS
searchcore_matchengine
searchcore_matching
diff --git a/searchcore/src/tests/proton/matchengine/matchengine.cpp b/searchcore/src/tests/proton/matchengine/matchengine_test.cpp
index 514c9038945..514c9038945 100644
--- a/searchcore/src/tests/proton/matchengine/matchengine.cpp
+++ b/searchcore/src/tests/proton/matchengine/matchengine_test.cpp
diff --git a/searchcore/src/tests/proton/matching/matching_test.cpp b/searchcore/src/tests/proton/matching/matching_test.cpp
index b59384f1493..6ef462f80c4 100644
--- a/searchcore/src/tests/proton/matching/matching_test.cpp
+++ b/searchcore/src/tests/proton/matching/matching_test.cpp
@@ -1135,12 +1135,12 @@ TEST("require that docsum matcher can extract matching elements from single attr
EXPECT_EQUAL(list[1], 3u);
}
-struct GlobalFilterParamsFixture {
+struct AttributeBlueprintParamsFixture {
BlueprintFactory factory;
search::fef::test::IndexEnvironment index_env;
RankSetup rank_setup;
Properties rank_properties;
- GlobalFilterParamsFixture(double lower_limit, double upper_limit)
+ AttributeBlueprintParamsFixture(double lower_limit, double upper_limit, double target_hits_max_adjustment_factor)
: factory(),
index_env(),
rank_setup(factory, index_env),
@@ -1148,32 +1148,37 @@ struct GlobalFilterParamsFixture {
{
rank_setup.set_global_filter_lower_limit(lower_limit);
rank_setup.set_global_filter_upper_limit(upper_limit);
+ rank_setup.set_target_hits_max_adjustment_factor(target_hits_max_adjustment_factor);
}
- void set_query_properties(vespalib::stringref lower_limit, vespalib::stringref upper_limit) {
+ void set_query_properties(vespalib::stringref lower_limit, vespalib::stringref upper_limit,
+ vespalib::stringref target_hits_max_adjustment_factor) {
rank_properties.add(GlobalFilterLowerLimit::NAME, lower_limit);
rank_properties.add(GlobalFilterUpperLimit::NAME, upper_limit);
+ rank_properties.add(TargetHitsMaxAdjustmentFactor::NAME, target_hits_max_adjustment_factor);
}
AttributeBlueprintParams extract(uint32_t active_docids = 9, uint32_t docid_limit = 10) const {
- return MatchToolsFactory::extract_global_filter_params(rank_setup, rank_properties, active_docids, docid_limit);
+ return MatchToolsFactory::extract_attribute_blueprint_params(rank_setup, rank_properties, active_docids, docid_limit);
}
};
-TEST_F("global filter params are extracted from rank profile", GlobalFilterParamsFixture(0.2, 0.8))
+TEST_F("attribute blueprint params are extracted from rank profile", AttributeBlueprintParamsFixture(0.2, 0.8, 5.0))
{
auto params = f.extract();
EXPECT_EQUAL(0.2, params.global_filter_lower_limit);
EXPECT_EQUAL(0.8, params.global_filter_upper_limit);
+ EXPECT_EQUAL(5.0, params.target_hits_max_adjustment_factor);
}
-TEST_F("global filter params are extracted from query", GlobalFilterParamsFixture(0.2, 0.8))
+TEST_F("attribute blueprint params are extracted from query", AttributeBlueprintParamsFixture(0.2, 0.8, 5.0))
{
- f.set_query_properties("0.15", "0.75");
+ f.set_query_properties("0.15", "0.75", "3.0");
auto params = f.extract();
EXPECT_EQUAL(0.15, params.global_filter_lower_limit);
EXPECT_EQUAL(0.75, params.global_filter_upper_limit);
+ EXPECT_EQUAL(3.0, params.target_hits_max_adjustment_factor);
}
-TEST_F("global filter params are scaled with active hit ratio", GlobalFilterParamsFixture(0.2, 0.8))
+TEST_F("global filter params are scaled with active hit ratio", AttributeBlueprintParamsFixture(0.2, 0.8, 5.0))
{
auto params = f.extract(5, 10);
EXPECT_EQUAL(0.12, params.global_filter_lower_limit);
diff --git a/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp b/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp
index caedd363481..4a223967a89 100644
--- a/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp
+++ b/searchcore/src/tests/proton/proton_config_fetcher/proton_config_fetcher_test.cpp
@@ -123,8 +123,7 @@ struct ConfigTestFixture {
void removeDocType(const std::string & name)
{
- for (DocumenttypesConfigBuilder::DocumenttypeVector::iterator it(documenttypesBuilder.documenttype.begin()),
- mt(documenttypesBuilder.documenttype.end());
+ for (auto it(documenttypesBuilder.documenttype.begin()), mt(documenttypesBuilder.documenttype.end());
it != mt;
it++) {
if ((*it).name.compare(name) == 0) {
@@ -133,8 +132,7 @@ struct ConfigTestFixture {
}
}
- for (ProtonConfigBuilder::DocumentdbVector::iterator it(protonBuilder.documentdb.begin()),
- mt(protonBuilder.documentdb.end());
+ for (auto it(protonBuilder.documentdb.begin()), mt(protonBuilder.documentdb.end());
it != mt;
it++) {
if ((*it).inputdoctypename.compare(name) == 0) {
diff --git a/searchcore/src/tests/proton/statusreport/CMakeLists.txt b/searchcore/src/tests/proton/statusreport/CMakeLists.txt
index 155743b0795..5403857cd4f 100644
--- a/searchcore/src/tests/proton/statusreport/CMakeLists.txt
+++ b/searchcore/src/tests/proton/statusreport/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchcore_statusreport_test_app TEST
SOURCES
- statusreport.cpp
+ statusreport_test.cpp
DEPENDS
searchcore_pcommon
)
diff --git a/searchcore/src/tests/proton/statusreport/statusreport.cpp b/searchcore/src/tests/proton/statusreport/statusreport_test.cpp
index 10520912b2a..10520912b2a 100644
--- a/searchcore/src/tests/proton/statusreport/statusreport.cpp
+++ b/searchcore/src/tests/proton/statusreport/statusreport_test.cpp
diff --git a/searchcore/src/tests/proton/summaryengine/CMakeLists.txt b/searchcore/src/tests/proton/summaryengine/CMakeLists.txt
index a32d005decd..599dfd61e49 100644
--- a/searchcore/src/tests/proton/summaryengine/CMakeLists.txt
+++ b/searchcore/src/tests/proton/summaryengine/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchcore_summaryengine_test_app TEST
SOURCES
- summaryengine.cpp
+ summaryengine_test.cpp
DEPENDS
searchcore_summaryengine
searchcore_pcommon
diff --git a/searchcore/src/tests/proton/summaryengine/summaryengine.cpp b/searchcore/src/tests/proton/summaryengine/summaryengine_test.cpp
index f1183c2556a..f1183c2556a 100644
--- a/searchcore/src/tests/proton/summaryengine/summaryengine.cpp
+++ b/searchcore/src/tests/proton/summaryengine/summaryengine_test.cpp
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp
index 9d86b2d28e1..7df116a5f0c 100644
--- a/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp
@@ -30,7 +30,6 @@
#include <vespa/searchlib/index/dummyfileheadercontext.h>
#include <vespa/searchlib/transactionlog/translogserver.h>
#include <vespa/searchsummary/config/config-juniperrc.h>
-#include <vespa/storage/bucketdb/config-stor-bucket-init.h>
#include <vespa/storage/common/i_storage_chain_builder.h>
#include <vespa/storage/config/config-stor-bouncer.h>
#include <vespa/storage/config/config-stor-communicationmanager.h>
@@ -103,7 +102,6 @@ using vespa::config::content::UpgradingConfigBuilder;
using vespa::config::content::core::BucketspacesConfig;
using vespa::config::content::core::BucketspacesConfigBuilder;
using vespa::config::content::core::StorBouncerConfigBuilder;
-using vespa::config::content::core::StorBucketInitConfigBuilder;
using vespa::config::content::core::StorCommunicationmanagerConfigBuilder;
using vespa::config::content::core::StorDistributormanagerConfigBuilder;
using vespa::config::content::core::StorOpsloggerConfigBuilder;
@@ -355,7 +353,6 @@ struct ServiceLayerConfigSet : public StorageConfigSet
{
PersistenceConfigBuilder persistence;
StorFilestorConfigBuilder stor_filestor;
- StorBucketInitConfigBuilder stor_bucket_init;
StorVisitorConfigBuilder stor_visitor;
ServiceLayerConfigSet(const vespalib::string& base_dir, uint32_t node_idx, const vespalib::string& config_id_in, const IBmDistribution& distribution, const DocumenttypesConfig& documenttypes_in,
@@ -363,7 +360,6 @@ struct ServiceLayerConfigSet : public StorageConfigSet
: StorageConfigSet(base_dir, node_idx, false, config_id_in, distribution, documenttypes_in, slobrok_port, mbus_port, rpc_port, status_port, params),
persistence(),
stor_filestor(),
- stor_bucket_init(),
stor_visitor()
{
stor_filestor.numResponseThreads = params.get_response_threads();
@@ -377,7 +373,6 @@ struct ServiceLayerConfigSet : public StorageConfigSet
StorageConfigSet::add_builders(set);
set.addBuilder(config_id, &persistence);
set.addBuilder(config_id, &stor_filestor);
- set.addBuilder(config_id, &stor_bucket_init);
set.addBuilder(config_id, &stor_visitor);
}
};
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/attributedisklayout.cpp b/searchcore/src/vespa/searchcore/proton/attribute/attributedisklayout.cpp
index 04e263ae20b..40b24f2ec26 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/attributedisklayout.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/attributedisklayout.cpp
@@ -3,7 +3,6 @@
#include "attributedisklayout.h"
#include "attribute_directory.h"
#include <vespa/vespalib/io/fileutil.h>
-#include <vespa/fastos/file.h>
#include <cassert>
#include <filesystem>
@@ -34,12 +33,10 @@ AttributeDiskLayout::listAttributes()
void
AttributeDiskLayout::scanDir()
{
- FastOS_DirectoryScan dir(_baseDir.c_str());
- while (dir.ReadNext()) {
- if (strcmp(dir.GetName(), "..") != 0 && strcmp(dir.GetName(), ".") != 0) {
- if (dir.IsDirectory()) {
- createAttributeDir(dir.GetName());
- }
+ std::filesystem::directory_iterator dir_scan{std::filesystem::path(_baseDir)};
+ for (auto& entry : dir_scan) {
+ if (entry.is_directory()) {
+ createAttributeDir(entry.path().filename().string());
}
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/common/attribute_updater.cpp b/searchcore/src/vespa/searchcore/proton/common/attribute_updater.cpp
index ef9750b5f4c..2b78cdab966 100644
--- a/searchcore/src/vespa/searchcore/proton/common/attribute_updater.cpp
+++ b/searchcore/src/vespa/searchcore/proton/common/attribute_updater.cpp
@@ -223,7 +223,8 @@ AttributeUpdater::handleUpdate(TensorAttribute &vec, uint32_t lid, const ValueUp
updateValue(vec, lid, assign.getValue());
}
} else if (op == ValueUpdate::TensorModify) {
- vec.update_tensor(lid, static_cast<const TensorModifyUpdate &>(upd), false);
+ const auto& modify = static_cast<const TensorModifyUpdate&>(upd);
+ vec.update_tensor(lid, modify, modify.get_default_cell_value().has_value());
} else if (op == ValueUpdate::TensorAdd) {
vec.update_tensor(lid, static_cast<const TensorAddUpdate &>(upd), true);
} else if (op == ValueUpdate::TensorRemove) {
diff --git a/searchcore/src/vespa/searchcore/proton/feedoperation/lidvectorcontext.cpp b/searchcore/src/vespa/searchcore/proton/feedoperation/lidvectorcontext.cpp
index 3aecbc3ca0d..8a4786d337c 100644
--- a/searchcore/src/vespa/searchcore/proton/feedoperation/lidvectorcontext.cpp
+++ b/searchcore/src/vespa/searchcore/proton/feedoperation/lidvectorcontext.cpp
@@ -52,9 +52,9 @@ LidVectorContext::serialize(vespalib::nbostream &os) const
// Use of bitvector when > 1/32 of docs
if (_result.size() > (_docIdLimit / 32)) {
os << static_cast<int32_t>(BITVECTOR);
- BitVector::UP bitVector = BitVector::create(_docIdLimit);
- for (LidVector::const_iterator it(_result.begin()), mt(_result.end()); it != mt; it++) {
- bitVector->setBit(*it);
+ auto bitVector = BitVector::create(_docIdLimit);
+ for (auto docid : _result) {
+ bitVector->setBit(docid);
}
os << *bitVector;
} else {
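The "> 1/32 of docs" threshold in LidVectorContext::serialize above has a plausible size rationale, assuming one bit per possible docid for the bit vector and 32 bits per entry for the explicit lid list; the arithmetic below is only a back-of-the-envelope illustration, not Vespa code.

// Back-of-the-envelope size comparison for the 1/32 threshold; illustrative only.
final class LidSerializationSizeExample {
    public static void main(String[] args) {
        int docIdLimit = 1_000_000;
        int lidsAtThreshold = docIdLimit / 32;    // the cut-off used in LidVectorContext::serialize
        long bitVectorBytes = docIdLimit / 8;     // one bit per possible docid
        long lidListBytes = 4L * lidsAtThreshold; // one 32-bit lid per entry
        System.out.println(bitVectorBytes);       // 125000
        System.out.println(lidListBytes);         // 125000: above this count the bit vector is the smaller encoding
    }
}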
diff --git a/searchcore/src/vespa/searchcore/proton/feedoperation/removedocumentsoperation.h b/searchcore/src/vespa/searchcore/proton/feedoperation/removedocumentsoperation.h
index cdc738afa11..d0a323b4c9f 100644
--- a/searchcore/src/vespa/searchcore/proton/feedoperation/removedocumentsoperation.h
+++ b/searchcore/src/vespa/searchcore/proton/feedoperation/removedocumentsoperation.h
@@ -30,7 +30,7 @@ public:
const LidVectorContext::SP
getLidsToRemove(uint32_t subDbId) const {
- LidsToRemoveMap::const_iterator found(_lidsToRemoveMap.find(subDbId));
+ auto found = _lidsToRemoveMap.find(subDbId);
return (found != _lidsToRemoveMap.end()) ? found->second : LidVectorContext::SP();
}
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp b/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
index c7cbdc29689..a353d4816f6 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
@@ -176,11 +176,11 @@ MatchToolsFactory(QueryLimiter & queryLimiter,
const search::IDocumentMetaStoreContext::IReadGuard::SP * metaStoreReadGuard,
bool is_search)
: _queryLimiter(queryLimiter),
- _global_filter_params(extract_global_filter_params(rankSetup, rankProperties, metaStore.getNumActiveLids(), searchContext.getDocIdLimit())),
+ _attribute_blueprint_params(extract_attribute_blueprint_params(rankSetup, rankProperties, metaStore.getNumActiveLids(), searchContext.getDocIdLimit())),
_query(),
_match_limiter(),
_queryEnv(indexEnv, attributeContext, rankProperties, searchContext.getIndexes()),
- _requestContext(doom, attributeContext, _queryEnv, _queryEnv.getObjectStore(), _global_filter_params, metaStoreReadGuard),
+ _requestContext(doom, attributeContext, _queryEnv, _queryEnv.getObjectStore(), _attribute_blueprint_params, metaStoreReadGuard),
_mdl(),
_rankSetup(rankSetup),
_featureOverrides(featureOverrides),
@@ -203,8 +203,8 @@ MatchToolsFactory(QueryLimiter & queryLimiter,
_query.fetchPostings();
if (is_search) {
_query.handle_global_filter(searchContext.getDocIdLimit(),
- _global_filter_params.global_filter_lower_limit,
- _global_filter_params.global_filter_upper_limit,
+ _attribute_blueprint_params.global_filter_lower_limit,
+ _attribute_blueprint_params.global_filter_upper_limit,
thread_bundle, trace);
}
_query.freeze();
@@ -324,18 +324,20 @@ MatchToolsFactory::get_feature_rename_map() const
}
AttributeBlueprintParams
-MatchToolsFactory::extract_global_filter_params(const RankSetup& rank_setup, const Properties& rank_properties,
- uint32_t active_docids, uint32_t docid_limit)
+MatchToolsFactory::extract_attribute_blueprint_params(const RankSetup& rank_setup, const Properties& rank_properties,
+ uint32_t active_docids, uint32_t docid_limit)
{
double lower_limit = GlobalFilterLowerLimit::lookup(rank_properties, rank_setup.get_global_filter_lower_limit());
double upper_limit = GlobalFilterUpperLimit::lookup(rank_properties, rank_setup.get_global_filter_upper_limit());
+ double target_hits_max_adjustment_factor = TargetHitsMaxAdjustmentFactor::lookup(rank_properties, rank_setup.get_target_hits_max_adjustment_factor());
// Note that we count the reserved docid 0 as active.
// This ensures that when searchable-copies=1, the ratio is 1.0.
double active_hit_ratio = std::min(active_docids + 1, docid_limit) / static_cast<double>(docid_limit);
return {lower_limit * active_hit_ratio,
- upper_limit * active_hit_ratio};
+ upper_limit * active_hit_ratio,
+ target_hits_max_adjustment_factor};
}
AttributeOperationTask::AttributeOperationTask(const RequestContext & requestContext,
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_tools.h b/searchcore/src/vespa/searchcore/proton/matching/match_tools.h
index db30ea8d2b2..681690d4c36 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_tools.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_tools.h
@@ -121,7 +121,7 @@ private:
using IIndexEnvironment = search::fef::IIndexEnvironment;
using IDiversifier = search::queryeval::IDiversifier;
QueryLimiter & _queryLimiter;
- AttributeBlueprintParams _global_filter_params;
+ AttributeBlueprintParams _attribute_blueprint_params;
Query _query;
MaybeMatchPhaseLimiter::UP _match_limiter;
std::unique_ptr<RangeQueryLocator> _rangeLocator;
@@ -177,15 +177,15 @@ public:
const StringStringMap & get_feature_rename_map() const;
/**
- * Extracts global filter parameters from the rank-profile and query.
+ * Extracts attribute blueprint parameters from the rank-profile and query.
*
- * These parameters are expected to be in the range [0.0, 1.0], which matches the range of the estimated hit ratio of the query.
+ * The global filter parameters are expected to be in the range [0.0, 1.0], which matches the range of the estimated hit ratio of the query.
* When searchable-copies > 1, we must scale the parameters to match the effective range of the estimated hit ratio.
* This is done by multiplying with the active hit ratio (active docids / docid limit).
*/
static AttributeBlueprintParams
- extract_global_filter_params(const RankSetup& rank_setup, const Properties& rank_properties,
- uint32_t active_docids, uint32_t docid_limit);
+ extract_attribute_blueprint_params(const RankSetup& rank_setup, const Properties& rank_properties,
+ uint32_t active_docids, uint32_t docid_limit);
};
}
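To make the scaling described in the doc comment above concrete, here is the formula applied to the numbers used by the "scaled with active hit ratio" test in matching_test.cpp earlier in this diff (lower/upper limits 0.2/0.8, 5 active docids, docid limit 10). The snippet is a worked example only, not Vespa code.

// Worked example of the active-hit-ratio scaling; illustrative only.
final class GlobalFilterScalingExample {
    public static void main(String[] args) {
        int activeDocids = 5;
        int docidLimit = 10;
        double lowerLimit = 0.2;    // global-filter-lower-limit from the rank profile
        double upperLimit = 0.8;    // global-filter-upper-limit from the rank profile
        // The reserved docid 0 is counted as active, so the ratio is min(active + 1, limit) / limit.
        double activeHitRatio = Math.min(activeDocids + 1, docidLimit) / (double) docidLimit;  // 0.6
        System.out.printf("%.2f%n", lowerLimit * activeHitRatio);  // 0.12, as expected by matching_test.cpp
        System.out.printf("%.2f%n", upperLimit * activeHitRatio);  // 0.48
    }
}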
diff --git a/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp b/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp
index aedfde2521c..4a4a021d6d5 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/matcher.cpp
@@ -25,6 +25,7 @@ LOG_SETUP(".proton.matching.matcher");
using search::fef::Properties;
using namespace search::fef::indexproperties::matching;
+using namespace search::fef::indexproperties;
using namespace search::engine;
using namespace search::grouping;
using search::DocumentMetaData;
@@ -122,7 +123,7 @@ Matcher::Matcher(const search::index::Schema &schema, Properties props, const ve
_rankSetup(),
_viewResolver(ViewResolver::createFromSchema(schema)),
_statsLock(),
- _stats(),
+ _stats(softtimeout::Factor::lookup(_indexEnv.getProperties())),
_startTime(my_clock::now()),
_clock(clock),
_queryLimiter(queryLimiter),
@@ -149,9 +150,6 @@ Matcher::getStats()
return stats;
}
-using search::fef::indexproperties::softtimeout::Enabled;
-using search::fef::indexproperties::softtimeout::Factor;
-
std::unique_ptr<MatchToolsFactory>
Matcher::create_match_tools_factory(const search::engine::Request &request, ISearchContext &searchContext,
IAttributeContext &attrContext, const search::IDocumentMetaStore &metaStore,
@@ -160,11 +158,11 @@ Matcher::create_match_tools_factory(const search::engine::Request &request, ISea
bool is_search) const
{
const Properties & rankProperties = request.propertiesMap.rankProperties();
- bool softTimeoutEnabled = Enabled::lookup(rankProperties, _rankSetup->getSoftTimeoutEnabled());
- bool hasFactorOverride = Factor::isPresent(rankProperties);
+ bool softTimeoutEnabled = softtimeout::Enabled::lookup(rankProperties, _rankSetup->getSoftTimeoutEnabled());
+ bool hasFactorOverride = softtimeout::Factor::isPresent(rankProperties);
double factor = softTimeoutEnabled
? ( hasFactorOverride
- ? Factor::lookup(rankProperties, _stats.softDoomFactor())
+ ? softtimeout::Factor::lookup(rankProperties, _stats.softDoomFactor())
: _stats.softDoomFactor())
: 0.95;
vespalib::duration safeLeft = std::chrono::duration_cast<vespalib::duration>(request.getTimeLeft() * factor);
diff --git a/searchcore/src/vespa/searchcore/proton/matching/matching_stats.cpp b/searchcore/src/vespa/searchcore/proton/matching/matching_stats.cpp
index 86fb3cf8107..47c0fbc8c55 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/matching_stats.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/matching_stats.cpp
@@ -19,7 +19,7 @@ constexpr double MAX_CHANGE_FACTOR = 5;
} // namespace proton::matching::<unnamed>
-MatchingStats::MatchingStats(double prev_soft_doom_factor)
+MatchingStats::MatchingStats(double prev_soft_doom_factor) noexcept
: _queries(0),
_limited_queries(0),
_docidSpaceCovered(0),
@@ -57,7 +57,7 @@ MatchingStats::merge_partition(const Partition &partition, size_t id)
}
MatchingStats &
-MatchingStats::add(const MatchingStats &rhs)
+MatchingStats::add(const MatchingStats &rhs) noexcept
{
_queries += rhs._queries;
_limited_queries += rhs._limited_queries;
diff --git a/searchcore/src/vespa/searchcore/proton/matching/matching_stats.h b/searchcore/src/vespa/searchcore/proton/matching/matching_stats.h
index 4139bfbaf66..a9f7d3258d9 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/matching_stats.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/matching_stats.h
@@ -24,21 +24,21 @@ private:
double _min;
double _max;
public:
- Avg() : _value(0.0), _count(0), _min(0.0), _max(0.0) {}
- Avg & set(double value) {
+ Avg() noexcept : _value(0.0), _count(0), _min(0.0), _max(0.0) {}
+ Avg & set(double value) noexcept {
_value = value;
_count = 1;
_min = value;
_max = value;
return *this;
}
- double avg() const {
+ double avg() const noexcept {
return (_count > 0) ? (_value / _count) : 0;
}
- size_t count() const { return _count; }
- double min() const { return _min; }
- double max() const { return _max; }
- void add(const Avg &other) {
+ size_t count() const noexcept { return _count; }
+ double min() const noexcept { return _min; }
+ double max() const noexcept { return _max; }
+ void add(const Avg &other) noexcept {
if (_count == 0) {
_min = other._min;
_max = other._max;
@@ -78,31 +78,31 @@ public:
_active_time(),
_wait_time() { }
- Partition &docsCovered(size_t value) { _docsCovered = value; return *this; }
- size_t docsCovered() const { return _docsCovered; }
- Partition &docsMatched(size_t value) { _docsMatched = value; return *this; }
- size_t docsMatched() const { return _docsMatched; }
- Partition &docsRanked(size_t value) { _docsRanked = value; return *this; }
- size_t docsRanked() const { return _docsRanked; }
- Partition &docsReRanked(size_t value) { _docsReRanked = value; return *this; }
- size_t docsReRanked() const { return _docsReRanked; }
- Partition &softDoomed(bool v) { _softDoomed += v ? 1 : 0; return *this; }
- size_t softDoomed() const { return _softDoomed; }
- Partition & doomOvertime(vespalib::duration overtime) { _doomOvertime.set(vespalib::to_s(overtime)); return *this; }
- vespalib::duration doomOvertime() const { return vespalib::from_s(_doomOvertime.max()); }
-
- Partition &active_time(double time_s) { _active_time.set(time_s); return *this; }
- double active_time_avg() const { return _active_time.avg(); }
- size_t active_time_count() const { return _active_time.count(); }
- double active_time_min() const { return _active_time.min(); }
- double active_time_max() const { return _active_time.max(); }
- Partition &wait_time(double time_s) { _wait_time.set(time_s); return *this; }
- double wait_time_avg() const { return _wait_time.avg(); }
- size_t wait_time_count() const { return _wait_time.count(); }
- double wait_time_min() const { return _wait_time.min(); }
- double wait_time_max() const { return _wait_time.max(); }
-
- Partition &add(const Partition &rhs) {
+ Partition &docsCovered(size_t value) noexcept { _docsCovered = value; return *this; }
+ size_t docsCovered() const noexcept { return _docsCovered; }
+ Partition &docsMatched(size_t value) noexcept { _docsMatched = value; return *this; }
+ size_t docsMatched() const noexcept { return _docsMatched; }
+ Partition &docsRanked(size_t value) noexcept { _docsRanked = value; return *this; }
+ size_t docsRanked() const noexcept { return _docsRanked; }
+ Partition &docsReRanked(size_t value) noexcept { _docsReRanked = value; return *this; }
+ size_t docsReRanked() const noexcept { return _docsReRanked; }
+ Partition &softDoomed(bool v) noexcept { _softDoomed += v ? 1 : 0; return *this; }
+ size_t softDoomed() const noexcept { return _softDoomed; }
+ Partition & doomOvertime(vespalib::duration overtime) noexcept { _doomOvertime.set(vespalib::to_s(overtime)); return *this; }
+ vespalib::duration doomOvertime() const noexcept { return vespalib::from_s(_doomOvertime.max()); }
+
+ Partition &active_time(double time_s) noexcept { _active_time.set(time_s); return *this; }
+ double active_time_avg() const noexcept { return _active_time.avg(); }
+ size_t active_time_count() const noexcept { return _active_time.count(); }
+ double active_time_min() const noexcept { return _active_time.min(); }
+ double active_time_max() const noexcept { return _active_time.max(); }
+ Partition &wait_time(double time_s) noexcept { _wait_time.set(time_s); return *this; }
+ double wait_time_avg() const noexcept { return _wait_time.avg(); }
+ size_t wait_time_count() const noexcept { return _wait_time.count(); }
+ double wait_time_min() const noexcept { return _wait_time.min(); }
+ double wait_time_max() const noexcept { return _wait_time.max(); }
+
+ Partition &add(const Partition &rhs) noexcept {
_docsCovered += rhs.docsCovered();
_docsMatched += rhs._docsMatched;
_docsRanked += rhs._docsRanked;
@@ -138,9 +138,10 @@ public:
static constexpr double INITIAL_SOFT_DOOM_FACTOR = 0.5;
MatchingStats(const MatchingStats &) = delete;
MatchingStats & operator = (const MatchingStats &) = delete;
- MatchingStats(MatchingStats &&) = default;
- MatchingStats & operator = (MatchingStats &&) = default;
- MatchingStats(double prev_soft_doom_factor = INITIAL_SOFT_DOOM_FACTOR);
+ MatchingStats(MatchingStats &&) noexcept = default;
+ MatchingStats & operator = (MatchingStats &&) noexcept = default;
+ MatchingStats() noexcept : MatchingStats(INITIAL_SOFT_DOOM_FACTOR) {}
+ MatchingStats(double prev_soft_doom_factor) noexcept;
~MatchingStats();
MatchingStats &queries(size_t value) { _queries = value; return *this; }
@@ -206,7 +207,7 @@ public:
const Partition &getPartition(size_t index) const { return _partitions[index]; }
// used to aggregate accross searches (and configurations)
- MatchingStats &add(const MatchingStats &rhs);
+ MatchingStats &add(const MatchingStats &rhs) noexcept;
};
}
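The Avg helper whose add() is visible at the top of this hunk merges two accumulators: an empty accumulator adopts the other side's min/max before counts are combined. A minimal standalone sketch of that merge pattern follows; the field names and the running-sum representation are assumptions for illustration only, not the actual Avg internals.

    // Sketch of a mergeable min/max/avg accumulator (internals are assumed, not copied).
    #include <algorithm>
    #include <cstddef>

    struct MinMaxAvg {
        double _sum = 0.0;
        size_t _count = 0;
        double _min = 0.0;
        double _max = 0.0;
        void set(double v) noexcept {
            _min = (_count == 0) ? v : std::min(_min, v);
            _max = (_count == 0) ? v : std::max(_max, v);
            _sum += v;
            ++_count;
        }
        void add(const MinMaxAvg& other) noexcept {
            if (_count == 0) {                 // empty side adopts the other side's extremes
                _min = other._min;
                _max = other._max;
            } else if (other._count > 0) {
                _min = std::min(_min, other._min);
                _max = std::max(_max, other._max);
            }
            _sum += other._sum;
            _count += other._count;
        }
        double avg() const noexcept { return (_count > 0) ? _sum / double(_count) : 0.0; }
    };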
diff --git a/searchcore/src/vespa/searchcore/proton/matching/viewresolver.cpp b/searchcore/src/vespa/searchcore/proton/matching/viewresolver.cpp
index b3ccfd26569..f958a8d7c59 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/viewresolver.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/viewresolver.cpp
@@ -17,7 +17,7 @@ bool
ViewResolver::resolve(vespalib::stringref view,
std::vector<vespalib::string> &fields) const
{
- Map::const_iterator pos = _map.find(view);
+ auto pos = _map.find(view);
if (pos == _map.end()) {
fields.push_back(view);
return false;
diff --git a/searchcore/src/vespa/searchcore/proton/metrics/metrics_engine.cpp b/searchcore/src/vespa/searchcore/proton/metrics/metrics_engine.cpp
index 4f7e0e66d9f..5e799898ccf 100644
--- a/searchcore/src/vespa/searchcore/proton/metrics/metrics_engine.cpp
+++ b/searchcore/src/vespa/searchcore/proton/metrics/metrics_engine.cpp
@@ -27,16 +27,9 @@ MetricsEngine::start(const config::ConfigUri &)
metrics::MetricLockGuard guard(_manager->getMetricLock());
_manager->registerMetric(guard, *_root);
}
-
- // Storage doesnt snapshot unset metrics to save memory. Currently
- // feature seems a bit bugged. Disabling this optimalization for search.
- // Can enable it later when it is confirmed to be working well.
_manager->snapshotUnsetMetrics(true);
-
- // Currently, when injecting a metric manager into the content layer,
- // the content layer require to be the one initializing and starting it.
- // Thus not calling init here, but further out in the application when
- // one knows whether we are running in row/column mode or not
+ // Starting the metric manager worker thread (MetricManager::init()) is not done here, as the service
+ // layer code has not had the opportunity to create its metrics yet. Deferred to service layer init code.
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
index e4a4e6761aa..a535d180622 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/fileconfigmanager.cpp
@@ -17,7 +17,6 @@
#include <vespa/config-summary.h>
#include <vespa/searchsummary/config/config-juniperrc.h>
#include <vespa/config/helper/configgetter.hpp>
-#include <vespa/fastos/file.h>
#include <filesystem>
#include <sstream>
#include <cassert>
@@ -193,12 +192,9 @@ std::vector<vespalib::string>
getFileList(const vespalib::string &snapDir)
{
std::vector<vespalib::string> res;
- FastOS_DirectoryScan dirScan(snapDir.c_str());
- while (dirScan.ReadNext()) {
- if (strcmp(dirScan.GetName(), ".") == 0 ||
- strcmp(dirScan.GetName(), "..") == 0)
- continue;
- res.push_back(dirScan.GetName());
+ std::filesystem::directory_iterator dir_scan{std::filesystem::path(snapDir)};
+ for (auto& entry : dir_scan) {
+ res.emplace_back(entry.path().filename().string());
}
std::sort(res.begin(), res.end());
return res;
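The getFileList() rewrite above is one of several places in this patch that replace FastOS_DirectoryScan with std::filesystem::directory_iterator. Two properties of the standard iterator are worth keeping in mind: it never yields the "." and ".." entries, so the old explicit skip is no longer needed, and the default constructor throws std::filesystem::filesystem_error if the directory cannot be opened. A minimal standalone sketch, using plain std::string instead of vespalib::string and the non-throwing error_code overload (both are illustrative choices, not what the patched code does):

    // Sketch: list and sort the file names in a directory without FastOS.
    #include <algorithm>
    #include <filesystem>
    #include <string>
    #include <system_error>
    #include <vector>

    std::vector<std::string> list_sorted(const std::string& dir) {
        std::vector<std::string> names;
        std::error_code ec;
        std::filesystem::directory_iterator scan(dir, ec);   // no exception on failure
        if (ec) {
            return names;   // empty on error; the patched code assumes the directory exists
        }
        for (const auto& entry : scan) {
            names.emplace_back(entry.path().filename().string());   // "." and ".." never appear
        }
        std::sort(names.begin(), names.end());
        return names;
    }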
diff --git a/searchcore/src/vespa/searchcore/proton/server/matchers.cpp b/searchcore/src/vespa/searchcore/proton/server/matchers.cpp
index 969e4b9cbdf..e4a3243d6f4 100644
--- a/searchcore/src/vespa/searchcore/proton/server/matchers.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/matchers.cpp
@@ -57,7 +57,7 @@ Matchers::getStats(const vespalib::string &name) const
std::shared_ptr<Matcher>
Matchers::lookup(const vespalib::string &name) const
{
- Map::const_iterator found(_rpmap.find(name));
+ auto found = _rpmap.find(name);
if (found == _rpmap.end()) {
if (_default) {
vespalib::Issue::report(fmt("Failed to find rank-profile '%s'. Falling back to 'default'", name.c_str()));
diff --git a/searchcore/src/vespa/searchcore/proton/test/userdocuments.h b/searchcore/src/vespa/searchcore/proton/test/userdocuments.h
index 60d9ed3d249..15d41919bb8 100644
--- a/searchcore/src/vespa/searchcore/proton/test/userdocuments.h
+++ b/searchcore/src/vespa/searchcore/proton/test/userdocuments.h
@@ -29,7 +29,7 @@ public:
_docs[userId].addDoc(userDoc);
}
const BucketDocuments &getUserDocs(uint32_t userId) const {
- DocMap::const_iterator itr = _docs.find(userId);
+ auto itr = _docs.find(userId);
assert(itr != _docs.end());
return itr->second;
}
diff --git a/searchcore/src/vespa/searchcorespi/index/diskindexcleaner.cpp b/searchcore/src/vespa/searchcorespi/index/diskindexcleaner.cpp
index 51d6938b13b..8126774078e 100644
--- a/searchcore/src/vespa/searchcorespi/index/diskindexcleaner.cpp
+++ b/searchcore/src/vespa/searchcorespi/index/diskindexcleaner.cpp
@@ -22,13 +22,11 @@ namespace searchcorespi::index {
namespace {
vector<string> readIndexes(const string &base_dir) {
vector<string> indexes;
- FastOS_DirectoryScan dir_scan(base_dir.c_str());
- while (dir_scan.ReadNext()) {
- string name = dir_scan.GetName();
- if (!dir_scan.IsDirectory() || name.find("index.") != 0) {
- continue;
+ std::filesystem::directory_iterator dir_scan{std::filesystem::path(base_dir)};
+ for (auto& entry : dir_scan) {
+ if (entry.is_directory() && entry.path().filename().string().find("index.") == 0) {
+ indexes.emplace_back(entry.path().filename().string());
}
- indexes.push_back(name);
}
return indexes;
}
diff --git a/searchcore/src/vespa/searchcorespi/index/indexreadutilities.cpp b/searchcore/src/vespa/searchcorespi/index/indexreadutilities.cpp
index 14556ddef29..010a3174e1c 100644
--- a/searchcore/src/vespa/searchcorespi/index/indexreadutilities.cpp
+++ b/searchcore/src/vespa/searchcorespi/index/indexreadutilities.cpp
@@ -4,6 +4,7 @@
#include "indexdisklayout.h"
#include <vespa/fastlib/io/bufferedfile.h>
#include <vespa/vespalib/data/fileheader.h>
+#include <filesystem>
#include <set>
#include <vector>
@@ -25,22 +26,21 @@ scanForIndexes(const vespalib::string &baseDir,
std::vector<vespalib::string> &flushDirs,
vespalib::string &fusionDir)
{
- FastOS_DirectoryScan dirScan(baseDir.c_str());
- while (dirScan.ReadNext()) {
- if (!dirScan.IsDirectory()) {
- continue;
- }
- vespalib::string name = dirScan.GetName();
- if (name.find(IndexDiskLayout::FlushDirPrefix) == 0) {
- flushDirs.push_back(name);
- }
- if (name.find(IndexDiskLayout::FusionDirPrefix) == 0) {
- if (!fusionDir.empty()) {
- // Should never happen, since we run cleanup before load.
- LOG(warning, "Base directory '%s' contains multiple fusion indexes",
- baseDir.c_str());
+ std::filesystem::directory_iterator dir_scan{std::filesystem::path(baseDir)};
+ for (auto& entry : dir_scan) {
+ if (entry.is_directory()) {
+ vespalib::string name = entry.path().filename().string();
+ if (name.find(IndexDiskLayout::FlushDirPrefix) == 0) {
+ flushDirs.push_back(name);
+ }
+ if (name.find(IndexDiskLayout::FusionDirPrefix) == 0) {
+ if (!fusionDir.empty()) {
+ // Should never happen, since we run cleanup before load.
+ LOG(warning, "Base directory '%s' contains multiple fusion indexes",
+ baseDir.c_str());
+ }
+ fusionDir = name;
}
- fusionDir = name;
}
}
}
diff --git a/searchlib/CMakeLists.txt b/searchlib/CMakeLists.txt
index e885f9bfba7..c0fdf03d262 100644
--- a/searchlib/CMakeLists.txt
+++ b/searchlib/CMakeLists.txt
@@ -173,7 +173,7 @@ vespa_define_module(
src/tests/hitcollector
src/tests/index/field_length_calculator
src/tests/indexmetainfo
- src/tests/ld-library-path
+ src/tests/ld_library_path
src/tests/memoryindex/compact_words_store
src/tests/memoryindex/datastore
src/tests/memoryindex/document_inverter
diff --git a/searchlib/src/tests/aggregator/CMakeLists.txt b/searchlib/src/tests/aggregator/CMakeLists.txt
index 12c70cb69ff..b892637d685 100644
--- a/searchlib/src/tests/aggregator/CMakeLists.txt
+++ b/searchlib/src/tests/aggregator/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_perdocexpr_test_app TEST
SOURCES
- perdocexpr.cpp
+ perdocexpr_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/aggregator/perdocexpr.cpp b/searchlib/src/tests/aggregator/perdocexpr_test.cpp
index 1c07843d71e..1c07843d71e 100644
--- a/searchlib/src/tests/aggregator/perdocexpr.cpp
+++ b/searchlib/src/tests/aggregator/perdocexpr_test.cpp
diff --git a/searchlib/src/tests/alignment/CMakeLists.txt b/searchlib/src/tests/alignment/CMakeLists.txt
index b3459d3afa5..965f35d6a28 100644
--- a/searchlib/src/tests/alignment/CMakeLists.txt
+++ b/searchlib/src/tests/alignment/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_alignment_test_app TEST
SOURCES
- alignment.cpp
+ alignment_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/alignment/alignment.cpp b/searchlib/src/tests/alignment/alignment_test.cpp
index 3c6906f45bf..3c6906f45bf 100644
--- a/searchlib/src/tests/alignment/alignment.cpp
+++ b/searchlib/src/tests/alignment/alignment_test.cpp
diff --git a/searchlib/src/tests/attribute/attribute_test.cpp b/searchlib/src/tests/attribute/attribute_test.cpp
index 547a7f3ab53..dfd7722d728 100644
--- a/searchlib/src/tests/attribute/attribute_test.cpp
+++ b/searchlib/src/tests/attribute/attribute_test.cpp
@@ -18,6 +18,7 @@
#include <vespa/document/update/assignvalueupdate.h>
#include <vespa/document/update/mapvalueupdate.h>
#include <vespa/vespalib/gtest/gtest.h>
+#include <vespa/vespalib/util/mmap_file_allocator.h>
#include <vespa/vespalib/util/mmap_file_allocator_factory.h>
#include <vespa/vespalib/util/round_up_to_page_size.h>
#include <vespa/vespalib/util/size_literals.h>
@@ -2324,17 +2325,11 @@ int
AttributeTest::test_paged_attribute(const vespalib::string& name, const vespalib::string& swapfile, const search::attribute::Config& cfg)
{
int result = 1;
- size_t rounded_size = vespalib::round_up_to_page_size(1);
- size_t lid_mapping_size = 1200;
- size_t sv_maxlid = 1200;
- if (rounded_size == 16_Ki) {
- lid_mapping_size = 4200;
- sv_maxlid = 1300;
- }
- if (rounded_size == 64_Ki) {
- lid_mapping_size = 17000;
- sv_maxlid = 1500;
- }
+ size_t rounded_size = std::max(vespalib::round_up_to_page_size(1), size_t(vespalib::alloc::MmapFileAllocator::default_small_limit));
+ constexpr uint32_t mv_copies = 64;
+ size_t lid_mapping_size = rounded_size / 4 + 100;
+ size_t sv_maxlid = rounded_size / 5 + 100;
+ size_t mv_maxlid = rounded_size / (mv_copies * 5) + 100;
if (cfg.basicType() == search::attribute::BasicType::Type::BOOL) {
lid_mapping_size = rounded_size * 8 + 100;
}
@@ -2354,9 +2349,9 @@ AttributeTest::test_paged_attribute(const vespalib::string& name, const vespalib
EXPECT_LT(size1, size2);
if (cfg.collectionType().isMultiValue()) {
// Grow multi value mapping
- for (uint32_t lid = 1; lid < 100; ++lid) {
+ for (uint32_t lid = 1; lid < mv_maxlid; ++lid) {
av->clearDoc(lid);
- for (uint32_t i = 0; i < 50; ++i) {
+ for (uint32_t i = 0; i < mv_copies; ++i) {
EXPECT_TRUE(v->append(lid, 0, 1));
}
av->commit();
@@ -2367,15 +2362,15 @@ AttributeTest::test_paged_attribute(const vespalib::string& name, const vespalib
}
if (cfg.fastSearch()) {
// Grow enum store
- uint32_t maxlid = cfg.collectionType().isMultiValue() ? 100 : sv_maxlid;
+ uint32_t maxlid = cfg.collectionType().isMultiValue() ? mv_maxlid : sv_maxlid;
for (uint32_t lid = 1; lid < maxlid; ++lid) {
av->clearDoc(lid);
if (cfg.collectionType().isMultiValue()) {
- for (uint32_t i = 0; i < 50; ++i) {
- EXPECT_TRUE(v->append(lid, lid * 100 + i, 1));
+ for (uint32_t i = 0; i < mv_copies; ++i) {
+ EXPECT_TRUE(v->append(lid, lid * mv_copies + i, 1));
}
} else {
- EXPECT_TRUE(v->update(lid, lid * 100));
+ EXPECT_TRUE(v->update(lid, lid));
}
av->commit();
}
diff --git a/searchlib/src/tests/attribute/extendattributes/CMakeLists.txt b/searchlib/src/tests/attribute/extendattributes/CMakeLists.txt
index f733f3d0091..70b7f40c8d9 100644
--- a/searchlib/src/tests/attribute/extendattributes/CMakeLists.txt
+++ b/searchlib/src/tests/attribute/extendattributes/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_extendattribute_test_app TEST
SOURCES
- extendattribute.cpp
+ extendattribute_test.cpp
DEPENDS
searchlib
GTest::GTest
diff --git a/searchlib/src/tests/attribute/extendattributes/extendattribute.cpp b/searchlib/src/tests/attribute/extendattributes/extendattribute_test.cpp
index 3f775e99891..3f775e99891 100644
--- a/searchlib/src/tests/attribute/extendattributes/extendattribute.cpp
+++ b/searchlib/src/tests/attribute/extendattributes/extendattribute_test.cpp
diff --git a/searchlib/src/tests/attribute/guard/CMakeLists.txt b/searchlib/src/tests/attribute/guard/CMakeLists.txt
index 7a2dfe2f8cf..4605426740b 100644
--- a/searchlib/src/tests/attribute/guard/CMakeLists.txt
+++ b/searchlib/src/tests/attribute/guard/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_attributeguard_test_app TEST
SOURCES
- attributeguard.cpp
+ attributeguard_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/attribute/guard/attributeguard.cpp b/searchlib/src/tests/attribute/guard/attributeguard_test.cpp
index 709316cc6fd..709316cc6fd 100644
--- a/searchlib/src/tests/attribute/guard/attributeguard.cpp
+++ b/searchlib/src/tests/attribute/guard/attributeguard_test.cpp
diff --git a/searchlib/src/tests/attribute/postinglist/CMakeLists.txt b/searchlib/src/tests/attribute/postinglist/CMakeLists.txt
index e0f2e7106cc..068bde766ed 100644
--- a/searchlib/src/tests/attribute/postinglist/CMakeLists.txt
+++ b/searchlib/src/tests/attribute/postinglist/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_postinglist_test_app TEST
SOURCES
- postinglist.cpp
+ postinglist_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/attribute/postinglist/postinglist.cpp b/searchlib/src/tests/attribute/postinglist/postinglist_test.cpp
index 39e31b23498..39e31b23498 100644
--- a/searchlib/src/tests/attribute/postinglist/postinglist.cpp
+++ b/searchlib/src/tests/attribute/postinglist/postinglist_test.cpp
diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
index 6ca7d298ee2..0475f8462fc 100644
--- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
+++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
@@ -1320,15 +1320,16 @@ public:
return *_query_tensor;
}
- std::unique_ptr<NearestNeighborBlueprint> make_blueprint(bool approximate = true, double global_filter_lower_limit = 0.05) {
+ std::unique_ptr<NearestNeighborBlueprint> make_blueprint(bool approximate = true,
+ double global_filter_lower_limit = 0.05,
+ double target_hits_max_adjustment_factor = 20.0) {
search::queryeval::FieldSpec field("foo", 0, 0);
auto bp = std::make_unique<NearestNeighborBlueprint>(
field,
std::make_unique<DistanceCalculator>(this->as_dense_tensor(),
create_query_tensor(vec_2d(17, 42))),
- 3, approximate, 5,
- 100100.25,
- global_filter_lower_limit, 1.0, _no_doom.get_doom());
+ 3, approximate, 5, 100100.25,
+ global_filter_lower_limit, 1.0, target_hits_max_adjustment_factor, _no_doom.get_doom());
EXPECT_EQUAL(11u, bp->getState().estimate().estHits);
EXPECT_EQUAL(100100.25 * 100100.25, bp->get_distance_threshold());
return bp;
@@ -1362,6 +1363,19 @@ TEST_F("NN blueprint handles empty filter (post-filtering)", NearestNeighborBlue
EXPECT_EQUAL(NNBA::INDEX_TOP_K, bp->get_algorithm());
}
+TEST_F("NN blueprint adjustment of targetHits is bound (post-filtering)", NearestNeighborBlueprintFixture)
+{
+ auto bp = f.make_blueprint(true, 0.05, 3.5);
+ auto empty_filter = GlobalFilter::create();
+ bp->set_global_filter(*empty_filter, 0.2);
+ // targetHits is adjusted based on the estimated hit ratio of the query,
+ // but bound by target-hits-max-adjustment-factor
+ EXPECT_EQUAL(3u, bp->get_target_hits());
+ EXPECT_EQUAL(10u, bp->get_adjusted_target_hits());
+ EXPECT_EQUAL(10u, bp->getState().estimate().estHits);
+ EXPECT_EQUAL(NNBA::INDEX_TOP_K, bp->get_algorithm());
+}
+
TEST_F("NN blueprint handles strong filter (pre-filtering)", NearestNeighborBlueprintFixture)
{
auto bp = f.make_blueprint();
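The expectations in the new test above follow from the adjustment formula documented later in this patch (indexproperties.h): adjustedTargetHits = min(targetHits / estimatedHitRatio, targetHits * targetHitsMaxAdjustmentFactor). With targetHits = 3, an estimated hit ratio of 0.2, and a max adjustment factor of 3.5, the ratio term gives 15 while the bound gives 10.5, so the bound wins. A small arithmetic check; the truncation to an unsigned value is an assumption about how the blueprint rounds:

    // Reproduces the numbers expected by the test above.
    #include <algorithm>
    #include <cstdio>

    int main() {
        double target_hits = 3.0;
        double estimated_hit_ratio = 0.2;      // from set_global_filter(*empty_filter, 0.2)
        double max_adjustment_factor = 3.5;    // third argument to make_blueprint
        double adjusted = std::min(target_hits / estimated_hit_ratio,     // 15.0
                                   target_hits * max_adjustment_factor);  // 10.5
        std::printf("adjusted target hits: %u\n", static_cast<unsigned>(adjusted));  // prints 10
        return 0;
    }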
diff --git a/searchlib/src/tests/common/summaryfeatures/CMakeLists.txt b/searchlib/src/tests/common/summaryfeatures/CMakeLists.txt
index 7ad71a22c84..d0050c96c22 100644
--- a/searchlib/src/tests/common/summaryfeatures/CMakeLists.txt
+++ b/searchlib/src/tests/common/summaryfeatures/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_summaryfeatures_test_app TEST
SOURCES
- summaryfeatures.cpp
+ summaryfeatures_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/common/summaryfeatures/summaryfeatures.cpp b/searchlib/src/tests/common/summaryfeatures/summaryfeatures_test.cpp
index 73a81be9f90..73a81be9f90 100644
--- a/searchlib/src/tests/common/summaryfeatures/summaryfeatures.cpp
+++ b/searchlib/src/tests/common/summaryfeatures/summaryfeatures_test.cpp
diff --git a/searchlib/src/tests/diskindex/pagedict4/CMakeLists.txt b/searchlib/src/tests/diskindex/pagedict4/CMakeLists.txt
index a0dca470f22..99183b13bc5 100644
--- a/searchlib/src/tests/diskindex/pagedict4/CMakeLists.txt
+++ b/searchlib/src/tests/diskindex/pagedict4/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_pagedict4_test_app TEST
SOURCES
- pagedict4test.cpp
+ pagedict4_test.cpp
DEPENDS
searchlib_test
searchlib
diff --git a/searchlib/src/tests/diskindex/pagedict4/pagedict4test.cpp b/searchlib/src/tests/diskindex/pagedict4/pagedict4_test.cpp
index 408cf370c59..408cf370c59 100644
--- a/searchlib/src/tests/diskindex/pagedict4/pagedict4test.cpp
+++ b/searchlib/src/tests/diskindex/pagedict4/pagedict4_test.cpp
diff --git a/searchlib/src/tests/features/CMakeLists.txt b/searchlib/src/tests/features/CMakeLists.txt
index c5268e307ac..c9c05e565be 100644
--- a/searchlib/src/tests/features/CMakeLists.txt
+++ b/searchlib/src/tests/features/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_prod_features_test_app TEST
SOURCES
- prod_features.cpp
+ prod_features_test.cpp
prod_features_framework.cpp
prod_features_attributematch.cpp
prod_features_fieldmatch.cpp
diff --git a/searchlib/src/tests/features/beta/CMakeLists.txt b/searchlib/src/tests/features/beta/CMakeLists.txt
index 11b34d3dbdd..dfaee48efeb 100644
--- a/searchlib/src/tests/features/beta/CMakeLists.txt
+++ b/searchlib/src/tests/features/beta/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_beta_features_test_app TEST
SOURCES
- beta_features.cpp
+ beta_features_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/features/beta/beta_features.cpp b/searchlib/src/tests/features/beta/beta_features_test.cpp
index 622228ff168..622228ff168 100644
--- a/searchlib/src/tests/features/beta/beta_features.cpp
+++ b/searchlib/src/tests/features/beta/beta_features_test.cpp
diff --git a/searchlib/src/tests/features/prod_features_attributematch.cpp b/searchlib/src/tests/features/prod_features_attributematch.cpp
index fd6fdca96fc..057d7a821d4 100644
--- a/searchlib/src/tests/features/prod_features_attributematch.cpp
+++ b/searchlib/src/tests/features/prod_features_attributematch.cpp
@@ -1,6 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "prod_features.h"
+#include "prod_features_test.h"
#include <vespa/searchlib/features/attributematchfeature.h>
#include <vespa/searchlib/attribute/attributefactory.h>
#include <vespa/searchlib/attribute/attributevector.h>
diff --git a/searchlib/src/tests/features/prod_features_fieldmatch.cpp b/searchlib/src/tests/features/prod_features_fieldmatch.cpp
index c6ff6232c23..61d9313bae6 100644
--- a/searchlib/src/tests/features/prod_features_fieldmatch.cpp
+++ b/searchlib/src/tests/features/prod_features_fieldmatch.cpp
@@ -1,6 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "prod_features.h"
+#include "prod_features_test.h"
#include <vespa/searchlib/features/fieldmatchfeature.h>
#include <vespa/log/log.h>
diff --git a/searchlib/src/tests/features/prod_features_fieldtermmatch.cpp b/searchlib/src/tests/features/prod_features_fieldtermmatch.cpp
index be4711e1fec..efd4dc7eb4f 100644
--- a/searchlib/src/tests/features/prod_features_fieldtermmatch.cpp
+++ b/searchlib/src/tests/features/prod_features_fieldtermmatch.cpp
@@ -1,6 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "prod_features.h"
+#include "prod_features_test.h"
#include <vespa/searchlib/features/fieldtermmatchfeature.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/log/log.h>
diff --git a/searchlib/src/tests/features/prod_features_framework.cpp b/searchlib/src/tests/features/prod_features_framework.cpp
index 1a0c5014edb..6c89092e41d 100644
--- a/searchlib/src/tests/features/prod_features_framework.cpp
+++ b/searchlib/src/tests/features/prod_features_framework.cpp
@@ -2,7 +2,7 @@
#include <vespa/log/log.h>
LOG_SETUP(".prod_features_framework");
-#include "prod_features.h"
+#include "prod_features_test.h"
#include <vespa/searchlib/features/valuefeature.h>
using namespace search::features;
diff --git a/searchlib/src/tests/features/prod_features.cpp b/searchlib/src/tests/features/prod_features_test.cpp
index c22d3b3abb8..10d1a9bdc8e 100644
--- a/searchlib/src/tests/features/prod_features.cpp
+++ b/searchlib/src/tests/features/prod_features_test.cpp
@@ -1,6 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "prod_features.h"
+#include "prod_features_test.h"
#include <vespa/searchcommon/attribute/config.h>
#include <vespa/searchlib/attribute/attributefactory.h>
#include <vespa/searchlib/attribute/attributeguard.h>
diff --git a/searchlib/src/tests/features/prod_features.h b/searchlib/src/tests/features/prod_features_test.h
index 28b564b341e..28b564b341e 100644
--- a/searchlib/src/tests/features/prod_features.h
+++ b/searchlib/src/tests/features/prod_features_test.h
diff --git a/searchlib/src/tests/fef/featureoverride/CMakeLists.txt b/searchlib/src/tests/fef/featureoverride/CMakeLists.txt
index 2314f8ed03a..414b0c126d0 100644
--- a/searchlib/src/tests/fef/featureoverride/CMakeLists.txt
+++ b/searchlib/src/tests/fef/featureoverride/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_featureoverride_test_app TEST
SOURCES
- featureoverride.cpp
+ featureoverride_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/fef/featureoverride/featureoverride.cpp b/searchlib/src/tests/fef/featureoverride/featureoverride_test.cpp
index f5082871a3f..f5082871a3f 100644
--- a/searchlib/src/tests/fef/featureoverride/featureoverride.cpp
+++ b/searchlib/src/tests/fef/featureoverride/featureoverride_test.cpp
diff --git a/searchlib/src/tests/forcelink/CMakeLists.txt b/searchlib/src/tests/forcelink/CMakeLists.txt
index a6af5a77b0c..4e303d6f3b3 100644
--- a/searchlib/src/tests/forcelink/CMakeLists.txt
+++ b/searchlib/src/tests/forcelink/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_forcelink_test_app TEST
SOURCES
- forcelink.cpp
+ forcelink_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/forcelink/forcelink.cpp b/searchlib/src/tests/forcelink/forcelink_test.cpp
index 189804c3975..189804c3975 100644
--- a/searchlib/src/tests/forcelink/forcelink.cpp
+++ b/searchlib/src/tests/forcelink/forcelink_test.cpp
diff --git a/searchlib/src/tests/ld-library-path/CMakeLists.txt b/searchlib/src/tests/ld-library-path/CMakeLists.txt
deleted file mode 100644
index e064d98e42c..00000000000
--- a/searchlib/src/tests/ld-library-path/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(searchlib_ld-library-path_test_app TEST
- SOURCES
- ld-library-path.cpp
- DEPENDS
-)
-vespa_add_test(NAME searchlib_ld-library-path_test_app COMMAND searchlib_ld-library-path_test_app)
diff --git a/searchlib/src/tests/ld-library-path/.gitignore b/searchlib/src/tests/ld_library_path/.gitignore
index 5f02ecfc8f8..5f02ecfc8f8 100644
--- a/searchlib/src/tests/ld-library-path/.gitignore
+++ b/searchlib/src/tests/ld_library_path/.gitignore
diff --git a/searchlib/src/tests/ld_library_path/CMakeLists.txt b/searchlib/src/tests/ld_library_path/CMakeLists.txt
new file mode 100644
index 00000000000..388a498f517
--- /dev/null
+++ b/searchlib/src/tests/ld_library_path/CMakeLists.txt
@@ -0,0 +1,7 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchlib_ld_library_path_test_app TEST
+ SOURCES
+ ld_library_path_test.cpp
+ DEPENDS
+)
+vespa_add_test(NAME searchlib_ld_library_path_test_app COMMAND searchlib_ld_library_path_test_app)
diff --git a/searchlib/src/tests/ld-library-path/ld-library-path.cpp b/searchlib/src/tests/ld_library_path/ld_library_path_test.cpp
index 2f19110c6fe..2f19110c6fe 100644
--- a/searchlib/src/tests/ld-library-path/ld-library-path.cpp
+++ b/searchlib/src/tests/ld_library_path/ld_library_path_test.cpp
diff --git a/searchlib/src/tests/nativerank/CMakeLists.txt b/searchlib/src/tests/nativerank/CMakeLists.txt
index 549a7526745..a4984374a4b 100644
--- a/searchlib/src/tests/nativerank/CMakeLists.txt
+++ b/searchlib/src/tests/nativerank/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_nativerank_test_app TEST
SOURCES
- nativerank.cpp
+ nativerank_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/nativerank/nativerank.cpp b/searchlib/src/tests/nativerank/nativerank_test.cpp
index 90920a1f351..90920a1f351 100644
--- a/searchlib/src/tests/nativerank/nativerank.cpp
+++ b/searchlib/src/tests/nativerank/nativerank_test.cpp
diff --git a/searchlib/src/tests/queryeval/CMakeLists.txt b/searchlib/src/tests/queryeval/CMakeLists.txt
index 65ae2dd8a6d..c24a661de22 100644
--- a/searchlib/src/tests/queryeval/CMakeLists.txt
+++ b/searchlib/src/tests/queryeval/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_queryeval_test_app TEST
SOURCES
- queryeval.cpp
+ queryeval_test.cpp
DEPENDS
searchlib
searchlib_test
diff --git a/searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp b/searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp
index b9599a0c75d..f3545499231 100644
--- a/searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp
+++ b/searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp
@@ -126,11 +126,12 @@ SimpleResult find_matches(Fixture &env, const Value &qtv, double threshold = std
auto dff = search::tensor::make_distance_function_factory(DistanceMetric::Euclidean, qtv.cells().type);
auto df = dff->for_query_vector(qtv.cells());
threshold = df->convert_threshold(threshold);
- DistanceCalculator dist_calc(attr, std::move(df));
NearestNeighborDistanceHeap dh(2);
dh.set_distance_threshold(threshold);
const GlobalFilter &filter = *env._global_filter;
- auto search = NearestNeighborIterator::create(strict, tfmd, dist_calc, dh, filter);
+ auto search = NearestNeighborIterator::create(strict, tfmd,
+ std::make_unique<DistanceCalculator>(attr, qtv),
+ dh, filter);
if (strict) {
return SimpleResult().searchStrict(*search, attr.getNumDocs());
} else {
@@ -253,10 +254,11 @@ std::vector<feature_t> get_rawscores(Fixture &env, const Value &qtv) {
auto &tfmd = *(md->resolveTermField(0));
auto &attr = *(env._attr);
auto dff = search::tensor::make_distance_function_factory(DistanceMetric::Euclidean, qtv.cells().type);
- DistanceCalculator dist_calc(attr, dff->for_query_vector(qtv.cells()));
NearestNeighborDistanceHeap dh(2);
auto dummy_filter = GlobalFilter::create();
- auto search = NearestNeighborIterator::create(strict, tfmd, dist_calc, dh, *dummy_filter);
+ auto search = NearestNeighborIterator::create(strict, tfmd,
+ std::make_unique<DistanceCalculator>(attr, qtv),
+ dh, *dummy_filter);
uint32_t limit = attr.getNumDocs();
uint32_t docid = 1;
search->initRange(docid, limit);
diff --git a/searchlib/src/tests/queryeval/queryeval.cpp b/searchlib/src/tests/queryeval/queryeval_test.cpp
index 698bf7c08d5..698bf7c08d5 100644
--- a/searchlib/src/tests/queryeval/queryeval.cpp
+++ b/searchlib/src/tests/queryeval/queryeval_test.cpp
diff --git a/searchlib/src/tests/queryeval/sourceblender/CMakeLists.txt b/searchlib/src/tests/queryeval/sourceblender/CMakeLists.txt
index 3962343060c..266e26c1651 100644
--- a/searchlib/src/tests/queryeval/sourceblender/CMakeLists.txt
+++ b/searchlib/src/tests/queryeval/sourceblender/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_sourceblender_test_app TEST
SOURCES
- sourceblender.cpp
+ sourceblender_test.cpp
DEPENDS
searchlib
searchlib_test
diff --git a/searchlib/src/tests/queryeval/sourceblender/sourceblender.cpp b/searchlib/src/tests/queryeval/sourceblender/sourceblender_test.cpp
index 15a277d51c6..15a277d51c6 100644
--- a/searchlib/src/tests/queryeval/sourceblender/sourceblender.cpp
+++ b/searchlib/src/tests/queryeval/sourceblender/sourceblender_test.cpp
diff --git a/searchlib/src/tests/ranksetup/ranksetup_test.cpp b/searchlib/src/tests/ranksetup/ranksetup_test.cpp
index 50d9d36f575..f708df0a862 100644
--- a/searchlib/src/tests/ranksetup/ranksetup_test.cpp
+++ b/searchlib/src/tests/ranksetup/ranksetup_test.cpp
@@ -533,6 +533,9 @@ void RankSetupTest::testRankSetup()
env.getProperties().add(mutate::on_second_phase::Operation::NAME, "=7");
env.getProperties().add(mutate::on_summary::Attribute::NAME, "c");
env.getProperties().add(mutate::on_summary::Operation::NAME, "-=2");
+ env.getProperties().add(matching::GlobalFilterLowerLimit::NAME, "0.3");
+ env.getProperties().add(matching::GlobalFilterUpperLimit::NAME, "0.7");
+ env.getProperties().add(matching::TargetHitsMaxAdjustmentFactor::NAME, "5.0");
RankSetup rs(_factory, env);
EXPECT_FALSE(rs.has_match_features());
@@ -571,7 +574,9 @@ void RankSetupTest::testRankSetup()
EXPECT_EQUAL(rs.getMutateOnSecondPhase()._operation, "=7");
EXPECT_EQUAL(rs.getMutateOnSummary()._attribute, "c");
EXPECT_EQUAL(rs.getMutateOnSummary()._operation, "-=2");
-
+ EXPECT_EQUAL(rs.get_global_filter_lower_limit(), 0.3);
+ EXPECT_EQUAL(rs.get_global_filter_upper_limit(), 0.7);
+ EXPECT_EQUAL(rs.get_target_hits_max_adjustment_factor(), 5.0);
}
bool
diff --git a/searchlib/src/tests/sortresults/CMakeLists.txt b/searchlib/src/tests/sortresults/CMakeLists.txt
index 345d2ccb186..04069bf7ca4 100644
--- a/searchlib/src/tests/sortresults/CMakeLists.txt
+++ b/searchlib/src/tests/sortresults/CMakeLists.txt
@@ -1,8 +1,8 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(searchlib_sortresults_app TEST
+vespa_add_executable(searchlib_sortresults_test_app TEST
SOURCES
- sorttest.cpp
+ sortresults_test.cpp
DEPENDS
searchlib
)
-vespa_add_test(NAME searchlib_sortresults_app COMMAND searchlib_sortresults_app)
+vespa_add_test(NAME searchlib_sortresults_test_app COMMAND searchlib_sortresults_test_app)
diff --git a/searchlib/src/tests/sortresults/sorttest.cpp b/searchlib/src/tests/sortresults/sortresults_test.cpp
index bbd6d0b72ce..bbd6d0b72ce 100644
--- a/searchlib/src/tests/sortresults/sorttest.cpp
+++ b/searchlib/src/tests/sortresults/sortresults_test.cpp
diff --git a/searchlib/src/tests/sortspec/CMakeLists.txt b/searchlib/src/tests/sortspec/CMakeLists.txt
index a9b3a906804..9da0def9c9e 100644
--- a/searchlib/src/tests/sortspec/CMakeLists.txt
+++ b/searchlib/src/tests/sortspec/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_multilevelsort_test_app TEST
SOURCES
- multilevelsort.cpp
+ multilevelsort_test.cpp
DEPENDS
searchlib
)
diff --git a/searchlib/src/tests/sortspec/multilevelsort.cpp b/searchlib/src/tests/sortspec/multilevelsort_test.cpp
index 001903ff302..001903ff302 100644
--- a/searchlib/src/tests/sortspec/multilevelsort.cpp
+++ b/searchlib/src/tests/sortspec/multilevelsort_test.cpp
diff --git a/searchlib/src/tests/transactionlog/translogclient_test.cpp b/searchlib/src/tests/transactionlog/translogclient_test.cpp
index 0f19a0b233b..fdccc221252 100644
--- a/searchlib/src/tests/transactionlog/translogclient_test.cpp
+++ b/searchlib/src/tests/transactionlog/translogclient_test.cpp
@@ -35,7 +35,7 @@ std::unique_ptr<Session> openDomainTest(TransLogClient & tls, const vespalib::st
bool fillDomainTest(Session * s1, const vespalib::string & name);
void fillDomainTest(Session * s1, size_t numPackets, size_t numEntries);
void fillDomainTest(Session * s1, size_t numPackets, size_t numEntries, size_t entrySize);
-uint32_t countFiles(const vespalib::string &dir);
+uint32_t countFiles(const std::string& dir);
void checkFilledDomainTest(Session &s1, size_t numEntries);
bool visitDomainTest(TransLogClient & tls, Session * s1, const vespalib::string & name);
void createAndFillDomain(const vespalib::string & dir, const vespalib::string & name, Encoding encoding, size_t preExistingDomains);
@@ -392,15 +392,12 @@ fillDomainTest(Session * s1, size_t numPackets, size_t numEntries, size_t entryS
uint32_t
-countFiles(const vespalib::string &dir)
+countFiles(const std::string& dir)
{
uint32_t res = 0;
- FastOS_DirectoryScan dirScan(dir.c_str());
- while (dirScan.ReadNext()) {
- const char *ename = dirScan.GetName();
- if (strcmp(ename, ".") == 0 ||
- strcmp(ename, "..") == 0)
- continue;
+ std::filesystem::directory_iterator dir_scan(dir);
+ for (auto& entry : dir_scan) {
+ (void) entry;
++res;
}
return res;
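Because std::filesystem::directory_iterator never yields "." or "..", every entry the loop above sees counts, and the same result can be obtained with std::distance. A minimal sketch of that alternative; the helper name is illustrative and it uses the throwing constructor, so it assumes the directory exists:

    #include <cstdint>
    #include <filesystem>
    #include <iterator>
    #include <string>

    uint32_t count_dir_entries(const std::string& dir) {
        // A default-constructed directory_iterator is the end iterator.
        return static_cast<uint32_t>(std::distance(std::filesystem::directory_iterator(dir),
                                                   std::filesystem::directory_iterator()));
    }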
diff --git a/searchlib/src/tests/true/CMakeLists.txt b/searchlib/src/tests/true/CMakeLists.txt
index a2856f3391e..b119d0a25e4 100644
--- a/searchlib/src/tests/true/CMakeLists.txt
+++ b/searchlib/src/tests/true/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(searchlib_true_test_app TEST
SOURCES
- true.cpp
+ true_test.cpp
DEPENDS
)
vespa_add_test(NAME searchlib_true_test_app COMMAND searchlib_true_test_app)
diff --git a/searchlib/src/tests/true/true.cpp b/searchlib/src/tests/true/true_test.cpp
index 8dee60ddd40..8dee60ddd40 100644
--- a/searchlib/src/tests/true/true.cpp
+++ b/searchlib/src/tests/true/true_test.cpp
diff --git a/searchlib/src/tests/url/CMakeLists.txt b/searchlib/src/tests/url/CMakeLists.txt
index 5352a04fea1..d6400d5b651 100644
--- a/searchlib/src/tests/url/CMakeLists.txt
+++ b/searchlib/src/tests/url/CMakeLists.txt
@@ -1,9 +1,9 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(searchlib_testurl_app TEST
+vespa_add_executable(searchlib_url_test_app TEST
SOURCES
- testurl.cpp
+ url_test.cpp
DEPENDS
searchlib
)
-vespa_add_test(NAME searchlib_testurl_app COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/dotest.sh
- DEPENDS searchlib_testurl_app)
+vespa_add_test(NAME searchlib_url_test_app COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/dotest.sh
+ DEPENDS searchlib_url_test_app)
diff --git a/searchlib/src/tests/url/dotest.sh b/searchlib/src/tests/url/dotest.sh
index ce97b427a2d..ee9baf9bf2f 100755
--- a/searchlib/src/tests/url/dotest.sh
+++ b/searchlib/src/tests/url/dotest.sh
@@ -4,7 +4,7 @@ set -e
# Run test
echo "Testing the FastS_URL class..."
-$VALGRIND ./searchlib_testurl_app
+$VALGRIND ./searchlib_url_test_app
if [ $? -eq 0 ]; then
echo "SUCCESS: Test on FastS_URL passed!"
else
diff --git a/searchlib/src/tests/url/testurl.cpp b/searchlib/src/tests/url/url_test.cpp
index 64a3827495a..64a3827495a 100644
--- a/searchlib/src/tests/url/testurl.cpp
+++ b/searchlib/src/tests/url/url_test.cpp
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
index be631be6dca..453b7b321b9 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
@@ -842,14 +842,16 @@ public:
}
try {
auto calc = tensor::DistanceCalculator::make_with_validation(_attr, *query_tensor);
+ const auto& params = getRequestContext().get_attribute_blueprint_params();
setResult(std::make_unique<queryeval::NearestNeighborBlueprint>(_field,
std::move(calc),
n.get_target_num_hits(),
n.get_allow_approximate(),
n.get_explore_additional_hits(),
n.get_distance_threshold(),
- getRequestContext().get_attribute_blueprint_params().global_filter_lower_limit,
- getRequestContext().get_attribute_blueprint_params().global_filter_upper_limit,
+ params.global_filter_lower_limit,
+ params.global_filter_upper_limit,
+ params.target_hits_max_adjustment_factor,
getRequestContext().getDoom()));
} catch (const vespalib::IllegalArgumentException& ex) {
return fail_nearest_neighbor_term(n, ex.getMessage());
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h
index 39f58c5382e..64213235c23 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h
@@ -13,17 +13,21 @@ struct AttributeBlueprintParams
{
double global_filter_lower_limit;
double global_filter_upper_limit;
+ double target_hits_max_adjustment_factor;
AttributeBlueprintParams(double global_filter_lower_limit_in,
- double global_filter_upper_limit_in)
+ double global_filter_upper_limit_in,
+ double target_hits_max_adjustment_factor_in)
: global_filter_lower_limit(global_filter_lower_limit_in),
- global_filter_upper_limit(global_filter_upper_limit_in)
+ global_filter_upper_limit(global_filter_upper_limit_in),
+ target_hits_max_adjustment_factor(target_hits_max_adjustment_factor_in)
{
}
AttributeBlueprintParams()
: AttributeBlueprintParams(fef::indexproperties::matching::GlobalFilterLowerLimit::DEFAULT_VALUE,
- fef::indexproperties::matching::GlobalFilterUpperLimit::DEFAULT_VALUE)
+ fef::indexproperties::matching::GlobalFilterUpperLimit::DEFAULT_VALUE,
+ fef::indexproperties::matching::TargetHitsMaxAdjustmentFactor::DEFAULT_VALUE)
{
}
};
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
index d3553ad003f..a187e690158 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
@@ -668,7 +668,7 @@ vespalib::string
lsSingleFile(const vespalib::string & fileName)
{
fs::path path(fileName);
- return make_string("%s %20" PRIu64 " %12" PRId64, fileName.c_str(), vespalib::count_ns(fs::last_write_time(path).time_since_epoch()), fs::file_size(path));
+ return make_string("%s %20" PRIu64 " %12" PRIdMAX, fileName.c_str(), vespalib::count_ns(fs::last_write_time(path).time_since_epoch()), fs::file_size(path));
}
}
@@ -891,10 +891,10 @@ LogDataStore::NameIdSet
LogDataStore::scanDir(const vespalib::string &dir, const vespalib::string &suffix)
{
NameIdSet baseFiles;
- FastOS_DirectoryScan dirScan(dir.c_str());
- while (dirScan.ReadNext()) {
- if (dirScan.IsRegular()) {
- vespalib::stringref file(dirScan.GetName());
+ std::filesystem::directory_iterator dir_scan{std::filesystem::path(dir)};
+ for (auto& entry : dir_scan) {
+ if (entry.is_regular_file()) {
+ vespalib::string file(entry.path().filename().string());
if (file.size() > suffix.size() &&
file.find(suffix.c_str()) == file.size() - suffix.size()) {
vespalib::string base(file.substr(0, file.find(suffix.c_str())));
@@ -911,7 +911,7 @@ LogDataStore::scanDir(const vespalib::string &dir, const vespalib::string &suffi
base.c_str(), err, getLastErrorString().c_str()));
}
} else {
- LOG(debug, "Skipping '%s' since it does not end with '%s'", file.data(), suffix.c_str());
+ LOG(debug, "Skipping '%s' since it does not end with '%s'", file.c_str(), suffix.c_str());
}
}
}
diff --git a/searchlib/src/vespa/searchlib/expression/resultnodes.cpp b/searchlib/src/vespa/searchlib/expression/resultnodes.cpp
index 7fb3ab1b6cf..8f9f1b7ca06 100644
--- a/searchlib/src/vespa/searchlib/expression/resultnodes.cpp
+++ b/searchlib/src/vespa/searchlib/expression/resultnodes.cpp
@@ -438,7 +438,9 @@ void
RawResultNode::setBuffer(const void *buf, size_t sz)
{
_value.resize(sz + 1);
- memcpy(_value.data(), buf, sz);
+ if (sz > 0) {
+ memcpy(_value.data(), buf, sz);
+ }
_value.back() = 0;
_value.resize(sz);
}
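The new guard in RawResultNode::setBuffer matters because passing a null source pointer to memcpy is undefined behavior even when the length is zero, so setBuffer(nullptr, 0) is now well defined. A small standalone sketch of the same resize/copy/terminate pattern; std::vector<char> stands in for the actual buffer type, which is an assumption:

    #include <cstddef>
    #include <cstring>
    #include <vector>

    void set_buffer(std::vector<char>& value, const void* buf, size_t sz) {
        value.resize(sz + 1);          // room for a trailing zero byte
        if (sz > 0) {
            std::memcpy(value.data(), buf, sz);
        }
        value.back() = 0;              // zero terminator just past the logical size
        value.resize(sz);              // logical size excludes the terminator
    }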
diff --git a/searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.cpp b/searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.cpp
index 3e5d1da6a1a..7c267413a86 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.cpp
@@ -36,7 +36,8 @@ TensorFactoryBlueprint::TensorFactoryBlueprint(const vespalib::string &baseName)
: Blueprint(baseName),
_sourceType(),
_sourceParam(),
- _dimension("0") // default dimension is set to the source param if not specified.
+ _dimension("0"), // default dimension is set to the source param if not specified.
+ _valueType(vespalib::eval::ValueType::error_type())
{
}
diff --git a/searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.h b/searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.h
index 26fcc79b6f5..47ccb038ac7 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.h
+++ b/searchlib/src/vespa/searchlib/features/tensor_factory_blueprint.h
@@ -4,6 +4,7 @@
#include <vespa/searchlib/fef/blueprint.h>
#include <vespa/vespalib/stllike/string.h>
+#include <vespa/eval/eval/value_type.h>
namespace search::features {
@@ -19,6 +20,7 @@ protected:
vespalib::string _sourceType;
vespalib::string _sourceParam;
vespalib::string _dimension;
+ vespalib::eval::ValueType _valueType;
bool extractSource(const vespalib::string &source);
TensorFactoryBlueprint(const vespalib::string &baseName);
diff --git a/searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h b/searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h
index 5a3fede76e8..7b04d10cea2 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h
+++ b/searchlib/src/vespa/searchlib/features/tensor_from_attribute_executor.h
@@ -28,9 +28,9 @@ private:
public:
TensorFromAttributeExecutor(const search::attribute::IAttributeVector *attribute,
- const vespalib::string &dimension)
+ const vespalib::eval::ValueType &valueType)
: _attribute(attribute),
- _type(vespalib::eval::ValueType::make_type(CellType::DOUBLE, {{dimension}})),
+ _type(valueType),
_attrBuffer(),
_addr_ref(),
_tensor()
diff --git a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
index b72a75bd19f..f36c1dbfdaa 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/tensor_from_labels_feature.cpp
@@ -41,15 +41,23 @@ TensorFromLabelsBlueprint::setup(const search::fef::IIndexEnvironment &env,
// _params[0] = source ('attribute(name)' OR 'query(param)');
// _params[1] = dimension (optional);
bool validSource = extractSource(params[0].getValue());
+ if (! validSource) {
+ return fail("invalid source: '%s'", params[0].getValue().c_str());
+ }
if (params.size() == 2) {
_dimension = params[1].getValue();
} else {
_dimension = _sourceParam;
}
+ auto vt = ValueType::make_type(CellType::DOUBLE, {{_dimension}});
+ _valueType = ValueType::from_spec(vt.to_spec());
+ if (_valueType.is_error()) {
+ return fail("invalid dimension name: '%s'", _dimension.c_str());
+ }
describeOutput("tensor",
"The tensor created from the given source (attribute field or query parameter)",
- FeatureType::object(ValueType::make_type(CellType::DOUBLE, {{_dimension}})));
- return validSource;
+ FeatureType::object(_valueType));
+ return true;
}
namespace {
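The setup() change in the hunk above now rejects dimension names that do not survive a round trip through the type spec: make_type() builds the type unconditionally, but ValueType::from_spec() returns the error type when it cannot parse the spec back, which is_error() turns into a setup failure instead of an unusable output type. A minimal standalone sketch of that round trip; the sample dimension names are illustrative:

    #include <vespa/eval/eval/value_type.h>
    #include <vespa/vespalib/stllike/string.h>
    #include <cstdio>

    int main() {
        using vespalib::eval::CellType;
        using vespalib::eval::ValueType;
        auto check = [](const vespalib::string& dim) {
            auto vt = ValueType::from_spec(ValueType::make_type(CellType::DOUBLE, {{dim}}).to_spec());
            std::printf("dimension '%s' -> %s\n", dim.c_str(), vt.is_error() ? "rejected" : "accepted");
        };
        check("category");   // plain identifier: round-trips cleanly
        check("bad)name");   // breaks the spec syntax, so from_spec() yields the error type
        return 0;
    }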
@@ -57,23 +65,24 @@ namespace {
FeatureExecutor &
createAttributeExecutor(const search::fef::IQueryEnvironment &env,
const vespalib::string &attrName,
- const vespalib::string &dimension, vespalib::Stash &stash)
+ const ValueType &valueType,
+ vespalib::Stash &stash)
{
const IAttributeVector *attribute = env.getAttributeContext().getAttribute(attrName);
if (attribute == NULL) {
Issue::report("tensor_from_labels feature: The attribute vector '%s' was not found."
" Returning empty tensor.", attrName.c_str());
- return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(valueType, stash);
}
if (attribute->isFloatingPointType()) {
Issue::report("tensor_from_labels feature: The attribute vector '%s' must have basic type string or integer."
" Returning empty tensor.", attrName.c_str());
- return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(valueType, stash);
}
if (attribute->getCollectionType() == search::attribute::CollectionType::WSET) {
Issue::report("tensor_from_labels feature: The attribute vector '%s' is a weighted set - use tensorFromWeightedSet instead."
" Returning empty tensor.", attrName.c_str());
- return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(valueType, stash);
}
// Note that for array attribute vectors the default weight is 1.0 for all values.
// This means we can get the attribute content as weighted content and build
@@ -81,25 +90,25 @@ createAttributeExecutor(const search::fef::IQueryEnvironment &env,
if (attribute->isIntegerType()) {
// Using WeightedStringContent ensures that the integer values are converted
// to strings while extracting them from the attribute.
- return stash.create<TensorFromAttributeExecutor<WeightedStringContent>>(attribute, dimension);
+ return stash.create<TensorFromAttributeExecutor<WeightedStringContent>>(attribute, valueType);
}
// When the underlying attribute is of type string we can reference these values
// using WeightedConstCharContent.
- return stash.create<TensorFromAttributeExecutor<WeightedConstCharContent>>(attribute, dimension);
+ return stash.create<TensorFromAttributeExecutor<WeightedConstCharContent>>(attribute, valueType);
}
FeatureExecutor &
createQueryExecutor(const search::fef::IQueryEnvironment &env,
const vespalib::string &queryKey,
- const vespalib::string &dimension, vespalib::Stash &stash)
+ const ValueType &valueType,
+ vespalib::Stash &stash)
{
- ValueType type = ValueType::make_type(CellType::DOUBLE, {{dimension}});
search::fef::Property prop = env.getProperties().lookup(queryKey);
if (prop.found() && !prop.get().empty()) {
std::vector<vespalib::string> vector;
ArrayParser::parse(prop.get(), vector);
auto factory = FastValueBuilderFactory::get();
- auto builder = factory.create_value_builder<double>(type, 1, 1, vector.size());
+ auto builder = factory.create_value_builder<double>(valueType, 1, 1, vector.size());
std::vector<vespalib::stringref> addr_ref;
for (const auto &elem : vector) {
addr_ref.clear();
@@ -109,7 +118,7 @@ createQueryExecutor(const search::fef::IQueryEnvironment &env,
}
return ConstantTensorExecutor::create(builder->build(std::move(builder)), stash);
}
- return ConstantTensorExecutor::createEmpty(type, stash);
+ return ConstantTensorExecutor::createEmpty(valueType, stash);
}
}
@@ -118,11 +127,11 @@ FeatureExecutor &
TensorFromLabelsBlueprint::createExecutor(const search::fef::IQueryEnvironment &env, vespalib::Stash &stash) const
{
if (_sourceType == ATTRIBUTE_SOURCE) {
- return createAttributeExecutor(env, _sourceParam, _dimension, stash);
+ return createAttributeExecutor(env, _sourceParam, _valueType, stash);
} else if (_sourceType == QUERY_SOURCE) {
- return createQueryExecutor(env, _sourceParam, _dimension, stash);
+ return createQueryExecutor(env, _sourceParam, _valueType, stash);
}
- return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{_dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(_valueType, stash);
}
} // namespace features
diff --git a/searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp b/searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp
index cbe262a0cbd..312f9ee2bc6 100644
--- a/searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/tensor_from_weighted_set_feature.cpp
@@ -54,15 +54,23 @@ TensorFromWeightedSetBlueprint::setup(const search::fef::IIndexEnvironment &env,
// _params[0] = source ('attribute(name)' OR 'query(param)');
// _params[1] = dimension (optional);
bool validSource = extractSource(params[0].getValue());
+ if (! validSource) {
+ return fail("invalid source: '%s'", params[0].getValue().c_str());
+ }
if (params.size() == 2) {
_dimension = params[1].getValue();
} else {
_dimension = _sourceParam;
}
+ auto vt = ValueType::make_type(CellType::DOUBLE, {{_dimension}});
+ _valueType = ValueType::from_spec(vt.to_spec());
+ if (_valueType.is_error()) {
+ return fail("invalid dimension name: '%s'", _dimension.c_str());
+ }
describeOutput("tensor",
"The tensor created from the given weighted set source (attribute field or query parameter)",
- FeatureType::object(ValueType::make_type(CellType::DOUBLE, {{_dimension}})));
- return validSource;
+ FeatureType::object(_valueType));
+ return true;
}
namespace {
@@ -70,45 +78,45 @@ namespace {
FeatureExecutor &
createAttributeExecutor(const search::fef::IQueryEnvironment &env,
const vespalib::string &attrName,
- const vespalib::string &dimension,
+ const ValueType &valueType,
vespalib::Stash &stash)
{
const IAttributeVector *attribute = env.getAttributeContext().getAttribute(attrName);
if (attribute == NULL) {
Issue::report("tensor_from_weighted_set feature: The attribute vector '%s' was not found."
" Returning empty tensor.", attrName.c_str());
- return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(valueType, stash);
}
if (attribute->getCollectionType() != search::attribute::CollectionType::WSET ||
attribute->isFloatingPointType())
{
Issue::report("tensor_from_weighted_set feature: The attribute vector '%s' is NOT of type weighted set of string or integer."
" Returning empty tensor.", attrName.c_str());
- return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(valueType, stash);
}
if (attribute->isIntegerType()) {
// Using WeightedStringContent ensures that the integer values are converted
// to strings while extracting them from the attribute.
- return stash.create<TensorFromAttributeExecutor<WeightedStringContent>>(attribute, dimension);
+ return stash.create<TensorFromAttributeExecutor<WeightedStringContent>>(attribute, valueType);
}
// When the underlying attribute is of type string we can reference these values
// using WeightedConstCharContent.
- return stash.create<TensorFromAttributeExecutor<WeightedConstCharContent>>(attribute, dimension);
+ return stash.create<TensorFromAttributeExecutor<WeightedConstCharContent>>(attribute, valueType);
}
FeatureExecutor &
createQueryExecutor(const search::fef::IQueryEnvironment &env,
const vespalib::string &queryKey,
- const vespalib::string &dimension, vespalib::Stash &stash)
+ const ValueType &valueType,
+ vespalib::Stash &stash)
{
- ValueType type = ValueType::make_type(CellType::DOUBLE, {{dimension}});
search::fef::Property prop = env.getProperties().lookup(queryKey);
if (prop.found() && !prop.get().empty()) {
WeightedStringVector vector;
WeightedSetParser::parse(prop.get(), vector);
auto factory = FastValueBuilderFactory::get();
size_t sz = vector._data.size();
- auto builder = factory.create_value_builder<double>(type, 1, 1, sz);
+ auto builder = factory.create_value_builder<double>(valueType, 1, 1, sz);
std::vector<vespalib::stringref> addr_ref;
for (const auto &elem : vector._data) {
addr_ref.clear();
@@ -118,7 +126,7 @@ createQueryExecutor(const search::fef::IQueryEnvironment &env,
}
return ConstantTensorExecutor::create(builder->build(std::move(builder)), stash);
}
- return ConstantTensorExecutor::createEmpty(type, stash);
+ return ConstantTensorExecutor::createEmpty(valueType, stash);
}
}
@@ -127,11 +135,11 @@ FeatureExecutor &
TensorFromWeightedSetBlueprint::createExecutor(const search::fef::IQueryEnvironment &env, vespalib::Stash &stash) const
{
if (_sourceType == ATTRIBUTE_SOURCE) {
- return createAttributeExecutor(env, _sourceParam, _dimension, stash);
+ return createAttributeExecutor(env, _sourceParam, _valueType, stash);
} else if (_sourceType == QUERY_SOURCE) {
- return createQueryExecutor(env, _sourceParam, _dimension, stash);
+ return createQueryExecutor(env, _sourceParam, _valueType, stash);
}
- return ConstantTensorExecutor::createEmpty(ValueType::make_type(CellType::DOUBLE, {{_dimension}}), stash);
+ return ConstantTensorExecutor::createEmpty(_valueType, stash);
}
} // namespace features
diff --git a/searchlib/src/vespa/searchlib/fef/indexproperties.cpp b/searchlib/src/vespa/searchlib/fef/indexproperties.cpp
index 8be44ce0a0c..7871e66970e 100644
--- a/searchlib/src/vespa/searchlib/fef/indexproperties.cpp
+++ b/searchlib/src/vespa/searchlib/fef/indexproperties.cpp
@@ -422,6 +422,22 @@ GlobalFilterUpperLimit::lookup(const Properties &props, double defaultValue)
return lookupDouble(props, NAME, defaultValue);
}
+const vespalib::string TargetHitsMaxAdjustmentFactor::NAME("vespa.matching.nns.target_hits_max_adjustment_factor");
+
+const double TargetHitsMaxAdjustmentFactor::DEFAULT_VALUE(20.0);
+
+double
+TargetHitsMaxAdjustmentFactor::lookup(const Properties& props)
+{
+ return lookup(props, DEFAULT_VALUE);
+}
+
+double
+TargetHitsMaxAdjustmentFactor::lookup(const Properties& props, double defaultValue)
+{
+ return lookupDouble(props, NAME, defaultValue);
+}
+
} // namespace matching
namespace softtimeout {
diff --git a/searchlib/src/vespa/searchlib/fef/indexproperties.h b/searchlib/src/vespa/searchlib/fef/indexproperties.h
index f538e7bef2e..4f38a27d3fe 100644
--- a/searchlib/src/vespa/searchlib/fef/indexproperties.h
+++ b/searchlib/src/vespa/searchlib/fef/indexproperties.h
@@ -313,6 +313,21 @@ namespace matching {
static double lookup(const Properties &props);
static double lookup(const Properties &props, double defaultValue);
};
+
+ /**
+ * Property to control the auto-adjustment of targetHits in a nearestNeighbor search that uses an HNSW index with post-filtering.
+ *
+ * targetHits is auto-adjusted in an effort to expose targetHits hits to first-phase ranking after post-filtering:
+ * adjustedTargetHits = min(targetHits / estimatedHitRatio, targetHits * targetHitsMaxAdjustmentFactor).
+ *
+ * This property puts an upper bound on adjustedTargetHits, preventing the search in the HNSW index from taking too long.
+ **/
+ struct TargetHitsMaxAdjustmentFactor {
+ static const vespalib::string NAME;
+ static const double DEFAULT_VALUE;
+ static double lookup(const Properties &props);
+ static double lookup(const Properties &props, double defaultValue);
+ };
}
namespace softtimeout {
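The property added above bounds the auto-adjustment of targetHits described in the header comment. A minimal standalone sketch of that clamping logic, assuming illustrative input values (the helper function and its arguments are hypothetical, not part of this change):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Sketch of adjustedTargetHits = min(targetHits / estimatedHitRatio,
//                                    targetHits * targetHitsMaxAdjustmentFactor)
double adjusted_target_hits(uint32_t target_hits, double estimated_hit_ratio,
                            double max_adjustment_factor) {
    if (estimated_hit_ratio <= 0.0) {
        return target_hits; // no usable estimate; keep targetHits unchanged
    }
    return std::min(static_cast<double>(target_hits) / estimated_hit_ratio,
                    static_cast<double>(target_hits) * max_adjustment_factor);
}

int main() {
    // targetHits=100, estimated hit ratio 0.1%, default factor 20:
    // the raw adjustment would be 100000, but it is capped at 100 * 20 = 2000.
    std::printf("%.0f\n", adjusted_target_hits(100, 0.001, 20.0));
    return 0;
}

With the default factor of 20.0 this keeps a post-filtered nearestNeighbor query from inflating targetHits by more than a factor of 20, matching the upper bound described in the comment above.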
diff --git a/searchlib/src/vespa/searchlib/fef/ranksetup.cpp b/searchlib/src/vespa/searchlib/fef/ranksetup.cpp
index 4c676d5ba5c..9d4e547feef 100644
--- a/searchlib/src/vespa/searchlib/fef/ranksetup.cpp
+++ b/searchlib/src/vespa/searchlib/fef/ranksetup.cpp
@@ -66,9 +66,9 @@ RankSetup::RankSetup(const BlueprintFactory &factory, const IIndexEnvironment &i
_diversityCutoffStrategy("loose"),
_softTimeoutEnabled(false),
_softTimeoutTailCost(0.1),
- _softTimeoutFactor(0.5),
_global_filter_lower_limit(0.0),
_global_filter_upper_limit(1.0),
+ _target_hits_max_adjustment_factor(20.0),
_mutateOnMatch(),
_mutateOnFirstPhase(),
_mutateOnSecondPhase(),
@@ -120,9 +120,9 @@ RankSetup::configure()
setRankScoreDropLimit(hitcollector::RankScoreDropLimit::lookup(_indexEnv.getProperties()));
setSoftTimeoutEnabled(softtimeout::Enabled::lookup(_indexEnv.getProperties()));
setSoftTimeoutTailCost(softtimeout::TailCost::lookup(_indexEnv.getProperties()));
- setSoftTimeoutFactor(softtimeout::Factor::lookup(_indexEnv.getProperties()));
set_global_filter_lower_limit(matching::GlobalFilterLowerLimit::lookup(_indexEnv.getProperties()));
set_global_filter_upper_limit(matching::GlobalFilterUpperLimit::lookup(_indexEnv.getProperties()));
+ set_target_hits_max_adjustment_factor(matching::TargetHitsMaxAdjustmentFactor::lookup(_indexEnv.getProperties()));
_mutateOnMatch._attribute = mutate::on_match::Attribute::lookup(_indexEnv.getProperties());
_mutateOnMatch._operation = mutate::on_match::Operation::lookup(_indexEnv.getProperties());
_mutateOnFirstPhase._attribute = mutate::on_first_phase::Attribute::lookup(_indexEnv.getProperties());
diff --git a/searchlib/src/vespa/searchlib/fef/ranksetup.h b/searchlib/src/vespa/searchlib/fef/ranksetup.h
index 783c1506ff0..72432c2ed8a 100644
--- a/searchlib/src/vespa/searchlib/fef/ranksetup.h
+++ b/searchlib/src/vespa/searchlib/fef/ranksetup.h
@@ -74,9 +74,9 @@ private:
vespalib::string _diversityCutoffStrategy;
bool _softTimeoutEnabled;
double _softTimeoutTailCost;
- double _softTimeoutFactor;
double _global_filter_lower_limit;
double _global_filter_upper_limit;
+ double _target_hits_max_adjustment_factor;
MutateOperation _mutateOnMatch;
MutateOperation _mutateOnFirstPhase;
MutateOperation _mutateOnSecondPhase;
@@ -211,11 +211,6 @@ public:
**/
uint32_t getArraySize() const { return _arraySize; }
- /** whether match phase should do graceful degradation */
- bool hasMatchPhaseDegradation() const {
- return (_degradationAttribute.size() > 0);
- }
-
/** get name of attribute to use for graceful degradation in match phase */
vespalib::string getDegradationAttribute() const {
return _degradationAttribute;
@@ -390,25 +385,17 @@ public:
**/
void setIgnoreDefaultRankFeatures(bool flag) { _ignoreDefaultRankFeatures = flag; }
- /**
- * Get the flag indicating whether we should ignore the default
- * rank features (the ones specified by the plugins themselves)
- *
- * @return true means ignore default rank features
- **/
- bool getIgnoreDefaultRankFeatures() { return _ignoreDefaultRankFeatures; }
-
void setSoftTimeoutEnabled(bool v) { _softTimeoutEnabled = v; }
bool getSoftTimeoutEnabled() const { return _softTimeoutEnabled; }
void setSoftTimeoutTailCost(double v) { _softTimeoutTailCost = v; }
double getSoftTimeoutTailCost() const { return _softTimeoutTailCost; }
- void setSoftTimeoutFactor(double v) { _softTimeoutFactor = v; }
- double getSoftTimeoutFactor() const { return _softTimeoutFactor; }
void set_global_filter_lower_limit(double v) { _global_filter_lower_limit = v; }
double get_global_filter_lower_limit() const { return _global_filter_lower_limit; }
void set_global_filter_upper_limit(double v) { _global_filter_upper_limit = v; }
double get_global_filter_upper_limit() const { return _global_filter_upper_limit; }
+ void set_target_hits_max_adjustment_factor(double v) { _target_hits_max_adjustment_factor = v; }
+ double get_target_hits_max_adjustment_factor() const { return _target_hits_max_adjustment_factor; }
/**
* This method may be used to indicate that certain features
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
index 87ddb8b6edc..a70f387100b 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
@@ -43,6 +43,7 @@ NearestNeighborBlueprint::NearestNeighborBlueprint(const queryeval::FieldSpec& f
double distance_threshold,
double global_filter_lower_limit,
double global_filter_upper_limit,
+ double target_hits_max_adjustment_factor,
const vespalib::Doom& doom)
: ComplexLeafBlueprint(field),
_distance_calc(std::move(distance_calc)),
@@ -55,6 +56,7 @@ NearestNeighborBlueprint::NearestNeighborBlueprint(const queryeval::FieldSpec& f
_distance_threshold(std::numeric_limits<double>::max()),
_global_filter_lower_limit(global_filter_lower_limit),
_global_filter_upper_limit(global_filter_upper_limit),
+ _target_hits_max_adjustment_factor(target_hits_max_adjustment_factor),
_distance_heap(target_hits),
_found_hits(),
_algorithm(Algorithm::EXACT),
@@ -95,8 +97,10 @@ NearestNeighborBlueprint::set_global_filter(const GlobalFilter &global_filter, d
} else { // post-filtering case
// The goal is to expose 'targetHits' hits to first-phase ranking.
// We try to achieve this by adjusting targetHits based on the estimated hit ratio of the query before post-filtering.
+ // However, this is bound by 'target-hits-max-adjustment-factor' to limit the cost of searching the HNSW index.
if (estimated_hit_ratio > 0.0) {
- _adjusted_target_hits = static_cast<double>(_target_hits) / estimated_hit_ratio;
+ _adjusted_target_hits = std::min(static_cast<double>(_target_hits) / estimated_hit_ratio,
+ static_cast<double>(_target_hits) * _target_hits_max_adjustment_factor);
}
}
if (_algorithm != Algorithm::EXACT_FALLBACK) {
@@ -133,7 +137,8 @@ NearestNeighborBlueprint::createLeafSearch(const search::fef::TermFieldMatchData
default:
;
}
- return NearestNeighborIterator::create(strict, tfmd, *_distance_calc,
+ return NearestNeighborIterator::create(strict, tfmd,
+ std::make_unique<search::tensor::DistanceCalculator>(_attr_tensor, _query_tensor),
_distance_heap, *_global_filter);
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h
index f88cdd5adb1..174f0b23125 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h
@@ -38,6 +38,7 @@ private:
double _distance_threshold;
double _global_filter_lower_limit;
double _global_filter_upper_limit;
+ double _target_hits_max_adjustment_factor;
mutable NearestNeighborDistanceHeap _distance_heap;
std::vector<search::tensor::NearestNeighborIndex::Neighbor> _found_hits;
Algorithm _algorithm;
@@ -55,6 +56,7 @@ public:
double distance_threshold,
double global_filter_lower_limit,
double global_filter_upper_limit,
+ double target_hits_max_adjustment_factor,
const vespalib::Doom& doom);
NearestNeighborBlueprint(const NearestNeighborBlueprint&) = delete;
NearestNeighborBlueprint& operator=(const NearestNeighborBlueprint&) = delete;
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp
index 92c9a21db83..a71a8e6a49a 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp
@@ -23,9 +23,8 @@ template <bool strict, bool has_filter>
class NearestNeighborImpl : public NearestNeighborIterator
{
public:
-
NearestNeighborImpl(Params params_in)
- : NearestNeighborIterator(params_in),
+ : NearestNeighborIterator(std::move(params_in)),
_lastScore(0.0)
{
}
@@ -53,7 +52,7 @@ public:
}
void doUnpack(uint32_t docId) override {
- double score = params().distance_calc.function().to_rawscore(_lastScore);
+ double score = params().distance_calc->function().to_rawscore(_lastScore);
params().tfmd.setRawScore(docId, score);
params().distanceHeap.used(_lastScore);
}
@@ -62,7 +61,7 @@ public:
private:
double computeDistance(uint32_t docId, double limit) {
- return params().distance_calc.calc_with_limit(docId, limit);
+ return params().distance_calc->calc_with_limit(docId, limit);
}
double _lastScore;
@@ -75,14 +74,14 @@ namespace {
template <bool has_filter>
std::unique_ptr<NearestNeighborIterator>
-resolve_strict(bool strict, const NearestNeighborIterator::Params &params)
+resolve_strict(bool strict, NearestNeighborIterator::Params params)
{
if (strict) {
using NNI = NearestNeighborImpl<true, has_filter>;
- return std::make_unique<NNI>(params);
+ return std::make_unique<NNI>(std::move(params));
} else {
using NNI = NearestNeighborImpl<false, has_filter>;
- return std::make_unique<NNI>(params);
+ return std::make_unique<NNI>(std::move(params));
}
}
@@ -92,15 +91,15 @@ std::unique_ptr<NearestNeighborIterator>
NearestNeighborIterator::create(
bool strict,
fef::TermFieldMatchData &tfmd,
- const search::tensor::DistanceCalculator &distance_calc,
+ std::unique_ptr<search::tensor::DistanceCalculator> distance_calc,
NearestNeighborDistanceHeap &distanceHeap,
const GlobalFilter &filter)
{
- Params params(tfmd, distance_calc, distanceHeap, filter);
+ Params params(tfmd, std::move(distance_calc), distanceHeap, filter);
if (filter.is_active()) {
- return resolve_strict<true>(strict, params);
+ return resolve_strict<true>(strict, std::move(params));
} else {
- return resolve_strict<false>(strict, params);
+ return resolve_strict<false>(strict, std::move(params));
}
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h
index fe3f8d51d06..884f0f2f3eb 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h
@@ -24,29 +24,29 @@ public:
struct Params {
fef::TermFieldMatchData &tfmd;
- const search::tensor::DistanceCalculator &distance_calc;
+ std::unique_ptr<search::tensor::DistanceCalculator> distance_calc;
NearestNeighborDistanceHeap &distanceHeap;
const GlobalFilter &filter;
Params(fef::TermFieldMatchData &tfmd_in,
- const search::tensor::DistanceCalculator &distance_calc_in,
+ std::unique_ptr<search::tensor::DistanceCalculator> distance_calc_in,
NearestNeighborDistanceHeap &distanceHeap_in,
const GlobalFilter &filter_in)
: tfmd(tfmd_in),
- distance_calc(distance_calc_in),
+ distance_calc(std::move(distance_calc_in)),
distanceHeap(distanceHeap_in),
filter(filter_in)
{}
};
NearestNeighborIterator(Params params_in)
- : _params(params_in)
+ : _params(std::move(params_in))
{}
static std::unique_ptr<NearestNeighborIterator> create(
bool strict,
fef::TermFieldMatchData &tfmd,
- const search::tensor::DistanceCalculator &distance_calc,
+ std::unique_ptr<search::tensor::DistanceCalculator> distance_calc,
NearestNeighborDistanceHeap &distanceHeap,
const GlobalFilter &filter);
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp b/searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp
index 5759b4b74ea..f65c7103540 100644
--- a/searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp
@@ -30,14 +30,6 @@ DistanceCalculator::DistanceCalculator(const tensor::ITensorAttribute& attr_tens
assert(_dist_fun);
}
-DistanceCalculator::DistanceCalculator(const tensor::ITensorAttribute& attr_tensor,
- BoundDistanceFunction::UP function_in)
- : _attr_tensor(attr_tensor),
- _query_tensor(nullptr),
- _dist_fun(std::move(function_in))
-{
-}
-
DistanceCalculator::~DistanceCalculator() = default;
namespace {
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_calculator.h b/searchlib/src/vespa/searchlib/tensor/distance_calculator.h
index b65f4ff1868..f44bc0d33cf 100644
--- a/searchlib/src/vespa/searchlib/tensor/distance_calculator.h
+++ b/searchlib/src/vespa/searchlib/tensor/distance_calculator.h
@@ -29,12 +29,6 @@ public:
DistanceCalculator(const tensor::ITensorAttribute& attr_tensor,
const vespalib::eval::Value& query_tensor_in);
- /**
- * Only used by unit tests where ownership of query tensor and distance function is handled outside.
- */
- DistanceCalculator(const tensor::ITensorAttribute& attr_tensor,
- BoundDistanceFunction::UP function_in);
-
~DistanceCalculator();
const tensor::ITensorAttribute& attribute_tensor() const { return _attr_tensor; }
diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp
index 6d8cd5b206c..e0e910fb53f 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp
@@ -14,6 +14,7 @@
#include <algorithm>
#include <thread>
#include <cassert>
+#include <filesystem>
#include <future>
#include <vespa/log/log.h>
@@ -485,27 +486,20 @@ Domain::SerialNumList
Domain::scanDir()
{
SerialNumList res;
-
- FastOS_DirectoryScan dirScan(dir().c_str());
-
- const char *wantPrefix = _name.c_str();
- size_t wantPrefixLen = strlen(wantPrefix);
-
- while (dirScan.ReadNext()) {
- const char *ename = dirScan.GetName();
- if (strcmp(ename, ".") == 0 ||
- strcmp(ename, "..") == 0)
- continue;
- if (strncmp(ename, wantPrefix, wantPrefixLen) != 0)
- continue;
- if (ename[wantPrefixLen] != '-')
- continue;
- const char *p = ename + wantPrefixLen + 1;
- uint64_t num = strtoull(p, nullptr, 10);
- string checkName = fmt("%s-%016" PRIu64, _name.c_str(), num);
- if (strcmp(checkName.c_str(), ename) != 0)
- continue;
- res.push_back(static_cast<SerialNum>(num));
+ std::filesystem::directory_iterator dir_scan{std::filesystem::path(dir())};
+ vespalib::string prefix = _name + "-";
+ for (auto& entry : dir_scan) {
+ if (entry.is_regular_file()) {
+ vespalib::string ename = entry.path().filename().string();
+ if (ename.substr(0, prefix.size()) == prefix) {
+ const char *p = &ename[prefix.size()];
+ uint64_t num = strtoull(p, nullptr, 10);
+ string check_name = fmt("%s-%016" PRIu64, _name.c_str(), num);
+ if (check_name == ename) {
+ res.push_back(static_cast<SerialNum>(num));
+ }
+ }
+ }
}
std::sort(res.begin(), res.end());
return res;
diff --git a/searchlib/src/vespa/searchlib/util/fileutil.cpp b/searchlib/src/vespa/searchlib/util/fileutil.cpp
index cdeca9ce95c..c2f86224312 100644
--- a/searchlib/src/vespa/searchlib/util/fileutil.cpp
+++ b/searchlib/src/vespa/searchlib/util/fileutil.cpp
@@ -36,6 +36,7 @@ LoadedMmap::LoadedMmap(const vespalib::string &fileName)
if (sz) {
void *tmpBuffer = mmap(nullptr, sz, PROT_READ, MAP_PRIVATE, fd.fd(), 0);
if (tmpBuffer != MAP_FAILED) {
+ madvise(tmpBuffer, sz, MADV_DONTDUMP);
_mapSize = sz;
_mapBuffer = tmpBuffer;
uint32_t hl = GenericHeader::getMinSize();
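The one-line addition above advises the kernel with MADV_DONTDUMP so the freshly mapped file pages are skipped when a core dump is written. A minimal sketch of the same mmap-then-advise pattern outside the Vespa classes (the path is hypothetical, and MADV_DONTDUMP is Linux-specific, so it is guarded here):

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main() {
    int fd = open("/tmp/example.dat", O_RDONLY); // hypothetical input file
    if (fd < 0) {
        return 1;
    }
    struct stat st{};
    if (fstat(fd, &st) != 0 || st.st_size == 0) {
        close(fd);
        return 1;
    }
    void *buf = mmap(nullptr, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (buf != MAP_FAILED) {
#ifdef MADV_DONTDUMP
        // Ask the kernel to exclude this read-only mapping from core dumps.
        madvise(buf, st.st_size, MADV_DONTDUMP);
#endif
        // ... use the mapped contents ...
        munmap(buf, st.st_size);
    }
    close(fd);
    return 0;
}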
diff --git a/searchsummary/src/vespa/juniper/sumdesc.cpp b/searchsummary/src/vespa/juniper/sumdesc.cpp
index aa6aededa0c..e88f7971666 100644
--- a/searchsummary/src/vespa/juniper/sumdesc.cpp
+++ b/searchsummary/src/vespa/juniper/sumdesc.cpp
@@ -623,9 +623,8 @@ int SummaryDesc::complete_extended_token(unsigned char* start, ssize_t length,
/* Return a highlight tagged summary string from this summary
* description
*/
-std::string SummaryDesc::get_summary(const char* buffer, size_t bytes,
- const SummaryConfig* sumconf,
- size_t& char_size)
+std::string
+SummaryDesc::get_summary(const char* buffer, size_t bytes, const SummaryConfig* sumconf, size_t& char_size)
{
std::vector<char> s;
ssize_t prev_end = 0;
@@ -759,7 +758,7 @@ std::string SummaryDesc::get_summary(const char* buffer, size_t bytes,
s.size(), a.charLen());
_sumconf = NULL; // Not valid after this call.
char_size = a.charLen();
- return std::string(&s[0], s.size());
+ return std::string(s.begin(), s.end());
}
diff --git a/security-utils/src/main/java/com/yahoo/security/SharedKeyGenerator.java b/security-utils/src/main/java/com/yahoo/security/SharedKeyGenerator.java
index 22503292413..5582bd4d106 100644
--- a/security-utils/src/main/java/com/yahoo/security/SharedKeyGenerator.java
+++ b/security-utils/src/main/java/com/yahoo/security/SharedKeyGenerator.java
@@ -105,7 +105,7 @@ public class SharedKeyGenerator {
private static AeadCipher makeAesGcmCipher(SecretSharedKey secretSharedKey, boolean forEncryption) {
var aeadParams = new AEADParameters(new KeyParameter(secretSharedKey.secretKey().getEncoded()),
AES_GCM_AUTH_TAG_BITS, FIXED_96BIT_IV_FOR_SINGLE_USE_KEY);
- var cipher = new GCMBlockCipher(new AESEngine());
+ var cipher = GCMBlockCipher.newInstance(AESEngine.newInstance());
cipher.init(forEncryption, aeadParams);
return AeadCipher.of(cipher);
}
diff --git a/storage/specs/bucketinfo/bucketinfo.tla b/storage/specs/bucketinfo/bucketinfo.tla
index ba540ae1763..8ce604a029c 100644
--- a/storage/specs/bucketinfo/bucketinfo.tla
+++ b/storage/specs/bucketinfo/bucketinfo.tla
@@ -1,6 +1,6 @@
------------------------------ MODULE bucketinfo ------------------------------
-EXTENDS Naturals, FiniteSets, Sequences, Integers, TLC
+EXTENDS FiniteSets, Sequences, Integers, TLC
(***************************************************************************)
(* This spec models the state synchronization mechanisms for a single data *)
@@ -42,13 +42,13 @@ CONSTANTS DistributorNodes, ContentNode, ClusterStates,
NodeEpochs, MutatingOps, Null
ASSUME /\ IsFiniteSet(DistributorNodes)
- /\ Cardinality(DistributorNodes) > 0
+ /\ DistributorNodes # {}
/\ IsFiniteSet(ClusterStates)
- /\ Cardinality(ClusterStates) > 0
+ /\ ClusterStates # {}
/\ IsFiniteSet(NodeEpochs)
- /\ Cardinality(NodeEpochs) > 0
+ /\ NodeEpochs # {}
/\ IsFiniteSet(MutatingOps)
- /\ Cardinality(MutatingOps) > 0
+ /\ MutatingOps # {}
/\ ClusterStates \subseteq (Nat \ {0})
/\ NodeEpochs \subseteq (Nat \ {0})
/\ DistributorNodes \intersect {ContentNode} = {}
@@ -61,7 +61,7 @@ variables
messages = {}; \* model messages as unordered set to test reordering "for free"
define
- SeqToSet(s) == {s[i]: i \in 1..Len(s)}
+ SeqToSet(s) == {s[i]: i \in DOMAIN s}
HasMessage(t, d) == \E m \in messages: (m.type = t /\ m.dest = d)
@@ -368,11 +368,11 @@ end process;
end algorithm;*)
-\* BEGIN TRANSLATION (chksum(pcal) = "7a179595" /\ chksum(tla) = "2d89f470")
+\* BEGIN TRANSLATION (chksum(pcal) = "7a26a2a5" /\ chksum(tla) = "379d6205")
VARIABLES proposedMuts, publishedStates, storEpoch, messages
(* define statement *)
-SeqToSet(s) == {s[i]: i \in 1..Len(s)}
+SeqToSet(s) == {s[i]: i \in DOMAIN s}
HasMessage(t, d) == \E m \in messages: (m.type = t /\ m.dest = d)
diff --git a/storage/src/tests/bucketdb/bucketmanagertest.cpp b/storage/src/tests/bucketdb/bucketmanagertest.cpp
index ea3a782d432..ca3aae78270 100644
--- a/storage/src/tests/bucketdb/bucketmanagertest.cpp
+++ b/storage/src/tests/bucketdb/bucketmanagertest.cpp
@@ -22,7 +22,7 @@
#include <vespa/vdslib/state/random.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/state/clusterstate.h>
-#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <future>
@@ -45,8 +45,7 @@ struct TestBucketInfo {
uint32_t count;
uint32_t partition;
- api::BucketInfo getInfo() const
- { return api::BucketInfo(crc, count, size); }
+ api::BucketInfo getInfo() const { return {crc, count, size}; }
};
std::ostream& operator<<(std::ostream& out, const TestBucketInfo& info) {
@@ -68,19 +67,15 @@ public:
uint32_t _emptyBuckets;
document::Document::SP _document;
- ~BucketManagerTest();
+ ~BucketManagerTest() override;
void setupTestEnvironment(bool fakePersistenceLayer = true,
bool noDelete = false);
void addBucketsToDB(uint32_t count);
- bool wasBlockedDueToLastModified(api::StorageMessage* msg,
- uint64_t lastModified);
- void insertSingleBucket(const document::BucketId& bucket,
- const api::BucketInfo& info);
+ bool wasBlockedDueToLastModified(api::StorageMessage* msg, uint64_t lastModified);
+ void insertSingleBucket(const document::BucketId& bucket, const api::BucketInfo& info);
void waitUntilRequestsAreProcessing(size_t nRequests = 1);
- void doTestMutationOrdering(
- ConcurrentOperationFixture& fixture,
- const TestParams& params);
+ void doTestMutationOrdering(ConcurrentOperationFixture& fixture, const TestParams& params);
void doTestConflictingReplyIsEnqueued(
const document::BucketId& bucket,
const api::StorageCommand::SP& treeMutationCmd,
@@ -92,14 +87,13 @@ public:
const document::BucketId& bucketForSplit,
api::Timestamp mutationTimestamp);
void sendSingleBucketInfoRequest(const document::BucketId& id);
- void assertRequestWithBadHashIsRejected(
- ConcurrentOperationFixture& fixture);
+ void assertRequestWithBadHashIsRejected(ConcurrentOperationFixture& fixture);
protected:
- void update_min_used_bits() {
+ void update_min_used_bits() const {
_manager->updateMinUsedBits();
}
- void trigger_metric_manager_update() {
+ void trigger_metric_manager_update() const {
std::mutex l;
_manager->updateMetrics(BucketManager::MetricLockGuard(l));
}
@@ -137,8 +131,7 @@ std::string getMkDirDisk(const std::string & rootFolder, int disk) {
return os.str();
}
-void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer,
- bool noDelete)
+void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer, bool noDelete)
{
vdstestlib::DirConfig config(getStandardConfig(true, "bucketmanagertest"));
std::string rootFolder = getRootFolder(config);
@@ -149,8 +142,7 @@ void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer,
assert(system(getMkDirDisk(rootFolder, 1).c_str()) == 0);
auto repo = std::make_shared<const DocumentTypeRepo>(
- *ConfigGetter<DocumenttypesConfig>::getConfig(
- "config-doctypes", FileSpec("../config-doctypes.cfg")));
+ *ConfigGetter<DocumenttypesConfig>::getConfig("config-doctypes", FileSpec("../config-doctypes.cfg")));
_top = std::make_unique<DummyStorageLink>();
_node = std::make_unique<TestServiceLayerApp>(NodeIndex(0), config.getConfigId());
_node->setTypeRepo(repo);
@@ -164,9 +156,9 @@ void BucketManagerTest::setupTestEnvironment(bool fakePersistenceLayer,
_bottom = bottom.get();
_top->push_back(std::move(bottom));
} else {
- auto bottom = std::make_unique<FileStorManager>(
- config::ConfigUri(config.getConfigId()),
- _node->getPersistenceProvider(), _node->getComponentRegister(), *_node, _node->get_host_info());
+ auto bottom = std::make_unique<FileStorManager>(config::ConfigUri(config.getConfigId()),
+ _node->getPersistenceProvider(), _node->getComponentRegister(),
+ *_node, _node->get_host_info());
_top->push_back(std::move(bottom));
}
// Generate a doc to use for testing..
@@ -183,8 +175,7 @@ void BucketManagerTest::addBucketsToDB(uint32_t count)
document::BucketId id(16, randomizer.nextUint32());
id = id.stripUnused();
if (_bucketInfo.empty()) {
- id = _node->getBucketIdFactory().getBucketId(
- _document->getId()).stripUnused();
+ id = _node->getBucketIdFactory().getBucketId(_document->getId()).stripUnused();
}
TestBucketInfo info;
info.crc = randomizer.nextUint32();
@@ -204,16 +195,13 @@ void BucketManagerTest::addBucketsToDB(uint32_t count)
++_emptyBuckets;
for (const auto& bi : _bucketInfo) {
bucketdb::StorageBucketInfo entry;
- entry.setBucketInfo(api::BucketInfo(bi.second.crc,
- bi.second.count,
- bi.second.size));
+ entry.setBucketInfo(api::BucketInfo(bi.second.crc, bi.second.count, bi.second.size));
_node->getStorageBucketDatabase().insert(bi.first, entry, "foo");
}
}
bool
-BucketManagerTest::wasBlockedDueToLastModified(api::StorageMessage* msg,
- uint64_t lastModified)
+BucketManagerTest::wasBlockedDueToLastModified(api::StorageMessage* msg, uint64_t lastModified)
{
setupTestEnvironment();
document::BucketId id(16, 1);
@@ -238,8 +226,7 @@ BucketManagerTest::wasBlockedDueToLastModified(api::StorageMessage* msg,
// Check that bucket database now has the operation's timestamp as last modified.
{
- StorBucketDatabase::WrappedEntry entry(
- _node->getStorageBucketDatabase().get(id, "foo"));
+ StorBucketDatabase::WrappedEntry entry(_node->getStorageBucketDatabase().get(id, "foo"));
assert(entry->info.getLastModified() == lastModified);
}
@@ -280,8 +267,7 @@ TEST_F(BucketManagerTest, distribution_bit_change_on_create_bucket){
update_min_used_bits();
EXPECT_EQ(16u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
- std::shared_ptr<api::CreateBucketCommand> cmd(
- new api::CreateBucketCommand(makeDocumentBucket(document::BucketId(4, 5678))));
+ auto cmd = std::make_shared<api::CreateBucketCommand>(makeDocumentBucket(document::BucketId(4, 5678)));
_top->sendDown(cmd);
EXPECT_EQ(4u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
}
@@ -290,8 +276,7 @@ TEST_F(BucketManagerTest, min_used_bits_from_component_is_honored) {
setupTestEnvironment();
// Let these differ in order to test state update behavior.
_node->getComponentRegister().getMinUsedBitsTracker().setMinUsedBits(10);
- lib::NodeState ns(
- *_node->getStateUpdater().getReportedNodeState());
+ lib::NodeState ns(*_node->getStateUpdater().getReportedNodeState());
ns.setMinUsedBits(13);
_node->getStateUpdater().setReportedNodeState(ns);
addBucketsToDB(30);
@@ -301,8 +286,7 @@ TEST_F(BucketManagerTest, min_used_bits_from_component_is_honored) {
// 12 >= 10, so no update of reported state (left at 13; this should of
// course not happen in practice, but used for faking in the test)
- std::shared_ptr<api::CreateBucketCommand> cmd(
- new api::CreateBucketCommand(makeDocumentBucket(document::BucketId(12, 5678))));
+ auto cmd = std::make_shared<api::CreateBucketCommand>(makeDocumentBucket(document::BucketId(12, 5678)));
_top->sendDown(cmd);
EXPECT_EQ(13u, _node->getStateUpdater().getReportedNodeState()->getMinUsedBits());
}
@@ -322,20 +306,15 @@ TEST_F(BucketManagerTest, DISABLED_request_bucket_info_with_state) {
_node->setClusterState(states.back());
for (uint32_t i=0; i<states.size(); ++i) {
- api::SetSystemStateCommand::SP cmd(
- new api::SetSystemStateCommand(states[i]));
- _manager->onDown(cmd);
+ _manager->onDown(std::make_shared<api::SetSystemStateCommand>(states[i]));
}
// Send a request bucket info command that will be outdated and failed.
- std::shared_ptr<api::RequestBucketInfoCommand> cmd1(
- new api::RequestBucketInfoCommand(makeBucketSpace(), 0, states[1]));
+ auto cmd1 = std::make_shared<api::RequestBucketInfoCommand>(makeBucketSpace(), 0, states[1]);
// Send two request bucket info commands that will be processed together
// when the bucket manager is idle, as states are equivalent
- std::shared_ptr<api::RequestBucketInfoCommand> cmd2(
- new api::RequestBucketInfoCommand(makeBucketSpace(), 0, states[2]));
- std::shared_ptr<api::RequestBucketInfoCommand> cmd3(
- new api::RequestBucketInfoCommand(makeBucketSpace(), 0, states[3]));
+ auto cmd2 = std::make_shared<api::RequestBucketInfoCommand>(makeBucketSpace(), 0, states[2]);
+ auto cmd3 = std::make_shared<api::RequestBucketInfoCommand>(makeBucketSpace(), 0, states[3]);
// Tag server initialized before starting
_top->open();
@@ -355,34 +334,28 @@ TEST_F(BucketManagerTest, DISABLED_request_bucket_info_with_state) {
std::map<uint64_t, api::RequestBucketInfoReply::SP> replies;
for (uint32_t i=0; i<3; ++i) {
replies[_top->getReply(i)->getMsgId()]
- = std::dynamic_pointer_cast<api::RequestBucketInfoReply>(
- _top->getReply(i));
+ = std::dynamic_pointer_cast<api::RequestBucketInfoReply>(_top->getReply(i));
}
- std::shared_ptr<api::RequestBucketInfoReply> reply1(
- replies[cmd1->getMsgId()]);
- std::shared_ptr<api::RequestBucketInfoReply> reply2(
- replies[cmd2->getMsgId()]);
- std::shared_ptr<api::RequestBucketInfoReply> reply3(
- replies[cmd3->getMsgId()]);
+ auto reply1 = replies[cmd1->getMsgId()];
+ auto reply2 = replies[cmd2->getMsgId()];
+ auto reply3 = replies[cmd3->getMsgId()];
_top->reset();
ASSERT_TRUE(reply1.get());
ASSERT_TRUE(reply2.get());
ASSERT_TRUE(reply3.get());
EXPECT_EQ(api::ReturnCode(api::ReturnCode::REJECTED,
- "Ignoring bucket info request for cluster state version 1 as "
- "versions from version 2 differs from this state."),
- reply1->getResult());
+ "Ignoring bucket info request for cluster state version 1 as "
+ "versions from version 2 differs from this state."),
+ reply1->getResult());
EXPECT_EQ(api::ReturnCode(api::ReturnCode::REJECTED,
- "There is already a newer bucket info request for "
- "this node from distributor 0"),
- reply2->getResult());
+ "There is already a newer bucket info request for this node from distributor 0"),
+ reply2->getResult());
EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK),
- reply3->getResult());
+ reply3->getResult());
api::RequestBucketInfoReply::Entry entry;
ASSERT_EQ(18u, reply3->getBucketInfo().size());
- entry = api::RequestBucketInfoReply::Entry(
- document::BucketId(16, 0xe8c8), api::BucketInfo(0x79d04f78, 11153, 1851385240u));
+ entry = api::RequestBucketInfoReply::Entry(document::BucketId(16, 0xe8c8), api::BucketInfo(0x79d04f78, 11153, 1851385240u));
EXPECT_EQ(entry, reply3->getBucketInfo()[0]);
}
}
@@ -407,9 +380,8 @@ TEST_F(BucketManagerTest, request_bucket_info_with_list) {
ASSERT_TRUE(reply.get());
EXPECT_EQ(api::ReturnCode(api::ReturnCode::OK), reply->getResult());
ASSERT_EQ(1u, reply->getBucketInfo().size());
- api::RequestBucketInfoReply::Entry entry(
- document::BucketId(16, 0xe8c8),
- api::BucketInfo(0x79d04f78, 11153, 1851385240u));
+ api::RequestBucketInfoReply::Entry entry(document::BucketId(16, 0xe8c8),
+ api::BucketInfo(0x79d04f78, 11153, 1851385240u));
EXPECT_EQ(entry, reply->getBucketInfo()[0]);
}
}
@@ -421,8 +393,7 @@ TEST_F(BucketManagerTest, swallow_notify_bucket_change_reply) {
_node->getDoneInitializeHandler().notifyDoneInitializing();
_top->doneInit();
- api::NotifyBucketChangeCommand cmd(makeDocumentBucket(document::BucketId(1, 16)),
- api::BucketInfo());
+ api::NotifyBucketChangeCommand cmd(makeDocumentBucket(document::BucketId(1, 16)), api::BucketInfo());
auto reply = std::make_shared<api::NotifyBucketChangeReply>(cmd);
_top->sendDown(reply);
@@ -444,8 +415,7 @@ TEST_F(BucketManagerTest, metrics_generation) {
}
}
entry.setBucketInfo(info);
- _node->getStorageBucketDatabase().insert(document::BucketId(16, i),
- entry, "foo");
+ _node->getStorageBucketDatabase().insert(document::BucketId(16, i), entry, "foo");
}
_node->getDoneInitializeHandler().notifyDoneInitializing();
_top->doneInit();
@@ -518,11 +488,24 @@ TEST_F(BucketManagerTest, metrics_are_tracked_per_bucket_space) {
EXPECT_EQ(0, global_m->second->ready_buckets.getLast());
verify_db_memory_metrics_present(global_m->second->bucket_db_metrics);
+ using namespace vespalib::jsonstream;
+ vespalib::asciistream ascii;
+ vespalib::JsonStream jsonStream(ascii, false);
+ jsonStream << Object() << "values" << Array();
+ _manager->report(jsonStream);
+ jsonStream << End();
+ EXPECT_EQ(std::string("{\"values\":["
+ "{\"name\":\"vds.datastored.bucket_space.buckets_total\",\"values\":{\"last\":1},\"dimensions\":{\"bucketSpace\":\"global\"}},"
+ "{\"name\":\"vds.datastored.bucket_space.buckets_total\",\"values\":{\"last\":1},\"dimensions\":{\"bucketSpace\":\"default\"}},"
+ "{\"name\":\"vds.datastored.alldisks.docs\",\"values\":{\"last\":250}},"
+ "{\"name\":\"vds.datastored.alldisks.bytes\",\"values\":{\"last\":500}},"
+ "{\"name\":\"vds.datastored.alldisks.buckets\",\"values\":{\"last\":2}}"
+ "]"),
+ std::string(ascii.c_str()));
}
void
-BucketManagerTest::insertSingleBucket(const document::BucketId& bucket,
- const api::BucketInfo& info)
+BucketManagerTest::insertSingleBucket(const document::BucketId& bucket, const api::BucketInfo& info)
{
bucketdb::StorageBucketInfo entry;
entry.setBucketInfo(info);
@@ -542,9 +525,7 @@ namespace {
struct WithBuckets {
std::map<document::BucketId, api::BucketInfo> _bucketsAndInfo;
- WithBuckets& add(const document::BucketId& id,
- const api::BucketInfo& info)
- {
+ WithBuckets& add(const document::BucketId& id, const api::BucketInfo& info) {
_bucketsAndInfo[id] = info;
return *this;
}
@@ -599,22 +580,18 @@ public:
return _self._node->getStorageBucketDatabase().get(bucket, "foo");
}
- auto createRemoveCommand(const document::BucketId& bucket,
- api::Timestamp timestamp = 123456) const
+ auto createRemoveCommand(const document::BucketId& bucket, api::Timestamp timestamp = 123456) const
{
// Note: this is a dummy message; its contained document ID will not
// map to the provided bucket ID (at least it's extremely unlikely..)
- return std::make_shared<api::RemoveCommand>(
- makeDocumentBucket(bucket),
- document::DocumentId("id:foo:testdoctype1::bar"),
- timestamp);
+ return std::make_shared<api::RemoveCommand>(makeDocumentBucket(bucket),
+ document::DocumentId("id:foo:testdoctype1::bar"), timestamp);
}
auto createPutCommand(const document::BucketId& bucket) const {
auto doc = _self._node->getTestDocMan().createDocument(
"a foo walks into a bar", "id:foo:testdoctype1::bar1");
- return std::make_shared<api::PutCommand>(
- makeDocumentBucket(bucket), std::move(doc), api::Timestamp(123456));
+ return std::make_shared<api::PutCommand>(makeDocumentBucket(bucket), std::move(doc), api::Timestamp(123456));
}
auto createUpdateCommand(const document::BucketId& bucket) const {
@@ -623,8 +600,7 @@ public:
*_self._node->getTestDocMan().getTypeRepo()
.getDocumentType("testdoctype1"),
document::DocumentId("id:foo:testdoctype1::bar2"));
- return std::make_shared<api::UpdateCommand>(
- makeDocumentBucket(bucket), update, api::Timestamp(123456));
+ return std::make_shared<api::UpdateCommand>(makeDocumentBucket(bucket), update, api::Timestamp(123456));
}
auto createFullFetchCommand() const {
@@ -639,10 +615,6 @@ public:
return std::make_shared<api::RequestBucketInfoCommand>(makeBucketSpace(), 0, *_state, hash);
}
- auto createFullFetchCommandWithHash(document::BucketSpace space, vespalib::stringref hash) const {
- return std::make_shared<api::RequestBucketInfoCommand>(space, 0, *_state, hash);
- }
-
auto acquireBucketLockAndSendInfoRequest(const document::BucketId& bucket) {
auto guard = acquireBucketLock(bucket);
// Send down processing command which will block.
@@ -676,13 +648,11 @@ public:
}
auto awaitAndGetReplies(size_t nReplies) {
- _self._top->waitForMessages(
- nReplies, BucketManagerTest::MESSAGE_WAIT_TIME);
+ _self._top->waitForMessages(nReplies, BucketManagerTest::MESSAGE_WAIT_TIME);
return _self._top->getReplies();
}
- void assertOrderedAfterBucketReply(size_t nBucketReplies,
- const api::MessageType& msgType)
+ void assertOrderedAfterBucketReply(size_t nBucketReplies, const api::MessageType& msgType)
{
const size_t nTotal = nBucketReplies + 1;
auto replies = awaitAndGetReplies(nTotal);
@@ -736,15 +706,6 @@ group[2].nodes[2].index 5
return GlobalBucketSpaceDistributionConverter::convert_to_global(*default_distr);
}
- void set_grouped_distribution_configs() {
- auto default_distr = default_grouped_distribution();
- _self._node->getComponentRegister().getBucketSpaceRepo()
- .get(document::FixedBucketSpaces::default_space()).setDistribution(std::move(default_distr));
- auto global_distr = derived_global_grouped_distribution();
- _self._node->getComponentRegister().getBucketSpaceRepo()
- .get(document::FixedBucketSpaces::global_space()).setDistribution(std::move(global_distr));
- }
-
private:
BucketManagerTest& _self;
std::shared_ptr<lib::ClusterState> _state;
@@ -767,8 +728,7 @@ TEST_F(BucketManagerTest, split_reply_ordered_after_bucket_reply) {
// Let bucket manager breathe again.
guard.unlock();
- fixture.assertOrderedAfterBucketReply(
- 1, api::MessageType::SPLITBUCKET_REPLY);
+ fixture.assertOrderedAfterBucketReply(1, api::MessageType::SPLITBUCKET_REPLY);
}
TEST_F(BucketManagerTest, join_reply_ordered_after_bucket_reply) {
@@ -787,8 +747,7 @@ TEST_F(BucketManagerTest, join_reply_ordered_after_bucket_reply) {
fixture.bounceWithReply(*joinCmd);
guard.unlock();
- fixture.assertOrderedAfterBucketReply(
- 1, api::MessageType::JOINBUCKETS_REPLY);
+ fixture.assertOrderedAfterBucketReply(1, api::MessageType::JOINBUCKETS_REPLY);
}
// Technically, deletes being ordered after bucket info replies won't help
@@ -809,8 +768,7 @@ TEST_F(BucketManagerTest, delete_reply_ordered_after_bucket_reply) {
guard.unlock();
- fixture.assertOrderedAfterBucketReply(
- 1, api::MessageType::DELETEBUCKET_REPLY);
+ fixture.assertOrderedAfterBucketReply(1, api::MessageType::DELETEBUCKET_REPLY);
}
TEST_F(BucketManagerTest, only_enqueue_when_processing_request) {
@@ -860,8 +818,7 @@ TEST_F(BucketManagerTest, order_replies_after_bucket_specific_request) {
infoRoundtrip.get();
// At this point, we know 2 messages are in the top queue since the
// async future guarantees this for completion.
- fixture.assertOrderedAfterBucketReply(
- 1, api::MessageType::SPLITBUCKET_REPLY);
+ fixture.assertOrderedAfterBucketReply(1, api::MessageType::SPLITBUCKET_REPLY);
}
// Test is similar to order_replies_after_bucket_specific_request, but has
@@ -897,8 +854,7 @@ TEST_F(BucketManagerTest, queued_replies_only_dispatched_when_all_processing_don
singleBucketInfo.get();
fullFetch.get();
- fixture.assertOrderedAfterBucketReply(
- 2, api::MessageType::SPLITBUCKET_REPLY);
+ fixture.assertOrderedAfterBucketReply(2, api::MessageType::SPLITBUCKET_REPLY);
}
// Hide boring, repetitive code to allow for chaining of setters (and auto-
@@ -927,9 +883,7 @@ TestParams::TestParams(const TestParams &) = default;
TestParams::~TestParams() = default;
void
-BucketManagerTest::doTestMutationOrdering(
- ConcurrentOperationFixture& fixture,
- const TestParams& params)
+BucketManagerTest::doTestMutationOrdering(ConcurrentOperationFixture& fixture, const TestParams& params)
{
fixture.setUp(WithBuckets()
.add(params.bucket(), api::BucketInfo(50, 100, 200)));
@@ -948,9 +902,7 @@ BucketManagerTest::doTestMutationOrdering(
// to avoid test deadlocks, and priorities may alter the execution order
// anyway. The important thing is that reply orders are not altered.
fixture.bounceWithReply(*params.treeMutation());
- fixture.bounceWithReply(*params.documentMutation(),
- api::ReturnCode::OK,
- params.remappedTo());
+ fixture.bounceWithReply(*params.documentMutation(), api::ReturnCode::OK, params.remappedTo());
guard.unlock();
fixture.assertReplyOrdering(params.expectedOrdering());
@@ -979,18 +931,16 @@ BucketManagerTest::doTestConflictingReplyIsEnqueued(
TEST_F(BucketManagerTest, mutation_replies_for_split_bucket_are_enqueued) {
document::BucketId bucket(17, 0);
- doTestConflictingReplyIsEnqueued(
- bucket,
- std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(bucket)),
- api::MessageType::SPLITBUCKET_REPLY);
+ doTestConflictingReplyIsEnqueued(bucket,
+ std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(bucket)),
+ api::MessageType::SPLITBUCKET_REPLY);
}
TEST_F(BucketManagerTest, mutation_replies_for_deleted_bucket_are_enqueued) {
document::BucketId bucket(17, 0);
- doTestConflictingReplyIsEnqueued(
- bucket,
- std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bucket)),
- api::MessageType::DELETEBUCKET_REPLY);
+ doTestConflictingReplyIsEnqueued(bucket,
+ std::make_shared<api::DeleteBucketCommand>(makeDocumentBucket(bucket)),
+ api::MessageType::DELETEBUCKET_REPLY);
}
TEST_F(BucketManagerTest, mutation_replies_for_joined_bucket_are_enqueued) {
@@ -1074,14 +1024,11 @@ BucketManagerTest::scheduleBucketInfoRequestWithConcurrentOps(
const document::BucketId& bucketForSplit,
api::Timestamp mutationTimestamp)
{
- auto mutation(
- fixture.createRemoveCommand(bucketForRemove, mutationTimestamp));
+ auto mutation(fixture.createRemoveCommand(bucketForRemove, mutationTimestamp));
_top->sendDown(mutation);
- auto guard = fixture.acquireBucketLockAndSendInfoRequest(
- bucketForRemove);
+ auto guard = fixture.acquireBucketLockAndSendInfoRequest(bucketForRemove);
- auto conflictingOp(
- std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(bucketForSplit)));
+ auto conflictingOp = std::make_shared<api::SplitBucketCommand>(makeDocumentBucket(bucketForSplit));
_top->sendDown(conflictingOp);
fixture.bounceWithReply(*conflictingOp);
fixture.bounceWithReply(*mutation);
@@ -1100,9 +1047,7 @@ TEST_F(BucketManagerTest, bucket_conflict_set_is_cleared_between_blocking_reques
// Do a single round of starting and completing a request bucket info
// command with queueing and adding of `firstConflictBucket` to the set
// of conflicting buckets.
- scheduleBucketInfoRequestWithConcurrentOps(
- fixture, firstConflictBucket,
- firstConflictBucket, api::Timestamp(1000));
+ scheduleBucketInfoRequestWithConcurrentOps(fixture, firstConflictBucket, firstConflictBucket, api::Timestamp(1000));
// Barrier for completion of first round of replies. Subsequently remove
// all replies to get a clean slate.
@@ -1112,9 +1057,7 @@ TEST_F(BucketManagerTest, bucket_conflict_set_is_cleared_between_blocking_reques
// Do a second round with a different bucket as the conflict. The
// mutation towards the first conflict bucket should now _not_ be queued
// as it was for an entirely different request bucket round.
- scheduleBucketInfoRequestWithConcurrentOps(
- fixture, firstConflictBucket,
- secondConflictBucket, api::Timestamp(1001));
+ scheduleBucketInfoRequestWithConcurrentOps(fixture, firstConflictBucket, secondConflictBucket, api::Timestamp(1001));
// Remove is not ordered after the split here since it should not be
// queued.
@@ -1184,8 +1127,7 @@ TEST_F(BucketManagerTest, conflict_set_only_cleared_after_all_bucket_requests_do
}
void
-BucketManagerTest::assertRequestWithBadHashIsRejected(
- ConcurrentOperationFixture& fixture)
+BucketManagerTest::assertRequestWithBadHashIsRejected(ConcurrentOperationFixture& fixture)
{
// Test by default sets up 10 nodes in config. Pretend we only know of 3.
auto infoCmd = fixture.createFullFetchCommandWithHash("(0;0;1;2)");
diff --git a/storage/src/tests/common/testhelper.cpp b/storage/src/tests/common/testhelper.cpp
index 8580ba8a8d2..1aecd72172a 100644
--- a/storage/src/tests/common/testhelper.cpp
+++ b/storage/src/tests/common/testhelper.cpp
@@ -48,7 +48,6 @@ vdstestlib::DirConfig getStandardConfig(bool storagenode, const std::string & ro
config = &dc.addConfig("messagebus");
config = &dc.addConfig("stor-prioritymapping");
config = &dc.addConfig("stor-bucketdbupdater");
- config = &dc.addConfig("stor-bucket-init");
config = &dc.addConfig("metricsmanager");
config->set("consumer[2]");
config->set("consumer[0].name", "\"status\"");
@@ -81,9 +80,6 @@ vdstestlib::DirConfig getStandardConfig(bool storagenode, const std::string & ro
// Don't want test to call exit()
config->set("fail_disk_after_error_count", "0");
config = &dc.addConfig("stor-bouncer");
- config = &dc.addConfig("stor-integritychecker");
- config = &dc.addConfig("stor-bucketmover");
- config = &dc.addConfig("stor-messageforwarder");
config = &dc.addConfig("stor-server");
config->set("cluster_name", clusterName);
config->set("enable_dead_lock_detector", "false");
diff --git a/storage/src/tests/distributor/btree_bucket_database_test.cpp b/storage/src/tests/distributor/btree_bucket_database_test.cpp
index 14d5a4142a8..40575cacfba 100644
--- a/storage/src/tests/distributor/btree_bucket_database_test.cpp
+++ b/storage/src/tests/distributor/btree_bucket_database_test.cpp
@@ -19,15 +19,15 @@ using document::BucketId;
namespace {
-BucketCopy BC(uint32_t node_idx, uint32_t state) {
+BucketCopy BC(uint16_t node_idx, uint32_t state) {
api::BucketInfo info(0x123, state, state);
- return BucketCopy(0, node_idx, info);
+ return {0, node_idx, info};
}
BucketInfo BI(uint32_t node_idx, uint32_t state) {
BucketInfo bi;
- bi.addNode(BC(node_idx, state), toVector<uint16_t>(0));
+ bi.addNode(BC(node_idx, state), {0});
return bi;
}
diff --git a/storage/src/tests/distributor/bucketdatabasetest.cpp b/storage/src/tests/distributor/bucketdatabasetest.cpp
index fcc64e0cccf..032b8ad8a9c 100644
--- a/storage/src/tests/distributor/bucketdatabasetest.cpp
+++ b/storage/src/tests/distributor/bucketdatabasetest.cpp
@@ -1,9 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "bucketdatabasetest.h"
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/vespalib/util/benchmark_timer.h>
#include <chrono>
-#include <iomanip>
#include <algorithm>
namespace storage::distributor {
@@ -16,21 +16,21 @@ void BucketDatabaseTest::SetUp() {
namespace {
-BucketCopy BC(uint32_t nodeIdx) {
+BucketCopy BC(uint16_t nodeIdx) {
return BucketCopy(0, nodeIdx, api::BucketInfo());
}
-BucketInfo BI(uint32_t nodeIdx) {
+BucketInfo BI(uint16_t nodeIdx) {
BucketInfo bi;
- bi.addNode(BC(nodeIdx), toVector<uint16_t>(0));
+ bi.addNode(BC(nodeIdx), {0});
return bi;
}
-BucketInfo BI3(uint32_t node0, uint32_t node1, uint32_t node2) {
+BucketInfo BI3(uint16_t node0, uint16_t node1, uint16_t node2) {
BucketInfo bi;
- bi.addNode(BC(node0), toVector<uint16_t>(node0, node1, node2));
- bi.addNode(BC(node1), toVector<uint16_t>(node0, node1, node2));
- bi.addNode(BC(node2), toVector<uint16_t>(node0, node1, node2));
+ bi.addNode(BC(node0), {node0, node1, node2});
+ bi.addNode(BC(node1), {node0, node1, node2});
+ bi.addNode(BC(node2), {node0, node1, node2});
return bi;
}
diff --git a/storage/src/tests/distributor/bucketdatabasetest.h b/storage/src/tests/distributor/bucketdatabasetest.h
index 33f914f8fd2..f24a62728d3 100644
--- a/storage/src/tests/distributor/bucketdatabasetest.h
+++ b/storage/src/tests/distributor/bucketdatabasetest.h
@@ -2,7 +2,6 @@
#pragma once
#include <vespa/storage/bucketdb/bucketdatabase.h>
-#include <vespa/storage/storageutil/utils.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <functional>
@@ -11,19 +10,14 @@ namespace storage::distributor {
struct BucketDatabaseTest : public ::testing::TestWithParam<std::shared_ptr<BucketDatabase>> {
void SetUp() override ;
- std::string doFindParents(const std::vector<document::BucketId>& ids,
- const document::BucketId& searchId);
- std::string doFindAll(const std::vector<document::BucketId>& ids,
- const document::BucketId& searchId);
+ std::string doFindParents(const std::vector<document::BucketId>& ids, const document::BucketId& searchId);
+ std::string doFindAll(const std::vector<document::BucketId>& ids, const document::BucketId& searchId);
document::BucketId doCreate(const std::vector<document::BucketId>& ids,
- uint32_t minBits,
- const document::BucketId& wantedId);
+ uint32_t minBits, const document::BucketId& wantedId);
BucketDatabase& db() noexcept { return *GetParam(); }
- using UBoundFunc = std::function<
- document::BucketId(const BucketDatabase&,
- const document::BucketId&)>;
+ using UBoundFunc = std::function<document::BucketId(const BucketDatabase&, const document::BucketId&)>;
void doTestUpperBound(const UBoundFunc& f);
};
diff --git a/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp b/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
index c4536c6fa2c..57a7fb529be 100644
--- a/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
+++ b/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
@@ -7,7 +7,6 @@
#include <vespa/vespalib/util/memoryusage.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <string>
-#include <sstream>
namespace storage::distributor {
@@ -16,19 +15,16 @@ using namespace ::testing;
struct BucketDBMetricUpdaterTest : Test {
void visitBucketWith2Copies1Trusted(BucketDBMetricUpdater& metricUpdater);
- void visitBucketWith2CopiesBothTrusted(
- BucketDBMetricUpdater& metricUpdater);
+ void visitBucketWith2CopiesBothTrusted(BucketDBMetricUpdater& metricUpdater);
void visitBucketWith1Copy(BucketDBMetricUpdater& metricUpdater);
- using NodeToReplicasMap = std::unordered_map<uint16_t, uint32_t>;
+ using NodeToReplicasMap = MinReplicaMap;
NodeToReplicasMap replicaStatsOf(BucketDBMetricUpdater& metricUpdater);
BucketDBMetricUpdaterTest();
};
-BucketDBMetricUpdaterTest::BucketDBMetricUpdaterTest()
-{
-}
+BucketDBMetricUpdaterTest::BucketDBMetricUpdaterTest() = default;
namespace {
@@ -38,8 +34,6 @@ void addNode(BucketInfo& info, uint16_t node, uint32_t crc) {
info.addNode(BucketCopy(1234, node, apiInfo), order);
}
-using Trusted = bool;
-
BucketInfo
makeInfo(uint32_t copy0Crc)
{
@@ -208,7 +202,7 @@ TEST_F(BucketDBMetricUpdaterTest, buckets_with_varying_trustedness) {
{
BucketInfo info(makeInfo(100, 200));
info.resetTrusted();
- BucketDatabase::Entry e(document::BucketId(16, 3), info);
+ BucketDatabase::Entry e(document::BucketId(16, 3), std::move(info));
metricUpdater.visit(e, 2);
}
metricUpdater.completeRound(false);
@@ -239,7 +233,7 @@ TEST_F(BucketDBMetricUpdaterTest, pick_largest_copy_if_no_trusted) {
// No trusted copies, so must pick second copy.
BucketInfo info(makeInfo(100, 200));
info.resetTrusted();
- BucketDatabase::Entry e(document::BucketId(16, 2), info);
+ BucketDatabase::Entry e(document::BucketId(16, 2), std::move(info));
metricUpdater.visit(e, 2);
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
@@ -271,36 +265,33 @@ TEST_F(BucketDBMetricUpdaterTest, complete_round_clears_working_state) {
// Replicas on nodes 0 and 1.
void
-BucketDBMetricUpdaterTest::visitBucketWith2Copies1Trusted(
- BucketDBMetricUpdater& metricUpdater)
+BucketDBMetricUpdaterTest::visitBucketWith2Copies1Trusted(BucketDBMetricUpdater& metricUpdater)
{
BucketInfo info;
addNode(info, 0, 100);
addNode(info, 1, 101); // Note different checksums => #trusted = 1
- BucketDatabase::Entry e(document::BucketId(16, 1), info);
+ BucketDatabase::Entry e(document::BucketId(16, 1), std::move(info));
metricUpdater.visit(e, 2);
}
// Replicas on nodes 0 and 2.
void
-BucketDBMetricUpdaterTest::visitBucketWith2CopiesBothTrusted(
- BucketDBMetricUpdater& metricUpdater)
+BucketDBMetricUpdaterTest::visitBucketWith2CopiesBothTrusted(BucketDBMetricUpdater& metricUpdater)
{
BucketInfo info;
addNode(info, 0, 200);
addNode(info, 2, 200);
- BucketDatabase::Entry e(document::BucketId(16, 2), info);
+ BucketDatabase::Entry e(document::BucketId(16, 2), std::move(info));
metricUpdater.visit(e, 2);
}
// Single replica on node 2.
void
-BucketDBMetricUpdaterTest::visitBucketWith1Copy(
- BucketDBMetricUpdater& metricUpdater)
+BucketDBMetricUpdaterTest::visitBucketWith1Copy(BucketDBMetricUpdater& metricUpdater)
{
BucketInfo info;
addNode(info, 2, 100);
- BucketDatabase::Entry e(document::BucketId(16, 1), info);
+ BucketDatabase::Entry e(document::BucketId(16, 1), std::move(info));
metricUpdater.visit(e, 2);
}
diff --git a/storage/src/tests/distributor/bucketstateoperationtest.cpp b/storage/src/tests/distributor/bucketstateoperationtest.cpp
index 42ee4675e26..c9fab0b37e5 100644
--- a/storage/src/tests/distributor/bucketstateoperationtest.cpp
+++ b/storage/src/tests/distributor/bucketstateoperationtest.cpp
@@ -3,6 +3,7 @@
#include <tests/distributor/distributor_stripe_test_util.h>
#include <vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h>
#include <vespa/storage/distributor/top_level_distributor.h>
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/vespalib/gtest/gtest.h>
#include "dummy_cluster_context.h"
diff --git a/storage/src/tests/distributor/check_condition_test.cpp b/storage/src/tests/distributor/check_condition_test.cpp
index 757a9329ea6..617401dd271 100644
--- a/storage/src/tests/distributor/check_condition_test.cpp
+++ b/storage/src/tests/distributor/check_condition_test.cpp
@@ -5,6 +5,7 @@
#include <vespa/document/fieldset/fieldsets.h>
#include <vespa/documentapi/messagebus/messages/testandsetcondition.h>
#include <vespa/storage/distributor/node_supported_features.h>
+#include <vespa/storage/distributor/operations/cancel_scope.h>
#include <vespa/storage/distributor/operations/external/check_condition.h>
#include <vespa/storage/distributor/persistence_operation_metric_set.h>
#include <vespa/storageapi/message/persistence.h>
@@ -227,6 +228,20 @@ TEST_F(CheckConditionTest, failed_gets_completes_check_with_error_outcome) {
});
}
+TEST_F(CheckConditionTest, check_fails_if_condition_explicitly_cancelled) {
+ test_cond_with_2_gets_sent([&](auto& cond) {
+ cond.handle_reply(_sender, make_matched_reply(0));
+ cond.cancel(_sender, CancelScope::of_fully_cancelled());
+ cond.handle_reply(_sender, make_matched_reply(1));
+ }, [&](auto& outcome) {
+ EXPECT_FALSE(outcome.matched_condition());
+ EXPECT_FALSE(outcome.not_found());
+ EXPECT_TRUE(outcome.failed());
+ EXPECT_EQ(outcome.error_code().getResult(), api::ReturnCode::ABORTED);
+ });
+}
+
+// TODO deprecate in favor of cancelling
TEST_F(CheckConditionTest, check_fails_if_replica_set_changed_between_start_and_completion) {
test_cond_with_2_gets_sent([&](auto& cond) {
cond.handle_reply(_sender, make_matched_reply(0));
@@ -242,6 +257,7 @@ TEST_F(CheckConditionTest, check_fails_if_replica_set_changed_between_start_and_
});
}
+// TODO deprecate in favor of cancelling
TEST_F(CheckConditionTest, check_fails_if_bucket_ownership_changed_between_start_and_completion_pending_transition_case) {
test_cond_with_2_gets_sent([&](auto& cond) {
cond.handle_reply(_sender, make_matched_reply(0));
@@ -255,6 +271,7 @@ TEST_F(CheckConditionTest, check_fails_if_bucket_ownership_changed_between_start
});
}
+// TODO deprecate in favor of cancelling
TEST_F(CheckConditionTest, check_fails_if_bucket_ownership_changed_between_start_and_completion_completed_transition_case) {
test_cond_with_2_gets_sent([&](auto& cond) {
cond.handle_reply(_sender, make_matched_reply(0));
diff --git a/storage/src/tests/distributor/distributor_bucket_space_test.cpp b/storage/src/tests/distributor/distributor_bucket_space_test.cpp
index 41e0dafdaaf..00bc803e81c 100644
--- a/storage/src/tests/distributor/distributor_bucket_space_test.cpp
+++ b/storage/src/tests/distributor/distributor_bucket_space_test.cpp
@@ -100,19 +100,19 @@ DistributorBucketSpaceTest::CountVector
DistributorBucketSpaceTest::count_service_layer_buckets(const std::vector<BucketId>& buckets)
{
CountVector result(3);
- std::vector<uint16_t> ideal_nodes;
for (auto& bucket : buckets) {
- auto &ideal_nodes_bundle = bucket_space.get_ideal_service_layer_nodes_bundle(bucket);
+ const auto & ideal_nodes_bundle = bucket_space.get_ideal_service_layer_nodes_bundle(bucket);
for (uint32_t i = 0; i < 3; ++i) {
+ IdealServiceLayerNodesBundle::ConstNodesRef ideal_nodes;
switch (i) {
case 0:
- ideal_nodes = ideal_nodes_bundle.get_available_nodes();
+ ideal_nodes = ideal_nodes_bundle.available_nodes();
break;
case 1:
- ideal_nodes = ideal_nodes_bundle.get_available_nonretired_nodes();
+ ideal_nodes = ideal_nodes_bundle.available_nonretired_nodes();
break;
case 2:
- ideal_nodes = ideal_nodes_bundle.get_available_nonretired_or_maintenance_nodes();
+ ideal_nodes = ideal_nodes_bundle.available_nonretired_or_maintenance_nodes();
break;
default:
;
diff --git a/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
index 6dfab5abc21..a72dfec2d94 100644
--- a/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
+++ b/storage/src/tests/distributor/distributor_host_info_reporter_test.cpp
@@ -14,7 +14,7 @@ namespace storage::distributor {
using End = vespalib::JsonStream::End;
using File = vespalib::File;
-using MinReplicaStats = std::unordered_map<uint16_t, uint32_t>;
+using MinReplicaStats = MinReplicaMap;
using Object = vespalib::JsonStream::Object;
using PerNodeBucketSpacesStats = BucketSpacesStatsProvider::PerNodeBucketSpacesStats;
using BucketSpacesStats = BucketSpacesStatsProvider::BucketSpacesStats;
@@ -36,7 +36,7 @@ struct MockedMinReplicaProvider : MinReplicaProvider
MinReplicaStats minReplica;
~MockedMinReplicaProvider() override;
- std::unordered_map<uint16_t, uint32_t> getMinReplica() const override {
+ MinReplicaMap getMinReplica() const override {
return minReplica;
}
};
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.cpp b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
index 7a64eda28ff..5babde49380 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.cpp
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
@@ -10,6 +10,7 @@
#include <vespa/storage/distributor/distributormetricsset.h>
#include <vespa/storage/distributor/ideal_state_total_metrics.h>
#include <vespa/storage/distributor/node_supported_features_repo.h>
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/text/stringtokenizer.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
@@ -40,34 +41,22 @@ DistributorStripeTestUtil::createLinks()
_node = std::make_unique<TestDistributorApp>(_config.getConfigId());
_metrics = std::make_shared<DistributorMetricSet>();
_ideal_state_metrics = std::make_shared<IdealStateMetricSet>();
- _stripe = std::make_unique<DistributorStripe>(_node->getComponentRegister(),
- *_metrics,
- *_ideal_state_metrics,
- _node->node_identity(),
- _messageSender,
- *this,
- _done_initializing);
+ _stripe = std::make_unique<DistributorStripe>(_node->getComponentRegister(), *_metrics, *_ideal_state_metrics,
+ _node->node_identity(), _messageSender, *this, _done_initializing);
}
void
-DistributorStripeTestUtil::setup_stripe(int redundancy,
- int nodeCount,
- const std::string& systemState,
- uint32_t earlyReturn,
- bool requirePrimaryToBeWritten)
+DistributorStripeTestUtil::setup_stripe(int redundancy, int nodeCount, const std::string& systemState,
+ uint32_t earlyReturn, bool requirePrimaryToBeWritten)
{
setup_stripe(redundancy, nodeCount, lib::ClusterStateBundle(lib::ClusterState(systemState)), earlyReturn, requirePrimaryToBeWritten);
}
void
-DistributorStripeTestUtil::setup_stripe(int redundancy,
- int node_count,
- const lib::ClusterStateBundle& state,
- uint32_t early_return,
- bool require_primary_to_be_written)
+DistributorStripeTestUtil::setup_stripe(int redundancy, int node_count, const lib::ClusterStateBundle& state,
+ uint32_t early_return, bool require_primary_to_be_written)
{
- lib::Distribution::DistributionConfigBuilder config(
- lib::Distribution::getDefaultDistributionConfig(redundancy, node_count).get());
+ lib::Distribution::DistributionConfigBuilder config(lib::Distribution::getDefaultDistributionConfig(redundancy, node_count).get());
config.redundancy = redundancy;
config.initialRedundancy = early_return;
config.ensurePrimaryPersisted = require_primary_to_be_written;
@@ -93,8 +82,7 @@ DistributorStripeTestUtil::setup_stripe(int redundancy,
void
DistributorStripeTestUtil::set_redundancy(uint32_t redundancy)
{
- auto distribution = std::make_shared<lib::Distribution>(
- lib::Distribution::getDefaultDistributionConfig(redundancy, 100));
+ auto distribution = std::make_shared<lib::Distribution>(lib::Distribution::getDefaultDistributionConfig(redundancy, 100));
// Same rationale for not triggering a full distribution change as
// in setup_stripe() above
_node->getComponentRegister().setDistribution(distribution);
@@ -217,8 +205,7 @@ DistributorStripeTestUtil::getIdealStr(document::BucketId id, const lib::Cluster
}
std::vector<uint16_t> nodes;
- getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, id, nodes);
+ getDistribution().getIdealNodes(lib::NodeType::STORAGE, state, id, nodes, "uim");
std::sort(nodes.begin(), nodes.end());
std::ostringstream ost;
ost << id << ": " << dumpVector(nodes);
@@ -226,8 +213,7 @@ DistributorStripeTestUtil::getIdealStr(document::BucketId id, const lib::Cluster
}
void
-DistributorStripeTestUtil::addIdealNodes(const lib::ClusterState& state,
- const document::BucketId& id)
+DistributorStripeTestUtil::addIdealNodes(const lib::ClusterState& state, const document::BucketId& id)
{
BucketDatabase::Entry entry = getBucket(id);
@@ -236,15 +222,11 @@ DistributorStripeTestUtil::addIdealNodes(const lib::ClusterState& state,
}
std::vector<uint16_t> res;
- getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, id, res);
+ getDistribution().getIdealNodes(lib::NodeType::STORAGE, state, id, res, "uim");
for (uint32_t i = 0; i < res.size(); ++i) {
- if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() !=
- lib::State::MAINTENANCE)
- {
- entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)),
- toVector<uint16_t>(0));
+ if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() != lib::State::MAINTENANCE) {
+ entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)), {0});
}
}
@@ -292,10 +274,7 @@ DistributorStripeTestUtil::addNodesToBucketDB(const document::Bucket& bucket, co
}
uint16_t idx = atoi(tok2[0].data());
- BucketCopy node(
- 0,
- idx,
- info);
+ BucketCopy node(0, idx, info);
// Allow user to manually override trusted and active.
if (tok3.size() > flagsIdx && tok3[flagsIdx] == "t") {
@@ -309,44 +288,32 @@ DistributorStripeTestUtil::addNodesToBucketDB(const document::Bucket& bucket, co
}
void
-DistributorStripeTestUtil::addNodesToBucketDB(const document::BucketId& id,
- const std::string& nodeStr)
-{
+DistributorStripeTestUtil::addNodesToBucketDB(const document::BucketId& id, const std::string& nodeStr) {
addNodesToBucketDB(document::Bucket(makeBucketSpace(), id), nodeStr);
}
void
-DistributorStripeTestUtil::removeFromBucketDB(const document::BucketId& id)
-{
+DistributorStripeTestUtil::removeFromBucketDB(const document::BucketId& id) {
getBucketDatabase().remove(id);
}
void
-DistributorStripeTestUtil::addIdealNodes(const document::BucketId& id)
-{
+DistributorStripeTestUtil::addIdealNodes(const document::BucketId& id) {
// TODO STRIPE roundabout way of getting state bundle..!
addIdealNodes(*operation_context().cluster_state_bundle().getBaselineClusterState(), id);
}
void
-DistributorStripeTestUtil::insertBucketInfo(document::BucketId id,
- uint16_t node,
- uint32_t checksum,
- uint32_t count,
- uint32_t size,
- bool trusted,
- bool active)
+DistributorStripeTestUtil::insertBucketInfo(document::BucketId id, uint16_t node, uint32_t checksum,
+ uint32_t count, uint32_t size, bool trusted, bool active)
{
api::BucketInfo info(checksum, count, size);
insertBucketInfo(id, node, info, trusted, active);
}
void
-DistributorStripeTestUtil::insertBucketInfo(document::BucketId id,
- uint16_t node,
- const api::BucketInfo& info,
- bool trusted,
- bool active)
+DistributorStripeTestUtil::insertBucketInfo(document::BucketId id, uint16_t node, const api::BucketInfo& info,
+ bool trusted, bool active)
{
BucketDatabase::Entry entry = getBucketDatabase().get(id);
if (!entry.valid()) {
@@ -358,9 +325,7 @@ DistributorStripeTestUtil::insertBucketInfo(document::BucketId id,
info2.setActive();
}
BucketCopy copy(operation_context().generate_unique_timestamp(), node, info2);
-
- entry->addNode(copy.setTrusted(trusted), toVector<uint16_t>(0));
-
+ entry->addNode(copy.setTrusted(trusted), {0});
getBucketDatabase().update(entry);
}
@@ -371,9 +336,7 @@ DistributorStripeTestUtil::dumpBucket(const document::BucketId& bid)
}
void
-DistributorStripeTestUtil::sendReply(Operation& op,
- int idx,
- api::ReturnCode::Result result)
+DistributorStripeTestUtil::sendReply(Operation& op, int idx, api::ReturnCode::Result result)
{
if (idx == -1) {
idx = _sender.commands().size() - 1;
@@ -387,20 +350,17 @@ DistributorStripeTestUtil::sendReply(Operation& op,
}
BucketDatabase::Entry
-DistributorStripeTestUtil::getBucket(const document::Bucket& bucket) const
-{
+DistributorStripeTestUtil::getBucket(const document::Bucket& bucket) const {
return getBucketDatabase(bucket.getBucketSpace()).get(bucket.getBucketId());
}
BucketDatabase::Entry
-DistributorStripeTestUtil::getBucket(const document::BucketId& bId) const
-{
+DistributorStripeTestUtil::getBucket(const document::BucketId& bId) const {
return getBucketDatabase().get(bId);
}
void
-DistributorStripeTestUtil::disableBucketActivationInConfig(bool disable)
-{
+DistributorStripeTestUtil::disableBucketActivationInConfig(bool disable) {
ConfigBuilder builder;
builder.disableBucketActivation = disable;
configure_stripe(builder);
@@ -437,14 +397,12 @@ DistributorStripeTestUtil::doc_selection_parser() const {
}
DistributorMetricSet&
-DistributorStripeTestUtil::metrics()
-{
+DistributorStripeTestUtil::metrics() {
return *_metrics;
}
bool
-DistributorStripeTestUtil::tick()
-{
+DistributorStripeTestUtil::tick() {
return _stripe->tick();
}
@@ -553,8 +511,7 @@ DistributorStripeTestUtil::getBucketSpaces() const
void
DistributorStripeTestUtil::enable_cluster_state(vespalib::stringref state)
{
- getBucketDBUpdater().simulate_cluster_state_bundle_activation(
- lib::ClusterStateBundle(lib::ClusterState(state)));
+ getBucketDBUpdater().simulate_cluster_state_bundle_activation(lib::ClusterStateBundle(lib::ClusterState(state)));
}
void
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.h b/storage/src/tests/distributor/distributor_stripe_test_util.h
index 9963b2c96b4..272301bf4a6 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.h
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.h
@@ -7,6 +7,7 @@
#include <tests/common/teststorageapp.h>
#include <vespa/storage/common/hostreporter/hostinfo.h>
#include <vespa/storage/distributor/stripe_host_info_notifier.h>
+#include <vespa/storage/storageutil/utils.h>
namespace storage {
diff --git a/storage/src/tests/distributor/garbagecollectiontest.cpp b/storage/src/tests/distributor/garbagecollectiontest.cpp
index c2f4836f4cb..b1cf1cbc636 100644
--- a/storage/src/tests/distributor/garbagecollectiontest.cpp
+++ b/storage/src/tests/distributor/garbagecollectiontest.cpp
@@ -71,8 +71,7 @@ struct GarbageCollectionOperationTest : Test, DistributorStripeTestUtil {
std::shared_ptr<GarbageCollectionOperation> create_op() {
auto op = std::make_shared<GarbageCollectionOperation>(
- dummy_cluster_context, BucketAndNodes(makeDocumentBucket(_bucket_id),
- toVector<uint16_t>(0, 1)));
+ dummy_cluster_context, BucketAndNodes(makeDocumentBucket(_bucket_id), {0, 1}));
op->setIdealStateManager(&getIdealStateManager());
return op;
}
@@ -114,9 +113,8 @@ struct GarbageCollectionOperationTest : Test, DistributorStripeTestUtil {
ASSERT_EQ(entry->getNodeCount(), info.size());
EXPECT_EQ(entry->getLastGarbageCollectionTime(), last_gc_time);
for (size_t i = 0; i < info.size(); ++i) {
- EXPECT_EQ(info[i], entry->getNode(i)->getBucketInfo())
- << "Mismatching info for node " << i << ": " << info[i] << " vs "
- << entry->getNode(i)->getBucketInfo();
+ auto& node = entry->getNodeRef(i);
+ EXPECT_EQ(info[i], node.getBucketInfo()) << "Mismatching DB bucket info for node " << node.getNode();
}
}
@@ -173,6 +171,51 @@ TEST_F(GarbageCollectionOperationTest, replica_bucket_info_not_added_to_db_until
EXPECT_EQ(70u, gc_removed_documents_metric()); // Use max of received metrics
}
+TEST_F(GarbageCollectionOperationTest, no_replica_bucket_info_added_to_db_if_operation_fully_canceled) {
+ auto op = create_op();
+ op->start(_sender);
+ ASSERT_EQ(2, _sender.commands().size());
+
+ reply_to_nth_request(*op, 0, 1234, 70);
+ op->cancel(_sender, CancelScope::of_fully_cancelled());
+ reply_to_nth_request(*op, 1, 4567, 60);
+
+    // DB state is unchanged. Note that in a real scenario the DB entry would already have been
+    // removed as part of the ownership change. However, existing non-cancellation behaviors
+    // already avoid creating buckets from scratch in the DB when they do not exist, so merely
+    // checking whether the bucket exists risks hiding missing handling of cancellation edges.
+ ASSERT_NO_FATAL_FAILURE(assert_bucket_db_contains({api::BucketInfo(250, 50, 300), api::BucketInfo(250, 50, 300)}, 0));
+ // However, we still update our metrics if we _did_ remove documents on one or more nodes
+ EXPECT_EQ(70u, gc_removed_documents_metric());
+}
+
+TEST_F(GarbageCollectionOperationTest, no_replica_bucket_info_added_to_db_for_cancelled_node) {
+ auto op = create_op();
+ op->start(_sender);
+ ASSERT_EQ(2, _sender.commands().size());
+
+ reply_to_nth_request(*op, 0, 1234, 70);
+ op->cancel(_sender, CancelScope::of_node_subset({0}));
+ reply_to_nth_request(*op, 1, 4567, 60);
+
+ // DB state is unchanged for node 0, changed for node 1
+ ASSERT_NO_FATAL_FAILURE(assert_bucket_db_contains({api::BucketInfo(250, 50, 300), api::BucketInfo(4567, 90, 500)}, 34));
+}
+
+TEST_F(GarbageCollectionOperationTest, node_cancellation_is_cumulative) {
+ auto op = create_op();
+ op->start(_sender);
+ ASSERT_EQ(2, _sender.commands().size());
+
+ reply_to_nth_request(*op, 0, 1234, 70);
+ op->cancel(_sender, CancelScope::of_node_subset({0}));
+ op->cancel(_sender, CancelScope::of_node_subset({1}));
+ reply_to_nth_request(*op, 1, 4567, 60);
+
+ // DB state is unchanged for both nodes
+ ASSERT_NO_FATAL_FAILURE(assert_bucket_db_contains({api::BucketInfo(250, 50, 300), api::BucketInfo(250, 50, 300)}, 0));
+}
+
TEST_F(GarbageCollectionOperationTest, gc_bucket_info_does_not_overwrite_later_sequenced_bucket_info_writes) {
auto op = create_op();
op->start(_sender);
@@ -364,6 +407,16 @@ TEST_F(GarbageCollectionOperationPhase1FailureTest, no_second_phase_if_bucket_in
receive_phase1_replies_and_assert_no_phase_2_started();
}
+TEST_F(GarbageCollectionOperationPhase1FailureTest, no_second_phase_if_operation_fully_cancelled_between_phases) {
+ _op->cancel(_sender, CancelScope::of_fully_cancelled());
+ receive_phase1_replies_and_assert_no_phase_2_started();
+}
+
+TEST_F(GarbageCollectionOperationPhase1FailureTest, no_second_phase_if_operation_partially_cancelled_between_phases) {
+ _op->cancel(_sender, CancelScope::of_node_subset({0}));
+ receive_phase1_replies_and_assert_no_phase_2_started();
+}
+
TEST_F(GarbageCollectionOperationTest, document_level_write_locks_are_checked_and_held_if_acquired) {
enable_two_phase_gc();
auto op = create_op();
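The new cancellation tests in this file all follow the same pattern: start the operation, cancel it with a full or per-node scope, and then verify that late replies no longer write replica info for the cancelled nodes back into the bucket DB. A condensed sketch of that pattern, using only the calls exercised above (CancelScope::of_fully_cancelled(), CancelScope::of_node_subset(), Operation::cancel()); the comments summarize the expectations the individual tests assert:

    auto op = create_op();   // GC operation targeting nodes {0, 1}
    op->start(_sender);

    // Full cancellation, e.g. when bucket ownership moves away from this distributor...
    op->cancel(_sender, CancelScope::of_fully_cancelled());
    // ...or per-node cancellation; repeated subset cancellations are cumulative.
    op->cancel(_sender, CancelScope::of_node_subset({0}));
    op->cancel(_sender, CancelScope::of_node_subset({1}));

    // Replies arriving after cancellation must not update the DB for cancelled nodes,
    // and a pending second GC phase is not started, but metrics for documents already
    // removed on content nodes are still recorded.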
diff --git a/storage/src/tests/distributor/mock_tickable_stripe.h b/storage/src/tests/distributor/mock_tickable_stripe.h
index ec2f978c029..77a6f537d28 100644
--- a/storage/src/tests/distributor/mock_tickable_stripe.h
+++ b/storage/src/tests/distributor/mock_tickable_stripe.h
@@ -23,7 +23,7 @@ struct MockTickableStripe : TickableStripe {
const lib::Distribution&,
const lib::ClusterState&,
const char*,
- const std::unordered_set<uint16_t>&,
+ const OutdatedNodes &,
const std::vector<dbtransition::Entry>&) override
{
abort();
diff --git a/storage/src/tests/distributor/multi_thread_stripe_access_guard_test.cpp b/storage/src/tests/distributor/multi_thread_stripe_access_guard_test.cpp
index 6bc98ef022e..db89b30efb2 100644
--- a/storage/src/tests/distributor/multi_thread_stripe_access_guard_test.cpp
+++ b/storage/src/tests/distributor/multi_thread_stripe_access_guard_test.cpp
@@ -25,7 +25,7 @@ struct AggregationTestingMockTickableStripe : MockTickableStripe {
}
void merge_entries_into_db(document::BucketSpace, api::Timestamp, const lib::Distribution&,
- const lib::ClusterState&, const char*, const std::unordered_set<uint16_t>&,
+ const lib::ClusterState&, const char*, const OutdatedNodes &,
const std::vector<dbtransition::Entry>& entries_in) override {
entries = entries_in;
}
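Both mock stripes change the merge_entries_into_db() parameter from std::unordered_set<uint16_t> to OutdatedNodes. A minimal sketch of an override after the change, assuming OutdatedNodes is a set-like alias over node indices (the alias itself is defined elsewhere and not shown in this diff):

    void merge_entries_into_db(document::BucketSpace, api::Timestamp, const lib::Distribution&,
                               const lib::ClusterState&, const char*, const OutdatedNodes& outdated_nodes,
                               const std::vector<dbtransition::Entry>& entries_in) override {
        entries = entries_in; // the test mock only records the entries it receives
    }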
diff --git a/storage/src/tests/distributor/operationtargetresolvertest.cpp b/storage/src/tests/distributor/operationtargetresolvertest.cpp
index 2d41b0f4d32..19ca81e933f 100644
--- a/storage/src/tests/distributor/operationtargetresolvertest.cpp
+++ b/storage/src/tests/distributor/operationtargetresolvertest.cpp
@@ -3,7 +3,6 @@
#include <tests/distributor/distributor_stripe_test_util.h>
#include <vespa/config/helper/configgetter.h>
#include <vespa/config/helper/configgetter.hpp>
-#include <vespa/document/config/config-documenttypes.h>
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/document/test/make_bucket_space.h>
#include <vespa/document/test/make_document_bucket.h>
@@ -14,7 +13,6 @@
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h>
#include <vespa/vespalib/gtest/gtest.h>
using document::BucketId;
@@ -112,14 +110,10 @@ struct TestTargets {
} // anonymous
BucketInstanceList
-OperationTargetResolverTest::getInstances(const BucketId& id,
- bool stripToRedundancy)
+OperationTargetResolverTest::getInstances(const BucketId& id, bool stripToRedundancy)
{
- lib::IdealNodeCalculatorImpl idealNodeCalc;
auto &bucketSpaceRepo(operation_context().bucket_space_repo());
auto &distributorBucketSpace(bucketSpaceRepo.get(makeBucketSpace()));
- idealNodeCalc.setDistribution(distributorBucketSpace.getDistribution());
- idealNodeCalc.setClusterState(distributorBucketSpace.getClusterState());
OperationTargetResolverImpl resolver(
distributorBucketSpace, distributorBucketSpace.getBucketDatabase(), 16,
distributorBucketSpace.getDistribution().getRedundancy(),
@@ -142,24 +136,6 @@ TEST_F(OperationTargetResolverTest, simple) {
.sendsTo(BucketId(16, 0), 0);
}
-TEST_F(OperationTargetResolverTest, multiple_nodes) {
- setup_stripe(1, 2, "storage:2 distributor:1");
-
- auto &bucketSpaceRepo(operation_context().bucket_space_repo());
- auto &distributorBucketSpace(bucketSpaceRepo.get(makeBucketSpace()));
- for (int i = 0; i < 100; ++i) {
- addNodesToBucketDB(BucketId(16, i), "0=0,1=0");
-
- lib::IdealNodeCalculatorImpl idealNodeCalc;
- idealNodeCalc.setDistribution(distributorBucketSpace.getDistribution());
- idealNodeCalc.setClusterState(distributorBucketSpace.getClusterState());
- lib::IdealNodeList idealNodes(
- idealNodeCalc.getIdealStorageNodes(BucketId(16, i)));
- uint16_t expectedNode = idealNodes[0].getIndex();
- MY_ASSERT_THAT(BucketId(32, i)).sendsTo(BucketId(16, i), expectedNode);
- }
-}
-
TEST_F(OperationTargetResolverTest, choose_ideal_state_when_many_copies) {
setup_stripe(2, 4, "storage:4 distributor:1");
addNodesToBucketDB(BucketId(16, 0), "0=0,1=0,2=0,3=0"); // ideal nodes: 1, 3
diff --git a/storage/src/tests/distributor/pendingmessagetrackertest.cpp b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
index 3bfa1027a82..8277281206d 100644
--- a/storage/src/tests/distributor/pendingmessagetrackertest.cpp
+++ b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
@@ -162,10 +162,14 @@ TEST_F(PendingMessageTrackerTest, simple) {
clock.setAbsoluteTimeInSeconds(1);
PendingMessageTracker tracker(compReg, 0);
+ std::ostringstream dummy; // Enable time tracking
+ tracker.reportStatus(dummy, framework::HttpUrlPath("/pendingmessages?order=bucket"));
+
auto remove = std::make_shared<api::RemoveCommand>(
makeDocumentBucket(document::BucketId(16, 1234)),
document::DocumentId("id:footype:testdoc:n=1234:foo"), 1001);
remove->setAddress(makeStorageAddress(0));
+
tracker.insert(remove);
{
@@ -238,6 +242,8 @@ TEST_F(PendingMessageTrackerTest, multiple_messages) {
compReg.setClock(clock);
clock.setAbsoluteTimeInSeconds(1);
PendingMessageTracker tracker(compReg, 0);
+ std::ostringstream dummy; // Enable time tracking
+ tracker.reportStatus(dummy, framework::HttpUrlPath("/pendingmessages?order=bucket"));
insertMessages(tracker);
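Both tracker tests now issue a status-page request before inserting messages; per the in-test comment this is what enables time tracking in the PendingMessageTracker. That the tracking is enabled lazily by the first status request is an assumption based on that comment; only the call shape below is taken from the diff:

    PendingMessageTracker tracker(compReg, 0);
    std::ostringstream dummy; // output is irrelevant, the request itself enables time tracking
    tracker.reportStatus(dummy, framework::HttpUrlPath("/pendingmessages?order=bucket"));
    tracker.insert(remove);   // subsequently inserted messages are time-tracked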
diff --git a/storage/src/tests/distributor/putoperationtest.cpp b/storage/src/tests/distributor/putoperationtest.cpp
index 76b6741442e..ee87fe84df6 100644
--- a/storage/src/tests/distributor/putoperationtest.cpp
+++ b/storage/src/tests/distributor/putoperationtest.cpp
@@ -6,6 +6,7 @@
#include <vespa/storage/distributor/top_level_distributor.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/storage/distributor/distributor_stripe.h>
+#include <vespa/storage/distributor/operations/cancel_scope.h>
#include <vespa/storage/distributor/operations/external/putoperation.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/persistence.h>
@@ -208,6 +209,43 @@ TEST_F(PutOperationTest, failed_CreateBucket_removes_replica_from_db_and_sends_R
_sender.getCommands(true, true, 4));
}
+TEST_F(PutOperationTest, failed_CreateBucket_does_not_send_RequestBucketInfo_if_op_fully_canceled) {
+ setup_stripe(2, 2, "distributor:1 storage:2");
+
+ auto doc = createDummyDocument("test", "test");
+ sendPut(createPut(doc));
+
+ ASSERT_EQ("Create bucket => 1,Create bucket => 0,Put => 1,Put => 0", _sender.getCommands(true));
+
+ op->cancel(_sender, CancelScope::of_fully_cancelled());
+ sendReply(0, api::ReturnCode::TIMEOUT, api::BucketInfo()); // CreateBucket to node 1
+
+ // DB is not touched (note: normally node 1 would be removed at the cancel-edge).
+ ASSERT_EQ("BucketId(0x4000000000008f09) : "
+ "node(idx=1,crc=0x1,docs=0/0,bytes=0/0,trusted=true,active=true,ready=false), "
+ "node(idx=0,crc=0x1,docs=0/0,bytes=0/0,trusted=true,active=false,ready=false)",
+ dumpBucket(operation_context().make_split_bit_constrained_bucket_id(doc->getId())));
+ // No new requests sent
+ ASSERT_EQ("", _sender.getCommands(true, true, 4));
+}
+
+TEST_F(PutOperationTest, failed_CreateBucket_does_not_send_RequestBucketInfo_for_cancelled_nodes) {
+ setup_stripe(2, 2, "distributor:1 storage:2");
+
+ auto doc = createDummyDocument("test", "test");
+ sendPut(createPut(doc));
+
+ ASSERT_EQ("Create bucket => 1,Create bucket => 0,Put => 1,Put => 0", _sender.getCommands(true));
+
+ op->cancel(_sender, CancelScope::of_node_subset({0}));
+ sendReply(0, api::ReturnCode::TIMEOUT, api::BucketInfo()); // CreateBucket to node 1
+ sendReply(1, api::ReturnCode::TIMEOUT, api::BucketInfo()); // CreateBucket to node 0
+
+ // Bucket info recheck only sent to node 1, as it's not cancelled
+ ASSERT_EQ("RequestBucketInfoCommand(1 buckets, super bucket BucketId(0x4000000000008f09). ) => 1",
+ _sender.getCommands(true, true, 4));
+}
+
TEST_F(PutOperationTest, send_inline_split_before_put_if_bucket_too_large) {
setup_stripe(1, 1, "storage:1 distributor:1");
auto cfg = make_config();
@@ -272,6 +310,26 @@ TEST_F(PutOperationTest, return_success_if_op_acked_on_all_replicas_even_if_buck
_sender.getLastReply());
}
+TEST_F(PutOperationTest, return_success_if_op_acked_on_all_replicas_even_if_operation_cancelled) {
+ setup_stripe(2, 2, "storage:2 distributor:1");
+ createAndSendSampleDocument(TIMEOUT);
+
+ ASSERT_EQ("Put(BucketId(0x4000000000001dd4), "
+ "id:test:testdoctype1::, timestamp 100, size 45) => 0,"
+ "Put(BucketId(0x4000000000001dd4), "
+ "id:test:testdoctype1::, timestamp 100, size 45) => 1",
+ _sender.getCommands(true, true));
+
+ op->cancel(_sender, CancelScope::of_fully_cancelled());
+
+ sendReply(0);
+ sendReply(1);
+
+ ASSERT_EQ("PutReply(id:test:testdoctype1::, BucketId(0x0000000000000000), "
+ "timestamp 100) ReturnCode(NONE)",
+ _sender.getLastReply());
+}
+
TEST_F(PutOperationTest, storage_failed) {
setup_stripe(2, 1, "storage:1 distributor:1");
@@ -491,7 +549,7 @@ TEST_F(PutOperationTest, update_correct_bucket_on_remapped_put) {
{
std::shared_ptr<api::StorageCommand> msg2 = _sender.command(0);
- std::shared_ptr<api::StorageReply> reply(msg2->makeReply().release());
+ std::shared_ptr<api::StorageReply> reply(msg2->makeReply());
auto* sreply = dynamic_cast<api::PutReply*>(reply.get());
ASSERT_TRUE(sreply);
sreply->remapBucketId(document::BucketId(17, 13));
@@ -511,6 +569,7 @@ TEST_F(PutOperationTest, update_correct_bucket_on_remapped_put) {
dumpBucket(document::BucketId(17, 13)));
}
+// TODO make this redundant through operation cancelling
TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_active_state) {
setup_stripe(Redundancy(3), NodeCount(3), "distributor:1 storage:3");
@@ -535,6 +594,7 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_active_
dumpBucket(operation_context().make_split_bit_constrained_bucket_id(doc->getId())));
}
+// TODO make this redundant through operation cancelling
TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_pending_state) {
setup_stripe(Redundancy(3), NodeCount(4), "version:1 distributor:1 storage:3");
@@ -568,6 +628,8 @@ TEST_F(PutOperationTest, replica_not_resurrected_in_db_when_node_down_in_pending
// TODO probably also do this for updates and removes
// TODO consider if we should use the pending state verbatim for computing targets if it exists
+// TODO make this redundant through operation cancelling
+// ... actually; FIXME shouldn't the ExternalOperationHandler already cover this??
TEST_F(PutOperationTest, put_is_failed_with_busy_if_target_down_in_pending_state) {
setup_stripe(Redundancy(3), NodeCount(4), "version:1 distributor:1 storage:3");
auto doc = createDummyDocument("test", "test");
@@ -584,6 +646,65 @@ TEST_F(PutOperationTest, put_is_failed_with_busy_if_target_down_in_pending_state
_sender.getLastReply(true));
}
+TEST_F(PutOperationTest, db_not_updated_if_operation_cancelled_by_ownership_change) {
+ setup_stripe(Redundancy(3), NodeCount(3), "distributor:1 storage:3");
+
+ auto doc = createDummyDocument("test", "uri");
+ auto bucket = operation_context().make_split_bit_constrained_bucket_id(doc->getId());
+ auto remap_bucket = BucketId(bucket.getUsedBits() + 1, bucket.getId());
+ addNodesToBucketDB(bucket, "0=1/2/3/t,1=1/2/3/t,2=1/2/3/t");
+
+ sendPut(createPut(doc));
+
+ ASSERT_EQ("Put => 1,Put => 2,Put => 0", _sender.getCommands(true));
+
+ operation_context().remove_nodes_from_bucket_database(makeDocumentBucket(bucket), {0, 1, 2});
+ op->cancel(_sender, CancelScope::of_fully_cancelled());
+
+ // Normally DB updates triggered by replies don't _create_ buckets in the DB, unless
+ // they're remapped buckets. Use a remapping to ensure we hit a create-if-missing DB path.
+ {
+ std::shared_ptr<api::StorageCommand> msg2 = _sender.command(0);
+ std::shared_ptr<api::StorageReply> reply(msg2->makeReply());
+ auto* sreply = dynamic_cast<api::PutReply*>(reply.get());
+ ASSERT_TRUE(sreply);
+ sreply->remapBucketId(remap_bucket);
+ sreply->setBucketInfo(api::BucketInfo(1,2,3,4,5));
+ op->receive(_sender, reply);
+ }
+
+ sendReply(1, api::ReturnCode::OK, api::BucketInfo(5, 6, 7));
+ sendReply(2, api::ReturnCode::OK, api::BucketInfo(7, 8, 9));
+
+ EXPECT_EQ("NONEXISTING", dumpBucket(bucket));
+ EXPECT_EQ("NONEXISTING", dumpBucket(remap_bucket));
+}
+
+TEST_F(PutOperationTest, individually_cancelled_nodes_are_not_updated_in_db) {
+ setup_stripe(Redundancy(3), NodeCount(3), "distributor:1 storage:3");
+
+ auto doc = createDummyDocument("test", "uri");
+ auto bucket = operation_context().make_split_bit_constrained_bucket_id(doc->getId());
+ addNodesToBucketDB(bucket, "0=1/2/3/t,1=1/2/3/t,2=1/2/3/t");
+
+ sendPut(createPut(doc));
+ ASSERT_EQ("Put => 1,Put => 2,Put => 0", _sender.getCommands(true));
+
+ // Simulate nodes 0 and 2 going down
+ operation_context().remove_nodes_from_bucket_database(makeDocumentBucket(bucket), {0, 2});
+ // Cancelling shall be cumulative
+ op->cancel(_sender, CancelScope::of_node_subset({0}));
+ op->cancel(_sender, CancelScope::of_node_subset({2}));
+
+ sendReply(0, api::ReturnCode::OK, api::BucketInfo(5, 6, 7));
+ sendReply(1, api::ReturnCode::OK, api::BucketInfo(6, 7, 8));
+ sendReply(2, api::ReturnCode::OK, api::BucketInfo(9, 8, 7));
+
+ EXPECT_EQ("BucketId(0x4000000000000593) : "
+ "node(idx=1,crc=0x5,docs=6/6,bytes=7/7,trusted=true,active=false,ready=false)",
+ dumpBucket(bucket));
+}
+
TEST_F(PutOperationTest, send_to_retired_nodes_if_no_up_nodes_available) {
setup_stripe(Redundancy(2), NodeCount(2),
"distributor:1 storage:2 .0.s:r .1.s:r");
@@ -761,6 +882,38 @@ TEST_F(PutOperationTest, failed_condition_probe_fails_op_with_returned_error) {
_sender.getLastReply());
}
+TEST_F(PutOperationTest, ownership_cancellation_during_condition_probe_fails_operation_on_probe_completion) {
+ ASSERT_NO_FATAL_FAILURE(set_up_tas_put_with_2_inconsistent_replica_nodes());
+
+ op->receive(_sender, make_get_reply(*sent_get_command(0), 0, false, false));
+ op->cancel(_sender, CancelScope::of_fully_cancelled());
+ op->receive(_sender, make_get_reply(*sent_get_command(1), 0, false, false));
+
+ ASSERT_EQ("Get => 1,Get => 0", _sender.getCommands(true));
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, "
+ "BucketId(0x0000000000000000), timestamp 100) "
+ "ReturnCode(ABORTED, Failed during write repair condition probe step. Reason: "
+ "Operation has been cancelled (likely due to a cluster state change))",
+ _sender.getLastReply());
+}
+
+TEST_F(PutOperationTest, replica_subset_cancellation_during_condition_probe_fails_operation_on_probe_completion) {
+ ASSERT_NO_FATAL_FAILURE(set_up_tas_put_with_2_inconsistent_replica_nodes());
+
+ op->receive(_sender, make_get_reply(*sent_get_command(0), 0, false, false));
+ // 1 of 2 nodes; we still abort after the read phase since we cannot possibly fulfill
+ // the write phase for all replicas.
+ op->cancel(_sender, CancelScope::of_node_subset({0}));
+ op->receive(_sender, make_get_reply(*sent_get_command(1), 0, false, false));
+
+ ASSERT_EQ("Get => 1,Get => 0", _sender.getCommands(true));
+ ASSERT_EQ("PutReply(id:test:testdoctype1::test, "
+ "BucketId(0x0000000000000000), timestamp 100) "
+ "ReturnCode(ABORTED, Failed during write repair condition probe step. Reason: "
+ "Operation has been cancelled (likely due to a cluster state change))",
+ _sender.getLastReply());
+}
+
TEST_F(PutOperationTest, create_flag_in_parent_put_is_propagated_to_sent_puts) {
setup_stripe(Redundancy(2), NodeCount(2), "version:1 storage:2 distributor:1");
auto doc = createDummyDocument("test", "test");
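A recurring mechanical change in this file and in the two-phase update test file: replies built with makeReply() are now bound directly to a std::shared_ptr instead of going through release(). This relies on the implicit conversion from std::unique_ptr to std::shared_ptr, assuming makeReply() returns a std::unique_ptr<api::StorageReply> (consistent with the release() calls being removed):

    std::shared_ptr<api::StorageCommand> msg = _sender.command(0);
    // Old: std::shared_ptr<api::StorageReply> reply(msg->makeReply().release());
    std::shared_ptr<api::StorageReply> reply(msg->makeReply()); // unique_ptr converts implicitly
    auto* put_reply = dynamic_cast<api::PutReply*>(reply.get());
    ASSERT_TRUE(put_reply);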
diff --git a/storage/src/tests/distributor/removeoperationtest.cpp b/storage/src/tests/distributor/removeoperationtest.cpp
index d169c80a95d..3fad2c194a2 100644
--- a/storage/src/tests/distributor/removeoperationtest.cpp
+++ b/storage/src/tests/distributor/removeoperationtest.cpp
@@ -68,6 +68,7 @@ struct RemoveOperationTest : Test, DistributorStripeTestUtil {
std::unique_ptr<api::StorageReply> reply(removec->makeReply());
auto* removeR = dynamic_cast<api::RemoveReply*>(reply.get());
removeR->setOldTimestamp(oldTimestamp);
+ removeR->setBucketInfo(api::BucketInfo(1,2,3,4,5));
callback.onReceive(_sender, std::shared_ptr<api::StorageReply>(reply.release()));
}
@@ -307,6 +308,45 @@ TEST_F(ExtRemoveOperationTest, failed_condition_probe_fails_op_with_returned_err
_sender.getLastReply());
}
+// Note: we don't exhaustively test cancellation edges here, as we assume that Put/Update/Remove ops
+// share the same underlying PersistenceMessageTracker logic. See PutOperationTest for more tests.
+
+TEST_F(ExtRemoveOperationTest, cancellation_during_condition_probe_fails_operation_on_probe_completion) {
+ ASSERT_NO_FATAL_FAILURE(set_up_tas_remove_with_2_nodes(ReplicaState::INCONSISTENT));
+
+ reply_with(make_get_reply(0, 50, false, true));
+ op->cancel(_sender, CancelScope::of_fully_cancelled());
+ reply_with(make_get_reply(1, 50, false, true));
+
+ ASSERT_EQ("Get => 1,Get => 0", _sender.getCommands(true));
+ EXPECT_EQ("RemoveReply(BucketId(0x0000000000000000), "
+ "id:test:test::uri, "
+ "timestamp 100, not found) "
+ "ReturnCode(ABORTED, Failed during write repair condition probe step. Reason: "
+ "Operation has been cancelled (likely due to a cluster state change))",
+ _sender.getLastReply());
+}
+
+TEST_F(ExtRemoveOperationTest, cancelled_nodes_are_not_updated_in_db) {
+ ASSERT_NO_FATAL_FAILURE(set_up_tas_remove_with_2_nodes(ReplicaState::CONSISTENT));
+ ASSERT_EQ("Remove => 1,Remove => 0", _sender.getCommands(true));
+
+ operation_context().remove_nodes_from_bucket_database(makeDocumentBucket(bucketId), {1});
+ op->cancel(_sender, CancelScope::of_node_subset({1}));
+
+ replyToMessage(*op, 0, 50);
+ replyToMessage(*op, 1, 50);
+
+ EXPECT_EQ("BucketId(0x4000000000000593) : "
+ "node(idx=0,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)",
+ dumpBucket(bucketId));
+ // Reply is still OK since the operation went through on the content nodes
+ ASSERT_EQ("RemoveReply(BucketId(0x0000000000000000), "
+ "id:test:test::uri, timestamp 100, removed doc from 50) ReturnCode(NONE)",
+ _sender.getLastReply());
+}
+
TEST_F(ExtRemoveOperationTest, trace_is_propagated_from_condition_probe_gets_ok_probe_case) {
ASSERT_NO_FATAL_FAILURE(set_up_tas_remove_with_2_nodes(ReplicaState::INCONSISTENT));
diff --git a/storage/src/tests/distributor/simplemaintenancescannertest.cpp b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
index 723b0baa6cd..3d3c58ba842 100644
--- a/storage/src/tests/distributor/simplemaintenancescannertest.cpp
+++ b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
@@ -82,7 +82,7 @@ TEST_F(SimpleMaintenanceScannerTest, prioritize_single_bucket) {
TEST_F(SimpleMaintenanceScannerTest, prioritize_single_bucket_alt_bucket_space) {
document::BucketSpace bucketSpace(4);
_bucketSpaceRepo->add(bucketSpace, std::make_unique<DistributorBucketSpace>());
- _scanner->reset();
+ (void)_scanner->fetch_and_reset();
addBucketToDb(bucketSpace, 1);
std::string expected("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000004), BucketId(0x4000000000000001)), pri VERY_HIGH)\n");
@@ -148,7 +148,7 @@ TEST_F(SimpleMaintenanceScannerTest, reset) {
ASSERT_TRUE(scanEntireDatabase(0));
EXPECT_EQ(expected, _priorityDb->toString());
- _scanner->reset();
+ (void)_scanner->fetch_and_reset();
ASSERT_TRUE(scanEntireDatabase(3));
expected = "PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri VERY_HIGH)\n"
@@ -165,7 +165,7 @@ TEST_F(SimpleMaintenanceScannerTest, pending_maintenance_operation_statistics) {
"split bucket: 0, join bucket: 0, "
"set bucket state: 0, garbage collection: 0");
{
- auto stats(_scanner->getPendingMaintenanceStats());
+ const auto & stats = _scanner->getPendingMaintenanceStats();
EXPECT_EQ(expectedEmpty, stringifyGlobalPendingStats(stats));
}
@@ -173,16 +173,16 @@ TEST_F(SimpleMaintenanceScannerTest, pending_maintenance_operation_statistics) {
// All mock operations generated have the merge type.
{
- auto stats(_scanner->getPendingMaintenanceStats());
+ const auto & stats = _scanner->getPendingMaintenanceStats();
std::string expected("delete bucket: 0, merge bucket: 2, "
"split bucket: 0, join bucket: 0, "
"set bucket state: 0, garbage collection: 0");
EXPECT_EQ(expected, stringifyGlobalPendingStats(stats));
}
- _scanner->reset();
+ (void)_scanner->fetch_and_reset();
{
- auto stats(_scanner->getPendingMaintenanceStats());
+ const auto & stats = _scanner->getPendingMaintenanceStats();
EXPECT_EQ(expectedEmpty, stringifyGlobalPendingStats(stats));
}
}
@@ -191,14 +191,14 @@ TEST_F(SimpleMaintenanceScannerTest, per_node_maintenance_stats_are_tracked) {
addBucketToDb(1);
addBucketToDb(3);
{
- auto stats(_scanner->getPendingMaintenanceStats());
+ const auto & stats = _scanner->getPendingMaintenanceStats();
NodeMaintenanceStats emptyStats;
EXPECT_EQ(emptyStats, stats.perNodeStats.forNode(0, makeBucketSpace()));
}
ASSERT_TRUE(scanEntireDatabase(2));
// Mock is currently hardwired to increment movingOut for node 1 and
// copyingIn for node 2 per bucket iterated (we've got 2).
- auto stats(_scanner->getPendingMaintenanceStats());
+ const auto & stats = _scanner->getPendingMaintenanceStats();
{
NodeMaintenanceStats wantedNode1Stats;
wantedNode1Stats.movingOut = 2;
@@ -301,7 +301,7 @@ TEST_F(SimpleMaintenanceScannerTest, merge_pending_maintenance_stats) {
TEST_F(SimpleMaintenanceScannerTest, empty_bucket_db_is_immediately_done_by_default) {
auto res = _scanner->scanNext();
EXPECT_TRUE(res.isDone());
- _scanner->reset();
+ (void)_scanner->fetch_and_reset();
res = _scanner->scanNext();
EXPECT_TRUE(res.isDone());
}
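The scanner tests replace reset() with fetch_and_reset(), explicitly discarding its return value, and bind getPendingMaintenanceStats() by const reference instead of copying. That fetch_and_reset() hands back the stats accumulated since the previous scan is an assumption suggested by the name; the tests only show that the return value must be explicitly ignored:

    (void)_scanner->fetch_and_reset();                           // restart scanning, drop returned stats
    ASSERT_TRUE(scanEntireDatabase(0));
    const auto& stats = _scanner->getPendingMaintenanceStats();  // reference, no copy
    EXPECT_EQ(expectedEmpty, stringifyGlobalPendingStats(stats));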
diff --git a/storage/src/tests/distributor/statecheckerstest.cpp b/storage/src/tests/distributor/statecheckerstest.cpp
index 16854cd63c6..13c982f5a77 100644
--- a/storage/src/tests/distributor/statecheckerstest.cpp
+++ b/storage/src/tests/distributor/statecheckerstest.cpp
@@ -7,6 +7,7 @@
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/top_level_bucket_db_updater.h>
#include <vespa/storage/distributor/top_level_distributor.h>
+#include <vespa/storage/distributor/activecopy.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/storage/distributor/distributor_stripe.h>
#include <vespa/storage/distributor/operations/idealstate/mergeoperation.h>
@@ -1383,9 +1384,8 @@ std::string StateCheckersTest::testGarbageCollection(
getBucketDatabase().update(e);
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(node_context(), operation_context(),
- getDistributorBucketSpace(), statsTracker,
- makeDocumentBucket(e.getBucketId()));
+ StateChecker::Context c(node_context(), operation_context(), getDistributorBucketSpace(),
+ statsTracker, makeDocumentBucket(e.getBucketId()));
getClock().setAbsoluteTimeInSeconds(nowTimestamp);
return testStateChecker(checker, c, false, PendingMessage(), includePriority, includeSchedulingPri);
}
@@ -1394,38 +1394,29 @@ TEST_F(StateCheckersTest, garbage_collection) {
// BucketId(17, 0) has id (and thus 'hash') 0x4400000000000000. With a
// check interval modulo of 3600, this implies a start point of 848.
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(900, 3600 + 847, 3600));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(900, 3600 + 847, 3600));
- EXPECT_EQ("[Needs garbage collection: Last check at 900, current time 4448, "
- "configured interval 3600]",
+ EXPECT_EQ("[Needs garbage collection: Last check at 900, current time 4448, configured interval 3600]",
testGarbageCollection(900, 3600 + 848, 3600));
- EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 3600]",
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, configured interval 3600]",
testGarbageCollection(3, 4000, 3600));
// GC start point 3648.
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3, 3647, 8000));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3, 3647, 8000));
- EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 3600]",
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, configured interval 3600]",
testGarbageCollection(3, 4000, 3600));
// GC explicitly disabled.
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3, 4000, 0));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3, 4000, 0));
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3, 3, 1));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3, 3, 1));
- EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 300] (pri 200)",
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, configured interval 300] (pri 200)",
testGarbageCollection(3, 4000, 300, 1, true));
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3850, 4000, 300, 1));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3850, 4000, 300, 1));
}
TEST_F(StateCheckersTest, gc_ops_are_prioritized_with_low_priority_category) {
@@ -1597,11 +1588,12 @@ TEST_F(StateCheckersTest, context_populates_ideal_state_containers) {
StateChecker::Context c(node_context(), operation_context(),
getDistributorBucketSpace(), statsTracker, makeDocumentBucket({17, 0}));
- ASSERT_THAT(c.idealState, ElementsAre(1, 3));
- // TODO replace with UnorderedElementsAre once we can build gmock without issues
- std::vector<uint16_t> ideal_state(c.unorderedIdealState.begin(), c.unorderedIdealState.end());
- std::sort(ideal_state.begin(), ideal_state.end());
- ASSERT_THAT(ideal_state, ElementsAre(1, 3));
+ ASSERT_EQ(2, c.idealState().size());
+ ASSERT_EQ(1, c.idealState()[0]);
+ ASSERT_EQ(3, c.idealState()[1]);
+ for (uint16_t node : c.idealState()) {
+ ASSERT_TRUE(c.idealStateBundle.is_nonretired_or_maintenance(node));
+ }
}
namespace {
@@ -1616,8 +1608,7 @@ public:
explicit StateCheckerRunner(StateCheckersTest& fixture);
~StateCheckerRunner();
- StateCheckerRunner& addToDb(const document::BucketId& bid,
- const std::string& bucketInfo)
+ StateCheckerRunner& addToDb(const document::BucketId& bid, const std::string& bucketInfo)
{
_fixture.addNodesToBucketDB(bid, bucketInfo);
return *this;
@@ -1652,8 +1643,7 @@ public:
Checker checker;
StateChecker::Context c(_fixture.node_context(), _fixture.operation_context(),
_fixture.getDistributorBucketSpace(), _statsTracker, makeDocumentBucket(bid));
- _result = _fixture.testStateChecker(
- checker, c, false, StateCheckersTest::PendingMessage(), false);
+ _result = _fixture.testStateChecker(checker, c, false, StateCheckersTest::PendingMessage(), false);
}
const std::string& result() const { return _result; }
@@ -1749,4 +1739,9 @@ TEST_F(StateCheckersTest, stats_updates_for_maximum_time_since_gc_run) {
EXPECT_EQ(runner.stats().max_observed_time_since_last_gc(), 1900s);
}
+TEST(ActiveCopyTest, control_size) {
+ EXPECT_EQ(12, sizeof(ActiveCopy));
+ EXPECT_EQ(64, sizeof(IdealServiceLayerNodesBundle));
+}
+
}
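The context test now goes through the idealState() accessor and the new idealStateBundle member instead of the removed idealState/unorderedIdealState fields, and a control test pins the sizes of ActiveCopy and IdealServiceLayerNodesBundle. A minimal sketch of the accessor-based checks, assuming idealState() returns an ordered, indexable sequence of node indices as the assertions imply:

    StateChecker::Context c(node_context(), operation_context(),
                            getDistributorBucketSpace(), statsTracker, makeDocumentBucket({17, 0}));
    ASSERT_EQ(2u, c.idealState().size());
    for (uint16_t node : c.idealState()) {
        // every ideal-state node is part of the non-retired-or-maintenance subset
        ASSERT_TRUE(c.idealStateBundle.is_nonretired_or_maintenance(node));
    }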
diff --git a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
index 567e0a947da..7eb9dfe6269 100644
--- a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
+++ b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
@@ -65,12 +65,9 @@ public:
close();
}
- std::shared_ptr<RequestBucketInfoReply> make_fake_bucket_reply(
- const lib::ClusterState& state,
- const RequestBucketInfoCommand& cmd,
- int storageIndex,
- uint32_t bucketCount,
- uint32_t invalidBucketCount = 0)
+ std::shared_ptr<RequestBucketInfoReply>
+ make_fake_bucket_reply(const lib::ClusterState& state, const RequestBucketInfoCommand& cmd,
+                           int storageIndex, uint32_t bucketCount, uint32_t invalidBucketCount = 0)
{
auto sreply = std::make_shared<RequestBucketInfoReply>(cmd);
sreply->setAddress(storage_address(storageIndex));
@@ -84,19 +81,14 @@ public:
}
std::vector<uint16_t> nodes;
- distributor_bucket_space(bucket).getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, bucket, nodes);
+ distributor_bucket_space(bucket).getDistribution().getIdealNodes(lib::NodeType::STORAGE, state, bucket, nodes, "uim");
for (uint32_t j = 0; j < nodes.size(); ++j) {
if (nodes[j] == storageIndex) {
if (i >= bucketCount) {
- vec.push_back(api::RequestBucketInfoReply::Entry(
- document::BucketId(16, i),
- api::BucketInfo()));
+ vec.emplace_back(document::BucketId(16, i), api::BucketInfo());
} else {
- vec.push_back(api::RequestBucketInfoReply::Entry(
- document::BucketId(16, i),
- api::BucketInfo(10,1,1)));
+ vec.emplace_back(document::BucketId(16, i), api::BucketInfo(10,1,1));
}
}
}
@@ -105,45 +97,34 @@ public:
return sreply;
}
- void fake_bucket_reply(const lib::ClusterState &state,
- const api::StorageCommand &cmd,
- uint32_t bucket_count,
- uint32_t invalid_bucket_count = 0)
+ void fake_bucket_reply(const lib::ClusterState &state, const api::StorageCommand &cmd,
+ uint32_t bucket_count, uint32_t invalid_bucket_count = 0)
{
ASSERT_EQ(cmd.getType(), MessageType::REQUESTBUCKETINFO);
const api::StorageMessageAddress& address(*cmd.getAddress());
bucket_db_updater().onRequestBucketInfoReply(
- make_fake_bucket_reply(state,
- dynamic_cast<const RequestBucketInfoCommand &>(cmd),
- address.getIndex(),
- bucket_count,
- invalid_bucket_count));
+ make_fake_bucket_reply(state, dynamic_cast<const RequestBucketInfoCommand &>(cmd),
+ address.getIndex(), bucket_count, invalid_bucket_count));
}
- void fake_bucket_reply(const lib::ClusterState &state,
- const api::StorageCommand &cmd,
- uint32_t bucket_count,
+ void fake_bucket_reply(const lib::ClusterState &state, const api::StorageCommand &cmd, uint32_t bucket_count,
const std::function<void(api::RequestBucketInfoReply&)>& reply_decorator)
{
ASSERT_EQ(cmd.getType(), MessageType::REQUESTBUCKETINFO);
const api::StorageMessageAddress& address(*cmd.getAddress());
- auto reply = make_fake_bucket_reply(state,
- dynamic_cast<const RequestBucketInfoCommand &>(cmd),
- address.getIndex(),
- bucket_count, 0);
+ auto reply = make_fake_bucket_reply(state, dynamic_cast<const RequestBucketInfoCommand &>(cmd),
+ address.getIndex(), bucket_count, 0);
reply_decorator(*reply);
bucket_db_updater().onRequestBucketInfoReply(reply);
}
- void send_fake_reply_for_single_bucket_request(
- const api::RequestBucketInfoCommand& rbi)
+ void send_fake_reply_for_single_bucket_request(const api::RequestBucketInfoCommand& rbi)
{
ASSERT_EQ(size_t(1), rbi.getBuckets().size());
const document::BucketId& bucket(rbi.getBuckets()[0]);
auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
- reply->getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(bucket, api::BucketInfo(20, 10, 12, 50, 60, true, true)));
+ reply->getBucketInfo().emplace_back(bucket, api::BucketInfo(20, 10, 12, 50, 60, true, true));
stripe_of_bucket(bucket).bucket_db_updater().onRequestBucketInfoReply(reply);
}
@@ -154,15 +135,11 @@ public:
}
std::vector<uint16_t> nodes;
- distributor_bucket_space(id).getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, document::BucketId(id), nodes);
+ distributor_bucket_space(id).getDistribution().getIdealNodes(lib::NodeType::STORAGE, state, document::BucketId(id), nodes, "uim");
if (nodes.size() != entry->getNodeCount()) {
- return vespalib::make_string("Bucket Id %s has %d nodes in "
- "ideal state, but has only %d in DB",
- id.toString().c_str(),
- (int)nodes.size(),
- (int)entry->getNodeCount());
+ return vespalib::make_string("Bucket Id %s has %d nodes in ideal state, but has only %d in DB",
+ id.toString().c_str(), (int)nodes.size(), (int)entry->getNodeCount());
}
for (uint32_t i = 0; i<nodes.size(); i++) {
@@ -175,10 +152,7 @@ public:
}
if (!found) {
- return vespalib::make_string(
- "Bucket Id %s has no copy from node %d",
- id.toString().c_str(),
- nodes[i]);
+ return vespalib::make_string("Bucket Id %s has no copy from node %d", id.toString().c_str(), nodes[i]);
}
}
@@ -188,13 +162,11 @@ public:
struct OrderByIncreasingNodeIndex {
template <typename T>
bool operator()(const T& lhs, const T& rhs) {
- return (lhs->getAddress()->getIndex()
- < rhs->getAddress()->getIndex());
+ return (lhs->getAddress()->getIndex() < rhs->getAddress()->getIndex());
}
};
- void sort_sent_messages_by_index(DistributorMessageSenderStub& sender,
- size_t sortFromOffset = 0)
+ void sort_sent_messages_by_index(DistributorMessageSenderStub& sender, size_t sortFromOffset = 0)
{
std::sort(sender.commands().begin() + sortFromOffset,
sender.commands().end(),
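The reply-population code above also switches from push_back(Entry(...)) to emplace_back(...), constructing the RequestBucketInfoReply::Entry in place from its constructor arguments rather than copying a temporary:

    // Old: vec.push_back(api::RequestBucketInfoReply::Entry(document::BucketId(16, i), api::BucketInfo(10,1,1)));
    vec.emplace_back(document::BucketId(16, i), api::BucketInfo(10, 1, 1));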
diff --git a/storage/src/tests/distributor/top_level_distributor_test.cpp b/storage/src/tests/distributor/top_level_distributor_test.cpp
index dad6f477d83..94f8821f9c8 100644
--- a/storage/src/tests/distributor/top_level_distributor_test.cpp
+++ b/storage/src/tests/distributor/top_level_distributor_test.cpp
@@ -92,7 +92,7 @@ struct TopLevelDistributorTest : Test, TopLevelDistributorTestUtil {
return _distributor->getBucketSpacesStats();
}
- std::unordered_map<uint16_t, uint32_t> distributor_min_replica_stats() {
+ MinReplicaMap distributor_min_replica_stats() {
return _distributor->getMinReplica();
}
@@ -504,7 +504,7 @@ void assert_invalid_bucket_stats_for_all_spaces(
ASSERT_FALSE(space_iter->second.valid());
}
-void assert_min_replica_stats_zeroed(const std::unordered_map<uint16_t, uint32_t>& stats, uint16_t node_index) {
+void assert_min_replica_stats_zeroed(const MinReplicaMap & stats, uint16_t node_index) {
auto iter = stats.find(node_index);
ASSERT_TRUE(iter != stats.cend());
EXPECT_EQ(iter->second, 0);
diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.cpp b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
index 9677ea568e8..6bbe7a47da2 100644
--- a/storage/src/tests/distributor/top_level_distributor_test_util.cpp
+++ b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
@@ -10,6 +10,7 @@
#include <vespa/storage/distributor/distributor_stripe_pool.h>
#include <vespa/storage/distributor/distributor_stripe_thread.h>
#include <vespa/storage/distributor/distributor_total_metrics.h>
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/storage/common/bucket_stripe_utils.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/text/stringtokenizer.h>
@@ -187,7 +188,7 @@ TopLevelDistributorTestUtil::get_ideal_str(document::BucketId id, const lib::Clu
return id.toString();
}
std::vector<uint16_t> nodes;
- _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, nodes);
+ _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, nodes, "uim");
std::sort(nodes.begin(), nodes.end());
std::ostringstream ost;
ost << id << ": " << dumpVector(nodes);
@@ -205,14 +206,11 @@ TopLevelDistributorTestUtil::add_ideal_nodes(const lib::ClusterState& state, con
std::vector<uint16_t> res;
assert(_component.get());
- _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, res);
+ _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, res, "uim");
for (uint32_t i = 0; i < res.size(); ++i) {
- if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() !=
- lib::State::MAINTENANCE)
- {
- entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)),
- toVector<uint16_t>(0));
+ if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() != lib::State::MAINTENANCE) {
+ entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)), toVector<uint16_t>(0));
}
}
diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.h b/storage/src/tests/distributor/top_level_distributor_test_util.h
index cd5db7c8f80..51700848733 100644
--- a/storage/src/tests/distributor/top_level_distributor_test_util.h
+++ b/storage/src/tests/distributor/top_level_distributor_test_util.h
@@ -7,7 +7,6 @@
#include <tests/common/teststorageapp.h>
#include <vespa/storage/common/hostreporter/hostinfo.h>
#include <vespa/storage/frameworkimpl/component/distributorcomponentregisterimpl.h>
-#include <vespa/storage/storageutil/utils.h>
#include <vespa/storageapi/message/state.h>
#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
diff --git a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
index da32225cde3..1907335545a 100644
--- a/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
+++ b/storage/src/tests/distributor/twophaseupdateoperationtest.cpp
@@ -4,16 +4,16 @@
#include <vespa/config/helper/configgetter.h>
#include <vespa/document/base/testdocrepo.h>
#include <vespa/document/fieldset/fieldsets.h>
-#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/document/fieldvalue/intfieldvalue.h>
+#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/document/update/arithmeticvalueupdate.h>
-#include <vespa/storage/distributor/top_level_distributor.h>
#include <vespa/storage/distributor/distributor_stripe.h>
#include <vespa/storage/distributor/externaloperationhandler.h>
#include <vespa/storage/distributor/operations/external/twophaseupdateoperation.h>
+#include <vespa/storage/distributor/top_level_distributor.h>
#include <vespa/storageapi/message/persistence.h>
-#include <vespa/vespalib/gtest/gtest.h>
+#include <gtest/gtest.h>
#include <gmock/gmock.h>
namespace storage::distributor {
@@ -30,8 +30,9 @@ using namespace ::testing;
struct TwoPhaseUpdateOperationTest : Test, DistributorStripeTestUtil {
document::TestDocRepo _testRepo;
std::shared_ptr<const DocumentTypeRepo> _repo;
- const DocumentType* _doc_type;
+ const DocumentType* _doc_type{nullptr};
DistributorMessageSenderStub _sender;
+ BucketId _bucket_id{0x400000000000cac4};
TwoPhaseUpdateOperationTest();
~TwoPhaseUpdateOperationTest() override;
@@ -39,7 +40,7 @@ struct TwoPhaseUpdateOperationTest : Test, DistributorStripeTestUtil {
void checkMessageSettingsPropagatedTo(
const api::StorageCommand::SP& msg) const;
- std::string getUpdatedValueFromLastPut(DistributorMessageSenderStub&);
+ static std::string getUpdatedValueFromLastPut(DistributorMessageSenderStub&);
void SetUp() override {
_repo = _testRepo.getTypeRepoSp();
@@ -57,20 +58,21 @@ struct TwoPhaseUpdateOperationTest : Test, DistributorStripeTestUtil {
close();
}
- void replyToMessage(Operation& callback,
- DistributorMessageSenderStub& sender,
- uint32_t index,
- uint64_t oldTimestamp,
- api::ReturnCode::Result result = api::ReturnCode::OK);
+ static void replyToMessage(
+ Operation& callback,
+ DistributorMessageSenderStub& sender,
+ uint32_t index,
+ uint64_t oldTimestamp,
+ api::ReturnCode::Result result = api::ReturnCode::OK);
- void replyToPut(
+ static void replyToPut(
Operation& callback,
DistributorMessageSenderStub& sender,
uint32_t index,
api::ReturnCode::Result result = api::ReturnCode::OK,
const std::string& traceMsg = "");
- void replyToCreateBucket(
+ static void replyToCreateBucket(
Operation& callback,
DistributorMessageSenderStub& sender,
uint32_t index,
@@ -85,7 +87,7 @@ struct TwoPhaseUpdateOperationTest : Test, DistributorStripeTestUtil {
api::ReturnCode::Result result = api::ReturnCode::OK,
const std::string& traceMsg = "");
- void reply_to_metadata_get(
+ static void reply_to_metadata_get(
Operation& callback,
DistributorMessageSenderStub& sender,
uint32_t index,
@@ -93,7 +95,7 @@ struct TwoPhaseUpdateOperationTest : Test, DistributorStripeTestUtil {
api::ReturnCode::Result result = api::ReturnCode::OK,
const std::string& trace_msg = "");
- void reply_to_get_with_tombstone(
+ static void reply_to_get_with_tombstone(
Operation& callback,
DistributorMessageSenderStub& sender,
uint32_t index,
@@ -148,11 +150,17 @@ struct TwoPhaseUpdateOperationTest : Test, DistributorStripeTestUtil {
Timestamp highest_get_timestamp,
Timestamp expected_response_timestamp);
- std::shared_ptr<TwoPhaseUpdateOperation> set_up_2_inconsistent_replicas_and_start_update(bool enable_3phase = true) {
- setup_stripe(2, 2, "storage:2 distributor:1");
+ void do_update_fails_if_cancelled_prior_to_safe_path_metadata_get_completion(bool in_sync_replicas);
+
+ void enable_3phase_updates(bool enable = true) {
auto cfg = make_config();
- cfg->set_enable_metadata_only_fetch_phase_for_inconsistent_updates(enable_3phase);
+ cfg->set_enable_metadata_only_fetch_phase_for_inconsistent_updates(enable);
configure_stripe(cfg);
+ }
+
+ std::shared_ptr<TwoPhaseUpdateOperation> set_up_2_inconsistent_replicas_and_start_update(bool enable_3phase = true) {
+ setup_stripe(2, 2, "storage:2 distributor:1");
+ enable_3phase_updates(enable_3phase);
auto cb = sendUpdate("0=1/2/3,1=2/3/4"); // Inconsistent replicas.
cb->start(_sender);
return cb;
@@ -199,13 +207,13 @@ TwoPhaseUpdateOperationTest::replyToPut(
{
std::shared_ptr<api::StorageMessage> msg2 = sender.command(index);
auto& putc = dynamic_cast<PutCommand&>(*msg2);
- std::unique_ptr<api::StorageReply> reply(putc.makeReply());
+ std::shared_ptr<api::StorageReply> reply(putc.makeReply());
reply->setResult(api::ReturnCode(result, ""));
+ dynamic_cast<api::PutReply&>(*reply).setBucketInfo(api::BucketInfo(1,2,3,4,5));
if (!traceMsg.empty()) {
MBUS_TRACE(reply->getTrace(), 1, traceMsg);
}
- callback.receive(sender,
- std::shared_ptr<StorageReply>(reply.release()));
+ callback.receive(sender, reply);
}
void
@@ -217,10 +225,9 @@ TwoPhaseUpdateOperationTest::replyToCreateBucket(
{
std::shared_ptr<api::StorageMessage> msg2 = sender.command(index);
auto& putc = dynamic_cast<CreateBucketCommand&>(*msg2);
- std::unique_ptr<api::StorageReply> reply(putc.makeReply());
+ std::shared_ptr<api::StorageReply> reply(putc.makeReply());
reply->setResult(api::ReturnCode(result, ""));
- callback.receive(sender,
- std::shared_ptr<StorageReply>(reply.release()));
+ callback.receive(sender, reply);
}
void
@@ -312,6 +319,7 @@ TwoPhaseUpdateOperationTest::sendUpdate(const std::string& bucketState,
update->setCreateIfNonExistent(options._createIfNonExistent);
document::BucketId id = operation_context().make_split_bit_constrained_bucket_id(update->getId());
+ assert(id == _bucket_id);
document::BucketId id2 = document::BucketId(id.getUsedBits() + 1, id.getRawId());
if (bucketState.length()) {
@@ -554,6 +562,33 @@ TEST_F(TwoPhaseUpdateOperationTest, fast_path_inconsistent_timestamps_inconsiste
_sender.getLastReply(true));
}
+TEST_F(TwoPhaseUpdateOperationTest, fast_path_cancellation_transitively_cancels_nested_update_operation) {
+ setup_stripe(2, 2, "storage:2 distributor:1");
+ enable_3phase_updates();
+ auto op = sendUpdate("0=1/2/3,1=1/2/3");
+ op->start(_sender);
+
+ ASSERT_EQ("Update => 0,Update => 1", _sender.getCommands(true));
+
+ replyToMessage(*op, _sender, 0, 110);
+
+ operation_context().remove_nodes_from_bucket_database(makeDocumentBucket(_bucket_id), {1});
+ op->cancel(_sender, CancelScope::of_node_subset({1}));
+
+ replyToMessage(*op, _sender, 1, 110);
+
+ // Client operation itself should return success since the update went through on all replica nodes
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 110) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply(true));
+
+ EXPECT_EQ("BucketId(0x400000000000cac4) : "
+ "node(idx=0,crc=0x123,docs=1/1,bytes=100/100,trusted=true,active=false,ready=false)",
+ dumpBucket(_bucket_id));
+}
+
void
TwoPhaseUpdateOperationTest::checkMessageSettingsPropagatedTo(
const api::StorageCommand::SP& msg) const
@@ -713,6 +748,38 @@ TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_safe_path_gets_fail) {
EXPECT_EQ(metrics().updates.failures.storagefailure.getValue(), 1);
}
+void TwoPhaseUpdateOperationTest::do_update_fails_if_cancelled_prior_to_safe_path_metadata_get_completion(bool in_sync_replicas) {
+ setup_stripe(2, 2, "storage:2 distributor:1");
+ enable_3phase_updates();
+ auto op = sendUpdate("0=1/2/3,1=2/3/4", UpdateOptions().createIfNonExistent(true));
+ op->start(_sender);
+
+ ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
+ replyToGet(*op, _sender, 0, 70);
+
+ operation_context().remove_nodes_from_bucket_database(makeDocumentBucket(_bucket_id), {1});
+ op->cancel(_sender, CancelScope::of_node_subset({1}));
+
+ replyToGet(*op, _sender, 1, in_sync_replicas ? 70 : 80);
+
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(BUCKET_NOT_FOUND, The update operation was cancelled due to a cluster "
+ "state change between executing the read and write phases of a write-repair update)",
+ _sender.getLastReply(true));
+
+ // TODO custom cancellation failure metric?
+}
+
+TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_cancelled_prior_to_safe_path_metadata_get_completion_consistent_case) {
+ do_update_fails_if_cancelled_prior_to_safe_path_metadata_get_completion(true);
+}
+
+TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_cancelled_prior_to_safe_path_metadata_get_completion_inconsistent_case) {
+ do_update_fails_if_cancelled_prior_to_safe_path_metadata_get_completion(false);
+}
+
TEST_F(TwoPhaseUpdateOperationTest, update_fails_if_apply_throws_exception) {
setup_stripe(2, 2, "storage:2 distributor:1");
// Create update for wrong doctype which will fail the update.
@@ -1214,6 +1281,59 @@ TEST_F(ThreePhaseUpdateTest, puts_are_sent_after_receiving_full_document_get) {
EXPECT_EQ(1, m.ok.getValue());
}
+TEST_F(ThreePhaseUpdateTest, update_fails_if_cancelled_between_metadata_gets_and_full_get) {
+ auto op = set_up_2_inconsistent_replicas_and_start_update();
+ ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
+ reply_to_metadata_get(*op, _sender, 0, 2000);
+ reply_to_metadata_get(*op, _sender, 1, 1000);
+ ASSERT_EQ("Get => 0", _sender.getCommands(true, false, 2));
+
+ operation_context().remove_nodes_from_bucket_database(makeDocumentBucket(_bucket_id), {1});
+ op->cancel(_sender, CancelScope::of_node_subset({1}));
+
+ replyToGet(*op, _sender, 2, 2000U);
+ ASSERT_EQ("", _sender.getCommands(true, false, 3)); // No puts sent
+
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 0) "
+ "ReturnCode(BUCKET_NOT_FOUND, The update operation was cancelled due to a cluster "
+ "state change between executing the read and write phases of a write-repair update)",
+ _sender.getLastReply(true));
+
+ // TODO cancellation metrics?
+}
+
+TEST_F(ThreePhaseUpdateTest, fast_path_cancellation_transitively_cancels_nested_put_operation) {
+ auto op = set_up_2_inconsistent_replicas_and_start_update();
+ ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
+ reply_to_metadata_get(*op, _sender, 0, 2000);
+ reply_to_metadata_get(*op, _sender, 1, 1000);
+
+ ASSERT_EQ("Get => 0", _sender.getCommands(true, false, 2));
+ replyToGet(*op, _sender, 2, 2000U);
+
+ operation_context().remove_nodes_from_bucket_database(makeDocumentBucket(_bucket_id), {0});
+ op->cancel(_sender, CancelScope::of_node_subset({0}));
+
+ ASSERT_EQ("Put => 1,Put => 0", _sender.getCommands(true, false, 3));
+ replyToPut(*op, _sender, 3);
+ replyToPut(*op, _sender, 4);
+
+ // Update itself is ACKed
+ EXPECT_EQ("UpdateReply(id:ns:testdoctype1::1, "
+ "BucketId(0x0000000000000000), "
+ "timestamp 0, timestamp of updated doc: 2000) "
+ "ReturnCode(NONE)",
+ _sender.getLastReply(true));
+
+ // But cancelled replicas are not reintroduced into the bucket DB
+ EXPECT_EQ("BucketId(0x400000000000cac4) : "
+ "node(idx=1,crc=0x1,docs=2/4,bytes=3/5,trusted=true,active=false,ready=false)",
+ dumpBucket(_bucket_id));
+}
+
TEST_F(ThreePhaseUpdateTest, consistent_meta_get_timestamps_can_restart_in_fast_path) {
auto cb = set_up_2_inconsistent_replicas_and_start_update();
ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
@@ -1277,8 +1397,7 @@ TEST_F(ThreePhaseUpdateTest, update_failed_with_transient_error_code_if_replica_
auto cb = sendUpdate("0=1/2/3,1=2/3/4"); // 2 replicas, room for 1 more.
cb->start(_sender);
// Add new replica to deterministic test bucket after gets have been sent
- BucketId bucket(0x400000000000cac4); // Always the same in the test.
- addNodesToBucketDB(bucket, "0=1/2/3,1=2/3/4,2=3/3/3");
+ addNodesToBucketDB(_bucket_id, "0=1/2/3,1=2/3/4,2=3/3/3");
Timestamp old_timestamp = 500;
ASSERT_EQ("Get => 0,Get => 1", _sender.getCommands(true));
diff --git a/storage/src/tests/distributor/updateoperationtest.cpp b/storage/src/tests/distributor/updateoperationtest.cpp
index d0ae31b9524..fefc88a27c2 100644
--- a/storage/src/tests/distributor/updateoperationtest.cpp
+++ b/storage/src/tests/distributor/updateoperationtest.cpp
@@ -11,7 +11,7 @@
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/state.h>
-#include <vespa/vespalib/gtest/gtest.h>
+#include <gtest/gtest.h>
using config::ConfigGetter;
using config::FileSpec;
@@ -29,11 +29,14 @@ struct UpdateOperationTest : Test, DistributorStripeTestUtil {
std::shared_ptr<const DocumentTypeRepo> _repo;
const DocumentType* _html_type;
+ UpdateOperationTest()
+ : _repo(std::make_shared<DocumentTypeRepo>(*ConfigGetter<DocumenttypesConfig>::
+ getConfig("config-doctypes", FileSpec("../config-doctypes.cfg")))),
+ _html_type(_repo->getDocumentType("text/html"))
+ {
+ }
+
void SetUp() override {
- _repo.reset(
- new DocumentTypeRepo(*ConfigGetter<DocumenttypesConfig>::
- getConfig("config-doctypes", FileSpec("../config-doctypes.cfg"))));
- _html_type = _repo->getDocumentType("text/html");
createLinks();
}
@@ -241,4 +244,31 @@ TEST_F(UpdateOperationTest, inconsistent_create_if_missing_updates_picks_largest
EXPECT_EQ(2, m.diverging_timestamp_updates.getValue());
}
+// Note: we don't exhaustively test cancellation edges here, as we assume that Put/Update/Remove ops
+// share the same underlying PersistenceMessageTracker logic. See PutOperationTest for more tests.
+
+TEST_F(UpdateOperationTest, cancelled_nodes_are_not_updated_in_db) {
+ setup_stripe(Redundancy(3), NodeCount(3), "distributor:1 storage:3");
+
+ std::shared_ptr<UpdateOperation> op = sendUpdate("0=1/2/3,1=1/2/3,2=1/2/3");
+ DistributorMessageSenderStub sender;
+ op->start(sender);
+
+ ASSERT_EQ("Update => 0,Update => 1,Update => 2", sender.getCommands(true));
+
+ // Simulate nodes 0 and 2 going down
+ operation_context().remove_nodes_from_bucket_database(makeDocumentBucket(_bId), {0, 2});
+ // Cancelling shall be cumulative
+ op->cancel(_sender, CancelScope::of_node_subset({0}));
+ op->cancel(_sender, CancelScope::of_node_subset({2}));
+
+ replyToMessage(*op, sender, 0, 120);
+ replyToMessage(*op, sender, 1, 120);
+ replyToMessage(*op, sender, 2, 120);
+
+ EXPECT_EQ("BucketId(0x400000000000cac4) : "
+ "node(idx=1,crc=0x2,docs=4/4,bytes=6/6,trusted=true,active=false,ready=false)",
+ dumpBucket(_bId));
+}
+
}
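cancelled_nodes_are_not_updated_in_db depends on cancelled replicas being pruned before the reply is applied to the bucket database; the real logic lives in the new cancelled_replicas_pruner.cpp added to the distributor library further down in this patch. A rough sketch of the idea, where the CancelScope query method name is an assumption:

    // Illustrative sketch only, not the actual pruner implementation.
    std::vector<BucketCopy>
    prune_cancelled_nodes(const std::vector<BucketCopy>& replicas, const CancelScope& scope) {
        std::vector<BucketCopy> kept;
        kept.reserve(replicas.size());
        for (const auto& copy : replicas) {
            if (!scope.node_is_cancelled(copy.getNode())) { // method name assumed
                kept.push_back(copy);
            }
        }
        return kept;
    }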
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index f39222722fc..91ccf2b123f 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -15,7 +15,6 @@
#include <vespa/document/fieldvalue/stringfieldvalue.h>
#include <vespa/document/fieldvalue/document.h>
#include <vespa/document/datatype/documenttype.h>
-#include <vespa/fastos/file.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <vespa/persistence/spi/test.h>
#include <vespa/persistence/spi/bucket_tasks.h>
@@ -207,32 +206,6 @@ struct FileStorTestBase : Test {
FileStorTestBase::~FileStorTestBase() = default;
-std::string findFile(const std::string& path, const std::string& file) {
- FastOS_DirectoryScan dirScan(path.c_str());
- while (dirScan.ReadNext()) {
- if (dirScan.GetName()[0] == '.') {
- // Ignore current and parent dir.. Ignores hidden files too, but
- // that doesn't matter as we're not trying to find them.
- continue;
- }
- std::string filename(dirScan.GetName());
- if (dirScan.IsDirectory()) {
- std::string result = findFile(path + "/" + filename, file);
- if (result != "") {
- return result;
- }
- }
- if (filename == file) {
- return path + "/" + filename;
- }
- }
- return "";
-}
-
-bool fileExistsWithin(const std::string& path, const std::string& file) {
- return !(findFile(path, file) == "");
-}
-
std::unique_ptr<DiskThread>
createThread(PersistenceHandler & persistenceHandler,
FileStorHandler& filestorHandler,
diff --git a/storage/src/tests/storageserver/statemanagertest.cpp b/storage/src/tests/storageserver/statemanagertest.cpp
index 5764460f5bb..d757a83db01 100644
--- a/storage/src/tests/storageserver/statemanagertest.cpp
+++ b/storage/src/tests/storageserver/statemanagertest.cpp
@@ -1,6 +1,5 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/metrics/metricmanager.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/state.h>
#include <vespa/vdslib/state/cluster_state_bundle.h>
@@ -20,10 +19,9 @@ using namespace ::testing;
namespace storage {
-struct StateManagerTest : Test {
+struct StateManagerTest : Test, NodeStateReporter {
std::unique_ptr<TestServiceLayerApp> _node;
std::unique_ptr<DummyStorageLink> _upper;
- std::unique_ptr<metrics::MetricManager> _metricManager;
StateManager* _manager;
DummyStorageLink* _lower;
@@ -42,6 +40,7 @@ struct StateManagerTest : Test {
std::string get_node_info() const {
return _manager->getNodeInfo();
}
+ void report(vespalib::JsonStream &) const override {}
void extract_cluster_state_version_from_host_info(uint32_t& version_out);
};
@@ -60,11 +59,8 @@ StateManagerTest::SetUp()
_node = std::make_unique<TestServiceLayerApp>(NodeIndex(2));
// Clock will increase 1 sec per call.
_node->getClock().setAbsoluteTimeInSeconds(1);
- _metricManager = std::make_unique<metrics::MetricManager>();
_upper = std::make_unique<DummyStorageLink>();
- _manager = new StateManager(_node->getComponentRegister(),
- *_metricManager,
- std::make_unique<HostInfo>());
+ _manager = new StateManager(_node->getComponentRegister(), std::make_unique<HostInfo>(), *this, false);
_lower = new DummyStorageLink();
_upper->push_back(StorageLink::UP(_manager));
_upper->push_back(StorageLink::UP(_lower));
@@ -83,7 +79,6 @@ StateManagerTest::TearDown() {
_upper->flush();
_upper.reset();
_node.reset();
- _metricManager.reset();
}
void
diff --git a/storage/src/vespa/storage/bucketdb/.gitignore b/storage/src/vespa/storage/bucketdb/.gitignore
index 54d8faa8201..3df72b601a2 100644
--- a/storage/src/vespa/storage/bucketdb/.gitignore
+++ b/storage/src/vespa/storage/bucketdb/.gitignore
@@ -7,5 +7,3 @@
.libs
Makefile
config-stor-bucketdb.*
-/config-stor-bucket-init.h
-/config-stor-bucket-init.cpp
diff --git a/storage/src/vespa/storage/bucketdb/CMakeLists.txt b/storage/src/vespa/storage/bucketdb/CMakeLists.txt
index fe60426325c..b16795a90ba 100644
--- a/storage/src/vespa/storage/bucketdb/CMakeLists.txt
+++ b/storage/src/vespa/storage/bucketdb/CMakeLists.txt
@@ -15,5 +15,3 @@ vespa_add_library(storage_bucketdb OBJECT
)
vespa_generate_config(storage_bucketdb stor-bucketdb.def)
install_config_definition(stor-bucketdb.def vespa.config.content.core.stor-bucketdb.def)
-vespa_generate_config(storage_bucketdb stor-bucket-init.def)
-install_config_definition(stor-bucket-init.def vespa.config.content.core.stor-bucket-init.def)
diff --git a/storage/src/vespa/storage/bucketdb/bucketcopy.h b/storage/src/vespa/storage/bucketdb/bucketcopy.h
index e8d1db1d824..ca629a6cd8e 100644
--- a/storage/src/vespa/storage/bucketdb/bucketcopy.h
+++ b/storage/src/vespa/storage/bucketdb/bucketcopy.h
@@ -7,10 +7,10 @@ namespace storage {
class BucketCopy {
private:
- uint64_t _timestamp;
+ uint64_t _timestamp;
api::BucketInfo _info;
- uint16_t _flags;
- uint16_t _node;
+ uint16_t _flags;
+ uint16_t _node;
public:
static const int TRUSTED = 1;
@@ -18,9 +18,7 @@ public:
BucketCopy() noexcept
: _timestamp(0), _flags(0), _node(0xffff) {}
- BucketCopy(uint64_t timestamp,
- uint16_t nodeIdx,
- const api::BucketInfo& info) noexcept
+ BucketCopy(uint64_t timestamp, uint16_t nodeIdx, const api::BucketInfo& info) noexcept
: _timestamp(timestamp),
_info(info),
_flags(0),
@@ -76,16 +74,14 @@ public:
_info.setActive(setactive);
}
- bool consistentWith(const BucketCopy& other,
- bool countInvalidAsConsistent = false) const noexcept
- {
+ bool consistentWith(const BucketCopy& other) const noexcept {
// If both are valid, check checksum and doc count.
if (valid() && other.valid()) {
return (getChecksum() == other.getChecksum()
&& getDocumentCount() == other.getDocumentCount());
}
- return countInvalidAsConsistent;
+ return false;
}
void print(std::ostream&, bool verbose, const std::string& indent) const;
@@ -93,9 +89,7 @@ public:
std::string toString() const;
bool operator==(const BucketCopy& other) const noexcept {
- return
- getBucketInfo() == other.getBucketInfo() &&
- _flags == other._flags;
+ return (getBucketInfo() == other.getBucketInfo()) && (_flags == other._flags);
}
};
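With the countInvalidAsConsistent escape hatch removed, an invalid copy never compares as consistent. A small usage sketch, assuming the three-argument api::BucketInfo constructor and that a default-constructed api::BucketInfo reports !valid():

    BucketCopy a(100, 0, api::BucketInfo(0x1, 2, 3));
    BucketCopy b(100, 1, api::BucketInfo(0x1, 2, 3));
    BucketCopy no_info(100, 2, api::BucketInfo());   // assumed to be !valid()
    assert(a.consistentWith(b));        // same checksum and document count
    assert(!a.consistentWith(no_info)); // invalid copies are never consistent anymore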
diff --git a/storage/src/vespa/storage/bucketdb/bucketdatabase.h b/storage/src/vespa/storage/bucketdb/bucketdatabase.h
index 4e0b727036a..d3fdce8f0d8 100644
--- a/storage/src/vespa/storage/bucketdb/bucketdatabase.h
+++ b/storage/src/vespa/storage/bucketdb/bucketdatabase.h
@@ -22,26 +22,29 @@ public:
BucketInfoType _info;
public:
- EntryBase() : _bucketId(0) {} // Invalid entry
- EntryBase(const document::BucketId& bId, const BucketInfoType& bucketInfo)
- : _bucketId(bId), _info(bucketInfo) {}
- EntryBase(const document::BucketId& bId, BucketInfoType&& bucketInfo)
- : _bucketId(bId), _info(std::move(bucketInfo)) {}
- explicit EntryBase(const document::BucketId& bId) : _bucketId(bId) {}
-
- bool operator==(const EntryBase& other) const {
+ EntryBase() noexcept : _bucketId(0), _info() {} // Invalid entry
+ EntryBase(const document::BucketId& bId, BucketInfoType&& bucketInfo) noexcept
+ : _bucketId(bId),
+ _info(std::move(bucketInfo))
+ {}
+ explicit EntryBase(const document::BucketId& bId) noexcept : _bucketId(bId), _info() {}
+ EntryBase(EntryBase &&) noexcept = default;
+ EntryBase & operator=(EntryBase &&) noexcept = default;
+ EntryBase(const EntryBase &) = default;
+ EntryBase & operator=(const EntryBase &) = default;
+ bool operator==(const EntryBase& other) const noexcept {
return (_bucketId == other._bucketId && _info == other._info);
}
- bool valid() const { return (_bucketId.getRawId() != 0); }
+ bool valid() const noexcept { return (_bucketId.getRawId() != 0); }
std::string toString() const;
- const document::BucketId& getBucketId() const { return _bucketId; }
- const BucketInfoType& getBucketInfo() const { return _info; }
- BucketInfoType& getBucketInfo() { return _info; }
- BucketInfoType* operator->() { return &_info; }
- const BucketInfoType* operator->() const { return &_info; }
+ const document::BucketId& getBucketId() const noexcept { return _bucketId; }
+ const BucketInfoType& getBucketInfo() const noexcept { return _info; }
+ BucketInfoType& getBucketInfo() noexcept { return _info; }
+ BucketInfoType* operator->() noexcept { return &_info; }
+ const BucketInfoType* operator->() const noexcept { return &_info; }
- static EntryBase createInvalid() {
+ static EntryBase createInvalid() noexcept {
return EntryBase();
}
};
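EntryBase now keeps only the move-taking constructor (plus explicitly defaulted copy and move), so callers hand the bucket info over as an rvalue. A hedged construction sketch, assuming the usual Entry alias for EntryBase<BucketInfo>:

    BucketDatabase::Entry entry(document::BucketId(16, 0xcac4),
                                BucketInfo(/*lastGarbageCollection=*/0, {}));
    BucketDatabase::Entry invalid = BucketDatabase::Entry::createInvalid();
    assert(!invalid.valid());   // raw bucket id 0 marks the invalid sentinel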
diff --git a/storage/src/vespa/storage/bucketdb/bucketinfo.cpp b/storage/src/vespa/storage/bucketdb/bucketinfo.cpp
index dcf49b4d022..d2ff7b53403 100644
--- a/storage/src/vespa/storage/bucketdb/bucketinfo.cpp
+++ b/storage/src/vespa/storage/bucketdb/bucketinfo.cpp
@@ -9,9 +9,9 @@ namespace storage {
template class BucketInfoBase<std::vector<BucketCopy>>;
template class BucketInfoBase<vespalib::ConstArrayRef<BucketCopy>>;
-BucketInfo::BucketInfo() : BucketInfoBase() {}
+BucketInfo::BucketInfo() noexcept : BucketInfoBase() {}
-BucketInfo::BucketInfo(uint32_t lastGarbageCollection, std::vector<BucketCopy> nodes)
+BucketInfo::BucketInfo(uint32_t lastGarbageCollection, std::vector<BucketCopy> nodes) noexcept
: BucketInfoBase(lastGarbageCollection, std::move(nodes))
{}
@@ -23,7 +23,7 @@ BucketInfo::BucketInfo(BucketInfo&&) noexcept = default;
BucketInfo& BucketInfo::operator=(BucketInfo&&) noexcept = default;
void
-BucketInfo::updateTrusted() {
+BucketInfo::updateTrusted() noexcept {
if (validAndConsistent()) {
for (uint32_t i = 0; i < _nodes.size(); i++) {
_nodes[i].setTrusted();
@@ -51,7 +51,7 @@ BucketInfo::updateTrusted() {
}
void
-BucketInfo::resetTrusted() {
+BucketInfo::resetTrusted() noexcept {
for (uint32_t i = 0; i < _nodes.size(); i++) {
_nodes[i].clearTrusted();
}
@@ -63,10 +63,10 @@ namespace {
struct Sorter {
const std::vector<uint16_t>& _order;
- Sorter(const std::vector<uint16_t>& recommendedOrder) :
+ Sorter(const std::vector<uint16_t>& recommendedOrder) noexcept :
_order(recommendedOrder) {}
- bool operator() (const BucketCopy& a, const BucketCopy& b) {
+ bool operator() (const BucketCopy& a, const BucketCopy& b) noexcept {
int order_a = -1;
for (uint32_t i = 0; i < _order.size(); i++) {
if (_order[i] == a.getNode()) {
@@ -119,8 +119,7 @@ BucketInfo::addNodes(const std::vector<BucketCopy>& newCopies,
if (found) {
if (found->getTimestamp() < newCopies[i].getTimestamp()) {
- found->setBucketInfo(newCopies[i].getTimestamp(),
- newCopies[i].getBucketInfo());
+ found->setBucketInfo(newCopies[i].getTimestamp(), newCopies[i].getBucketInfo());
}
} else {
_nodes.push_back(newCopies[i]);
@@ -135,19 +134,15 @@ BucketInfo::addNodes(const std::vector<BucketCopy>& newCopies,
}
void
-BucketInfo::addNode(const BucketCopy& newCopy,
- const std::vector<uint16_t>& recommendedOrder)
+BucketInfo::addNode(const BucketCopy& newCopy, const std::vector<uint16_t>& recommendedOrder)
{
- addNodes(toVector<BucketCopy>(newCopy),
- recommendedOrder);
+ addNodes(toVector<BucketCopy>(newCopy), recommendedOrder);
}
bool
BucketInfo::removeNode(unsigned short node, TrustedUpdate update)
{
- for (std::vector<BucketCopy>::iterator iter = _nodes.begin();
- iter != _nodes.end();
- iter++) {
+ for (auto iter = _nodes.begin(); iter != _nodes.end(); ++iter) {
if (iter->getNode() == node) {
_nodes.erase(iter);
if (update == TrustedUpdate::UPDATE) {
@@ -162,11 +157,9 @@ BucketInfo::removeNode(unsigned short node, TrustedUpdate update)
BucketCopy*
BucketInfo::getNodeInternal(uint16_t node)
{
- for (std::vector<BucketCopy>::iterator iter = _nodes.begin();
- iter != _nodes.end();
- iter++) {
- if (iter->getNode() == node) {
- return &*iter;
+ for (BucketCopy & copy : _nodes) {
+ if (copy.getNode() == node) {
+ return &copy;
}
}
return 0;
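The refactored Sorter keeps copies ordered by the caller-supplied recommended (ideal-state) order. A small, illustrative addNode() sketch using the signatures shown in this patch (the three-argument api::BucketInfo constructor is assumed):

    BucketInfo info;
    info.addNode(BucketCopy(100, /*node=*/1, api::BucketInfo(0x1, 2, 3)), {0, 1, 2});
    info.addNode(BucketCopy(101, /*node=*/0, api::BucketInfo(0x1, 2, 3)), {0, 1, 2});
    assert(info.getNodeRef(0).getNode() == 0);  // node 0 sorts first per the recommended order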
diff --git a/storage/src/vespa/storage/bucketdb/bucketinfo.h b/storage/src/vespa/storage/bucketdb/bucketinfo.h
index 1870d4c91d4..9c024c31fd3 100644
--- a/storage/src/vespa/storage/bucketdb/bucketinfo.h
+++ b/storage/src/vespa/storage/bucketdb/bucketinfo.h
@@ -25,15 +25,15 @@ protected:
uint32_t _lastGarbageCollection;
NodeSeq _nodes;
public:
- BucketInfoBase()
+ BucketInfoBase() noexcept
: _lastGarbageCollection(0),
_nodes()
{}
- BucketInfoBase(uint32_t lastGarbageCollection, const NodeSeq& nodes)
+ BucketInfoBase(uint32_t lastGarbageCollection, const NodeSeq& nodes) noexcept
: _lastGarbageCollection(lastGarbageCollection),
_nodes(nodes)
{}
- BucketInfoBase(uint32_t lastGarbageCollection, NodeSeq&& nodes)
+ BucketInfoBase(uint32_t lastGarbageCollection, NodeSeq&& nodes) noexcept
: _lastGarbageCollection(lastGarbageCollection),
_nodes(std::move(nodes))
{}
@@ -47,28 +47,28 @@ public:
/**
* @return Returns the last time when this bucket was "garbage collected".
*/
- uint32_t getLastGarbageCollectionTime() const { return _lastGarbageCollection; }
+ uint32_t getLastGarbageCollectionTime() const noexcept { return _lastGarbageCollection; }
/** True if the bucket contains no documents and is consistent. */
- bool emptyAndConsistent() const;
+ bool emptyAndConsistent() const noexcept;
/**
Check that all copies have complete bucket information and are
consistent with each other.
*/
- bool validAndConsistent() const;
+ bool validAndConsistent() const noexcept;
/**
* True if the bucket contains at least one invalid copy
*/
- bool hasInvalidCopy() const;
+ bool hasInvalidCopy() const noexcept;
/**
* Returns the number of trusted nodes this entry has.
*/
- uint16_t getTrustedCount() const;
+ uint16_t getTrustedCount() const noexcept;
- bool hasTrusted() const {
+ bool hasTrusted() const noexcept {
return getTrustedCount() != 0;
}
@@ -78,14 +78,14 @@ public:
- * @param countInCompleteAsInconsistent If false, nodes that are incomplete
- * are always counted as consistent with complete nodes.
+ * Copies without valid bucket info are always counted as inconsistent.
*/
- bool consistentNodes(bool countInvalidAsConsistent = false) const;
+ bool consistentNodes() const noexcept;
void print(std::ostream&, bool verbose, const std::string& indent) const;
/**
* Returns the bucket copy struct for the given node, null if nonexisting
*/
- const BucketCopy* getNode(uint16_t node) const;
+ const BucketCopy* getNode(uint16_t node) const noexcept;
/**
* Returns the number of nodes this entry has.
@@ -95,14 +95,14 @@ public:
/**
* Returns a list of the nodes this entry has.
*/
- std::vector<uint16_t> getNodes() const;
+ std::vector<uint16_t> getNodes() const noexcept;
/**
Returns a reference to the node with the given index in the node
array. This operation has undefined behaviour if the index given
is not within the node count.
*/
- const BucketCopy& getNodeRef(uint16_t idx) const {
+ const BucketCopy& getNodeRef(uint16_t idx) const noexcept {
return _nodes[idx];
}
@@ -117,14 +117,25 @@ public:
std::string toString() const;
- uint32_t getHighestDocumentCount() const;
- uint32_t getHighestTotalDocumentSize() const;
- uint32_t getHighestMetaCount() const;
- uint32_t getHighestUsedFileSize() const;
-
- bool hasRecentlyCreatedEmptyCopy() const;
-
- bool operator==(const BucketInfoBase& other) const;
+ uint32_t getHighestDocumentCount() const noexcept;
+ uint32_t getHighestMetaCount() const noexcept;
+ uint32_t getHighestUsedFileSize() const noexcept;
+ struct Highest {
+ Highest() noexcept : _documentCount(0), _totalDocumentSize(0), _metaCount(0), _usedFileSize(0) {}
+ void update(const BucketCopy & n) noexcept {
+ _documentCount = std::max(_documentCount, n.getDocumentCount());
+ _totalDocumentSize = std::max(_totalDocumentSize, n.getTotalDocumentSize());
+ _metaCount = std::max(_metaCount, n.getMetaCount());
+ _usedFileSize = std::max(_usedFileSize, n.getUsedFileSize());
+ }
+ uint32_t _documentCount;
+ uint32_t _totalDocumentSize;
+ uint32_t _metaCount;
+ uint32_t _usedFileSize;
+ };
+ Highest getHighest() const noexcept;
+ bool hasRecentlyCreatedEmptyCopy() const noexcept;
+ bool operator==(const BucketInfoBase& other) const noexcept;
};
template <typename NodeSeq>
@@ -140,8 +151,8 @@ public:
class BucketInfo : public BucketInfoBase<std::vector<BucketCopy>> {
public:
- BucketInfo();
- BucketInfo(uint32_t lastGarbageCollection, std::vector<BucketCopy> nodes);
+ BucketInfo() noexcept;
+ BucketInfo(uint32_t lastGarbageCollection, std::vector<BucketCopy> nodes) noexcept;
~BucketInfo();
BucketInfo(const BucketInfo&);
@@ -152,20 +163,20 @@ public:
/**
* Sets the last time the bucket was "garbage collected".
*/
- void setLastGarbageCollectionTime(uint32_t timestamp) {
+ void setLastGarbageCollectionTime(uint32_t timestamp) noexcept {
_lastGarbageCollection = timestamp;
}
/**
Update trusted flags if bucket is now complete and consistent.
*/
- void updateTrusted();
+ void updateTrusted() noexcept;
/**
Removes any historical information on trustedness, and sets the bucket copies to
trusted if they are now complete and consistent.
*/
- void resetTrusted();
+ void resetTrusted() noexcept;
/**
Adds the given node.
@@ -184,8 +195,7 @@ public:
/**
Simplified API for the common case of inserting one node. See addNodes().
*/
- void addNode(const BucketCopy& newCopy,
- const std::vector<uint16_t>& recommendedOrder);
+ void addNode(const BucketCopy& newCopy, const std::vector<uint16_t>& recommendedOrder);
/**
Updates bucket information for a node. Does nothing if the node
diff --git a/storage/src/vespa/storage/bucketdb/bucketinfo.hpp b/storage/src/vespa/storage/bucketdb/bucketinfo.hpp
index b7e8c5925c5..a8a1069d587 100644
--- a/storage/src/vespa/storage/bucketdb/bucketinfo.hpp
+++ b/storage/src/vespa/storage/bucketdb/bucketinfo.hpp
@@ -9,16 +9,18 @@
namespace storage {
template <typename NodeSeq>
-std::string BucketInfoBase<NodeSeq>::toString() const {
+std::string
+BucketInfoBase<NodeSeq>::toString() const {
std::ostringstream ost;
print(ost, true, "");
return ost.str();
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::emptyAndConsistent() const {
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- if (!_nodes[i].empty()) {
+bool
+BucketInfoBase<NodeSeq>::emptyAndConsistent() const noexcept {
+ for (const auto & n : _nodes) {
+ if (!n.empty()) {
return false;
}
}
@@ -26,9 +28,10 @@ bool BucketInfoBase<NodeSeq>::emptyAndConsistent() const {
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::validAndConsistent() const {
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- if (!_nodes[i].valid()) {
+bool
+BucketInfoBase<NodeSeq>::validAndConsistent() const noexcept {
+ for (const auto & n : _nodes) {
+ if (!n.valid()) {
return false;
}
}
@@ -36,9 +39,10 @@ bool BucketInfoBase<NodeSeq>::validAndConsistent() const {
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::hasInvalidCopy() const {
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- if (!_nodes[i].valid()) {
+bool
+BucketInfoBase<NodeSeq>::hasInvalidCopy() const noexcept {
+ for (const auto & n : _nodes){
+ if (!n.valid()) {
return true;
}
}
@@ -46,10 +50,11 @@ bool BucketInfoBase<NodeSeq>::hasInvalidCopy() const {
}
template <typename NodeSeq>
-uint16_t BucketInfoBase<NodeSeq>::getTrustedCount() const {
+uint16_t
+BucketInfoBase<NodeSeq>::getTrustedCount() const noexcept {
uint32_t trustedCount = 0;
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- if (_nodes[i].trusted()) {
+ for (const auto & n : _nodes) {
+ if (n.trusted()) {
trustedCount++;
}
}
@@ -57,11 +62,11 @@ uint16_t BucketInfoBase<NodeSeq>::getTrustedCount() const {
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::consistentNodes(bool countInvalidAsConsistent) const {
+bool
+BucketInfoBase<NodeSeq>::consistentNodes() const noexcept {
int compareIndex = 0;
for (uint32_t i = 1; i < _nodes.size(); i++) {
- if (!_nodes[i].consistentWith(_nodes[compareIndex],
- countInvalidAsConsistent)) return false;
+ if (!_nodes[i].consistentWith(_nodes[compareIndex])) return false;
}
return true;
}
@@ -90,14 +95,16 @@ struct ReplicaMetadata {
};
};
-constexpr bool is_majority(size_t n, size_t m) {
+constexpr bool
+is_majority(size_t n, size_t m) noexcept {
return (n >= (m / 2) + 1);
}
}
template <typename NodeSeq>
-api::BucketInfo BucketInfoBase<NodeSeq>::majority_consistent_bucket_info() const noexcept {
+api::BucketInfo
+BucketInfoBase<NodeSeq>::majority_consistent_bucket_info() const noexcept {
if (_nodes.size() < 3) {
return {};
}
@@ -116,7 +123,8 @@ api::BucketInfo BucketInfoBase<NodeSeq>::majority_consistent_bucket_info() const
}
template <typename NodeSeq>
-void BucketInfoBase<NodeSeq>::print(std::ostream& out, bool verbose, const std::string& indent) const {
+void
+BucketInfoBase<NodeSeq>::print(std::ostream& out, bool verbose, const std::string& indent) const {
if (_nodes.size() == 0) {
out << "no nodes";
}
@@ -129,7 +137,8 @@ void BucketInfoBase<NodeSeq>::print(std::ostream& out, bool verbose, const std::
}
template <typename NodeSeq>
-const BucketCopy* BucketInfoBase<NodeSeq>::getNode(uint16_t node) const {
+const BucketCopy*
+BucketInfoBase<NodeSeq>::getNode(uint16_t node) const noexcept {
for (const auto& n : _nodes) {
if (n.getNode() == node) {
return &n;
@@ -139,54 +148,61 @@ const BucketCopy* BucketInfoBase<NodeSeq>::getNode(uint16_t node) const {
}
template <typename NodeSeq>
-std::vector<uint16_t> BucketInfoBase<NodeSeq>::getNodes() const {
+std::vector<uint16_t>
+BucketInfoBase<NodeSeq>::getNodes() const noexcept {
std::vector<uint16_t> result;
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- result.emplace_back(_nodes[i].getNode());
+ result.reserve(_nodes.size());
+ for (const auto & n : _nodes) {
+ result.emplace_back(n.getNode());
}
return result;
}
template <typename NodeSeq>
-uint32_t BucketInfoBase<NodeSeq>::getHighestDocumentCount() const {
- uint32_t highest = 0;
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- highest = std::max(highest, _nodes[i].getDocumentCount());
+typename BucketInfoBase<NodeSeq>::Highest
+BucketInfoBase<NodeSeq>::getHighest() const noexcept {
+ Highest highest;
+ for (const auto & n : _nodes) {
+ highest.update(n);
}
return highest;
}
template <typename NodeSeq>
-uint32_t BucketInfoBase<NodeSeq>::getHighestTotalDocumentSize() const {
+uint32_t
+BucketInfoBase<NodeSeq>::getHighestDocumentCount() const noexcept {
uint32_t highest = 0;
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- highest = std::max(highest, _nodes[i].getTotalDocumentSize());
+ for (const auto & n : _nodes) {
+ highest = std::max(highest, n.getDocumentCount());
}
return highest;
}
template <typename NodeSeq>
-uint32_t BucketInfoBase<NodeSeq>::getHighestMetaCount() const {
+uint32_t
+BucketInfoBase<NodeSeq>::getHighestMetaCount() const noexcept {
uint32_t highest = 0;
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- highest = std::max(highest, _nodes[i].getMetaCount());
+ for (const auto & n : _nodes) {
+ highest = std::max(highest, n.getMetaCount());
}
return highest;
}
template <typename NodeSeq>
-uint32_t BucketInfoBase<NodeSeq>::getHighestUsedFileSize() const {
+uint32_t
+BucketInfoBase<NodeSeq>::getHighestUsedFileSize() const noexcept {
uint32_t highest = 0;
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- highest = std::max(highest, _nodes[i].getUsedFileSize());
+ for (const auto & n : _nodes) {
+ highest = std::max(highest, n.getUsedFileSize());
}
return highest;
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::hasRecentlyCreatedEmptyCopy() const {
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- if (_nodes[i].wasRecentlyCreated()) {
+bool
+BucketInfoBase<NodeSeq>::hasRecentlyCreatedEmptyCopy() const noexcept {
+ for (const auto & n : _nodes) {
+ if (n.wasRecentlyCreated()) {
return true;
}
}
@@ -194,7 +210,8 @@ bool BucketInfoBase<NodeSeq>::hasRecentlyCreatedEmptyCopy() const {
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::operator==(const BucketInfoBase<NodeSeq>& other) const {
+bool
+BucketInfoBase<NodeSeq>::operator==(const BucketInfoBase<NodeSeq>& other) const noexcept {
if (_nodes.size() != other._nodes.size()) {
return false;
}
@@ -210,6 +227,6 @@ bool BucketInfoBase<NodeSeq>::operator==(const BucketInfoBase<NodeSeq>& other) c
}
return true;
-};
+}
}
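getHighestTotalDocumentSize() is gone as a separate accessor; the new Highest aggregate computes all four maxima in a single pass over the replicas. A usage sketch based on the fields declared in bucketinfo.h above:

    const auto highest = info.getHighest();
    uint32_t docs  = highest._documentCount;     // per-field accessors remain for docs/meta/file size
    uint32_t bytes = highest._totalDocumentSize; // replaces the removed getHighestTotalDocumentSize()
    uint32_t meta  = highest._metaCount;
    uint32_t files = highest._usedFileSize;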
diff --git a/storage/src/vespa/storage/bucketdb/bucketmanager.cpp b/storage/src/vespa/storage/bucketdb/bucketmanager.cpp
index 57d3c241cf0..c8c36f94579 100644
--- a/storage/src/vespa/storage/bucketdb/bucketmanager.cpp
+++ b/storage/src/vespa/storage/bucketdb/bucketmanager.cpp
@@ -17,6 +17,7 @@
#include <vespa/storageapi/message/state.h>
#include <vespa/storageapi/message/bucketsplitting.h>
#include <vespa/storageapi/message/stat.h>
+#include <vespa/metrics/jsonwriter.h>
#include <vespa/document/bucket/fixed_bucket_spaces.h>
#include <vespa/vespalib/util/stringfmt.h>
#include <ranges>
@@ -73,7 +74,8 @@ BucketManager::~BucketManager()
closeNextLink();
}
-void BucketManager::onClose()
+void
+BucketManager::onClose()
{
// Stop internal thread such that we don't send any more messages down.
if (_thread) {
@@ -90,56 +92,60 @@ BucketManager::print(std::ostream& out, bool ,const std::string& ) const
namespace {
-template<bool log>
class DistributorInfoGatherer
{
using ResultArray = api::RequestBucketInfoReply::EntryVector;
DistributorStateCache _state;
std::unordered_map<uint16_t, ResultArray>& _result;
- const document::BucketIdFactory& _factory;
- std::shared_ptr<const lib::Distribution> _storageDistribution;
+ bool _spam;
public:
- DistributorInfoGatherer(
- const lib::ClusterState& systemState,
- std::unordered_map<uint16_t, ResultArray>& result,
- const document::BucketIdFactory& factory,
- std::shared_ptr<const lib::Distribution> distribution)
- : _state(*distribution, systemState),
+ DistributorInfoGatherer(const lib::ClusterState& systemState,
+ std::unordered_map<uint16_t, ResultArray>& result,
+ const lib::Distribution & distribution,
+ bool spam) noexcept;
+
+ StorBucketDatabase::Decision operator()(uint64_t bucketId, const StorBucketDatabase::Entry& data);
+
+};
+
+DistributorInfoGatherer::DistributorInfoGatherer(const lib::ClusterState& systemState,
+ std::unordered_map<uint16_t, ResultArray>& result,
+ const lib::Distribution & distribution,
+ bool spam) noexcept
+ : _state(distribution, systemState),
_result(result),
- _factory(factory),
- _storageDistribution(std::move(distribution))
- {
- }
+ _spam(spam)
+{
+}
- StorBucketDatabase::Decision operator()(uint64_t bucketId,const StorBucketDatabase::Entry& data)
- {
- document::BucketId b(document::BucketId::keyToBucketId(bucketId));
- try{
- uint16_t i = _state.getOwner(b);
- auto it = _result.find(i);
- if constexpr (log) {
- LOG(spam, "Bucket %s (reverse %" PRIu64 "), should be handled by distributor %u which we are %sgenerating state for.",
- b.toString().c_str(), bucketId, i, it == _result.end() ? "not " : "");
- }
- if (it != _result.end()) {
- api::RequestBucketInfoReply::Entry entry;
- entry._bucketId = b;
- entry._info = data.getBucketInfo();
- it->second.push_back(entry);
- }
- } catch (lib::TooFewBucketBitsInUseException& e) {
- LOGBP(warning, "Cannot assign bucket %s to a distributor as bucket only specifies %u bits.",
- b.toString().c_str(), b.getUsedBits());
- } catch (lib::NoDistributorsAvailableException& e) {
- LOGBP(warning, "No distributors available while processing request bucket info. Distribution hash: %s, cluster state: %s",
- _state.getDistribution().getNodeGraph().getDistributionConfigHash().c_str(), _state.getClusterState().toString().c_str());
+StorBucketDatabase::Decision
+DistributorInfoGatherer::operator()(uint64_t bucketId, const StorBucketDatabase::Entry& data)
+{
+ document::BucketId b(document::BucketId::keyToBucketId(bucketId));
+ try {
+ uint16_t i = _state.getOwner(b);
+ auto it = _result.find(i);
+ if (_spam) {
+ LOG(spam, "Bucket %s (reverse %" PRIu64 "), should be handled by distributor %u which we are %sgenerating state for.",
+ b.toString().c_str(), bucketId, i, it == _result.end() ? "not " : "");
+ }
+ if (it != _result.end()) {
+ api::RequestBucketInfoReply::Entry entry;
+ entry._bucketId = b;
+ entry._info = data.getBucketInfo();
+ it->second.push_back(entry);
}
- return StorBucketDatabase::Decision::CONTINUE;
+ } catch (lib::TooFewBucketBitsInUseException& e) {
+ LOGBP(warning, "Cannot assign bucket %s to a distributor as bucket only specifies %u bits.",
+ b.toString().c_str(), b.getUsedBits());
+ } catch (lib::NoDistributorsAvailableException& e) {
+ LOGBP(warning, "No distributors available while processing request bucket info. Distribution hash: %s, cluster state: %s",
+ _state.getDistribution().getNodeGraph().getDistributionConfigHash().c_str(), _state.getClusterState().toString().c_str());
}
-
-};
+ return StorBucketDatabase::Decision::CONTINUE;
+}
struct MetricsUpdater {
struct Count {
@@ -151,7 +157,6 @@ struct MetricsUpdater {
constexpr Count() noexcept : docs(0), bytes(0), buckets(0), active(0), ready(0) {}
};
-
Count count;
uint32_t lowestUsedBit;
@@ -193,58 +198,94 @@ struct MetricsUpdater {
} // End of anonymous namespace
+namespace {
+
+void
+output(vespalib::JsonStream & json, vespalib::stringref name, uint64_t value, vespalib::stringref bucketSpace) {
+ using namespace vespalib::jsonstream;
+ json << Object();
+ json << "name" << name;
+ json << "values" << Object() << "last" << value << End();
+ if ( ! bucketSpace.empty()) {
+ json << "dimensions" << Object();
+ json << "bucketSpace" << bucketSpace;
+ json << End();
+ }
+ json << End();
+}
+
+void
+output(vespalib::JsonStream & json, vespalib::stringref name, uint64_t value) {
+ output(json, name, value, "");
+}
+
+MetricsUpdater
+getMetrics(const StorBucketDatabase & db) {
+ MetricsUpdater m;
+ auto guard = db.acquire_read_guard();
+ guard->for_each(std::ref(m));
+ return m;
+}
+
+}
+
+void
+BucketManager::report(vespalib::JsonStream & json) const {
+ MetricsUpdater total;
+ for (const auto& space : _component.getBucketSpaceRepo()) {
+ MetricsUpdater m = getMetrics(space.second->bucketDatabase());
+ output(json, "vds.datastored.bucket_space.buckets_total", m.count.buckets,
+ document::FixedBucketSpaces::to_string(space.first));
+ total.add(m);
+ }
+ const auto & src = total.count;
+ output(json, "vds.datastored.alldisks.docs", src.docs);
+ output(json, "vds.datastored.alldisks.bytes", src.bytes);
+ output(json, "vds.datastored.alldisks.buckets", src.buckets);
+}
+
StorBucketDatabase::Entry
BucketManager::getBucketInfo(const document::Bucket &bucket) const
{
- StorBucketDatabase::WrappedEntry entry(_component.getBucketDatabase(bucket.getBucketSpace()).get(bucket.getBucketId(), "BucketManager::getBucketInfo"));
- return *entry;
+ return *_component.getBucketDatabase(bucket.getBucketSpace()).get(bucket.getBucketId(), "BucketManager::getBucketInfo");
}
void
-BucketManager::updateMetrics(bool updateDocCount)
+BucketManager::updateMetrics() const
{
- LOG(debug, "Iterating bucket database to update metrics%s%s",
- updateDocCount ? "" : ", minusedbits only",
- _doneInitialized ? "" : ", server is not done initializing");
-
- if (!updateDocCount || _doneInitialized) {
- MetricsUpdater total;
- for (const auto& space : _component.getBucketSpaceRepo()) {
- MetricsUpdater m;
- auto guard = space.second->bucketDatabase().acquire_read_guard();
- guard->for_each(std::ref(m));
- total.add(m);
- if (updateDocCount) {
- auto bm = _metrics->bucket_spaces.find(space.first);
- assert(bm != _metrics->bucket_spaces.end());
- bm->second->buckets_total.set(m.count.buckets);
- bm->second->docs.set(m.count.docs);
- bm->second->bytes.set(m.count.bytes);
- bm->second->active_buckets.set(m.count.active);
- bm->second->ready_buckets.set(m.count.ready);
- }
- }
- if (updateDocCount) {
- auto & dest = *_metrics->disk;
- const auto & src = total.count;
- dest.buckets.addValue(src.buckets);
- dest.docs.addValue(src.docs);
- dest.bytes.addValue(src.bytes);
- dest.active.addValue(src.active);
- dest.ready.addValue(src.ready);
- }
+ MetricsUpdater total;
+ for (const auto& space : _component.getBucketSpaceRepo()) {
+ MetricsUpdater m = getMetrics(space.second->bucketDatabase());
+ total.add(m);
+ auto bm = _metrics->bucket_spaces.find(space.first);
+ assert(bm != _metrics->bucket_spaces.end());
+ bm->second->buckets_total.set(m.count.buckets);
+ bm->second->docs.set(m.count.docs);
+ bm->second->bytes.set(m.count.bytes);
+ bm->second->active_buckets.set(m.count.active);
+ bm->second->ready_buckets.set(m.count.ready);
}
+ auto & dest = *_metrics->disk;
+ const auto & src = total.count;
+ dest.buckets.addValue(src.buckets);
+ dest.docs.addValue(src.docs);
+ dest.bytes.addValue(src.bytes);
+ dest.active.addValue(src.active);
+ dest.ready.addValue(src.ready);
update_bucket_db_memory_usage_metrics();
}
-void BucketManager::update_bucket_db_memory_usage_metrics() {
+
+void
+BucketManager::update_bucket_db_memory_usage_metrics() const {
for (const auto& space : _component.getBucketSpaceRepo()) {
auto bm = _metrics->bucket_spaces.find(space.first);
bm->second->bucket_db_metrics.memory_usage.update(space.second->bucketDatabase().detailed_memory_usage());
}
}
-void BucketManager::updateMinUsedBits()
+void
+BucketManager::updateMinUsedBits()
{
MetricsUpdater m;
_component.getBucketSpaceRepo().for_each_bucket(std::ref(m));
@@ -494,8 +535,7 @@ BucketManager::processRequestBucketInfoCommands(document::BucketSpace bucketSpac
reqs.size(), bucketSpace.toString().c_str(), clusterState->toString().c_str(), our_hash.c_str());
std::lock_guard clusterStateGuard(_clusterStateLock);
- for (auto it = reqs.rbegin(); it != reqs.rend(); it++) {
- const auto & req = *it;
+ for (const auto & req : std::ranges::reverse_view(reqs)) {
// Currently small requests should not be forwarded to worker thread
assert(req->hasSystemState());
const auto their_hash = req->getDistributionHash();
@@ -566,13 +606,12 @@ BucketManager::processRequestBucketInfoCommands(document::BucketSpace bucketSpac
framework::MilliSecTimer runStartTime(_component.getClock());
// Don't allow logging to lower performance of inner loop.
// Call other type of instance if logging
- const document::BucketIdFactory& idFac(_component.getBucketIdFactory());
if (LOG_WOULD_LOG(spam)) {
- DistributorInfoGatherer<true> builder(*clusterState, result, idFac, distribution);
+ DistributorInfoGatherer builder(*clusterState, result, *distribution, true);
_component.getBucketDatabase(bucketSpace).for_each_chunked(std::ref(builder),
"BucketManager::processRequestBucketInfoCommands-1");
} else {
- DistributorInfoGatherer<false> builder(*clusterState, result, idFac, distribution);
+ DistributorInfoGatherer builder(*clusterState, result, *distribution, false);
_component.getBucketDatabase(bucketSpace).for_each_chunked(std::ref(builder),
"BucketManager::processRequestBucketInfoCommands-2");
}
diff --git a/storage/src/vespa/storage/bucketdb/bucketmanager.h b/storage/src/vespa/storage/bucketdb/bucketmanager.h
index cb0bc6a9f95..eea5719ad3b 100644
--- a/storage/src/vespa/storage/bucketdb/bucketmanager.h
+++ b/storage/src/vespa/storage/bucketdb/bucketmanager.h
@@ -13,6 +13,7 @@
#include <vespa/storage/bucketdb/config-stor-bucketdb.h>
#include <vespa/storage/common/servicelayercomponent.h>
#include <vespa/storage/common/storagelinkqueued.h>
+#include <vespa/storage/common/nodestateupdater.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageframework/generic/metric/metricupdatehook.h>
#include <vespa/storageframework/generic/status/statusreporter.h>
@@ -28,6 +29,7 @@ namespace storage {
class BucketManager : public StorageLink,
public framework::StatusReporter,
+ public NodeStateReporter,
private framework::Runnable,
private framework::MetricUpdateHook
{
@@ -37,7 +39,9 @@ public:
using BucketInfoRequestMap = std::unordered_map<document::BucketSpace, BucketInfoRequestList, document::BucketSpace::hash>;
private:
- config::ConfigUri _configUri;
+ using ReplyQueue = std::vector<api::StorageReply::SP>;
+ using ConflictingBuckets = std::unordered_set<document::BucketId, document::BucketId::hash>;
+ config::ConfigUri _configUri;
BucketInfoRequestMap _bucketInfoRequests;
/**
@@ -49,30 +53,28 @@ private:
/**
* Lock kept for access to 3 values below concerning cluster state.
*/
- std::mutex _clusterStateLock;
+ std::mutex _clusterStateLock;
+ mutable std::mutex _queueProcessingLock;
- mutable std::mutex _queueProcessingLock;
- using ReplyQueue = std::vector<api::StorageReply::SP>;
- using ConflictingBuckets = std::unordered_set<document::BucketId, document::BucketId::hash>;
- ReplyQueue _queuedReplies;
- ConflictingBuckets _conflictingBuckets;
+ ReplyQueue _queuedReplies;
+ ConflictingBuckets _conflictingBuckets;
// The most current cluster state versions that we've observed on the way _down_
// through the chain, i.e. prior to being enabled on the node.
- uint32_t _last_cluster_state_version_initiated;
+ uint32_t _last_cluster_state_version_initiated;
// The most current cluster state we've observed on the way _up_ through the
// chain, i.e. after being enabled on the node.
- uint32_t _last_cluster_state_version_completed;
- bool _doneInitialized;
- size_t _requestsCurrentlyProcessing;
- ServiceLayerComponent _component;
+ uint32_t _last_cluster_state_version_completed;
+ bool _doneInitialized;
+ size_t _requestsCurrentlyProcessing;
+ ServiceLayerComponent _component;
std::shared_ptr<BucketManagerMetrics> _metrics;
- std::unique_ptr<framework::Thread> _thread;
- std::chrono::milliseconds _simulated_processing_delay;
+ std::unique_ptr<framework::Thread> _thread;
+ std::chrono::milliseconds _simulated_processing_delay;
class ScopedQueueDispatchGuard {
BucketManager& _mgr;
public:
- ScopedQueueDispatchGuard(BucketManager&);
+ explicit ScopedQueueDispatchGuard(BucketManager&);
~ScopedQueueDispatchGuard();
ScopedQueueDispatchGuard(const ScopedQueueDispatchGuard&) = delete;
@@ -83,7 +85,7 @@ public:
BucketManager(const config::ConfigUri&, ServiceLayerComponentRegister&);
BucketManager(const BucketManager&) = delete;
BucketManager& operator=(const BucketManager&) = delete;
- ~BucketManager();
+ ~BucketManager() override;
void startWorkerThread();
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
@@ -94,10 +96,10 @@ public:
/** Get info for given bucket (Used for whitebox testing) */
StorBucketDatabase::Entry getBucketInfo(const document::Bucket &id) const;
- void force_db_sweep_and_metric_update() { updateMetrics(true); }
+ void force_db_sweep_and_metric_update() { updateMetrics(); }
bool onUp(const std::shared_ptr<api::StorageMessage>&) override;
-
+ void report(vespalib::JsonStream &writer) const override;
private:
friend struct BucketManagerTest;
@@ -112,9 +114,9 @@ private:
void onDoneInit() override { _doneInitialized = true; }
void onClose() override;
- void updateMetrics(bool updateDocCount);
- void updateMetrics(const MetricLockGuard &) override { updateMetrics(true); }
- void update_bucket_db_memory_usage_metrics();
+ void updateMetrics() const;
+ void updateMetrics(const MetricLockGuard &) override { updateMetrics(); }
+ void update_bucket_db_memory_usage_metrics() const;
void updateMinUsedBits();
bool onRequestBucketInfo(const std::shared_ptr<api::RequestBucketInfoCommand>&) override;
@@ -127,8 +129,7 @@ private:
* Returns whether request was enqueued (and should thus not be forwarded
* by the caller).
*/
- bool enqueueAsConflictIfProcessingRequest(
- const api::StorageReply::SP& reply);
+ bool enqueueAsConflictIfProcessingRequest(const api::StorageReply::SP& reply);
/**
* Signals that code is entering a section where certain bucket tree
diff --git a/storage/src/vespa/storage/bucketdb/stor-bucket-init.def b/storage/src/vespa/storage/bucketdb/stor-bucket-init.def
deleted file mode 100644
index c3344e324a5..00000000000
--- a/storage/src/vespa/storage/bucketdb/stor-bucket-init.def
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-namespace=vespa.config.content.core
-
-# The maximum number of alien files found during init that should be stored in
-# memory so they can be viewed on status page.
-max_alien_files_logged int default=10 restart
-
-# The maximum number of pending info reads to each disk during initialization.
-max_pending_info_reads_per_disk int default=20 restart
-
-# The minimum number of pending info reads to each disk during initialization.
-# When pending falls below this, we will resume database scan to add more
-# pending up to the maximum setting.
-min_pending_info_reads_per_disk int default=4 restart
-
-# The priority of the read bucket info requests the initializer sends to the
-# persistence layer. Currently chosen so that such operations will not pre-
-# empt any regular external load or ideal state operations, but they will block
-# very low priority background operations such as periodic GC (default pri of
-# 200). A tradeoff must be made between fast initialization and the availability
-# of data on the initializing node.
-info_read_priority int default=185 restart
-
-# The priority of the list bucket requests the initializer sends to the
-# persistence layer. Should always be lower than the read priority to ensure
-# starting to read wont make listing wait. However, listing is currently pretty
-# much required to be done before starting anyhow, so this option does little
-# unless your directories are not hardware independent.
-list_priority int default=100 restart
-
-# Whether the initializer should complete listing before starting to read
-# bucket information. Shouldnt matter much performance wise so always set to
-# true as it is now. Setting it false, disks done listing first will start
-# to process info requests a bit earlier than otherwise.
-complete_list_before_starting_read bool default=false restart
diff --git a/storage/src/vespa/storage/common/distributorcomponent.h b/storage/src/vespa/storage/common/distributorcomponent.h
index 06bb49a6090..6542bf2ddfe 100644
--- a/storage/src/vespa/storage/common/distributorcomponent.h
+++ b/storage/src/vespa/storage/common/distributorcomponent.h
@@ -34,13 +34,6 @@
namespace storage {
-namespace bucketdb {
- class DistrBucketDatabase;
-}
-namespace lib {
- class IdealNodeCalculator;
-}
-
using DistributorConfig = vespa::config::content::core::internal::InternalStorDistributormanagerType;
using VisitorConfig = vespa::config::content::core::internal::InternalStorVisitordispatcherType;
diff --git a/storage/src/vespa/storage/common/nodestateupdater.h b/storage/src/vespa/storage/common/nodestateupdater.h
index 842828a1b89..3d32b9e4b4b 100644
--- a/storage/src/vespa/storage/common/nodestateupdater.h
+++ b/storage/src/vespa/storage/common/nodestateupdater.h
@@ -25,7 +25,9 @@
#include <string>
#include <vespa/vdslib/state/nodestate.h>
+#include "vespa/vespalib/util/jsonstream.h"
+namespace metrics { class JsonWriter; }
namespace storage {
namespace lib { class ClusterStateBundle; }
@@ -77,6 +79,12 @@ struct NodeStateUpdater {
virtual void request_almost_immediate_node_state_replies() = 0;
};
+class NodeStateReporter {
+public:
+ virtual ~NodeStateReporter() = default;
+ virtual void report(vespalib::JsonStream &writer) const = 0;
+};
+
} // storage
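Both BucketManager and the StateManagerTest fixture implement the new NodeStateReporter interface, letting StateManager pull their numbers into host info without going through the MetricManager. A minimal implementation sketch, streaming the same kind of JSON objects BucketManager::report() emits (the metric name is made up):

    #include <vespa/vespalib/util/jsonstream.h>

    class ExampleReporter : public storage::NodeStateReporter {
    public:
        void report(vespalib::JsonStream& json) const override {
            using namespace vespalib::jsonstream;
            json << Object()
                 << "name" << "example.component.docs"
                 << "values" << Object() << "last" << 42 << End()
                 << End();
        }
    };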
diff --git a/storage/src/vespa/storage/common/statusmetricconsumer.cpp b/storage/src/vespa/storage/common/statusmetricconsumer.cpp
index 68866027cd1..342680318bd 100644
--- a/storage/src/vespa/storage/common/statusmetricconsumer.cpp
+++ b/storage/src/vespa/storage/common/statusmetricconsumer.cpp
@@ -8,7 +8,6 @@
#include <vespa/metrics/metricmanager.h>
#include <vespa/storageapi/messageapi/storagemessage.h>
#include <vespa/vespalib/stllike/asciistream.h>
-#include <vespa/vespalib/util/xmlstream.h>
#include <vespa/log/log.h>
LOG_SETUP(".status.metricreporter");
diff --git a/storage/src/vespa/storage/config/CMakeLists.txt b/storage/src/vespa/storage/config/CMakeLists.txt
index 58996592677..cd3d99d0ccc 100644
--- a/storage/src/vespa/storage/config/CMakeLists.txt
+++ b/storage/src/vespa/storage/config/CMakeLists.txt
@@ -12,16 +12,10 @@ vespa_generate_config(storage_storageconfig stor-server.def)
install_config_definition(stor-server.def vespa.config.content.core.stor-server.def)
vespa_generate_config(storage_storageconfig stor-status.def)
install_config_definition(stor-status.def vespa.config.content.core.stor-status.def)
-vespa_generate_config(storage_storageconfig stor-messageforwarder.def)
-install_config_definition(stor-messageforwarder.def vespa.config.content.core.stor-messageforwarder.def)
vespa_generate_config(storage_storageconfig stor-opslogger.def)
install_config_definition(stor-opslogger.def vespa.config.content.core.stor-opslogger.def)
vespa_generate_config(storage_storageconfig stor-visitordispatcher.def)
install_config_definition(stor-visitordispatcher.def vespa.config.content.core.stor-visitordispatcher.def)
-vespa_generate_config(storage_storageconfig stor-integritychecker.def)
-install_config_definition(stor-integritychecker.def vespa.config.content.core.stor-integritychecker.def)
-vespa_generate_config(storage_storageconfig stor-bucketmover.def)
-install_config_definition(stor-bucketmover.def vespa.config.content.core.stor-bucketmover.def)
vespa_generate_config(storage_storageconfig stor-bouncer.def)
install_config_definition(stor-bouncer.def vespa.config.content.core.stor-bouncer.def)
vespa_generate_config(storage_storageconfig stor-prioritymapping.def)
diff --git a/storage/src/vespa/storage/config/stor-bucketmover.def b/storage/src/vespa/storage/config/stor-bucketmover.def
deleted file mode 100644
index 80192c37ed7..00000000000
--- a/storage/src/vespa/storage/config/stor-bucketmover.def
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-namespace=vespa.config.content.core
-
-## Minimum time between bucket database iterations in the bucket mover. The
-## minumum time is used when disks starts to get pretty full and we have plenty
-## stuff we can move.
-## restart flag was added automatically and needs to be verified.
-minimum_recheck_interval_in_seconds int default=60 restart
-
-## Maximum time between bucket database iterations in the bucket mover. The
-## maximum time is used when disks have plenty free space, so moving data is
-## not critical.
-## restart flag was added automatically and needs to be verified.
-maximum_recheck_interval_in_seconds int default=3600 restart
-
-## Number of buckets to cache at a time when reading the bucket database
-## restart flag was added automatically and needs to be verified.
-bucket_iteration_chunk int default=1000 restart
-
-## Maximum fill rate above average fill rate for a target disk to be eligible
-## as a target for a bucket move operation.
-## restart flag was added automatically and needs to be verified.
-max_target_fill_rate_above_average double default=0.01 restart
-
-## Number of bucket mover runs to keep in history vector
-## restart flag was added automatically and needs to be verified.
-max_history_size int default=10 restart
-
-## Max concurrent pending bucket move operations scheduled in total.
-## restart flag was added automatically and needs to be verified.
-max_pending int default=5 restart
-
-## Operation delay. If set, the bucket mover will wait for this amount of
-## milliseconds between each operation. Useful in testing to make move run go
-## slow enough to view without that much data.
-## restart flag was added automatically and needs to be verified.
-operation_delay int default=0 restart
diff --git a/storage/src/vespa/storage/config/stor-integritychecker.def b/storage/src/vespa/storage/config/stor-integritychecker.def
deleted file mode 100644
index 80685cbb58f..00000000000
--- a/storage/src/vespa/storage/config/stor-integritychecker.def
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-namespace=vespa.config.content.core
-
-## Minutes after midnight when integrity checker is allowed to start running.
-## 0 means it will start/continue run at midnight.
-dailycyclestart int default=0
-
-## Minutes after midnight when integrity checker is not allowed to run anymore.
-## If this equals dailycyclestart it is allowed to run all day. dailycyclestop
-## is allowed to be less than dailycyclestart.
-dailycyclestop int default=0
-
-## Status of what is allowed done on what weekdays. Should be a string with
-## seven characters, where the first represent sunday, the seventh saturday.
-## The possible options are RrCc- which means:
-## R - If state becomes R, and current cycle does not verify file content,
-## abort current cycle, otherwise continue it. Start new cycle verifying
-## all content of all files.
-## r - Continue current cycle. Start new cycle using cheap partial file
-## verification.
-## c - Continue current cycle. Dont start a new cycle.
-weeklycycle string default="Rrrrrrr"
-
-## Max concurrent pending bucket verifications. For max speed, each disk thread
-## should have one to work with all the time. Default is 1, to ensure little
-## resources are consumed by this process by default. Once request priority
-## has been introduced, this default may become higher.
-maxpending int default=2
-
-## Minimum time since last cycle before starting a new one in minutes.
-## Defaults to 24 hours.
-mincycletime int default=1440
-
-## Minimum time in seconds between each request. To throttle the system even
-## slower if continuous one pending puts on more load on the system than you
-## want. Works with multiple pending messages, though it doesnt make much sense
-## unless maxpending equals 1.
-requestdelay int default=0
diff --git a/storage/src/vespa/storage/config/stor-messageforwarder.def b/storage/src/vespa/storage/config/stor-messageforwarder.def
deleted file mode 100644
index b17e6ddae3b..00000000000
--- a/storage/src/vespa/storage/config/stor-messageforwarder.def
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-namespace=vespa.config.content.core
-
-route string default="" restart
diff --git a/storage/src/vespa/storage/distributor/CMakeLists.txt b/storage/src/vespa/storage/distributor/CMakeLists.txt
index 184fee5d2c9..c889afcc77c 100644
--- a/storage/src/vespa/storage/distributor/CMakeLists.txt
+++ b/storage/src/vespa/storage/distributor/CMakeLists.txt
@@ -10,6 +10,7 @@ vespa_add_library(storage_distributor OBJECT
bucket_spaces_stats_provider.cpp
bucketgctimecalculator.cpp
bucketlistmerger.cpp
+ cancelled_replicas_pruner.cpp
clusterinformation.cpp
crypto_uuid_generator.cpp
distributor_bucket_space.cpp
diff --git a/storage/src/vespa/storage/distributor/activecopy.cpp b/storage/src/vespa/storage/distributor/activecopy.cpp
index a4ee4a51135..4c35d42a0e7 100644
--- a/storage/src/vespa/storage/distributor/activecopy.cpp
+++ b/storage/src/vespa/storage/distributor/activecopy.cpp
@@ -1,49 +1,35 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "activecopy.h"
-
-#include <vespa/storage/storageutil/utils.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <algorithm>
#include <cassert>
+#include <ostream>
namespace std {
- template<typename T>
- std::ostream& operator<<(std::ostream& out, const std::vector<T>& v) {
- out << "[";
- for (uint32_t i=0; i<v.size(); ++i) {
- out << "\n " << v[i];
- }
- if (!v.empty()) {
- out << "\n";
- }
- return out << "]";
+
+template<typename T>
+std::ostream& operator<<(std::ostream& out, const std::vector<T>& v) {
+ out << "[";
+ for (uint32_t i=0; i<v.size(); ++i) {
+ out << "\n " << v[i];
+ }
+ if (!v.empty()) {
+ out << "\n";
}
+ return out << "]";
+}
+
}
namespace storage::distributor {
-ActiveCopy::ActiveCopy(uint16_t node, const BucketDatabase::Entry& e, const std::vector<uint16_t>& idealState) :
- _nodeIndex(node),
- _ideal(0xffff)
-{
- const BucketCopy* copy = e->getNode(node);
- assert(copy != nullptr);
- _doc_count = copy->getDocumentCount();
- _ready = copy->ready();
- _active = copy->active();
- for (uint32_t i=0; i<idealState.size(); ++i) {
- if (idealState[i] == node) {
- _ideal = i;
- break;
- }
- }
-}
+using IndexList = lib::Distribution::IndexList;
vespalib::string
ActiveCopy::getReason() const {
- if (_ready && (_doc_count > 0) && (_ideal < 0xffff)) {
+ if (_ready && (_doc_count > 0) && valid_ideal()) {
vespalib::asciistream ost;
ost << "copy is ready, has " << _doc_count
<< " docs and ideal state priority " << _ideal;
@@ -54,7 +40,7 @@ ActiveCopy::getReason() const {
return ost.str();
} else if (_ready) {
return "copy is ready";
- } else if ((_doc_count > 0) && (_ideal < 0xffff)) {
+ } else if ((_doc_count > 0) && valid_ideal()) {
vespalib::asciistream ost;
ost << "copy has " << _doc_count << " docs and ideal state priority " << _ideal;
return ost.str();
@@ -64,7 +50,7 @@ ActiveCopy::getReason() const {
return ost.str();
} else if (_active) {
return "copy is already active";
- } else if (_ideal < 0xffff) {
+ } else if (valid_ideal()) {
vespalib::asciistream ost;
ost << "copy is ideal state priority " << _ideal;
return ost.str();
@@ -82,7 +68,7 @@ operator<<(std::ostream& out, const ActiveCopy & e) {
if (e._doc_count > 0) {
out << ", doc_count " << e._doc_count;
}
- if (e._ideal < 0xffff) {
+ if (e.valid_ideal()) {
out << ", ideal pri " << e._ideal;
}
out << ")";
@@ -91,66 +77,66 @@ operator<<(std::ostream& out, const ActiveCopy & e) {
namespace {
- struct ActiveStateOrder {
- bool operator()(const ActiveCopy & e1, const ActiveCopy & e2) {
- if (e1._ready != e2._ready) {
- return e1._ready;
- }
- if (e1._doc_count != e2._doc_count) {
- return e1._doc_count > e2._doc_count;
- }
- if (e1._ideal != e2._ideal) {
- return e1._ideal < e2._ideal;
- }
- if (e1._active != e2._active) {
- return e1._active;
- }
- return e1._nodeIndex < e2._nodeIndex;
- }
- };
-
- std::vector<uint16_t>
- buildValidNodeIndexList(BucketDatabase::Entry& e) {
- std::vector<uint16_t> result;
- result.reserve(e->getNodeCount());
- for (uint32_t i=0, n=e->getNodeCount(); i < n; ++i) {
- const BucketCopy& cp = e->getNodeRef(i);
- if (!cp.valid()) {
- continue;
- }
+IndexList
+buildValidNodeIndexList(const BucketDatabase::Entry& e) {
+ IndexList result;
+ result.reserve(e->getNodeCount());
+ for (uint32_t i=0, n=e->getNodeCount(); i < n; ++i) {
+ const BucketCopy& cp = e->getNodeRef(i);
+ if (cp.valid()) {
result.push_back(cp.getNode());
}
- return result;
}
+ return result;
+}
- std::vector<ActiveCopy>
- buildNodeList(BucketDatabase::Entry& e,
- const std::vector<uint16_t>& nodeIndexes,
- const std::vector<uint16_t>& idealState)
- {
- std::vector<ActiveCopy> result;
- result.reserve(nodeIndexes.size());
- for (uint16_t nodeIndex : nodeIndexes) {
- result.emplace_back(nodeIndex, e, idealState);
- }
- return result;
+using SmallActiveCopyList = vespalib::SmallVector<ActiveCopy, 2>;
+static_assert(sizeof(SmallActiveCopyList) == 40);
+
+SmallActiveCopyList
+buildNodeList(const BucketDatabase::Entry& e, vespalib::ConstArrayRef<uint16_t> nodeIndexes, const IdealServiceLayerNodesBundle::Node2Index & idealState)
+{
+ SmallActiveCopyList result;
+ result.reserve(nodeIndexes.size());
+ for (uint16_t nodeIndex : nodeIndexes) {
+ const BucketCopy *copy = e->getNode(nodeIndex);
+ assert(copy);
+ result.emplace_back(nodeIndex, *copy, idealState.lookup(nodeIndex));
}
+ return result;
}
+}
+
+struct ActiveStateOrder {
+ bool operator()(const ActiveCopy & e1, const ActiveCopy & e2) noexcept {
+ if (e1._ready != e2._ready) {
+ return e1._ready;
+ }
+ if (e1._doc_count != e2._doc_count) {
+ return e1._doc_count > e2._doc_count;
+ }
+ if (e1._ideal != e2._ideal) {
+ return e1._ideal < e2._ideal;
+ }
+ if (e1._active != e2._active) {
+ return e1._active;
+ }
+ return e1.nodeIndex() < e2.nodeIndex();
+ }
+};
+
ActiveList
-ActiveCopy::calculate(const std::vector<uint16_t>& idealState,
- const lib::Distribution& distribution,
- BucketDatabase::Entry& e,
- uint32_t max_activation_inhibited_out_of_sync_groups)
+ActiveCopy::calculate(const Node2Index & idealState, const lib::Distribution& distribution,
+ const BucketDatabase::Entry& e, uint32_t max_activation_inhibited_out_of_sync_groups)
{
- std::vector<uint16_t> validNodesWithCopy = buildValidNodeIndexList(e);
+ IndexList validNodesWithCopy = buildValidNodeIndexList(e);
if (validNodesWithCopy.empty()) {
return ActiveList();
}
- using IndexList = std::vector<uint16_t>;
std::vector<IndexList> groups;
if (distribution.activePerGroup()) {
- groups = distribution.splitNodesIntoLeafGroups(std::move(validNodesWithCopy));
+ groups = distribution.splitNodesIntoLeafGroups(validNodesWithCopy);
} else {
groups.push_back(std::move(validNodesWithCopy));
}
@@ -162,7 +148,7 @@ ActiveCopy::calculate(const std::vector<uint16_t>& idealState,
: api::BucketInfo()); // Invalid by default
uint32_t inhibited_groups = 0;
for (const auto& group_nodes : groups) {
- std::vector<ActiveCopy> entries = buildNodeList(e, group_nodes, idealState);
+ SmallActiveCopyList entries = buildNodeList(e, group_nodes, idealState);
auto best = std::min_element(entries.begin(), entries.end(), ActiveStateOrder());
if ((groups.size() > 1) &&
(inhibited_groups < max_activation_inhibited_out_of_sync_groups) &&
@@ -180,24 +166,22 @@ ActiveCopy::calculate(const std::vector<uint16_t>& idealState,
}
void
-ActiveList::print(std::ostream& out, bool verbose,
- const std::string& indent) const
+ActiveList::print(std::ostream& out, bool verbose, const std::string& indent) const
{
out << "[";
if (verbose) {
for (size_t i=0; i<_v.size(); ++i) {
- out << "\n" << indent << " "
- << _v[i]._nodeIndex << " " << _v[i].getReason();
+ out << "\n" << indent << " " << _v[i].nodeIndex() << " " << _v[i].getReason();
}
if (!_v.empty()) {
out << "\n" << indent;
}
} else {
if (!_v.empty()) {
- out << _v[0]._nodeIndex;
+ out << _v[0].nodeIndex();
}
for (size_t i=1; i<_v.size(); ++i) {
- out << " " << _v[i]._nodeIndex;
+ out << " " << _v[i].nodeIndex();
}
}
out << "]";
@@ -207,7 +191,7 @@ bool
ActiveList::contains(uint16_t node) const noexcept
{
for (const auto& candidate : _v) {
- if (node == candidate._nodeIndex) {
+ if (node == candidate.nodeIndex()) {
return true;
}
}
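
Editor's note: the reorganized ActiveStateOrder above is a strict weak ordering used with std::min_element to pick the replica to activate. A simplified, self-contained sketch of the same tie-break chain (illustrative names, not the real ActiveCopy type):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Stand-in for ActiveCopy: ideal is the position in the ideal-state list,
    // 0xffff when the node is not in that list.
    struct Copy {
        uint16_t node;
        uint16_t ideal;
        uint32_t doc_count;
        bool     ready;
        bool     active;
    };

    // Same ordering idea: ready first, then most docs, then best ideal-state
    // position, then already-active, then lowest node index as the final tie-break.
    bool better(const Copy& a, const Copy& b) {
        if (a.ready != b.ready)         return a.ready;
        if (a.doc_count != b.doc_count) return a.doc_count > b.doc_count;
        if (a.ideal != b.ideal)         return a.ideal < b.ideal;
        if (a.active != b.active)       return a.active;
        return a.node < b.node;
    }

    int main() {
        std::vector<Copy> copies = {
            {3, 0xffff, 100, false, false},
            {1, 0,       90, true,  false},
            {7, 1,       90, true,  true},
        };
        auto best = std::min_element(copies.begin(), copies.end(), better);
        std::cout << "activate node " << best->node << '\n'; // node 1: ready and best ideal position
    }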
diff --git a/storage/src/vespa/storage/distributor/activecopy.h b/storage/src/vespa/storage/distributor/activecopy.h
index 258fe3cdf16..91dfb3f0bd0 100644
--- a/storage/src/vespa/storage/distributor/activecopy.h
+++ b/storage/src/vespa/storage/distributor/activecopy.h
@@ -2,25 +2,43 @@
#pragma once
+#include "ideal_service_layer_nodes_bundle.h"
#include <vespa/storage/bucketdb/bucketdatabase.h>
namespace storage::lib { class Distribution; }
namespace storage::distributor {
class ActiveList;
+struct ActiveStateOrder;
-struct ActiveCopy {
- constexpr ActiveCopy() noexcept : _nodeIndex(-1), _ideal(-1), _doc_count(0), _ready(false), _active(false) { }
- ActiveCopy(uint16_t node, const BucketDatabase::Entry& e, const std::vector<uint16_t>& idealState);
+class ActiveCopy {
+ using Index = IdealServiceLayerNodesBundle::Index;
+ using Node2Index = IdealServiceLayerNodesBundle::Node2Index;
+public:
+ constexpr ActiveCopy() noexcept
+ : _nodeIndex(Index::invalid()),
+ _ideal(Index::invalid()),
+ _doc_count(0),
+ _ready(false),
+ _active(false)
+ { }
+ ActiveCopy(uint16_t node, const BucketCopy & copy, uint16_t ideal) noexcept
+ : _nodeIndex(node),
+ _ideal(ideal),
+ _doc_count(copy.getDocumentCount()),
+ _ready(copy.ready()),
+ _active(copy.active())
+ { }
vespalib::string getReason() const;
friend std::ostream& operator<<(std::ostream& out, const ActiveCopy& e);
- static ActiveList calculate(const std::vector<uint16_t>& idealState,
- const lib::Distribution&,
- BucketDatabase::Entry&,
- uint32_t max_activation_inhibited_out_of_sync_groups);
-
+ static ActiveList calculate(const Node2Index & idealState, const lib::Distribution&,
+ const BucketDatabase::Entry&, uint32_t max_activation_inhibited_out_of_sync_groups);
+ uint16_t nodeIndex() const noexcept { return _nodeIndex; }
+private:
+ friend ActiveStateOrder;
+ bool valid_ideal() const noexcept { return _ideal < Index::invalid(); }
uint16_t _nodeIndex;
uint16_t _ideal;
uint32_t _doc_count;
@@ -29,18 +47,17 @@ struct ActiveCopy {
};
class ActiveList : public vespalib::Printable {
- std::vector<ActiveCopy> _v;
-
public:
- ActiveList() {}
- ActiveList(std::vector<ActiveCopy>&& v) : _v(std::move(v)) { }
+ ActiveList() noexcept {}
+ ActiveList(std::vector<ActiveCopy>&& v) noexcept : _v(std::move(v)) { }
- ActiveCopy& operator[](size_t i) noexcept { return _v[i]; }
const ActiveCopy& operator[](size_t i) const noexcept { return _v[i]; }
[[nodiscard]] bool contains(uint16_t) const noexcept;
[[nodiscard]] bool empty() const noexcept { return _v.empty(); }
size_t size() const noexcept { return _v.size(); }
void print(std::ostream&, bool verbose, const std::string& indent) const override;
+private:
+ std::vector<ActiveCopy> _v;
};
}
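
Editor's note: the header change replaces raw 0xffff comparisons with the Index sentinel from IdealServiceLayerNodesBundle, so valid_ideal() reduces to an Index validity check. A minimal sketch of that sentinel wrapper in isolation (a simplified copy of the idea, outside its real namespace):

    #include <cstdint>
    #include <iostream>

    // A uint16_t index where 0xffff means "not present"; valid() replaces
    // ad-hoc comparisons against the magic constant.
    class Index {
    public:
        constexpr explicit Index(uint16_t i) noexcept : _i(i) {}
        constexpr bool valid() const noexcept { return _i < MAX; }
        constexpr operator uint16_t() const noexcept { return _i; }
        static constexpr Index invalid() noexcept { return Index(MAX); }
    private:
        static constexpr uint16_t MAX = 0xffff;
        uint16_t _i;
    };

    int main() {
        Index in_ideal_state(2);
        Index not_found = Index::invalid();
        std::cout << in_ideal_state.valid() << ' ' << not_found.valid() << '\n'; // 1 0
    }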
diff --git a/storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.cpp b/storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.cpp
index fc6c957b737..dfcbbf63946 100644
--- a/storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.cpp
+++ b/storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.cpp
@@ -6,19 +6,26 @@
namespace storage::distributor {
-BucketDBMetricUpdater::Stats::Stats()
+BucketDBMetricUpdater::Stats::Stats() noexcept
: _docCount(0),
_byteCount(0),
_tooFewCopies(0),
_tooManyCopies(0),
_noTrusted(0),
- _totalBuckets(0)
+ _totalBuckets(0),
+ _mutable_db_mem_usage(),
+ _read_only_db_mem_usage(),
+ _minBucketReplica()
{
}
BucketDBMetricUpdater::Stats::Stats(const Stats &rhs) = default;
+BucketDBMetricUpdater::Stats & BucketDBMetricUpdater::Stats::operator=(const Stats &rhs) = default;
+BucketDBMetricUpdater::Stats::Stats(Stats &&rhs) noexcept = default;
+BucketDBMetricUpdater::Stats & BucketDBMetricUpdater::Stats::operator=(Stats &&rhs) noexcept = default;
+BucketDBMetricUpdater::Stats::~Stats() = default;
-BucketDBMetricUpdater::BucketDBMetricUpdater()
+BucketDBMetricUpdater::BucketDBMetricUpdater() noexcept
: _workingStats(),
_lastCompleteStats(),
_replicaCountingMode(ReplicaCountingMode::TRUSTED),
@@ -35,8 +42,7 @@ BucketDBMetricUpdater::resetStats()
}
void
-BucketDBMetricUpdater::visit(const BucketDatabase::Entry& entry,
- uint32_t redundancy)
+BucketDBMetricUpdater::visit(const BucketDatabase::Entry& entry, uint32_t redundancy)
{
if (entry->getNodeCount() == 0) {
// We used to have an assert on >0 but that caused some crashes, see
@@ -90,9 +96,7 @@ BucketDBMetricUpdater::visit(const BucketDatabase::Entry& entry,
}
void
-BucketDBMetricUpdater::updateMinReplicationStats(
- const BucketDatabase::Entry& entry,
- uint32_t trustedCopies)
+BucketDBMetricUpdater::updateMinReplicationStats(const BucketDatabase::Entry& entry, uint32_t trustedCopies)
{
auto& minBucketReplica = _workingStats._minBucketReplica;
for (uint32_t i = 0; i < entry->getNodeCount(); i++) {
@@ -103,9 +107,9 @@ BucketDBMetricUpdater::updateMinReplicationStats(
// sync across each other.
// Regardless of counting mode we still have to take the minimum
// replica count across all buckets present on any given node.
- const uint32_t countedReplicas(
- (_replicaCountingMode == ReplicaCountingMode::TRUSTED)
- ? trustedCopies : entry->getNodeCount());
+ const uint32_t countedReplicas = (_replicaCountingMode == ReplicaCountingMode::TRUSTED)
+ ? trustedCopies
+ : entry->getNodeCount();
auto it = minBucketReplica.find(node);
if (it == minBucketReplica.end()) {
minBucketReplica[node] = countedReplicas;
@@ -118,17 +122,18 @@ BucketDBMetricUpdater::updateMinReplicationStats(
void
BucketDBMetricUpdater::completeRound(bool resetWorkingStats)
{
- _lastCompleteStats = _workingStats;
+
_hasCompleteStats = true;
if (resetWorkingStats) {
+ _lastCompleteStats = std::move(_workingStats);
resetStats();
+ } else {
+ _lastCompleteStats = _workingStats;
}
}
void
-BucketDBMetricUpdater::Stats::propagateMetrics(
- IdealStateMetricSet& idealStateMetrics,
- DistributorMetricSet& distributorMetrics)
+BucketDBMetricUpdater::Stats::propagateMetrics(IdealStateMetricSet& idealStateMetrics, DistributorMetricSet& distributorMetrics) const
{
distributorMetrics.docsStored.set(_docCount);
distributorMetrics.bytesStored.set(_byteCount);
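
Editor's note: completeRound() now moves the working stats into the last-complete snapshot when they are about to be reset anyway, avoiding a copy of the per-node maps. A generic sketch of that move-when-resetting pattern (plain standard-library types, not the real Stats):

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>
    #include <utility>

    struct Collector {
        std::unordered_map<uint16_t, uint32_t> working;
        std::unordered_map<uint16_t, uint32_t> last_complete;

        void complete_round(bool reset_working) {
            if (reset_working) {
                last_complete = std::move(working); // steal the storage, no copy
                working.clear();                    // leave working in a defined, empty state
            } else {
                last_complete = working;            // working is kept, so a copy is required
            }
        }
    };

    int main() {
        Collector c;
        c.working[0] = 42;
        c.complete_round(true);
        std::cout << c.last_complete.at(0) << ' ' << c.working.size() << '\n'; // 42 0
    }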
diff --git a/storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.h b/storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.h
index 2edb86cbaa2..366c2f2dc41 100644
--- a/storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.h
+++ b/storage/src/vespa/storage/distributor/bucketdb/bucketdbmetricupdater.h
@@ -2,10 +2,11 @@
#pragma once
+#include <vespa/storage/distributor/min_replica_provider.h>
#include <vespa/storage/bucketdb/bucketdatabase.h>
#include <vespa/storage/config/config-stor-distributormanager.h>
#include <vespa/vespalib/util/memoryusage.h>
-#include <unordered_map>
+#include <vespa/vespalib/stllike/hash_map.h>
namespace storage::distributor {
@@ -25,11 +26,12 @@ public:
vespalib::MemoryUsage _mutable_db_mem_usage;
vespalib::MemoryUsage _read_only_db_mem_usage;
- Stats();
+ Stats() noexcept;
+ Stats(Stats &&rhs) noexcept;
+ Stats & operator=(Stats &&rhs) noexcept;
Stats(const Stats &rhs);
- ~Stats() = default;
-
- Stats &operator=(const Stats &rhs) = default;
+ Stats & operator=(const Stats &rhs);
+ ~Stats();
/**
* For each node N, look at all the buckets that have or should have a
@@ -47,24 +49,24 @@ public:
* Note: If no buckets have been found for a node, that node is not in
* this map.
*/
- std::unordered_map<uint16_t, uint32_t> _minBucketReplica;
+ MinReplicaMap _minBucketReplica;
/**
* Propagate state values to the appropriate metric values.
*/
- void propagateMetrics(IdealStateMetricSet&, DistributorMetricSet&);
+ void propagateMetrics(IdealStateMetricSet&, DistributorMetricSet&) const;
};
using ReplicaCountingMode = vespa::config::content::core::StorDistributormanagerConfig::MinimumReplicaCountingMode;
private:
- Stats _workingStats;
- Stats _lastCompleteStats;
+ Stats _workingStats;
+ Stats _lastCompleteStats;
ReplicaCountingMode _replicaCountingMode;
- bool _hasCompleteStats;
+ bool _hasCompleteStats;
public:
- BucketDBMetricUpdater();
+ BucketDBMetricUpdater() noexcept;
~BucketDBMetricUpdater();
void setMinimumReplicaCountingMode(ReplicaCountingMode mode) noexcept {
@@ -91,11 +93,11 @@ public:
/**
* Returns true iff completeRound() has been called at least once.
*/
- bool hasCompletedRound() const {
+ bool hasCompletedRound() const noexcept {
return _hasCompleteStats;
}
- Stats getLastCompleteStats() const {
+ const Stats & getLastCompleteStats() const noexcept {
return _lastCompleteStats;
}
diff --git a/storage/src/vespa/storage/distributor/cancelled_replicas_pruner.cpp b/storage/src/vespa/storage/distributor/cancelled_replicas_pruner.cpp
new file mode 100644
index 00000000000..f453a722d2c
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/cancelled_replicas_pruner.cpp
@@ -0,0 +1,22 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "cancelled_replicas_pruner.h"
+
+namespace storage::distributor {
+
+std::vector<BucketCopy> prune_cancelled_nodes(std::span<const BucketCopy> replicas, const CancelScope& cancel_scope) {
+ if (cancel_scope.fully_cancelled()) {
+ return {};
+ }
+ std::vector<BucketCopy> pruned_replicas;
+ // Expect that there will be an input entry for each cancelled node in the common case.
+ pruned_replicas.reserve((replicas.size() >= cancel_scope.cancelled_nodes().size())
+ ? replicas.size() - cancel_scope.cancelled_nodes().size() : 0);
+ for (auto& candidate : replicas) {
+ if (!cancel_scope.node_is_cancelled(candidate.getNode())) {
+ pruned_replicas.emplace_back(candidate);
+ }
+ }
+ return pruned_replicas;
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/cancelled_replicas_pruner.h b/storage/src/vespa/storage/distributor/cancelled_replicas_pruner.h
new file mode 100644
index 00000000000..f12f78e569f
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/cancelled_replicas_pruner.h
@@ -0,0 +1,17 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/storage/bucketdb/bucketcopy.h>
+#include <vespa/storage/distributor/operations/cancel_scope.h>
+#include <span>
+#include <vector>
+
+namespace storage::distributor {
+
+/**
+ * Returns a new vector that contains all entries of `replicas` whose nodes are _not_ tagged as
+ * cancelled in `cancel_scope`. Returned entry ordering is identical to input ordering.
+ */
+[[nodiscard]] std::vector<BucketCopy> prune_cancelled_nodes(std::span<const BucketCopy> replicas, const CancelScope& cancel_scope);
+
+}
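
Editor's note: a standalone sketch of what prune_cancelled_nodes does, with plain node ids standing in for BucketCopy and a set standing in for CancelScope (assumed simplifications for illustration): keep input order, drop replicas on cancelled nodes, and reserve for the common case where every cancelled node has a matching input entry.

    #include <cstdint>
    #include <iostream>
    #include <unordered_set>
    #include <vector>

    std::vector<uint16_t> prune(const std::vector<uint16_t>& replicas,
                                const std::unordered_set<uint16_t>& cancelled) {
        std::vector<uint16_t> pruned;
        // Expect one input entry per cancelled node in the common case.
        pruned.reserve(replicas.size() >= cancelled.size()
                       ? replicas.size() - cancelled.size() : 0);
        for (uint16_t node : replicas) {
            if (cancelled.find(node) == cancelled.end()) {
                pruned.push_back(node); // ordering of kept entries is unchanged
            }
        }
        return pruned;
    }

    int main() {
        auto kept = prune({0, 1, 3, 7}, {1, 7});
        for (uint16_t n : kept) std::cout << n << ' '; // 0 3
        std::cout << '\n';
    }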
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
index 5969ccad4cb..7ba9c67b156 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
@@ -108,9 +108,7 @@ DistributorBucketSpace::owns_bucket_in_state(
}
bool
-DistributorBucketSpace::owns_bucket_in_state(
- const lib::ClusterState& clusterState,
- document::BucketId bucket) const
+DistributorBucketSpace::owns_bucket_in_state(const lib::ClusterState& clusterState, document::BucketId bucket) const
{
return owns_bucket_in_state(*_distribution, clusterState, bucket);
}
@@ -123,9 +121,9 @@ setup_ideal_nodes_bundle(IdealServiceLayerNodesBundle& ideal_nodes_bundle,
const lib::ClusterState& cluster_state,
document::BucketId bucket)
{
- ideal_nodes_bundle.set_available_nodes(distribution.getIdealStorageNodes(cluster_state, bucket, up_states));
- ideal_nodes_bundle.set_available_nonretired_nodes(distribution.getIdealStorageNodes(cluster_state, bucket, nonretired_up_states));
- ideal_nodes_bundle.set_available_nonretired_or_maintenance_nodes(distribution.getIdealStorageNodes(cluster_state, bucket, nonretired_or_maintenance_up_states));
+ ideal_nodes_bundle.set_nodes(distribution.getIdealStorageNodes(cluster_state, bucket, up_states),
+ distribution.getIdealStorageNodes(cluster_state, bucket, nonretired_up_states),
+ distribution.getIdealStorageNodes(cluster_state, bucket, nonretired_or_maintenance_up_states));
}
/*
@@ -152,16 +150,16 @@ DistributorBucketSpace::get_ideal_service_layer_nodes_bundle(document::BucketId
setup_ideal_nodes_bundle(ideal_nodes_bundle, *_distribution, *_clusterState, bucket);
return ideal_nodes_bundle;
}
- document::BucketId lookup_bucket(is_split_group_bucket(bucket) ? bucket.getUsedBits() : _distribution_bits, bucket.getId());
+ document::BucketId lookup_bucket(_distribution_bits, bucket.getId());
auto itr = _ideal_nodes.find(lookup_bucket);
if (itr != _ideal_nodes.end()) {
- return itr->second;
+ return *itr->second;
}
- IdealServiceLayerNodesBundle ideal_nodes_bundle;
- setup_ideal_nodes_bundle(ideal_nodes_bundle, *_distribution, *_clusterState, lookup_bucket);
+ auto ideal_nodes_bundle = std::make_unique<IdealServiceLayerNodesBundle>();
+ setup_ideal_nodes_bundle(*ideal_nodes_bundle, *_distribution, *_clusterState, lookup_bucket);
auto insres = _ideal_nodes.insert(std::make_pair(lookup_bucket, std::move(ideal_nodes_bundle)));
assert(insres.second);
- return insres.first->second;
+ return *insres.first->second;
}
BucketOwnershipFlags
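
Editor's note: the ideal-nodes cache above now stores bundles behind std::unique_ptr, so the reference handed back to callers is tied to a heap allocation rather than to the map's internal storage, which can relocate entries as it grows. A simplified sketch of that pattern (std::unordered_map and std::string stand in for vespalib::hash_map and the bundle):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <unordered_map>

    class Cache {
    public:
        const std::string& get(uint64_t key) {
            auto it = _entries.find(key);
            if (it != _entries.end()) {
                return *it->second;
            }
            auto value = std::make_unique<std::string>("bundle for " + std::to_string(key));
            auto res = _entries.emplace(key, std::move(value));
            // The object lives on the heap; the returned reference does not depend
            // on where the map keeps its slots.
            return *res.first->second;
        }
    private:
        std::unordered_map<uint64_t, std::unique_ptr<std::string>> _entries;
    };

    int main() {
        Cache cache;
        const std::string& first = cache.get(42);
        for (uint64_t k = 0; k < 1000; ++k) cache.get(k); // grow the map
        std::cout << first << '\n';                        // reference still points at the same bundle
    }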
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.h b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
index f38556a664c..a66f0e5e983 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.h
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
@@ -41,7 +41,7 @@ class DistributorBucketSpace {
std::shared_ptr<const lib::ClusterState> _pending_cluster_state;
std::vector<bool> _available_nodes;
mutable vespalib::hash_map<document::BucketId, BucketOwnershipFlags, document::BucketId::hash> _ownerships;
- mutable vespalib::hash_map<document::BucketId, IdealServiceLayerNodesBundle, document::BucketId::hash> _ideal_nodes;
+ mutable vespalib::hash_map<document::BucketId, std::unique_ptr<IdealServiceLayerNodesBundle>, document::BucketId::hash> _ideal_nodes;
void clear();
void enumerate_available_nodes();
diff --git a/storage/src/vespa/storage/distributor/distributor_host_info_reporter.cpp b/storage/src/vespa/storage/distributor/distributor_host_info_reporter.cpp
index bb7e573c980..46c9a526a8d 100644
--- a/storage/src/vespa/storage/distributor/distributor_host_info_reporter.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_host_info_reporter.cpp
@@ -3,15 +3,9 @@
#include "bucket_spaces_stats_provider.h"
#include "distributor_host_info_reporter.h"
#include "min_replica_provider.h"
-#include "pendingmessagetracker.h"
-
#include <set>
-using std::set;
-using std::unordered_map;
-
-namespace storage {
-namespace distributor {
+namespace storage::distributor {
using BucketSpacesStats = BucketSpacesStatsProvider::BucketSpacesStats;
using PerNodeBucketSpacesStats = BucketSpacesStatsProvider::PerNodeBucketSpacesStats;
@@ -48,10 +42,10 @@ writeBucketSpacesStats(vespalib::JsonStream& stream,
void
outputStorageNodes(vespalib::JsonStream& output,
- const unordered_map<uint16_t, uint32_t>& minReplica,
+ const MinReplicaMap & minReplica,
const PerNodeBucketSpacesStats& bucketSpacesStats)
{
- set<uint16_t> nodes;
+ std::set<uint16_t> nodes;
for (const auto& element : minReplica) {
nodes.insert(element.first);
}
@@ -104,6 +98,5 @@ DistributorHostInfoReporter::report(vespalib::JsonStream& output)
output << End();
}
-} // distributor
-} // storage
+}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index 616fd77fdd7..ad1cce46bea 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -120,8 +120,7 @@ DistributorStripe::sendReply(const std::shared_ptr<api::StorageReply>& reply)
}
void DistributorStripe::send_shutdown_abort_reply(const std::shared_ptr<api::StorageMessage>& msg) {
- api::StorageReply::UP reply(
- std::dynamic_pointer_cast<api::StorageCommand>(msg)->makeReply());
+ auto reply = std::dynamic_pointer_cast<api::StorageCommand>(msg)->makeReply();
reply->setResult(api::ReturnCode(api::ReturnCode::ABORTED, "Distributor is shutting down"));
send_up_with_tracking(std::shared_ptr<api::StorageMessage>(reply.release()));
}
@@ -179,8 +178,7 @@ DistributorStripe::handle_or_enqueue_message(const std::shared_ptr<api::StorageM
}
void
-DistributorStripe::handleCompletedMerge(
- const std::shared_ptr<api::MergeBucketReply>& reply)
+DistributorStripe::handleCompletedMerge(const std::shared_ptr<api::MergeBucketReply>& reply)
{
_maintenanceOperationOwner.handleReply(reply);
}
@@ -236,9 +234,7 @@ DistributorStripe::handleReply(const std::shared_ptr<api::StorageReply>& reply)
}
bool
-DistributorStripe::generateOperation(
- const std::shared_ptr<api::StorageMessage>& msg,
- Operation::SP& operation)
+DistributorStripe::generateOperation(const std::shared_ptr<api::StorageMessage>& msg, Operation::SP& operation)
{
return _externalOperationHandler.handleMessage(msg, operation);
}
@@ -277,7 +273,6 @@ DistributorStripe::getClusterStateBundle() const
void
DistributorStripe::enableClusterStateBundle(const lib::ClusterStateBundle& state)
{
- lib::Node my_node(lib::NodeType::DISTRIBUTOR, getDistributorIndex());
lib::ClusterStateBundle oldState = _clusterStateBundle;
_clusterStateBundle = state;
propagateClusterStates();
@@ -286,6 +281,7 @@ DistributorStripe::enableClusterStateBundle(const lib::ClusterStateBundle& state
enterRecoveryMode();
// Clear all active messages on nodes that are down.
+ // TODO this should also be done on nodes that are no longer part of the config!
const uint16_t old_node_count = oldState.getBaselineClusterState()->getNodeCount(lib::NodeType::STORAGE);
const uint16_t new_node_count = baseline_state.getNodeCount(lib::NodeType::STORAGE);
for (uint16_t i = 0; i < std::max(old_node_count, new_node_count); ++i) {
@@ -319,7 +315,7 @@ DistributorStripe::enterRecoveryMode()
{
LOG(debug, "Entering recovery mode");
_schedulingMode = MaintenanceScheduler::RECOVERY_SCHEDULING_MODE;
- _scanner->reset();
+ (void)_scanner->fetch_and_reset(); // Just drop accumulated stats on the floor.
// We enter recovery mode due to cluster state or distribution config changes.
// Until we have completed a new DB scan round, we don't know the state of our
// newly owned buckets and must not report stats for these out to the cluster
@@ -415,7 +411,6 @@ public:
bool check(uint32_t msgType, uint16_t node, uint8_t pri) override {
(void) node;
- (void) pri;
if (msgType == api::MessageType::SPLITBUCKET_ID && pri <= maxPri) {
found = true;
return false;
@@ -428,9 +423,7 @@ public:
}
void
-DistributorStripe::checkBucketForSplit(document::BucketSpace bucketSpace,
- const BucketDatabase::Entry& e,
- uint8_t priority)
+DistributorStripe::checkBucketForSplit(document::BucketSpace bucketSpace, const BucketDatabase::Entry& e, uint8_t priority)
{
if (!getConfig().doInlineSplit()) {
return;
@@ -440,16 +433,13 @@ DistributorStripe::checkBucketForSplit(document::BucketSpace bucketSpace,
// appropriate priority.
SplitChecker checker(priority);
for (uint32_t i = 0; i < e->getNodeCount(); ++i) {
- _pendingMessageTracker.checkPendingMessages(e->getNodeRef(i).getNode(),
- document::Bucket(bucketSpace, e.getBucketId()),
- checker);
+ _pendingMessageTracker.checkPendingMessages(e->getNodeRef(i).getNode(), document::Bucket(bucketSpace, e.getBucketId()), checker);
if (checker.found) {
return;
}
}
- Operation::SP operation =
- _idealStateManager.generateInterceptingSplit(bucketSpace, e, priority);
+ Operation::SP operation = _idealStateManager.generateInterceptingSplit(bucketSpace, e, priority);
if (operation.get()) {
_maintenanceOperationOwner.start(operation, priority);
@@ -458,8 +448,7 @@ DistributorStripe::checkBucketForSplit(document::BucketSpace bucketSpace,
// TODO STRIPE must be invoked by top-level bucket db updater probably
void
-DistributorStripe::propagateDefaultDistribution(
- std::shared_ptr<const lib::Distribution> distribution)
+DistributorStripe::propagateDefaultDistribution(std::shared_ptr<const lib::Distribution> distribution)
{
auto global_distr = GlobalBucketSpaceDistributionConverter::convert_to_global(*distribution);
for (auto* repo : {_bucketSpaceRepo.get(), _readOnlyBucketSpaceRepo.get()}) {
@@ -562,7 +551,7 @@ void DistributorStripe::startExternalOperations() {
_fetchedMessages.clear();
}
-std::unordered_map<uint16_t, uint32_t>
+MinReplicaMap
DistributorStripe::getMinReplica() const
{
std::lock_guard guard(_metricLock);
@@ -616,11 +605,9 @@ PerNodeBucketSpacesStats
toBucketSpacesStats(const NodeMaintenanceStatsTracker &maintenanceStats)
{
PerNodeBucketSpacesStats result;
- for (const auto &nodeEntry : maintenanceStats.perNodeStats()) {
- for (const auto &bucketSpaceEntry : nodeEntry.second) {
- auto bucketSpace = document::FixedBucketSpaces::to_string(bucketSpaceEntry.first);
- result[nodeEntry.first][bucketSpace] = toBucketSpaceStats(bucketSpaceEntry.second);
- }
+ for (const auto &entry : maintenanceStats.perNodeStats()) {
+ auto bucketSpace = document::FixedBucketSpaces::to_string(entry.first.bucketSpace());
+ result[entry.first.node()][bucketSpace] = toBucketSpaceStats(entry.second);
}
return result;
}
@@ -655,7 +642,7 @@ DistributorStripe::updateInternalMetricsForCompletedScan()
_bucketDBMetricUpdater.completeRound();
_bucketDbStats = _bucketDBMetricUpdater.getLastCompleteStats();
- _maintenanceStats = _scanner->getPendingMaintenanceStats();
+ _maintenanceStats = _scanner->fetch_and_reset();
auto new_space_stats = toBucketSpacesStats(_maintenanceStats.perNodeStats);
if (merge_no_longer_pending_edge(_bucketSpacesStats, new_space_stats)) {
_must_send_updated_host_info = true;
@@ -696,12 +683,9 @@ DistributorStripe::scanNextBucket()
updateInternalMetricsForCompletedScan();
leaveRecoveryMode();
send_updated_host_info_if_required();
- _scanner->reset();
} else {
const auto &distribution(_bucketSpaceRepo->get(scanResult.getBucketSpace()).getDistribution());
- _bucketDBMetricUpdater.visit(
- scanResult.getEntry(),
- distribution.getRedundancy());
+ _bucketDBMetricUpdater.visit(scanResult.getEntry(), distribution.getRedundancy());
}
return scanResult;
}
@@ -823,12 +807,6 @@ DistributorStripe::getActiveIdealStateOperations() const
return _maintenanceOperationOwner.toString();
}
-std::string
-DistributorStripe::getActiveOperations() const
-{
- return _operationOwner.toString();
-}
-
StripeAccessGuard::PendingOperationStats
DistributorStripe::pending_operation_stats() const
{
@@ -881,7 +859,7 @@ DistributorStripe::merge_entries_into_db(document::BucketSpace bucket_space,
const lib::Distribution& distribution,
const lib::ClusterState& new_state,
const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
+ const OutdatedNodes& outdated_nodes,
const std::vector<dbtransition::Entry>& entries)
{
bucket_db_updater().merge_entries_into_db(bucket_space, gathered_at_timestamp, distribution,
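
Editor's note: the scanner's separate stats getter and reset() are replaced above by a single fetch_and_reset(), so accumulated stats are handed out and cleared in one step. A small sketch of that drain-style accessor on a simplified tracker (assumed names, not the real scanner API):

    #include <cstdint>
    #include <iostream>
    #include <unordered_map>
    #include <utility>

    class StatsTracker {
    public:
        void note(uint16_t node) { ++_pending_per_node[node]; }
        // Return the accumulated stats and start a fresh accumulation in one call.
        std::unordered_map<uint16_t, uint32_t> fetch_and_reset() {
            return std::exchange(_pending_per_node, {});
        }
    private:
        std::unordered_map<uint16_t, uint32_t> _pending_per_node;
    };

    int main() {
        StatsTracker tracker;
        tracker.note(1);
        tracker.note(1);
        auto snapshot = tracker.fetch_and_reset();
        std::cout << snapshot[1] << ' ' << tracker.fetch_and_reset().size() << '\n'; // 2 0
    }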
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h
index 801efa0ff73..338a6c72125 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.h
@@ -122,7 +122,6 @@ public:
StripeAccessGuard::PendingOperationStats pending_operation_stats() const override;
std::string getActiveIdealStateOperations() const;
- std::string getActiveOperations() const;
framework::ThreadWaitInfo doNonCriticalTick(framework::ThreadIndex);
@@ -219,7 +218,7 @@ private:
/**
* Return a copy of the latest min replica data, see MinReplicaProvider.
*/
- std::unordered_map<uint16_t, uint32_t> getMinReplica() const override;
+ MinReplicaMap getMinReplica() const override;
PerNodeBucketSpacesStats getBucketSpacesStats() const override;
@@ -286,7 +285,7 @@ private:
const lib::Distribution& distribution,
const lib::ClusterState& new_state,
const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
+ const OutdatedNodes & outdated_nodes,
const std::vector<dbtransition::Entry>& entries) override;
void update_read_snapshot_before_db_pruning() override;
void update_read_snapshot_after_db_pruning(const lib::ClusterStateBundle& new_state) override;
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
index 9a5fd595b1d..47b89b2dd19 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
@@ -5,6 +5,7 @@
#include "distributor_bucket_space.h"
#include "pendingmessagetracker.h"
#include "storage_node_up_states.h"
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/storageframework/generic/clock/clock.h>
#include <vespa/document/select/parser.h>
#include <vespa/vdslib/state/cluster_state_bundle.h>
@@ -17,12 +18,11 @@ using document::BucketSpace;
namespace storage::distributor {
-DistributorStripeComponent::DistributorStripeComponent(
- DistributorStripeInterface& distributor,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
- DistributorComponentRegister& compReg,
- const std::string& name)
+DistributorStripeComponent::DistributorStripeComponent(DistributorStripeInterface& distributor,
+ DistributorBucketSpaceRepo& bucketSpaceRepo,
+ DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
+ DistributorComponentRegister& compReg,
+ const std::string& name)
: storage::DistributorComponent(compReg, name),
_distributor(distributor),
_bucketSpaceRepo(bucketSpaceRepo),
@@ -44,30 +44,6 @@ DistributorStripeComponent::sendUp(const api::StorageMessage::SP& msg)
_distributor.getMessageSender().sendUp(msg);
}
-void
-DistributorStripeComponent::enumerateUnavailableNodes(
- std::vector<uint16_t>& unavailableNodes,
- const lib::ClusterState& s,
- const document::Bucket& bucket,
- const std::vector<BucketCopy>& candidates) const
-{
- const auto* up_states = storage_node_up_states();
- for (uint32_t i = 0; i < candidates.size(); ++i) {
- const BucketCopy& copy(candidates[i]);
- const lib::NodeState& ns(
- s.getNodeState(lib::Node(lib::NodeType::STORAGE, copy.getNode())));
- if (!ns.getState().oneOf(up_states)) {
- LOG(debug,
- "Trying to add a bucket copy to %s whose node is marked as "
- "down in the cluster state: %s. Ignoring it since no zombies "
- "are allowed!",
- bucket.toString().c_str(),
- copy.toString().c_str());
- unavailableNodes.emplace_back(copy.getNode());
- }
- }
-}
-
namespace {
/**
@@ -78,18 +54,19 @@ class UpdateBucketDatabaseProcessor : public BucketDatabase::EntryUpdateProcesso
const std::vector<BucketCopy>& _changed_nodes;
std::vector<uint16_t> _ideal_nodes;
bool _reset_trusted;
+ using ConstNodesRef = IdealServiceLayerNodesBundle::ConstNodesRef;
public:
- UpdateBucketDatabaseProcessor(const framework::Clock& clock, const std::vector<BucketCopy>& changed_nodes, std::vector<uint16_t> ideal_nodes, bool reset_trusted);
+ UpdateBucketDatabaseProcessor(const framework::Clock& clock, const std::vector<BucketCopy>& changed_nodes, ConstNodesRef ideal_nodes, bool reset_trusted);
~UpdateBucketDatabaseProcessor() override;
BucketDatabase::Entry create_entry(const document::BucketId& bucket) const override;
bool process_entry(BucketDatabase::Entry &entry) const override;
};
-UpdateBucketDatabaseProcessor::UpdateBucketDatabaseProcessor(const framework::Clock& clock, const std::vector<BucketCopy>& changed_nodes, std::vector<uint16_t> ideal_nodes, bool reset_trusted)
+UpdateBucketDatabaseProcessor::UpdateBucketDatabaseProcessor(const framework::Clock& clock, const std::vector<BucketCopy>& changed_nodes, ConstNodesRef ideal_nodes, bool reset_trusted)
: BucketDatabase::EntryUpdateProcessor(),
_clock(clock),
_changed_nodes(changed_nodes),
- _ideal_nodes(std::move(ideal_nodes)),
+ _ideal_nodes(ideal_nodes.cbegin(), ideal_nodes.cend()),
_reset_trusted(reset_trusted)
{
}
@@ -97,8 +74,7 @@ UpdateBucketDatabaseProcessor::UpdateBucketDatabaseProcessor(const framework::Cl
UpdateBucketDatabaseProcessor::~UpdateBucketDatabaseProcessor() = default;
BucketDatabase::Entry
-UpdateBucketDatabaseProcessor::create_entry(const document::BucketId &bucket) const
-{
+UpdateBucketDatabaseProcessor::create_entry(const document::BucketId &bucket) const {
return BucketDatabase::Entry(bucket, BucketInfo());
}
@@ -125,21 +101,16 @@ UpdateBucketDatabaseProcessor::process_entry(BucketDatabase::Entry &entry) const
}
void
-DistributorStripeComponent::update_bucket_database(
- const document::Bucket& bucket,
- const std::vector<BucketCopy>& changed_nodes,
- uint32_t update_flags)
+DistributorStripeComponent::update_bucket_database(const document::Bucket& bucket,
+ const std::vector<BucketCopy>& changed_nodes, uint32_t update_flags)
{
auto &bucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
assert(!(bucket.getBucketId() == document::BucketId()));
BucketOwnership ownership(bucketSpace.check_ownership_in_pending_and_current_state(bucket.getBucketId()));
if (!ownership.isOwned()) {
- LOG(debug,
- "Trying to add %s to database that we do not own according to "
- "cluster state '%s' - ignoring!",
- bucket.toString().c_str(),
- ownership.getNonOwnedState().toString().c_str());
+ LOG(debug, "Trying to add %s to database that we do not own according to cluster state '%s' - ignoring!",
+ bucket.toString().c_str(), ownership.getNonOwnedState().toString().c_str());
return;
}
@@ -168,7 +139,7 @@ DistributorStripeComponent::update_bucket_database(
UpdateBucketDatabaseProcessor processor(getClock(),
found_down_node ? up_nodes : changed_nodes,
- bucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).get_available_nodes(),
+ bucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).available_nodes(),
(update_flags & DatabaseUpdate::RESET_TRUSTED) != 0);
bucketSpace.getBucketDatabase().process_update(bucket.getBucketId(), processor, (update_flags & DatabaseUpdate::CREATE_IF_NONEXISTING) != 0);
@@ -184,8 +155,7 @@ DistributorStripeComponent::node_address(uint16_t node_index) const noexcept
// Implements DistributorStripeOperationContext
void
-DistributorStripeComponent::remove_nodes_from_bucket_database(const document::Bucket& bucket,
- const std::vector<uint16_t>& nodes)
+DistributorStripeComponent::remove_nodes_from_bucket_database(const document::Bucket& bucket, const std::vector<uint16_t>& nodes)
{
auto &bucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
BucketDatabase::Entry dbentry = bucketSpace.getBucketDatabase().get(bucket.getBucketId());
@@ -193,21 +163,15 @@ DistributorStripeComponent::remove_nodes_from_bucket_database(const document::Bu
if (dbentry.valid()) {
for (uint32_t i = 0; i < nodes.size(); ++i) {
if (dbentry->removeNode(nodes[i])) {
- LOG(debug,
- "Removed node %d from bucket %s. %u copies remaining",
- nodes[i],
- bucket.toString().c_str(),
- dbentry->getNodeCount());
+ LOG(debug, "Removed node %d from bucket %s. %u copies remaining",
+ nodes[i], bucket.toString().c_str(), dbentry->getNodeCount());
}
}
if (dbentry->getNodeCount() != 0) {
bucketSpace.getBucketDatabase().update(dbentry);
} else {
- LOG(debug,
- "After update, bucket %s now has no copies. "
- "Removing from database.",
- bucket.toString().c_str());
+ LOG(debug, "After update, bucket %s now has no copies. Removing from database.", bucket.toString().c_str());
bucketSpace.getBucketDatabase().remove(bucket.getBucketId());
}
@@ -218,7 +182,6 @@ document::BucketId
DistributorStripeComponent::make_split_bit_constrained_bucket_id(const document::DocumentId& doc_id) const
{
document::BucketId id(getBucketIdFactory().getBucketId(doc_id));
-
id.setUsedBits(_distributor.getConfig().getMinimalBucketSplit());
return id.stripUnused();
}
@@ -239,28 +202,18 @@ DistributorStripeComponent::get_sibling(const document::BucketId& bid) const
zeroBucket = document::BucketId(1, 0);
oneBucket = document::BucketId(1, 1);
} else {
- document::BucketId joinedBucket = document::BucketId(
- bid.getUsedBits() - 1,
- bid.getId());
-
- zeroBucket = document::BucketId(
- bid.getUsedBits(),
- joinedBucket.getId());
-
+ document::BucketId joinedBucket = document::BucketId(bid.getUsedBits() - 1,bid.getId());
+ zeroBucket = document::BucketId(bid.getUsedBits(), joinedBucket.getId());
uint64_t hiBit = 1;
hiBit <<= (bid.getUsedBits() - 1);
- oneBucket = document::BucketId(
- bid.getUsedBits(),
- joinedBucket.getId() | hiBit);
+ oneBucket = document::BucketId(bid.getUsedBits(), joinedBucket.getId() | hiBit);
}
return (zeroBucket == bid) ? oneBucket : zeroBucket;
}
bool
-DistributorStripeComponent::has_pending_message(uint16_t node_index,
- const document::Bucket& bucket,
- uint32_t message_type) const
+DistributorStripeComponent::has_pending_message(uint16_t node_index, const document::Bucket& bucket, uint32_t message_type) const
{
const auto& sender = static_cast<const DistributorStripeMessageSender&>(getDistributor());
return sender.getPendingMessageTracker().hasPendingMessage(node_index, bucket, message_type);
@@ -275,8 +228,7 @@ DistributorStripeComponent::cluster_state_bundle() const
bool
DistributorStripeComponent::storage_node_is_up(document::BucketSpace bucket_space, uint32_t node_index) const
{
- const lib::NodeState& ns = cluster_state_bundle().getDerivedClusterState(bucket_space)->getNodeState(
- lib::Node(lib::NodeType::STORAGE, node_index));
+ const auto & ns = cluster_state_bundle().getDerivedClusterState(bucket_space)->getNodeState(lib::Node(lib::NodeType::STORAGE, node_index));
return ns.getState().oneOf(storage_node_up_states());
}
@@ -294,4 +246,14 @@ DistributorStripeComponent::parse_selection(const vespalib::string& selection) c
return parser.parse(selection);
}
+void
+DistributorStripeComponent::update_bucket_database(const document::Bucket& bucket, const BucketCopy& changed_node, uint32_t update_flags) {
+ update_bucket_database(bucket, toVector<BucketCopy>(changed_node),update_flags);
+}
+
+void
+DistributorStripeComponent::remove_node_from_bucket_database(const document::Bucket& bucket, uint16_t node_index) {
+ remove_nodes_from_bucket_database(bucket, toVector<uint16_t>(node_index));
+}
+
}
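
Editor's note: get_sibling() above effectively toggles the highest used bit of the bucket id to find the other bucket at the same split level. A standalone sketch on raw ids (the real code goes through document::BucketId and the joined parent bucket):

    #include <cstdint>
    #include <iostream>

    // Two buckets at the same split level share all bits except the highest used
    // one, so the sibling is obtained by flipping that bit.
    uint64_t sibling_raw_id(uint64_t raw_id, uint32_t used_bits) {
        uint64_t hi_bit = uint64_t(1) << (used_bits - 1);
        return raw_id ^ hi_bit;
    }

    int main() {
        // With 3 used bits, 0b101 and 0b001 are siblings.
        std::cout << std::hex << sibling_raw_id(0x5, 3) << '\n'; // 1
        std::cout << std::hex << sibling_raw_id(0x1, 3) << '\n'; // 5
    }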
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.h b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
index 5bcf9eec76d..8fd439992f7 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
@@ -8,7 +8,6 @@
#include "operationowner.h"
#include "statechecker.h"
#include <vespa/storage/common/distributorcomponent.h>
-#include <vespa/storage/storageutil/utils.h>
#include <vespa/storageapi/messageapi/storagecommand.h>
#include <vespa/storageapi/buckets/bucketinfo.h>
@@ -68,37 +67,25 @@ public:
/**
* Simple API for the common case of modifying a single node.
*/
- void update_bucket_database(const document::Bucket& bucket,
- const BucketCopy& changed_node,
- uint32_t update_flags) override {
- update_bucket_database(bucket,
- toVector<BucketCopy>(changed_node),
- update_flags);
- }
-
+ void update_bucket_database(const document::Bucket& bucket, const BucketCopy& changed_node, uint32_t update_flags) override;
/**
* Adds the given copies to the bucket database.
*/
- void update_bucket_database(const document::Bucket& bucket,
- const std::vector<BucketCopy>& changed_nodes,
- uint32_t update_flags) override;
+ void update_bucket_database(const document::Bucket& bucket, const std::vector<BucketCopy>& changed_nodes, uint32_t update_flags) override;
/**
* Removes a copy from the given bucket from the bucket database.
* If the resulting bucket is empty afterwards, removes the entire
* bucket entry from the bucket database.
*/
- void remove_node_from_bucket_database(const document::Bucket& bucket, uint16_t node_index) override {
- remove_nodes_from_bucket_database(bucket, toVector<uint16_t>(node_index));
- }
+ void remove_node_from_bucket_database(const document::Bucket& bucket, uint16_t node_index) override;
/**
* Removes the given bucket copies from the bucket database.
* If the resulting bucket is empty afterwards, removes the entire
* bucket entry from the bucket database.
*/
- void remove_nodes_from_bucket_database(const document::Bucket& bucket,
- const std::vector<uint16_t>& nodes) override;
+ void remove_nodes_from_bucket_database(const document::Bucket& bucket, const std::vector<uint16_t>& nodes) override;
const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept override {
return _bucketSpaceRepo;
@@ -129,9 +116,7 @@ public:
const DistributorConfiguration& distributor_config() const noexcept override {
return getDistributor().getConfig();
}
- void send_inline_split_if_bucket_too_large(document::BucketSpace bucket_space,
- const BucketDatabase::Entry& entry,
- uint8_t pri) override {
+ void send_inline_split_if_bucket_too_large(document::BucketSpace bucket_space, const BucketDatabase::Entry& entry, uint8_t pri) override {
getDistributor().checkBucketForSplit(bucket_space, entry, pri);
}
OperationRoutingSnapshot read_snapshot_for_bucket(const document::Bucket& bucket) const override {
@@ -143,9 +128,7 @@ public:
const PendingMessageTracker& pending_message_tracker() const noexcept override {
return getDistributor().getPendingMessageTracker();
}
- bool has_pending_message(uint16_t node_index,
- const document::Bucket& bucket,
- uint32_t message_type) const override;
+ bool has_pending_message(uint16_t node_index, const document::Bucket& bucket, uint32_t message_type) const override;
const lib::ClusterState* pending_cluster_state_or_null(const document::BucketSpace& bucket_space) const override {
return getDistributor().pendingClusterStateOrNull(bucket_space);
}
@@ -171,15 +154,7 @@ public:
std::unique_ptr<document::select::Node> parse_selection(const vespalib::string& selection) const override;
private:
- void enumerateUnavailableNodes(
- std::vector<uint16_t>& unavailableNodes,
- const lib::ClusterState& s,
- const document::Bucket& bucket,
- const std::vector<BucketCopy>& candidates) const;
DistributorStripeInterface& _distributor;
-
-protected:
-
DistributorBucketSpaceRepo& _bucketSpaceRepo;
DistributorBucketSpaceRepo& _readOnlyBucketSpaceRepo;
};
diff --git a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp
index 0d37219356e..1ce5e5c589f 100644
--- a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp
+++ b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp
@@ -1,19 +1,60 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "ideal_service_layer_nodes_bundle.h"
-#include <vespa/vdslib/distribution/idealnodecalculator.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
namespace storage::distributor {
-IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle() noexcept
- : _available_nodes(),
- _available_nonretired_nodes(),
- _available_nonretired_or_maintenance_nodes()
-{
+namespace {
+constexpr size_t BUILD_HASH_LIMIT = 32;
}
-IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle(IdealServiceLayerNodesBundle &&) noexcept = default;
+struct IdealServiceLayerNodesBundle::LookupMap : public vespalib::hash_map<uint16_t, Index> {
+ using Parent = vespalib::hash_map<uint16_t, Index>;
+ using Parent::Parent;
+};
+IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle() noexcept = default;
+IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle(IdealServiceLayerNodesBundle &&) noexcept = default;
IdealServiceLayerNodesBundle::~IdealServiceLayerNodesBundle() = default;
+void
+IdealServiceLayerNodesBundle::set_nodes(ConstNodesRef nodes,
+ ConstNodesRef nonretired_nodes,
+ ConstNodesRef nonretired_or_maintenance_nodes)
+{
+ _nodes.clear();
+ _nodes.reserve(nodes.size() + nonretired_nodes.size() + nonretired_or_maintenance_nodes.size());
+ std::for_each(nodes.cbegin(), nodes.cend(), [this](uint16_t n) { _nodes.emplace_back(n); });
+ _available_sz = nodes.size();
+ std::for_each(nonretired_nodes.cbegin(), nonretired_nodes.cend(), [this](uint16_t n) { _nodes.emplace_back(n); });
+ _nonretired_sz = nonretired_nodes.size();
+ std::for_each(nonretired_or_maintenance_nodes.cbegin(), nonretired_or_maintenance_nodes.cend(), [this](uint16_t n) { _nodes.emplace_back(n); });
+
+ if (nonretired_or_maintenance_nodes.size() > BUILD_HASH_LIMIT) {
+ _nonretired_or_maintenance_node_2_index = std::make_unique<LookupMap>(nonretired_or_maintenance_nodes.size());
+ for (uint16_t i(0); i < nonretired_or_maintenance_nodes.size(); i++) {
+ _nonretired_or_maintenance_node_2_index->insert(std::make_pair(nonretired_or_maintenance_nodes[i], Index(i)));
+ }
+ }
+}
+
+IdealServiceLayerNodesBundle::Index
+IdealServiceLayerNodesBundle::ConstNodesRef2Index::lookup(uint16_t node) const noexcept {
+ for (uint16_t i(0); i < _idealState.size(); i++) {
+ if (node == _idealState[i]) return Index(i);
+ }
+ return Index::invalid();
+}
+
+IdealServiceLayerNodesBundle::Index
+IdealServiceLayerNodesBundle::nonretired_or_maintenance_index(uint16_t node) const noexcept {
+ if (_nonretired_or_maintenance_node_2_index) {
+ const auto found = _nonretired_or_maintenance_node_2_index->find(node);
+ return (found != _nonretired_or_maintenance_node_2_index->end()) ? found->second : Index::invalid();
+ } else {
+ return ConstNodesRef2Index(available_nonretired_or_maintenance_nodes()).lookup(node);
+ }
+}
+
}
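
Editor's note: BUILD_HASH_LIMIT makes node-to-index lookup hybrid: a linear scan for the usual handful of ideal nodes, and a hash map only when the list is large. A simplified sketch of that size-threshold strategy (std::unordered_map instead of vespalib::hash_map, illustrative threshold):

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    class NodeIndex {
    public:
        explicit NodeIndex(std::vector<uint16_t> nodes, size_t hash_limit = 32)
            : _nodes(std::move(nodes))
        {
            if (_nodes.size() > hash_limit) {
                // Only pay for the hash map when the linear scan would be slow.
                _map = std::make_unique<std::unordered_map<uint16_t, uint16_t>>();
                for (size_t i = 0; i < _nodes.size(); ++i) {
                    _map->emplace(_nodes[i], static_cast<uint16_t>(i));
                }
            }
        }
        // Position of `node` in the list, or 0xffff if absent.
        uint16_t lookup(uint16_t node) const {
            if (_map) {
                auto it = _map->find(node);
                return it != _map->end() ? it->second : 0xffff;
            }
            for (size_t i = 0; i < _nodes.size(); ++i) {
                if (_nodes[i] == node) return static_cast<uint16_t>(i);
            }
            return 0xffff;
        }
    private:
        std::vector<uint16_t> _nodes;
        std::unique_ptr<std::unordered_map<uint16_t, uint16_t>> _map;
    };

    int main() {
        NodeIndex small_index({4, 2, 9});
        std::cout << small_index.lookup(9) << ' ' << small_index.lookup(5) << '\n'; // 2 65535
    }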
diff --git a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h
index 929ec7aadc1..1fce5bf0813 100644
--- a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h
+++ b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h
@@ -1,8 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vector>
-#include <cstdint>
+#include <vespa/vespalib/util/small_vector.h>
namespace storage::distributor {
@@ -10,28 +9,63 @@ namespace storage::distributor {
* Bundle of ideal service layer nodes for a bucket.
*/
class IdealServiceLayerNodesBundle {
- std::vector<uint16_t> _available_nodes;
- std::vector<uint16_t> _available_nonretired_nodes;
- std::vector<uint16_t> _available_nonretired_or_maintenance_nodes;
public:
+ using ConstNodesRef = vespalib::ConstArrayRef<uint16_t>;
+ class Index {
+ public:
+ constexpr explicit Index(uint16_t index) noexcept : _index(index) {}
+ constexpr bool valid() const noexcept {
+ return _index < MAX_INDEX;
+ }
+ constexpr operator uint16_t () const noexcept { return _index; }
+ static constexpr Index invalid() noexcept { return Index(MAX_INDEX); }
+ private:
+ static constexpr uint16_t MAX_INDEX = 0xffff;
+ uint16_t _index;
+ };
+ struct Node2Index {
+ virtual ~Node2Index() = default;
+ virtual Index lookup(uint16_t node) const noexcept = 0;
+ };
+ class NonRetiredOrMaintenance2Index final : public Node2Index {
+ public:
+ NonRetiredOrMaintenance2Index(const IdealServiceLayerNodesBundle & idealState) noexcept : _idealState(idealState) {}
+ Index lookup(uint16_t node) const noexcept override {
+ return _idealState.nonretired_or_maintenance_index(node);
+ }
+ private:
+ const IdealServiceLayerNodesBundle & _idealState;
+ };
+ class ConstNodesRef2Index final : public Node2Index {
+ public:
+ ConstNodesRef2Index(ConstNodesRef idealState) noexcept : _idealState(idealState) {}
+ Index lookup(uint16_t node) const noexcept override;
+ private:
+ ConstNodesRef _idealState;
+ };
IdealServiceLayerNodesBundle() noexcept;
IdealServiceLayerNodesBundle(IdealServiceLayerNodesBundle &&) noexcept;
~IdealServiceLayerNodesBundle();
- void set_available_nodes(std::vector<uint16_t> available_nodes) {
- _available_nodes = std::move(available_nodes);
+ void set_nodes(ConstNodesRef nodes, ConstNodesRef nonretired_nodes, ConstNodesRef nonretired_or_maintenance_nodes);
+ ConstNodesRef available_nodes() const noexcept { return {_nodes.data(), _available_sz}; }
+ ConstNodesRef available_nonretired_nodes() const noexcept { return {_nodes.data() + _available_sz, _nonretired_sz}; }
+ ConstNodesRef available_nonretired_or_maintenance_nodes() const noexcept {
+ uint16_t offset = _available_sz + _nonretired_sz;
+ return {_nodes.data() + offset, _nodes.size() - offset};
}
- void set_available_nonretired_nodes(std::vector<uint16_t> available_nonretired_nodes) {
- _available_nonretired_nodes = std::move(available_nonretired_nodes);
- }
- void set_available_nonretired_or_maintenance_nodes(std::vector<uint16_t> available_nonretired_or_maintenance_nodes) {
- _available_nonretired_or_maintenance_nodes = std::move(available_nonretired_or_maintenance_nodes);
- }
- std::vector<uint16_t> get_available_nodes() const { return _available_nodes; }
- std::vector<uint16_t> get_available_nonretired_nodes() const { return _available_nonretired_nodes; }
- std::vector<uint16_t> get_available_nonretired_or_maintenance_nodes() const {
- return _available_nonretired_or_maintenance_nodes;
+ bool is_nonretired_or_maintenance(uint16_t node) const noexcept {
+ return nonretired_or_maintenance_index(node) != Index::invalid();
}
+ NonRetiredOrMaintenance2Index nonretired_or_maintenance_to_index() const noexcept { return {*this}; }
+ ConstNodesRef2Index available_to_index() const noexcept { return {available_nodes()}; }
+private:
+ struct LookupMap;
+ Index nonretired_or_maintenance_index(uint16_t node) const noexcept;
+ vespalib::SmallVector<uint16_t,16> _nodes;
+ std::unique_ptr<LookupMap> _nonretired_or_maintenance_node_2_index;
+ uint16_t _available_sz;
+ uint16_t _nonretired_sz;
};
}
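
Editor's note: set_nodes() packs the three node lists back-to-back into one small vector, and the accessors hand out views into slices of it. A minimal sketch of the same layout, with std::span standing in for vespalib::ConstArrayRef:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <span>
    #include <vector>

    class PackedNodeLists {
    public:
        void set(std::span<const uint16_t> a, std::span<const uint16_t> b, std::span<const uint16_t> c) {
            _nodes.clear();
            _nodes.reserve(a.size() + b.size() + c.size());
            _nodes.insert(_nodes.end(), a.begin(), a.end());
            _a_size = static_cast<uint16_t>(a.size());
            _nodes.insert(_nodes.end(), b.begin(), b.end());
            _b_size = static_cast<uint16_t>(b.size());
            _nodes.insert(_nodes.end(), c.begin(), c.end());
        }
        std::span<const uint16_t> first()  const { return {_nodes.data(), _a_size}; }
        std::span<const uint16_t> second() const { return {_nodes.data() + _a_size, _b_size}; }
        std::span<const uint16_t> third()  const {
            std::size_t offset = _a_size + _b_size;
            return {_nodes.data() + offset, _nodes.size() - offset};
        }
    private:
        std::vector<uint16_t> _nodes; // the real bundle uses vespalib::SmallVector
        uint16_t _a_size = 0;
        uint16_t _b_size = 0;
    };

    int main() {
        PackedNodeLists lists;
        std::vector<uint16_t> up{0, 1, 2}, nonretired{0, 2}, or_maintenance{0, 2, 5};
        lists.set(up, nonretired, or_maintenance);
        std::cout << lists.first().size() << ' ' << lists.second()[1] << ' ' << lists.third().back() << '\n'; // 3 2 5
    }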
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.cpp b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
index cad141e76ed..bc928ca3d41 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
@@ -10,10 +10,9 @@
#include <vespa/storageapi/message/persistence.h>
#include <vespa/document/bucket/fixed_bucket_spaces.h>
#include <vespa/vespalib/util/assert.h>
-#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/log/log.h>
-LOG_SETUP(".distributor.operation.queue");
+LOG_SETUP(".distributor.idealstatemanager");
using document::BucketSpace;
using storage::lib::Node;
@@ -21,10 +20,9 @@ using storage::lib::NodeType;
namespace storage::distributor {
-IdealStateManager::IdealStateManager(
- const DistributorNodeContext& node_ctx,
- DistributorStripeOperationContext& op_ctx,
- IdealStateMetricSet& metrics)
+IdealStateManager::IdealStateManager(const DistributorNodeContext& node_ctx,
+ DistributorStripeOperationContext& op_ctx,
+ IdealStateMetricSet& metrics)
: _metrics(metrics),
_stateCheckers(),
_splitBucketStateChecker(nullptr),
@@ -56,9 +54,7 @@ IdealStateManager::fillParentAndChildBuckets(StateChecker::Context& c)
{
c.db.getAll(c.getBucketId(), c.entries);
if (c.entries.empty()) {
- LOG(spam,
- "Did not find bucket %s in bucket database",
- c.bucket.toString().c_str());
+ LOG(spam, "Did not find bucket %s in bucket database", c.bucket.toString().c_str());
}
}
void
@@ -85,8 +81,7 @@ namespace {
* overwriting if already explicitly set.
*/
bool
-canOverwriteResult(const StateChecker::Result& existing,
- const StateChecker::Result& candidate)
+canOverwriteResult(const StateChecker::Result& existing, const StateChecker::Result& candidate)
{
return (!existing.getPriority().requiresMaintenance()
&& candidate.getPriority().requiresMaintenance());
@@ -101,9 +96,7 @@ IdealStateManager::runStateCheckers(StateChecker::Context& c) const
// We go through _all_ active state checkers so that statistics can be
// collected across all checkers, not just the ones that are highest pri.
for (const auto & checker : _stateCheckers) {
- if (!operation_context().distributor_config().stateCheckerIsActive(
- checker->getName()))
- {
+ if (!operation_context().distributor_config().stateCheckerIsActive(checker->getName())) {
LOG(spam, "Skipping state checker %s", checker->getName());
continue;
}
@@ -116,7 +109,8 @@ IdealStateManager::runStateCheckers(StateChecker::Context& c) const
return highestPri;
}
-void IdealStateManager::verify_only_live_nodes_in_context(const StateChecker::Context& c) const {
+void
+IdealStateManager::verify_only_live_nodes_in_context(const StateChecker::Context& c) const {
if (_has_logged_phantom_replica_warning) {
return;
}
@@ -125,11 +119,8 @@ void IdealStateManager::verify_only_live_nodes_in_context(const StateChecker::Co
const auto& state = c.systemState.getNodeState(lib::Node(lib::NodeType::STORAGE, index));
// Only nodes in Up, Initializing or Retired should ever be present in the DB.
if (!state.getState().oneOf("uir")) {
- LOG(error, "%s in bucket DB is on node %u, which is in unavailable state %s. "
- "Current cluster state is '%s'",
- c.entry.getBucketId().toString().c_str(),
- index,
- state.getState().toString().c_str(),
+ LOG(error, "%s in bucket DB is on node %u, which is in unavailable state %s. Current cluster state is '%s'",
+ c.entry.getBucketId().toString().c_str(), index, state.getState().toString().c_str(),
c.systemState.toString().c_str());
ASSERT_ONCE_OR_LOG(false, "Bucket DB contains replicas on unavailable node", 10000);
_has_logged_phantom_replica_warning = true;
@@ -138,9 +129,7 @@ void IdealStateManager::verify_only_live_nodes_in_context(const StateChecker::Co
}
StateChecker::Result
-IdealStateManager::generateHighestPriority(
- const document::Bucket& bucket,
- NodeMaintenanceStatsTracker& statsTracker) const
+IdealStateManager::generateHighestPriority(const document::Bucket& bucket, NodeMaintenanceStatsTracker& statsTracker) const
{
auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
@@ -159,9 +148,7 @@ IdealStateManager::generateHighestPriority(
}
MaintenancePriorityAndType
-IdealStateManager::prioritize(
- const document::Bucket& bucket,
- NodeMaintenanceStatsTracker& statsTracker) const
+IdealStateManager::prioritize(const document::Bucket& bucket, NodeMaintenanceStatsTracker& statsTracker) const
{
StateChecker::Result generated(generateHighestPriority(bucket, statsTracker));
MaintenancePriority priority(generated.getPriority());
@@ -172,8 +159,7 @@ IdealStateManager::prioritize(
}
IdealStateOperation::SP
-IdealStateManager::generateInterceptingSplit(BucketSpace bucketSpace,
- const BucketDatabase::Entry& e,
+IdealStateManager::generateInterceptingSplit(BucketSpace bucketSpace, const BucketDatabase::Entry& e,
api::StorageMessage::Priority pri)
{
NodeMaintenanceStatsTracker statsTracker;
@@ -199,18 +185,15 @@ MaintenanceOperation::SP
IdealStateManager::generate(const document::Bucket& bucket) const
{
NodeMaintenanceStatsTracker statsTracker;
- IdealStateOperation::SP op(
- generateHighestPriority(bucket, statsTracker).createOperation());
+ IdealStateOperation::SP op(generateHighestPriority(bucket, statsTracker).createOperation());
if (op.get()) {
- op->setIdealStateManager(
- const_cast<IdealStateManager*>(this));
+ op->setIdealStateManager(const_cast<IdealStateManager*>(this));
}
return op;
}
std::vector<MaintenanceOperation::SP>
-IdealStateManager::generateAll(const document::Bucket &bucket,
- NodeMaintenanceStatsTracker& statsTracker) const
+IdealStateManager::generateAll(const document::Bucket &bucket, NodeMaintenanceStatsTracker& statsTracker) const
{
auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
@@ -234,15 +217,11 @@ IdealStateManager::generateAll(const document::Bucket &bucket,
}
void
-IdealStateManager::getBucketStatus(
- BucketSpace bucketSpace,
- const BucketDatabase::ConstEntryRef& entry,
- NodeMaintenanceStatsTracker& statsTracker,
- std::ostream& out) const
+IdealStateManager::getBucketStatus(BucketSpace bucketSpace, const BucketDatabase::ConstEntryRef& entry,
+ NodeMaintenanceStatsTracker& statsTracker, std::ostream& out) const
{
document::Bucket bucket(bucketSpace, entry.getBucketId());
- std::vector<MaintenanceOperation::SP> operations(
- generateAll(bucket, statsTracker));
+ std::vector<MaintenanceOperation::SP> operations(generateAll(bucket, statsTracker));
if (operations.empty()) {
out << entry.getBucketId() << " : ";
} else {
@@ -261,13 +240,15 @@ IdealStateManager::getBucketStatus(
out << "[" << entry->toString() << "]<br>\n";
}
-void IdealStateManager::dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const {
+void
+IdealStateManager::dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const {
StatusBucketVisitor proc(*this, bucket_space, out);
auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket_space);
distributorBucketSpace.getBucketDatabase().for_each_upper_bound(proc);
}
-void IdealStateManager::getBucketStatus(std::ostream& out) const {
+void
+IdealStateManager::getBucketStatus(std::ostream& out) const {
LOG(debug, "Dumping bucket database valid at cluster state version %u",
operation_context().cluster_state_bundle().getVersion());
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.h b/storage/src/vespa/storage/distributor/idealstatemanager.h
index 0c9e3ffa1c6..39a662e4a81 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.h
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.h
@@ -49,18 +49,14 @@ public:
MaintenanceOperation::SP generate(const document::Bucket& bucket) const override;
// MaintenanceOperationGenerator
- std::vector<MaintenanceOperation::SP> generateAll(
- const document::Bucket& bucket,
- NodeMaintenanceStatsTracker& statsTracker) const override;
+ std::vector<MaintenanceOperation::SP> generateAll(const document::Bucket& bucket, NodeMaintenanceStatsTracker& statsTracker) const override;
/**
* If the given bucket is too large, generate a split operation for it,
* with higher priority than the given one.
*/
- IdealStateOperation::SP generateInterceptingSplit(
- document::BucketSpace bucketSpace,
- const BucketDatabase::Entry& e,
- api::StorageMessage::Priority pri);
+ IdealStateOperation::SP generateInterceptingSplit(document::BucketSpace bucketSpace, const BucketDatabase::Entry& e,
+ api::StorageMessage::Priority pri);
IdealStateMetricSet& getMetrics() noexcept { return _metrics; }
@@ -78,9 +74,7 @@ private:
void verify_only_live_nodes_in_context(const StateChecker::Context& c) const;
static void fillParentAndChildBuckets(StateChecker::Context& c);
static void fillSiblingBucket(StateChecker::Context& c);
- StateChecker::Result generateHighestPriority(
- const document::Bucket& bucket,
- NodeMaintenanceStatsTracker& statsTracker) const;
+ StateChecker::Result generateHighestPriority(const document::Bucket& bucket, NodeMaintenanceStatsTracker& statsTracker) const;
StateChecker::Result runStateCheckers(StateChecker::Context& c) const;
static BucketDatabase::Entry* getEntryForPrimaryBucket(StateChecker::Context& c);
diff --git a/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp b/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp
index d79bf2c4810..ea345176dd0 100644
--- a/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp
@@ -134,7 +134,7 @@ IdealStateMetricSet::IdealStateMetricSet()
IdealStateMetricSet::~IdealStateMetricSet() = default;
-void IdealStateMetricSet::setPendingOperations(const std::vector<uint64_t>& newMetrics) {
+void IdealStateMetricSet::setPendingOperations(std::span<uint64_t, IdealStateOperation::OPERATION_COUNT> newMetrics) {
for (uint32_t i = 0; i < IdealStateOperation::OPERATION_COUNT; i++) {
operations[i]->pending.set(newMetrics[i]);
}
diff --git a/storage/src/vespa/storage/distributor/idealstatemetricsset.h b/storage/src/vespa/storage/distributor/idealstatemetricsset.h
index 6528ad7dc72..e51e58ba3a4 100644
--- a/storage/src/vespa/storage/distributor/idealstatemetricsset.h
+++ b/storage/src/vespa/storage/distributor/idealstatemetricsset.h
@@ -5,6 +5,7 @@
#include <vespa/metrics/valuemetric.h>
#include <vespa/metrics/countmetric.h>
#include <vespa/storage/distributor/operations/idealstate/idealstateoperation.h>
+#include <span>
namespace storage::distributor {
@@ -61,7 +62,7 @@ public:
IdealStateMetricSet();
~IdealStateMetricSet() override;
- void setPendingOperations(const std::vector<uint64_t>& newMetrics);
+ void setPendingOperations(std::span<uint64_t, IdealStateOperation::OPERATION_COUNT> newMetrics);
};
} // storage::distributor
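A minimal, self-contained sketch of the fixed-extent std::span pattern that the new setPendingOperations signature relies on. The names and element count below are illustrative stand-ins, not Vespa APIs; the point is that a std::array with the exact compile-time size binds to the fixed-extent span without any run-time size negotiation.
#include <array>
#include <cstdint>
#include <span>

// kOperationCount stands in for IdealStateOperation::OPERATION_COUNT.
constexpr std::size_t kOperationCount = 7;

// Mirrors the shape of setPendingOperations: the span's extent is fixed at
// compile time, so callers must supply exactly kOperationCount elements.
void set_pending(std::span<uint64_t, kOperationCount> pending) {
    for (std::size_t i = 0; i < pending.size(); ++i) {
        (void)pending[i]; // the real code feeds each value into a per-operation metric
    }
}

void caller() {
    std::array<uint64_t, kOperationCount> counts{}; // zero-initialized
    set_pending(counts);                            // converts to the fixed-extent span
}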
diff --git a/storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h b/storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h
index 4fec2e57cbc..b894ec9a1cd 100644
--- a/storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h
+++ b/storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h
@@ -23,17 +23,23 @@ public:
static ScanResult createDone() { return ScanResult(true); }
static ScanResult createNotDone(document::BucketSpace bucketSpace, BucketDatabase::Entry entry) {
- return ScanResult(bucketSpace, entry);
+ return ScanResult(bucketSpace, std::move(entry));
}
private:
- explicit ScanResult(bool done) : _done(done), _bucketSpace(document::BucketSpace::invalid()), _entry() {}
- ScanResult(document::BucketSpace bucketSpace, const BucketDatabase::Entry& e) : _done(false), _bucketSpace(bucketSpace), _entry(e) {}
+ explicit ScanResult(bool done) noexcept
+ : _done(done),
+ _bucketSpace(document::BucketSpace::invalid()),
+ _entry()
+ {}
+ ScanResult(document::BucketSpace bucketSpace, BucketDatabase::Entry e) noexcept
+ : _done(false),
+ _bucketSpace(bucketSpace),
+ _entry(std::move(e))
+ {}
};
virtual ScanResult scanNext() = 0;
-
- virtual void reset() = 0;
};
}
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
index 47b8aec4aa6..b10f5abd0f1 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
@@ -1,42 +1,42 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "node_maintenance_stats_tracker.h"
+#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/stllike/hash_map_equal.hpp>
#include <ostream>
namespace storage::distributor {
const NodeMaintenanceStats NodeMaintenanceStatsTracker::_emptyNodeMaintenanceStats;
-void
-NodeMaintenanceStats::merge(const NodeMaintenanceStats& rhs)
-{
- movingOut += rhs.movingOut;
- syncing += rhs.syncing;
- copyingIn += rhs.copyingIn;
- copyingOut += rhs.copyingOut;
- total += rhs.total;
+NodeMaintenanceStats &
+NodeMaintenanceStatsTracker::stats(uint16_t node, document::BucketSpace bucketSpace) {
+ return _node_stats[BucketSpaceAndNode(node, bucketSpace)];
}
-namespace {
+const NodeMaintenanceStats &
+NodeMaintenanceStatsTracker::stats(uint16_t node, document::BucketSpace bucketSpace) const noexcept {
+ auto nodeItr = _node_stats.find(BucketSpaceAndNode(node, bucketSpace));
+ return (nodeItr != _node_stats.end()) ? nodeItr->second : _emptyNodeMaintenanceStats;
+}
-void
-merge_bucket_spaces_stats(NodeMaintenanceStatsTracker::BucketSpacesStats& dest,
- const NodeMaintenanceStatsTracker::BucketSpacesStats& src)
-{
- for (const auto& entry : src) {
- auto bucket_space = entry.first;
- dest[bucket_space].merge(entry.second);
- }
+const NodeMaintenanceStats&
+NodeMaintenanceStatsTracker::forNode(uint16_t node, document::BucketSpace bucketSpace) const noexcept {
+ return stats(node, bucketSpace);
}
+bool
+NodeMaintenanceStatsTracker::operator==(const NodeMaintenanceStatsTracker& rhs) const noexcept {
+ return ((_node_stats == rhs._node_stats) &&
+ (_max_observed_time_since_last_gc == rhs._max_observed_time_since_last_gc));
}
void
NodeMaintenanceStatsTracker::merge(const NodeMaintenanceStatsTracker& rhs)
{
for (const auto& entry : rhs._node_stats) {
- auto node_index = entry.first;
- merge_bucket_spaces_stats(_node_stats[node_index], entry.second);
+ auto key = entry.first;
+ _node_stats[key].merge(entry.second);
}
_max_observed_time_since_last_gc = std::max(_max_observed_time_since_last_gc,
rhs._max_observed_time_since_last_gc);
@@ -55,13 +55,24 @@ operator<<(std::ostream& os, const NodeMaintenanceStats& stats)
return os;
}
-NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker()
+NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker() noexcept
: _node_stats(),
_total_stats(),
_max_observed_time_since_last_gc(0)
{}
+NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker(NodeMaintenanceStatsTracker &&) noexcept = default;
+NodeMaintenanceStatsTracker & NodeMaintenanceStatsTracker::operator =(NodeMaintenanceStatsTracker &&) noexcept = default;
+NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker(const NodeMaintenanceStatsTracker &) = default;
NodeMaintenanceStatsTracker::~NodeMaintenanceStatsTracker() = default;
+void
+NodeMaintenanceStatsTracker::reset(size_t nodes) {
+ _node_stats.clear();
+ _node_stats.resize(nodes);
+ _total_stats = NodeMaintenanceStats();
+ _max_observed_time_since_last_gc = vespalib::duration::zero();
+}
+
}
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
index deeef118685..a5cb12de9a4 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
@@ -1,9 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <unordered_map>
#include <vespa/document/bucket/bucketspace.h>
#include <vespa/vespalib/util/time.h>
+#include <vespa/vespalib/stllike/hash_map.h>
namespace storage::distributor {
@@ -37,7 +37,13 @@ struct NodeMaintenanceStats
return !(*this == other);
}
- void merge(const NodeMaintenanceStats& rhs);
+ void merge(const NodeMaintenanceStats& rhs) noexcept {
+ movingOut += rhs.movingOut;
+ syncing += rhs.syncing;
+ copyingIn += rhs.copyingIn;
+ copyingOut += rhs.copyingOut;
+ total += rhs.total;
+ }
};
std::ostream& operator<<(std::ostream&, const NodeMaintenanceStats&);
@@ -45,8 +51,23 @@ std::ostream& operator<<(std::ostream&, const NodeMaintenanceStats&);
class NodeMaintenanceStatsTracker
{
public:
- using BucketSpacesStats = std::unordered_map<document::BucketSpace, NodeMaintenanceStats, document::BucketSpace::hash>;
- using PerNodeStats = std::unordered_map<uint16_t, BucketSpacesStats>;
+ class BucketSpaceAndNode {
+ public:
+ BucketSpaceAndNode(uint16_t node_in, document::BucketSpace bucketSpace_in) noexcept
+ : _bucketSpace(bucketSpace_in),
+ _node(node_in)
+ {}
+ uint32_t hash() const noexcept { return (uint32_t(_node) << 2) | (_bucketSpace.getId() & 0x3); }
+ bool operator == (const BucketSpaceAndNode & b) const noexcept {
+ return (_bucketSpace == b._bucketSpace) && (_node == b._node);
+ }
+ document::BucketSpace bucketSpace() const noexcept { return _bucketSpace; }
+ uint16_t node() const noexcept { return _node; }
+ private:
+ document::BucketSpace _bucketSpace;
+ uint16_t _node;
+ };
+ using PerNodeStats = vespalib::hash_map<BucketSpaceAndNode, NodeMaintenanceStats>;
private:
PerNodeStats _node_stats;
@@ -55,32 +76,39 @@ private:
static const NodeMaintenanceStats _emptyNodeMaintenanceStats;
+ NodeMaintenanceStats & stats(uint16_t node, document::BucketSpace bucketSpace);
+ const NodeMaintenanceStats & stats(uint16_t node, document::BucketSpace bucketSpace) const noexcept;
public:
- NodeMaintenanceStatsTracker();
+ NodeMaintenanceStatsTracker() noexcept;
+ NodeMaintenanceStatsTracker(NodeMaintenanceStatsTracker &&) noexcept;
+ NodeMaintenanceStatsTracker & operator =(NodeMaintenanceStatsTracker &&) noexcept;
+ NodeMaintenanceStatsTracker(const NodeMaintenanceStatsTracker &);
~NodeMaintenanceStatsTracker();
+ void reset(size_t nodes);
+ size_t numNodes() const { return _node_stats.size(); }
void incMovingOut(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].movingOut;
+ ++stats(node, bucketSpace).movingOut;
++_total_stats.movingOut;
}
void incSyncing(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].syncing;
+ ++stats(node, bucketSpace).syncing;
++_total_stats.syncing;
}
void incCopyingIn(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].copyingIn;
+ ++stats(node, bucketSpace).copyingIn;
++_total_stats.copyingIn;
}
void incCopyingOut(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].copyingOut;
+ ++stats(node, bucketSpace).copyingOut;
++_total_stats.copyingOut;
}
void incTotal(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].total;
+ ++stats(node, bucketSpace).total;
++_total_stats.total;
}
@@ -92,18 +120,9 @@ public:
* Returned statistics for a given node index and bucket space, or all zero statistics
* if none have been recorded yet
*/
- const NodeMaintenanceStats& forNode(uint16_t node, document::BucketSpace bucketSpace) const {
- auto nodeItr = _node_stats.find(node);
- if (nodeItr != _node_stats.end()) {
- auto bucketSpaceItr = nodeItr->second.find(bucketSpace);
- if (bucketSpaceItr != nodeItr->second.end()) {
- return bucketSpaceItr->second;
- }
- }
- return _emptyNodeMaintenanceStats;
- }
+ const NodeMaintenanceStats& forNode(uint16_t node, document::BucketSpace bucketSpace) const noexcept;
- const PerNodeStats& perNodeStats() const {
+ const PerNodeStats& perNodeStats() const noexcept {
return _node_stats;
}
@@ -118,10 +137,7 @@ public:
return _max_observed_time_since_last_gc;
}
- bool operator==(const NodeMaintenanceStatsTracker& rhs) const {
- return ((_node_stats == rhs._node_stats) &&
- (_max_observed_time_since_last_gc == rhs._max_observed_time_since_last_gc));
- }
+ bool operator==(const NodeMaintenanceStatsTracker& rhs) const noexcept;
void merge(const NodeMaintenanceStatsTracker& rhs);
};
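The tracker now keys its hash map on a combined (bucket space, node) value; the hash above packs the node index into the bits above the two lowest bits, which carry the low bits of the bucket-space id. A tiny arithmetic check of that packing, using made-up values (these are not real bucket-space ids):
#include <cassert>
#include <cstdint>

int main() {
    uint16_t node = 3;
    uint64_t space_id_low_bits = 0x1;  // hypothetical bucket-space id bits
    uint32_t h = (uint32_t(node) << 2) | (space_id_low_bits & 0x3);
    assert(h == 13);                   // (3 << 2) | 1
    return 0;
}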
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
index cb60e3eb0fc..ab27f2d2e43 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
@@ -13,22 +13,22 @@ SimpleMaintenanceScanner::SimpleMaintenanceScanner(BucketPriorityDatabase& bucke
_priorityGenerator(priorityGenerator),
_bucketSpaceRepo(bucketSpaceRepo),
_bucketSpaceItr(_bucketSpaceRepo.begin()),
- _bucketCursor()
+ _bucketCursor(),
+ _pendingMaintenance()
{
}
SimpleMaintenanceScanner::~SimpleMaintenanceScanner() = default;
bool
-SimpleMaintenanceScanner::GlobalMaintenanceStats::operator==(const GlobalMaintenanceStats& rhs) const
+SimpleMaintenanceScanner::GlobalMaintenanceStats::operator==(const GlobalMaintenanceStats& rhs) const noexcept
{
return pending == rhs.pending;
}
void
-SimpleMaintenanceScanner::GlobalMaintenanceStats::merge(const GlobalMaintenanceStats& rhs)
+SimpleMaintenanceScanner::GlobalMaintenanceStats::merge(const GlobalMaintenanceStats& rhs) noexcept
{
- assert(pending.size() == rhs.pending.size());
for (size_t i = 0; i < pending.size(); ++i) {
pending[i] += rhs.pending[i];
}
@@ -41,11 +41,20 @@ SimpleMaintenanceScanner::PendingMaintenanceStats::merge(const PendingMaintenanc
perNodeStats.merge(rhs.perNodeStats);
}
-SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats() = default;
+SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats() noexcept = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::~PendingMaintenanceStats() = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats(const PendingMaintenanceStats &) = default;
+SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats(PendingMaintenanceStats &&) noexcept = default;
SimpleMaintenanceScanner::PendingMaintenanceStats &
-SimpleMaintenanceScanner::PendingMaintenanceStats::operator = (const PendingMaintenanceStats &) = default;
+SimpleMaintenanceScanner::PendingMaintenanceStats::operator = (PendingMaintenanceStats &&) noexcept = default;
+
+SimpleMaintenanceScanner::PendingMaintenanceStats
+SimpleMaintenanceScanner::PendingMaintenanceStats::fetch_and_reset() {
+ PendingMaintenanceStats prev = std::move(*this);
+ global = GlobalMaintenanceStats();
+ perNodeStats.reset(prev.perNodeStats.numNodes());
+ return prev;
+}
MaintenanceScanner::ScanResult
SimpleMaintenanceScanner::scanNext()
@@ -64,16 +73,16 @@ SimpleMaintenanceScanner::scanNext()
countBucket(_bucketSpaceItr->first, entry.getBucketInfo());
prioritizeBucket(document::Bucket(_bucketSpaceItr->first, entry.getBucketId()));
_bucketCursor = entry.getBucketId();
- return ScanResult::createNotDone(_bucketSpaceItr->first, entry);
+ return ScanResult::createNotDone(_bucketSpaceItr->first, std::move(entry));
}
}
-void
-SimpleMaintenanceScanner::reset()
+SimpleMaintenanceScanner::PendingMaintenanceStats
+SimpleMaintenanceScanner::fetch_and_reset()
{
_bucketCursor = document::BucketId();
_bucketSpaceItr = _bucketSpaceRepo.begin();
- _pendingMaintenance = PendingMaintenanceStats();
+ return _pendingMaintenance.fetch_and_reset();
}
void
@@ -99,8 +108,7 @@ SimpleMaintenanceScanner::prioritizeBucket(const document::Bucket &bucket)
}
std::ostream&
-operator<<(std::ostream& os,
- const SimpleMaintenanceScanner::GlobalMaintenanceStats& stats)
+operator<<(std::ostream& os, const SimpleMaintenanceScanner::GlobalMaintenanceStats& stats)
{
using MO = MaintenanceOperation;
os << "delete bucket: " << stats.pending[MO::DELETE_BUCKET]
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
index a867d4a5267..3d1a57a6422 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
@@ -13,21 +13,24 @@ class SimpleMaintenanceScanner : public MaintenanceScanner
{
public:
struct GlobalMaintenanceStats {
- std::vector<uint64_t> pending;
+ std::array<uint64_t, MaintenanceOperation::OPERATION_COUNT> pending;
- GlobalMaintenanceStats()
- : pending(MaintenanceOperation::OPERATION_COUNT)
+ GlobalMaintenanceStats() noexcept
+ : pending()
{ }
- bool operator==(const GlobalMaintenanceStats& rhs) const;
- void merge(const GlobalMaintenanceStats& rhs);
+ bool operator==(const GlobalMaintenanceStats& rhs) const noexcept;
+ void merge(const GlobalMaintenanceStats& rhs) noexcept;
};
struct PendingMaintenanceStats {
- PendingMaintenanceStats();
+ PendingMaintenanceStats() noexcept;
PendingMaintenanceStats(const PendingMaintenanceStats &);
- PendingMaintenanceStats &operator = (const PendingMaintenanceStats &);
+ PendingMaintenanceStats &operator = (const PendingMaintenanceStats &) = delete;
+ PendingMaintenanceStats(PendingMaintenanceStats &&) noexcept;
+ PendingMaintenanceStats &operator = (PendingMaintenanceStats &&) noexcept;
~PendingMaintenanceStats();
- GlobalMaintenanceStats global;
+ [[nodiscard]] PendingMaintenanceStats fetch_and_reset();
+ GlobalMaintenanceStats global;
NodeMaintenanceStatsTracker perNodeStats;
void merge(const PendingMaintenanceStats& rhs);
@@ -47,14 +50,15 @@ public:
const DistributorBucketSpaceRepo& bucketSpaceRepo);
SimpleMaintenanceScanner(const SimpleMaintenanceScanner&) = delete;
SimpleMaintenanceScanner& operator=(const SimpleMaintenanceScanner&) = delete;
- ~SimpleMaintenanceScanner();
+ ~SimpleMaintenanceScanner() override;
ScanResult scanNext() override;
- void reset() override;
+ [[nodiscard]] PendingMaintenanceStats fetch_and_reset();
// TODO: move out into own interface!
void prioritizeBucket(const document::Bucket &id);
+ // TODO Only for testing
const PendingMaintenanceStats& getPendingMaintenanceStats() const noexcept {
return _pendingMaintenance;
}
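The reset() entry point is replaced by fetch_and_reset(), which hands the caller the accumulated statistics and leaves the scanner's own copy freshly reset in a single step. A generic sketch of that move-out-and-reinitialize idiom, using a stand-in type rather than the Vespa classes:
#include <cstdint>
#include <utility>
#include <vector>

// Stand-in for the pending-operation counters; not the Vespa type.
struct Stats {
    std::vector<uint64_t> pending;

    [[nodiscard]] Stats fetch_and_reset() {
        Stats prev = std::move(*this);           // steal the accumulated contents
        pending.assign(prev.pending.size(), 0);  // leave *this in a clean, same-sized state
        return prev;                             // caller receives the snapshot
    }
};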
diff --git a/storage/src/vespa/storage/distributor/messagetracker.cpp b/storage/src/vespa/storage/distributor/messagetracker.cpp
index 8830e5ecabc..842238aa24c 100644
--- a/storage/src/vespa/storage/distributor/messagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/messagetracker.cpp
@@ -3,6 +3,7 @@
#include "messagetracker.h"
#include <vespa/storageapi/messageapi/bucketcommand.h>
#include <vespa/storageapi/messageapi/bucketreply.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
#include <cinttypes>
#include <vespa/log/log.h>
@@ -19,10 +20,11 @@ MessageTracker::~MessageTracker() = default;
void
MessageTracker::flushQueue(MessageSender& sender)
{
- for (uint32_t i = 0; i < _commandQueue.size(); i++) {
- _commandQueue[i]._msg->setAddress(api::StorageMessageAddress::create(_cluster_ctx.cluster_name_ptr(), lib::NodeType::STORAGE, _commandQueue[i]._target));
- _sentMessages[_commandQueue[i]._msg->getMsgId()] = _commandQueue[i]._target;
- sender.sendCommand(_commandQueue[i]._msg);
+ _sentMessages.resize(_sentMessages.size() + _commandQueue.size());
+ for (const auto & toSend : _commandQueue) {
+ toSend._msg->setAddress(api::StorageMessageAddress::create(_cluster_ctx.cluster_name_ptr(), lib::NodeType::STORAGE, toSend._target));
+ _sentMessages[toSend._msg->getMsgId()] = toSend._target;
+ sender.sendCommand(toSend._msg);
}
_commandQueue.clear();
@@ -31,21 +33,14 @@ MessageTracker::flushQueue(MessageSender& sender)
uint16_t
MessageTracker::handleReply(api::BucketReply& reply)
{
- std::map<uint64_t, uint16_t>::iterator found = _sentMessages.find(reply.getMsgId());
- if (found == _sentMessages.end()) {
+ const auto found = _sentMessages.find(reply.getMsgId());
+ if (found == _sentMessages.end()) [[unlikely]] {
LOG(warning, "Received reply %" PRIu64 " for callback which we have no recollection of", reply.getMsgId());
return (uint16_t)-1;
- } else {
- uint16_t node = found->second;
- _sentMessages.erase(found);
- return node;
}
-}
-
-bool
-MessageTracker::finished()
-{
- return _sentMessages.empty();
+ uint16_t node = found->second;
+ _sentMessages.erase(found);
+ return node;
}
}
diff --git a/storage/src/vespa/storage/distributor/messagetracker.h b/storage/src/vespa/storage/distributor/messagetracker.h
index 73e2461eb7a..a0234f425a0 100644
--- a/storage/src/vespa/storage/distributor/messagetracker.h
+++ b/storage/src/vespa/storage/distributor/messagetracker.h
@@ -4,8 +4,7 @@
#include <vespa/storage/common/cluster_context.h>
#include <vespa/storage/common/messagesender.h>
#include <vespa/vespalib/stllike/string.h>
-#include <vector>
-#include <map>
+#include <vespa/vespalib/stllike/hash_map.h>
namespace storage::api {
class BucketCommand;
@@ -18,16 +17,17 @@ class MessageTracker {
public:
class ToSend {
public:
- ToSend(std::shared_ptr<api::BucketCommand> msg, uint16_t target) noexcept :
- _msg(std::move(msg)), _target(target) {};
+ ToSend(std::shared_ptr<api::BucketCommand> msg, uint16_t target) noexcept
+ : _msg(std::move(msg)), _target(target)
+ {}
std::shared_ptr<api::BucketCommand> _msg;
uint16_t _target;
};
MessageTracker(const ClusterContext& cluster_context);
- MessageTracker(MessageTracker&&) = default;
- MessageTracker& operator=(MessageTracker&&) = delete;
+ MessageTracker(MessageTracker&&) noexcept = default;
+ MessageTracker& operator=(MessageTracker&&) noexcept = delete;
MessageTracker(const MessageTracker &) = delete;
MessageTracker& operator=(const MessageTracker&) = delete;
~MessageTracker();
@@ -35,6 +35,9 @@ public:
void queueCommand(std::shared_ptr<api::BucketCommand> msg, uint16_t target) {
_commandQueue.emplace_back(std::move(msg), target);
}
+ void reserve_more_commands(size_t sz) {
+ _commandQueue.reserve(_commandQueue.size() + sz);
+ }
void flushQueue(MessageSender& sender);
@@ -46,13 +49,15 @@ public:
/**
Returns true if all messages sent have been received.
*/
- bool finished();
+ bool finished() const noexcept {
+ return _sentMessages.empty();
+ }
protected:
- std::vector<ToSend> _commandQueue;
+ std::vector<ToSend> _commandQueue;
// Keeps track of which node a message was sent to.
- std::map<uint64_t, uint16_t> _sentMessages;
- const ClusterContext& _cluster_ctx;
+ vespalib::hash_map<uint64_t, uint16_t> _sentMessages;
+ const ClusterContext& _cluster_ctx;
};
}
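MessageTracker's bookkeeping is unchanged in spirit: remember which node each command was sent to, look the node up again when the reply arrives, and report finished() once the map drains. A self-contained sketch of that pattern using std::unordered_map in place of vespalib::hash_map (names below are illustrative, not Vespa APIs):
#include <cstdint>
#include <unordered_map>

class ReplyRouter {
    std::unordered_map<uint64_t, uint16_t> _sent; // message id -> target node
public:
    void on_send(uint64_t msg_id, uint16_t node) { _sent[msg_id] = node; }

    // Returns the node the message went to, or UINT16_MAX if the id is unknown,
    // mirroring the (uint16_t)-1 sentinel used by MessageTracker::handleReply.
    uint16_t on_reply(uint64_t msg_id) {
        auto it = _sent.find(msg_id);
        if (it == _sent.end()) {
            return UINT16_MAX;
        }
        uint16_t node = it->second;
        _sent.erase(it);
        return node;
    }

    bool finished() const noexcept { return _sent.empty(); }
};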
diff --git a/storage/src/vespa/storage/distributor/min_replica_provider.cpp b/storage/src/vespa/storage/distributor/min_replica_provider.cpp
index f503672af39..52780b99948 100644
--- a/storage/src/vespa/storage/distributor/min_replica_provider.cpp
+++ b/storage/src/vespa/storage/distributor/min_replica_provider.cpp
@@ -5,8 +5,7 @@
namespace storage::distributor {
void
-merge_min_replica_stats(std::unordered_map<uint16_t, uint32_t>& dest,
- const std::unordered_map<uint16_t, uint32_t>& src)
+merge_min_replica_stats(MinReplicaMap & dest, const MinReplicaMap & src)
{
for (const auto& entry : src) {
auto node_index = entry.first;
diff --git a/storage/src/vespa/storage/distributor/min_replica_provider.h b/storage/src/vespa/storage/distributor/min_replica_provider.h
index a4374b906fe..75d3a150d21 100644
--- a/storage/src/vespa/storage/distributor/min_replica_provider.h
+++ b/storage/src/vespa/storage/distributor/min_replica_provider.h
@@ -1,11 +1,12 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <stdint.h>
-#include <unordered_map>
+#include <vespa/vespalib/stllike/hash_map.h>
namespace storage::distributor {
+using MinReplicaMap = vespalib::hash_map<uint16_t, uint32_t>;
+
class MinReplicaProvider
{
public:
@@ -17,11 +18,10 @@ public:
* Can be called at any time after registration from another thread context
* and the call must thus be thread safe and data race free.
*/
- virtual std::unordered_map<uint16_t, uint32_t> getMinReplica() const = 0;
+ virtual MinReplicaMap getMinReplica() const = 0;
};
-void merge_min_replica_stats(std::unordered_map<uint16_t, uint32_t>& dest,
- const std::unordered_map<uint16_t, uint32_t>& src);
+void merge_min_replica_stats(MinReplicaMap & dest, const MinReplicaMap & src);
}
diff --git a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
index b00e4ce3cba..01c2875671b 100644
--- a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
+++ b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.cpp
@@ -83,7 +83,7 @@ MultiThreadedStripeAccessGuard::merge_entries_into_db(document::BucketSpace buck
const lib::Distribution& distribution,
const lib::ClusterState& new_state,
const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
+ const OutdatedNodes & outdated_nodes,
const std::vector<dbtransition::Entry>& entries)
{
if (entries.empty()) {
diff --git a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
index c52a01fdded..7a58a784eda 100644
--- a/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
+++ b/storage/src/vespa/storage/distributor/multi_threaded_stripe_access_guard.h
@@ -46,7 +46,7 @@ public:
const lib::Distribution& distribution,
const lib::ClusterState& new_state,
const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
+ const OutdatedNodes & outdated_nodes,
const std::vector<dbtransition::Entry>& entries) override;
void update_read_snapshot_before_db_pruning() override;
diff --git a/storage/src/vespa/storage/distributor/nodeinfo.cpp b/storage/src/vespa/storage/distributor/nodeinfo.cpp
index 6bb1949d606..3e645f57393 100644
--- a/storage/src/vespa/storage/distributor/nodeinfo.cpp
+++ b/storage/src/vespa/storage/distributor/nodeinfo.cpp
@@ -5,14 +5,16 @@
namespace storage::distributor {
-NodeInfo::NodeInfo(const framework::Clock& clock)
+NodeInfo::NodeInfo(const framework::Clock& clock) noexcept
: _clock(clock) {}
-uint32_t NodeInfo::getPendingCount(uint16_t idx) const {
+uint32_t
+NodeInfo::getPendingCount(uint16_t idx) const {
return getNode(idx)._pending;
}
-bool NodeInfo::isBusy(uint16_t idx) const {
+bool
+NodeInfo::isBusy(uint16_t idx) const {
const SingleNodeInfo& info = getNode(idx);
if (info._busyUntilTime.time_since_epoch().count() != 0) {
if (_clock.getMonotonicTime() > info._busyUntilTime) {
@@ -25,15 +27,18 @@ bool NodeInfo::isBusy(uint16_t idx) const {
return false;
}
-void NodeInfo::setBusy(uint16_t idx, vespalib::duration for_duration) {
+void
+NodeInfo::setBusy(uint16_t idx, vespalib::duration for_duration) {
getNode(idx)._busyUntilTime = _clock.getMonotonicTime() + for_duration;
}
-void NodeInfo::incPending(uint16_t idx) {
+void
+NodeInfo::incPending(uint16_t idx) {
getNode(idx)._pending++;
}
-void NodeInfo::decPending(uint16_t idx) {
+void
+NodeInfo::decPending(uint16_t idx) {
SingleNodeInfo& info = getNode(idx);
if (info._pending > 0) {
@@ -41,12 +46,14 @@ void NodeInfo::decPending(uint16_t idx) {
}
}
-void NodeInfo::clearPending(uint16_t idx) {
+void
+NodeInfo::clearPending(uint16_t idx) {
SingleNodeInfo& info = getNode(idx);
info._pending = 0;
}
-NodeInfo::SingleNodeInfo& NodeInfo::getNode(uint16_t idx) {
+NodeInfo::SingleNodeInfo&
+NodeInfo::getNode(uint16_t idx) {
const auto index_lbound = static_cast<size_t>(idx) + 1;
while (_nodes.size() < index_lbound) {
_nodes.emplace_back();
@@ -55,7 +62,8 @@ NodeInfo::SingleNodeInfo& NodeInfo::getNode(uint16_t idx) {
return _nodes[idx];
}
-const NodeInfo::SingleNodeInfo& NodeInfo::getNode(uint16_t idx) const {
+const NodeInfo::SingleNodeInfo&
+NodeInfo::getNode(uint16_t idx) const {
const auto index_lbound = static_cast<size_t>(idx) + 1;
while (_nodes.size() < index_lbound) {
_nodes.emplace_back();
diff --git a/storage/src/vespa/storage/distributor/nodeinfo.h b/storage/src/vespa/storage/distributor/nodeinfo.h
index 7f0716d7804..446739ca7e9 100644
--- a/storage/src/vespa/storage/distributor/nodeinfo.h
+++ b/storage/src/vespa/storage/distributor/nodeinfo.h
@@ -17,30 +17,24 @@ namespace storage::distributor {
class NodeInfo {
public:
- explicit NodeInfo(const framework::Clock& clock);
-
+ explicit NodeInfo(const framework::Clock& clock) noexcept;
uint32_t getPendingCount(uint16_t idx) const;
-
bool isBusy(uint16_t idx) const;
-
void setBusy(uint16_t idx, vespalib::duration for_duration);
-
void incPending(uint16_t idx);
-
void decPending(uint16_t idx);
-
void clearPending(uint16_t idx);
private:
struct SingleNodeInfo {
- SingleNodeInfo() : _pending(0), _busyUntilTime() {}
+ SingleNodeInfo() noexcept : _pending(0), _busyUntilTime() {}
- uint32_t _pending;
+ uint32_t _pending;
mutable vespalib::steady_time _busyUntilTime;
};
mutable std::vector<SingleNodeInfo> _nodes;
- const framework::Clock& _clock;
+ const framework::Clock& _clock;
const SingleNodeInfo& getNode(uint16_t idx) const;
SingleNodeInfo& getNode(uint16_t idx);
diff --git a/storage/src/vespa/storage/distributor/operationowner.cpp b/storage/src/vespa/storage/distributor/operationowner.cpp
index 7b7c9f431f7..c92544c8cb5 100644
--- a/storage/src/vespa/storage/distributor/operationowner.cpp
+++ b/storage/src/vespa/storage/distributor/operationowner.cpp
@@ -73,7 +73,7 @@ OperationOwner::onClose()
void
OperationOwner::erase(api::StorageMessage::Id msgId)
{
- _sentMessageMap.pop(msgId);
+ (void)_sentMessageMap.pop(msgId);
}
}
diff --git a/storage/src/vespa/storage/distributor/operations/CMakeLists.txt b/storage/src/vespa/storage/distributor/operations/CMakeLists.txt
index 5c6a1f3d84c..8cf0470f674 100644
--- a/storage/src/vespa/storage/distributor/operations/CMakeLists.txt
+++ b/storage/src/vespa/storage/distributor/operations/CMakeLists.txt
@@ -1,6 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(storage_distributoroperation OBJECT
SOURCES
+ cancel_scope.cpp
operation.cpp
DEPENDS
)
diff --git a/storage/src/vespa/storage/distributor/operations/cancel_scope.cpp b/storage/src/vespa/storage/distributor/operations/cancel_scope.cpp
new file mode 100644
index 00000000000..af62b369517
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/operations/cancel_scope.cpp
@@ -0,0 +1,52 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "cancel_scope.h"
+
+namespace storage::distributor {
+
+CancelScope::CancelScope()
+ : _cancelled_nodes(),
+ _fully_cancelled(false)
+{
+}
+
+CancelScope::CancelScope(fully_cancelled_ctor_tag) noexcept
+ : _cancelled_nodes(),
+ _fully_cancelled(true)
+{
+}
+
+CancelScope::CancelScope(CancelledNodeSet nodes) noexcept
+ : _cancelled_nodes(std::move(nodes)),
+ _fully_cancelled(false)
+{
+}
+
+CancelScope::~CancelScope() = default;
+
+CancelScope::CancelScope(const CancelScope&) = default;
+CancelScope& CancelScope::operator=(const CancelScope&) = default;
+
+CancelScope::CancelScope(CancelScope&&) noexcept = default;
+CancelScope& CancelScope::operator=(CancelScope&&) noexcept = default;
+
+void CancelScope::add_cancelled_node(uint16_t node) {
+ _cancelled_nodes.insert(node);
+}
+
+void CancelScope::merge(const CancelScope& other) {
+ _fully_cancelled |= other._fully_cancelled;
+ // Not using iterator insert(first, last) since that explicitly resizes, which we want to avoid here.
+ for (uint16_t node : other._cancelled_nodes) {
+ _cancelled_nodes.insert(node);
+ }
+}
+
+CancelScope CancelScope::of_fully_cancelled() noexcept {
+ return CancelScope(fully_cancelled_ctor_tag{});
+}
+
+CancelScope CancelScope::of_node_subset(CancelledNodeSet nodes) noexcept {
+ return CancelScope(std::move(nodes));
+}
+
+}
diff --git a/storage/src/vespa/storage/distributor/operations/cancel_scope.h b/storage/src/vespa/storage/distributor/operations/cancel_scope.h
new file mode 100644
index 00000000000..7619a64d39f
--- /dev/null
+++ b/storage/src/vespa/storage/distributor/operations/cancel_scope.h
@@ -0,0 +1,62 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/vespalib/stllike/hash_set.h>
+
+namespace storage::distributor {
+
+/**
+ * In the face of concurrent cluster state changes, cluster topology reconfigurations etc.,
+ * it's possible for there to be pending mutating operations to nodes that the distributor
+ * no longer should keep track of. Such operations must therefore be _cancelled_, either
+ * fully or partially. A CancelScope represents the granularity at which an operation should
+ * be cancelled.
+ *
+ * In the case of one or more nodes becoming unavailable, `fully_cancelled()` will be false
+ * and `node_is_cancelled(x)` will return whether node `x` is explicitly cancelled.
+ *
+ * In the case of ownership transfers, `fully_cancelled()` will be true since the distributor
+ * should no longer have any knowledge of the bucket. `node_is_cancelled(x)` is always
+ * implicitly true for all values of `x` for full cancellations.
+ */
+class CancelScope {
+public:
+ using CancelledNodeSet = vespalib::hash_set<uint16_t>;
+private:
+ CancelledNodeSet _cancelled_nodes;
+ bool _fully_cancelled;
+
+ struct fully_cancelled_ctor_tag {};
+
+ explicit CancelScope(fully_cancelled_ctor_tag) noexcept;
+ explicit CancelScope(CancelledNodeSet nodes) noexcept;
+public:
+ CancelScope();
+ ~CancelScope();
+
+ CancelScope(const CancelScope&);
+ CancelScope& operator=(const CancelScope&);
+
+ CancelScope(CancelScope&&) noexcept;
+ CancelScope& operator=(CancelScope&&) noexcept;
+
+ void add_cancelled_node(uint16_t node);
+ void merge(const CancelScope& other);
+
+ [[nodiscard]] bool fully_cancelled() const noexcept { return _fully_cancelled; }
+ [[nodiscard]] bool is_cancelled() const noexcept {
+ return (_fully_cancelled || !_cancelled_nodes.empty());
+ }
+ [[nodiscard]] bool node_is_cancelled(uint16_t node) const noexcept {
+ return (fully_cancelled() || _cancelled_nodes.contains(node));
+ }
+
+ [[nodiscard]] const CancelledNodeSet& cancelled_nodes() const noexcept {
+ return _cancelled_nodes;
+ }
+
+ static CancelScope of_fully_cancelled() noexcept;
+ static CancelScope of_node_subset(CancelledNodeSet nodes) noexcept;
+};
+
+}
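A short usage sketch of the CancelScope API added above, exercising only the members declared in cancel_scope.h; it assumes the header compiles in-tree and is illustrative rather than part of the patch.
#include "cancel_scope.h"
#include <cassert>

void cancel_scope_example() {
    using storage::distributor::CancelScope;

    CancelScope scope;                      // nothing cancelled yet
    assert(!scope.is_cancelled());

    scope.add_cancelled_node(1);            // nodes 1 and 4 became unavailable
    scope.add_cancelled_node(4);
    assert(scope.node_is_cancelled(1));
    assert(!scope.node_is_cancelled(2));

    // An ownership transfer cancels the whole operation; merging propagates that,
    // after which every node is implicitly considered cancelled.
    scope.merge(CancelScope::of_fully_cancelled());
    assert(scope.fully_cancelled());
    assert(scope.node_is_cancelled(2));
}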
diff --git a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
index 0e12e3e3019..bd7f3709575 100644
--- a/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/check_condition.cpp
@@ -104,7 +104,12 @@ CheckCondition::handle_reply(DistributorStripeMessageSender& sender,
}
}
-void CheckCondition::cancel(DistributorStripeMessageSender& sender) {
+void CheckCondition::cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) {
+ IntermediateMessageSender proxy_sender(_sent_message_map, _cond_get_op, sender);
+ _cond_get_op->cancel(proxy_sender, cancel_scope);
+}
+
+void CheckCondition::close(DistributorStripeMessageSender& sender) {
IntermediateMessageSender proxy_sender(_sent_message_map, _cond_get_op, sender);
_cond_get_op->onClose(proxy_sender);
// We don't propagate any generated reply from the GetOperation, as its existence
@@ -163,6 +168,12 @@ void CheckCondition::handle_internal_get_operation_reply(std::shared_ptr<api::St
reply->steal_trace());
return;
}
+ if (_cond_get_op->is_cancelled()) {
+ _outcome.emplace(api::ReturnCode(api::ReturnCode::ABORTED,
+ "Operation has been cancelled (likely due to a cluster state change)"),
+ reply->steal_trace());
+ return;
+ }
auto state_version_now = _bucket_space.getClusterState().getVersion();
if (_bucket_space.has_pending_cluster_state()) {
state_version_now = _bucket_space.get_pending_cluster_state().getVersion();
diff --git a/storage/src/vespa/storage/distributor/operations/external/check_condition.h b/storage/src/vespa/storage/distributor/operations/external/check_condition.h
index 999b79adc3d..92a8bc62ae6 100644
--- a/storage/src/vespa/storage/distributor/operations/external/check_condition.h
+++ b/storage/src/vespa/storage/distributor/operations/external/check_condition.h
@@ -17,6 +17,7 @@ namespace storage::api { class StorageReply; }
namespace storage::distributor {
+class CancelScope;
class DistributorBucketSpace;
class DistributorNodeContext;
class DistributorStripeMessageSender;
@@ -122,7 +123,8 @@ public:
void start_and_send(DistributorStripeMessageSender& sender);
void handle_reply(DistributorStripeMessageSender& sender,
const std::shared_ptr<api::StorageReply>& reply);
- void cancel(DistributorStripeMessageSender& sender);
+ void cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope);
+ void close(DistributorStripeMessageSender& sender);
[[nodiscard]] std::optional<Outcome>& maybe_outcome() noexcept {
return _outcome;
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
index 8c6fdb314f3..e7832fd19e5 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
@@ -10,7 +10,6 @@
#include <vespa/storage/distributor/storage_node_up_states.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h>
#include <vespa/vdslib/state/clusterstate.h>
#include <algorithm>
@@ -67,13 +66,11 @@ PutOperation::insertDatabaseEntryAndScheduleCreateBucket(const OperationTargetLi
assert(!multipleBuckets);
(void) multipleBuckets;
BucketDatabase::Entry entry(_bucket_space.getBucketDatabase().get(lastBucket));
- std::vector<uint16_t> idealState(
- _bucket_space.get_ideal_service_layer_nodes_bundle(lastBucket).get_available_nodes());
- active = ActiveCopy::calculate(idealState, _bucket_space.getDistribution(), entry,
+ active = ActiveCopy::calculate(_bucket_space.get_ideal_service_layer_nodes_bundle(lastBucket).available_to_index(), _bucket_space.getDistribution(), entry,
_op_ctx.distributor_config().max_activation_inhibited_out_of_sync_groups());
LOG(debug, "Active copies for bucket %s: %s", entry.getBucketId().toString().c_str(), active.toString().c_str());
for (uint32_t i=0; i<active.size(); ++i) {
- BucketCopy copy(*entry->getNode(active[i]._nodeIndex));
+ BucketCopy copy(*entry->getNode(active[i].nodeIndex()));
copy.setActive(true);
entry->updateNode(copy);
}
@@ -112,6 +109,8 @@ PutOperation::sendPutToBucketOnNode(document::BucketSpace bucketSpace, const doc
bool PutOperation::has_unavailable_targets_in_pending_state(const OperationTargetList& targets) const {
// TODO handle this explicitly as part of operation abort/cancel edge
+ // -> we have yet to send anything at this point
+ // -> shouldn't ExternalOperationHandler deal with this before starting the op?
auto* pending_state = _op_ctx.pending_cluster_state_or_null(_msg->getBucket().getBucketSpace());
if (!pending_state) {
return false;
@@ -211,11 +210,11 @@ void PutOperation::start_direct_put_dispatch(DistributorStripeMessageSender& sen
}
if (!createBucketBatch.empty()) {
- _tracker.queueMessageBatch(createBucketBatch);
+ _tracker.queueMessageBatch(std::move(createBucketBatch));
}
std::vector<PersistenceMessageTracker::ToSend> putBatch;
-
+ putBatch.reserve(targets.size());
// Now send PUTs
for (const auto& target : targets) {
sendPutToBucketOnNode(_msg->getBucket().getBucketSpace(), target.getBucketId(),
@@ -223,7 +222,7 @@ void PutOperation::start_direct_put_dispatch(DistributorStripeMessageSender& sen
}
if (!putBatch.empty()) {
- _tracker.queueMessageBatch(putBatch);
+ _tracker.queueMessageBatch(std::move(putBatch));
} else {
const char* error = "Can't store document: No storage nodes available";
LOG(debug, "%s", error);
@@ -248,6 +247,15 @@ void PutOperation::start_direct_put_dispatch(DistributorStripeMessageSender& sen
_msg = std::shared_ptr<api::PutCommand>();
}
+void
+PutOperation::on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope)
+{
+ if (_check_condition) {
+ _check_condition->cancel(sender, cancel_scope);
+ }
+ _tracker.cancel(cancel_scope);
+}
+
bool
PutOperation::shouldImplicitlyActivateReplica(const OperationTargetList& targets) const
{
@@ -305,7 +313,7 @@ void
PutOperation::onClose(DistributorStripeMessageSender& sender)
{
if (_check_condition) {
- _check_condition->cancel(sender);
+ _check_condition->close(sender);
}
_tracker.fail(sender, api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down"));
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.h b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
index 635accc1865..8b8e3e15375 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.h
@@ -60,6 +60,8 @@ private:
void sendPutToBucketOnNode(document::BucketSpace bucketSpace, const document::BucketId& bucketId,
uint16_t node, std::vector<PersistenceMessageTracker::ToSend>& putBatch);
+ void on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) override;
+
[[nodiscard]] bool shouldImplicitlyActivateReplica(const OperationTargetList& targets) const;
[[nodiscard]] bool has_unavailable_targets_in_pending_state(const OperationTargetList& targets) const;
diff --git a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
index dd6e1e93791..5f52a8208fc 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
@@ -79,13 +79,11 @@ RemoveLocationOperation::onStart(DistributorStripeMessageSender& sender)
std::vector<uint16_t> nodes = e->getNodes();
for (uint32_t i = 0; i < nodes.size(); i++) {
- std::shared_ptr<api::RemoveLocationCommand> command(
- new api::RemoveLocationCommand(
- _msg->getDocumentSelection(),
- document::Bucket(_msg->getBucket().getBucketSpace(), e.getBucketId())));
+ auto command = std::make_shared<api::RemoveLocationCommand>(_msg->getDocumentSelection(),
+ document::Bucket(_msg->getBucket().getBucketSpace(), e.getBucketId()));
copyMessageSettings(*_msg, *command);
- _tracker.queueCommand(command, nodes[i]);
+ _tracker.queueCommand(std::move(command), nodes[i]);
sent = true;
}
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
index 96182b0744f..be43aac3d9e 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
@@ -65,9 +65,7 @@ void RemoveOperation::start_conditional_remove(DistributorStripeMessageSender& s
void RemoveOperation::start_direct_remove_dispatch(DistributorStripeMessageSender& sender) {
LOG(spam, "Started remove on document %s", _msg->getDocumentId().toString().c_str());
- document::BucketId bucketId(
- _node_ctx.bucket_id_factory().getBucketId(
- _msg->getDocumentId()));
+ document::BucketId bucketId(_node_ctx.bucket_id_factory().getBucketId(_msg->getDocumentId()));
std::vector<BucketDatabase::Entry> entries;
_bucket_space.getBucketDatabase().getParents(bucketId, entries);
@@ -79,8 +77,7 @@ void RemoveOperation::start_direct_remove_dispatch(DistributorStripeMessageSende
messages.reserve(e->getNodeCount());
for (uint32_t i = 0; i < e->getNodeCount(); i++) {
auto command = std::make_shared<api::RemoveCommand>(document::Bucket(_msg->getBucket().getBucketSpace(), e.getBucketId()),
- _msg->getDocumentId(),
- _msg->getTimestamp());
+ _msg->getDocumentId(), _msg->getTimestamp());
copyMessageSettings(*_msg, *command);
command->getTrace().setLevel(_msg->getTrace().getLevel());
@@ -90,7 +87,7 @@ void RemoveOperation::start_direct_remove_dispatch(DistributorStripeMessageSende
sent = true;
}
- _tracker.queueMessageBatch(messages);
+ _tracker.queueMessageBatch(std::move(messages));
}
if (!sent) {
@@ -159,9 +156,21 @@ void RemoveOperation::on_completed_check_condition(CheckCondition::Outcome& outc
void
RemoveOperation::onClose(DistributorStripeMessageSender& sender)
{
+ if (_check_condition) {
+ _check_condition->close(sender);
+ }
_tracker.fail(sender, api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down"));
}
+void
+RemoveOperation::on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope)
+{
+ if (_check_condition) {
+ _check_condition->cancel(sender, cancel_scope);
+ }
+ _tracker.cancel(cancel_scope);
+}
+
bool RemoveOperation::has_condition() const noexcept {
return _msg->hasTestAndSetCondition();
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
index 9f3a98294ea..221def81fdc 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.h
@@ -29,6 +29,7 @@ public:
void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
void onClose(DistributorStripeMessageSender& sender) override;
+ void on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) override;
private:
PersistenceMessageTrackerImpl _tracker_instance;
diff --git a/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp
index a0b3f12f76b..1a8d1cb8f88 100644
--- a/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/statbucketoperation.cpp
@@ -4,6 +4,7 @@
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/stat.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
+#include <sstream>
#include <vespa/log/log.h>
LOG_SETUP(".distributor.operations.external.stat_bucket");
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
index 73c65f54b21..2d1c469d072 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.cpp
@@ -13,6 +13,7 @@
#include <vespa/storage/distributor/distributor_bucket_space_repo.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/vdslib/state/cluster_state_bundle.h>
+#include <vespa/vespalib/stllike/hash_set.hpp>
#include <cinttypes>
#include <vespa/log/log.h>
@@ -68,10 +69,8 @@ TwoPhaseUpdateOperation::stateToString(SendState state) noexcept
case SendState::SINGLE_GET_SENT: return "SINGLE_GET_SENT";
case SendState::FULL_GETS_SENT: return "FULL_GETS_SENT";
case SendState::PUTS_SENT: return "PUTS_SENT";
- default:
- assert(!"Unknown state");
- return "";
}
+ abort();
}
void
@@ -130,7 +129,7 @@ TwoPhaseUpdateOperation::get_bucket_database_entries() const
}
bool
-TwoPhaseUpdateOperation::isFastPathPossible(const std::vector<BucketDatabase::Entry>& entries) const
+TwoPhaseUpdateOperation::isFastPathPossible(const std::vector<BucketDatabase::Entry>& entries)
{
// Fast path iff bucket exists AND is consistent (split and copies).
if (entries.size() != 1) {
@@ -245,6 +244,16 @@ TwoPhaseUpdateOperation::sendLostOwnershipTransientErrorReply(DistributorStripeM
}
void
+TwoPhaseUpdateOperation::send_operation_cancelled_reply(DistributorStripeMessageSender& sender)
+{
+ sendReplyWithResult(sender,
+ api::ReturnCode(api::ReturnCode::BUCKET_NOT_FOUND,
+ "The update operation was cancelled due to a cluster state change "
+ "between executing the read and write phases of a write-repair "
+ "update"));
+}
+
+void
TwoPhaseUpdateOperation::send_feed_blocked_error_reply(DistributorStripeMessageSender& sender)
{
sendReplyWithResult(sender,
@@ -257,7 +266,8 @@ void
TwoPhaseUpdateOperation::schedulePutsWithUpdatedDocument(std::shared_ptr<document::Document> doc,
api::Timestamp putTimestamp, DistributorStripeMessageSender& sender)
{
- if (lostBucketOwnershipBetweenPhases()) {
+ assert(!is_cancelled());
+ if (lostBucketOwnershipBetweenPhases()) { // TODO deprecate with cancellation
sendLostOwnershipTransientErrorReply(sender);
return;
}
@@ -281,6 +291,8 @@ TwoPhaseUpdateOperation::schedulePutsWithUpdatedDocument(std::shared_ptr<documen
void
TwoPhaseUpdateOperation::onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply>& msg)
{
+ // In the case of cancellations, we let existing operations complete, but must not
+ // start new ones that are unaware of the cancellations.
if (_mode == Mode::FAST_PATH) {
handleFastPathReceive(sender, msg);
} else {
@@ -304,7 +316,10 @@ TwoPhaseUpdateOperation::handleFastPathReceive(DistributorStripeMessageSender& s
sendReplyWithResult(sender, getReply.getResult());
return;
}
-
+ if (is_cancelled()) {
+ send_operation_cancelled_reply(sender);
+ return;
+ }
if (!getReply.getDocument().get()) {
// Weird, document is no longer there ... Just fail.
sendReplyWithResult(sender, api::ReturnCode(api::ReturnCode::INTERNAL_FAILURE, ""));
@@ -316,7 +331,7 @@ TwoPhaseUpdateOperation::handleFastPathReceive(DistributorStripeMessageSender& s
std::shared_ptr<Operation> callback = _sentMessageMap.pop(msg->getMsgId());
assert(callback.get());
- Operation & callbackOp = *callback;
+ Operation& callbackOp = *callback;
IntermediateMessageSender intermediate(_sentMessageMap, std::move(callback), sender);
callbackOp.receive(intermediate, msg);
@@ -326,12 +341,12 @@ TwoPhaseUpdateOperation::handleFastPathReceive(DistributorStripeMessageSender& s
addTraceFromReply(*intermediate._reply);
auto& cb = dynamic_cast<UpdateOperation&>(callbackOp);
- std::pair<document::BucketId, uint16_t> bestNode = cb.getNewestTimestampLocation();
+ auto [newest_bucket, newest_node] = cb.getNewestTimestampLocation();
auto intermediate_update_reply = std::dynamic_pointer_cast<api::UpdateReply>(intermediate._reply);
assert(intermediate_update_reply);
if (!intermediate_update_reply->getResult().success() ||
- bestNode.first == document::BucketId(0))
+ (newest_bucket == document::BucketId(0)))
{
if (intermediate_update_reply->getResult().success() &&
(intermediate_update_reply->getOldTimestamp() == 0))
@@ -343,9 +358,14 @@ TwoPhaseUpdateOperation::handleFastPathReceive(DistributorStripeMessageSender& s
} else {
LOG(debug, "Update(%s) fast path: was inconsistent!", update_doc_id().c_str());
+ if (is_cancelled()) {
+ send_operation_cancelled_reply(sender);
+ return;
+ }
+
_updateReply = std::move(intermediate_update_reply);
- _fast_path_repair_source_node = bestNode.second;
- document::Bucket bucket(_updateCmd->getBucket().getBucketSpace(), bestNode.first);
+ _fast_path_repair_source_node = newest_node;
+ document::Bucket bucket(_updateCmd->getBucket().getBucketSpace(), newest_bucket);
auto cmd = std::make_shared<api::GetCommand>(bucket, _updateCmd->getDocumentId(), document::AllFields::NAME);
copyMessageSettings(*_updateCmd, *cmd);
@@ -383,7 +403,7 @@ TwoPhaseUpdateOperation::handleSafePathReceive(DistributorStripeMessageSender& s
callbackOp.receive(intermediate, msg);
if (!intermediate._reply.get()) {
- return; // Not enough replies received yet or we're draining callbacks.
+ return; // Not enough replies received yet, or we're draining callbacks.
}
addTraceFromReply(*intermediate._reply);
if (_sendState == SendState::METADATA_GETS_SENT) {
@@ -445,6 +465,13 @@ void TwoPhaseUpdateOperation::handle_safe_path_received_metadata_get(
"One or more metadata Get operations failed; aborting Update"));
return;
}
+ if (is_cancelled()) {
+ send_operation_cancelled_reply(sender);
+ return;
+ }
+ // Replica _removal_ is handled by cancellation, but a concurrent state change may happen
+ // that _adds_ one or more available content nodes, which we cannot then blindly write to.
+ // So we have to explicitly check this edge case.
if (!replica_set_unchanged_after_get_operation()) {
// Use BUCKET_NOT_FOUND to trigger a silent retry.
LOG(debug, "Update(%s): replica set has changed after metadata get phase", update_doc_id().c_str());
@@ -490,6 +517,10 @@ TwoPhaseUpdateOperation::handleSafePathReceivedGet(DistributorStripeMessageSende
sendReplyWithResult(sender, reply.getResult());
return;
}
+ if (is_cancelled()) {
+ send_operation_cancelled_reply(sender);
+ return;
+ }
// Single Get could technically be considered consistent with itself, so make
// sure we never treat that as sufficient for restarting in the fast path.
if ((_sendState != SendState::SINGLE_GET_SENT) && may_restart_with_fast_path(reply)) {
@@ -558,7 +589,8 @@ bool TwoPhaseUpdateOperation::replica_set_unchanged_after_get_operation() const
void TwoPhaseUpdateOperation::restart_with_fast_path_due_to_consistent_get_timestamps(DistributorStripeMessageSender& sender) {
LOG(debug, "Update(%s): all Gets returned in initial safe path were consistent, restarting in fast path mode",
update_doc_id().c_str());
- if (lostBucketOwnershipBetweenPhases()) {
+ assert(!is_cancelled());
+ if (lostBucketOwnershipBetweenPhases()) { // TODO remove once cancellation is wired
sendLostOwnershipTransientErrorReply(sender);
return;
}
@@ -579,7 +611,7 @@ TwoPhaseUpdateOperation::processAndMatchTasCondition(DistributorStripeMessageSen
std::unique_ptr<document::select::Node> selection;
try {
- selection = _parser.parse_selection(_updateCmd->getCondition().getSelection());
+ selection = _parser.parse_selection(_updateCmd->getCondition().getSelection());
} catch (const document::select::ParsingFailedException & e) {
sendReplyWithResult(sender, api::ReturnCode(
api::ReturnCode::ILLEGAL_PARAMETERS,
@@ -679,6 +711,22 @@ TwoPhaseUpdateOperation::onClose(DistributorStripeMessageSender& sender) {
}
}
+void
+TwoPhaseUpdateOperation::on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) {
+ // We have to explicitly cancel any and all pending Operation instances that have been
+ // launched by this operation. This is to ensure any DB updates they may transitively
+ // perform are aware of all cancellations that have occurred.
+ // There may be many messages pending for any given operation, so unique-ify them first
+ // to avoid duplicate cancellation invocations.
+ vespalib::hash_set<Operation*> ops;
+ for (auto& msg_op : _sentMessageMap) {
+ ops.insert(msg_op.second.get());
+ }
+ for (auto* op : ops) {
+ op->cancel(sender, cancel_scope);
+ }
+}
+
vespalib::string TwoPhaseUpdateOperation::update_doc_id() const {
assert(_updateCmd.get() != nullptr);
return _updateCmd->getDocumentId().toString();
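The .cpp changes above repeat the same cancellation checkpoint at every asynchronous phase boundary of the two-phase update (fast-path Get reply, safe-path metadata Gets, safe-path full Get): if the operation was cancelled while a reply was in flight, it answers immediately instead of continuing into the next phase, using BUCKET_NOT_FOUND so the client can retry silently against the new cluster state. Condensed, the recurring shape is the sketch below; continue_with_next_phase() is a hypothetical placeholder for the phase-specific code that follows each check in the diff.

    // Sketch of the checkpoint repeated in handleFastPathReceive(),
    // handle_safe_path_received_metadata_get() and handleSafePathReceivedGet().
    if (is_cancelled()) {
        send_operation_cancelled_reply(sender); // BUCKET_NOT_FOUND => silent client retry
        return;
    }
    continue_with_next_phase(sender); // hypothetical placeholder, not a real method
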
diff --git a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
index d2ad5359fa6..7f64bb8d56c 100644
--- a/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/twophaseupdateoperation.h
@@ -71,6 +71,8 @@ public:
void onClose(DistributorStripeMessageSender& sender) override;
+ void on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) override;
+
private:
enum class SendState {
NONE_SENT,
@@ -94,19 +96,20 @@ private:
void sendReplyWithResult(DistributorStripeMessageSender&, const api::ReturnCode&);
void ensureUpdateReplyCreated();
- std::vector<BucketDatabase::Entry> get_bucket_database_entries() const;
- bool isFastPathPossible(const std::vector<BucketDatabase::Entry>& entries) const;
+ [[nodiscard]] std::vector<BucketDatabase::Entry> get_bucket_database_entries() const;
+ [[nodiscard]] static bool isFastPathPossible(const std::vector<BucketDatabase::Entry>& entries);
void startFastPathUpdate(DistributorStripeMessageSender& sender, std::vector<BucketDatabase::Entry> entries);
void startSafePathUpdate(DistributorStripeMessageSender&);
- bool lostBucketOwnershipBetweenPhases() const;
+ [[nodiscard]] bool lostBucketOwnershipBetweenPhases() const;
void sendLostOwnershipTransientErrorReply(DistributorStripeMessageSender&);
+ void send_operation_cancelled_reply(DistributorStripeMessageSender& sender);
void send_feed_blocked_error_reply(DistributorStripeMessageSender& sender);
void schedulePutsWithUpdatedDocument(
std::shared_ptr<document::Document>,
api::Timestamp,
DistributorStripeMessageSender&);
void applyUpdateToDocument(document::Document&) const;
- std::shared_ptr<document::Document> createBlankDocument() const;
+ [[nodiscard]] std::shared_ptr<document::Document> createBlankDocument() const;
void setUpdatedForTimestamp(api::Timestamp);
void handleFastPathReceive(DistributorStripeMessageSender&,
const std::shared_ptr<api::StorageReply>&);
@@ -120,20 +123,20 @@ private:
void handle_safe_path_received_single_full_get(DistributorStripeMessageSender&, api::GetReply&);
void handleSafePathReceivedGet(DistributorStripeMessageSender&, api::GetReply&);
void handleSafePathReceivedPut(DistributorStripeMessageSender&, const api::PutReply&);
- bool shouldCreateIfNonExistent() const;
+ [[nodiscard]] bool shouldCreateIfNonExistent() const;
bool processAndMatchTasCondition(
DistributorStripeMessageSender& sender,
const document::Document& candidateDoc);
- bool satisfiesUpdateTimestampConstraint(api::Timestamp) const;
+ [[nodiscard]] bool satisfiesUpdateTimestampConstraint(api::Timestamp) const;
void addTraceFromReply(api::StorageReply& reply);
- bool hasTasCondition() const noexcept;
+ [[nodiscard]] bool hasTasCondition() const noexcept;
void replyWithTasFailure(DistributorStripeMessageSender& sender,
vespalib::stringref message);
bool may_restart_with_fast_path(const api::GetReply& reply);
- bool replica_set_unchanged_after_get_operation() const;
+ [[nodiscard]] bool replica_set_unchanged_after_get_operation() const;
void restart_with_fast_path_due_to_consistent_get_timestamps(DistributorStripeMessageSender& sender);
// Precondition: reply has not yet been sent.
- vespalib::string update_doc_id() const;
+ [[nodiscard]] vespalib::string update_doc_id() const;
using ReplicaState = std::vector<std::pair<document::BucketId, uint16_t>>;
diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
index 8988f2589ce..60bddebbb89 100644
--- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
@@ -106,19 +106,18 @@ UpdateOperation::onStart(DistributorStripeMessageSender& sender)
const std::vector<uint16_t>& nodes = entry->getNodes();
std::vector<MessageTracker::ToSend> messages;
+ messages.reserve(nodes.size());
for (uint16_t node : nodes) {
- auto command = std::make_shared<api::UpdateCommand>(
- document::Bucket(_msg->getBucket().getBucketSpace(), entry.getBucketId()),
- _msg->getUpdate(),
- _msg->getTimestamp());
+ auto command = std::make_shared<api::UpdateCommand>(document::Bucket(_msg->getBucket().getBucketSpace(), entry.getBucketId()),
+ _msg->getUpdate(), _msg->getTimestamp());
copyMessageSettings(*_msg, *command);
command->setOldTimestamp(_msg->getOldTimestamp());
command->setCondition(_msg->getCondition());
messages.emplace_back(std::move(command), node);
}
- _tracker.queueMessageBatch(messages);
+ _tracker.queueMessageBatch(std::move(messages));
}
_tracker.flushQueue(sender);
@@ -208,6 +207,13 @@ UpdateOperation::onClose(DistributorStripeMessageSender& sender)
_tracker.fail(sender, api::ReturnCode(api::ReturnCode::ABORTED, "Process is shutting down"));
}
+void
+UpdateOperation::on_cancel(DistributorStripeMessageSender&, const CancelScope& cancel_scope)
+{
+ _tracker.cancel(cancel_scope);
+}
+
+
// The backend behavior of "create-if-missing" updates is to return the timestamp of the
// _new_ update operation if the document was created from scratch. The two-phase update
// operation logic auto-detects unexpected inconsistencies and tries to reconcile
diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.h b/storage/src/vespa/storage/distributor/operations/external/updateoperation.h
index 96fd878a324..7d2131d426d 100644
--- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.h
@@ -31,6 +31,7 @@ public:
std::string getStatus() const override { return ""; };
void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> & msg) override;
void onClose(DistributorStripeMessageSender& sender) override;
+ void on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) override;
std::pair<document::BucketId, uint16_t> getNewestTimestampLocation() const {
return _newestTimestampLocation;
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
index 5599f9fb51e..bf64fa2eb82 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "garbagecollectionoperation.h"
+#include <vespa/storage/distributor/cancelled_replicas_pruner.h>
#include <vespa/storage/distributor/idealstatemanager.h>
#include <vespa/storage/distributor/idealstatemetricsset.h>
#include <vespa/storage/distributor/top_level_distributor.h>
@@ -22,6 +23,7 @@ GarbageCollectionOperation::GarbageCollectionOperation(const ClusterContext& clu
_cluster_state_version_at_phase1_start_time(0),
_remove_candidates(),
_replica_info(),
+ _cancel_scope(),
_max_documents_removed(0),
_is_done(false)
{}
@@ -87,7 +89,7 @@ void GarbageCollectionOperation::send_current_phase_remove_locations(Distributor
command->setPriority((_phase != Phase::WriteRemovesPhase)
? _priority
: _manager->operation_context().distributor_config().default_external_feed_priority());
- _tracker.queueCommand(command, nodes[i]);
+ _tracker.queueCommand(std::move(command), nodes[i]);
}
_tracker.flushQueue(sender);
}
@@ -148,6 +150,10 @@ GarbageCollectionOperation::onReceive(DistributorStripeMessageSender& sender,
}
}
+void GarbageCollectionOperation::on_cancel(DistributorStripeMessageSender&, const CancelScope& cancel_scope) {
+ _cancel_scope.merge(cancel_scope);
+}
+
void GarbageCollectionOperation::update_replica_response_info_from_reply(uint16_t from_node, const api::RemoveLocationReply& reply) {
_replica_info.emplace_back(_manager->operation_context().generate_unique_timestamp(),
from_node, reply.getBucketInfo());
@@ -186,6 +192,11 @@ bool GarbageCollectionOperation::may_start_write_phase() const {
if (!_ok) {
return false; // Already broken, no reason to proceed.
}
+ if (is_cancelled()) {
+ LOG(debug, "GC(%s): not sending write phase; operation has been explicitly cancelled",
+ getBucket().toString().c_str());
+ return false;
+ }
const auto state_version_now = _bucketSpace->getClusterState().getVersion();
if ((state_version_now != _cluster_state_version_at_phase1_start_time) ||
_bucketSpace->has_pending_cluster_state())
@@ -250,9 +261,17 @@ void GarbageCollectionOperation::update_last_gc_timestamp_in_db() {
}
void GarbageCollectionOperation::merge_received_bucket_info_into_db() {
- // TODO avoid two separate DB ops for this. Current API currently does not make this elegant.
- _manager->operation_context().update_bucket_database(getBucket(), _replica_info);
- update_last_gc_timestamp_in_db();
+ if (_cancel_scope.is_cancelled()) {
+ if (_cancel_scope.fully_cancelled()) {
+ return;
+ }
+ _replica_info = prune_cancelled_nodes(_replica_info, _cancel_scope);
+ }
+ if (!_replica_info.empty()) {
+ // TODO avoid two separate DB ops for this. The current API does not make this elegant.
+ _manager->operation_context().update_bucket_database(getBucket(), _replica_info);
+ update_last_gc_timestamp_in_db();
+ } // else: effectively fully cancelled, no touching the DB.
}
void GarbageCollectionOperation::update_gc_metrics() {
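prune_cancelled_nodes() comes from the newly included cancelled_replicas_pruner.h and is not part of this excerpt. Judging from the call sites here and in PersistenceMessageTrackerImpl further down, it is a pure filter over replica info; the sketch below is a plausible shape under that assumption, not the actual implementation.

    // Sketch only; signature inferred from the call sites in this diff.
    std::vector<BucketCopy>
    prune_cancelled_nodes(const std::vector<BucketCopy>& replicas, const CancelScope& cancel_scope)
    {
        std::vector<BucketCopy> pruned;
        pruned.reserve(replicas.size());
        for (const auto& replica : replicas) {
            if (!cancel_scope.node_is_cancelled(replica.getNode())) {
                pruned.push_back(replica); // keep only replicas we may still write to the DB
            }
        }
        return pruned;
    }
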
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h
index 27dc519dcc2..97efbe694de 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.h
@@ -3,10 +3,11 @@
#include "idealstateoperation.h"
#include <vespa/document/base/documentid.h>
+#include <vespa/persistence/spi/id_and_timestamp.h>
#include <vespa/storage/bucketdb/bucketcopy.h>
#include <vespa/storage/distributor/messagetracker.h>
#include <vespa/storage/distributor/operation_sequencer.h>
-#include <vespa/persistence/spi/id_and_timestamp.h>
+#include <vespa/storage/distributor/operations/cancel_scope.h>
#include <vespa/vespalib/stllike/hash_map.h>
#include <vector>
@@ -22,13 +23,14 @@ public:
void onStart(DistributorStripeMessageSender& sender) override;
void onReceive(DistributorStripeMessageSender& sender, const std::shared_ptr<api::StorageReply> &) override;
+ void on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) override;
const char* getName() const noexcept override { return "garbagecollection"; };
Type getType() const noexcept override { return GARBAGE_COLLECTION; }
bool shouldBlockThisOperation(uint32_t, uint16_t, uint8_t) const override;
- bool is_two_phase() const noexcept {
+ [[nodiscard]] bool is_two_phase() const noexcept {
return ((_phase == Phase::ReadMetadataPhase) || (_phase == Phase::WriteRemovesPhase));
}
- bool is_done() const noexcept { return _is_done; }
+ [[nodiscard]] bool is_done() const noexcept { return _is_done; }
protected:
MessageTracker _tracker;
@@ -54,13 +56,14 @@ private:
RemoveCandidates _remove_candidates;
std::vector<SequencingHandle> _gc_write_locks;
std::vector<BucketCopy> _replica_info;
+ CancelScope _cancel_scope;
uint32_t _max_documents_removed;
bool _is_done;
static RemoveCandidates steal_selection_matches_as_candidates(api::RemoveLocationReply& reply);
void send_current_phase_remove_locations(DistributorStripeMessageSender& sender);
- std::vector<spi::IdAndTimestamp> compile_phase_two_send_set() const;
+ [[nodiscard]] std::vector<spi::IdAndTimestamp> compile_phase_two_send_set() const;
void handle_ok_legacy_reply(uint16_t from_node, const api::RemoveLocationReply& reply);
void handle_ok_phase1_reply(api::RemoveLocationReply& reply);
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
index 0e9873f3434..616c4962dca 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
@@ -81,12 +81,11 @@ JoinOperation::enqueueJoinMessagePerTargetNode(
return false;
}
for (const auto& node : nodeToBuckets) {
- std::shared_ptr<api::JoinBucketsCommand> msg(
- new api::JoinBucketsCommand(getBucket()));
+ auto msg = std::make_shared<api::JoinBucketsCommand>(getBucket());
msg->getSourceBuckets() = node.second;
msg->setTimeout(MAX_TIMEOUT);
setCommandMeta(*msg);
- _tracker.queueCommand(msg, node.first);
+ _tracker.queueCommand(std::move(msg), node.first);
}
return true;
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
index e46ccebffba..7bec6bbe53a 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
@@ -24,16 +24,11 @@ RemoveBucketOperation::onStartInternal(DistributorStripeMessageSender& sender)
uint16_t node = getNodes()[i];
const BucketCopy* copy(entry->getNode(node));
if (!copy) {
- LOG(debug, "Node %u was removed between scheduling remove "
- "operation and starting it; not sending DeleteBucket to it",
- node);
+ LOG(debug, "Node %u was removed between scheduling remove operation and starting it; not sending DeleteBucket to it", node);
continue;
}
- LOG(debug, "Sending DeleteBucket for %s to node %u",
- getBucketId().toString().c_str(),
- node);
- std::shared_ptr<api::DeleteBucketCommand> msg(
- new api::DeleteBucketCommand(getBucket()));
+ LOG(debug, "Sending DeleteBucket for %s to node %u", getBucketId().toString().c_str(), node);
+ auto msg = std::make_shared<api::DeleteBucketCommand>(getBucket());
setCommandMeta(*msg);
msg->setBucketInfo(copy->getBucketInfo());
msgs.push_back(std::make_pair(node, msg));
@@ -42,8 +37,8 @@ RemoveBucketOperation::onStartInternal(DistributorStripeMessageSender& sender)
_ok = true;
if (!getNodes().empty()) {
_manager->operation_context().remove_nodes_from_bucket_database(getBucket(), getNodes());
- for (uint32_t i = 0; i < msgs.size(); ++i) {
- _tracker.queueCommand(msgs[i].second, msgs[i].first);
+ for (auto & msg : msgs) {
+ _tracker.queueCommand(std::move(msg.second), msg.first);
}
_tracker.flushQueue(sender);
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
index 00906d22ea4..9547bee6583 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
@@ -26,11 +26,9 @@ SetBucketStateOperation::enqueueSetBucketStateCommand(uint16_t node, bool active
active
? api::SetBucketStateCommand::ACTIVE
: api::SetBucketStateCommand::INACTIVE);
- LOG(debug, "Enqueuing %s for %s to node %u",
- active ? "Activate" : "Deactivate",
- getBucketId().toString().c_str(), node);
+ LOG(debug, "Enqueuing %s for %s to node %u", active ? "Activate" : "Deactivate", getBucketId().toString().c_str(), node);
setCommandMeta(*msg);
- _tracker.queueCommand(msg, node);
+ _tracker.queueCommand(std::move(msg), node);
}
bool
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
index 8e64fb227a7..d704a42e96b 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
@@ -35,7 +35,7 @@ SplitOperation::onStart(DistributorStripeMessageSender& sender)
msg->setMinByteSize(_splitSize);
msg->setTimeout(MAX_TIMEOUT);
setCommandMeta(*msg);
- _tracker.queueCommand(msg, entry->getNodeRef(i).getNode());
+ _tracker.queueCommand(std::move(msg), entry->getNodeRef(i).getNode());
_ok = true;
}
diff --git a/storage/src/vespa/storage/distributor/operations/operation.cpp b/storage/src/vespa/storage/distributor/operations/operation.cpp
index 4d82de170ae..9f944a94178 100644
--- a/storage/src/vespa/storage/distributor/operations/operation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/operation.cpp
@@ -12,7 +12,8 @@ LOG_SETUP(".distributor.callback");
namespace storage::distributor {
Operation::Operation()
- : _startTime()
+ : _startTime(),
+ _cancelled(false)
{
}
@@ -45,6 +46,11 @@ Operation::copyMessageSettings(const api::StorageCommand& source, api::StorageCo
target.setPriority(source.getPriority());
}
+void Operation::cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) {
+ _cancelled = true;
+ on_cancel(sender, cancel_scope);
+}
+
void
Operation::on_blocked()
{
diff --git a/storage/src/vespa/storage/distributor/operations/operation.h b/storage/src/vespa/storage/distributor/operations/operation.h
index bc7e510a5b6..64caacfc642 100644
--- a/storage/src/vespa/storage/distributor/operations/operation.h
+++ b/storage/src/vespa/storage/distributor/operations/operation.h
@@ -16,6 +16,7 @@ class StorageComponent;
namespace distributor {
+class CancelScope;
class DistributorStripeOperationContext;
class PendingMessageTracker;
class OperationSequencer;
@@ -40,7 +41,7 @@ public:
on the owner of the message that was replied to.
*/
virtual void receive(DistributorStripeMessageSender& sender,
- const std::shared_ptr<api::StorageReply> & msg)
+ const std::shared_ptr<api::StorageReply> & msg)
{
onReceive(sender, msg);
}
@@ -60,6 +61,22 @@ public:
void start(DistributorStripeMessageSender& sender);
/**
+ * Explicitly cancel the operation. Cancelled operations may or may not (depending on
+ * the operation implementation) be immediately aborted, but in either case they must
+ * never insert any bucket information _for cancelled nodes_ into the bucket DB after
+ * cancel() has been called.
+ */
+ void cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope);
+
+ /**
+ * Whether cancel() has been invoked at least once on this instance. This does not
+ * distinguish between cancellations caused by ownership transfers and those caused
+ * by nodes becoming unavailable; Operation implementations that care about the distinction
+ * must override on_cancel() and inspect the provided CancelScope.
+ */
+ [[nodiscard]] bool is_cancelled() const noexcept { return _cancelled; }
+
+ /**
* Returns true if we are blocked to start this operation given
* the pending messages.
*/
@@ -93,8 +110,15 @@ private:
const std::shared_ptr<api::StorageReply> & msg) = 0;
protected:
+ virtual void on_cancel(DistributorStripeMessageSender& sender, const CancelScope& cancel_scope) {
+ (void)sender;
+ (void)cancel_scope;
+ }
+
static constexpr vespalib::duration MAX_TIMEOUT = 3600s;
+
vespalib::system_time _startTime;
+ bool _cancelled;
};
}
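The contract documented above boils down to: the non-virtual cancel() records the cancellation and then forwards to the protected on_cancel() hook, which is a no-op by default. The following is a stripped-down, self-contained model of that contract for illustration only; OperationModel and CancelScopeModel are stand-ins, not the real Vespa classes.

    #include <cassert>

    struct CancelScopeModel {};               // stand-in for CancelScope

    class OperationModel {                    // stand-in for Operation
    public:
        virtual ~OperationModel() = default;
        // Non-virtual entry point: record the cancellation, then let the subclass react.
        void cancel(const CancelScopeModel& scope) {
            _cancelled = true;
            on_cancel(scope);
        }
        bool is_cancelled() const noexcept { return _cancelled; }
    protected:
        virtual void on_cancel(const CancelScopeModel&) {}  // default: no-op, as in the diff
    private:
        bool _cancelled = false;
    };

    int main() {
        OperationModel op;
        assert(!op.is_cancelled());
        op.cancel(CancelScopeModel{});
        assert(op.is_cancelled());  // subclasses override on_cancel() when they need more than the flag
    }
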
diff --git a/storage/src/vespa/storage/distributor/operationtargetresolver.h b/storage/src/vespa/storage/distributor/operationtargetresolver.h
index 5e3c4a73f66..2de477d03e5 100644
--- a/storage/src/vespa/storage/distributor/operationtargetresolver.h
+++ b/storage/src/vespa/storage/distributor/operationtargetresolver.h
@@ -15,23 +15,23 @@ namespace storage::distributor {
class OperationTarget : public vespalib::AsciiPrintable
{
document::Bucket _bucket;
- lib::Node _node;
- bool _newCopy;
+ lib::Node _node;
+ bool _newCopy;
public:
- OperationTarget() : _newCopy(true) {}
- OperationTarget(const document::Bucket& bucket, const lib::Node& node, bool newCopy)
+ OperationTarget() noexcept : _newCopy(true) {}
+ OperationTarget(const document::Bucket& bucket, const lib::Node& node, bool newCopy) noexcept
: _bucket(bucket), _node(node), _newCopy(newCopy) {}
- document::BucketId getBucketId() const { return _bucket.getBucketId(); }
- document::Bucket getBucket() const { return _bucket; }
- const lib::Node& getNode() const { return _node; }
- bool isNewCopy() const { return _newCopy; }
+ document::BucketId getBucketId() const noexcept { return _bucket.getBucketId(); }
+ document::Bucket getBucket() const noexcept { return _bucket; }
+ const lib::Node& getNode() const noexcept { return _node; }
+ bool isNewCopy() const noexcept { return _newCopy; }
- bool operator==(const OperationTarget& o) const {
+ bool operator==(const OperationTarget& o) const noexcept {
return (_bucket == o._bucket && _node == o._node && _newCopy == o._newCopy);
}
- bool operator!=(const OperationTarget& o) const {
+ bool operator!=(const OperationTarget& o) const noexcept {
return !(operator==(o));
}
@@ -40,13 +40,13 @@ public:
class OperationTargetList : public std::vector<OperationTarget> {
public:
- bool hasAnyNewCopies() const {
+ bool hasAnyNewCopies() const noexcept {
for (size_t i=0; i<size(); ++i) {
if (operator[](i).isNewCopy()) return true;
}
return false;
}
- bool hasAnyExistingCopies() const {
+ bool hasAnyExistingCopies() const noexcept {
for (size_t i=0; i<size(); ++i) {
if (!operator[](i).isNewCopy()) return true;
}
@@ -63,8 +63,7 @@ public:
PUT
};
- virtual OperationTargetList getTargets(OperationType type,
- const document::BucketId& id) = 0;
+ virtual OperationTargetList getTargets(OperationType type, const document::BucketId& id) = 0;
};
}
diff --git a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp
index 6a9d7e0e6da..eb08cf51f43 100644
--- a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp
+++ b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp
@@ -9,23 +9,8 @@
namespace storage::distributor {
-namespace {
-
-lib::IdealNodeList
-make_node_list(const std::vector<uint16_t>& nodes)
-{
- lib::IdealNodeList list;
- for (auto node : nodes) {
- list.push_back(lib::Node(lib::NodeType::STORAGE, node));
- }
- return list;
-}
-
-}
-
-BucketInstance::BucketInstance(
- const document::BucketId& id, const api::BucketInfo& info,
- lib::Node node, uint16_t idealLocationPriority, bool trusted, bool exist)
+BucketInstance::BucketInstance(const document::BucketId& id, const api::BucketInfo& info, lib::Node node,
+ uint16_t idealLocationPriority, bool trusted, bool exist) noexcept
: _bucket(id), _info(info), _node(node),
_idealLocationPriority(idealLocationPriority), _trusted(trusted), _exist(exist)
{
@@ -39,32 +24,25 @@ BucketInstance::print(vespalib::asciistream& out, const PrintProperties&) const
std::ostringstream ost;
ost << std::hex << _bucket.getId();
- out << "(" << ost.str() << ", "
- << infoString << ", node " << _node.getIndex()
- << ", ideal " << _idealLocationPriority
- << (_trusted ? ", trusted" : "")
- << (_exist ? "" : ", new copy")
- << ")";
+ out << "(" << ost.str() << ", " << infoString << ", node " << _node.getIndex() << ", ideal " << _idealLocationPriority
+ << (_trusted ? ", trusted" : "") << (_exist ? "" : ", new copy") << ")";
}
bool
BucketInstanceList::contains(lib::Node node) const {
- for (uint32_t i=0; i<_instances.size(); ++i) {
- if (_instances[i]._node == node) return true;
+ for (const auto & instance : _instances) {
+ if (instance._node == node) return true;
}
return false;
}
void
-BucketInstanceList::add(BucketDatabase::Entry& e,
- const lib::IdealNodeList& idealState)
+BucketInstanceList::add(const BucketDatabase::Entry& e, const IdealServiceLayerNodesBundle::Node2Index & idealState)
{
for (uint32_t i = 0; i < e.getBucketInfo().getNodeCount(); ++i) {
const BucketCopy& copy(e.getBucketInfo().getNodeRef(i));
lib::Node node(lib::NodeType::STORAGE, copy.getNode());
- _instances.push_back(BucketInstance(
- e.getBucketId(), copy.getBucketInfo(), node,
- idealState.indexOf(node), copy.trusted()));
+ _instances.emplace_back(e.getBucketId(), copy.getBucketInfo(), node, idealState.lookup(copy.getNode()), copy.trusted(), true);
}
}
@@ -73,9 +51,9 @@ BucketInstanceList::populate(const document::BucketId& specificId, const Distrib
{
std::vector<BucketDatabase::Entry> entries;
db.getParents(specificId, entries);
- for (uint32_t i=0; i<entries.size(); ++i) {
- lib::IdealNodeList idealNodes(make_node_list(distributor_bucket_space.get_ideal_service_layer_nodes_bundle(entries[i].getBucketId()).get_available_nonretired_or_maintenance_nodes()));
- add(entries[i], idealNodes);
+ for (const auto & entry : entries) {
+ auto node2Index = distributor_bucket_space.get_ideal_service_layer_nodes_bundle(entry.getBucketId()).nonretired_or_maintenance_to_index();
+ add(entry, node2Index);
}
}
@@ -102,40 +80,33 @@ BucketInstanceList::limitToRedundancyCopies(uint16_t redundancy)
}
document::BucketId
-BucketInstanceList::leastSpecificLeafBucketInSubtree(
- const document::BucketId& candidateId,
- const document::BucketId& mostSpecificId,
- const BucketDatabase& db) const
+BucketInstanceList::leastSpecificLeafBucketInSubtree(const document::BucketId& candidateId,
+ const document::BucketId& mostSpecificId,
+ const BucketDatabase& db)
{
assert(candidateId.contains(mostSpecificId));
document::BucketId treeNode = candidateId;
// treeNode may reach at most 58 bits since buckets at 58 bits by definition
// cannot have any children.
while (db.childCount(treeNode) != 0) {
- treeNode = document::BucketId(treeNode.getUsedBits() + 1,
- mostSpecificId.getRawId());
+ treeNode = document::BucketId(treeNode.getUsedBits() + 1, mostSpecificId.getRawId());
}
assert(treeNode.contains(mostSpecificId));
return treeNode;
}
void
-BucketInstanceList::extendToEnoughCopies(
- const DistributorBucketSpace& distributor_bucket_space,
- const BucketDatabase& db,
- const document::BucketId& targetIfNonPreExisting,
- const document::BucketId& mostSpecificId)
+BucketInstanceList::extendToEnoughCopies(const DistributorBucketSpace& distributor_bucket_space, const BucketDatabase& db,
+ const document::BucketId& targetIfNonPreExisting, const document::BucketId& mostSpecificId)
{
- document::BucketId newTarget(_instances.empty() ? targetIfNonPreExisting
- : _instances[0]._bucket);
+ document::BucketId newTarget(_instances.empty() ? targetIfNonPreExisting : _instances[0]._bucket);
newTarget = leastSpecificLeafBucketInSubtree(newTarget, mostSpecificId, db);
- lib::IdealNodeList idealNodes(make_node_list(distributor_bucket_space.get_ideal_service_layer_nodes_bundle(newTarget).get_available_nonretired_nodes()));
+ const auto & idealNodes = distributor_bucket_space.get_ideal_service_layer_nodes_bundle(newTarget).available_nonretired_nodes();
for (uint32_t i=0; i<idealNodes.size(); ++i) {
- if (!contains(idealNodes[i])) {
- _instances.push_back(BucketInstance(
- newTarget, api::BucketInfo(), idealNodes[i],
- i, false, false));
+ lib::Node node(lib::NodeType::STORAGE, idealNodes[i]);
+ if (!contains(node)) {
+ _instances.emplace_back(newTarget, api::BucketInfo(), node, i, false, false);
}
}
}
@@ -145,7 +116,7 @@ BucketInstanceList::createTargets(document::BucketSpace bucketSpace)
{
OperationTargetList result;
for (const auto& bi : _instances) {
- result.push_back(OperationTarget(document::Bucket(bucketSpace, bi._bucket), bi._node, !bi._exist));
+ result.emplace_back(document::Bucket(bucketSpace, bi._bucket), bi._node, !bi._exist);
}
return result;
}
@@ -188,22 +159,17 @@ struct InstanceOrder {
} // anonymous
BucketInstanceList
-OperationTargetResolverImpl::getAllInstances(OperationType type,
- const document::BucketId& id)
+OperationTargetResolverImpl::getAllInstances(OperationType type, const document::BucketId& id)
{
BucketInstanceList instances;
if (type == PUT) {
instances.populate(id, _distributor_bucket_space, _bucketDatabase);
instances.sort(InstanceOrder());
instances.removeNodeDuplicates();
- instances.extendToEnoughCopies(
- _distributor_bucket_space,
- _bucketDatabase,
- _bucketDatabase.getAppropriateBucket(_minUsedBucketBits, id),
- id);
+ instances.extendToEnoughCopies(_distributor_bucket_space, _bucketDatabase,
+ _bucketDatabase.getAppropriateBucket(_minUsedBucketBits, id), id);
} else {
- throw vespalib::IllegalArgumentException(
- "Unsupported operation type given", VESPA_STRLOC);
+ throw vespalib::IllegalArgumentException("Unsupported operation type given", VESPA_STRLOC);
}
return instances;
}
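The resolver now obtains a precomputed node-to-ideal-position mapping from the ideal-state bundle instead of building a lib::IdealNodeList per database entry and calling indexOf() for every copy. The exact IdealServiceLayerNodesBundle::Node2Index API is not shown in this excerpt; the sketch below only models what the call sites imply, namely that lookup(node) returns the node's position in the ideal-state order (the value indexOf() used to produce).

    // Hypothetical model of Node2Index, for illustration only.
    #include <cstdint>
    #include <vector>

    struct Node2IndexModel {
        std::vector<uint16_t> position_by_node; // indexed by content node, value = ideal-order position
        uint16_t lookup(uint16_t node) const {
            return (node < position_by_node.size()) ? position_by_node[node] : UINT16_MAX;
        }
    };
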
diff --git a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h
index 9ff65475fa4..b76388da9bc 100644
--- a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h
+++ b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h
@@ -3,8 +3,8 @@
#pragma once
#include "operationtargetresolver.h"
+#include "ideal_service_layer_nodes_bundle.h"
#include <vespa/storage/bucketdb/bucketdatabase.h>
-#include <vespa/vdslib/distribution/idealnodecalculator.h>
#include <algorithm>
namespace storage::distributor {
@@ -19,11 +19,11 @@ struct BucketInstance : public vespalib::AsciiPrintable {
bool _trusted;
bool _exist;
- BucketInstance() : _idealLocationPriority(0xffff),
- _trusted(false), _exist(false) {}
+ BucketInstance() noexcept
+ : _idealLocationPriority(0xffff), _trusted(false), _exist(false) {}
BucketInstance(const document::BucketId& id, const api::BucketInfo& info,
lib::Node node, uint16_t idealLocationPriority, bool trusted,
- bool exist = true);
+ bool exist) noexcept;
void print(vespalib::asciistream& out, const PrintProperties&) const override;
};
@@ -42,10 +42,10 @@ class BucketInstanceList : public vespalib::AsciiPrintable {
* Postconditions:
* <return value>.contains(mostSpecificId)
*/
- document::BucketId leastSpecificLeafBucketInSubtree(
- const document::BucketId& candidateId,
- const document::BucketId& mostSpecificId,
- const BucketDatabase& db) const;
+ static document::BucketId
+ leastSpecificLeafBucketInSubtree(const document::BucketId& candidateId,
+ const document::BucketId& mostSpecificId,
+ const BucketDatabase& db);
public:
void add(const BucketInstance& instance) { _instances.push_back(instance); }
@@ -65,7 +65,7 @@ public:
const document::BucketId& mostSpecificId);
void populate(const document::BucketId&, const DistributorBucketSpace&, BucketDatabase&);
- void add(BucketDatabase::Entry& e, const lib::IdealNodeList& idealState);
+ void add(const BucketDatabase::Entry& e, const IdealServiceLayerNodesBundle::Node2Index & idealState);
template <typename Order>
void sort(const Order& order) {
@@ -79,9 +79,9 @@ public:
class OperationTargetResolverImpl : public OperationTargetResolver {
const DistributorBucketSpace& _distributor_bucket_space;
- BucketDatabase& _bucketDatabase;
- uint32_t _minUsedBucketBits;
- uint16_t _redundancy;
+ BucketDatabase& _bucketDatabase;
+ uint32_t _minUsedBucketBits;
+ uint16_t _redundancy;
document::BucketSpace _bucketSpace;
public:
@@ -97,8 +97,7 @@ public:
_bucketSpace(bucketSpace)
{}
- BucketInstanceList getAllInstances(OperationType type,
- const document::BucketId& id);
+ BucketInstanceList getAllInstances(OperationType type, const document::BucketId& id);
BucketInstanceList getInstances(OperationType type, const document::BucketId& id) {
BucketInstanceList result(getAllInstances(type, id));
result.limitToRedundancyCopies(_redundancy);
diff --git a/storage/src/vespa/storage/distributor/outdated_nodes.h b/storage/src/vespa/storage/distributor/outdated_nodes.h
index cef799ee4aa..d014a3074a4 100644
--- a/storage/src/vespa/storage/distributor/outdated_nodes.h
+++ b/storage/src/vespa/storage/distributor/outdated_nodes.h
@@ -2,10 +2,10 @@
#pragma once
-#include <unordered_set>
+#include <vespa/vespalib/stllike/hash_set.h>
namespace storage::distributor::dbtransition {
-using OutdatedNodes = std::unordered_set<uint16_t>;
+using OutdatedNodes = vespalib::hash_set<uint16_t>;
}
diff --git a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp
index 62de3b50b51..19cc7bc522f 100644
--- a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp
+++ b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.cpp
@@ -113,10 +113,7 @@ PendingBucketSpaceDbTransition::DbMerger::removeCopiesFromNodesThatWereRequested
&& (info.getTimestamp() < _creation_timestamp)
&& e->removeNode(entryNode, TrustedUpdate::DEFER))
{
- LOG(spam,
- "Removed bucket %s from node %d",
- bucketId.toString().c_str(),
- entryNode);
+ LOG(spam, "Removed bucket %s from node %d", bucketId.toString().c_str(), entryNode);
updated = true;
// After removing current node, getNodeRef(i) will point to the _next_ node, so don't increment `i`.
} else {
@@ -391,8 +388,7 @@ PendingBucketSpaceDbTransition::markAllAvailableNodesAsRequiringRequest()
}
void
-PendingBucketSpaceDbTransition::addAdditionalNodesToOutdatedSet(
- const std::unordered_set<uint16_t>& nodes)
+PendingBucketSpaceDbTransition::addAdditionalNodesToOutdatedSet(const OutdatedNodes & nodes)
{
const uint16_t nodeCount(newStateStorageNodeCount());
for (uint16_t node : nodes) {
diff --git a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h
index bce0c9bdc93..9fb6e4ed315 100644
--- a/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h
+++ b/storage/src/vespa/storage/distributor/pending_bucket_space_db_transition.h
@@ -76,7 +76,7 @@ public:
const lib::Distribution& _distribution;
const lib::ClusterState& _new_state;
const char* _storage_up_states;
- const std::unordered_set<uint16_t>& _outdated_nodes; // TODO hash_set
+ const OutdatedNodes & _outdated_nodes;
const std::vector<dbtransition::Entry>& _entries;
uint32_t _iter;
public:
@@ -84,7 +84,7 @@ public:
const lib::Distribution& distribution,
const lib::ClusterState& new_state,
const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
+ const OutdatedNodes & outdated_nodes,
const std::vector<dbtransition::Entry>& entries)
: _creation_timestamp(creation_timestamp),
_distribution(distribution),
diff --git a/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp b/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp
index 5b8fa6b69e3..7b3cdacf702 100644
--- a/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp
@@ -17,6 +17,7 @@ PendingMessageTracker::PendingMessageTracker(framework::ComponentRegister& cr, u
_nodeInfo(_component.getClock()),
_nodeBusyDuration(60s),
_deferred_read_tasks(),
+ _trackTime(false),
_lock()
{
_component.registerStatusPage(*this);
@@ -69,6 +70,13 @@ pairAsRange(Pair pair)
return PairAsRange<Pair>(std::move(pair));
}
+document::Bucket
+getBucket(const api::StorageMessage & msg) {
+ return (msg.getType() != api::MessageType::REQUESTBUCKETINFO)
+ ? msg.getBucket()
+ : document::Bucket(msg.getBucket().getBucketSpace(), dynamic_cast<const api::RequestBucketInfoCommand&>(msg).super_bucket_id());
+}
+
}
std::vector<uint64_t>
@@ -91,17 +99,19 @@ PendingMessageTracker::clearMessagesForNode(uint16_t node)
void
PendingMessageTracker::insert(const std::shared_ptr<api::StorageMessage>& msg)
{
- std::lock_guard guard(_lock);
if (msg->getAddress()) {
// TODO STRIPE reevaluate if getBucket() on RequestBucketInfo msgs should transparently return superbucket..!
- document::Bucket bucket = (msg->getType() != api::MessageType::REQUESTBUCKETINFO)
- ? msg->getBucket()
- : document::Bucket(msg->getBucket().getBucketSpace(),
- dynamic_cast<api::RequestBucketInfoCommand&>(*msg).super_bucket_id());
- _messages.emplace(currentTime(), msg->getType().getId(), msg->getPriority(), msg->getMsgId(),
- bucket, msg->getAddress()->getIndex());
-
- _nodeInfo.incPending(msg->getAddress()->getIndex());
+ document::Bucket bucket = getBucket(*msg);
+ {
+ // We do not start tracking time until the HTML status page has been requested at least once.
+ // Time tracking is only used for presenting pending messages for debugging.
+ TimePoint now = (_trackTime.load(std::memory_order_relaxed)) ? currentTime() : TimePoint();
+ std::lock_guard guard(_lock);
+ _messages.emplace(now, msg->getType().getId(), msg->getPriority(), msg->getMsgId(),
+ bucket, msg->getAddress()->getIndex());
+
+ _nodeInfo.incPending(msg->getAddress()->getIndex());
+ }
LOG(debug, "Sending message %s with id %" PRIu64 " to %s",
msg->toString().c_str(), msg->getMsgId(), msg->getAddress()->toString().c_str());
@@ -111,15 +121,13 @@ PendingMessageTracker::insert(const std::shared_ptr<api::StorageMessage>& msg)
document::Bucket
PendingMessageTracker::reply(const api::StorageReply& r)
{
- std::unique_lock guard(_lock);
document::Bucket bucket;
-
LOG(debug, "Got reply: %s", r.toString().c_str());
uint64_t msgId = r.getMsgId();
+ std::unique_lock guard(_lock);
MessagesByMsgId& msgs = boost::multi_index::get<0>(_messages);
MessagesByMsgId::iterator iter = msgs.find(msgId);
-
if (iter != msgs.end()) {
bucket = iter->bucket;
_nodeInfo.decPending(r.getAddress()->getIndex());
@@ -127,7 +135,6 @@ PendingMessageTracker::reply(const api::StorageReply& r)
if (code == api::ReturnCode::BUSY || code == api::ReturnCode::TIMEOUT) {
_nodeInfo.setBusy(r.getAddress()->getIndex(), _nodeBusyDuration);
}
- LOG(debug, "Erased message with id %" PRIu64 " for bucket %s", msgId, bucket.toString().c_str());
msgs.erase(msgId);
auto deferred_tasks = get_deferred_ops_if_bucket_writes_drained(bucket);
// Deferred tasks may try to send messages, which in turn will invoke the PendingMessageTracker.
@@ -139,6 +146,7 @@ PendingMessageTracker::reply(const api::StorageReply& r)
for (auto& task : deferred_tasks) {
task->run(TaskRunState::OK);
}
+ LOG(debug, "Erased message with id %" PRIu64 " for bucket %s", msgId, bucket.toString().c_str());
}
return bucket;
@@ -328,6 +336,7 @@ PendingMessageTracker::getStatusPerNode(std::ostream& out) const
void
PendingMessageTracker::reportHtmlStatus(std::ostream& out, const framework::HttpUrlPath& path) const
{
+ _trackTime.store(true, std::memory_order_relaxed);
if (!path.hasAttribute("order")) {
getStatusStartPage(out);
} else if (path.getAttribute("order") == "bucket") {
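The new _trackTime flag lets insert() skip the clock read on the hot path until someone has actually rendered the HTML status page; relaxed memory ordering is sufficient because the timestamps are purely informational. Below is a minimal self-contained model of the same pattern; Tracker is a stand-in, not the Vespa class.

    #include <atomic>
    #include <chrono>

    class Tracker {
    public:
        void insert() {
            using Clock = std::chrono::steady_clock;
            // Only read the clock if the status page has ever been requested;
            // otherwise store a default-constructed (zero) time point.
            Clock::time_point now = _track_time.load(std::memory_order_relaxed)
                                        ? Clock::now() : Clock::time_point();
            (void)now; // ... store `now` together with the message metadata ...
        }
        void report_status() const {
            _track_time.store(true, std::memory_order_relaxed); // start tracking from now on
            // ... render pending messages ...
        }
    private:
        mutable std::atomic<bool> _track_time{false};
    };
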
diff --git a/storage/src/vespa/storage/distributor/pendingmessagetracker.h b/storage/src/vespa/storage/distributor/pendingmessagetracker.h
index fb672d5ee31..4b5655d3f3c 100644
--- a/storage/src/vespa/storage/distributor/pendingmessagetracker.h
+++ b/storage/src/vespa/storage/distributor/pendingmessagetracker.h
@@ -178,11 +178,12 @@ private:
document::Bucket::hash
>;
- Messages _messages;
- framework::Component _component;
- NodeInfo _nodeInfo;
- vespalib::duration _nodeBusyDuration;
- DeferredBucketTaskMap _deferred_read_tasks;
+ Messages _messages;
+ framework::Component _component;
+ NodeInfo _nodeInfo;
+ vespalib::duration _nodeBusyDuration;
+ DeferredBucketTaskMap _deferred_read_tasks;
+ mutable std::atomic<bool> _trackTime;
// Since distributor is currently single-threaded, this will only
// contend when status page is being accessed. It is, however, required
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
index a30663bde2f..498f3a5feab 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
@@ -1,10 +1,12 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "persistencemessagetracker.h"
+#include "cancelled_replicas_pruner.h"
#include "distributor_bucket_space_repo.h"
#include "distributor_bucket_space.h"
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/storageapi/message/persistence.h>
+#include <algorithm>
#include <vespa/log/log.h>
LOG_SETUP(".persistencemessagetracker");
@@ -18,12 +20,15 @@ PersistenceMessageTrackerImpl::PersistenceMessageTrackerImpl(
DistributorStripeOperationContext& op_ctx,
api::Timestamp revertTimestamp)
: MessageTracker(node_ctx),
+ _remapBucketInfo(),
+ _bucketInfo(),
_metric(metric),
_reply(std::move(reply)),
_op_ctx(op_ctx),
_revertTimestamp(revertTimestamp),
_trace(_reply->getTrace().getLevel()),
_requestTimer(node_ctx.clock()),
+ _cancel_scope(),
_n_persistence_replies_total(0),
_n_successful_persistence_replies(0),
_priority(_reply->getPriority()),
@@ -34,8 +39,32 @@ PersistenceMessageTrackerImpl::PersistenceMessageTrackerImpl(
PersistenceMessageTrackerImpl::~PersistenceMessageTrackerImpl() = default;
void
+PersistenceMessageTrackerImpl::cancel(const CancelScope& cancel_scope)
+{
+ _cancel_scope.merge(cancel_scope);
+}
+
+void
+PersistenceMessageTrackerImpl::prune_cancelled_nodes_if_present(
+ BucketInfoMap& bucket_and_replicas,
+ const CancelScope& cancel_scope)
+{
+ for (auto& info : bucket_and_replicas) {
+ info.second = prune_cancelled_nodes(info.second, cancel_scope);
+ }
+}
+
+void
PersistenceMessageTrackerImpl::updateDB()
{
+ if (_cancel_scope.is_cancelled()) {
+ if (_cancel_scope.fully_cancelled()) {
+ return; // Fully cancelled ops cannot mutate the DB at all
+ }
+ prune_cancelled_nodes_if_present(_bucketInfo, _cancel_scope);
+ prune_cancelled_nodes_if_present(_remapBucketInfo, _cancel_scope);
+ }
+
for (const auto & entry : _bucketInfo) {
_op_ctx.update_bucket_database(entry.first, entry.second);
}
@@ -65,9 +94,7 @@ PersistenceMessageTrackerImpl::fail(MessageSender& sender, const api::ReturnCode
}
uint16_t
-PersistenceMessageTrackerImpl::receiveReply(
- MessageSender& sender,
- api::BucketInfoReply& reply)
+PersistenceMessageTrackerImpl::receiveReply(MessageSender& sender, api::BucketInfoReply& reply)
{
uint16_t node = handleReply(reply);
@@ -79,9 +106,7 @@ PersistenceMessageTrackerImpl::receiveReply(
}
void
-PersistenceMessageTrackerImpl::revert(
- MessageSender& sender,
- const std::vector<BucketNodePair>& revertNodes)
+PersistenceMessageTrackerImpl::revert(MessageSender& sender, const std::vector<BucketNodePair>& revertNodes)
{
if (_revertTimestamp != 0) {
// Since we're reverting, all received bucket info is voided.
@@ -101,15 +126,18 @@ PersistenceMessageTrackerImpl::revert(
}
void
-PersistenceMessageTrackerImpl::queueMessageBatch(const std::vector<MessageTracker::ToSend>& messages) {
+PersistenceMessageTrackerImpl::queueMessageBatch(std::vector<MessageTracker::ToSend> messages) {
_messageBatches.emplace_back();
- for (const auto & message : messages) {
+ auto & batch = _messageBatches.back();
+ batch.reserve(messages.size());
+ reserve_more_commands(messages.size());
+ for (auto & message : messages) {
if (_reply) {
message._msg->getTrace().setLevel(_reply->getTrace().getLevel());
}
- _messageBatches.back().push_back(message._msg->getMsgId());
- queueCommand(message._msg, message._target);
+ batch.push_back(message._msg->getMsgId());
+ queueCommand(std::move(message._msg), message._target);
}
}
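queueMessageBatch() now takes the batch by value instead of by const reference, so the tracker owns the vector and can move each command out rather than copying shared_ptrs; the caller hands it over with std::move, as UpdateOperation::onStart() does above. A minimal illustration of the sink-parameter idiom; Queue and Item are stand-ins, not Vespa types.

    #include <memory>
    #include <utility>
    #include <vector>

    struct Item { std::shared_ptr<int> payload; };

    class Queue {
    public:
        void queue_batch(std::vector<Item> items) {          // by value: caller chooses copy or move
            for (auto& item : items) {
                _queued.push_back(std::move(item.payload));  // steal the refcount, no atomic copy
            }
        }
    private:
        std::vector<std::shared_ptr<int>> _queued;
    };

    // Caller side, mirroring _tracker.queueMessageBatch(std::move(messages)):
    //     std::vector<Item> batch = build_batch();
    //     queue.queue_batch(std::move(batch));
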
@@ -153,24 +181,18 @@ PersistenceMessageTrackerImpl::canSendReplyEarly() const
}
void
-PersistenceMessageTrackerImpl::addBucketInfoFromReply(
- uint16_t node,
- const api::BucketInfoReply& reply)
+PersistenceMessageTrackerImpl::addBucketInfoFromReply(uint16_t node, const api::BucketInfoReply& reply)
{
document::Bucket bucket(reply.getBucket());
const api::BucketInfo& bucketInfo(reply.getBucketInfo());
if (reply.hasBeenRemapped()) {
LOG(debug, "Bucket %s: Received remapped bucket info %s from node %d",
- bucket.toString().c_str(),
- bucketInfo.toString().c_str(),
- node);
+ bucket.toString().c_str(), bucketInfo.toString().c_str(), node);
_remapBucketInfo[bucket].emplace_back(_op_ctx.generate_unique_timestamp(), node, bucketInfo);
} else {
LOG(debug, "Bucket %s: Received bucket info %s from node %d",
- bucket.toString().c_str(),
- bucketInfo.toString().c_str(),
- node);
+ bucket.toString().c_str(), bucketInfo.toString().c_str(), node);
_bucketInfo[bucket].emplace_back(_op_ctx.generate_unique_timestamp(), node, bucketInfo);
}
}
@@ -179,17 +201,12 @@ void
PersistenceMessageTrackerImpl::logSuccessfulReply(uint16_t node, const api::BucketInfoReply& reply) const
{
LOG(spam, "Bucket %s: Received successful reply %s",
- reply.getBucketId().toString().c_str(),
- reply.toString().c_str());
+ reply.getBucketId().toString().c_str(), reply.toString().c_str());
if (!reply.getBucketInfo().valid()) {
- LOG(error,
- "Reply %s from node %d contained invalid bucket "
- "information %s. This is a bug! Please report "
- "this to the Vespa team",
- reply.toString().c_str(),
- node,
- reply.getBucketInfo().toString().c_str());
+ LOG(error, "Reply %s from node %d contained invalid bucket information %s. This is a bug! "
+ "Please report this to the Vespa team",
+ reply.toString().c_str(), node, reply.getBucketInfo().toString().c_str());
}
}
@@ -233,27 +250,27 @@ void
PersistenceMessageTrackerImpl::updateFailureResult(const api::BucketInfoReply& reply)
{
LOG(debug, "Bucket %s: Received failed reply %s with result %s",
- reply.getBucketId().toString().c_str(),
- reply.toString().c_str(),
- reply.getResult().toString().c_str());
- if (reply.getResult().getResult() >
- _reply->getResult().getResult())
- {
+ reply.getBucketId().toString().c_str(), reply.toString().c_str(), reply.getResult().toString().c_str());
+ if (reply.getResult().getResult() > _reply->getResult().getResult()) {
_reply->setResult(reply.getResult());
}
_success = false;
}
+bool
+PersistenceMessageTrackerImpl::node_is_effectively_cancelled(uint16_t node) const noexcept
+{
+ return _cancel_scope.node_is_cancelled(node); // Implicitly covers the fully cancelled case
+}
+
void
-PersistenceMessageTrackerImpl::handleCreateBucketReply(
- api::BucketInfoReply& reply,
- uint16_t node)
+PersistenceMessageTrackerImpl::handleCreateBucketReply(api::BucketInfoReply& reply, uint16_t node)
{
- LOG(spam, "Received CreateBucket reply for %s from node %u",
- reply.getBucketId().toString().c_str(), node);
+ LOG(spam, "Received CreateBucket reply for %s from node %u", reply.getBucketId().toString().c_str(), node);
if (!reply.getResult().success()
- && reply.getResult().getResult() != api::ReturnCode::EXISTS)
+ && (reply.getResult().getResult() != api::ReturnCode::EXISTS)
+ && !node_is_effectively_cancelled(node))
{
LOG(spam, "Create bucket reply failed, so deleting it from bucket db");
// We don't know if the bucket exists at this point, so we remove it from the DB.
@@ -268,9 +285,7 @@ PersistenceMessageTrackerImpl::handleCreateBucketReply(
}
void
-PersistenceMessageTrackerImpl::handlePersistenceReply(
- api::BucketInfoReply& reply,
- uint16_t node)
+PersistenceMessageTrackerImpl::handlePersistenceReply(api::BucketInfoReply& reply, uint16_t node)
{
++_n_persistence_replies_total;
if (reply.getBucketInfo().valid()) {
@@ -295,10 +310,7 @@ PersistenceMessageTrackerImpl::transfer_trace_state_to_reply()
}
void
-PersistenceMessageTrackerImpl::updateFromReply(
- MessageSender& sender,
- api::BucketInfoReply& reply,
- uint16_t node)
+PersistenceMessageTrackerImpl::updateFromReply(MessageSender& sender, api::BucketInfoReply& reply, uint16_t node)
{
_trace.addChild(reply.steal_trace());
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.h b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
index 923ecf45649..8c44d70062c 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.h
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
@@ -4,19 +4,20 @@
#include "distributor_stripe_component.h"
#include "distributormetricsset.h"
#include "messagetracker.h"
+#include <vespa/storage/distributor/operations/cancel_scope.h>
#include <vespa/storageframework/generic/clock/timer.h>
#include <vespa/storageapi/messageapi/bucketinfocommand.h>
#include <vespa/storageapi/messageapi/bucketinforeply.h>
-
namespace storage::distributor {
struct PersistenceMessageTracker {
virtual ~PersistenceMessageTracker() = default;
using ToSend = MessageTracker::ToSend;
+ virtual void cancel(const CancelScope& cancel_scope) = 0;
virtual void fail(MessageSender&, const api::ReturnCode&) = 0;
- virtual void queueMessageBatch(const std::vector<ToSend>&) = 0;
+ virtual void queueMessageBatch(std::vector<ToSend> messages) = 0;
virtual uint16_t receiveReply(MessageSender&, api::BucketInfoReply&) = 0;
virtual std::shared_ptr<api::BucketInfoReply>& getReply() = 0;
virtual void updateFromReply(MessageSender&, api::BucketInfoReply&, uint16_t node) = 0;
@@ -27,14 +28,9 @@ struct PersistenceMessageTracker {
};
class PersistenceMessageTrackerImpl final
- : public PersistenceMessageTracker,
- public MessageTracker
+ : public PersistenceMessageTracker,
+ public MessageTracker
{
-private:
- using BucketInfoMap = std::map<document::Bucket, std::vector<BucketCopy>>;
- BucketInfoMap _remapBucketInfo;
- BucketInfoMap _bucketInfo;
-
public:
PersistenceMessageTrackerImpl(PersistenceOperationMetricSet& metric,
std::shared_ptr<api::BucketInfoReply> reply,
@@ -43,6 +39,8 @@ public:
api::Timestamp revertTimestamp = 0);
~PersistenceMessageTrackerImpl() override;
+ void cancel(const CancelScope& cancel_scope) override;
+
void updateDB();
void updateMetrics();
[[nodiscard]] bool success() const noexcept { return _success; }
@@ -65,11 +63,14 @@ public:
have at most (messages.size() - initial redundancy) messages left in the
queue and have it's first message be done.
*/
- void queueMessageBatch(const std::vector<MessageTracker::ToSend>& messages) override;
+ void queueMessageBatch(std::vector<MessageTracker::ToSend> messages) override;
private:
- using MessageBatch = std::vector<uint64_t>;
+ using MessageBatch = std::vector<uint64_t>;
+ using BucketInfoMap = std::map<document::Bucket, std::vector<BucketCopy>>;
+ BucketInfoMap _remapBucketInfo;
+ BucketInfoMap _bucketInfo;
std::vector<MessageBatch> _messageBatches;
PersistenceOperationMetricSet& _metric;
std::shared_ptr<api::BucketInfoReply> _reply;
@@ -78,20 +79,24 @@ private:
std::vector<BucketNodePair> _revertNodes;
mbus::Trace _trace;
framework::MilliSecTimer _requestTimer;
+ CancelScope _cancel_scope;
uint32_t _n_persistence_replies_total;
uint32_t _n_successful_persistence_replies;
uint8_t _priority;
bool _success;
- bool canSendReplyEarly() const;
+ static void prune_cancelled_nodes_if_present(BucketInfoMap& bucket_and_replicas,
+ const CancelScope& cancel_scope);
+ [[nodiscard]] bool canSendReplyEarly() const;
void addBucketInfoFromReply(uint16_t node, const api::BucketInfoReply& reply);
void logSuccessfulReply(uint16_t node, const api::BucketInfoReply& reply) const;
- bool hasSentReply() const noexcept { return !_reply; }
- bool shouldRevert() const;
- bool has_majority_successful_replies() const noexcept;
- bool has_minority_test_and_set_failure() const noexcept;
+ [[nodiscard]] bool hasSentReply() const noexcept { return !_reply; }
+ [[nodiscard]] bool shouldRevert() const;
+ [[nodiscard]] bool has_majority_successful_replies() const noexcept;
+ [[nodiscard]] bool has_minority_test_and_set_failure() const noexcept;
void sendReply(MessageSender& sender);
void updateFailureResult(const api::BucketInfoReply& reply);
+ [[nodiscard]] bool node_is_effectively_cancelled(uint16_t node) const noexcept;
void handleCreateBucketReply(api::BucketInfoReply& reply, uint16_t node);
void handlePersistenceReply(api::BucketInfoReply& reply, uint16_t node);
void transfer_trace_state_to_reply();
diff --git a/storage/src/vespa/storage/distributor/sentmessagemap.h b/storage/src/vespa/storage/distributor/sentmessagemap.h
index 70bee311f78..951ed6a6877 100644
--- a/storage/src/vespa/storage/distributor/sentmessagemap.h
+++ b/storage/src/vespa/storage/distributor/sentmessagemap.h
@@ -10,19 +10,23 @@ class Operation;
class SentMessageMap {
public:
+ using Map = std::map<api::StorageMessage::Id, std::shared_ptr<Operation>>;
+
SentMessageMap();
~SentMessageMap();
- std::shared_ptr<Operation> pop(api::StorageMessage::Id id);
- std::shared_ptr<Operation> pop();
+ [[nodiscard]] std::shared_ptr<Operation> pop(api::StorageMessage::Id id);
+ [[nodiscard]] std::shared_ptr<Operation> pop();
void insert(api::StorageMessage::Id id, const std::shared_ptr<Operation> & msg);
void clear();
- uint32_t size() const { return _map.size(); }
+ [[nodiscard]] uint32_t size() const { return _map.size(); }
[[nodiscard]] bool empty() const noexcept { return _map.empty(); }
std::string toString() const;
+
+ Map::const_iterator begin() const noexcept { return _map.cbegin(); }
+ Map::const_iterator end() const noexcept { return _map.cend(); }
private:
- using Map = std::map<api::StorageMessage::Id, std::shared_ptr<Operation>>;
Map _map;
};
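The new begin()/end() accessors exist to support the cancellation fan-out in TwoPhaseUpdateOperation::on_cancel() earlier in this diff: several message IDs may map to the same nested Operation, so callers collect unique pointers before cancelling each one. A usage sketch mirroring that call site:

    // Usage sketch; `sent_message_map`, `sender` and `cancel_scope` are assumed to be in scope.
    vespalib::hash_set<Operation*> unique_ops;
    for (const auto& msg_and_op : sent_message_map) {   // enabled by the new const iterators
        unique_ops.insert(msg_and_op.second.get());
    }
    for (Operation* op : unique_ops) {
        op->cancel(sender, cancel_scope);               // invoke each nested operation's hook once
    }
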
diff --git a/storage/src/vespa/storage/distributor/statechecker.cpp b/storage/src/vespa/storage/distributor/statechecker.cpp
index eaff1f0b780..7b30be53c13 100644
--- a/storage/src/vespa/storage/distributor/statechecker.cpp
+++ b/storage/src/vespa/storage/distributor/statechecker.cpp
@@ -4,6 +4,8 @@
#include "distributor_stripe_component.h"
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/state/clusterstate.h>
+#include <vespa/vespalib/stllike/hash_set_insert.hpp>
+#include <sstream>
#include <vespa/log/log.h>
LOG_SETUP(".distributor.statechecker");
@@ -50,13 +52,11 @@ public:
StateChecker::Result
StateChecker::Result::noMaintenanceNeeded()
{
- return Result(std::unique_ptr<ResultImpl>());
+ return Result({});
}
StateChecker::Result
-StateChecker::Result::createStoredResult(
- IdealStateOperation::UP operation,
- MaintenancePriority::Priority priority)
+StateChecker::Result::createStoredResult(IdealStateOperation::UP operation, MaintenancePriority::Priority priority)
{
return Result(std::make_unique<StoredResultImpl>(std::move(operation), MaintenancePriority(priority)));
}
@@ -73,15 +73,13 @@ StateChecker::Context::Context(const DistributorNodeContext& node_ctx_in,
distributorConfig(op_ctx_in.distributor_config()),
distribution(distributorBucketSpace.getDistribution()),
gcTimeCalculator(op_ctx_in.bucket_id_hasher(), distributorConfig.getGarbageCollectionInterval()),
+ idealStateBundle(distributorBucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId())),
node_ctx(node_ctx_in),
op_ctx(op_ctx_in),
db(distributorBucketSpace.getBucketDatabase()),
stats(statsTracker),
merges_inhibited_in_bucket_space(distributorBucketSpace.merges_inhibited())
-{
- idealState = distributorBucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).get_available_nonretired_or_maintenance_nodes();
- unorderedIdealState.insert(idealState.begin(), idealState.end());
-}
+{ }
StateChecker::Context::~Context() = default;
diff --git a/storage/src/vespa/storage/distributor/statechecker.h b/storage/src/vespa/storage/distributor/statechecker.h
index 348d90bc712..d120b5e62d7 100644
--- a/storage/src/vespa/storage/distributor/statechecker.h
+++ b/storage/src/vespa/storage/distributor/statechecker.h
@@ -2,11 +2,12 @@
#pragma once
#include "bucketgctimecalculator.h"
+#include "ideal_service_layer_nodes_bundle.h"
#include <vespa/storage/distributor/maintenance/maintenancepriority.h>
#include <vespa/storage/distributor/operations/idealstate/idealstateoperation.h>
#include <vespa/storage/common/storagecomponent.h>
#include <vespa/storage/bucketdb/bucketdatabase.h>
-#include <unordered_set>
+#include <vespa/vespalib/stllike/hash_set.h>
#include <map>
#include <set>
@@ -63,27 +64,21 @@ public:
std::vector<BucketDatabase::Entry> entries;
// Common
- const lib::ClusterState& systemState;
- const lib::ClusterState* pending_cluster_state; // nullptr if no state is pending.
- const DistributorConfiguration& distributorConfig;
- const lib::Distribution& distribution;
- BucketGcTimeCalculator gcTimeCalculator;
-
- // Separate ideal state into ordered sequence and unordered set, as we
- // need to both know the actual order (activation prioritization etc) as
- // well as have the ability to quickly check if a node is in an ideal
- // location.
- std::vector<uint16_t> idealState;
- std::unordered_set<uint16_t> unorderedIdealState;
-
- const DistributorNodeContext& node_ctx;
- const DistributorStripeOperationContext& op_ctx;
- const BucketDatabase& db;
- NodeMaintenanceStatsTracker& stats;
- const bool merges_inhibited_in_bucket_space;
-
- const BucketDatabase::Entry& getSiblingEntry() const noexcept {
- return siblingEntry;
+ const lib::ClusterState & systemState;
+ const lib::ClusterState * pending_cluster_state; // nullptr if no state is pending.
+ const DistributorConfiguration & distributorConfig;
+ const lib::Distribution & distribution;
+ BucketGcTimeCalculator gcTimeCalculator;
+ const IdealServiceLayerNodesBundle & idealStateBundle;
+ const DistributorNodeContext & node_ctx;
+ const DistributorStripeOperationContext & op_ctx;
+ const BucketDatabase & db;
+ NodeMaintenanceStatsTracker & stats;
+ const bool merges_inhibited_in_bucket_space;
+
+ const BucketDatabase::Entry& getSiblingEntry() const noexcept { return siblingEntry; }
+ IdealServiceLayerNodesBundle::ConstNodesRef idealState() const noexcept {
+ return idealStateBundle.available_nonretired_or_maintenance_nodes();
}
document::Bucket getBucket() const noexcept { return bucket; }
@@ -107,28 +102,19 @@ public:
std::unique_ptr<ResultImpl> _impl;
public:
IdealStateOperation::UP createOperation() {
- return (_impl
- ? _impl->createOperation()
- : IdealStateOperation::UP());
+ return (_impl ? _impl->createOperation() : IdealStateOperation::UP());
}
MaintenancePriority getPriority() const {
- return (_impl
- ? _impl->getPriority()
- : MaintenancePriority());
+ return (_impl ? _impl->getPriority() : MaintenancePriority());
}
MaintenanceOperation::Type getType() const {
- return (_impl
- ? _impl->getType()
- : MaintenanceOperation::OPERATION_COUNT);
-
+ return (_impl ? _impl->getType() : MaintenanceOperation::OPERATION_COUNT);
}
static Result noMaintenanceNeeded();
- static Result createStoredResult(
- IdealStateOperation::UP operation,
- MaintenancePriority::Priority priority);
+ static Result createStoredResult(IdealStateOperation::UP operation, MaintenancePriority::Priority priority);
private:
explicit Result(std::unique_ptr<ResultImpl> impl)
: _impl(std::move(impl))
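Context previously built the ideal state twice per bucket: an ordered vector (order matters for activation prioritization) plus an unordered_set for O(1) membership checks. Holding a reference to a shared IdealServiceLayerNodesBundle removes that per-Context copy while keeping both views. A simplified stand-in for such a dual-view bundle (not the real class) could look like:

// Illustrative sketch only; the real IdealServiceLayerNodesBundle carries more state.
#include <cstdint>
#include <unordered_set>
#include <vector>

class IdealNodesBundle {
public:
    explicit IdealNodesBundle(std::vector<uint16_t> ordered)
        : _ordered(std::move(ordered)),
          _members(_ordered.begin(), _ordered.end())
    {}
    // Ordered view: used where ideal-state position matters.
    const std::vector<uint16_t>& ordered() const noexcept { return _ordered; }
    // Constant-time membership view: used for "is this node ideal?" checks.
    bool contains(uint16_t node) const noexcept { return _members.count(node) != 0; }
private:
    std::vector<uint16_t> _ordered;
    std::unordered_set<uint16_t> _members;
};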
diff --git a/storage/src/vespa/storage/distributor/statecheckers.cpp b/storage/src/vespa/storage/distributor/statecheckers.cpp
index f9c26bf113e..478faa38232 100644
--- a/storage/src/vespa/storage/distributor/statecheckers.cpp
+++ b/storage/src/vespa/storage/distributor/statecheckers.cpp
@@ -27,9 +27,7 @@ SplitBucketStateChecker::validForSplit(Context& c)
{
// Can't split if we have no nodes.
if (c.entry->getNodeCount() == 0) {
- LOG(spam,
- "Can't split bucket %s, since it has no copies",
- c.bucket.toString().c_str());
+ LOG(spam, "Can't split bucket %s, since it has no copies", c.bucket.toString().c_str());
return false;
}
@@ -44,38 +42,30 @@ SplitBucketStateChecker::validForSplit(Context& c)
double
SplitBucketStateChecker::getBucketSizeRelativeToMax(Context& c)
{
- const BucketInfo& info(c.entry.getBucketInfo());
- const uint32_t highestDocumentCount(info.getHighestDocumentCount());
- const uint32_t highestTotalDocumentSize(info.getHighestTotalDocumentSize());
- const uint32_t highestMetaCount(info.getHighestMetaCount());
- const uint32_t highestUsedFileSize(info.getHighestUsedFileSize());
+ auto highest = c.entry.getBucketInfo().getHighest();
- if (highestDocumentCount < 2) {
+ if (highest._documentCount < 2) {
return 0;
}
double byteSplitRatio = 0;
if (c.distributorConfig.getSplitSize() > 0) {
- byteSplitRatio = static_cast<double>(highestTotalDocumentSize)
- / c.distributorConfig.getSplitSize();
+ byteSplitRatio = static_cast<double>(highest._totalDocumentSize) / c.distributorConfig.getSplitSize();
}
double docSplitRatio = 0;
if (c.distributorConfig.getSplitCount() > 0) {
- docSplitRatio = static_cast<double>(highestDocumentCount)
- / c.distributorConfig.getSplitCount();
+ docSplitRatio = static_cast<double>(highest._documentCount) / c.distributorConfig.getSplitCount();
}
double fileSizeRatio = 0;
if (c.distributorConfig.getSplitSize() > 0) {
- fileSizeRatio = static_cast<double>(highestUsedFileSize)
- / (2 * c.distributorConfig.getSplitSize());
+ fileSizeRatio = static_cast<double>(highest._usedFileSize) / (2 * c.distributorConfig.getSplitSize());
}
double metaSplitRatio = 0;
if (c.distributorConfig.getSplitCount() > 0) {
- metaSplitRatio = static_cast<double>(highestMetaCount)
- / (2 * c.distributorConfig.getSplitCount());
+ metaSplitRatio = static_cast<double>(highest._metaCount) / (2 * c.distributorConfig.getSplitCount());
}
return std::max(std::max(byteSplitRatio, docSplitRatio),
@@ -83,47 +73,31 @@ SplitBucketStateChecker::getBucketSizeRelativeToMax(Context& c)
}
StateChecker::Result
-SplitBucketStateChecker::generateMinimumBucketSplitOperation(
- Context& c)
+SplitBucketStateChecker::generateMinimumBucketSplitOperation(Context& c)
{
- auto so = std::make_unique<SplitOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), c.entry->getNodes()),
- c.distributorConfig.getMinimalBucketSplit(),
- 0,
- 0);
+ auto so = std::make_unique<SplitOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), c.entry->getNodes()),
+ c.distributorConfig.getMinimalBucketSplit(), 0, 0);
so->setPriority(c.distributorConfig.getMaintenancePriorities().splitDistributionBits);
- so->setDetailedReason(
- "[Splitting bucket because the current system size requires "
- "a higher minimum split bit]");
+ so->setDetailedReason("[Splitting bucket because the current system size requires a higher minimum split bit]");
return Result::createStoredResult(std::move(so), MaintenancePriority::MEDIUM);
}
StateChecker::Result
-SplitBucketStateChecker::generateMaxSizeExceededSplitOperation(
- Context& c)
+SplitBucketStateChecker::generateMaxSizeExceededSplitOperation(Context& c)
{
- auto so = std::make_unique<SplitOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), c.entry->getNodes()),
- 58,
- c.distributorConfig.getSplitCount(),
- c.distributorConfig.getSplitSize());
+ auto so = std::make_unique<SplitOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), c.entry->getNodes()), 58,
+ c.distributorConfig.getSplitCount(), c.distributorConfig.getSplitSize());
so->setPriority(c.distributorConfig.getMaintenancePriorities().splitLargeBucket);
- const BucketInfo& info(c.entry.getBucketInfo());
+ auto highest = c.entry.getBucketInfo().getHighest();
vespalib::asciistream ost;
ost << "[Splitting bucket because its maximum size ("
- << info.getHighestTotalDocumentSize()
- << " b, "
- << info.getHighestDocumentCount()
- << " docs, "
- << info.getHighestMetaCount()
- << " meta, "
- << info.getHighestUsedFileSize()
- << " b total"
+ << highest._totalDocumentSize << " b, "
+ << highest._documentCount << " docs, "
+ << highest._metaCount << " meta, "
+ << highest._usedFileSize << " b total"
<< ") is higher than the configured limit of ("
<< c.distributorConfig.getSplitSize()
<< ", " << c.distributorConfig.getSplitCount() << ")]";
@@ -159,9 +133,10 @@ JoinBucketsStateChecker::isFirstSibling(const document::BucketId& bucketId)
namespace {
+using ConstNodesRef = IdealServiceLayerNodesBundle::ConstNodesRef;
+
bool
-equalNodeSet(const std::vector<uint16_t>& idealState,
- const BucketDatabase::Entry& dbEntry)
+equalNodeSet(ConstNodesRef idealState, const BucketDatabase::Entry& dbEntry)
{
if (idealState.size() != dbEntry->getNodeCount()) {
return false;
@@ -179,12 +154,10 @@ equalNodeSet(const std::vector<uint16_t>& idealState,
bool
bucketAndSiblingReplicaLocationsEqualIdealState(const StateChecker::Context& context)
{
- if (!equalNodeSet(context.idealState, context.entry)) {
+ if (!equalNodeSet(context.idealState(), context.entry)) {
return false;
}
- std::vector<uint16_t> siblingIdealState(
- context.distribution.getIdealStorageNodes(
- context.systemState, context.siblingBucket));
+ std::vector<uint16_t> siblingIdealState = context.distribution.getIdealStorageNodes(context.systemState, context.siblingBucket);
if (!equalNodeSet(siblingIdealState, context.siblingEntry)) {
return false;
}
@@ -204,6 +177,42 @@ inconsistentJoinIsAllowed(const StateChecker::Context& context)
&& bucketAndSiblingReplicaLocationsEqualIdealState(context));
}
+bool
+isInconsistentlySplit(const StateChecker::Context& c)
+{
+ return (c.entries.size() > 1);
+}
+
+// We don't want to invoke joins on buckets that have more replicas than
+// required. This is in particular because joins cause ideal states to change
+// for the target buckets and trigger merges. Since the removal of the non-
+// ideal replicas is done by the DeleteBuckets state-checker, it will become
+// preempted by potential follow-up joins unless we explicitly avoid these.
+bool
+contextBucketHasTooManyReplicas(const StateChecker::Context& c)
+{
+ return (c.entry->getNodeCount() > c.distribution.getRedundancy());
+}
+
+bool
+bucketAtDistributionBitLimit(const document::BucketId& bucket, const StateChecker::Context& c)
+{
+ return (bucket.getUsedBits() <= std::max(uint32_t(c.systemState.getDistributionBitCount()),
+ c.distributorConfig.getMinimalBucketSplit()));
+}
+
+bool
+legalBucketSplitLevel(const document::BucketId& bucket, const StateChecker::Context& c)
+{
+ return bucket.getUsedBits() >= c.distributorConfig.getMinimalBucketSplit();
+}
+
+bool
+bucketHasMultipleChildren(const document::BucketId& bucket, const StateChecker::Context& c)
+{
+ return c.db.childCount(bucket) > 1;
+}
+
} // anon ns
bool
@@ -213,41 +222,29 @@ JoinBucketsStateChecker::siblingsAreInSync(const Context& context)
const auto& siblingEntry(context.siblingEntry);
if (entry->getNodeCount() != siblingEntry->getNodeCount()) {
- LOG(spam,
- "Not joining bucket %s because sibling bucket %s had different "
- "node count",
- context.bucket.toString().c_str(),
- context.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because sibling bucket %s had different node count",
+ context.bucket.toString().c_str(), context.siblingBucket.toString().c_str());
return false;
}
bool siblingsCoLocated = true;
for (uint32_t i = 0; i < entry->getNodeCount(); ++i) {
- if (entry->getNodeRef(i).getNode()
- != siblingEntry->getNodeRef(i).getNode())
- {
+ if (entry->getNodeRef(i).getNode() != siblingEntry->getNodeRef(i).getNode()) {
siblingsCoLocated = false;
break;
}
}
if (!siblingsCoLocated && !inconsistentJoinIsAllowed(context)) {
- LOG(spam,
- "Not joining bucket %s because sibling bucket %s "
- "does not have the same node set, or inconsistent joins cannot be "
- "performed either due to config or because replicas were not in "
- "their ideal location",
- context.bucket.toString().c_str(),
- context.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because sibling bucket %s does not have the same node set, or inconsistent "
+ "joins cannot be performed either due to config or because replicas were not in their ideal location",
+ context.bucket.toString().c_str(), context.siblingBucket.toString().c_str());
return false;
}
if (!entry->validAndConsistent() || !siblingEntry->validAndConsistent()) {
- LOG(spam,
- "Not joining bucket %s because it or %s is out of sync "
- "and syncing it may cause it to become too large",
- context.bucket.toString().c_str(),
- context.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because it or %s is out of sync and syncing it may cause it to become too large",
+ context.bucket.toString().c_str(), context.siblingBucket.toString().c_str());
return false;
}
@@ -275,58 +272,27 @@ JoinBucketsStateChecker::singleBucketJoinIsEnabled(const Context& c)
return c.distributorConfig.getEnableJoinForSiblingLessBuckets();
}
-namespace {
-
-// We don't want to invoke joins on buckets that have more replicas than
-// required. This is in particular because joins cause ideal states to change
-// for the target buckets and trigger merges. Since the removal of the non-
-// ideal replicas is done by the DeleteBuckets state-checker, it will become
-// preempted by potential follow-up joins unless we explicitly avoid these.
-bool
-contextBucketHasTooManyReplicas(const StateChecker::Context& c)
-{
- return (c.entry->getNodeCount() > c.distribution.getRedundancy());
-}
-
-bool
-bucketAtDistributionBitLimit(const document::BucketId& bucket, const StateChecker::Context& c)
-{
- return (bucket.getUsedBits() <= std::max(
- uint32_t(c.systemState.getDistributionBitCount()),
- c.distributorConfig.getMinimalBucketSplit()));
-}
-
-}
-
bool
JoinBucketsStateChecker::shouldJoin(const Context& c)
{
if (c.entry->getNodeCount() == 0) {
- LOG(spam, "Not joining bucket %s because it has no nodes",
- c.bucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because it has no nodes", c.bucket.toString().c_str());
return false;
}
if (contextBucketHasTooManyReplicas(c)) {
- LOG(spam, "Not joining %s because it has too high replication level",
- c.bucket.toString().c_str());
+ LOG(spam, "Not joining %s because it has too high replication level", c.bucket.toString().c_str());
return false;
}
if (c.distributorConfig.getJoinSize() == 0 && c.distributorConfig.getJoinCount() == 0) {
- LOG(spam, "Not joining bucket %s because join is disabled",
- c.bucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because join is disabled", c.bucket.toString().c_str());
return false;
}
if (bucketAtDistributionBitLimit(c.getBucketId(), c)) {
- LOG(spam,
- "Not joining bucket %s because it is below the min split "
- "count (config: %u, cluster state: %u, bucket has: %u)",
- c.bucket.toString().c_str(),
- c.distributorConfig.getMinimalBucketSplit(),
- c.systemState.getDistributionBitCount(),
- c.getBucketId().getUsedBits());
+ LOG(spam, "Not joining bucket %s because it is below the min split count (config: %u, cluster state: %u, bucket has: %u)",
+ c.bucket.toString().c_str(), c.distributorConfig.getMinimalBucketSplit(), c.systemState.getDistributionBitCount(), c.getBucketId().getUsedBits());
return false;
}
@@ -336,11 +302,8 @@ JoinBucketsStateChecker::shouldJoin(const Context& c)
if (c.getSiblingEntry().valid()) {
if (!isFirstSibling(c.getBucketId())) {
- LOG(spam,
- "Not joining bucket %s because it is the second sibling of "
- "%s and not the first",
- c.bucket.toString().c_str(),
- c.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because it is the second sibling of %s and not the first",
+ c.bucket.toString().c_str(), c.siblingBucket.toString().c_str());
return false;
}
if (!siblingsAreInSync(c)) {
@@ -402,22 +365,6 @@ JoinBucketsStateChecker::smallEnoughToJoin(const Context& c)
return true;
}
-namespace {
-
-bool
-legalBucketSplitLevel(const document::BucketId& bucket, const StateChecker::Context& c)
-{
- return bucket.getUsedBits() >= c.distributorConfig.getMinimalBucketSplit();
-}
-
-bool
-bucketHasMultipleChildren(const document::BucketId& bucket, const StateChecker::Context& c)
-{
- return c.db.childCount(bucket) > 1;
-}
-
-}
-
document::Bucket
JoinBucketsStateChecker::computeJoinBucket(const Context& c)
{
@@ -463,24 +410,13 @@ JoinBucketsStateChecker::check(Context& c) const
sourceBuckets.push_back(c.getBucketId());
}
sourceBuckets.push_back(c.getBucketId());
- auto op = std::make_unique<JoinOperation>(
- c.node_ctx,
- BucketAndNodes(joinedBucket, c.entry->getNodes()),
- sourceBuckets);
+ auto op = std::make_unique<JoinOperation>(c.node_ctx, BucketAndNodes(joinedBucket, c.entry->getNodes()), sourceBuckets);
op->setPriority(c.distributorConfig.getMaintenancePriorities().joinBuckets);
vespalib::asciistream ost;
- ost << "[Joining buckets "
- << sourceBuckets[1].toString()
- << " and " << sourceBuckets[0].toString()
- << " because their size ("
- << getTotalUsedFileSize(c)
- << " bytes, "
- << getTotalMetaCount(c)
- << " docs) is less than the configured limit of ("
- << c.distributorConfig.getJoinSize()
- << ", "
- << c.distributorConfig.getJoinCount()
- << ")";
+ ost << "[Joining buckets " << sourceBuckets[1].toString() << " and " << sourceBuckets[0].toString()
+ << " because their size (" << getTotalUsedFileSize(c) << " bytes, "
+ << getTotalMetaCount(c) << " docs) is less than the configured limit of ("
+ << c.distributorConfig.getJoinSize() << ", " << c.distributorConfig.getJoinCount() << ")";
op->setDetailedReason(ost.str());
@@ -516,8 +452,7 @@ vespalib::string
SplitInconsistentStateChecker::getReason(const document::BucketId& bucketId, const std::vector<BucketDatabase::Entry>& entries)
{
vespalib::asciistream reason;
- reason << "[Bucket is inconsistently split (list includes "
- << vespalib::hex << "0x" << bucketId.getId();
+ reason << "[Bucket is inconsistently split (list includes " << vespalib::hex << "0x" << bucketId.getId();
for (uint32_t i = 0, found = 0; i < entries.size() && found < 3; i++) {
if (!(entries[i].getBucketId() == bucketId)) {
@@ -530,24 +465,11 @@ SplitInconsistentStateChecker::getReason(const document::BucketId& bucketId, con
reason << " and " << vespalib::dec << entries.size() - 4 << " others";
}
- reason << ") Splitting it to improve the problem (max used bits "
- << vespalib::dec
- << getHighestUsedBits(entries)
- << ")]";
+ reason << ") Splitting it to improve the problem (max used bits " << vespalib::dec << getHighestUsedBits(entries) << ")]";
return reason.str();
}
-namespace {
-
-bool
-isInconsistentlySplit(const StateChecker::Context& c)
-{
- return (c.entries.size() > 1);
-}
-
-}
-
StateChecker::Result
SplitInconsistentStateChecker::check(Context& c) const
{
@@ -559,12 +481,8 @@ SplitInconsistentStateChecker::check(Context& c) const
return Result::noMaintenanceNeeded();
}
- auto op = std::make_unique<SplitOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), c.entry->getNodes()),
- getHighestUsedBits(c.entries),
- 0,
- 0);
+ auto op = std::make_unique<SplitOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), c.entry->getNodes()),
+ getHighestUsedBits(c.entries), 0, 0);
op->setPriority(c.distributorConfig.getMaintenancePriorities().splitInconsistentBucket);
op->setDetailedReason(getReason(c.getBucketId(), c.entries));
@@ -573,24 +491,24 @@ SplitInconsistentStateChecker::check(Context& c) const
namespace {
-bool containsMaintenanceNode(const std::vector<uint16_t>& ideal, const StateChecker::Context& c)
+bool
+containsMaintenanceNode(ConstNodesRef ideal, const StateChecker::Context& c)
{
for (uint16_t n : ideal) {
- lib::Node node(lib::NodeType::STORAGE, n);
- if (c.systemState.getNodeState(node).getState() == lib::State::MAINTENANCE) {
+ if (c.systemState.getNodeState(lib::Node(lib::NodeType::STORAGE, n)).getState() == lib::State::MAINTENANCE) {
return true;
}
}
return false;
}
-bool ideal_node_is_unavailable_in_pending_state(const StateChecker::Context& c) {
+bool
+ideal_node_is_unavailable_in_pending_state(const StateChecker::Context& c) {
if (!c.pending_cluster_state) {
return false;
}
- for (uint16_t n : c.idealState) {
- lib::Node node(lib::NodeType::STORAGE, n);
- if (!c.pending_cluster_state->getNodeState(node).getState().oneOf("uir")){
+ for (uint16_t n : c.idealState()) {
+ if (!c.pending_cluster_state->getNodeState(lib::Node(lib::NodeType::STORAGE, n)).getState().oneOf("uir")){
return true;
}
}
@@ -598,9 +516,7 @@ bool ideal_node_is_unavailable_in_pending_state(const StateChecker::Context& c)
}
bool
-consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(
- const std::vector<uint16_t>& idealNodes,
- const BucketInfo& entry)
+consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(ConstNodesRef idealNodes, const BucketInfo& entry)
{
api::BucketInfo info;
for (uint32_t i=0, n=entry.getNodeCount(); i<n; ++i) {
@@ -634,17 +550,8 @@ consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(
class MergeNodes
{
public:
- MergeNodes()
- : _reason(), _nodes(), _problemFlags(0), _priority(255)
- {}
-
- explicit MergeNodes(const BucketDatabase::Entry& entry)
- : _reason(), _nodes(), _problemFlags(0), _priority(255)
- {
- for (uint16_t i = 0; i < entry->getNodeCount(); i++) {
- addNode(entry->getNodeRef(i).getNode());
- }
- }
+ MergeNodes() noexcept;
+ explicit MergeNodes(const BucketDatabase::Entry& entry);
MergeNodes(MergeNodes && rhs) noexcept = default;
MergeNodes & operator =(MergeNodes && rhs) noexcept = delete;
MergeNodes(const MergeNodes & rhs) = delete;
@@ -652,40 +559,16 @@ public:
~MergeNodes();
- void operator+=(const MergeNodes& other) {
- _reason << other._reason.str();
- _problemFlags |= other._problemFlags;
- _nodes.insert(_nodes.end(), other._nodes.begin(), other._nodes.end());
- updatePriority(other._priority);
- }
-
- bool shouldMerge() const {
+ bool shouldMerge() const noexcept {
return _problemFlags != 0;
}
- void markMoveToIdealLocation(uint16_t node, uint8_t msgPriority) {
- _reason << "[Moving bucket to ideal node " << node << "]";
- addProblem(NON_IDEAL_LOCATION);
- addNode(node);
- updatePriority(msgPriority);
- }
-
- void markOutOfSync(const StateChecker::Context& c, uint8_t msgPriority) {
- _reason << "[Synchronizing buckets with different checksums "
- << c.entry->toString()
- << "]";
- addProblem(OUT_OF_SYNC);
- updatePriority(msgPriority);
- }
-
- void markMissingReplica(uint16_t node, uint8_t msgPriority) {
- _reason << "[Adding missing node " << node << "]";
- addProblem(MISSING_REPLICA);
- addNode(node);
- updatePriority(msgPriority);
- }
+ void operator+=(const MergeNodes& other);
+ void markMoveToIdealLocation(uint16_t node, uint8_t msgPriority);
+ void markOutOfSync(const StateChecker::Context& c, uint8_t msgPriority);
+ void markMissingReplica(uint16_t node, uint8_t msgPriority);
- bool needsMoveOnly() const {
+ bool needsMoveOnly() const noexcept {
return _problemFlags == NON_IDEAL_LOCATION;
}
@@ -698,11 +581,11 @@ public:
std::string reason() const { return _reason.str(); }
private:
- void updatePriority(uint8_t pri) {
+ void updatePriority(uint8_t pri) noexcept {
_priority = std::min(pri, _priority);
}
- void addProblem(uint8_t newProblem) {
+ void addProblem(uint8_t newProblem) noexcept {
_problemFlags |= newProblem;
}
@@ -713,16 +596,68 @@ private:
};
vespalib::asciistream _reason;
std::vector<uint16_t> _nodes;
- uint8_t _problemFlags;
- uint8_t _priority;
+ uint8_t _problemFlags;
+ uint8_t _priority;
};
+MergeNodes::MergeNodes() noexcept
+ : _reason(),
+ _nodes(),
+ _problemFlags(0),
+ _priority(255)
+{}
+
+MergeNodes::MergeNodes(const BucketDatabase::Entry& entry)
+ : _reason(),
+ _nodes(),
+ _problemFlags(0),
+ _priority(255)
+{
+ _nodes.reserve(entry->getNodeCount());
+ for (uint16_t i = 0; i < entry->getNodeCount(); i++) {
+ addNode(entry->getNodeRef(i).getNode());
+ }
+}
+
MergeNodes::~MergeNodes() = default;
+
+void
+MergeNodes::operator+=(const MergeNodes& other) {
+ _reason << other._reason.str();
+ _problemFlags |= other._problemFlags;
+ _nodes.reserve(_nodes.size() + other._nodes.size());
+ _nodes.insert(_nodes.end(), other._nodes.begin(), other._nodes.end());
+ updatePriority(other._priority);
+}
+
+void
+MergeNodes::markMoveToIdealLocation(uint16_t node, uint8_t msgPriority) {
+ _reason << "[Moving bucket to ideal node " << node << "]";
+ addProblem(NON_IDEAL_LOCATION);
+ addNode(node);
+ updatePriority(msgPriority);
+}
+
+void
+MergeNodes::markOutOfSync(const StateChecker::Context& c, uint8_t msgPriority) {
+ _reason << "[Synchronizing buckets with different checksums " << c.entry->toString() << "]";
+ addProblem(OUT_OF_SYNC);
+ updatePriority(msgPriority);
+}
+
+void
+MergeNodes::markMissingReplica(uint16_t node, uint8_t msgPriority) {
+ _reason << "[Adding missing node " << node << "]";
+ addProblem(MISSING_REPLICA);
+ addNode(node);
+ updatePriority(msgPriority);
+}
+
bool
-presentInIdealState(const StateChecker::Context& c, uint16_t node)
+presentInIdealState(const StateChecker::Context& c, uint16_t node) noexcept
{
- return c.unorderedIdealState.find(node) != c.unorderedIdealState.end();
+ return c.idealStateBundle.is_nonretired_or_maintenance(node);
}
void
@@ -730,7 +665,7 @@ addStatisticsForNonIdealNodes(const StateChecker::Context& c, bool missingReplic
{
// Common case is that ideal state == actual state with no missing replicas.
// If so, do nothing.
- if (!missingReplica && (c.idealState.size() == c.entry->getNodeCount())) {
+ if (!missingReplica && (c.idealState().size() == c.entry->getNodeCount())) {
return;
}
for (uint32_t j = 0; j < c.entry->getNodeCount(); ++j) {
@@ -745,6 +680,9 @@ addStatisticsForNonIdealNodes(const StateChecker::Context& c, bool missingReplic
}
}
+MergeNodes checkForNodesMissingFromIdealState(StateChecker::Context& c) __attribute__((noinline));
+MergeNodes checkIfBucketsAreOutOfSyncAndNeedMerging(StateChecker::Context& c) __attribute__((noinline));
+
MergeNodes
checkForNodesMissingFromIdealState(StateChecker::Context& c)
{
@@ -753,26 +691,23 @@ checkForNodesMissingFromIdealState(StateChecker::Context& c)
// Check if we need to add copies to get to ideal state.
if (!c.entry->emptyAndConsistent()) {
bool hasMissingReplica = false;
- for (uint32_t i = 0; i < c.idealState.size(); i++) {
+ for (uint16_t node : c.idealState()) {
bool found = false;
for (uint32_t j = 0; j < c.entry->getNodeCount(); j++) {
- if (c.entry->getNodeRef(j).getNode() == c.idealState[i]) {
+ if (c.entry->getNodeRef(j).getNode() == node) {
found = true;
break;
}
}
if (!found) {
- const DistributorConfiguration::MaintenancePriorities& mp(
- c.distributorConfig.getMaintenancePriorities());
- if (c.idealState.size() > c.entry->getNodeCount()) {
- ret.markMissingReplica(c.idealState[i],
- mp.mergeTooFewCopies);
+ const auto & mp = c.distributorConfig.getMaintenancePriorities();
+ if (c.idealState().size() > c.entry->getNodeCount()) {
+ ret.markMissingReplica(node, mp.mergeTooFewCopies);
} else {
- ret.markMoveToIdealLocation(c.idealState[i],
- mp.mergeMoveToIdealNode);
+                        ret.markMoveToIdealLocation(node, mp.mergeMoveToIdealNode);
}
- c.stats.incCopyingIn(c.idealState[i], c.getBucketSpace());
+ c.stats.incCopyingIn(node, c.getBucketSpace());
hasMissingReplica = true;
}
}
@@ -795,12 +730,8 @@ MergeNodes
checkIfBucketsAreOutOfSyncAndNeedMerging(StateChecker::Context& c)
{
MergeNodes ret;
- if (!consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(
- c.idealState,
- c.entry.getBucketInfo()))
- {
- auto pri(c.distributorConfig.getMaintenancePriorities()
- .mergeOutOfSyncCopies);
+    if (!consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(c.idealState(), c.entry.getBucketInfo())) {
+ auto pri(c.distributorConfig.getMaintenancePriorities().mergeOutOfSyncCopies);
ret.markOutOfSync(c, pri);
addStatisticsForOutOfSyncCopies(c);
}
@@ -839,7 +770,7 @@ SynchronizeAndMoveStateChecker::check(Context& c) const
if (isInconsistentlySplit(c)) {
return Result::noMaintenanceNeeded();
}
- if (containsMaintenanceNode(c.idealState, c)) {
+ if (containsMaintenanceNode(c.idealState(), c)) {
return Result::noMaintenanceNeeded();
}
if (ideal_node_is_unavailable_in_pending_state(c)) {
@@ -856,16 +787,14 @@ SynchronizeAndMoveStateChecker::check(Context& c) const
result += checkIfBucketsAreOutOfSyncAndNeedMerging(c);
if (result.shouldMerge()) {
- IdealStateOperation::UP op(
- new MergeOperation(BucketAndNodes(c.getBucket(), result.nodes()),
- c.distributorConfig.getMaxNodesPerMerge()));
+ auto op = std::make_unique<MergeOperation>(BucketAndNodes(c.getBucket(), result.nodes()),
+ c.distributorConfig.getMaxNodesPerMerge());
op->setDetailedReason(result.reason());
MaintenancePriority::Priority schedPri;
if ((c.getBucketSpace() == document::FixedBucketSpaces::default_space())
|| !c.distributorConfig.prioritize_global_bucket_merges())
{
- schedPri = (result.needsMoveOnly() ? MaintenancePriority::LOW
- : MaintenancePriority::MEDIUM);
+ schedPri = (result.needsMoveOnly() ? MaintenancePriority::LOW : MaintenancePriority::MEDIUM);
op->setPriority(result.priority());
} else {
// Since the default bucket space has a dependency on the global bucket space,
@@ -877,10 +806,8 @@ SynchronizeAndMoveStateChecker::check(Context& c) const
return Result::createStoredResult(std::move(op), schedPri);
} else {
- LOG(spam, "Bucket %s: No need for merge, as bucket is in consistent state "
- "(or inconsistent buckets are empty) %s",
- c.bucket.toString().c_str(),
- c.entry->toString().c_str());
+ LOG(spam, "Bucket %s: No need for merge, as bucket is in consistent state (or inconsistent buckets are empty) %s",
+ c.bucket.toString().c_str(), c.entry->toString().c_str());
return Result::noMaintenanceNeeded();
}
}
@@ -895,7 +822,7 @@ DeleteExtraCopiesStateChecker::bucketHasNoData(const Context& c)
bool
DeleteExtraCopiesStateChecker::copyIsInIdealState(const BucketCopy& cp, const Context& c)
{
- return hasItem(c.idealState, cp.getNode());
+ return c.idealStateBundle.is_nonretired_or_maintenance(cp.getNode());
}
bool
@@ -911,9 +838,7 @@ DeleteExtraCopiesStateChecker::addToRemoveSet(
std::vector<uint16_t>& removedCopies,
vespalib::asciistream& reasons)
{
- reasons << "[Removing " << reasonForRemoval
- << " from node " << copyToRemove.getNode()
- << ']';
+ reasons << "[Removing " << reasonForRemoval << " from node " << copyToRemove.getNode() << ']';
removedCopies.push_back(copyToRemove.getNode());
}
@@ -981,7 +906,7 @@ DeleteExtraCopiesStateChecker::check(Context& c) const
}
// Maintain symmetry with merge; don't try to mess with nodes that have an
// ideal copy on a node set in maintenance mode.
- if (containsMaintenanceNode(c.idealState, c)) {
+ if (containsMaintenanceNode(c.idealState(), c)) {
return Result::noMaintenanceNeeded();
}
@@ -989,8 +914,7 @@ DeleteExtraCopiesStateChecker::check(Context& c) const
std::vector<uint16_t> removedCopies;
if (bucketHasNoData(c)) {
- reasons << "[Removing all copies since bucket is empty:"
- << c.entry->toString() << "]";
+ reasons << "[Removing all copies since bucket is empty:" << c.entry->toString() << "]";
for (uint32_t j = 0, cnt = c.entry->getNodeCount(); j < cnt; ++j) {
removedCopies.push_back(c.entry->getNodeRef(j).getNode());
@@ -1004,9 +928,7 @@ DeleteExtraCopiesStateChecker::check(Context& c) const
}
if (!removedCopies.empty()) {
- auto ro = std::make_unique<RemoveBucketOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), removedCopies));
+ auto ro = std::make_unique<RemoveBucketOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), removedCopies));
ro->setPriority(c.distributorConfig.getMaintenancePriorities().deleteBucketCopy);
ro->setDetailedReason(reasons.str());
@@ -1020,7 +942,7 @@ bool
BucketStateStateChecker::shouldSkipActivationDueToMaintenance(const ActiveList& activeNodes, const Context& c)
{
for (uint32_t i = 0; i < activeNodes.size(); ++i) {
- const auto node_index = activeNodes[i]._nodeIndex;
+ const auto node_index = activeNodes[i].nodeIndex();
const BucketCopy* cp(c.entry->getNode(node_index));
if (!cp || cp->active()) {
continue;
@@ -1030,7 +952,7 @@ BucketStateStateChecker::shouldSkipActivationDueToMaintenance(const ActiveList&
// If copy is not ready, we don't want to activate it if a node
// is set in maintenance. Doing so would imply that we want proton
// to start background indexing.
- return containsMaintenanceNode(c.idealState, c);
+ return containsMaintenanceNode(c.idealState(), c);
} // else: activation does not imply indexing, so we can safely do it at any time.
}
}
@@ -1058,9 +980,9 @@ BucketStateStateChecker::check(Context& c) const
return Result::noMaintenanceNeeded();
}
- ActiveList activeNodes(
- ActiveCopy::calculate(c.idealState, c.distribution, c.entry,
- c.distributorConfig.max_activation_inhibited_out_of_sync_groups()));
+ ActiveList activeNodes = ActiveCopy::calculate(c.idealStateBundle.nonretired_or_maintenance_to_index(),
+ c.distribution, c.entry,
+ c.distributorConfig.max_activation_inhibited_out_of_sync_groups());
if (activeNodes.empty()) {
return Result::noMaintenanceNeeded();
}
@@ -1071,13 +993,12 @@ BucketStateStateChecker::check(Context& c) const
vespalib::asciistream reason;
std::vector<uint16_t> operationNodes;
for (uint32_t i=0; i<activeNodes.size(); ++i) {
- const BucketCopy* cp = c.entry->getNode(activeNodes[i]._nodeIndex);
+ const BucketCopy* cp = c.entry->getNode(activeNodes[i].nodeIndex());
if (cp == nullptr || cp->active()) {
continue;
}
- operationNodes.push_back(activeNodes[i]._nodeIndex);
- reason << "[Setting node " << activeNodes[i]._nodeIndex << " as active: "
- << activeNodes[i].getReason() << "]";
+ operationNodes.push_back(activeNodes[i].nodeIndex());
+ reason << "[Setting node " << activeNodes[i].nodeIndex() << " as active: " << activeNodes[i].getReason() << "]";
}
// Deactivate all copies that are currently marked as active.
@@ -1088,7 +1009,7 @@ BucketStateStateChecker::check(Context& c) const
}
bool shouldBeActive = false;
for (uint32_t j=0; j<activeNodes.size(); ++j) {
- if (activeNodes[j]._nodeIndex == cp.getNode()) {
+ if (activeNodes[j].nodeIndex() == cp.getNode()) {
shouldBeActive = true;
}
}
@@ -1104,12 +1025,9 @@ BucketStateStateChecker::check(Context& c) const
std::vector<uint16_t> activeNodeIndexes;
for (uint32_t i=0; i<activeNodes.size(); ++i) {
- activeNodeIndexes.push_back(activeNodes[i]._nodeIndex);
+ activeNodeIndexes.push_back(activeNodes[i].nodeIndex());
}
- auto op = std::make_unique<SetBucketStateOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), operationNodes),
- activeNodeIndexes);
+ auto op = std::make_unique<SetBucketStateOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), operationNodes), activeNodeIndexes);
// If activeNodes > 1, we're dealing with a active-per-leaf group case and
// we currently always send high pri activations.
@@ -1135,7 +1053,7 @@ GarbageCollectionStateChecker::needs_garbage_collection(const Context& c, vespal
if (c.entry->getNodeCount() == 0) {
return false;
}
- if (containsMaintenanceNode(c.idealState, c)) {
+ if (containsMaintenanceNode(c.idealState(), c)) {
return false;
}
std::chrono::seconds lastRunAt(c.entry->getLastGarbageCollectionTime());
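getBucketSizeRelativeToMax above boils the split decision down to the largest of four normalized ratios. A hedged worked example with invented numbers (splitSize = 32 MiB, splitCount = 1024 docs; the fullest replica holds 2048 docs, 16 MiB of document data, 3000 meta entries and a 40 MiB file) gives max(0.5, 2.0, 0.625, ~1.46) = 2.0, so the document count alone would push the bucket past the limit:

// Illustrative arithmetic only; values are made up, not taken from any config.
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t splitSize  = 32u * 1024 * 1024;   // bytes
    const uint32_t splitCount = 1024;                // documents
    const uint32_t docs = 2048, docBytes = 16u * 1024 * 1024;
    const uint32_t meta = 3000, fileBytes = 40u * 1024 * 1024;

    const double byteRatio = double(docBytes)  / splitSize;           // 0.5
    const double docRatio  = double(docs)      / splitCount;          // 2.0
    const double fileRatio = double(fileBytes) / (2.0 * splitSize);   // 0.625
    const double metaRatio = double(meta)      / (2.0 * splitCount);  // ~1.46
    const double rel = std::max({byteRatio, docRatio, fileRatio, metaRatio});
    std::printf("relative size = %.2f -> %s\n", rel, rel > 1.0 ? "split" : "keep");
}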
diff --git a/storage/src/vespa/storage/distributor/stripe_access_guard.h b/storage/src/vespa/storage/distributor/stripe_access_guard.h
index 2ed40cfcf2e..d2d3615b776 100644
--- a/storage/src/vespa/storage/distributor/stripe_access_guard.h
+++ b/storage/src/vespa/storage/distributor/stripe_access_guard.h
@@ -4,6 +4,7 @@
#include "bucket_space_distribution_configs.h"
#include "pending_bucket_space_db_transition_entry.h"
#include "potential_data_loss_report.h"
+#include "outdated_nodes.h"
#include <vespa/document/bucket/bucketspace.h>
#include <vespa/storageapi/defs.h>
#include <unordered_set> // TODO use hash_set instead
@@ -30,6 +31,7 @@ class NodeSupportedFeaturesRepo;
*/
class StripeAccessGuard {
public:
+ using OutdatedNodes = dbtransition::OutdatedNodes;
virtual ~StripeAccessGuard() = default;
virtual void flush_and_close() = 0;
@@ -51,7 +53,7 @@ public:
const lib::Distribution& distribution,
const lib::ClusterState& new_state,
const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
+ const OutdatedNodes& outdated_nodes,
const std::vector<dbtransition::Entry>& entries) = 0;
virtual void update_read_snapshot_before_db_pruning() = 0;
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
index 8fce8c3137a..4c8e51908b0 100644
--- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
@@ -257,7 +257,7 @@ StripeBucketDBUpdater::merge_entries_into_db(document::BucketSpace bucket_space,
const lib::Distribution& distribution,
const lib::ClusterState& new_state,
const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
+ const OutdatedNodes & outdated_nodes,
const std::vector<dbtransition::Entry>& entries)
{
auto& s = _op_ctx.bucket_space_repo().get(bucket_space);
@@ -767,6 +767,7 @@ StripeBucketDBUpdater::MergingNodeRemover::merge(storage::BucketDatabase::Merger
}
std::vector<BucketCopy> remainingCopies;
+ remainingCopies.reserve(e->getNodeCount());
for (uint16_t i = 0; i < e->getNodeCount(); i++) {
const uint16_t node_idx = e->getNodeRef(i).getNode();
if (storage_node_is_available(node_idx)) {
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
index 04efe91e9e7..b8b729edbeb 100644
--- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.h
@@ -33,6 +33,7 @@ class StripeBucketDBUpdater final
public api::MessageHandler
{
public:
+ using OutdatedNodes = dbtransition::OutdatedNodes;
StripeBucketDBUpdater(const DistributorNodeContext& node_ctx,
DistributorStripeOperationContext& op_ctx,
DistributorStripeInterface& owner,
@@ -178,7 +179,7 @@ private:
const lib::Distribution& distribution,
const lib::ClusterState& new_state,
const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
+ const OutdatedNodes & outdated_nodes,
const std::vector<dbtransition::Entry>& entries);
void enqueueRecheckUntilPendingStateEnabled(uint16_t node, const document::Bucket&);
diff --git a/storage/src/vespa/storage/distributor/tickable_stripe.h b/storage/src/vespa/storage/distributor/tickable_stripe.h
index e458043ac64..499cb41ee34 100644
--- a/storage/src/vespa/storage/distributor/tickable_stripe.h
+++ b/storage/src/vespa/storage/distributor/tickable_stripe.h
@@ -24,6 +24,7 @@ class NodeSupportedFeaturesRepo;
*/
class TickableStripe {
public:
+ using OutdatedNodes = dbtransition::OutdatedNodes;
virtual ~TickableStripe() = default;
// Perform a single operation tick of the stripe logic.
@@ -53,7 +54,7 @@ public:
const lib::Distribution& distribution,
const lib::ClusterState& new_state,
const char* storage_up_states,
- const std::unordered_set<uint16_t>& outdated_nodes,
+ const OutdatedNodes & outdated_nodes,
const std::vector<dbtransition::Entry>& entries) = 0;
virtual void update_read_snapshot_before_db_pruning() = 0;
diff --git a/storage/src/vespa/storage/distributor/top_level_distributor.cpp b/storage/src/vespa/storage/distributor/top_level_distributor.cpp
index 80c096135fa..f957af5362e 100644
--- a/storage/src/vespa/storage/distributor/top_level_distributor.cpp
+++ b/storage/src/vespa/storage/distributor/top_level_distributor.cpp
@@ -340,10 +340,10 @@ TopLevelDistributor::propagate_default_distribution_thread_unsafe(
}
}
-std::unordered_map<uint16_t, uint32_t>
+MinReplicaMap
TopLevelDistributor::getMinReplica() const
{
- std::unordered_map<uint16_t, uint32_t> result;
+ MinReplicaMap result;
for (const auto& stripe : _stripes) {
merge_min_replica_stats(result, stripe->getMinReplica());
}
@@ -360,15 +360,6 @@ TopLevelDistributor::getBucketSpacesStats() const
return result;
}
-SimpleMaintenanceScanner::PendingMaintenanceStats
-TopLevelDistributor::pending_maintenance_stats() const {
- SimpleMaintenanceScanner::PendingMaintenanceStats result;
- for (const auto& stripe : _stripes) {
- result.merge(stripe->pending_maintenance_stats());
- }
- return result;
-}
-
void
TopLevelDistributor::propagateInternalScanMetricsToExternal()
{
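getMinReplica now returns the MinReplicaMap alias and folds each stripe's map into one result through merge_min_replica_stats. Assuming that helper keeps the smallest replica count reported per node (an assumption; the real helper is defined elsewhere in the distributor sources), the merge could be sketched as:

// Illustrative sketch only; merge semantics assumed, not taken from this diff.
#include <algorithm>
#include <cstdint>
#include <unordered_map>

using MinReplicaMap = std::unordered_map<uint16_t, uint32_t>; // node index -> min replica count

void merge_min_replica_stats(MinReplicaMap& dest, const MinReplicaMap& src) {
    for (const auto& [node, min_replica] : src) {
        auto [it, inserted] = dest.try_emplace(node, min_replica);
        if (!inserted) {
            it->second = std::min(it->second, min_replica); // keep the lowest count seen
        }
    }
}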
diff --git a/storage/src/vespa/storage/distributor/top_level_distributor.h b/storage/src/vespa/storage/distributor/top_level_distributor.h
index aa3a7b3655d..278a68f72c6 100644
--- a/storage/src/vespa/storage/distributor/top_level_distributor.h
+++ b/storage/src/vespa/storage/distributor/top_level_distributor.h
@@ -142,10 +142,9 @@ private:
/**
* Return a copy of the latest min replica data, see MinReplicaProvider.
*/
- std::unordered_map<uint16_t, uint32_t> getMinReplica() const override;
+ MinReplicaMap getMinReplica() const override;
PerNodeBucketSpacesStats getBucketSpacesStats() const override;
- SimpleMaintenanceScanner::PendingMaintenanceStats pending_maintenance_stats() const;
/**
* Atomically publish internal metrics to external ideal state metrics.
diff --git a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
index 33022c65e24..808f19be3e5 100644
--- a/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/filestorhandlerimpl.cpp
@@ -1040,8 +1040,8 @@ FileStorHandlerImpl::Stripe::getMessage(monitor_guard & guard, PriorityIdx & idx
return {std::move(locker), std::move(msg), std::move(throttle_token)};
} else {
std::shared_ptr<api::StorageReply> msgReply(makeQueueTimeoutReply(*msg));
- guard.unlock();
_cond->notify_all();
+ guard.unlock();
_messageSender.sendReply(msgReply);
return {};
}
@@ -1113,9 +1113,6 @@ FileStorHandlerImpl::Stripe::schedule_and_get_next_async_message(MessageEntry en
update_cached_queue_size(guard);
auto lockedMessage = get_next_async_message(guard);
if ( ! lockedMessage.msg) {
- if (guard.owns_lock()) {
- guard.unlock();
- }
_cond->notify_one();
}
return lockedMessage;
@@ -1185,7 +1182,6 @@ FileStorHandlerImpl::Stripe::release(const document::Bucket & bucket,
_lockedBuckets.erase(iter); // No more locks held
}
bool emptySharedLocks = entry._sharedLocks.empty();
- guard.unlock();
if (wasExclusive) {
_cond->notify_all();
} else if (emptySharedLocks) {
@@ -1201,7 +1197,6 @@ FileStorHandlerImpl::Stripe::decrease_active_sync_merges_counter() noexcept
const bool may_have_blocked_merge = (_active_merges == _owner._max_active_merges_per_stripe);
--_active_merges;
if (may_have_blocked_merge) {
- guard.unlock();
_cond->notify_all();
}
}
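The filestor handler hunks consistently notify the condition variable while the mutex is still held instead of unlocking first. Notifying under the lock is the conservative choice: a waiter cannot wake on the state change and tear down the condition variable in the window between the unlock and the notify. A generic illustration of the pattern (not the Vespa stripe code itself):

// Illustrative sketch only; a minimal producer/consumer using notify-while-locked.
#include <condition_variable>
#include <mutex>
#include <queue>

class WorkQueue {
public:
    void push(int item) {
        std::unique_lock<std::mutex> guard(_mutex);
        _items.push(item);
        _cond.notify_one(); // notify before the guard releases the mutex
    }
    int pop() {
        std::unique_lock<std::mutex> guard(_mutex);
        _cond.wait(guard, [this] { return !_items.empty(); });
        int item = _items.front();
        _items.pop();
        return item;
    }
private:
    std::mutex _mutex;
    std::condition_variable _cond;
    std::queue<int> _items;
};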
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index dcc3602c5e7..36d2393e148 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -15,6 +15,7 @@
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/isequencedtaskexecutor.h>
#include <algorithm>
+#include <sstream>
#include <vespa/log/log.h>
LOG_SETUP(".persistence.mergehandler");
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.h b/storage/src/vespa/storage/persistence/persistenceutil.h
index c3fcb68ddc8..4bd0222bb9e 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.h
+++ b/storage/src/vespa/storage/persistence/persistenceutil.h
@@ -10,7 +10,6 @@
#include <vespa/persistence/spi/result.h>
#include <vespa/persistence/spi/context.h>
#include <vespa/vespalib/io/fileutil.h>
-#include <vespa/storage/storageutil/utils.h>
namespace storage::api {
class StorageMessage;
diff --git a/storage/src/vespa/storage/persistence/processallhandler.cpp b/storage/src/vespa/storage/persistence/processallhandler.cpp
index c22b08c5ca5..a6f6bd5d3fe 100644
--- a/storage/src/vespa/storage/persistence/processallhandler.cpp
+++ b/storage/src/vespa/storage/persistence/processallhandler.cpp
@@ -8,6 +8,7 @@
#include <vespa/persistence/spi/persistenceprovider.h>
#include <vespa/persistence/spi/docentry.h>
#include <vespa/vespalib/util/stringfmt.h>
+#include <sstream>
#include <vespa/log/log.h>
LOG_SETUP(".persistence.processall");
diff --git a/storage/src/vespa/storage/storageserver/communicationmanager.cpp b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
index 362717f70e6..95ed9188422 100644
--- a/storage/src/vespa/storage/storageserver/communicationmanager.cpp
+++ b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
@@ -272,7 +272,8 @@ CommunicationManager::~CommunicationManager()
LOG(debug, "Deleting link %s.", toString().c_str());
}
-void CommunicationManager::onClose()
+void
+CommunicationManager::onClose()
{
// Avoid getting config during shutdown
_configFetcher.reset();
@@ -328,7 +329,8 @@ CommunicationManager::configureMessageBusLimits(const CommunicationManagerConfig
: cfg.mbusContentNodeMaxPendingSize);
}
-void CommunicationManager::configure(std::unique_ptr<CommunicationManagerConfig> config)
+void
+CommunicationManager::configure(std::unique_ptr<CommunicationManagerConfig> config)
{
// Only allow dynamic (live) reconfiguration of message bus limits.
if (_mbus) {
@@ -494,8 +496,7 @@ CommunicationManager::sendMessageBusMessage(const std::shared_ptr<api::StorageCo
}
bool
-CommunicationManager::sendCommand(
- const std::shared_ptr<api::StorageCommand> & msg)
+CommunicationManager::sendCommand(const std::shared_ptr<api::StorageCommand> & msg)
{
if (!msg->getAddress()) {
LOGBP(warning, "Got command without address of type %s in CommunicationManager::sendCommand",
@@ -570,9 +571,8 @@ CommunicationManager::serializeNodeState(const api::GetNodeStateReply& gns, std:
}
void
-CommunicationManager::sendDirectRPCReply(
- RPCRequestWrapper& request,
- const std::shared_ptr<api::StorageReply>& reply)
+CommunicationManager::sendDirectRPCReply(RPCRequestWrapper& request,
+ const std::shared_ptr<api::StorageReply>& reply)
{
std::string_view requestName(request.getMethodName()); // TODO non-name based dispatch
// TODO rework this entire dispatch mechanism :D
@@ -616,9 +616,8 @@ CommunicationManager::sendDirectRPCReply(
}
void
-CommunicationManager::sendMessageBusReply(
- StorageTransportContext& context,
- const std::shared_ptr<api::StorageReply>& reply)
+CommunicationManager::sendMessageBusReply(StorageTransportContext& context,
+ const std::shared_ptr<api::StorageReply>& reply)
{
// Using messagebus for communication.
mbus::Reply::UP replyUP;
@@ -653,8 +652,7 @@ CommunicationManager::sendMessageBusReply(
}
bool
-CommunicationManager::sendReply(
- const std::shared_ptr<api::StorageReply>& reply)
+CommunicationManager::sendReply(const std::shared_ptr<api::StorageReply>& reply)
{
// Relaxed load since we're not doing any dependent reads that aren't
// already covered by some other form of explicit synchronization.
diff --git a/storage/src/vespa/storage/storageserver/distributornode.cpp b/storage/src/vespa/storage/storageserver/distributornode.cpp
index 45abd34e131..ab80381f5d4 100644
--- a/storage/src/vespa/storage/storageserver/distributornode.cpp
+++ b/storage/src/vespa/storage/storageserver/distributornode.cpp
@@ -39,7 +39,7 @@ DistributorNode::DistributorNode(
set_storage_chain_builder(std::move(storage_chain_builder));
}
try {
- initialize();
+ initialize(*this);
} catch (const vespalib::Exception & e) {
shutdownDistributor();
throw;
diff --git a/storage/src/vespa/storage/storageserver/distributornode.h b/storage/src/vespa/storage/storageserver/distributornode.h
index 7224abfa59c..5d61c86d48a 100644
--- a/storage/src/vespa/storage/storageserver/distributornode.h
+++ b/storage/src/vespa/storage/storageserver/distributornode.h
@@ -10,6 +10,7 @@
#include "distributornodecontext.h"
#include "storagenode.h"
+#include "vespa/vespalib/util/jsonstream.h"
#include <vespa/storage/common/distributorcomponent.h>
#include <vespa/storageframework/generic/thread/tickingthread.h>
#include <mutex>
@@ -22,7 +23,8 @@ class IStorageChainBuilder;
class DistributorNode
: public StorageNode,
- private UniqueTimeCalculator
+ private UniqueTimeCalculator,
+ private NodeStateReporter
{
framework::TickingThreadPool::UP _threadPool;
std::unique_ptr<distributor::DistributorStripePool> _stripe_pool;
@@ -58,8 +60,9 @@ public:
void handleConfigChange(vespa::config::content::core::StorVisitordispatcherConfig&);
private:
- void initializeNodeSpecific() override;
+ void report(vespalib::JsonStream &) const override { /* no-op */ }
void perform_post_chain_creation_init_steps() override { /* no-op */ }
+ void initializeNodeSpecific() override;
void createChain(IStorageChainBuilder &builder) override;
api::Timestamp generate_unique_timestamp() override;
diff --git a/storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp b/storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp
index 8c994991b9b..ea049493348 100644
--- a/storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp
+++ b/storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp
@@ -44,7 +44,7 @@ OutputBuf::~OutputBuf() = default;
vespalib::string serialize_state(const lib::ClusterState& state) {
vespalib::asciistream as;
- state.serialize(as, false);
+ state.serialize(as);
return as.str();
}
diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.cpp b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
index b76e2cca02a..65615bea2dd 100644
--- a/storage/src/vespa/storage/storageserver/servicelayernode.cpp
+++ b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
@@ -39,6 +39,17 @@ ServiceLayerNode::ServiceLayerNode(const config::ConfigUri & configUri, ServiceL
{
}
+void
+ServiceLayerNode::report(vespalib::JsonStream &stream) const
+{
+ using namespace vespalib::jsonstream;
+ if (_bucket_manager) {
+ stream << "metrics" << Object() << "values" << Array();
+ _bucket_manager->report(stream);
+ stream << End() << End();
+ }
+}
+
void ServiceLayerNode::init()
{
assert( ! _init_has_been_called);
@@ -50,7 +61,7 @@ void ServiceLayerNode::init()
}
try{
- initialize();
+ initialize(*this);
} catch (spi::HandledException& e) {
requestShutdown("Failed to initialize: " + e.getMessage());
throw;
@@ -164,9 +175,9 @@ ServiceLayerNode::createChain(IStorageChainBuilder &builder)
auto bucket_manager = std::make_unique<BucketManager>(_configUri, _context.getComponentRegister());
_bucket_manager = bucket_manager.get();
builder.add(std::move(bucket_manager));
- builder.add(std::make_unique<VisitorManager>(_configUri, _context.getComponentRegister(), static_cast<VisitorMessageSessionFactory &>(*this), _externalVisitors));
- builder.add(std::make_unique<ModifiedBucketChecker>(
- _context.getComponentRegister(), _persistenceProvider, _configUri));
+ builder.add(std::make_unique<VisitorManager>(_configUri, _context.getComponentRegister(),
+ static_cast<VisitorMessageSessionFactory &>(*this), _externalVisitors));
+ builder.add(std::make_unique<ModifiedBucketChecker>(_context.getComponentRegister(), _persistenceProvider, _configUri));
auto state_manager = releaseStateManager();
auto filstor_manager = std::make_unique<FileStorManager>(_configUri, _persistenceProvider, _context.getComponentRegister(),
getDoneInitializeHandler(), state_manager->getHostInfo());
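The node-info metrics section is now produced through a NodeStateReporter handed to StateManager instead of StateManager visiting the MetricManager itself; the service layer node emits a metrics object while the distributor's report() stays a no-op. A minimal sketch of that inversion, with a plain string writer standing in for vespalib::JsonStream:

// Illustrative sketch only; JsonWriter is a stand-in for vespalib::JsonStream.
#include <iostream>
#include <string>

struct JsonWriter {
    std::ostream& out;
    void raw(const std::string& s) { out << s; }
};

struct NodeStateReporter {                        // interface the state manager depends on
    virtual ~NodeStateReporter() = default;
    virtual void report(JsonWriter& w) const = 0;
};

struct ServiceLayerReporter : NodeStateReporter { // service layer: emit a metrics object
    void report(JsonWriter& w) const override { w.raw("\"metrics\":{\"values\":[]}"); }
};

struct NoopReporter : NodeStateReporter {         // distributor: nothing to add
    void report(JsonWriter&) const override { /* no-op */ }
};

// StateManager-style caller: owns the surrounding object, delegates the body.
void write_node_info(JsonWriter& w, const NodeStateReporter& reporter) {
    w.raw("{");
    reporter.report(w);
    w.raw("}");
}

int main() {
    JsonWriter w{std::cout};
    ServiceLayerReporter reporter;
    write_node_info(w, reporter);
    std::cout << "\n"; // prints {"metrics":{"values":[]}}
}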
diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.h b/storage/src/vespa/storage/storageserver/servicelayernode.h
index 4b719a0330b..e308c020856 100644
--- a/storage/src/vespa/storage/storageserver/servicelayernode.h
+++ b/storage/src/vespa/storage/storageserver/servicelayernode.h
@@ -11,8 +11,10 @@
#include "applicationgenerationfetcher.h"
#include "servicelayernodecontext.h"
#include "storagenode.h"
+#include "vespa/vespalib/util/jsonstream.h"
#include <vespa/storage/visiting/visitormessagesessionfactory.h>
#include <vespa/storage/common/visitorfactory.h>
+#include <vespa/storage/common/nodestateupdater.h>
namespace storage {
@@ -23,18 +25,19 @@ class FileStorManager;
class ServiceLayerNode
: public StorageNode,
- private VisitorMessageSessionFactory
+ private VisitorMessageSessionFactory,
+ private NodeStateReporter
{
- ServiceLayerNodeContext& _context;
- spi::PersistenceProvider& _persistenceProvider;
- VisitorFactory::Map _externalVisitors;
+ ServiceLayerNodeContext & _context;
+ spi::PersistenceProvider & _persistenceProvider;
+ VisitorFactory::Map _externalVisitors;
// FIXME: Should probably use the fetcher in StorageNode
- std::unique_ptr<config::ConfigFetcher> _configFetcher;
- BucketManager* _bucket_manager;
- FileStorManager* _fileStorManager;
- bool _init_has_been_called;
+ std::unique_ptr<config::ConfigFetcher> _configFetcher;
+ BucketManager * _bucket_manager;
+ FileStorManager * _fileStorManager;
+ bool _init_has_been_called;
public:
using UP = std::unique_ptr<ServiceLayerNode>;
@@ -55,6 +58,7 @@ public:
ResumeGuard pause() override;
private:
+ void report(vespalib::JsonStream &writer) const override;
void subscribeToConfigs() override;
void initializeNodeSpecific() override;
void perform_post_chain_creation_init_steps() override;
diff --git a/storage/src/vespa/storage/storageserver/statemanager.cpp b/storage/src/vespa/storage/storageserver/statemanager.cpp
index c228229e4ef..742f994cb2d 100644
--- a/storage/src/vespa/storage/storageserver/statemanager.cpp
+++ b/storage/src/vespa/storage/storageserver/statemanager.cpp
@@ -19,6 +19,7 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/time.h>
#include <fstream>
+#include <ranges>
#include <vespa/log/log.h>
LOG_SETUP(".state.manager");
@@ -48,13 +49,13 @@ StateManager::StateManagerMetrics::~StateManagerMetrics() = default;
using lib::ClusterStateBundle;
StateManager::StateManager(StorageComponentRegister& compReg,
- metrics::MetricManager& metricManager,
std::unique_ptr<HostInfo> hostInfo,
+ const NodeStateReporter & reporter,
bool testMode)
: StorageLink("State manager"),
framework::HtmlStatusReporter("systemstate", "Node and system state"),
_component(compReg, "statemanager"),
- _metricManager(metricManager),
+ _nodeStateReporter(reporter),
_metrics(std::make_unique<StateManagerMetrics>()),
_stateLock(),
_stateCond(),
@@ -141,9 +142,9 @@ StateManager::reportHtmlStatus(std::ostream& out,
<< "<h1>System state history</h1>\n"
<< "<table border=\"1\"><tr>"
<< "<th>Received at time</th><th>State</th></tr>\n";
- for (auto it = _systemStateHistory.rbegin(); it != _systemStateHistory.rend(); it++) {
- out << "<tr><td>" << vespalib::to_string(vespalib::to_utc(it->first)) << "</td><td>"
- << xml_content_escaped(it->second->getBaselineClusterState()->toString()) << "</td></tr>\n";
+ for (const auto & it : std::ranges::reverse_view(_systemStateHistory)) {
+ out << "<tr><td>" << vespalib::to_string(vespalib::to_utc(it.first)) << "</td><td>"
+ << xml_content_escaped(it.second->getBaselineClusterState()->toString()) << "</td></tr>\n";
}
out << "</table>\n";
}
@@ -571,17 +572,8 @@ StateManager::getNodeInfo() const
vespalib::JsonStream stream(json, true);
stream << Object();
{ // Print metrics
- stream << "metrics";
try {
- metrics::MetricLockGuard lock(_metricManager.getMetricLock());
- auto periods(_metricManager.getSnapshotPeriods(lock));
- if (!periods.empty()) {
- const metrics::MetricSnapshot& snapshot(_metricManager.getMetricSnapshot(lock, periods[0]));
- metrics::JsonWriter metricJsonWriter(stream);
- _metricManager.visit(lock, snapshot, metricJsonWriter, "fleetcontroller");
- } else {
- stream << Object() << "error" << "no snapshot periods" << End();
- }
+ _nodeStateReporter.report(stream);
} catch (vespalib::Exception& e) {
stream << Object() << "error" << e.getMessage() << End();
}
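Note: two things change in statemanager.cpp — the system-state history table is now walked newest-first with C++20 std::ranges::reverse_view (hence the new <ranges> include), and getNodeInfo() no longer snapshots metrics itself but delegates to the injected NodeStateReporter. A minimal, self-contained example of the reverse-view idiom (all names are placeholders):

```cpp
#include <iostream>
#include <ranges>
#include <string>
#include <utility>
#include <vector>

int main() {
    // Stand-in for _systemStateHistory: (timestamp, state) pairs, oldest first.
    std::vector<std::pair<int, std::string>> history = {
        {1, "state A"}, {2, "state B"}, {3, "state C"}};
    // Iterate newest-first without spelling out rbegin()/rend().
    for (const auto& entry : std::ranges::reverse_view(history)) {
        std::cout << entry.first << ": " << entry.second << '\n';  // 3, 2, 1
    }
    return 0;
}
```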
diff --git a/storage/src/vespa/storage/storageserver/statemanager.h b/storage/src/vespa/storage/storageserver/statemanager.h
index 3b1291b1c3f..a69675adb1b 100644
--- a/storage/src/vespa/storage/storageserver/statemanager.h
+++ b/storage/src/vespa/storage/storageserver/statemanager.h
@@ -49,7 +49,7 @@ class StateManager : public NodeStateUpdater,
struct StateManagerMetrics;
StorageComponent _component;
- metrics::MetricManager& _metricManager;
+ const NodeStateReporter & _nodeStateReporter;
std::unique_ptr<StateManagerMetrics> _metrics;
mutable std::mutex _stateLock;
std::condition_variable _stateCond;
@@ -80,8 +80,8 @@ class StateManager : public NodeStateUpdater,
std::atomic<bool> _requested_almost_immediate_node_state_replies;
public:
- explicit StateManager(StorageComponentRegister&, metrics::MetricManager&,
- std::unique_ptr<HostInfo>, bool testMode = false);
+ explicit StateManager(StorageComponentRegister&, std::unique_ptr<HostInfo>,
+ const NodeStateReporter & reporter, bool testMode);
~StateManager() override;
void onOpen() override;
@@ -110,7 +110,6 @@ public:
private:
struct ExternalStateLock;
- friend struct ExternalStateLock;
friend struct StateManagerTest;
void notifyStateListeners();
diff --git a/storage/src/vespa/storage/storageserver/storagenode.cpp b/storage/src/vespa/storage/storageserver/storagenode.cpp
index f0f410d3076..99a879e19db 100644
--- a/storage/src/vespa/storage/storageserver/storagenode.cpp
+++ b/storage/src/vespa/storage/storageserver/storagenode.cpp
@@ -127,7 +127,7 @@ StorageNode::subscribeToConfigs()
}
void
-StorageNode::initialize()
+StorageNode::initialize(const NodeStateReporter & nodeStateReporter)
{
// Avoid racing with concurrent reconfigurations before we've set up the entire
// node component stack.
@@ -164,8 +164,8 @@ StorageNode::initialize()
// dead lock detector too, but not before open()
_stateManager = std::make_unique<StateManager>(
_context.getComponentRegister(),
- _context.getComponentRegister().getMetricManager(),
std::move(_hostInfo),
+ nodeStateReporter,
_singleThreadedDebugMode);
_context.getComponentRegister().setNodeStateUpdater(*_stateManager);
@@ -176,11 +176,11 @@ StorageNode::initialize()
initializeNodeSpecific();
- _statusMetrics = std::make_unique<StatusMetricConsumer>(
- _context.getComponentRegister(), _context.getComponentRegister().getMetricManager());
- _stateReporter = std::make_unique<StateReporter>(
- _context.getComponentRegister(), _context.getComponentRegister().getMetricManager(),
- _generationFetcher);
+ _statusMetrics = std::make_unique<StatusMetricConsumer>(_context.getComponentRegister(),
+ _context.getComponentRegister().getMetricManager());
+ _stateReporter = std::make_unique<StateReporter>(_context.getComponentRegister(),
+ _context.getComponentRegister().getMetricManager(),
+ _generationFetcher);
// Start deadlock detector
_deadLockDetector = std::make_unique<DeadLockDetector>(_context.getComponentRegister());
diff --git a/storage/src/vespa/storage/storageserver/storagenode.h b/storage/src/vespa/storage/storageserver/storagenode.h
index 344181adad5..5a521d7c66c 100644
--- a/storage/src/vespa/storage/storageserver/storagenode.h
+++ b/storage/src/vespa/storage/storageserver/storagenode.h
@@ -43,6 +43,7 @@ class StatusMetricConsumer;
class StatusWebServer;
class StorageComponent;
class StorageLink;
+class NodeStateReporter;
struct DeadLockDetector;
struct StorageMetricSet;
struct StorageNodeContext;
@@ -172,7 +173,7 @@ protected:
*/
std::unique_ptr<StateManager> releaseStateManager();
- void initialize();
+ void initialize(const NodeStateReporter & reporter);
virtual void subscribeToConfigs();
virtual void initializeNodeSpecific() = 0;
virtual void perform_post_chain_creation_init_steps() = 0;
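Note: together with the storagenode.cpp hunk above, the node now injects a reporter into the StateManager instead of handing it the MetricManager: initialize() receives a NodeStateReporter, and a node that implements the interface (as ServiceLayerNode now does) can pass itself. A self-contained sketch of that injection pattern — all names below are illustrative, not the repository's:

```cpp
#include <iostream>
#include <memory>

// Minimal stand-in for the reporter interface.
struct Reporter {
    virtual ~Reporter() = default;
    virtual void report(std::ostream& out) const = 0;
};

// Stand-in for StateManager: holds only the reporter it was given.
class StateManagerSketch {
public:
    explicit StateManagerSketch(const Reporter& reporter) : _reporter(reporter) {}
    void getNodeInfo(std::ostream& out) const { _reporter.report(out); }
private:
    const Reporter& _reporter;
};

// Stand-in for the node: owns the state manager and is itself the reporter.
class NodeSketch : public Reporter {
public:
    void initialize() { _stateManager = std::make_unique<StateManagerSketch>(*this); }
    void dump(std::ostream& out) const { _stateManager->getNodeInfo(out); }
    void report(std::ostream& out) const override { out << "{\"metrics\":{}}\n"; }
private:
    std::unique_ptr<StateManagerSketch> _stateManager;
};

int main() {
    NodeSketch node;
    node.initialize();
    node.dump(std::cout);
    return 0;
}
```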
diff --git a/storage/src/vespa/storage/storageutil/distributorstatecache.h b/storage/src/vespa/storage/storageutil/distributorstatecache.h
index 8c4d07e39bf..7073b141bc9 100644
--- a/storage/src/vespa/storage/storageutil/distributorstatecache.h
+++ b/storage/src/vespa/storage/storageutil/distributorstatecache.h
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+
#include <vespa/vdslib/state/clusterstate.h>
#include <vespa/vdslib/distribution/distribution.h>
@@ -9,9 +10,7 @@ namespace storage {
class DistributorStateCache
{
public:
- DistributorStateCache(
- const lib::Distribution& distr,
- const lib::ClusterState& state)
+ DistributorStateCache(const lib::Distribution& distr, const lib::ClusterState& state) noexcept
: _distribution(distr),
_state(state),
_distrBitMask(0xffffffffffffffffull),
@@ -22,8 +21,7 @@ public:
_distrBitMask >>= (64 - state.getDistributionBitCount());
}
- uint16_t getOwner(const document::BucketId& bid,
- const char* upStates = "ui")
+ uint16_t getOwner(const document::BucketId& bid, const char* upStates = "ui")
{
uint64_t distributionBits = bid.getRawId() & _distrBitMask;
@@ -38,11 +36,11 @@ public:
return i;
}
- const lib::Distribution& getDistribution() const {
+ const lib::Distribution& getDistribution() const noexcept {
return _distribution;
}
- const lib::ClusterState& getClusterState() const {
+ const lib::ClusterState& getClusterState() const noexcept {
return _state;
}
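Note: the DistributorStateCache changes are mostly noexcept and formatting, but the constructor shown above also documents the mask setup used by getOwner(): an all-ones 64-bit mask shifted so that only the state's distribution-bit count of low bits survives when applied to a raw bucket id. A standalone illustration of just that mask arithmetic (values below are made up):

```cpp
#include <cstdint>
#include <iostream>

int main() {
    uint64_t distrBitMask = 0xffffffffffffffffull;
    uint32_t distributionBitCount = 16;               // example; must be non-zero here
    distrBitMask >>= (64 - distributionBitCount);     // keep the low 16 bits
    uint64_t rawId = 0x8b4f67ae0000ffffull;           // made-up raw bucket id
    std::cout << std::hex << (rawId & distrBitMask) << '\n';  // prints ffff
    return 0;
}
```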
diff --git a/storage/src/vespa/storage/storageutil/utils.h b/storage/src/vespa/storage/storageutil/utils.h
index debb7e71ace..3d3f5b85d71 100644
--- a/storage/src/vespa/storage/storageutil/utils.h
+++ b/storage/src/vespa/storage/storageutil/utils.h
@@ -1,7 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vector>
+#include <vespa/vespalib/util/arrayref.h>
#include <sstream>
namespace storage {
@@ -10,50 +10,55 @@ namespace storage {
* Creates a vector of the given type with one entry in it.
*/
template<class A>
-std::vector<A> toVector(A entry) {
+std::vector<A>
+toVector(A entry) {
std::vector<A> entries;
entries.push_back(entry);
return entries;
-};
+}
/**
* Creates a vector of the given type with two entries in it.
*/
template<class A>
-std::vector<A> toVector(A entry, A entry2) {
+std::vector<A>
+toVector(A entry, A entry2) {
std::vector<A> entries;
entries.push_back(entry);
entries.push_back(entry2);
return entries;
-};
+}
/**

* Creates a vector of the given type with three entries in it.
*/
template<class A>
-std::vector<A> toVector(A entry, A entry2, A entry3) {
+std::vector<A>
+toVector(A entry, A entry2, A entry3) {
std::vector<A> entries;
entries.push_back(entry);
entries.push_back(entry2);
entries.push_back(entry3);
return entries;
-};
+}
/**
* Creates a vector of the given type with four entries in it.
*/
template<class A>
-std::vector<A> toVector(A entry, A entry2, A entry3, A entry4) {
+std::vector<A>
+toVector(A entry, A entry2, A entry3, A entry4) {
std::vector<A> entries;
entries.push_back(entry);
entries.push_back(entry2);
entries.push_back(entry3);
entries.push_back(entry4);
return entries;
-};
+}
template<class A>
-std::string dumpVector(const std::vector<A>& vec) {
+std::string
+dumpVector(const std::vector<A>& vec) {
std::ostringstream ost;
for (uint32_t i = 0; i < vec.size(); ++i) {
if (!ost.str().empty()) {
@@ -65,27 +70,5 @@ std::string dumpVector(const std::vector<A>& vec) {
return ost.str();
}
-template<class A>
-bool hasItem(const std::vector<A>& vec, A entry) {
- for (uint32_t i = 0; i < vec.size(); ++i) {
- if (vec[i] == entry) {
- return true;
- }
- }
-
- return false;
-}
-
-template<typename T>
-struct ConfigReader : public T::Subscriber, public T
-{
- T& config; // Alter to inherit T to simplify but kept this for compatability
-
- ConfigReader(const std::string& configId) : config(*this) {
- T::subscribe(configId, *this);
- }
- void configure(const T& c) { config = c; }
-};
-
}
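Note: utils.h keeps the toVector()/dumpVector() helpers (now without the stray semicolons after the function bodies) and drops hasItem() and ConfigReader. A small usage sketch, assuming the header is included via the path shown in the diff; the exact separator printed by dumpVector() is whatever that helper uses:

```cpp
#include <vespa/storage/storageutil/utils.h>  // path assumed from the diff above
#include <cstdint>
#include <iostream>

int main() {
    auto nodes = storage::toVector<uint16_t>(1, 4, 7);   // three-entry overload
    std::cout << storage::dumpVector(nodes) << '\n';     // entries joined by dumpVector's separator
    return 0;
}
```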
diff --git a/storage/src/vespa/storage/tools/getidealstate.cpp b/storage/src/vespa/storage/tools/getidealstate.cpp
index 8b120924aaa..9e80517f4f7 100644
--- a/storage/src/vespa/storage/tools/getidealstate.cpp
+++ b/storage/src/vespa/storage/tools/getidealstate.cpp
@@ -64,18 +64,13 @@ Options::Options(int argc, const char* const* argv)
Options::~Options() {}
-void processBucket(const lib::Distribution& distribution,
- const lib::ClusterState& clusterState,
- const std::string& upStates,
- const document::BucketId& bucket)
+void processBucket(const lib::Distribution& distribution, const lib::ClusterState& clusterState,
+ const std::string& upStates, const document::BucketId& bucket)
{
std::ostringstream ost;
- std::vector<uint16_t> storageNodes(distribution.getIdealStorageNodes(
- clusterState, bucket, upStates.c_str()));
- uint16_t distributorNode(distribution.getIdealDistributorNode(
- clusterState, bucket, upStates.c_str()));
- ost << bucket << " distributor: " << distributorNode
- << ", storage:";
+ std::vector<uint16_t> storageNodes(distribution.getIdealStorageNodes(clusterState, bucket, upStates.c_str()));
+ uint16_t distributorNode(distribution.getIdealDistributorNode(clusterState, bucket, upStates.c_str()));
+ ost << bucket << " distributor: " << distributorNode << ", storage:";
for (uint32_t i=0; i<storageNodes.size(); ++i) {
ost << " " << storageNodes[i];
}
diff --git a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp
index 9b7c4919403..4cc32a2fc3d 100644
--- a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp
+++ b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp
@@ -4,17 +4,14 @@
namespace storage::framework {
-HtmlStatusReporter::HtmlStatusReporter(vespalib::stringref id,
- vespalib::stringref name)
+HtmlStatusReporter::HtmlStatusReporter(vespalib::stringref id, vespalib::stringref name)
: StatusReporter(id, name)
-{
-}
+{ }
HtmlStatusReporter::~HtmlStatusReporter() = default;
void
-HtmlStatusReporter::reportHtmlHeader(std::ostream& out,
- const HttpUrlPath& path) const
+HtmlStatusReporter::reportHtmlHeader(std::ostream& out, const HttpUrlPath& path) const
{
out << "<html>\n"
<< "<head>\n"
@@ -26,8 +23,7 @@ HtmlStatusReporter::reportHtmlHeader(std::ostream& out,
}
void
-HtmlStatusReporter::reportHtmlFooter(std::ostream& out,
- const HttpUrlPath&) const
+HtmlStatusReporter::reportHtmlFooter(std::ostream& out, const HttpUrlPath&) const
{
out << "</body>\n</html>\n";
}
@@ -39,8 +35,7 @@ HtmlStatusReporter::getReportContentType(const HttpUrlPath&) const
}
bool
-HtmlStatusReporter::reportStatus(std::ostream& out,
- const HttpUrlPath& path) const
+HtmlStatusReporter::reportStatus(std::ostream& out, const HttpUrlPath& path) const
{
if (!isValidStatusRequest()) return false;
reportHtmlHeader(out, path);
diff --git a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h
index 4ffba20a3fa..ee3d65b0de3 100644
--- a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h
+++ b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h
@@ -29,8 +29,7 @@ struct HtmlStatusReporter : public StatusReporter {
* some code in the <head></head> part of the HTML, such as javascript
* functions.
*/
- virtual void reportHtmlHeaderAdditions(std::ostream&,
- const HttpUrlPath&) const {}
+ virtual void reportHtmlHeaderAdditions(std::ostream&, const HttpUrlPath&) const {}
/**
* Write a default HTML header. It writes the start of an HTML
diff --git a/storageserver/src/tests/testhelper.cpp b/storageserver/src/tests/testhelper.cpp
index f36253ce9d3..73a9938e4c5 100644
--- a/storageserver/src/tests/testhelper.cpp
+++ b/storageserver/src/tests/testhelper.cpp
@@ -33,7 +33,6 @@ vdstestlib::DirConfig getStandardConfig(bool storagenode) {
config = &dc.addConfig("messagebus");
config = &dc.addConfig("stor-prioritymapping");
config = &dc.addConfig("stor-bucketdbupdater");
- config = &dc.addConfig("stor-bucket-init");
config = &dc.addConfig("metricsmanager");
config->set("consumer[1]");
config->set("consumer[0].name", "\"status\"");
@@ -62,8 +61,6 @@ vdstestlib::DirConfig getStandardConfig(bool storagenode) {
config->set("revert_time_period", "2000000000");
config = &dc.addConfig("stor-bouncer");
config = &dc.addConfig("stor-integritychecker");
- config = &dc.addConfig("stor-bucketmover");
- config = &dc.addConfig("stor-messageforwarder");
config = &dc.addConfig("stor-server");
config->set("enable_dead_lock_detector", "false");
config->set("enable_dead_lock_detector_warnings", "false");
diff --git a/storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp b/storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp
index 7096249815f..8d19d7a356d 100644
--- a/storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp
+++ b/storageserver/src/vespa/storageserver/app/servicelayerprocess.cpp
@@ -16,7 +16,8 @@ namespace storage {
namespace {
-ContentBucketDbOptions bucket_db_options_from_config(const config::ConfigUri& config_uri) {
+ContentBucketDbOptions
+bucket_db_options_from_config(const config::ConfigUri& config_uri) {
using vespa::config::content::core::StorServerConfig;
auto server_config = config::ConfigGetter<StorServerConfig>::getConfig(
config_uri.getConfigId(), config_uri.getContext());
diff --git a/streamingvisitors/src/tests/charbuffer/CMakeLists.txt b/streamingvisitors/src/tests/charbuffer/CMakeLists.txt
index 5d0c0068d37..0cefae9d4c1 100644
--- a/streamingvisitors/src/tests/charbuffer/CMakeLists.txt
+++ b/streamingvisitors/src/tests/charbuffer/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(vsm_charbuffer_test_app TEST
SOURCES
- charbuffer.cpp
+ charbuffer_test.cpp
DEPENDS
streamingvisitors
)
diff --git a/streamingvisitors/src/tests/charbuffer/charbuffer.cpp b/streamingvisitors/src/tests/charbuffer/charbuffer_test.cpp
index 736d35459cb..736d35459cb 100644
--- a/streamingvisitors/src/tests/charbuffer/charbuffer.cpp
+++ b/streamingvisitors/src/tests/charbuffer/charbuffer_test.cpp
diff --git a/streamingvisitors/src/tests/docsum/CMakeLists.txt b/streamingvisitors/src/tests/docsum/CMakeLists.txt
index 87c46409053..f8ff09d8063 100644
--- a/streamingvisitors/src/tests/docsum/CMakeLists.txt
+++ b/streamingvisitors/src/tests/docsum/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(vsm_docsum_test_app TEST
SOURCES
- docsum.cpp
+ docsum_test.cpp
DEPENDS
streamingvisitors
)
diff --git a/streamingvisitors/src/tests/docsum/docsum.cpp b/streamingvisitors/src/tests/docsum/docsum_test.cpp
index b99ee360bee..b99ee360bee 100644
--- a/streamingvisitors/src/tests/docsum/docsum.cpp
+++ b/streamingvisitors/src/tests/docsum/docsum_test.cpp
diff --git a/streamingvisitors/src/tests/document/CMakeLists.txt b/streamingvisitors/src/tests/document/CMakeLists.txt
index 5ea12dc5e2d..279226e5d90 100644
--- a/streamingvisitors/src/tests/document/CMakeLists.txt
+++ b/streamingvisitors/src/tests/document/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(vsm_document_test_app TEST
SOURCES
- document.cpp
+ document_test.cpp
DEPENDS
streamingvisitors
)
diff --git a/streamingvisitors/src/tests/document/document.cpp b/streamingvisitors/src/tests/document/document_test.cpp
index acebd9ed4a4..acebd9ed4a4 100644
--- a/streamingvisitors/src/tests/document/document.cpp
+++ b/streamingvisitors/src/tests/document/document_test.cpp
diff --git a/streamingvisitors/src/tests/textutil/CMakeLists.txt b/streamingvisitors/src/tests/textutil/CMakeLists.txt
index 59817d01137..56bf2ede996 100644
--- a/streamingvisitors/src/tests/textutil/CMakeLists.txt
+++ b/streamingvisitors/src/tests/textutil/CMakeLists.txt
@@ -1,7 +1,7 @@
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_executable(vsm_textutil_test_app TEST
SOURCES
- textutil.cpp
+ textutil_test.cpp
DEPENDS
streamingvisitors
)
diff --git a/streamingvisitors/src/tests/textutil/textutil.cpp b/streamingvisitors/src/tests/textutil/textutil_test.cpp
index aeff4600781..aeff4600781 100644
--- a/streamingvisitors/src/tests/textutil/textutil.cpp
+++ b/streamingvisitors/src/tests/textutil/textutil_test.cpp
diff --git a/tenant-cd-api/pom.xml b/tenant-cd-api/pom.xml
index aa795dfcb97..33bafb2909a 100644
--- a/tenant-cd-api/pom.xml
+++ b/tenant-cd-api/pom.xml
@@ -60,7 +60,7 @@
<dependency>
<groupId>org.opentest4j</groupId>
<artifactId>opentest4j</artifactId>
- <version>1.2.0</version>
+ <version>${opentest4j.vespa.version}</version>
</dependency>
<dependency>
<groupId>org.apiguardian</groupId>
diff --git a/testutil/pom.xml b/testutil/pom.xml
index f3bc9cb2212..7f57b6defee 100644
--- a/testutil/pom.xml
+++ b/testutil/pom.xml
@@ -13,7 +13,7 @@
<packaging>jar</packaging>
<version>8-SNAPSHOT</version>
<name>${project.artifactId}</name>
- <description>Library of useful Hamcrest matchers.</description>
+ <description>Library of useful test utilities.</description>
<dependencies>
<dependency>
<groupId>com.google.guava</groupId>
@@ -34,12 +34,7 @@
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- <scope>compile</scope>
- </dependency>
- <dependency>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
diff --git a/testutil/src/main/java/com/yahoo/test/LinePatternMatcher.java b/testutil/src/main/java/com/yahoo/test/LinePatternMatcher.java
index 855a0bc851e..c870c0b29b4 100644
--- a/testutil/src/main/java/com/yahoo/test/LinePatternMatcher.java
+++ b/testutil/src/main/java/com/yahoo/test/LinePatternMatcher.java
@@ -3,7 +3,6 @@ package com.yahoo.test;
import org.hamcrest.BaseMatcher;
import org.hamcrest.Description;
-import org.hamcrest.Factory;
import org.hamcrest.Matcher;
/**
@@ -35,8 +34,7 @@ public class LinePatternMatcher extends BaseMatcher<String> {
return false;
}
- @Factory
- public static <T> Matcher<String> containsLineWithPattern(String pattern) {
+ public static Matcher<String> containsLineWithPattern(String pattern) {
return new LinePatternMatcher(pattern);
}
diff --git a/testutil/src/main/java/com/yahoo/vespa/test/file/UnixUidGidAttributeProvider.java b/testutil/src/main/java/com/yahoo/vespa/test/file/UnixUidGidAttributeProvider.java
index 903706a503d..25f66bf5cb0 100644
--- a/testutil/src/main/java/com/yahoo/vespa/test/file/UnixUidGidAttributeProvider.java
+++ b/testutil/src/main/java/com/yahoo/vespa/test/file/UnixUidGidAttributeProvider.java
@@ -22,7 +22,6 @@ import com.google.common.jimfs.File;
import com.google.common.jimfs.FileLookup;
import java.nio.file.attribute.FileAttributeView;
-import java.nio.file.attribute.FileTime;
import java.nio.file.attribute.GroupPrincipal;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.UserPrincipal;
@@ -79,41 +78,39 @@ public class UnixUidGidAttributeProvider extends AttributeProvider {
@SuppressWarnings("unchecked")
@Override
public Object get(File file, String attribute) {
- switch (attribute) {
- case "uid":
- UserPrincipal user = (UserPrincipal) file.getAttribute("owner", "owner");
- return getUniqueId(user);
- case "gid":
- GroupPrincipal group = (GroupPrincipal) file.getAttribute("posix", "group");
- return getUniqueId(group);
- case "mode":
- Set<PosixFilePermission> permissions =
- (Set<PosixFilePermission>) file.getAttribute("posix", "permissions");
- return toMode(permissions);
- case "ctime":
- return FileTime.fromMillis(file.getCreationTime());
- case "rdev":
- return 0L;
- case "dev":
- return 1L;
- case "ino":
- return file.id();
- case "nlink":
- return file.links();
- default:
- return null;
- }
+ return switch (attribute) {
+ case "uid" -> {
+ var user = (UserPrincipal) file.getAttribute("owner", "owner");
+ yield getUniqueId(user);
+ }
+ case "gid" -> {
+ var group = (GroupPrincipal) file.getAttribute("posix", "group");
+ yield getUniqueId(group);
+ }
+ case "mode" -> {
+ var permissions = (Set<PosixFilePermission>) file.getAttribute("posix", "permissions");
+ yield toMode(permissions);
+ }
+ case "ctime" -> file.getCreationTime();
+ case "rdev" -> 0L;
+ case "dev" -> 1L;
+ case "ino" -> file.id();
+ case "nlink" -> file.links();
+ default -> null;
+ };
}
@Override
public void set(File file, String view, String attribute, Object value, boolean create) {
switch (attribute) {
- case "uid":
+ case "uid" -> {
file.setAttribute("owner", "owner", new BasicUserPrincipal(String.valueOf(value)));
return;
- case "gid":
+ }
+ case "gid" -> {
file.setAttribute("posix", "group", new BasicGroupPrincipal(String.valueOf(value)));
return;
+ }
}
throw unsettable(view, attribute, create);
}
@@ -124,35 +121,16 @@ public class UnixUidGidAttributeProvider extends AttributeProvider {
for (PosixFilePermission permission : permissions) {
checkNotNull(permission);
switch (permission) {
- case OWNER_READ:
- result |= 0400;
- break;
- case OWNER_WRITE:
- result |= 0200;
- break;
- case OWNER_EXECUTE:
- result |= 0100;
- break;
- case GROUP_READ:
- result |= 0040;
- break;
- case GROUP_WRITE:
- result |= 0020;
- break;
- case GROUP_EXECUTE:
- result |= 0010;
- break;
- case OTHERS_READ:
- result |= 0004;
- break;
- case OTHERS_WRITE:
- result |= 0002;
- break;
- case OTHERS_EXECUTE:
- result |= 0001;
- break;
- default:
- throw new AssertionError(); // no other possible values
+ case OWNER_READ -> result |= 0400;
+ case OWNER_WRITE -> result |= 0200;
+ case OWNER_EXECUTE -> result |= 0100;
+ case GROUP_READ -> result |= 0040;
+ case GROUP_WRITE -> result |= 0020;
+ case GROUP_EXECUTE -> result |= 0010;
+ case OTHERS_READ -> result |= 0004;
+ case OTHERS_WRITE -> result |= 0002;
+ case OTHERS_EXECUTE -> result |= 0001;
+ default -> throw new AssertionError(); // no other possible values
}
}
return result;
diff --git a/vdslib/src/tests/distribution/CMakeLists.txt b/vdslib/src/tests/distribution/CMakeLists.txt
index c4ae8b0291c..3f3be1e1cad 100644
--- a/vdslib/src/tests/distribution/CMakeLists.txt
+++ b/vdslib/src/tests/distribution/CMakeLists.txt
@@ -3,7 +3,6 @@ vespa_add_library(vdslib_testdistribution
SOURCES
distributiontest.cpp
grouptest.cpp
- idealnodecalculatorimpltest.cpp
DEPENDS
vdslib
GTest::GTest
diff --git a/vdslib/src/tests/distribution/distributiontest.cpp b/vdslib/src/tests/distribution/distributiontest.cpp
index b5c756aece9..ce07711a069 100644
--- a/vdslib/src/tests/distribution/distributiontest.cpp
+++ b/vdslib/src/tests/distribution/distributiontest.cpp
@@ -5,7 +5,6 @@
#include <vespa/config/subscription/configuri.h>
#include <vespa/fastos/file.h>
#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/distribution/idealnodecalculator.h>
#include <vespa/vdslib/state/clusterstate.h>
#include <vespa/vdslib/state/random.h>
#include <vespa/vespalib/data/slime/slime.h>
@@ -53,9 +52,7 @@ TEST(DistributionTest, test_verify_java_distributions)
long maxBucket = 1;
long mask = 0;
- for (uint32_t distributionBits = 0; distributionBits <= 32;
- ++distributionBits)
- {
+ for (uint32_t distributionBits = 0; distributionBits <= 32; ++distributionBits) {
state.setDistributionBitCount(distributionBits);
RandomGen randomizer(distributionBits);
for (uint32_t bucketIndex = 0; bucketIndex < 64; ++bucketIndex) {
@@ -66,11 +63,8 @@ TEST(DistributionTest, test_verify_java_distributions)
bucketId = randomizer.nextUint64();
}
document::BucketId bucket(distributionBits, bucketId);
- for (uint32_t redundancy = 1;
- redundancy <= distr.getRedundancy(); ++redundancy)
- {
- int distributorIndex = distr.getIdealDistributorNode(
- state, bucket, "uim");
+ for (uint32_t redundancy = 1; redundancy <= distr.getRedundancy(); ++redundancy) {
+ int distributorIndex = distr.getIdealDistributorNode(state, bucket, "uim");
of << distributionBits << " " << (bucketId & mask)
<< " " << redundancy << " " << distributorIndex << "\n";
}
@@ -89,6 +83,51 @@ TEST(DistributionTest, test_verify_java_distributions)
namespace {
+/**
+* A list of ideal nodes, sorted in preferred order. Wraps a vector to hide
+* unneeded details, and make it easily printable.
+*/
+class IdealNodeList : public document::Printable {
+public:
+ IdealNodeList() noexcept;
+ ~IdealNodeList();
+
+ void push_back(const Node& node) {
+ _idealNodes.push_back(node);
+ }
+
+ const Node& operator[](uint32_t i) const noexcept { return _idealNodes[i]; }
+ uint32_t size() const noexcept { return _idealNodes.size(); }
+ bool contains(const Node& n) const noexcept {
+ return indexOf(n) != 0xffff;
+ }
+ uint16_t indexOf(const Node& n) const noexcept {
+ for (uint16_t i=0; i<_idealNodes.size(); ++i) {
+ if (n == _idealNodes[i]) return i;
+ }
+ return 0xffff;
+ }
+
+ void print(std::ostream& out, bool, const std::string &) const override;
+private:
+ std::vector<Node> _idealNodes;
+};
+
+IdealNodeList::IdealNodeList() noexcept = default;
+IdealNodeList::~IdealNodeList() = default;
+
+void
+IdealNodeList::print(std::ostream& out, bool , const std::string &) const
+{
+ out << "[";
+ for (uint32_t i=0; i<_idealNodes.size(); ++i) {
+ if (i != 0) out << ", ";
+ out << _idealNodes[i];
+ }
+ out << "]";
+}
+
+
struct ExpectedResult {
ExpectedResult() { }
ExpectedResult(const ExpectedResult &) = default;
@@ -102,22 +141,16 @@ struct ExpectedResult {
};
void
-verifyJavaDistribution(const vespalib::string& name,
- const ClusterState& state,
- const Distribution& distribution,
- const NodeType& nodeType,
- uint16_t redundancy,
- uint16_t nodeCount,
- vespalib::stringref upStates,
- const std::vector<ExpectedResult> results)
+verifyJavaDistribution(const vespalib::string& name, const ClusterState& state, const Distribution& distribution,
+ const NodeType& nodeType, uint16_t redundancy, uint16_t nodeCount,
+ vespalib::stringref upStates, const std::vector<ExpectedResult> results)
{
(void) nodeCount;
for (uint32_t i=0, n=results.size(); i<n; ++i) {
std::string testId = name + " " + results[i].bucket.toString();
try {
std::vector<uint16_t> nvect;
- distribution.getIdealNodes(nodeType, state, results[i].bucket,
- nvect, upStates.data(), redundancy);
+ distribution.getIdealNodes(nodeType, state, results[i].bucket, nvect, upStates.data(), redundancy);
IdealNodeList nodes;
for (uint32_t j=0, m=nvect.size(); j<m; ++j) {
nodes.push_back(Node(nodeType, nvect[j]));
@@ -155,8 +188,7 @@ auto readFile(const std::string & filename) {
TEST(DistributionTest, test_verify_java_distributions_2)
{
- vespalib::DirectoryList files(
- vespalib::listDirectory("distribution/testdata"));
+ vespalib::DirectoryList files(vespalib::listDirectory("distribution/testdata"));
for (uint32_t i=0, n=files.size(); i<n; ++i) {
size_t pos = files[i].find(".java.results");
if (pos == vespalib::string::npos || pos + 13 != files[i].size()) {
@@ -189,8 +221,7 @@ TEST(DistributionTest, test_verify_java_distributions_2)
ClusterState cs(c["cluster-state"].asString().make_string());
std::string distConfig(c["distribution"].asString().make_string());
Distribution d(distConfig);
- const NodeType& nt(
- NodeType::get(c["node-type"].asString().make_string()));
+ const NodeType& nt(NodeType::get(c["node-type"].asString().make_string()));
uint32_t redundancy(c["redundancy"].asLong());
uint32_t nodeCount(c["node-count"].asLong());
vespalib::string upStates(c["up-states"].asString().make_string());
@@ -209,8 +240,7 @@ TEST(DistributionTest, test_verify_java_distributions_2)
}
results.push_back(result);
}
- verifyJavaDistribution(name, cs, d, nt, redundancy, nodeCount,
- upStates, results);
+ verifyJavaDistribution(name, cs, d, nt, redundancy, nodeCount, upStates, results);
//std::cerr << name << ": Verified " << results.size() << " tests.\n";
}
}
@@ -223,8 +253,7 @@ TEST(DistributionTest, test_unchanged_distribution)
std::ifstream in("distribution/testdata/41-distributordistribution");
for (unsigned i = 0; i < 64_Ki; i++) {
- uint16_t node = distr.getIdealDistributorNode(
- state, document::BucketId(16, i), "u");
+ uint16_t node = distr.getIdealDistributorNode(state, document::BucketId(16, i), "u");
char buf[100];
in.getline(buf, 100);
@@ -272,9 +301,7 @@ struct MyTest {
document::BucketId bucket(16, i);
std::vector<uint16_t> nodes;
ClusterState clusterState(_state);
- _distribution->getIdealNodes(
- *_nodeType, clusterState, bucket, nodes,
- _upStates, _redundancy);
+ _distribution->getIdealNodes(*_nodeType, clusterState, bucket, nodes, _upStates, _redundancy);
for (uint32_t j=0; j<nodes.size(); ++j) {
++result[nodes[j]];
}
@@ -293,8 +320,7 @@ MyTest::MyTest()
{ }
MyTest::~MyTest() = default;
-std::vector<uint16_t> createNodeCountList(const std::string& source,
- std::vector<uint16_t>& vals) {
+std::vector<uint16_t> createNodeCountList(const std::string& source, std::vector<uint16_t>& vals) {
std::vector<uint16_t> result(vals.size(), 0);
vespalib::StringTokenizer st(source, " ");
for (uint32_t i=0; i<st.size(); ++i) {
@@ -375,15 +401,9 @@ TEST(DistributionTest, testHighSplitBit)
document::BucketId bid1 = document::BucketId(bits, base);
document::BucketId bid2 = document::BucketId(bits, base);
- std::vector<uint16_t> nodes1 =
- distr.getIdealStorageNodes(state,
- bid1,
- "u");
+ std::vector<uint16_t> nodes1 = distr.getIdealStorageNodes(state, bid1, "u");
- std::vector<uint16_t> nodes2 =
- distr.getIdealStorageNodes(state,
- bid2,
- "u");
+ std::vector<uint16_t> nodes2 = distr.getIdealStorageNodes(state, bid2, "u");
ost1 << bid1 << " vs. " << bid2 << ": ";
ost2 << bid1 << " vs. " << bid2 << ": ";
@@ -424,16 +444,14 @@ TEST(DistributionTest, test_distribution)
s1 << "storage:" << n << std::endl;
ClusterState systemState(s1.str());
- Distribution distr(
- Distribution::getDefaultDistributionConfig(3, n));
+ Distribution distr(Distribution::getDefaultDistributionConfig(3, n));
std::vector<std::pair<uint64_t, std::vector<uint16_t> > > _distribution(b);
std::vector<int> _nodeCount(n, 0);
for (int i = 0; i < b; i++) {
_distribution[i].first = i;
- _distribution[i].second = distr.getIdealStorageNodes(
- systemState, document::BucketId(26, i));
+ _distribution[i].second = distr.getIdealStorageNodes(systemState, document::BucketId(26, i));
sort(_distribution[i].second.begin(), _distribution[i].second.end());
auto unique_nodes = std::distance(_distribution[i].second.begin(), unique(_distribution[i].second.begin(), _distribution[i].second.end()));
_distribution[i].second.resize(unique_nodes);
@@ -469,9 +487,7 @@ TEST(DistributionTest, test_move)
{
ClusterState systemState("storage:3");
document::BucketId bucket(16, 0x8b4f67ae);
-
Distribution distr(Distribution::getDefaultDistributionConfig(2, 3));
-
res = distr.getIdealStorageNodes(systemState, bucket);
EXPECT_EQ(size_t(2), res.size());
}
@@ -479,11 +495,8 @@ TEST(DistributionTest, test_move)
std::vector<uint16_t> res2;
{
ClusterState systemState("storage:4");
-
Distribution distr(Distribution::getDefaultDistributionConfig(2, 4));
-
document::BucketId bucket(16, 0x8b4f67ae);
-
res2 = distr.getIdealStorageNodes(systemState, bucket);
EXPECT_EQ(size_t(2), res2.size());
}
@@ -506,8 +519,7 @@ TEST(DistributionTest, test_move_constraints)
std::vector<std::vector<uint16_t> > initBuckets(10000);
for (unsigned i = 0; i < initBuckets.size(); i++) {
- initBuckets[i] = distr.getIdealStorageNodes(
- clusterState, document::BucketId(16, i));
+ initBuckets[i] = distr.getIdealStorageNodes(clusterState, document::BucketId(16, i));
sort(initBuckets[i].begin(), initBuckets[i].end());
}
@@ -517,8 +529,7 @@ TEST(DistributionTest, test_move_constraints)
ClusterState systemState("storage:11 .10.s:d");
for (unsigned i = 0; i < addedDownBuckets.size(); i++) {
- addedDownBuckets[i] = distr.getIdealStorageNodes(
- systemState, document::BucketId(16, i));
+ addedDownBuckets[i] = distr.getIdealStorageNodes(systemState, document::BucketId(16, i));
sort(addedDownBuckets[i].begin(), addedDownBuckets[i].end());
}
for (unsigned i = 0; i < initBuckets.size(); i++) {
@@ -541,15 +552,14 @@ TEST(DistributionTest, test_move_constraints)
ClusterState systemState("storage:10 .0.s:d");
for (unsigned i = 0; i < removed0Buckets.size(); i++) {
- removed0Buckets[i] = distr.getIdealStorageNodes(
- systemState, document::BucketId(16, i));
+ removed0Buckets[i] = distr.getIdealStorageNodes(systemState, document::BucketId(16, i));
sort(removed0Buckets[i].begin(), removed0Buckets[i].end());
}
for (unsigned i = 0; i < initBuckets.size(); i++) {
std::vector<uint16_t> movedAway;
set_difference(initBuckets[i].begin(), initBuckets[i].end(),
- removed0Buckets[i].begin(), removed0Buckets[i].end(),
- back_inserter(movedAway));
+ removed0Buckets[i].begin(), removed0Buckets[i].end(),
+ back_inserter(movedAway));
if (movedAway.size() > 0) {
if (movedAway[0] != 0) {
std::cerr << i << ": ";
@@ -572,15 +582,14 @@ TEST(DistributionTest, test_move_constraints)
ClusterState systemState("storage:11");
for (unsigned i = 0; i < added10Buckets.size(); i++) {
- added10Buckets[i] = distr.getIdealStorageNodes(
- systemState, document::BucketId(16, i));
+ added10Buckets[i] = distr.getIdealStorageNodes(systemState, document::BucketId(16, i));
sort(added10Buckets[i].begin(), added10Buckets[i].end());
}
for (unsigned i = 0; i < initBuckets.size(); i++) {
std::vector<uint16_t> movedInto;
std::set_difference(added10Buckets[i].begin(), added10Buckets[i].end(),
- initBuckets[i].begin(), initBuckets[i].end(),
- std::inserter(movedInto, movedInto.begin()));
+ initBuckets[i].begin(), initBuckets[i].end(),
+ std::inserter(movedInto, movedInto.begin()));
if (movedInto.size() > 0) {
ASSERT_EQ((size_t)1, movedInto.size());
EXPECT_EQ((uint16_t)10, movedInto[0]);
@@ -601,11 +610,9 @@ TEST(DistributionTest, test_distribution_bits)
for (unsigned i = 0; i < 100; i++) {
int val = rand();
- uint32_t index = distr.getIdealDistributorNode(
- state1, document::BucketId(19, val), "u");
+ uint32_t index = distr.getIdealDistributorNode(state1, document::BucketId(19, val), "u");
ost1 << index << " ";
- index = distr.getIdealDistributorNode(
- state2, document::BucketId(19, val), "u");
+ index = distr.getIdealDistributorNode(state2, document::BucketId(19, val), "u");
ost2 << index << " ";
}
@@ -620,10 +627,8 @@ TEST(DistributionTest, test_redundancy_hierarchical_distribution)
Distribution distr2(Distribution::getDefaultDistributionConfig(2, 10));
for (unsigned i = 0; i < 100; i++) {
- uint16_t d1 = distr1.getIdealDistributorNode(
- state, document::BucketId(16, i), "u");
- uint16_t d2 = distr2.getIdealDistributorNode(
- state, document::BucketId(16, i), "u");
+ uint16_t d1 = distr1.getIdealDistributorNode(state, document::BucketId(16, i), "u");
+ uint16_t d2 = distr2.getIdealDistributorNode(state, document::BucketId(16, i), "u");
EXPECT_EQ(d1, d2);
}
}
@@ -653,20 +658,17 @@ TEST(DistributionTest, test_hierarchical_distribution)
ClusterState state("distributor:6 storage:6");
for (uint32_t i = 0; i < 3; ++i) {
- EXPECT_EQ(
- vespalib::string("rack0"),
- distr.getNodeGraph().getGroupForNode(i)->getName());
+ EXPECT_EQ(vespalib::string("rack0"),
+ distr.getNodeGraph().getGroupForNode(i)->getName());
}
for (uint32_t i = 3; i < 6; ++i) {
- EXPECT_EQ(
- vespalib::string("rack1"),
- distr.getNodeGraph().getGroupForNode(i)->getName());
+ EXPECT_EQ(vespalib::string("rack1"),
+ distr.getNodeGraph().getGroupForNode(i)->getName());
}
std::vector<int> mainNode(6);
for (uint32_t i=0; i<100; ++i) {
- std::vector<uint16_t> nodes = distr.getIdealStorageNodes(
- state, document::BucketId(16, i), "u");
+ std::vector<uint16_t> nodes = distr.getIdealStorageNodes(state, document::BucketId(16, i), "u");
ASSERT_EQ((size_t) 4, nodes.size());
EXPECT_LT(nodes[0], mainNode.size());
++mainNode[nodes[0]];
@@ -710,8 +712,7 @@ TEST(DistributionTest, test_group_capacity)
int group0count = 0;
int group1count = 0;
for (uint32_t i = 0; i < 1000; i++) {
- std::vector<uint16_t> nodes = distr.getIdealStorageNodes(
- state, document::BucketId(16, i), "u");
+ std::vector<uint16_t> nodes = distr.getIdealStorageNodes(state, document::BucketId(16, i), "u");
if (nodes[0] == 0 || nodes[0] == 1 || nodes[0] == 2) {
group0count++;
}
@@ -794,14 +795,12 @@ TEST(DistributionTest, test_hierarchical_no_redistribution)
EXPECT_EQ(numBuckets, v.size());
v.clear();
- state.setNodeState(Node(NodeType::STORAGE, 0),
- NodeState(NodeType::STORAGE, State::DOWN));
+ state.setNodeState(Node(NodeType::STORAGE, 0),NodeState(NodeType::STORAGE, State::DOWN));
std::vector< std::vector<uint16_t> > distr2(4);
for (size_t i = 0; i < numBuckets; i++) {
- nodes = distribution.getIdealStorageNodes(
- state, document::BucketId(16, i), "u");
+ nodes = distribution.getIdealStorageNodes(state, document::BucketId(16, i), "u");
for (uint16_t j=0; j<nodes.size(); ++j) {
ASSERT_TRUE(0 != nodes[j]);
distr2[nodes[j]].push_back(i);
@@ -1010,7 +1009,7 @@ group[2].nodes[1].retired false
auto nodes_of = [&](uint32_t bucket){
std::vector<uint16_t> actual;
- distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, bucket), actual);
+ distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, bucket), actual, "uim");
return actual;
};
@@ -1071,9 +1070,13 @@ TEST(DistributionTest, DISABLED_benchmark_ideal_state_for_many_groups) {
std::vector<uint16_t> actual;
uint32_t bucket = 0;
auto min_time = vespalib::BenchmarkTimer::benchmark([&]{
- distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, (bucket++ & 0xffffU)), actual);
+ distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, (bucket++ & 0xffffU)), actual, "uim");
}, 5.0);
fprintf(stderr, "%.10f seconds\n", min_time);
}
+TEST(DistributionTest, control_size_of_IndexList) {
+ EXPECT_EQ(24u, sizeof(Distribution::IndexList));
+}
+
}
diff --git a/vdslib/src/tests/distribution/idealnodecalculatorimpltest.cpp b/vdslib/src/tests/distribution/idealnodecalculatorimpltest.cpp
deleted file mode 100644
index 4159491097c..00000000000
--- a/vdslib/src/tests/distribution/idealnodecalculatorimpltest.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/config-stor-distribution.h>
-#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h>
-#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/state/clusterstate.h>
-#include <vespa/vespalib/gtest/gtest.h>
-
-namespace storage::lib {
-
-/**
- * Class is just a wrapper for distribution, so little needs to be tested. Just
- * that:
- *
- * - get ideal nodes calls gets propagated correctly.
- * - Changes in distribution/cluster state is picked up.
- */
-
-TEST(IdealNodeCalculatorImplTest, test_normal_usage)
-{
- ClusterState state("storage:10");
- Distribution distr(Distribution::getDefaultDistributionConfig(3, 10));
- IdealNodeCalculatorImpl impl;
- IdealNodeCalculatorConfigurable& configurable(impl);
- IdealNodeCalculator& calc(impl);
- configurable.setDistribution(distr);
- configurable.setClusterState(state);
-
- std::string expected("[storage.8, storage.9, storage.6]");
- EXPECT_EQ(
- expected,
- calc.getIdealStorageNodes(document::BucketId(16, 5)).toString());
-}
-
-}
diff --git a/vdslib/src/tests/state/clusterstatetest.cpp b/vdslib/src/tests/state/clusterstatetest.cpp
index a08ec007f55..0b278177453 100644
--- a/vdslib/src/tests/state/clusterstatetest.cpp
+++ b/vdslib/src/tests/state/clusterstatetest.cpp
@@ -13,10 +13,10 @@ using ::testing::ContainsRegex;
namespace storage::lib {
-#define VERIFY3(source, result, type, typestr) { \
+#define VERIFY3(source, result, typestr) { \
vespalib::asciistream ost; \
try { \
- state->serialize(ost, type); \
+ state->serialize(ost); \
} catch (std::exception& e) { \
FAIL() << ("Failed to serialize system state " \
+ state->toString(true) + " in " + std::string(typestr) \
@@ -26,24 +26,18 @@ namespace storage::lib {
vespalib::string(typestr) + " \"" + ost.str() + "\"") << state->toString(true); \
}
-#define VERIFY2(serialized, result, testOld, testNew) { \
+#define VERIFY2(serialized, result) { \
std::unique_ptr<ClusterState> state; \
try { \
state.reset(new ClusterState(serialized)); \
} catch (std::exception& e) { \
- FAIL() << ("Failed to parse '" + std::string(serialized) \
- + "': " + e.what()); \
+ FAIL() << ("Failed to parse '" + std::string(serialized) + "': " + e.what()); \
} \
- if (testOld) VERIFY3(serialized, result, true, "Old") \
- if (testNew) VERIFY3(serialized, result, false, "New") \
+ VERIFY3(serialized, result, "New") \
}
-#define VERIFYSAMEOLD(serialized) VERIFY2(serialized, serialized, true, false)
-#define VERIFYOLD(serialized, result) VERIFY2(serialized, result, true, false)
-#define VERIFYSAMENEW(serialized) VERIFY2(serialized, serialized, false, true)
-#define VERIFYNEW(serialized, result) VERIFY2(serialized, result, false, true)
-#define VERIFYSAME(serialized) VERIFY2(serialized, serialized, true, true)
-#define VERIFY(serialized, result) VERIFY2(serialized, result, true, true)
+#define VERIFYSAMENEW(serialized) VERIFY2(serialized, serialized)
+#define VERIFYNEW(serialized, result) VERIFY2(serialized, result)
#define VERIFY_FAIL(serialized, error) { \
try{ \
diff --git a/vdslib/src/vespa/vdslib/distribution/CMakeLists.txt b/vdslib/src/vespa/vdslib/distribution/CMakeLists.txt
index 0d9342291e8..58ec94eec9c 100644
--- a/vdslib/src/vespa/vdslib/distribution/CMakeLists.txt
+++ b/vdslib/src/vespa/vdslib/distribution/CMakeLists.txt
@@ -4,7 +4,6 @@ vespa_add_library(vdslib_distribution OBJECT
distribution.cpp
distribution_config_util.cpp
group.cpp
- idealnodecalculatorimpl.cpp
redundancygroupdistribution.cpp
DEPENDS
)
diff --git a/vdslib/src/vespa/vdslib/distribution/distribution.cpp b/vdslib/src/vespa/vdslib/distribution/distribution.cpp
index e9113d7dd23..ee022b1779a 100644
--- a/vdslib/src/vespa/vdslib/distribution/distribution.cpp
+++ b/vdslib/src/vespa/vdslib/distribution/distribution.cpp
@@ -9,8 +9,8 @@
#include <vespa/config/print/asciiconfigwriter.h>
#include <vespa/config/print/asciiconfigreader.hpp>
#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/config-stor-distribution.h>
-#include <algorithm>
#include <cmath>
#include <cassert>
@@ -20,16 +20,19 @@ LOG_SETUP(".vdslib.distribution");
namespace storage::lib {
namespace {
- std::vector<uint32_t> getDistributionBitMasks() {
- std::vector<uint32_t> masks;
- masks.resize(32 + 1);
- uint32_t mask = 0;
- for (uint32_t i=0; i<=32; ++i) {
- masks[i] = mask;
- mask = (mask << 1) | 1;
- }
- return masks;
+
+std::vector<uint32_t>
+getDistributionBitMasks() {
+ std::vector<uint32_t> masks;
+ masks.resize(32 + 1);
+ uint32_t mask = 0;
+ for (uint32_t i=0; i<=32; ++i) {
+ masks[i] = mask;
+ mask = (mask << 1) | 1;
}
+ return masks;
+}
+
}
VESPA_IMPLEMENT_EXCEPTION(NoDistributorsAvailableException, vespalib::Exception);
@@ -65,8 +68,8 @@ Distribution::Distribution(const Distribution& d)
configure(*reader.read());
}
-Distribution::ConfigWrapper::ConfigWrapper(std::unique_ptr<DistributionConfig> cfg) :
- _cfg(std::move(cfg))
+Distribution::ConfigWrapper::ConfigWrapper(std::unique_ptr<DistributionConfig> cfg) noexcept
+ : _cfg(std::move(cfg))
{ }
Distribution::ConfigWrapper::~ConfigWrapper() = default;
@@ -150,8 +153,7 @@ Distribution::configure(const vespa::config::content::StorDistributionConfig& co
if ( ! nodeGraph) {
throw vespalib::IllegalStateException(
"Got config that didn't seem to specify even a root group. Must "
- "have a root group at minimum:\n"
- + _serialized, VESPA_STRLOC);
+ "have a root group at minimum:\n" + _serialized, VESPA_STRLOC);
}
nodeGraph->calculateDistributionHashValues();
_nodeGraph = std::move(nodeGraph);
@@ -161,14 +163,11 @@ Distribution::configure(const vespa::config::content::StorDistributionConfig& co
_ensurePrimaryPersisted = config.ensurePrimaryPersisted;
_readyCopies = config.readyCopies;
_activePerGroup = config.activePerLeafGroup;
- _distributorAutoOwnershipTransferOnWholeGroupDown
- = config.distributorAutoOwnershipTransferOnWholeGroupDown;
+ _distributorAutoOwnershipTransferOnWholeGroupDown = config.distributorAutoOwnershipTransferOnWholeGroupDown;
}
uint32_t
-Distribution::getGroupSeed(
- const document::BucketId& bucket, const ClusterState& clusterState,
- const Group& group) const
+Distribution::getGroupSeed(const document::BucketId& bucket, const ClusterState& clusterState, const Group& group) const
{
uint32_t seed(static_cast<uint32_t>(bucket.getRawId())
& _distributionBitMasks[clusterState.getDistributionBitCount()]);
@@ -177,8 +176,7 @@ Distribution::getGroupSeed(
}
uint32_t
-Distribution::getDistributorSeed(
- const document::BucketId& bucket, const ClusterState& state) const
+Distribution::getDistributorSeed(const document::BucketId& bucket, const ClusterState& state) const
{
uint32_t seed(static_cast<uint32_t>(bucket.getRawId())
& _distributionBitMasks[state.getDistributionBitCount()]);
@@ -186,8 +184,7 @@ Distribution::getDistributorSeed(
}
uint32_t
-Distribution::getStorageSeed(
- const document::BucketId& bucket, const ClusterState& state) const
+Distribution::getStorageSeed(const document::BucketId& bucket, const ClusterState& state) const
{
uint32_t seed(static_cast<uint32_t>(bucket.getRawId())
& _distributionBitMasks[state.getDistributionBitCount()]);
@@ -262,11 +259,8 @@ namespace {
}
void
-Distribution::getIdealGroups(const document::BucketId& bucket,
- const ClusterState& clusterState,
- const Group& parent,
- uint16_t redundancy,
- std::vector<ResultGroup>& results) const
+Distribution::getIdealGroups(const document::BucketId& bucket, const ClusterState& clusterState, const Group& parent,
+ uint16_t redundancy, std::vector<ResultGroup>& results) const
{
if (parent.isLeafGroup()) {
results.emplace_back(parent, redundancy);
@@ -300,15 +294,12 @@ Distribution::getIdealGroups(const document::BucketId& bucket,
// This should never happen. Config should verify that each group
// has enough groups beneath them.
assert(group._group != nullptr);
- getIdealGroups(bucket, clusterState, *group._group,
- redundancyArray[i], results);
+ getIdealGroups(bucket, clusterState, *group._group, redundancyArray[i], results);
}
}
const Group*
-Distribution::getIdealDistributorGroup(const document::BucketId& bucket,
- const ClusterState& clusterState,
- const Group& parent) const
+Distribution::getIdealDistributorGroup(const document::BucketId& bucket, const ClusterState& clusterState, const Group& parent) const
{
if (parent.isLeafGroup()) {
return &parent;
@@ -357,12 +348,8 @@ Distribution::allDistributorsDown(const Group& g, const ClusterState& cs)
}
void
-Distribution::getIdealNodes(const NodeType& nodeType,
- const ClusterState& clusterState,
- const document::BucketId& bucket,
- std::vector<uint16_t>& resultNodes,
- const char* upStates,
- uint16_t redundancy) const
+Distribution::getIdealNodes(const NodeType& nodeType, const ClusterState& clusterState, const document::BucketId& bucket,
+ std::vector<uint16_t>& resultNodes, const char* upStates, uint16_t redundancy) const
{
if (redundancy == DEFAULT_REDUNDANCY) redundancy = _redundancy;
resultNodes.clear();
@@ -388,8 +375,7 @@ Distribution::getIdealNodes(const NodeType& nodeType,
const Group* group(getIdealDistributorGroup(bucket, clusterState, *_nodeGraph));
if (group == nullptr) {
vespalib::asciistream ss;
- ss << "There is no legal distributor target in state with version "
- << clusterState.getVersion();
+ ss << "There is no legal distributor target in state with version " << clusterState.getVersion();
throw NoDistributorsAvailableException(ss.str(), VESPA_STRLOC);
}
_groupDistribution.push_back(ResultGroup(*group, 1));
@@ -459,9 +445,7 @@ Distribution::getDefaultDistributionConfig(uint16_t redundancy, uint16_t nodeCou
}
std::vector<uint16_t>
-Distribution::getIdealStorageNodes(
- const ClusterState& state, const document::BucketId& bucket,
- const char* upStates) const
+Distribution::getIdealStorageNodes(const ClusterState& state, const document::BucketId& bucket, const char* upStates) const
{
std::vector<uint16_t> nodes;
getIdealNodes(NodeType::STORAGE, state, bucket, nodes, upStates);
@@ -469,41 +453,42 @@ Distribution::getIdealStorageNodes(
}
uint16_t
-Distribution::getIdealDistributorNode(
- const ClusterState& state,
- const document::BucketId& bucket,
- const char* upStates) const
+Distribution::getIdealDistributorNode(const ClusterState& state, const document::BucketId& bucket, const char* upStates) const
{
std::vector<uint16_t> nodes;
getIdealNodes(NodeType::DISTRIBUTOR, state, bucket, nodes, upStates);
assert(nodes.size() <= 1);
if (nodes.empty()) {
vespalib::asciistream ss;
- ss << "There is no legal distributor target in state with version "
- << state.getVersion();
+ ss << "There is no legal distributor target in state with version " << state.getVersion();
throw NoDistributorsAvailableException(ss.str(), VESPA_STRLOC);
}
return nodes[0];
}
std::vector<Distribution::IndexList>
-Distribution::splitNodesIntoLeafGroups(IndexList nodeList) const
+Distribution::splitNodesIntoLeafGroups(vespalib::ConstArrayRef<uint16_t> nodeList) const
{
- std::vector<IndexList> result;
- std::map<uint16_t, IndexList> nodes;
+ vespalib::hash_map<uint16_t, IndexList> nodes(nodeList.size());
for (auto node : nodeList) {
const Group* group((node < _node2Group.size()) ? _node2Group[node] : nullptr);
if (group == nullptr) {
- LOGBP(warning, "Node %u is not assigned to a group. "
- "Should not happen?", node);
+ LOGBP(warning, "Node %u is not assigned to a group. Should not happen?", node);
} else {
assert(group->isLeafGroup());
nodes[group->getIndex()].push_back(node);
}
}
+ std::vector<uint16_t> sorted;
+ sorted.reserve(nodes.size());
+ for (const auto & entry : nodes) {
+ sorted.push_back(entry.first);
+ }
+ std::sort(sorted.begin(), sorted.end());
+ std::vector<IndexList> result;
result.reserve(nodes.size());
- for (auto & node : nodes) {
- result.emplace_back(std::move(node.second));
+ for (uint16_t groupId : sorted) {
+ result.emplace_back(std::move(nodes.find(groupId)->second));
}
return result;
}
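Note: the rewritten splitNodesIntoLeafGroups() swaps std::map for vespalib::hash_map and then sorts the group indices explicitly, so the result stays ordered by group index as it was with the ordered map. A self-contained illustration of that pattern using only the standard library (the node-to-group mapping below is made up):

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>

int main() {
    // (node, group) pairs stand in for the _node2Group lookup; values are invented.
    std::vector<std::pair<uint16_t, uint16_t>> node_and_group = {
        {7, 1}, {0, 0}, {3, 1}, {5, 0}};

    // Bucket nodes per group with a hash map (no key ordering guaranteed).
    std::unordered_map<uint16_t, std::vector<uint16_t>> per_group;
    for (auto [node, group] : node_and_group) {
        per_group[group].push_back(node);
    }

    // Sort the keys to restore deterministic, group-index order in the output.
    std::vector<uint16_t> sorted_groups;
    sorted_groups.reserve(per_group.size());
    for (const auto& entry : per_group) sorted_groups.push_back(entry.first);
    std::sort(sorted_groups.begin(), sorted_groups.end());

    for (uint16_t group : sorted_groups) {
        std::cout << "group " << group << ":";
        for (uint16_t node : per_group[group]) std::cout << ' ' << node;
        std::cout << '\n';
    }
    return 0;
}
```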
diff --git a/vdslib/src/vespa/vdslib/distribution/distribution.h b/vdslib/src/vespa/vdslib/distribution/distribution.h
index 355b87884c1..8cf93b01630 100644
--- a/vdslib/src/vespa/vdslib/distribution/distribution.h
+++ b/vdslib/src/vespa/vdslib/distribution/distribution.h
@@ -12,6 +12,7 @@
#include <vespa/document/bucket/bucketid.h>
#include <vespa/vdslib/state/nodetype.h>
#include <vespa/vespalib/util/exception.h>
+#include <vespa/vespalib/util/small_vector.h>
namespace vespa::config::content::internal {
class InternalStorDistributionType;
@@ -38,9 +39,9 @@ private:
uint16_t _redundancy;
uint16_t _initialRedundancy;
uint16_t _readyCopies;
- bool _activePerGroup;
- bool _ensurePrimaryPersisted;
- bool _distributorAutoOwnershipTransferOnWholeGroupDown;
+ bool _activePerGroup;
+ bool _ensurePrimaryPersisted;
+ bool _distributorAutoOwnershipTransferOnWholeGroupDown;
vespalib::string _serialized;
struct ResultGroup {
@@ -50,7 +51,7 @@ private:
ResultGroup(const Group& group, uint16_t redundancy) noexcept
: _group(&group), _redundancy(redundancy) {}
- bool operator<(const ResultGroup& other) const {
+ bool operator<(const ResultGroup& other) const noexcept {
return _group->getIndex() < other._group->getIndex();
}
};
@@ -59,32 +60,23 @@ private:
* Get seed to use for ideal state algorithm's random number generator
* to decide which hierarchical group we should pick.
*/
- uint32_t getGroupSeed(
- const document::BucketId&, const ClusterState&,
- const Group&) const;
+ uint32_t getGroupSeed(const document::BucketId&, const ClusterState&, const Group&) const;
/**
* Get seed to use for ideal state algorithm's random number generator
* to decide which distributor node this bucket should be mapped to.
*/
- uint32_t getDistributorSeed(
- const document::BucketId&, const ClusterState&) const;
+ uint32_t getDistributorSeed(const document::BucketId&, const ClusterState&) const;
/**
* Get seed to use for ideal state algorithm's random number generator
* to decide which storage node this bucket should be mapped to.
*/
- uint32_t getStorageSeed(
- const document::BucketId&, const ClusterState&) const;
+ uint32_t getStorageSeed(const document::BucketId&, const ClusterState&) const;
- void getIdealGroups(const document::BucketId& bucket,
- const ClusterState& clusterState,
- const Group& parent,
- uint16_t redundancy,
- std::vector<ResultGroup>& results) const;
+ void getIdealGroups(const document::BucketId& bucket, const ClusterState& clusterState, const Group& parent,
+ uint16_t redundancy, std::vector<ResultGroup>& results) const;
- const Group* getIdealDistributorGroup(const document::BucketId& bucket,
- const ClusterState& clusterState,
- const Group& parent) const;
+ const Group* getIdealDistributorGroup(const document::BucketId& bucket, const ClusterState& clusterState, const Group& parent) const;
/**
* Since distribution object may be used often in ideal state calculations
@@ -97,9 +89,9 @@ private:
public:
class ConfigWrapper {
public:
- ConfigWrapper(ConfigWrapper && rhs) = default;
- ConfigWrapper & operator = (ConfigWrapper && rhs) = default;
- ConfigWrapper(std::unique_ptr<DistributionConfig> cfg);
+ ConfigWrapper(ConfigWrapper && rhs) noexcept = default;
+ ConfigWrapper & operator = (ConfigWrapper && rhs) noexcept = default;
+ ConfigWrapper(std::unique_ptr<DistributionConfig> cfg) noexcept;
~ConfigWrapper();
const DistributionConfig & get() const { return *_cfg; }
private:
@@ -114,33 +106,26 @@ public:
Distribution& operator=(const Distribution&) = delete;
- const vespalib::string& serialize() const { return _serialized; }
+ const vespalib::string& serialize() const noexcept { return _serialized; }
- const Group& getNodeGraph() const { return *_nodeGraph; }
- uint16_t getRedundancy() const { return _redundancy; }
- uint16_t getInitialRedundancy() const { return _initialRedundancy; }
- uint16_t getReadyCopies() const { return _readyCopies; }
- bool ensurePrimaryPersisted() const { return _ensurePrimaryPersisted; }
- bool distributorAutoOwnershipTransferOnWholeGroupDown() const
- { return _distributorAutoOwnershipTransferOnWholeGroupDown; }
- bool activePerGroup() const { return _activePerGroup; }
+ const Group& getNodeGraph() const noexcept { return *_nodeGraph; }
+ uint16_t getRedundancy() const noexcept { return _redundancy; }
+ uint16_t getInitialRedundancy() const noexcept { return _initialRedundancy; }
+ uint16_t getReadyCopies() const noexcept { return _readyCopies; }
+ bool ensurePrimaryPersisted() const noexcept { return _ensurePrimaryPersisted; }
+ bool distributorAutoOwnershipTransferOnWholeGroupDown() const noexcept { return _distributorAutoOwnershipTransferOnWholeGroupDown; }
+ bool activePerGroup() const noexcept { return _activePerGroup; }
- bool operator==(const Distribution& o) const
- { return (_serialized == o._serialized); }
- bool operator!=(const Distribution& o) const
- { return (_serialized != o._serialized); }
+ bool operator==(const Distribution& o) const noexcept { return (_serialized == o._serialized); }
+ bool operator!=(const Distribution& o) const noexcept { return (_serialized != o._serialized); }
void print(std::ostream& out, bool, const std::string&) const override;
/** Simplified wrapper for getIdealNodes() */
- std::vector<uint16_t> getIdealStorageNodes(
- const ClusterState&, const document::BucketId&,
- const char* upStates = "uim") const;
+ std::vector<uint16_t> getIdealStorageNodes(const ClusterState&, const document::BucketId&, const char* upStates = "uim") const;
/** Simplified wrapper for getIdealNodes() */
- uint16_t getIdealDistributorNode(
- const ClusterState&, const document::BucketId&,
- const char* upStates = "uim") const;
+ uint16_t getIdealDistributorNode(const ClusterState&, const document::BucketId&, const char* upStates = "uim") const;
/**
* @throws TooFewBucketBitsInUseException If distribution bit count is
@@ -149,25 +134,22 @@ public:
* in any upstate.
*/
enum { DEFAULT_REDUNDANCY = 0xffff };
- void getIdealNodes(const NodeType&, const ClusterState&,
- const document::BucketId&, std::vector<uint16_t>& nodes,
- const char* upStates = "uim",
- uint16_t redundancy = DEFAULT_REDUNDANCY) const;
+ void getIdealNodes(const NodeType&, const ClusterState&, const document::BucketId&, std::vector<uint16_t>& nodes,
+ const char* upStates, uint16_t redundancy = DEFAULT_REDUNDANCY) const;
/**
* Unit tests can use this function to get raw config for this class to use
* with a really simple setup with no hierarchical grouping. This function
* should not be used by any production code.
*/
- static ConfigWrapper getDefaultDistributionConfig(
- uint16_t redundancy = 2, uint16_t nodeCount = 10);
+ static ConfigWrapper getDefaultDistributionConfig(uint16_t redundancy = 2, uint16_t nodeCount = 10);
/**
* Utility function used by distributor to split copies into groups to
* handle active per group feature.
*/
- using IndexList = std::vector<uint16_t>;
- std::vector<IndexList> splitNodesIntoLeafGroups(IndexList nodes) const;
+ using IndexList = vespalib::SmallVector<uint16_t, 4>;
+ std::vector<IndexList> splitNodesIntoLeafGroups(vespalib::ConstArrayRef<uint16_t> nodes) const;
static bool allDistributorsDown(const Group&, const ClusterState&);
};
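The distribution.h hunk above is mostly mechanical (declarations joined onto single lines, noexcept added), but it keeps the simplified ideal-node wrappers. A minimal, hedged C++ sketch of how a caller might use them after this change (the Distribution instance and the bucket are assumed to come from elsewhere; only signatures visible in this hunk are used):

#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/state/clusterstate.h>
#include <vespa/document/bucket/bucketid.h>
#include <vector>

using storage::lib::ClusterState;
using storage::lib::Distribution;

// Preferred storage nodes for a bucket, given an already-configured distribution.
std::vector<uint16_t> idealStorageNodesFor(const Distribution& dist,
                                           const document::BucketId& bucket) {
    ClusterState state("distributor:3 storage:4");           // 3 distributors, 4 storage nodes, all up
    return dist.getIdealStorageNodes(state, bucket, "uim");   // up, initializing, maintenance
}
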
diff --git a/vdslib/src/vespa/vdslib/distribution/group.cpp b/vdslib/src/vespa/vdslib/distribution/group.cpp
index 537b4635e75..254a20e1052 100644
--- a/vdslib/src/vespa/vdslib/distribution/group.cpp
+++ b/vdslib/src/vespa/vdslib/distribution/group.cpp
@@ -11,7 +11,7 @@
namespace storage::lib {
-Group::Group(uint16_t index, vespalib::stringref name)
+Group::Group(uint16_t index, vespalib::stringref name) noexcept
: _name(name),
_index(index),
_distributionHash(0),
@@ -46,7 +46,7 @@ Group::~Group()
}
bool
-Group::operator==(const Group& other) const
+Group::operator==(const Group& other) const noexcept
{
return (_name == other._name &&
_index == other._index &&
diff --git a/vdslib/src/vespa/vdslib/distribution/group.h b/vdslib/src/vespa/vdslib/distribution/group.h
index 5767f55d20a..3f468bee995 100644
--- a/vdslib/src/vespa/vdslib/distribution/group.h
+++ b/vdslib/src/vespa/vdslib/distribution/group.h
@@ -49,28 +49,25 @@ private:
public:
// Create leaf node
- Group(uint16_t index, vespalib::stringref name);
+ Group(uint16_t index, vespalib::stringref name) noexcept;
// Create branch node
Group(uint16_t index, vespalib::stringref name,
const Distribution&, uint16_t redundancy);
virtual ~Group();
- bool isLeafGroup() const { return _nodes.size() > 0; }
- bool operator==(const Group& other) const;
+ bool isLeafGroup() const noexcept { return ! _nodes.empty(); }
+ bool operator==(const Group& other) const noexcept;
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
- vespalib::Double getCapacity() const { return _capacity; }
- const vespalib::string & getName() const { return _name; }
- uint16_t getIndex() const { return _index; }
+ vespalib::Double getCapacity() const noexcept { return _capacity; }
+ const vespalib::string & getName() const noexcept { return _name; }
+ uint16_t getIndex() const noexcept { return _index; }
std::map<uint16_t, Group*>& getSubGroups() { return _subGroups; }
- const std::map<uint16_t, Group*>& getSubGroups() const
- { return _subGroups; }
- const std::vector<uint16_t>& getNodes() const { return _nodes; };
- const Distribution& getDistributionSpec() const
- { return _distributionSpec; }
- const Distribution& getDistribution(uint16_t redundancy) const
- { return _preCalculated[redundancy]; }
- uint32_t getDistributionHash() const { return _distributionHash; }
+ const std::map<uint16_t, Group*>& getSubGroups() const noexcept { return _subGroups; }
+ const std::vector<uint16_t>& getNodes() const noexcept { return _nodes; }
+ const Distribution& getDistributionSpec() const noexcept { return _distributionSpec; }
+ const Distribution& getDistribution(uint16_t redundancy) const noexcept { return _preCalculated[redundancy]; }
+ uint32_t getDistributionHash() const noexcept { return _distributionHash; }
void addSubGroup(Group::UP);
void setCapacity(vespalib::Double capacity);
diff --git a/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h b/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h
deleted file mode 100644
index bc42df1b49c..00000000000
--- a/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
- * An interface to implement for a calculator calcuting ideal state. It should
- * be easy to wrap this calculator in a cache. Thus options that seldom change,
- * are taken in as set parameters, such that existing cache can be invalidated.
- */
-#pragma once
-
-#include <vespa/document/bucket/bucketid.h>
-#include <vespa/document/util/printable.h>
-#include <vespa/vdslib/state/node.h>
-#include <vector>
-#include <memory>
-
-namespace storage::lib {
-
-class Distribution;
-class ClusterState;
-
-/**
- * A list of ideal nodes, sorted in preferred order. Wraps a vector to hide
- * unneeded details, and make it easily printable.
- */
-class IdealNodeList : public document::Printable {
- std::vector<Node> _idealNodes;
-
-public:
- IdealNodeList();
- ~IdealNodeList();
-
- void push_back(const Node& node) {
- _idealNodes.push_back(node);
- }
-
- const Node& operator[](uint32_t i) const { return _idealNodes[i]; }
- uint32_t size() const { return _idealNodes.size(); }
- bool contains(const Node& n) const {
- for (uint32_t i=0; i<_idealNodes.size(); ++i) {
- if (n == _idealNodes[i]) return true;
- }
- return false;
- }
- uint16_t indexOf(const Node& n) const {
- for (uint16_t i=0; i<_idealNodes.size(); ++i) {
- if (n == _idealNodes[i]) return i;
- }
- return 0xffff;
- }
-
- void print(std::ostream& out, bool, const std::string &) const override;
-};
-
-/**
- * Simple interface to use for those who needs to calculate ideal nodes.
- */
-class IdealNodeCalculator {
-public:
- using SP = std::shared_ptr<IdealNodeCalculator>;
- enum UpStates {
- UpInit,
- UpInitMaintenance,
- UP_STATE_COUNT
- };
-
- virtual ~IdealNodeCalculator() = default;
-
- virtual IdealNodeList getIdealNodes(const NodeType&,
- const document::BucketId&,
- UpStates upStates = UpInit) const = 0;
-
- // Wrapper functions to make prettier call if nodetype is given.
- IdealNodeList getIdealDistributorNodes(const document::BucketId& bucket,
- UpStates upStates = UpInit) const
- { return getIdealNodes(NodeType::DISTRIBUTOR, bucket, upStates); }
- IdealNodeList getIdealStorageNodes(const document::BucketId& bucket,
- UpStates upStates = UpInit) const
- { return getIdealNodes(NodeType::STORAGE, bucket, upStates); }
-};
-
-
-/**
- * More complex interface that provides a way to alter needed settings not
- * provided in the function call itself.
- */
-class IdealNodeCalculatorConfigurable : public IdealNodeCalculator
-{
-public:
- using SP = std::shared_ptr<IdealNodeCalculatorConfigurable>;
-
- virtual void setDistribution(const Distribution&) = 0;
- virtual void setClusterState(const ClusterState&) = 0;
-};
-
-}
diff --git a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.cpp b/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.cpp
deleted file mode 100644
index da34ec4526a..00000000000
--- a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "idealnodecalculatorimpl.h"
-#include "distribution.h"
-#include <vespa/vespalib/util/exceptions.h>
-#include <ostream>
-#include <cassert>
-
-namespace storage::lib {
-
-IdealNodeList::IdealNodeList() = default;
-IdealNodeList::~IdealNodeList() = default;
-
-void
-IdealNodeList::print(std::ostream& out, bool , const std::string &) const
-{
- out << "[";
- for (uint32_t i=0; i<_idealNodes.size(); ++i) {
- if (i != 0) out << ", ";
- out << _idealNodes[i];
- }
- out << "]";
-}
-
-IdealNodeCalculatorImpl::IdealNodeCalculatorImpl()
- : _distribution(0),
- _clusterState(0)
-{
- initUpStateMapping();
-}
-
-IdealNodeCalculatorImpl::~IdealNodeCalculatorImpl() = default;
-
-void
-IdealNodeCalculatorImpl::setDistribution(const Distribution& d) {
- _distribution = &d;
-}
-void
-IdealNodeCalculatorImpl::setClusterState(const ClusterState& cs) {
- _clusterState = &cs;
-}
-
-IdealNodeList
-IdealNodeCalculatorImpl::getIdealNodes(const NodeType& nodeType,
- const document::BucketId& bucket,
- UpStates upStates) const
-{
- assert(_clusterState != 0);
- assert(_distribution != 0);
- std::vector<uint16_t> nodes;
- _distribution->getIdealNodes(nodeType, *_clusterState, bucket, nodes, _upStates[upStates]);
- IdealNodeList list;
- for (uint32_t i=0; i<nodes.size(); ++i) {
- list.push_back(Node(nodeType, nodes[i]));
- }
- return list;
-}
-
-void
-IdealNodeCalculatorImpl::initUpStateMapping() {
- _upStates.clear();
- _upStates.resize(UP_STATE_COUNT);
- _upStates[UpInit] = "ui";
- _upStates[UpInitMaintenance] = "uim";
- for (uint32_t i=0; i<_upStates.size(); ++i) {
- if (_upStates[i] == 0) throw vespalib::IllegalStateException(
- "Failed to initialize up state. Code likely not updated "
- "after another upstate was added.", VESPA_STRLOC);
- }
-}
-
-}
diff --git a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.h b/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.h
deleted file mode 100644
index 9b36f1094fd..00000000000
--- a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
- * A cache for an ideal nodes implementation. Making it cheap for localized
- * access, regardless of real implementation.
- */
-#pragma once
-
-#include "idealnodecalculator.h"
-
-namespace storage::lib {
-
-class IdealNodeCalculatorImpl : public IdealNodeCalculatorConfigurable {
- std::vector<const char*> _upStates;
- const Distribution* _distribution;
- const ClusterState* _clusterState;
-
-public:
- IdealNodeCalculatorImpl();
- ~IdealNodeCalculatorImpl();
-
- void setDistribution(const Distribution& d) override;
- void setClusterState(const ClusterState& cs) override;
-
- IdealNodeList getIdealNodes(const NodeType& nodeType,
- const document::BucketId& bucket,
- UpStates upStates) const override;
-private:
- void initUpStateMapping();
-};
-
-}
diff --git a/vdslib/src/vespa/vdslib/state/clusterstate.cpp b/vdslib/src/vespa/vdslib/state/clusterstate.cpp
index e9159eef631..f4314a6624b 100644
--- a/vdslib/src/vespa/vdslib/state/clusterstate.cpp
+++ b/vdslib/src/vespa/vdslib/state/clusterstate.cpp
@@ -7,7 +7,10 @@
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/stllike/hash_map_equal.hpp>
#include <sstream>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".vdslib.state.cluster");
@@ -24,9 +27,9 @@ namespace storage::lib {
ClusterState::ClusterState()
: Printable(),
_version(0),
+ _nodeCount(),
_clusterState(&State::DOWN),
_nodeStates(),
- _nodeCount(2),
_description(),
_distributionBits(16)
{ }
@@ -41,14 +44,10 @@ struct NodeData {
NodeData() : empty(true), node(NodeType::STORAGE, 0), ost() {}
- void addTo(std::map<Node, NodeState>& nodeStates,
- std::vector<uint16_t>& nodeCount)
- {
+ void addTo(ClusterState::NodeMap & nodeStates, ClusterState::NodeCounts & nodeCount) {
if (!empty) {
NodeState state(ost.str(), &node.getType());
- if (state != NodeState(node.getType(), State::UP)
- || state.getDescription().size() > 0)
- {
+ if ((state != NodeState(node.getType(), State::UP)) || (state.getDescription().size() > 0)) {
nodeStates.insert(std::make_pair(node, state));
}
if (nodeCount[node.getType()] <= node.getIndex()) {
@@ -63,9 +62,9 @@ struct NodeData {
ClusterState::ClusterState(const vespalib::string& serialized)
: Printable(),
_version(0),
+ _nodeCount(),
_clusterState(&State::UP),
_nodeStates(),
- _nodeCount(2),
_description(),
_distributionBits(16)
{
@@ -74,13 +73,13 @@ ClusterState::ClusterState(const vespalib::string& serialized)
NodeData nodeData;
vespalib::string lastAbsolutePath;
- for (vespalib::StringTokenizer::Iterator it = st.begin(); it != st.end(); ++it) {
- vespalib::string::size_type index = it->find(':');
+ for (const auto & token : st) {
+ vespalib::string::size_type index = token.find(':');
if (index == vespalib::string::npos) {
- throw IllegalArgumentException("Token " + *it + " does not contain ':': " + serialized, VESPA_STRLOC);
+ throw IllegalArgumentException("Token " + token + " does not contain ':': " + serialized, VESPA_STRLOC);
}
- vespalib::string key = it->substr(0, index);
- vespalib::stringref value = it->substr(index + 1);
+ vespalib::string key = token.substr(0, index);
+ vespalib::stringref value = token.substr(index + 1);
if (key.size() > 0 && key[0] == '.') {
if (lastAbsolutePath == "") {
throw IllegalArgumentException("The first path in system state string needs to be absolute", VESPA_STRLOC);
@@ -111,7 +110,9 @@ ClusterState::parse(vespalib::stringref key, vespalib::stringref value, NodeData
break;
case 'b':
if (key == "bits") {
- _distributionBits = atoi(value.data());
+ uint32_t numBits = atoi(value.data());
+ assert(numBits <= 64);
+ _distributionBits = numBits;
return true;
}
break;
@@ -138,7 +139,7 @@ ClusterState::parse(vespalib::stringref key, vespalib::stringref value, NodeData
bool
ClusterState::parseSorD(vespalib::stringref key, vespalib::stringref value, NodeData & nodeData) {
- const NodeType* nodeType(0);
+ const NodeType* nodeType = nullptr;
vespalib::string::size_type dot = key.find('.');
vespalib::stringref type(dot == vespalib::string::npos
? key : key.substr(0, dot));
@@ -147,10 +148,9 @@ ClusterState::parseSorD(vespalib::stringref key, vespalib::stringref value, Node
} else if (type == "distributor") {
nodeType = &NodeType::DISTRIBUTOR;
}
- if (nodeType == 0) return false;
+ if (nodeType == nullptr) return false;
if (dot == vespalib::string::npos) { // Entry that set node counts
- uint16_t nodeCount = 0;
- nodeCount = atoi(value.data());
+ uint16_t nodeCount = atoi(value.data());
if (nodeCount > _nodeCount[*nodeType] ) {
_nodeCount[*nodeType] = nodeCount;
@@ -158,12 +158,9 @@ ClusterState::parseSorD(vespalib::stringref key, vespalib::stringref value, Node
return true;
}
vespalib::string::size_type dot2 = key.find('.', dot + 1);
- Node node;
- if (dot2 == vespalib::string::npos) {
- node = Node(*nodeType, atoi(key.substr(dot + 1).data()));
- } else {
- node = Node(*nodeType, atoi(key.substr(dot + 1, dot2 - dot - 1).data()));
- }
+ Node node(*nodeType, (dot2 == vespalib::string::npos)
+ ? atoi(key.substr(dot + 1).data())
+ : atoi(key.substr(dot + 1, dot2 - dot - 1).data()));
if (node.getIndex() >= _nodeCount[*nodeType]) {
vespalib::asciistream ost;
@@ -183,74 +180,73 @@ ClusterState::parseSorD(vespalib::stringref key, vespalib::stringref value, Node
return true;
}
+struct SeparatorPrinter {
+ bool first;
+ SeparatorPrinter() : first(true) {}
+ const char * toString() {
+ if (first) {
+ first = false;
+ return "";
+ }
+ return " ";
+ }
+};
+
namespace {
- struct SeparatorPrinter {
- bool first;
- SeparatorPrinter() : first(true) {}
- const char * toString() {
- if (first) {
- first = false;
- return "";
+
+void
+serialize_node(vespalib::asciistream & out, const Node & node, const NodeState & state) {
+ vespalib::asciistream prefix;
+ prefix << "." << node.getIndex() << ".";
+ vespalib::asciistream ost;
+ state.serialize(ost, prefix.str(), false);
+ vespalib::stringref content = ost.str();
+ if ( !content.empty()) {
+ out << " " << content;
+ }
+}
+
+}
+
+void
+ClusterState::serialize_nodes(vespalib::asciistream & out, SeparatorPrinter & sep, const NodeType & nodeType,
+ const std::vector<NodeStatePair> & nodeStates) const
+{
+ uint16_t nodeCount = getNodeCount(nodeType);
+ if (nodeCount > 0) {
+ out << sep.toString() << nodeType.serialize() << ":" << nodeCount;
+ for (const auto & entry : nodeStates) {
+ if (entry.first.getType() == nodeType) {
+ serialize_node(out, entry.first, entry.second);
}
- return " ";
}
- };
+ }
}
void
-ClusterState::serialize(vespalib::asciistream & out, bool ignoreNewFeatures) const
+ClusterState::serialize(vespalib::asciistream & out) const
{
SeparatorPrinter sep;
- if (!ignoreNewFeatures && _version != 0) {
+ if (_version != 0) {
out << sep.toString() << "version:" << _version;
}
- if (!ignoreNewFeatures && *_clusterState != State::UP) {
+ if (*_clusterState != State::UP) {
out << sep.toString() << "cluster:" << _clusterState->serialize();
}
- if (!ignoreNewFeatures && _distributionBits != 16) {
+ if (_distributionBits != 16) {
out << sep.toString() << "bits:" << _distributionBits;
}
- uint16_t distCount = getNodeCount(NodeType::DISTRIBUTOR);
- if (ignoreNewFeatures || distCount > 0) {
- out << sep.toString() << "distributor:" << distCount;
- for (std::map<Node, NodeState>::const_iterator it =
- _nodeStates.begin();
- it != _nodeStates.end(); ++it)
- {
- if (it->first.getType() != NodeType::DISTRIBUTOR) continue;
- vespalib::asciistream prefix;
- prefix << "." << it->first.getIndex() << ".";
- vespalib::asciistream ost;
- it->second.serialize(ost, prefix.str(), false);
- vespalib::stringref content = ost.str();
- if (content.size() > 0) {
- out << " " << content;
- }
- }
- }
- uint16_t storCount = getNodeCount(NodeType::STORAGE);
- if (ignoreNewFeatures || storCount > 0) {
- out << sep.toString() << "storage:" << storCount;
- for (std::map<Node, NodeState>::const_iterator it =
- _nodeStates.begin();
- it != _nodeStates.end(); ++it)
- {
- if (it->first.getType() != NodeType::STORAGE) continue;
- vespalib::asciistream prefix;
- prefix << "." << it->first.getIndex() << ".";
- vespalib::asciistream ost;
- it->second.serialize(ost, prefix.str(), false);
- vespalib::stringref content = ost.str();
- if ( !content.empty()) {
- out << " " << content;
- }
- }
- }
+ if ((getNodeCount(NodeType::DISTRIBUTOR) + getNodeCount(NodeType::STORAGE)) == 0u) return;
+
+ std::vector<NodeStatePair> nodeStates(_nodeStates.cbegin(), _nodeStates.cend());
+ std::sort(nodeStates.begin(), nodeStates.end(), [](const NodeStatePair &a, const NodeStatePair &b) { return a.first < b.first; });
+ serialize_nodes(out, sep, NodeType::DISTRIBUTOR, nodeStates);
+ serialize_nodes(out, sep, NodeType::STORAGE, nodeStates);
}
bool
-ClusterState::operator==(const ClusterState& other) const
+ClusterState::operator==(const ClusterState& other) const noexcept
{
return (_version == other._version &&
*_clusterState == *other._clusterState &&
@@ -260,17 +256,11 @@ ClusterState::operator==(const ClusterState& other) const
}
bool
-ClusterState::operator!=(const ClusterState& other) const
+ClusterState::operator!=(const ClusterState& other) const noexcept
{
return !(*this == other);
}
-uint16_t
-ClusterState::getNodeCount(const NodeType& type) const
-{
- return _nodeCount[type];
-}
-
namespace {
[[noreturn]] void throwUnknownType(const Node & node) __attribute__((noinline));
void throwUnknownType(const Node & node) {
@@ -282,7 +272,7 @@ const NodeState&
ClusterState::getNodeState(const Node& node) const
{
// If it actually has an entry in map, return that
- std::map<Node, NodeState>::const_iterator it = _nodeStates.find(node);
+ const auto it = _nodeStates.find(node);
if (it != _nodeStates.end()) return it->second;
// If beyond node count, the node is down.
@@ -307,9 +297,7 @@ void
ClusterState::setClusterState(const State& state)
{
if (!state.validClusterState()) {
- throw vespalib::IllegalStateException(
- state.toString(true) + " is not a legal cluster state",
- VESPA_STRLOC);
+ throw vespalib::IllegalStateException(state.toString(true) + " is not a legal cluster state", VESPA_STRLOC);
}
_clusterState = &state;
}
@@ -319,17 +307,12 @@ ClusterState::setNodeState(const Node& node, const NodeState& state)
{
state.verifySupportForNodeType(node.getType());
if (node.getIndex() >= _nodeCount[node.getType()]) {
- for (uint32_t i = _nodeCount[node.getType()]; i < node.getIndex(); ++i)
- {
- _nodeStates.insert(std::make_pair(
- Node(node.getType(), i),
- NodeState(node.getType(), State::DOWN)));
+ for (uint32_t i = _nodeCount[node.getType()]; i < node.getIndex(); ++i) {
+ _nodeStates.insert(std::make_pair(Node(node.getType(), i), NodeState(node.getType(), State::DOWN)));
}
_nodeCount[node.getType()] = node.getIndex() + 1;
}
- if (state == NodeState(node.getType(), State::UP)
- && state.getDescription().size() == 0)
- {
+ if ((state == NodeState(node.getType(), State::UP)) && state.getDescription().empty()) {
_nodeStates.erase(node);
} else {
_nodeStates.insert(std::make_pair(node, state));
@@ -339,32 +322,34 @@ ClusterState::setNodeState(const Node& node, const NodeState& state)
}
void
-ClusterState::print(std::ostream& out, bool verbose,
- const std::string&) const
+ClusterState::print(std::ostream& out, bool verbose, const std::string&) const
{
(void) verbose;
vespalib::asciistream tmp;
- serialize(tmp, false);
+ serialize(tmp);
out << tmp.str();
}
void
ClusterState::removeExtraElements()
{
+ removeExtraElements(NodeType::STORAGE);
+ removeExtraElements(NodeType::DISTRIBUTOR);
+}
+
+void
+ClusterState::removeExtraElements(const NodeType & type)
+{
// Simplify the system state by removing the last indexes if the nodes
// are down.
- for (uint32_t i=0; i<2; ++i) {
- const NodeType& type(i == 0 ? NodeType::STORAGE
- : NodeType::DISTRIBUTOR);
- for (int32_t index = _nodeCount[type]; index >= 0; --index) {
- Node node(type, index - 1);
- std::map<Node, NodeState>::iterator it(_nodeStates.find(node));
- if (it == _nodeStates.end()) break;
- if (it->second.getState() != State::DOWN) break;
- if (it->second.getDescription() != "") break;
- _nodeStates.erase(it);
- --_nodeCount[type];
- }
+ for (int32_t index = _nodeCount[type]; index >= 0; --index) {
+ Node node(type, index - 1);
+ const auto it = _nodeStates.find(node);
+ if (it == _nodeStates.end()) break;
+ if (it->second.getState() != State::DOWN) break;
+ if (it->second.getDescription() != "") break;
+ _nodeStates.erase(it);
+ --_nodeCount[type];
}
}
@@ -413,90 +398,89 @@ void
ClusterState::printStateGroupwise(std::ostream& out, const Distribution& dist,
bool verbose, const std::string& indent) const
{
- out << "ClusterState(Version: " << _version << ", Cluster state: "
- << _clusterState->toString(true) << ", Distribution bits: "
- << _distributionBits << ") {";
+ out << "ClusterState(Version: " << _version << ", Cluster state: " << _clusterState->toString(true)
+ << ", Distribution bits: " << _distributionBits << ") {";
printStateGroupwise(out, dist.getNodeGraph(), verbose, indent + " ", true);
out << "\n" << indent << "}";
}
namespace {
- template<typename T>
- std::string getNumberSpec(const std::vector<T>& numbers) {
- std::ostringstream ost;
- bool first = true;
- uint32_t firstInRange = numbers.size() == 0 ? 0 : numbers[0];;
- uint32_t lastInRange = firstInRange;
- for (uint32_t i=1; i<=numbers.size(); ++i) {
- if (i < numbers.size() && numbers[i] == lastInRange + 1) {
- ++lastInRange;
+
+template<typename T>
+std::string getNumberSpec(const std::vector<T>& numbers) {
+ std::ostringstream ost;
+ bool first = true;
+ uint32_t firstInRange = numbers.size() == 0 ? 0 : numbers[0];
+ uint32_t lastInRange = firstInRange;
+ for (uint32_t i=1; i<=numbers.size(); ++i) {
+ if (i < numbers.size() && numbers[i] == lastInRange + 1) {
+ ++lastInRange;
+ } else {
+ if (first) {
+ first = false;
} else {
- if (first) {
- first = false;
- } else {
- ost << ",";
- }
- if (firstInRange == lastInRange) {
- ost << firstInRange;
- } else {
- ost << firstInRange << "-" << lastInRange;
- }
- if (i < numbers.size()) {
- firstInRange = lastInRange = numbers[i];
- }
+ ost << ",";
+ }
+ if (firstInRange == lastInRange) {
+ ost << firstInRange;
+ } else {
+ ost << firstInRange << "-" << lastInRange;
+ }
+ if (i < numbers.size()) {
+ firstInRange = lastInRange = numbers[i];
}
}
- return ost.str();
}
+ return ost.str();
+}
+
+}
+
+size_t
+ClusterState::printStateGroupwise(std::ostream& out, const Group& group, bool verbose,
+ const std::string& indent, const NodeType& nodeType) const
+{
+ NodeState defState(nodeType, State::UP);
+ size_t printed = 0;
+ for (uint16_t nodeId : group.getNodes()) {
+ Node node(nodeType, nodeId);
+ const NodeState& state(getNodeState(node));
+ if (state != defState) {
+ out << "\n" << indent << " " << node << ": ";
+ state.print(out, verbose, indent + " ");
+ printed++;
+ }
+ }
+ return printed;
}
void
-ClusterState::printStateGroupwise(std::ostream& out, const Group& group,
- bool verbose, const std::string& indent,
- bool rootGroup) const
+ClusterState::printStateGroupwise(std::ostream& out, const Group& group, bool verbose,
+ const std::string& indent, bool rootGroup) const
{
if (rootGroup) {
out << "\n" << indent << "Top group";
} else {
- out << "\n" << indent << "Group " << group.getIndex() << ": "
- << group.getName();
+ out << "\n" << indent << "Group " << group.getIndex() << ": " << group.getName();
if (group.getCapacity() != 1.0) {
out << ", capacity " << group.getCapacity();
}
}
out << ".";
if (group.isLeafGroup()) {
- out << " " << group.getNodes().size() << " node"
- << (group.getNodes().size() != 1 ? "s" : "") << " ["
- << getNumberSpec(group.getNodes()) << "] {";
- bool printedAny = false;
- for (uint32_t j=0; j<2; ++j) {
- const NodeType& nodeType(
- j == 0 ? NodeType::DISTRIBUTOR : NodeType::STORAGE);
- NodeState defState(nodeType, State::UP);
- for (uint32_t i=0; i<group.getNodes().size(); ++i) {
- Node node(nodeType, group.getNodes()[i]);
- const NodeState& state(getNodeState(node));
- if (state != defState) {
- out << "\n" << indent << " " << node << ": ";
- state.print(out, verbose, indent + " ");
- printedAny = true;
- }
- }
- }
- if (!printedAny) {
+ out << " " << group.getNodes().size() << " node" << (group.getNodes().size() != 1 ? "s" : "")
+ << " [" << getNumberSpec(group.getNodes()) << "] {";
+ size_t printed = printStateGroupwise(out, group, verbose, indent, NodeType::DISTRIBUTOR) +
+ printStateGroupwise(out, group, verbose, indent, NodeType::STORAGE);
+ if (printed == 0) {
out << "\n" << indent << " All nodes in group up and available.";
}
} else {
- const std::map<uint16_t, Group*>& children(group.getSubGroups());
- out << " " << children.size() << " branch"
- << (children.size() != 1 ? "es" : "") << " with distribution "
- << group.getDistributionSpec() << " {";
- for (std::map<uint16_t, Group*>::const_iterator it = children.begin();
- it != children.end(); ++it)
- {
- printStateGroupwise(out, *it->second, verbose,
- indent + " ", false);
+ const auto & children(group.getSubGroups());
+ out << " " << children.size() << " branch" << (children.size() != 1 ? "es" : "")
+ << " with distribution " << group.getDistributionSpec() << " {";
+ for (const auto & child : children) {
+ printStateGroupwise(out, *child.second, verbose, indent + " ", false);
}
}
out << "\n" << indent << "}";
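The clusterstate.cpp rewrite above collapses the two near-identical per-type serialization loops into serialize_nodes(), drops the unused ignoreNewFeatures flag, and sorts the node-state pairs before emitting them, since the backing container is now a hash map with no inherent ordering. A hedged sketch of the resulting call pattern, using only the constructor and the new serialize() signature shown in this diff:

#include <vespa/vdslib/state/clusterstate.h>
#include <vespa/vespalib/stllike/asciistream.h>

using storage::lib::ClusterState;

// Parse a state string and re-serialize it; the explicit sort in serialize()
// keeps the output deterministic even though node states now live in a hash map.
vespalib::string roundTrip(const vespalib::string& in) {
    ClusterState state(in);   // e.g. "version:2 bits:8 distributor:3 storage:4 .1.s:d"
    vespalib::asciistream out;
    state.serialize(out);     // the old bool argument is gone
    return vespalib::string(out.str());
}
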
diff --git a/vdslib/src/vespa/vdslib/state/clusterstate.h b/vdslib/src/vespa/vdslib/state/clusterstate.h
index 3af5a45fcac..90ec7c1aa65 100644
--- a/vdslib/src/vespa/vdslib/state/clusterstate.h
+++ b/vdslib/src/vespa/vdslib/state/clusterstate.h
@@ -10,26 +10,21 @@
#include "node.h"
#include "nodestate.h"
-#include <map>
+#include <vespa/vespalib/stllike/hash_map.h>
+#include <array>
namespace storage::lib {
class Distribution;
class Group;
struct NodeData;
+struct SeparatorPrinter;
class ClusterState : public document::Printable {
- uint32_t _version;
- const State* _clusterState;
- std::map<Node, NodeState> _nodeStates;
- std::vector<uint16_t> _nodeCount;
- vespalib::string _description;
- uint16_t _distributionBits;
-
- void getTextualDifference(std::ostringstream& builder, const NodeType& type,
- const ClusterState& other) const;
-
public:
+ using NodeStatePair = std::pair<Node, NodeState>;
+ using NodeMap = vespalib::hash_map<Node, NodeState>;
+ using NodeCounts = std::array<uint16_t, 2>;
using CSP = std::shared_ptr<const ClusterState>;
using SP = std::shared_ptr<ClusterState>;
using UP = std::unique_ptr<ClusterState>;
@@ -43,31 +38,29 @@ public:
~ClusterState();
std::string getTextualDifference(const ClusterState& other) const;
- void serialize(vespalib::asciistream & out, bool ignoreNewFeatures) const;
+ void serialize(vespalib::asciistream & out) const;
- bool operator==(const ClusterState& other) const;
- bool operator!=(const ClusterState& other) const;
+ bool operator==(const ClusterState& other) const noexcept;
+ bool operator!=(const ClusterState& other) const noexcept;
- uint32_t getVersion() const { return _version; }
+ uint32_t getVersion() const noexcept { return _version; }
/**
* Returns the smallest number above the highest node index found of the
* given type that is not down.
*/
- uint16_t getNodeCount(const NodeType& type) const;
- uint16_t getDistributionBitCount() const { return _distributionBits; }
- const State& getClusterState() const { return *_clusterState; }
+ uint16_t getNodeCount(const NodeType& type) const noexcept { return _nodeCount[type]; }
+ uint16_t getDistributionBitCount() const noexcept { return _distributionBits; }
+ const State& getClusterState() const noexcept { return *_clusterState; }
const NodeState& getNodeState(const Node& node) const;
- void setVersion(uint32_t version) { _version = version; }
+ void setVersion(uint32_t version) noexcept { _version = version; }
void setClusterState(const State& state);
void setNodeState(const Node& node, const NodeState& state);
- void setDistributionBitCount(uint16_t count) { _distributionBits = count; }
+ void setDistributionBitCount(uint16_t count) noexcept { _distributionBits = count; }
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
- void printStateGroupwise(std::ostream& out,
- const Distribution&, bool verbose = false,
- const std::string& indent = "") const;
+ void printStateGroupwise(std::ostream& out, const Distribution&, bool verbose, const std::string& indent) const;
private:
// Preconditions: `key` and `value` MUST point into null-terminated strings.
@@ -75,9 +68,18 @@ private:
// Preconditions: `key` and `value` MUST point into null-terminated strings.
bool parseSorD(vespalib::stringref key, vespalib::stringref value, NodeData & nodeData);
void removeExtraElements();
- void printStateGroupwise(std::ostream& out, const Group&, bool verbose,
- const std::string& indent, bool rootGroup) const;
-
+ void removeExtraElements(const NodeType& type);
+ void printStateGroupwise(std::ostream& out, const Group&, bool verbose, const std::string& indent, bool rootGroup) const;
+ void getTextualDifference(std::ostringstream& builder, const NodeType& type, const ClusterState& other) const;
+ size_t printStateGroupwise(std::ostream& out, const Group&, bool verbose, const std::string& indent, const NodeType& type) const;
+ void serialize_nodes(vespalib::asciistream & out, SeparatorPrinter & sep, const NodeType & nodeType,
+ const std::vector<NodeStatePair> & nodeStates) const;
+ uint32_t _version;
+ NodeCounts _nodeCount;
+ const State* _clusterState;
+ NodeMap _nodeStates;
+ vespalib::string _description;
+ uint16_t _distributionBits;
};
}
diff --git a/vdslib/src/vespa/vdslib/state/node.h b/vdslib/src/vespa/vdslib/state/node.h
index 2e33e54c638..49c8f0e641b 100644
--- a/vdslib/src/vespa/vdslib/state/node.h
+++ b/vdslib/src/vespa/vdslib/state/node.h
@@ -13,24 +13,25 @@ namespace storage::lib {
class Node {
const NodeType* _type;
- uint16_t _index;
+ uint16_t _index;
public:
Node() noexcept : _type(&NodeType::STORAGE), _index(0) { }
Node(const NodeType& type, uint16_t index) noexcept
: _type(&type), _index(index) { }
- const NodeType& getType() const { return *_type; }
- uint16_t getIndex() const { return _index; }
+ const NodeType& getType() const noexcept { return *_type; }
+ uint16_t getIndex() const noexcept { return _index; }
+ uint32_t hash() const noexcept { return (_index << 1) | *_type; }
- bool operator==(const Node& other) const {
+ bool operator==(const Node& other) const noexcept {
return (other._index == _index && *other._type == *_type);
}
- bool operator!=(const Node& other) const {
+ bool operator!=(const Node& other) const noexcept {
return (other._index != _index || *other._type != *_type);
}
- bool operator<(const Node& n) const {
+ bool operator<(const Node& n) const noexcept {
if (*_type != *n._type) return (*_type < *n._type);
return (_index < n._index);
}
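node.h gains a hash() member next to the noexcept comparison operators; that is what lets clusterstate.h above switch its node-state container from std::map to vespalib::hash_map keyed by Node. A hedged illustration, assuming the default vespalib hash functor picks up the member hash() (as the NodeMap alias in this patch suggests) and sticking to the operations ClusterState itself performs:

#include <vespa/vdslib/state/node.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <utility>

using storage::lib::Node;
using storage::lib::NodeType;

// Insert and look up a node, the same way ClusterState now uses its NodeMap.
bool containsStorageNode(vespalib::hash_map<Node, int>& states, uint16_t index) {
    states.insert(std::make_pair(Node(NodeType::STORAGE, index), 1));
    return states.find(Node(NodeType::STORAGE, index)) != states.end();
}
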
diff --git a/vespa-athenz/pom.xml b/vespa-athenz/pom.xml
index 7c3c982af84..55fd25f8b99 100644
--- a/vespa-athenz/pom.xml
+++ b/vespa-athenz/pom.xml
@@ -118,11 +118,7 @@
</exclusion>
<exclusion>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-core</artifactId>
- </exclusion>
- <exclusion>
- <groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
@@ -158,6 +154,10 @@
<artifactId>jakarta.activation-api</artifactId>
</exclusion>
<exclusion>
+ <groupId>com.sun.activation</groupId>
+ <artifactId>jakarta.activation</artifactId>
+ </exclusion>
+ <exclusion>
<groupId>jakarta.xml.bind</groupId>
<artifactId>jakarta.xml.bind-api</artifactId>
</exclusion>
@@ -275,6 +275,52 @@
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client-apache-v2</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpcore</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpcore</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.google.auth</groupId>
+ <artifactId>google-auth-library-oauth2-http</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
</dependencies>
<build>
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java
new file mode 100644
index 00000000000..bbdc3c2b372
--- /dev/null
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java
@@ -0,0 +1,180 @@
+package com.yahoo.vespa.athenz.gcp;
+
+import com.google.api.client.http.apache.v2.ApacheHttpTransport;
+import com.google.auth.http.HttpTransportFactory;
+import com.google.auth.oauth2.ExternalAccountCredentials;
+import com.yahoo.security.token.TokenDomain;
+import com.yahoo.security.token.TokenGenerator;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.identity.ServiceIdentityProvider;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.impl.client.HttpClientBuilder;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
+import java.util.Objects;
+
+public class GcpCredentials {
+ private static final TokenDomain domain = TokenDomain.of("athenz-gcp-oauth2-nonce");
+
+ private final InputStream tokenApiStream;
+ private final HttpTransportFactory httpTransportFactory;
+
+ private GcpCredentials(Builder builder) {
+ String clientId = builder.athenzDomain.getName() + ".gcp";
+ String audience = String.format("//iam.googleapis.com/projects/%s/locations/global/workloadIdentityPools/%s/providers/%s",
+ builder.projectNumber, builder.workloadPoolName, builder.workloadProviderName);
+ String serviceUrl = String.format("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/%s@%s.iam.gserviceaccount.com:generateAccessToken",
+ builder.serviceAccountName, builder.projectName);
+ String scope = URLEncoder.encode(generateIdTokenScope(builder.athenzDomain.getName(), builder.role), StandardCharsets.UTF_8);
+ String redirectUri = URLEncoder.encode(generateRedirectUri(clientId, builder.redirectURISuffix), StandardCharsets.UTF_8);
+ String tokenUrl = String.format("%s/oauth2/auth?response_type=id_token&client_id=%s&redirect_uri=%s&scope=%s&nonce=%s&keyType=EC&fullArn=true&output=json",
+ builder.ztsUrl, clientId, redirectUri, scope, TokenGenerator.generateToken(domain, "", 32).secretTokenString());
+
+ tokenApiStream = createTokenAPIStream(audience, serviceUrl, tokenUrl, builder.tokenLifetimeSeconds);
+ SSLConnectionSocketFactory sslConnectionSocketFactory = new SSLConnectionSocketFactory(builder.identityProvider.getIdentitySslContext());
+ HttpClientBuilder httpClientBuilder = ApacheHttpTransport.newDefaultHttpClientBuilder()
+ .setSSLSocketFactory(sslConnectionSocketFactory);
+ httpTransportFactory = () -> new ApacheHttpTransport(httpClientBuilder.build());
+ }
+
+ public ExternalAccountCredentials getCredential() throws IOException {
+ return ExternalAccountCredentials.fromStream(tokenApiStream, httpTransportFactory);
+ }
+
+ private InputStream createTokenAPIStream(final String audience, final String serviceUrl, final String tokenUrl,
+ int tokenLifetimeSeconds) {
+
+ Slime root = new Slime();
+ Cursor c = root.setObject();
+
+ c.setString("type", "external_account");
+ c.setString("audience", audience);
+ c.setString("subject_token_type", "urn:ietf:params:oauth:token-type:jwt");
+ c.setString("token_url", "https://sts.googleapis.com/v1/token");
+
+ c.setString("service_account_impersonation_url", serviceUrl);
+ Cursor sai = c.setObject("service_account_impersonation");
+ sai.setLong("token_lifetime_seconds", tokenLifetimeSeconds);
+
+ Cursor credentialSource = c.setObject("credential_source");
+ credentialSource.setString("url", tokenUrl);
+
+ Cursor credentialSourceFormat = credentialSource.setObject("format");
+ credentialSourceFormat.setString("type", "json");
+ credentialSourceFormat.setString("subject_token_field_name", "id_token");
+
+ try {
+ return new ByteArrayInputStream(SlimeUtils.toJsonBytes(root));
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private static String generateIdTokenScope(final String domainName, String roleName) {
+ StringBuilder scope = new StringBuilder(256);
+ scope.append("openid");
+ scope.append(' ').append(domainName).append(":role.").append(roleName);
+ return scope.toString();
+ }
+
+ private static String generateRedirectUri(final String clientId, String uriSuffix) {
+ int idx = clientId.lastIndexOf('.');
+ if (idx == -1) {
+ return "";
+ }
+ final String dashDomain = clientId.substring(0, idx).replace('.', '-');
+ final String service = clientId.substring(idx + 1);
+ return "https://" + service + "." + dashDomain + "." + uriSuffix;
+ }
+
+
+ public static class Builder {
+ private String ztsUrl;
+ private ServiceIdentityProvider identityProvider;
+ private String redirectURISuffix;
+ private AthenzDomain athenzDomain;
+ private String role;
+ private String projectName;
+ private String projectNumber;
+ private String serviceAccountName;
+
+ private int tokenLifetimeSeconds = 3600; // default to 1 hour lifetime
+ private String workloadPoolName = "athenz";
+ private String workloadProviderName = "athenz";
+
+ public GcpCredentials build() {
+ Objects.requireNonNull(ztsUrl);
+ Objects.requireNonNull(identityProvider);
+ Objects.requireNonNull(redirectURISuffix);
+ Objects.requireNonNull(athenzDomain);
+ Objects.requireNonNull(role);
+ Objects.requireNonNull(projectName);
+ Objects.requireNonNull(projectNumber);
+ Objects.requireNonNull(serviceAccountName);
+
+ return new GcpCredentials(this);
+ }
+
+ public Builder setZtsUrl(String ztsUrl) {
+ this.ztsUrl = ztsUrl;
+ return this;
+ }
+
+ public Builder identityProvider(ServiceIdentityProvider provider) {
+ this.identityProvider = provider;
+ return this;
+ }
+
+ public Builder redirectURISuffix(String redirectURISuffix) {
+ this.redirectURISuffix = redirectURISuffix;
+ return this;
+ }
+
+ public Builder athenzDomain(AthenzDomain athenzDomain) {
+ this.athenzDomain = athenzDomain;
+ return this;
+ }
+
+ public Builder role(String gcpRole) {
+ this.role = gcpRole;
+ return this;
+ }
+
+ public Builder projectName(String projectName) {
+ this.projectName = projectName;
+ return this;
+ }
+
+ public Builder projectNumber(String projectNumber) {
+ this.projectNumber = projectNumber;
+ return this;
+ }
+
+ public Builder serviceAccountName(String serviceAccountName) {
+ this.serviceAccountName = serviceAccountName;
+ return this;
+ }
+
+ public Builder tokenLifetimeSeconds(int tokenLifetimeSeconds) {
+ this.tokenLifetimeSeconds = tokenLifetimeSeconds;
+ return this;
+ }
+
+ public Builder workloadPoolName(String workloadPoolName) {
+ this.workloadPoolName = workloadPoolName;
+ return this;
+ }
+
+ public Builder workloadProviderName(String workloadProviderName) {
+ this.workloadProviderName = workloadProviderName;
+ return this;
+ }
+ }
+}
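The new GcpCredentials class wires Athenz identities into GCP workload identity federation: the builder collects the ZTS endpoint, Athenz domain and role, and the GCP project and service-account details, and getCredential() returns ExternalAccountCredentials backed by the generated credential-source JSON. A hedged usage sketch; every literal below is a placeholder, and identityProvider stands for whatever ServiceIdentityProvider the caller already has:

import com.google.auth.oauth2.ExternalAccountCredentials;
import com.yahoo.vespa.athenz.api.AthenzDomain;
import com.yahoo.vespa.athenz.gcp.GcpCredentials;
import com.yahoo.vespa.athenz.identity.ServiceIdentityProvider;

import java.io.IOException;

class GcpCredentialsExample {
    static ExternalAccountCredentials credentialsFor(ServiceIdentityProvider identityProvider) throws IOException {
        GcpCredentials gcpCredentials = new GcpCredentials.Builder()
                .setZtsUrl("https://zts.example.com:4443/zts/v1")   // placeholder ZTS endpoint
                .identityProvider(identityProvider)
                .redirectURISuffix("gcp.example.com")               // placeholder redirect-URI suffix
                .athenzDomain(new AthenzDomain("my.athenz.domain")) // placeholder Athenz domain
                .role("gcp-access")                                 // Athenz role mapped to a GCP role
                .projectName("my-gcp-project")
                .projectNumber("123456789012")
                .serviceAccountName("my-service-account")
                .build();
        return gcpCredentials.getCredential();                      // throws IOException on malformed config
    }
}
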
diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
index 7684e3ea2ae..c950f00e5b5 100644
--- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
+++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
@@ -9,11 +9,11 @@ backport-util-concurrent:backport-util-concurrent:3.1
ch.qos.logback:logback-classic:1.2.10
ch.qos.logback:logback-core:1.2.10
classworlds:classworlds:1.1-alpha-2
-com.amazonaws:aws-java-sdk-core:1.12.460
-com.amazonaws:aws-java-sdk-ssm:1.12.460
-com.amazonaws:aws-java-sdk-sts:1.12.460
-com.amazonaws:jmespath-java:1.12.460
-com.auth0:java-jwt:3.10.0
+com.amazonaws:aws-java-sdk-core:1.12.540
+com.amazonaws:aws-java-sdk-ssm:1.12.540
+com.amazonaws:aws-java-sdk-sts:1.12.540
+com.amazonaws:jmespath-java:1.12.540
+com.auth0:java-jwt:3.19.4
com.fasterxml.jackson.core:jackson-annotations:2.15.2
com.fasterxml.jackson.core:jackson-core:2.15.2
com.fasterxml.jackson.core:jackson-databind:2.15.2
@@ -23,72 +23,87 @@ com.fasterxml.jackson.datatype:jackson-datatype-jsr310:2.15.2
com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.15.2
com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.15.2
com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.15.2
+com.github.luben:zstd-jni:1.5.5-5
com.github.spotbugs:spotbugs-annotations:3.1.9
+com.google.auth:google-auth-library-credentials:1.19.0
+com.google.auth:google-auth-library-oauth2-http:1.19.0
+com.google.auto.value:auto-value-annotations:1.10.1
com.google.code.findbugs:jsr305:3.0.2
-com.google.errorprone:error_prone_annotations:2.18.0
+com.google.code.gson:gson:2.10
+com.google.errorprone:error_prone_annotations:2.21.1
com.google.guava:failureaccess:1.0.1
-com.google.guava:guava:32.1.1-jre
+com.google.guava:guava:32.1.2-jre
+com.google.http-client:google-http-client:1.43.3
+com.google.http-client:google-http-client-apache-v2:1.43.3
+com.google.http-client:google-http-client-gson:1.42.3
com.google.inject:guice:4.2.3:no_aop
com.google.j2objc:j2objc-annotations:2.8
-com.google.protobuf:protobuf-java:3.21.7
-com.ibm.icu:icu4j:70.1
-com.intellij:annotations:9.0.4
-com.microsoft.onnxruntime:onnxruntime:1.13.1
+com.google.protobuf:protobuf-java:3.24.2
+com.ibm.icu:icu4j:73.2
+com.microsoft.onnxruntime:onnxruntime:1.15.1
com.sun.activation:javax.activation:1.2.0
-com.sun.istack:istack-commons-runtime:3.0.8
+com.sun.istack:istack-commons-runtime:3.0.12
com.sun.xml.bind:jaxb-core:2.3.0
com.sun.xml.bind:jaxb-impl:2.3.0
-com.sun.xml.fastinfoset:FastInfoset:1.2.16
com.thaiopensource:jing:20091111
-com.yahoo.athenz:athenz-auth-core:1.11.34
-com.yahoo.athenz:athenz-client-common:1.11.34
-com.yahoo.athenz:athenz-zms-core:1.11.34
-com.yahoo.athenz:athenz-zpe-java-client:1.11.34
-com.yahoo.athenz:athenz-zts-core:1.11.34
+com.yahoo.athenz:athenz-auth-core:1.11.40
+com.yahoo.athenz:athenz-client-common:1.11.40
+com.yahoo.athenz:athenz-zms-core:1.11.40
+com.yahoo.athenz:athenz-zpe-java-client:1.11.40
+com.yahoo.athenz:athenz-zts-core:1.11.40
com.yahoo.rdl:rdl-java:1.5.4
commons-cli:commons-cli:1.5.0
-commons-codec:commons-codec:1.15
+commons-codec:commons-codec:1.16.0
commons-fileupload:commons-fileupload:1.5
-commons-io:commons-io:2.11.0
+commons-io:commons-io:2.13.0
commons-logging:commons-logging:1.2
io.airlift:airline:0.9
-io.dropwizard.metrics:metrics-core:3.2.5
+io.dropwizard.metrics:metrics-core:3.2.6
+io.grpc:grpc-context:1.27.2
io.jsonwebtoken:jjwt-api:0.11.5
io.jsonwebtoken:jjwt-impl:0.11.5
io.jsonwebtoken:jjwt-jackson:0.11.5
-io.netty:netty-buffer:4.1.94.Final
-io.netty:netty-codec:4.1.94.Final
-io.netty:netty-common:4.1.94.Final
-io.netty:netty-handler:4.1.94.Final
-io.netty:netty-resolver:4.1.94.Final
+io.netty:netty-buffer:4.1.97.Final
+io.netty:netty-codec:4.1.97.Final
+io.netty:netty-common:4.1.97.Final
+io.netty:netty-handler:4.1.97.Final
+io.netty:netty-resolver:4.1.97.Final
io.netty:netty-tcnative:2.0.61.Final
io.netty:netty-tcnative-classes:2.0.61.Final
-io.netty:netty-transport:4.1.94.Final
-io.netty:netty-transport-classes-epoll:4.1.94.Final
-io.netty:netty-transport-native-epoll:4.1.94.Final
-io.netty:netty-transport-native-unix-common:4.1.94.Final
-io.prometheus:simpleclient:0.6.0
-io.prometheus:simpleclient_common:0.6.0
+io.netty:netty-transport:4.1.97.Final
+io.netty:netty-transport-classes-epoll:4.1.97.Final
+io.netty:netty-transport-native-epoll:4.1.97.Final
+io.netty:netty-transport-native-unix-common:4.1.97.Final
+io.opencensus:opencensus-api:0.31.1
+io.opencensus:opencensus-contrib-http-util:0.31.1
+io.prometheus:simpleclient:0.16.0
+io.prometheus:simpleclient_common:0.16.0
+io.prometheus:simpleclient_tracer_common:0.16.0
+io.prometheus:simpleclient_tracer_otel:0.16.0
+io.prometheus:simpleclient_tracer_otel_agent:0.16.0
+jakarta.annotation:jakarta.annotation-api:1.3.5
+jakarta.validation:jakarta.validation-api:2.0.2
+jakarta.ws.rs:jakarta.ws.rs-api:2.1.6
javax.annotation:javax.annotation-api:1.2
javax.inject:javax.inject:1
javax.servlet:javax.servlet-api:3.1.0
-javax.validation:validation-api:1.1.0.Final
-javax.ws.rs:javax.ws.rs-api:2.0.1
+javax.ws.rs:javax.ws.rs-api:2.1.1
javax.xml.bind:jaxb-api:2.3.0
-joda-time:joda-time:2.12.2
-net.java.dev.jna:jna:5.11.0
+joda-time:joda-time:2.12.5
+junit:junit:4.13.2
+net.java.dev.jna:jna:5.13.0
net.openhft:zero-allocation-hashing:0.16
org.antlr:antlr-runtime:3.5.3
-org.antlr:antlr4-runtime:4.11.1
+org.antlr:antlr4-runtime:4.13.0
org.apache.aries.spifly:org.apache.aries.spifly.dynamic.bundle:1.3.6
org.apache.commons:commons-compress:1.23.0
-org.apache.commons:commons-csv:1.8
+org.apache.commons:commons-csv:1.10.0
org.apache.commons:commons-exec:1.3
-org.apache.commons:commons-lang3:3.12.0
+org.apache.commons:commons-lang3:3.13.0
org.apache.commons:commons-math3:3.6.1
-org.apache.curator:curator-client:5.4.0
-org.apache.curator:curator-framework:5.4.0
-org.apache.curator:curator-recipes:5.4.0
+org.apache.curator:curator-client:5.5.0
+org.apache.curator:curator-framework:5.5.0
+org.apache.curator:curator-recipes:5.5.0
org.apache.felix:org.apache.felix.framework:7.0.5
org.apache.felix:org.apache.felix.log:1.0.1
org.apache.httpcomponents:httpclient:4.5.14
@@ -99,33 +114,21 @@ org.apache.httpcomponents.core5:httpcore5:5.2.2
org.apache.httpcomponents.core5:httpcore5-h2:5.2.2
org.apache.lucene:lucene-analysis-common:9.7.0
org.apache.lucene:lucene-core:9.7.0
-org.apache.maven:maven-archiver:3.6.0
-org.apache.maven:maven-artifact:3.8.7
+org.apache.maven:maven-archiver:3.6.1
+org.apache.maven:maven-artifact:3.9.4
org.apache.maven:maven-artifact-manager:2.2.1
-org.apache.maven:maven-builder-support:3.8.7
-org.apache.maven:maven-compat:3.0
-org.apache.maven:maven-core:3.8.7
-org.apache.maven:maven-model:3.8.7
-org.apache.maven:maven-model-builder:3.8.7
-org.apache.maven:maven-plugin-api:3.8.7
+org.apache.maven:maven-model:3.9.4
+org.apache.maven:maven-plugin-api:3.9.4
org.apache.maven:maven-plugin-registry:2.2.1
org.apache.maven:maven-profile:2.2.1
org.apache.maven:maven-project:2.2.1
-org.apache.maven:maven-repository-metadata:3.8.7
-org.apache.maven:maven-resolver-provider:3.8.7
-org.apache.maven:maven-settings:3.8.7
-org.apache.maven:maven-settings-builder:3.8.7
-org.apache.maven.plugin-tools:maven-plugin-annotations:3.6.4
-org.apache.maven.plugins:maven-jar-plugin:3.2.0
-org.apache.maven.resolver:maven-resolver-api:1.6.3
-org.apache.maven.resolver:maven-resolver-impl:1.6.3
-org.apache.maven.resolver:maven-resolver-spi:1.6.3
-org.apache.maven.resolver:maven-resolver-util:1.6.3
-org.apache.maven.shared:file-management:3.0.0
-org.apache.maven.shared:maven-shared-io:3.0.0
-org.apache.maven.shared:maven-shared-utils:3.2.1
-org.apache.maven.wagon:wagon-provider-api:2.10
-org.apache.opennlp:opennlp-tools:1.9.3
+org.apache.maven:maven-repository-metadata:3.9.4
+org.apache.maven:maven-settings:3.9.4
+org.apache.maven.plugin-tools:maven-plugin-annotations:3.9.0
+org.apache.maven.plugins:maven-jar-plugin:3.3.0
+org.apache.maven.shared:file-management:3.1.0
+org.apache.maven.wagon:wagon-provider-api:3.5.3
+org.apache.opennlp:opennlp-tools:1.9.4
org.apache.velocity:velocity-engine-core:2.3
org.apache.yetus:audience-annotations:0.12.0
org.apache.zookeeper:zookeeper:3.8.0
@@ -133,20 +136,18 @@ org.apache.zookeeper:zookeeper:3.8.1
org.apache.zookeeper:zookeeper-jute:3.8.0
org.apache.zookeeper:zookeeper-jute:3.8.1
org.apiguardian:apiguardian-api:1.1.2
-org.bouncycastle:bcpkix-jdk18on:1.74
-org.bouncycastle:bcprov-jdk18on:1.74
-org.bouncycastle:bcutil-jdk18on:1.74
-org.codehaus.plexus:plexus-archiver:4.2.1
-org.codehaus.plexus:plexus-cipher:2.0
-org.codehaus.plexus:plexus-classworlds:2.6.0
+org.bouncycastle:bcpkix-jdk18on:1.76
+org.bouncycastle:bcprov-jdk18on:1.76
+org.bouncycastle:bcutil-jdk18on:1.76
+org.codehaus.plexus:plexus-archiver:4.8.0
+org.codehaus.plexus:plexus-classworlds:2.7.0
org.codehaus.plexus:plexus-component-annotations:1.5.5
org.codehaus.plexus:plexus-container-default:1.0-alpha-9-stable-1
org.codehaus.plexus:plexus-interpolation:1.26
-org.codehaus.plexus:plexus-io:3.4.0
-org.codehaus.plexus:plexus-sec-dispatcher:2.0
-org.codehaus.plexus:plexus-utils:3.3.1
-org.eclipse.collections:eclipse-collections:11.0.0
-org.eclipse.collections:eclipse-collections-api:11.0.0
+org.codehaus.plexus:plexus-io:3.4.1
+org.codehaus.plexus:plexus-utils:3.5.1
+org.eclipse.collections:eclipse-collections:11.1.0
+org.eclipse.collections:eclipse-collections-api:11.1.0
org.eclipse.jetty:jetty-alpn-client:11.0.15
org.eclipse.jetty:jetty-alpn-java-client:11.0.15
org.eclipse.jetty:jetty-alpn-java-server:11.0.15
@@ -168,78 +169,65 @@ org.eclipse.jetty.toolchain:jetty-jakarta-servlet-api:5.0.2
org.eclipse.sisu:org.eclipse.sisu.inject:0.3.5
org.eclipse.sisu:org.eclipse.sisu.plexus:0.3.5
org.fusesource.jansi:jansi:1.18
-org.glassfish.hk2:hk2-api:2.5.0-b30
-org.glassfish.hk2:hk2-locator:2.5.0-b30
-org.glassfish.hk2:hk2-utils:2.5.0-b30
-org.glassfish.hk2:osgi-resource-locator:1.0.1
-org.glassfish.hk2.external:aopalliance-repackaged:2.5.0-b30
-org.glassfish.hk2.external:javax.inject:2.5.0-b30
-org.glassfish.jaxb:jaxb-runtime:2.3.2
-org.glassfish.jaxb:txw2:2.3.2
-org.glassfish.jersey.bundles.repackaged:jersey-guava:2.25
-org.glassfish.jersey.core:jersey-client:2.25
-org.glassfish.jersey.core:jersey-common:2.25
-org.glassfish.jersey.core:jersey-server:2.25
-org.glassfish.jersey.ext:jersey-entity-filtering:2.25
-org.glassfish.jersey.ext:jersey-proxy-client:2.25
-org.glassfish.jersey.media:jersey-media-json-jackson:2.25
-org.glassfish.jersey.media:jersey-media-multipart:2.25
+org.glassfish.hk2:osgi-resource-locator:1.0.3
+org.glassfish.hk2.external:jakarta.inject:2.6.1
+org.glassfish.jaxb:jaxb-runtime:2.3.8
+org.glassfish.jaxb:txw2:2.3.8
+org.glassfish.jersey.core:jersey-client:2.40
+org.glassfish.jersey.core:jersey-common:2.40
+org.glassfish.jersey.core:jersey-server:2.40
+org.glassfish.jersey.ext:jersey-entity-filtering:2.40
+org.glassfish.jersey.ext:jersey-proxy-client:2.40
+org.glassfish.jersey.media:jersey-media-json-jackson:2.40
+org.glassfish.jersey.media:jersey-media-multipart:2.40
+org.hamcrest:hamcrest:2.2
+org.hamcrest:hamcrest-core:2.2
org.hdrhistogram:HdrHistogram:2.1.12
org.iq80.snappy:snappy:0.4
-org.javassist:javassist:3.20.0-GA
-org.json:json:20230227
+org.json:json:20230618
org.junit.jupiter:junit-jupiter-api:5.8.1
org.junit.jupiter:junit-jupiter-engine:5.8.1
org.junit.platform:junit-platform-commons:1.8.1
org.junit.platform:junit-platform-engine:1.8.1
org.junit.platform:junit-platform-launcher:1.8.1
-org.jvnet.mimepull:mimepull:1.9.6
-org.jvnet.staxex:stax-ex:1.8.1
+org.jvnet.mimepull:mimepull:1.10.0
org.kohsuke:libpam4j:1.11
org.lz4:lz4-java:1.8.0
-org.opentest4j:opentest4j:1.2.0
+org.opentest4j:opentest4j:1.3.0
org.osgi:org.osgi.compendium:4.1.0
org.osgi:org.osgi.core:4.1.0
-org.ow2.asm:asm:9.3
-org.ow2.asm:asm-analysis:9.3
-org.ow2.asm:asm-commons:9.3
-org.ow2.asm:asm-tree:9.3
-org.ow2.asm:asm-util:9.3
+org.ow2.asm:asm:9.5
+org.ow2.asm:asm-analysis:9.5
+org.ow2.asm:asm-commons:9.5
+org.ow2.asm:asm-tree:9.5
+org.ow2.asm:asm-util:9.5
org.questdb:questdb:6.2
-org.slf4j:jcl-over-slf4j:1.7.32
-org.slf4j:log4j-over-slf4j:1.7.32
-org.slf4j:slf4j-api:1.7.32
-org.slf4j:slf4j-jdk14:1.7.32
-org.slf4j:slf4j-simple:1.7.32
-org.sonatype.sisu:sisu-guice:2.1.7:noaop
-org.sonatype.sisu:sisu-inject-bean:1.4.2
-org.sonatype.sisu:sisu-inject-plexus:1.4.2
-org.tukaani:xz:1.8
-org.xerial.snappy:snappy-java:1.1.10.1
+org.slf4j:jcl-over-slf4j:1.7.36
+org.slf4j:log4j-over-slf4j:1.7.36
+org.slf4j:slf4j-api:1.7.36
+org.slf4j:slf4j-jdk14:1.7.36
+org.slf4j:slf4j-simple:1.7.36
+org.tukaani:xz:1.9
+org.xerial.snappy:snappy-java:1.1.10.3
software.amazon.ion:ion-java:1.0.2
xerces:xercesImpl:2.12.2
xml-apis:xml-apis:1.4.01
#[test-only]
# Contains dependencies that are used exclusively in 'test' scope
-com.github.luben:zstd-jni:1.5.5-4
com.github.tomakehurst:wiremock-jre8-standalone:2.35.0
-com.google.guava:guava-testlib:32.1.1-jre
+com.google.guava:guava-testlib:32.1.2-jre
com.google.inject:guice:4.2.3
-com.google.jimfs:jimfs:1.2
-junit:junit:4.13.2
-net.bytebuddy:byte-buddy:1.11.19
-net.bytebuddy:byte-buddy-agent:1.11.19
-org.apache.curator:curator-test:5.4.0
-org.assertj:assertj-core:3.11.1
-org.checkerframework:checker-qual:3.30.0
-org.cthul:cthul-matchers:1.0
-org.hamcrest:hamcrest-all:1.3
-org.hamcrest:hamcrest-core:1.3
-org.hamcrest:hamcrest-library:1.3
+com.google.jimfs:jimfs:1.3.0
+net.bytebuddy:byte-buddy:1.12.21
+net.bytebuddy:byte-buddy:1.14.6
+net.bytebuddy:byte-buddy-agent:1.14.6
+org.apache.curator:curator-test:5.5.0
+org.assertj:assertj-core:3.24.2
+org.checkerframework:checker-qual:3.37.0
org.junit.jupiter:junit-jupiter:5.8.1
org.junit.jupiter:junit-jupiter-params:5.8.1
org.junit.vintage:junit-vintage-engine:5.8.1
-org.mockito:mockito-core:4.0.0
-org.mockito:mockito-junit-jupiter:4.0.0
-org.objenesis:objenesis:3.2
+org.mockito:mockito-core:5.5.0
+org.mockito:mockito-junit-jupiter:5.5.0
+org.objenesis:objenesis:3.3
diff --git a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java
index ce86ad59ffe..ebbff9ed84b 100644
--- a/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java
+++ b/vespa-feed-client/src/main/java/ai/vespa/feed/client/impl/HttpRequestStrategy.java
@@ -149,7 +149,7 @@ class HttpRequestStrategy implements RequestStrategy {
return false;
}
- /** Retries throttled requests (429, 503), adjusting the target inflight count, and server errors (500, 502). */
+ /** Retries throttled requests (429), adjusting the target inflight count, and server errors (500, 502, 503, 504). */
private boolean retry(HttpRequest request, HttpResponse response, int attempt) {
if (response.code() / 100 == 2 || response.code() == 404 || response.code() == 412) {
logResponse(FINEST, response, request, attempt);
@@ -159,14 +159,14 @@ class HttpRequestStrategy implements RequestStrategy {
}
- if (response.code() == 429 || response.code() == 503) { // Throttling; reduce target inflight.
+ if (response.code() == 429) { // Throttling; reduce target inflight.
logResponse(FINER, response, request, attempt);
throttler.throttled((inflight.get() - delayedCount.get()));
return true;
}
logResponse(FINE, response, request, attempt);
- if (response.code() == 500 || response.code() == 502 || response.code() == 504) { // Hopefully temporary errors.
+ if (response.code() == 500 || response.code() == 502 || response.code() == 503 || response.code() == 504) { // Hopefully temporary errors.
breaker.failure(response);
return retry(request, attempt);
}
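Note: the new classification can be read directly off the code above. For reference, a minimal standalone C++ sketch of the same policy (illustrative only, not the Java feed client; treating codes outside the listed branches as non-retriable is an assumption here):

#include <cstdio>

// Sketch of the retry policy above: 2xx/404/412 are final, 429 throttles and retries,
// 500/502/503/504 are treated as hopefully temporary server errors and retried.
enum class Outcome { Done, ThrottleAndRetry, FailAndRetry, GiveUp };

Outcome classify(int status) {
    if (status / 100 == 2 || status == 404 || status == 412) return Outcome::Done;
    if (status == 429) return Outcome::ThrottleAndRetry;   // reduce target inflight
    if (status == 500 || status == 502 || status == 503 || status == 504) return Outcome::FailAndRetry;
    return Outcome::GiveUp;                                // assumption for any other code
}

int main() {
    std::printf("503 -> %d\n", static_cast<int>(classify(503)));  // now a retried server error, no longer throttling
}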
diff --git a/vespa-maven-plugin/pom.xml b/vespa-maven-plugin/pom.xml
index e5b7dec71e4..d4f2e8649af 100644
--- a/vespa-maven-plugin/pom.xml
+++ b/vespa-maven-plugin/pom.xml
@@ -84,6 +84,11 @@
<artifactId>maven-jar-plugin</artifactId>
<scope>provided</scope>
</dependency>
+ <dependency>
+ <groupId>org.apache.maven</groupId>
+ <artifactId>maven-project</artifactId>
+ <scope>provided</scope>
+ </dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
diff --git a/vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/SubmitMojo.java b/vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/SubmitMojo.java
index f6ef17bc1b8..eebb7f4e738 100644
--- a/vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/SubmitMojo.java
+++ b/vespa-maven-plugin/src/main/java/ai/vespa/hosted/plugin/SubmitMojo.java
@@ -55,7 +55,7 @@ public class SubmitMojo extends AbstractVespaMojo {
optionalOf(projectId, Long::parseLong), optionalOf(risk, Integer::parseInt),
optionalOf(description));
- getLog().info(controller.submit(submission, id.tenant(), id.application()));
+ getLog().info(controller.submit(submission, id.tenant(), id.application()).message());
}
}
diff --git a/vespajlib/pom.xml b/vespajlib/pom.xml
index 5ba7f2d2ff2..d3c9831c596 100644
--- a/vespajlib/pom.xml
+++ b/vespajlib/pom.xml
@@ -67,7 +67,7 @@
<!-- test scope -->
<dependency>
<groupId>org.hamcrest</groupId>
- <artifactId>hamcrest-library</artifactId>
+ <artifactId>hamcrest</artifactId>
<scope>test</scope>
</dependency>
<dependency>
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
index 68af9aa0a49..7fa591a88ba 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/maintenance/Maintainer.java
@@ -48,9 +48,7 @@ public abstract class Maintainer implements Runnable {
this.ignoreCollision = ignoreCollision;
this.clock = clock;
this.successFactorBaseline = successFactorBaseline;
- var startedAt = clock.instant();
- Objects.requireNonNull(clusterHostnames);
- Duration initialDelay = staggeredDelay(interval, startedAt, HostName.getLocalhost(), clusterHostnames)
+ Duration initialDelay = staggeredDelay(interval, HostName.getLocalhost(), clusterHostnames)
.plus(Duration.ofSeconds(30)); // Let the system stabilize before maintenance
service = new ScheduledThreadPoolExecutor(1, r -> new Thread(r, name() + "-worker"));
service.scheduleAtFixedRate(this, initialDelay.toMillis(), interval.toMillis(), TimeUnit.MILLISECONDS);
@@ -148,14 +146,17 @@ public abstract class Maintainer implements Runnable {
return name == null ? this.getClass().getSimpleName() : name;
}
- /** Returns the initial delay of this calculated from cluster index of given hostname */
- static Duration staggeredDelay(Duration interval, Instant now, String hostname, List<String> clusterHostnames) {
+ /** Returns the initial delay of this calculated from cluster index of the hostname of this node, and the maintainer name. */
+ Duration staggeredDelay(Duration interval, String hostname, List<String> clusterHostnames) {
Objects.requireNonNull(clusterHostnames);
if ( ! clusterHostnames.contains(hostname))
return interval;
- long offset = clusterHostnames.indexOf(hostname) * interval.toMillis() / clusterHostnames.size();
- return Duration.ofMillis(Math.floorMod(offset - now.toEpochMilli(), interval.toMillis()));
+ Instant now = clock.instant();
+ long nodeOffset = clusterHostnames.indexOf(hostname) * interval.toMillis() / clusterHostnames.size();
+ long maintainerOffset = getClass().getName().hashCode() % interval.toMillis();
+ long totalOffset = nodeOffset + maintainerOffset;
+ return Duration.ofMillis(Math.floorMod(totalOffset - now.toEpochMilli(), interval.toMillis()));
}
private static Duration requireInterval(Duration interval) {
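Note: the staggering arithmetic above is easiest to follow with concrete numbers. Below is a minimal standalone C++ sketch of the same computation, assuming millisecond values and a caller-supplied name hash (the Java code uses Math.floorMod, reproduced here as floor_mod):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

// floorMod as in java.lang.Math: the result is always in [0, m) for m > 0.
int64_t floor_mod(int64_t x, int64_t m) { return ((x % m) + m) % m; }

// Sketch of the delay computation above: hosts are spread evenly across the interval,
// and each maintainer adds an extra offset derived from its (caller-supplied) name hash.
int64_t staggered_delay_ms(int64_t interval_ms, int64_t now_ms,
                           const std::string& hostname, int64_t name_hash,
                           const std::vector<std::string>& cluster) {
    auto it = std::find(cluster.begin(), cluster.end(), hostname);
    if (it == cluster.end()) return interval_ms;  // unknown host: wait a full interval
    int64_t node_offset = (it - cluster.begin()) * interval_ms / static_cast<int64_t>(cluster.size());
    int64_t maintainer_offset = name_hash % interval_ms;
    return floor_mod(node_offset + maintainer_offset - now_ms, interval_ms);
}

int main() {
    std::vector<std::string> cluster{"cfg1", "cfg2", "cfg3"};
    // With a zero name hash this reduces to the previous per-node staggering: prints 200.
    std::printf("%lld\n", static_cast<long long>(staggered_delay_ms(300, 1000, "cfg1", 0, cluster)));
}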
diff --git a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java
index cdb5e36a455..a65af1e264e 100644
--- a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java
+++ b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/MaintainerTest.java
@@ -2,6 +2,7 @@
package com.yahoo.concurrent.maintenance;
import com.yahoo.concurrent.UncheckedTimeoutException;
+import com.yahoo.test.ManualClock;
import org.junit.Test;
import java.time.Duration;
@@ -23,22 +24,30 @@ public class MaintainerTest {
public void staggering() {
List<String> cluster = List.of("cfg1", "cfg2", "cfg3");
Duration interval = Duration.ofMillis(300);
- Instant now = Instant.ofEpochMilli(1000);
- assertEquals(200, Maintainer.staggeredDelay(interval, now, "cfg1", cluster).toMillis());
- assertEquals(0, Maintainer.staggeredDelay(interval, now, "cfg2", cluster).toMillis());
- assertEquals(100, Maintainer.staggeredDelay(interval, now, "cfg3", cluster).toMillis());
-
- now = Instant.ofEpochMilli(1001);
- assertEquals(199, Maintainer.staggeredDelay(interval, now, "cfg1", cluster).toMillis());
- assertEquals(299, Maintainer.staggeredDelay(interval, now, "cfg2", cluster).toMillis());
- assertEquals(99, Maintainer.staggeredDelay(interval, now, "cfg3", cluster).toMillis());
-
- now = Instant.ofEpochMilli(1101);
- assertEquals(99, Maintainer.staggeredDelay(interval, now, "cfg1", cluster).toMillis());
- assertEquals(199, Maintainer.staggeredDelay(interval, now, "cfg2", cluster).toMillis());
- assertEquals(299, Maintainer.staggeredDelay(interval, now, "cfg3", cluster).toMillis());
-
- assertEquals(300, Maintainer.staggeredDelay(interval, now, "cfg0", cluster).toMillis());
+ ManualClock clock = new ManualClock(Instant.ofEpochMilli(1000));
+
+ // ∠( ᐛ 」∠)_
+ class MaintainerWithBestHashE extends TestMaintainer { MaintainerWithBestHashE() { super(jobControl, new TestJobMetrics(), clock); } }
+ class MaintainerWithBestHashF extends TestMaintainer { MaintainerWithBestHashF() { super(jobControl, new TestJobMetrics(), clock); } }
+ class MaintainerWithBestHashG extends TestMaintainer { MaintainerWithBestHashG() { super(jobControl, new TestJobMetrics(), clock); } }
+
+ assertEquals(200, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg1", cluster).toMillis());
+ assertEquals(299, new MaintainerWithBestHashE().staggeredDelay(interval, "cfg2", cluster).toMillis());
+ assertEquals( 0, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg2", cluster).toMillis());
+ assertEquals( 1, new MaintainerWithBestHashG().staggeredDelay(interval, "cfg2", cluster).toMillis());
+ assertEquals(100, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg3", cluster).toMillis());
+
+ clock.advance(Duration.ofMillis(1));
+ assertEquals(199, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg1", cluster).toMillis());
+ assertEquals(299, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg2", cluster).toMillis());
+ assertEquals( 99, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg3", cluster).toMillis());
+
+ clock.advance(Duration.ofMillis(100));
+ assertEquals( 99, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg1", cluster).toMillis());
+ assertEquals(199, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg2", cluster).toMillis());
+ assertEquals(299, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg3", cluster).toMillis());
+
+ assertEquals(300, new MaintainerWithBestHashF().staggeredDelay(interval, "cfg0", cluster).toMillis());
}
@Test
diff --git a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java
index d8191b98a51..8c7ca1e18db 100644
--- a/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java
+++ b/vespajlib/src/test/java/com/yahoo/concurrent/maintenance/TestMaintainer.java
@@ -15,20 +15,24 @@ class TestMaintainer extends Maintainer {
private double success = 1.0;
private RuntimeException exceptionToThrow = null;
- public TestMaintainer(String name, JobControl jobControl, JobMetrics jobMetrics) {
+ TestMaintainer(JobControl jobControl, JobMetrics jobMetrics, Clock clock) {
+ super(null, Duration.ofDays(1), clock, jobControl, jobMetrics, List.of(), false);
+ }
+
+ TestMaintainer(String name, JobControl jobControl, JobMetrics jobMetrics) {
super(name, Duration.ofDays(1), Clock.systemUTC(), jobControl, jobMetrics, List.of(), false);
}
- public int totalRuns() {
+ int totalRuns() {
return totalRuns;
}
- public TestMaintainer successOnNextRun(double success) {
+ TestMaintainer successOnNextRun(double success) {
this.success = success;
return this;
}
- public TestMaintainer throwOnNextRun(RuntimeException e) {
+ TestMaintainer throwOnNextRun(RuntimeException e) {
this.exceptionToThrow = e;
return this;
}
diff --git a/vespalib/src/tests/alloc/alloc_test.cpp b/vespalib/src/tests/alloc/alloc_test.cpp
index 04e009fcf8a..f39543daa2d 100644
--- a/vespalib/src/tests/alloc/alloc_test.cpp
+++ b/vespalib/src/tests/alloc/alloc_test.cpp
@@ -1,11 +1,11 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/config.h>
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/util/alloc.h>
#include <vespa/vespalib/util/memory_allocator.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/round_up_to_page_size.h>
-#include <vespa/vespalib/util/sanitizers.h>
#include <vespa/vespalib/util/size_literals.h>
#include <cstddef>
#include <sys/mman.h>
@@ -216,12 +216,12 @@ TEST("auto alloced mmap alloc can not be extended if no room") {
}
/*
- * The two following tests are disabled when address sanitizer is
+ * The two following tests are disabled when any sanitizer is
* enabled since extra instrumentation code might trigger extra mmap
* or munmap calls, breaking some of the assumptions in the disabled
* tests.
*/
-#ifndef VESPA_USE_ADDRESS_SANITIZER
+#ifndef VESPA_USE_SANITIZER
TEST("mmap alloc can be extended if room") {
Alloc dummy = Alloc::allocMMap(100);
Alloc reserved = Alloc::allocMMap(100);
diff --git a/vespalib/src/tests/arrayref/arrayref_test.cpp b/vespalib/src/tests/arrayref/arrayref_test.cpp
index bd8646b2f99..8c41d38b292 100644
--- a/vespalib/src/tests/arrayref/arrayref_test.cpp
+++ b/vespalib/src/tests/arrayref/arrayref_test.cpp
@@ -53,4 +53,24 @@ TEST("require that references can be unconstified") {
EXPECT_EQUAL(data[1], 5);
}
+TEST("require that std::array references can be constified") {
+ std::array<int,3> data({1,2,3});
+ const ArrayRef<int> array_ref(data);
+ ConstArrayRef<int> const_ref(array_ref);
+ EXPECT_EQUAL(const_ref.size(), 3u);
+ EXPECT_EQUAL(const_ref.end() - const_ref.begin(), 3);
+ EXPECT_EQUAL(const_ref[2], 3);
+}
+
+TEST("require that references can be unconstified") {
+ std::array<int, 3> data({1,2,3});
+ const ConstArrayRef<int> const_ref(data);
+ ArrayRef<int> array_ref = unconstify(const_ref);
+ EXPECT_EQUAL(array_ref.size(), 3u);
+ EXPECT_EQUAL(array_ref.end() - array_ref.begin(), 3);
+ EXPECT_EQUAL(array_ref[1], 2);
+ array_ref[1] = 5;
+ EXPECT_EQUAL(data[1], 5);
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/guard/guard_test.cpp b/vespalib/src/tests/guard/guard_test.cpp
index 9e5e7e55cc6..c61c4874eff 100644
--- a/vespalib/src/tests/guard/guard_test.cpp
+++ b/vespalib/src/tests/guard/guard_test.cpp
@@ -7,20 +7,7 @@
using namespace vespalib;
-class Test : public TestApp
-{
-public:
- void testFilePointer();
- void testFileDescriptor();
- void testDirPointer();
- void testValueGuard();
- void testMaxValueGuard();
- void testCounterGuard();
- int Main() override;
-};
-
-void
-Test::testFilePointer()
+TEST("testFilePointer")
{
{
FilePointer file(fopen("bogus", "r"));
@@ -72,8 +59,7 @@ Test::testFilePointer()
}
}
-void
-Test::testFileDescriptor()
+TEST("testFileDescriptor")
{
{
FileDescriptor file(open("bogus", O_RDONLY));
@@ -126,124 +112,7 @@ Test::testFileDescriptor()
}
}
-void
-Test::testDirPointer()
-{
- {
- DirPointer dir(opendir("bogus"));
- EXPECT_TRUE(!dir.valid());
- }
- {
- DirPointer dir(opendir(TEST_PATH("").c_str()));
- EXPECT_TRUE(dir.valid());
-
- dirent *de;
- bool foundGuardCpp = false;
- while ((de = readdir(dir)) != NULL) {
- if (strcmp(de->d_name, "guard_test.cpp") == 0) {
- foundGuardCpp = true;
- }
- }
- EXPECT_TRUE(foundGuardCpp);
- }
- {
- DIR *dp = NULL;
- {
- DirPointer dir(opendir("."));
- EXPECT_TRUE(dir.valid());
- dp = dir;
- }
- EXPECT_TRUE(dp != NULL);
- // EXPECT_TRUE(readdir(dp) == NULL);
- }
- {
- DirPointer dir(opendir("."));
- EXPECT_TRUE(dir.valid());
- dir.reset(opendir("."));
- EXPECT_TRUE(dir.valid());
-
- DIR *ref = dir.dp();
- DIR *dp = dir.release();
- EXPECT_TRUE(dp != NULL);
- EXPECT_TRUE(dp == ref);
- EXPECT_TRUE(!dir.valid());
- EXPECT_TRUE(dir.dp() == NULL);
- closedir(dp);
- }
-}
-
-void
-Test::testValueGuard()
-{
- int value = 10;
- {
- ValueGuard<int> guard(value);
- value = 20;
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 10);
- {
- ValueGuard<int> guard(value, 50);
- value = 20;
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 50);
- {
- ValueGuard<int> guard(value);
- value = 20;
- guard.update(100);
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 100);
- {
- ValueGuard<int> guard(value);
- value = 20;
- guard.dismiss();
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 20);
-}
-
-void
-Test::testMaxValueGuard()
-{
- int value = 10;
- {
- MaxValueGuard<int> guard(value);
- value = 20;
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 10);
- {
- MaxValueGuard<int> guard(value);
- value = 5;
- EXPECT_TRUE(value == 5);
- }
- EXPECT_TRUE(value == 5);
- {
- MaxValueGuard<int> guard(value, 50);
- value = 100;
- EXPECT_TRUE(value == 100);
- }
- EXPECT_TRUE(value == 50);
- {
- MaxValueGuard<int> guard(value);
- value = 200;
- guard.update(100);
- EXPECT_TRUE(value == 200);
- }
- EXPECT_TRUE(value == 100);
- {
- MaxValueGuard<int> guard(value);
- value = 200;
- guard.dismiss();
- EXPECT_TRUE(value == 200);
- }
- EXPECT_TRUE(value == 200);
-}
-
-void
-Test::testCounterGuard()
+TEST("testCounterGuard")
{
int cnt = 10;
{
@@ -254,17 +123,4 @@ Test::testCounterGuard()
EXPECT_TRUE(cnt == 10);
}
-int
-Test::Main()
-{
- TEST_INIT("guard_test");
- testFilePointer();
- testFileDescriptor();
- testDirPointer();
- testValueGuard();
- testMaxValueGuard();
- testCounterGuard();
- TEST_DONE();
-}
-
-TEST_APPHOOK(Test)
+TEST_MAIN() { TEST_RUN_ALL(); }
\ No newline at end of file
diff --git a/vespalib/src/tests/io/fileutil/fileutiltest.cpp b/vespalib/src/tests/io/fileutil/fileutiltest.cpp
index 0948d18304e..93803c1fe9e 100644
--- a/vespalib/src/tests/io/fileutil/fileutiltest.cpp
+++ b/vespalib/src/tests/io/fileutil/fileutiltest.cpp
@@ -102,15 +102,6 @@ TEST("require that vespalib::File::open works")
ASSERT_TRUE(fileExists("mydir/myfile"));
f.unlink();
}
- // Opening with direct IO support works.
- {
- File f("mydir/myfile");
- f.open(File::CREATE | File::DIRECTIO, false);
- ASSERT_TRUE(fileExists("mydir/myfile"));
- if (!f.isOpenWithDirectIO()) {
- std::cerr << "This platform does not support direct IO\n";
- }
- }
// Opening plain file works
{
File f("myfile");
@@ -126,16 +117,6 @@ TEST("require that vespalib::File::open works")
//std::cerr << e.what() << "\n";
EXPECT_EQUAL(IoException::ILLEGAL_PATH, e.getType());
}
- // Test opening already open file
- {
- std::unique_ptr<File> f(new File("myfile"));
- f->open(File::CREATE, false);
- f->closeFileWhenDestructed(false);
- File f2(f->getFileDescriptor(), "myfile");
- f.reset();
- ASSERT_TRUE(f2.isOpen());
- f2.write(" ", 1, 0);
- }
// Test reopening file in same object
{
File f("myfile");
@@ -161,29 +142,6 @@ TEST("require that vespalib::File::isOpen works")
ASSERT_TRUE(!f.isOpen());
}
-TEST("require that vespalib::File::stat works")
-{
- std::filesystem::remove(std::filesystem::path("myfile"));
- std::filesystem::remove_all(std::filesystem::path("mydir"));
- EXPECT_EQUAL(false, fileExists("myfile"));
- EXPECT_EQUAL(false, fileExists("mydir"));
- std::filesystem::create_directory(std::filesystem::path("mydir"));
- File f("myfile");
- f.open(File::CREATE, false);
- f.write("foobar", 6, 0);
-
- FileInfo info = f.stat();
- EXPECT_EQUAL(6, info._size);
- EXPECT_EQUAL(true, info._plainfile);
- EXPECT_EQUAL(false, info._directory);
-
- EXPECT_EQUAL(6, f.getFileSize());
- f.close();
-
- EXPECT_EQUAL(true, fileExists("myfile"));
- EXPECT_EQUAL(true, fileExists("mydir"));
-}
-
TEST("require that vespalib::File::resize works")
{
std::filesystem::remove(std::filesystem::path("myfile"));
@@ -204,47 +162,6 @@ TEST("require that vespalib::File::resize works")
EXPECT_EQUAL(std::string("foo"), std::string(&vec[0], 3));
}
-TEST("require that copy constructor and assignment for vespalib::File works")
-{
- // Copy file not opened.
- {
- File f("myfile");
- File f2(f);
- EXPECT_EQUAL(f.getFilename(), f2.getFilename());
- }
- // Copy file opened
- {
- File f("myfile");
- f.open(File::CREATE);
- File f2(f);
- EXPECT_EQUAL(f.getFilename(), f2.getFilename());
- ASSERT_TRUE(f2.isOpen());
- ASSERT_TRUE(!f.isOpen());
- }
- // Assign file opened to another file opened
- {
- File f("myfile");
- f.open(File::CREATE);
- int fd = f.getFileDescriptor();
- File f2("targetfile");
- f2.open(File::CREATE);
- f = f2;
- EXPECT_EQUAL(std::string("targetfile"), f2.getFilename());
- EXPECT_EQUAL(f.getFilename(), f2.getFilename());
- ASSERT_TRUE(!f2.isOpen());
- ASSERT_TRUE(f.isOpen());
- try{
- File f3(fd, "myfile");
- f3.closeFileWhenDestructed(false); // Already closed
- f3.write("foo", 3, 0);
- TEST_FATAL("This file descriptor should have been closed");
- } catch (IoException& e) {
- //std::cerr << e.what() << "\n";
- EXPECT_EQUAL(IoException::INTERNAL_FAILURE, e.getType());
- }
- }
-}
-
TEST("require that we can read all data written to file")
{
// Write text into a file.
diff --git a/vespalib/src/tests/small_vector/small_vector_test.cpp b/vespalib/src/tests/small_vector/small_vector_test.cpp
index 2118a3c492f..4b432799672 100644
--- a/vespalib/src/tests/small_vector/small_vector_test.cpp
+++ b/vespalib/src/tests/small_vector/small_vector_test.cpp
@@ -7,6 +7,14 @@
using namespace vespalib;
+struct Alive {
+ uint32_t &cnt;
+ uint32_t id;
+ Alive(uint32_t &cnt_in, uint32_t id_in) noexcept
+ : cnt(cnt_in), id(id_in) { ++cnt; }
+ ~Alive() { --cnt; }
+};
+
template <typename T, size_t N>
void verify(const SmallVector<T,N> &vec, std::vector<uint32_t> expect, size_t expect_capacity = 0) {
if (expect_capacity == 0) {
@@ -263,4 +271,22 @@ TEST(SmallVectorTest, check_back_method) {
EXPECT_EQ(&vec.back(), vec.end() - 1);
}
+TEST(SmallVectorTest, pop_back) {
+ uint32_t my_cnt = 0;
+ {
+ SmallVector<Alive> vec;
+ vec.emplace_back(my_cnt, 1);
+ vec.emplace_back(my_cnt, 2);
+ vec.emplace_back(my_cnt, 3);
+ EXPECT_EQ(vec.size(), 3);
+ EXPECT_EQ(my_cnt, 3);
+ vec.pop_back();
+ EXPECT_EQ(my_cnt, 2);
+ ASSERT_EQ(vec.size(), 2);
+ EXPECT_EQ(vec[0].id, 1);
+ EXPECT_EQ(vec[1].id, 2);
+ }
+ EXPECT_EQ(my_cnt, 0);
+}
+
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/tests/util/file_area_freelist/file_area_freelist_test.cpp b/vespalib/src/tests/util/file_area_freelist/file_area_freelist_test.cpp
index 9a51be3b817..e844a66f5e9 100644
--- a/vespalib/src/tests/util/file_area_freelist/file_area_freelist_test.cpp
+++ b/vespalib/src/tests/util/file_area_freelist/file_area_freelist_test.cpp
@@ -14,6 +14,7 @@ protected:
public:
FileAreaFreeListTest();
~FileAreaFreeListTest();
+ void test_merge_area_blocked(bool previous);
};
FileAreaFreeListTest::FileAreaFreeListTest()
@@ -23,6 +24,24 @@ FileAreaFreeListTest::FileAreaFreeListTest()
FileAreaFreeListTest::~FileAreaFreeListTest() = default;
+void
+FileAreaFreeListTest::test_merge_area_blocked(bool previous)
+{
+ _freelist.add_premmapped_area(4, 1);
+ _freelist.add_premmapped_area(5, 1);
+ EXPECT_EQ(4, _freelist.alloc(1));
+ EXPECT_EQ(5, _freelist.alloc(1));
+ if (previous) {
+ _freelist.free(4, 1);
+ _freelist.free(5, 1);
+ } else {
+ _freelist.free(5, 1);
+ _freelist.free(4, 1);
+ }
+ EXPECT_EQ(bad_offset, _freelist.alloc(2));
+ _freelist.remove_premmapped_area(4, 1);
+ _freelist.remove_premmapped_area(5, 1);
+}
TEST_F(FileAreaFreeListTest, empty_freelist_is_ok)
{
@@ -44,6 +63,11 @@ TEST_F(FileAreaFreeListTest, merge_area_with_next_area)
EXPECT_EQ(bad_offset, _freelist.alloc(1));
}
+TEST_F(FileAreaFreeListTest, merge_area_with_next_area_blocked_by_fence)
+{
+ test_merge_area_blocked(false);
+}
+
TEST_F(FileAreaFreeListTest, merge_area_with_previous_area)
{
_freelist.free(3, 1);
@@ -52,6 +76,11 @@ TEST_F(FileAreaFreeListTest, merge_area_with_previous_area)
EXPECT_EQ(bad_offset, _freelist.alloc(1));
}
+TEST_F(FileAreaFreeListTest, merge_area_with_previous_area_blocked_by_fence)
+{
+ test_merge_area_blocked(true);
+}
+
TEST_F(FileAreaFreeListTest, merge_area_with_previous_and_next_area)
{
_freelist.free(5, 1);
diff --git a/vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp b/vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp
index ef16998902e..c6971c0d803 100644
--- a/vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp
+++ b/vespalib/src/tests/util/mmap_file_allocator/mmap_file_allocator_test.cpp
@@ -12,6 +12,7 @@ namespace {
vespalib::string basedir("mmap-file-allocator-dir");
vespalib::string hello("hello");
+vespalib::string world("world");
struct MyAlloc
{
@@ -36,7 +37,24 @@ struct MyAlloc
}
-class MmapFileAllocatorTest : public ::testing::Test
+struct AllocatorSetup {
+ uint32_t small_limit;
+ uint32_t premmap_size;
+
+ AllocatorSetup(uint32_t small_limit_in, uint32_t premmap_size_in)
+ : small_limit(small_limit_in),
+ premmap_size(premmap_size_in)
+ {
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, const AllocatorSetup setup)
+{
+ os << "small" << setup.small_limit << "premm" << setup.premmap_size;
+ return os;
+}
+
+class MmapFileAllocatorTest : public ::testing::TestWithParam<AllocatorSetup>
{
protected:
MmapFileAllocator _allocator;
@@ -47,49 +65,69 @@ public:
};
MmapFileAllocatorTest::MmapFileAllocatorTest()
- : _allocator(basedir)
+ : _allocator(basedir, GetParam().small_limit, GetParam().premmap_size)
{
}
MmapFileAllocatorTest::~MmapFileAllocatorTest() = default;
-TEST_F(MmapFileAllocatorTest, zero_sized_allocation_is_handled)
+INSTANTIATE_TEST_SUITE_P(MmapFileAllocatorMultiTest,
+ MmapFileAllocatorTest,
+ testing::Values(AllocatorSetup(0, 1_Mi), AllocatorSetup(512, 1_Mi), AllocatorSetup(128_Ki, 1_Mi)), testing::PrintToStringParamName());
+
+
+
+TEST_P(MmapFileAllocatorTest, zero_sized_allocation_is_handled)
{
MyAlloc buf(_allocator, _allocator.alloc(0));
EXPECT_EQ(nullptr, buf.data);
EXPECT_EQ(0u, buf.size);
}
-TEST_F(MmapFileAllocatorTest, mmap_file_allocator_works)
+TEST_P(MmapFileAllocatorTest, mmap_file_allocator_works)
{
- MyAlloc buf(_allocator, _allocator.alloc(4));
- EXPECT_LE(4u, buf.size);
+ MyAlloc buf(_allocator, _allocator.alloc(300));
+ EXPECT_LE(300u, buf.size);
EXPECT_TRUE(buf.data != nullptr);
memcpy(buf.data, "1st", 4);
- MyAlloc buf2(_allocator, _allocator.alloc(5));
- EXPECT_LE(5u, buf2.size);
+ MyAlloc buf2(_allocator, _allocator.alloc(600));
+ EXPECT_LE(600u, buf2.size);
EXPECT_TRUE(buf2.data != nullptr);
EXPECT_TRUE(buf.data != buf2.data);
memcpy(buf2.data, "fine", 5);
- EXPECT_EQ(0u, _allocator.resize_inplace(buf.asPair(), 5));
- EXPECT_EQ(0u, _allocator.resize_inplace(buf.asPair(), 3));
+ EXPECT_EQ(0u, _allocator.resize_inplace(buf.asPair(), 500));
+ EXPECT_EQ(0u, _allocator.resize_inplace(buf.asPair(), 200));
EXPECT_NE(0u, _allocator.get_end_offset());
- int result = msync(buf.data, buf.size, MS_SYNC);
- EXPECT_EQ(0, result);
- result = msync(buf2.data, buf2.size, MS_SYNC);
- EXPECT_EQ(0, result);
+ if (GetParam().small_limit == 0) {
+ int result = msync(buf.data, buf.size, MS_SYNC);
+ EXPECT_EQ(0, result);
+ result = msync(buf2.data, buf2.size, MS_SYNC);
+ EXPECT_EQ(0, result);
+ }
}
-TEST_F(MmapFileAllocatorTest, reuse_file_offset_works)
+TEST_P(MmapFileAllocatorTest, reuse_file_offset_works)
{
+ constexpr size_t size_300 = 300;
+ constexpr size_t size_600 = 600;
+ assert(hello.size() + 1 <= size_300);
+ assert(world.size() + 1 <= size_600);
{
- MyAlloc buf(_allocator, _allocator.alloc(hello.size() + 1));
+ MyAlloc buf(_allocator, _allocator.alloc(size_300));
memcpy(buf.data, hello.c_str(), hello.size() + 1);
}
{
- MyAlloc buf(_allocator, _allocator.alloc(hello.size() + 1));
+ MyAlloc buf(_allocator, _allocator.alloc(size_300));
EXPECT_EQ(0, memcmp(buf.data, hello.c_str(), hello.size() + 1));
}
+ {
+ MyAlloc buf(_allocator, _allocator.alloc(size_600));
+ memcpy(buf.data, world.c_str(), world.size() + 1);
+ }
+ {
+ MyAlloc buf(_allocator, _allocator.alloc(size_600));
+ EXPECT_EQ(0, memcmp(buf.data, world.c_str(), world.size() + 1));
+ }
}
GTEST_MAIN_RUN_ALL_TESTS()
diff --git a/vespalib/src/vespa/fastos/unix_file.cpp b/vespalib/src/vespa/fastos/unix_file.cpp
index 692674b95bd..6d10338aec1 100644
--- a/vespalib/src/vespa/fastos/unix_file.cpp
+++ b/vespalib/src/vespa/fastos/unix_file.cpp
@@ -213,6 +213,10 @@ FastOS_UNIX_File::Open(unsigned int openFlags, const char *filename)
if (eCode != 0) {
fprintf(stderr, "Failed: posix_madvise(%p, %ld, %d) = %d\n", mbase, mlen, fadviseOptions, eCode);
}
+ eCode = madvise(mbase, mlen, MADV_DONTDUMP);
+ if (eCode != 0) {
+ fprintf(stderr, "Failed: madvise(%p, %ld, MADV_DONTDUMP) = %d\n", mbase, mlen, eCode);
+ }
#endif
_mmapbase = mbase;
_mmaplen = mlen;
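Note: MADV_DONTDUMP is Linux-specific advice that excludes the mapped pages from core dumps, which keeps dumps of processes with large file mappings small. A minimal standalone sketch of the pattern (hypothetical file path, guarded so it degrades gracefully where the flag is unavailable):

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

// Map a file read-only and ask the kernel to leave the pages out of core dumps.
int main() {
    int fd = open("/etc/hostname", O_RDONLY);   // hypothetical example file
    if (fd < 0) return 1;
    off_t len = lseek(fd, 0, SEEK_END);
    void* base = mmap(nullptr, static_cast<size_t>(len), PROT_READ, MAP_SHARED, fd, 0);
    if (base == MAP_FAILED) { close(fd); return 1; }
#ifdef MADV_DONTDUMP
    if (madvise(base, static_cast<size_t>(len), MADV_DONTDUMP) != 0)
        std::perror("madvise(MADV_DONTDUMP)");
#endif
    munmap(base, static_cast<size_t>(len));
    close(fd);
    return 0;
}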
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
index 5c88900ae92..b5328134e4f 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.cpp
@@ -295,7 +295,7 @@ DataStoreBase::getMemoryUsage() const {
extra_used += _free_lists.size() * sizeof(FreeList);
usage.incAllocatedBytes(extra_allocated);
usage.incUsedBytes(extra_used);
- usage.merge(_stash.get_memory_usage());
+ merge_stash_memory_usage(usage);
return usage;
}
@@ -526,4 +526,20 @@ DataStoreBase::inc_hold_buffer_count()
++_hold_buffer_count;
}
+void
+DataStoreBase::merge_stash_memory_usage(vespalib::MemoryUsage& usage) const
+{
+ /*
+ * Estimate stash memory usage instead of sampling it to avoid race
+ * with writer thread.
+ */
+ uint32_t buffer_states = get_bufferid_limit_acquire();
+ size_t stashed_buffer_state_size = sizeof(BufferState) + sizeof(stash::DestructObject<BufferState>);
+ size_t chunk_size = _stash.get_chunk_size();
+ uint32_t buffer_states_per_chunk = (chunk_size - sizeof(stash::Chunk)) / stashed_buffer_state_size;
+ uint32_t chunks = (buffer_states + buffer_states_per_chunk - 1) / buffer_states_per_chunk;
+ usage.incAllocatedBytes(chunks * chunk_size);
+ usage.incUsedBytes(buffer_states * stashed_buffer_state_size + chunks * sizeof(stash::Chunk));
+}
+
}
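Note: the estimate above is plain ceiling-division arithmetic over the buffer-state count. A standalone sketch of the same formula with hypothetical constants (the real values come from sizeof(BufferState), sizeof(stash::DestructObject<BufferState>), sizeof(stash::Chunk) and the configured stash chunk size):

#include <cstddef>
#include <cstdio>

// Same ceiling-division estimate as merge_stash_memory_usage above,
// but with made-up constants instead of the real vespalib types.
struct Estimate { size_t allocated; size_t used; };

Estimate estimate_stash_usage(size_t buffer_states,
                              size_t state_size,     // BufferState plus its destructor record
                              size_t chunk_size,     // stash chunk size
                              size_t chunk_header) { // stash chunk bookkeeping
    size_t per_chunk = (chunk_size - chunk_header) / state_size;
    size_t chunks = (buffer_states + per_chunk - 1) / per_chunk;   // ceiling division
    return {chunks * chunk_size,
            buffer_states * state_size + chunks * chunk_header};
}

int main() {
    // Hypothetical numbers: 1000 buffer states of 120 bytes each, 16 KiB chunks, 32-byte header.
    Estimate e = estimate_stash_usage(1000, 120, 16384, 32);
    std::printf("allocated=%zu used=%zu\n", e.allocated, e.used);
}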
diff --git a/vespalib/src/vespa/vespalib/datastore/datastorebase.h b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
index b91d6c7cfa6..d7ae20a7028 100644
--- a/vespalib/src/vespa/vespalib/datastore/datastorebase.h
+++ b/vespalib/src/vespa/vespalib/datastore/datastorebase.h
@@ -261,6 +261,8 @@ private:
virtual void reclaim_all_entry_refs() = 0;
+ void merge_stash_memory_usage(vespalib::MemoryUsage& usage) const;
+
std::vector<BufferAndMeta> _buffers; // For fast mapping with known types
// Provides a mapping from typeId -> primary buffer for that type.
diff --git a/vespalib/src/vespa/vespalib/io/fileutil.cpp b/vespalib/src/vespa/vespalib/io/fileutil.cpp
index 6c169ab8d98..cb478f0f225 100644
--- a/vespalib/src/vespa/vespalib/io/fileutil.cpp
+++ b/vespalib/src/vespa/vespalib/io/fileutil.cpp
@@ -5,7 +5,6 @@
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/util/size_literals.h>
#include <vespa/vespalib/util/stringfmt.h>
-#include <ostream>
#include <cassert>
#include <filesystem>
#include <dirent.h>
@@ -16,35 +15,34 @@
#include <vespa/log/log.h>
LOG_SETUP(".vespalib.io.fileutil");
+namespace fs = std::filesystem;
+
namespace vespalib {
namespace {
- FileInfo::UP
- processStat(struct stat& filestats, bool result, stringref path) {
- FileInfo::UP resval;
- if (result) {
- resval.reset(new FileInfo);
- resval->_plainfile = S_ISREG(filestats.st_mode);
- resval->_directory = S_ISDIR(filestats.st_mode);
- resval->_symlink = S_ISLNK(filestats.st_mode);
- resval->_size = filestats.st_size;
- } else if (errno != ENOENT) {
- asciistream ost;
- ost << "An IO error occured while statting '" << path << "'. "
- << "errno(" << errno << "): " << getErrorString(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
- }
- LOG(debug, "stat(%s): Existed? %s, Plain file? %s, Directory? %s, "
- "Size: %" PRIu64,
- string(path).c_str(),
- resval.get() ? "true" : "false",
- resval.get() && resval->_plainfile ? "true" : "false",
- resval.get() && resval->_directory ? "true" : "false",
- resval.get() ? resval->_size : 0);
- return resval;
+FileInfo::UP
+processStat(struct stat& filestats, bool result, stringref path) {
+ FileInfo::UP resval;
+ if (result) {
+ resval = std::make_unique<FileInfo>();
+ resval->_plainfile = S_ISREG(filestats.st_mode);
+ resval->_directory = S_ISDIR(filestats.st_mode);
+ resval->_size = filestats.st_size;
+ } else if (errno != ENOENT) {
+ asciistream ost;
+ ost << "An IO error occured while statting '" << path << "'. "
+ << "errno(" << errno << "): " << getErrorString(errno);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
+ LOG(debug, "stat(%s): Existed? %s, Plain file? %s, Directory? %s, Size: %" PRIu64,
+ string(path).c_str(),
+ resval.get() ? "true" : "false",
+ resval.get() && resval->_plainfile ? "true" : "false",
+ resval.get() && resval->_directory ? "true" : "false",
+ resval.get() ? resval->_size : 0);
+ return resval;
+}
string
safeStrerror(int errnum)
@@ -54,167 +52,61 @@ safeStrerror(int errnum)
}
-bool
-FileInfo::operator==(const FileInfo& fi) const
-{
- return (_size == fi._size && _plainfile == fi._plainfile
- && _directory == fi._directory);
-}
-
-std::ostream&
-operator<<(std::ostream& out, const FileInfo& info)
-{
- out << "FileInfo(size: " << info._size;
- if (info._plainfile) out << ", plain file";
- if (info._directory) out << ", directory";
- out << ")";
- return out;
-}
-
File::File(stringref filename)
: _fd(-1),
- _flags(0),
- _filename(filename),
- _close(true),
- _fileReads(0),
- _fileWrites(0)
-{
-}
-
-File::File(int fileDescriptor, stringref filename)
- : _fd(fileDescriptor),
- _flags(0),
- _filename(filename),
- _close(true),
- _fileReads(0),
- _fileWrites(0)
-{
-}
+ _filename(filename)
+{ }
File::~File()
{
- if (_close && _fd != -1) close();
-}
-
-File::File(File& f)
- : _fd(f._fd),
- _flags(f._flags),
- _filename(f._filename),
- _close(f._close),
- _fileReads(f._fileReads),
- _fileWrites(f._fileWrites)
-{
- f._fd = -1;
- f._flags = 0;
- f._close = true;
- f._fileReads = 0;
- f._fileWrites = 0;
-}
-
-File&
-File::operator=(File& f)
-{
- if (_close && _fd != -1) close();
- _fd = f._fd;
- _flags = f._flags;
- _filename = f._filename;
- _close = f._close;
- _fileReads = f._fileReads;
- _fileWrites = f._fileWrites;
- f._fd = -1;
- f._flags = 0;
- f._close = true;
- f._fileReads = 0;
- f._fileWrites = 0;
- return *this;
-}
-
-void
-File::setFilename(stringref filename)
-{
- if (_filename == filename) return;
- if (_close && _fd != -1) close();
- _filename = filename;
- _fd = -1;
- _flags = 0;
- _close = true;
+ if (_fd != -1) close();
}
namespace {
- int openAndCreateDirsIfMissing(const string & filename, int flags,
- bool createDirsIfMissing)
+int openAndCreateDirsIfMissing(const string & filename, int flags, bool createDirsIfMissing)
+{
+ int fd = ::open(filename.c_str(), flags, 0644);
+ if (fd < 0 && errno == ENOENT && ((flags & O_CREAT) != 0)
+ && createDirsIfMissing)
{
- int fd = ::open(filename.c_str(), flags, 0644);
- if (fd < 0 && errno == ENOENT && ((flags & O_CREAT) != 0)
- && createDirsIfMissing)
- {
- auto pos = filename.rfind('/');
- if (pos != string::npos) {
- string path(filename.substr(0, pos));
- std::filesystem::create_directories(std::filesystem::path(path));
- LOG(spam, "open(%s, %d): Retrying open after creating parent "
- "directories.", filename.c_str(), flags);
- fd = ::open(filename.c_str(), flags, 0644);
- }
+ auto pos = filename.rfind('/');
+ if (pos != string::npos) {
+ string path(filename.substr(0, pos));
+ fs::create_directories(fs::path(path));
+ LOG(spam, "open(%s, %d): Retrying open after creating parent directories.", filename.c_str(), flags);
+ fd = ::open(filename.c_str(), flags, 0644);
}
- return fd;
}
+ return fd;
+}
}
void
File::open(int flags, bool autoCreateDirectories) {
if ((flags & File::READONLY) != 0) {
if ((flags & File::CREATE) != 0) {
- throw IllegalArgumentException(
- "Cannot use READONLY and CREATE options at the same time",
- VESPA_STRLOC);
+ throw IllegalArgumentException("Cannot use READONLY and CREATE options at the same time", VESPA_STRLOC);
}
if ((flags & File::TRUNC) != 0) {
- throw IllegalArgumentException(
- "Cannot use READONLY and TRUNC options at the same time",
- VESPA_STRLOC);
+ throw IllegalArgumentException("Cannot use READONLY and TRUNC options at the same time", VESPA_STRLOC);
}
if (autoCreateDirectories) {
- throw IllegalArgumentException(
- "No point in auto-creating directories on read only access",
- VESPA_STRLOC);
+ throw IllegalArgumentException("No point in auto-creating directories on read only access", VESPA_STRLOC);
}
}
int openflags = ((flags & File::READONLY) != 0 ? O_RDONLY : O_RDWR)
| ((flags & File::CREATE) != 0 ? O_CREAT : 0)
-#ifdef __linux__
- | ((flags & File::DIRECTIO) != 0 ? O_DIRECT : 0)
-#endif
| ((flags & File::TRUNC) != 0 ? O_TRUNC: 0);
int fd = openAndCreateDirsIfMissing(_filename, openflags, autoCreateDirectories);
-#ifdef __linux__
- if (fd < 0 && ((flags & File::DIRECTIO) != 0)) {
- openflags = (openflags ^ O_DIRECT);
- flags = (flags ^ DIRECTIO);
- LOG(debug, "open(%s, %d): Retrying without direct IO due to failure "
- "opening with errno(%d): %s",
- _filename.c_str(), flags, errno, safeStrerror(errno).c_str());
- fd = openAndCreateDirsIfMissing(_filename, openflags, autoCreateDirectories);
- }
-#endif
if (fd < 0) {
asciistream ost;
- ost << "open(" << _filename << ", 0x"
- << hex << flags << dec << "): Failed, errno(" << errno
- << "): " << safeStrerror(errno);
+ ost << "open(" << _filename << ", 0x" << hex << flags << dec
+ << "): Failed, errno(" << errno << "): " << safeStrerror(errno);
throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
- _flags = flags;
- if (_close && _fd != -1) close();
+ if (_fd != -1) close();
_fd = fd;
- LOG(debug, "open(%s, %d). File opened with file descriptor %d.",
- _filename.c_str(), flags, fd);
-}
-
-void
-File::closeFileWhenDestructed(bool closeOnDestruct)
-{
- _close = closeOnDestruct;
+ LOG(debug, "open(%s, %d). File opened with file descriptor %d.", _filename.c_str(), flags, fd);
}
FileInfo
@@ -226,13 +118,11 @@ File::stat() const
result = processStat(filestats, fstat(_fd, &filestats) == 0, _filename);
assert(result.get()); // The file must exist in a file instance
} else {
- result = processStat(filestats,
- ::stat(_filename.c_str(), &filestats) == 0,
- _filename);
+ result = processStat(filestats, ::stat(_filename.c_str(), &filestats) == 0, _filename);
// If the file does not exist yet, act like it does. It will
// probably be created when opened.
- if (result.get() == 0) {
- result.reset(new FileInfo());
+ if ( ! result) {
+ result = std::make_unique<FileInfo>();
result->_size = 0;
result->_directory = false;
result->_plainfile = true;
@@ -246,69 +136,32 @@ File::resize(off_t size)
{
if (ftruncate(_fd, size) != 0) {
asciistream ost;
- ost << "resize(" << _filename << ", " << size << "): Failed, errno("
- << errno << "): " << safeStrerror(errno);
+ ost << "resize(" << _filename << ", " << size << "): Failed, errno(" << errno << "): " << safeStrerror(errno);
throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
- LOG(debug, "resize(%s): Resized to %" PRIu64 " bytes.",
- _filename.c_str(), size);
-}
-
-void
-File::verifyDirectIO(uint64_t buf, size_t bufsize, off_t offset) const
-{
- if (offset % 512 != 0) {
- LOG(error,
- "Access to file %s failed because offset %" PRIu64 " wasn't 512-byte "
- "aligned. Buffer memory address was %" PRIx64 ", length %zu",
- _filename.c_str(), static_cast<uint64_t>(offset), buf, bufsize);
- assert(false);
- }
- if (buf % 512 != 0) {
- LOG(error,
- "Access to file %s failed because buffer memory address %" PRIx64 " "
- "wasn't 512-byte aligned. Offset was %" PRIu64 ", length %zu",
- _filename.c_str(), buf, static_cast<uint64_t>(offset), bufsize);
- assert(false);
- }
- if (bufsize % 512 != 0) {
- LOG(error,
- "Access to file %s failed because buffer size %zu wasn't 512-byte "
- "aligned. Buffer memory address was %" PRIx64 ", offset %" PRIu64,
- _filename.c_str(), bufsize, buf, static_cast<uint64_t>(offset));
- assert(false);
- }
+ LOG(debug, "resize(%s): Resized to %" PRIu64 " bytes.", _filename.c_str(), size);
}
off_t
File::write(const void *buf, size_t bufsize, off_t offset)
{
- ++_fileWrites;
size_t left = bufsize;
- LOG(debug, "write(%s): Writing %zu bytes at offset %" PRIu64 ".",
- _filename.c_str(), bufsize, offset);
-
- if (_flags & DIRECTIO) {
- verifyDirectIO((uint64_t)buf, bufsize, offset);
- }
+ LOG(debug, "write(%s): Writing %zu bytes at offset %" PRIu64 ".", _filename.c_str(), bufsize, offset);
while (left > 0) {
ssize_t written = ::pwrite(_fd, buf, left, offset);
if (written > 0) {
- LOG(spam, "write(%s): Wrote %zd bytes at offset %" PRIu64 ".",
- _filename.c_str(), written, offset);
+ LOG(spam, "write(%s): Wrote %zd bytes at offset %" PRIu64 ".", _filename.c_str(), written, offset);
left -= written;
buf = ((const char*) buf) + written;
offset += written;
} else if (written == 0) {
- LOG(spam, "write(%s): Wrote %zd bytes at offset %" PRIu64 ".",
- _filename.c_str(), written, offset);
+ LOG(spam, "write(%s): Wrote %zd bytes at offset %" PRIu64 ".", _filename.c_str(), written, offset);
assert(false); // Can this happen?
} else if (errno != EINTR && errno != EAGAIN) {
asciistream ost;
- ost << "write(" << _fd << ", " << buf
- << ", " << left << ", " << offset << "), Failed, errno("
- << errno << "): " << safeStrerror(errno);
+ ost << "write(" << _fd << ", " << buf << ", " << left << ", " << offset
+ << "), Failed, errno(" << errno << "): " << safeStrerror(errno);
throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
}
@@ -318,37 +171,23 @@ File::write(const void *buf, size_t bufsize, off_t offset)
size_t
File::read(void *buf, size_t bufsize, off_t offset) const
{
- ++_fileReads;
size_t remaining = bufsize;
- LOG(debug, "read(%s): Reading %zu bytes from offset %" PRIu64 ".",
- _filename.c_str(), bufsize, offset);
-
- if (_flags & DIRECTIO) {
- verifyDirectIO((uint64_t)buf, bufsize, offset);
- }
+ LOG(debug, "read(%s): Reading %zu bytes from offset %" PRIu64 ".", _filename.c_str(), bufsize, offset);
while (remaining > 0) {
ssize_t bytesread = ::pread(_fd, buf, remaining, offset);
if (bytesread > 0) {
- LOG(spam, "read(%s): Read %zd bytes from offset %" PRIu64 ".",
- _filename.c_str(), bytesread, offset);
+ LOG(spam, "read(%s): Read %zd bytes from offset %" PRIu64 ".", _filename.c_str(), bytesread, offset);
remaining -= bytesread;
buf = ((char*) buf) + bytesread;
offset += bytesread;
- if (((_flags & DIRECTIO) != 0) && ((bytesread % 512) != 0) && (offset == getFileSize())) {
- LOG(spam, "read(%s): Found EOF. Directio read to unaligned file end at offset %" PRIu64 ".",
- _filename.c_str(), offset);
- break;
- }
} else if (bytesread == 0) { // EOF
- LOG(spam, "read(%s): Found EOF. Zero bytes read from offset %" PRIu64 ".",
- _filename.c_str(), offset);
+ LOG(spam, "read(%s): Found EOF. Zero bytes read from offset %" PRIu64 ".", _filename.c_str(), offset);
break;
} else if (errno != EINTR && errno != EAGAIN) {
asciistream ost;
ost << "read(" << _fd << ", " << buf << ", " << remaining << ", "
- << offset << "): Failed, errno(" << errno << "): "
- << safeStrerror(errno);
+ << offset << "): Failed, errno(" << errno << "): " << safeStrerror(errno);
throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
}
@@ -433,13 +272,7 @@ bool
File::unlink()
{
close();
- return std::filesystem::remove(std::filesystem::path(_filename));
-}
-
-namespace {
-
- uint32_t diskAlignmentSize = 4_Ki;
-
+ return fs::remove(fs::path(_filename));
}
DirectoryList
@@ -465,16 +298,6 @@ listDirectory(const string & path)
return result;
}
-MallocAutoPtr
-getAlignedBuffer(size_t size)
-{
- void *ptr;
- int result = posix_memalign(&ptr, diskAlignmentSize, size);
- assert(result == 0);
- (void)result;
- return MallocAutoPtr(ptr);
-}
-
string dirname(stringref name)
{
size_t found = name.rfind('/');
@@ -517,8 +340,7 @@ getOpenErrorString(const int osError, stringref filename)
{
asciistream os;
string dirName(dirname(filename));
- os << "error=" << osError << "(\"" <<
- getErrorString(osError) << "\") fileStat";
+ os << "error=" << osError << "(\"" << getErrorString(osError) << "\") fileStat";
addStat(os, filename);
os << " dirStat";
addStat(os, dirName);
diff --git a/vespalib/src/vespa/vespalib/io/fileutil.h b/vespalib/src/vespa/vespalib/io/fileutil.h
index 4de36daa85f..148317a7edf 100644
--- a/vespalib/src/vespa/vespalib/io/fileutil.h
+++ b/vespalib/src/vespa/vespalib/io/fileutil.h
@@ -43,14 +43,10 @@ struct FileInfo {
bool _plainfile;
bool _directory;
- bool _symlink;
off_t _size;
- bool operator==(const FileInfo&) const;
};
-std::ostream& operator<<(std::ostream&, const FileInfo&);
-
/**
* @brief A File instance is used to access a single open file.
*
@@ -61,74 +57,44 @@ std::ostream& operator<<(std::ostream&, const FileInfo&);
*/
class File {
private:
- int _fd;
- int _flags;
- vespalib::string _filename;
- bool _close;
- mutable int _fileReads; // Tracks number of file reads done on this file
- mutable int _fileWrites; // Tracks number of file writes done in this file
+ int _fd;
+ string _filename;
+ void sync();
/**
- * Verify that direct I/O alignment preconditions hold. Triggers assertion
- * failure on violations.
+ * Get information about the current file. If file is opened, file descriptor
+ * will be used for stat. If file is not open, and the file does not exist
+ * yet, you will get fileinfo describing an empty file.
*/
- void verifyDirectIO(uint64_t buf, size_t bufsize, off_t offset) const;
-
+ FileInfo stat() const;
public:
using UP = std::unique_ptr<File>;
/**
* If failing to open file using direct IO it will retry using cached IO.
*/
- enum Flag { READONLY = 1, CREATE = 2, DIRECTIO = 4, TRUNC = 8 };
+ enum Flag { READONLY = 1, CREATE = 2, TRUNC = 8 };
/** Create a file instance, without opening the file. */
- File(vespalib::stringref filename);
-
- /** Create a file instance of an already open file. */
- File(int fileDescriptor, vespalib::stringref filename);
-
- /** Copying a file instance, moves any open file descriptor. */
- File(File& f);
- File& operator=(File& f);
+ File(stringref filename);
/** Closes the file if not instructed to do otherwise. */
- virtual ~File();
+ ~File();
- /**
- * Make this instance point at another file.
- * Closes the old file it it was open.
- */
- void setFilename(vespalib::stringref filename);
+ const string& getFilename() const { return _filename; }
- const vespalib::string& getFilename() const { return _filename; }
-
- virtual void open(int flags, bool autoCreateDirectories = false);
+ void open(int flags, bool autoCreateDirectories = false);
bool isOpen() const { return (_fd != -1); }
- bool isOpenWithDirectIO() const { return ((_flags & DIRECTIO) != 0); }
-
- /**
- * Whether or not file should be closed when this instance is destructed.
- * By default it will be closed.
- */
- void closeFileWhenDestructed(bool close);
- virtual int getFileDescriptor() const { return _fd; }
-
- /**
- * Get information about the current file. If file is opened, file descriptor
- * will be used for stat. If file is not open, and the file does not exist
- * yet, you will get fileinfo describing an empty file.
- */
- virtual FileInfo stat() const;
+ int getFileDescriptor() const { return _fd; }
/**
* Get the filesize of a file, specified by a file descriptor.
*
* @throw IoException If we failed to stat the file.
*/
- virtual off_t getFileSize() const { return stat()._size; }
+ off_t getFileSize() const { return stat()._size; }
/**
* Resize the currently open file to a given size,
@@ -138,7 +104,7 @@ public:
* @param size new size of file
* @throw IoException If we failed to resize the file.
*/
- virtual void resize(off_t size);
+ void resize(off_t size);
/**
* Writes data to file.
@@ -152,7 +118,7 @@ public:
* @throw IoException If we failed to write to the file.
* @return Always return bufsize.
*/
- virtual off_t write(const void *buf, size_t bufsize, off_t offset);
+ off_t write(const void *buf, size_t bufsize, off_t offset);
/**
* Read characters from a file.
@@ -167,7 +133,7 @@ public:
* @return The number of bytes actually read. If less than
* bufsize, this indicates that EOF was reached.
*/
- virtual size_t read(void *buf, size_t bufsize, off_t offset) const;
+ size_t read(void *buf, size_t bufsize, off_t offset) const;
/**
* Read the file into a string.
@@ -177,7 +143,7 @@ public:
* @throw IoException If we failed to read from file.
* @return The content of the file.
*/
- vespalib::string readAll() const;
+ string readAll() const;
/**
* Read a file into a string.
@@ -188,7 +154,7 @@ public:
* @throw IoException If we failed to read from file.
* @return The content of the file.
*/
- static vespalib::string readAll(vespalib::stringref path);
+ static string readAll(stringref path);
/**
* Sync file or directory.
@@ -198,24 +164,17 @@ public:
*
* @throw IoException If we failed to sync the file.
*/
- static void sync(vespalib::stringref path);
-
- virtual void sync();
- virtual bool close();
- virtual bool unlink();
+ static void sync(stringref path);
- int getFileReadCount() const { return _fileReads; }
- int getFileWriteCount() const { return _fileWrites; }
+ bool close();
+ bool unlink();
};
/**
* List the contents of the given directory.
*/
-using DirectoryList = std::vector<vespalib::string>;
-extern DirectoryList listDirectory(const vespalib::string & path);
-
-extern MallocAutoPtr getAlignedBuffer(size_t size);
-
+using DirectoryList = std::vector<string>;
+extern DirectoryList listDirectory(const string & path);
string dirname(stringref name);
string getOpenErrorString(const int osError, stringref name);
diff --git a/vespalib/src/vespa/vespalib/io/mapped_file_input.cpp b/vespalib/src/vespa/vespalib/io/mapped_file_input.cpp
index 854ca29c66a..7f1f0d003b7 100644
--- a/vespalib/src/vespa/vespalib/io/mapped_file_input.cpp
+++ b/vespalib/src/vespa/vespalib/io/mapped_file_input.cpp
@@ -16,11 +16,11 @@ MappedFileInput::MappedFileInput(const vespalib::string &file_name)
{
struct stat info;
if ((_fd != -1) && fstat(_fd, &info) == 0) {
- _data = static_cast<char*>(mmap(0, info.st_size,
- PROT_READ, MAP_SHARED, _fd, 0));
+ _data = static_cast<char*>(mmap(0, info.st_size, PROT_READ, MAP_SHARED, _fd, 0));
if (_data != MAP_FAILED) {
_size = info.st_size;
madvise(_data, _size, MADV_SEQUENTIAL);
+ madvise(_data, _size, MADV_DONTDUMP);
}
}
}
diff --git a/vespalib/src/vespa/vespalib/stllike/hash_map.cpp b/vespalib/src/vespa/vespalib/stllike/hash_map.cpp
index 9540a47eec3..50a3d73fe12 100644
--- a/vespalib/src/vespa/vespalib/stllike/hash_map.cpp
+++ b/vespalib/src/vespa/vespalib/stllike/hash_map.cpp
@@ -16,6 +16,8 @@ VESPALIB_HASH_MAP_INSTANTIATE(vespalib::string, double);
VESPALIB_HASH_MAP_INSTANTIATE(int64_t, int32_t);
VESPALIB_HASH_MAP_INSTANTIATE(int64_t, uint32_t);
VESPALIB_HASH_MAP_INSTANTIATE(int32_t, uint32_t);
+VESPALIB_HASH_MAP_INSTANTIATE(uint16_t, uint16_t);
+VESPALIB_HASH_MAP_INSTANTIATE(uint16_t, uint32_t);
VESPALIB_HASH_MAP_INSTANTIATE(uint32_t, int32_t);
VESPALIB_HASH_MAP_INSTANTIATE(uint32_t, uint32_t);
VESPALIB_HASH_MAP_INSTANTIATE(uint64_t, uint32_t);
diff --git a/vespalib/src/vespa/vespalib/stllike/hash_map.h b/vespalib/src/vespa/vespalib/stllike/hash_map.h
index 889093a9550..c4f60f879d7 100644
--- a/vespalib/src/vespa/vespalib/stllike/hash_map.h
+++ b/vespalib/src/vespa/vespalib/stllike/hash_map.h
@@ -35,6 +35,8 @@ public:
constexpr iterator end() noexcept { return _ht.end(); }
constexpr const_iterator begin() const noexcept { return _ht.begin(); }
constexpr const_iterator end() const noexcept { return _ht.end(); }
+ constexpr const_iterator cbegin() const noexcept { return _ht.begin(); }
+ constexpr const_iterator cend() const noexcept { return _ht.end(); }
constexpr size_t capacity() const noexcept { return _ht.capacity(); }
constexpr size_t size() const noexcept { return _ht.size(); }
constexpr bool empty() const noexcept { return _ht.empty(); }
diff --git a/vespalib/src/vespa/vespalib/stllike/hash_set.cpp b/vespalib/src/vespa/vespalib/stllike/hash_set.cpp
index 8812af426bf..54614329a97 100644
--- a/vespalib/src/vespa/vespalib/stllike/hash_set.cpp
+++ b/vespalib/src/vespa/vespalib/stllike/hash_set.cpp
@@ -6,6 +6,8 @@
namespace vespalib {
}
+VESPALIB_HASH_SET_INSTANTIATE(int16_t);
+VESPALIB_HASH_SET_INSTANTIATE(uint16_t);
VESPALIB_HASH_SET_INSTANTIATE(int32_t);
VESPALIB_HASH_SET_INSTANTIATE(uint32_t);
VESPALIB_HASH_SET_INSTANTIATE(uint64_t);
diff --git a/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp b/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp
index 6d5b7ed8b05..77e46bbf9e8 100644
--- a/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp
+++ b/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp
@@ -9,7 +9,7 @@ namespace vespalib {
template<typename K, typename H, typename EQ, typename M>
template<typename InputIterator>
hash_set<K, H, EQ, M>::hash_set(InputIterator first, InputIterator last)
- : _ht(0)
+ : _ht(last - first)
{
insert(first, last);
}
@@ -18,7 +18,6 @@ template<typename K, typename H, typename EQ, typename M>
template<typename InputIt>
void
hash_set<K, H, EQ, M>::insert(InputIt first, InputIt last) {
- _ht.resize(last - first + capacity());
for (; first < last; first++) {
insert(*first);
}
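Note: the net effect is that the iterator-range constructor now sizes the table once from the iterator distance instead of resizing inside insert(). As an analogy in standard C++ (not the vespalib implementation):

#include <unordered_set>
#include <vector>
#include <cstdio>

// Reserving once from the known element count avoids repeated rehashing during bulk insert.
int main() {
    std::vector<int> values{1, 2, 3, 4, 5};
    std::unordered_set<int> set;
    set.reserve(values.size());                 // size once, up front
    set.insert(values.begin(), values.end());   // no rehash needed while inserting
    std::printf("size=%zu buckets=%zu\n", set.size(), set.bucket_count());
}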
diff --git a/vespalib/src/vespa/vespalib/stllike/string.hpp b/vespalib/src/vespa/vespalib/stllike/string.hpp
index e0144ab6f85..3438c6b641a 100644
--- a/vespalib/src/vespa/vespalib/stllike/string.hpp
+++ b/vespalib/src/vespa/vespalib/stllike/string.hpp
@@ -17,8 +17,10 @@ void
small_string<StackSize>::_reserveBytes(size_type newBufferSize) noexcept {
if (isAllocated()) {
_buf = (char *) realloc(_buf, newBufferSize);
+ assert(_buf);
} else {
char *tmp = (char *) malloc(newBufferSize);
+ assert(tmp);
memcpy(tmp, _stack, _sz);
tmp[_sz] = '\0';
_buf = tmp;
@@ -96,6 +98,7 @@ void small_string<StackSize>::init_slower(const void *s) noexcept
{
_bufferSize = _sz+1;
_buf = (char *) malloc(_bufferSize);
+ assert(_buf);
memcpy(_buf, s, _sz);
_buf[_sz] = '\0';
}
@@ -105,6 +108,7 @@ void small_string<StackSize>::appendAlloc(const void * s, size_type addSz) noexc
{
size_type newBufferSize = roundUp2inN(_sz+addSz+1);
char * buf = (char *) malloc(newBufferSize);
+ assert(buf);
memcpy(buf, buffer(), _sz);
if (isAllocated()) {
free(_buf);
diff --git a/vespalib/src/vespa/vespalib/util/arrayref.h b/vespalib/src/vespa/vespalib/util/arrayref.h
index 319947e4cd9..9057f56fac0 100644
--- a/vespalib/src/vespa/vespalib/util/arrayref.h
+++ b/vespalib/src/vespa/vespalib/util/arrayref.h
@@ -3,6 +3,7 @@
#include <cstddef>
#include <vector>
+#include <array>
namespace vespalib {
@@ -17,6 +18,8 @@ public:
constexpr ArrayRef(T * v, size_t sz) noexcept : _v(v), _sz(sz) { }
template<typename A=std::allocator<T>>
ArrayRef(std::vector<T, A> & v) noexcept : _v(v.data()), _sz(v.size()) { }
+ template<size_t SZ>
+ ArrayRef(std::array<T, SZ> & v) noexcept : _v(v.data()), _sz(SZ) { }
T & operator [] (size_t i) noexcept { return _v[i]; }
const T & operator [] (size_t i) const noexcept { return _v[i]; }
T * data() noexcept { return _v; }
@@ -36,6 +39,8 @@ public:
constexpr ConstArrayRef(const T *v, size_t sz) noexcept : _v(v), _sz(sz) { }
template<typename A=std::allocator<T>>
ConstArrayRef(const std::vector<T, A> & v) noexcept : _v(v.data()), _sz(v.size()) { }
+ template<size_t SZ>
+ ConstArrayRef(const std::array<T, SZ> & v) noexcept : _v(v.data()), _sz(SZ) { }
ConstArrayRef(const ArrayRef<T> & v) noexcept : _v(v.data()), _sz(v.size()) { }
constexpr ConstArrayRef() noexcept : _v(nullptr), _sz(0) {}
const T & operator [] (size_t i) const noexcept { return _v[i]; }
diff --git a/vespalib/src/vespa/vespalib/util/file_area_freelist.cpp b/vespalib/src/vespa/vespalib/util/file_area_freelist.cpp
index 4894ddfa2fd..32b58b7e805 100644
--- a/vespalib/src/vespa/vespalib/util/file_area_freelist.cpp
+++ b/vespalib/src/vespa/vespalib/util/file_area_freelist.cpp
@@ -7,11 +7,15 @@ namespace vespalib::alloc {
FileAreaFreeList::FileAreaFreeList()
: _free_areas(),
- _free_sizes()
+ _free_sizes(),
+ _fences()
{
}
-FileAreaFreeList::~FileAreaFreeList() = default;
+FileAreaFreeList::~FileAreaFreeList()
+{
+ assert(_fences.empty());
+}
void
FileAreaFreeList::remove_from_size_set(uint64_t offset, size_t size)
@@ -73,23 +77,29 @@ FileAreaFreeList::free(uint64_t offset, size_t size)
{
auto itr = _free_areas.lower_bound(offset);
if (itr != _free_areas.end() && itr->first <= offset + size) {
- // Merge with next free area
assert(itr->first == offset + size);
- remove_from_size_set(itr->first, itr->second);
- size += itr->second;
- itr = _free_areas.erase(itr);
+ if (!_fences.contains(offset + size)) {
+ // Merge with next free area
+ remove_from_size_set(itr->first, itr->second);
+ size += itr->second;
+ itr = _free_areas.erase(itr);
+ }
}
bool adjusted_prev_area = false;
if (itr != _free_areas.begin()) {
--itr;
if (itr->first + itr->second >= offset) {
- // Merge with previous free area
assert(itr->first + itr->second == offset);
- remove_from_size_set(itr->first, itr->second);
- offset = itr->first;
- size += itr->second;
- itr->second = size;
- adjusted_prev_area = true;
+ if (!_fences.contains(offset)) {
+ // Merge with previous free area
+ remove_from_size_set(itr->first, itr->second);
+ offset = itr->first;
+ size += itr->second;
+ itr->second = size;
+ adjusted_prev_area = true;
+ } else {
+ ++itr;
+ }
} else {
++itr;
}
@@ -101,4 +111,38 @@ FileAreaFreeList::free(uint64_t offset, size_t size)
assert(ins_res.second);
}
+void
+FileAreaFreeList::add_premmapped_area(uint64_t offset, size_t size)
+{
+ auto itr = _free_areas.lower_bound(offset);
+ if (itr != _free_areas.end()) {
+ assert(itr->first >= offset + size);
+ }
+ auto ins_res = _free_sizes[size].insert(offset);
+ assert(ins_res.second);
+ _free_areas.emplace_hint(itr, offset, size);
+ auto fences_ins_res = _fences.insert(offset);
+ assert(fences_ins_res.second);
+}
+
+void
+FileAreaFreeList::remove_premmapped_area(uint64_t offset, size_t size)
+{
+ auto itr = _free_areas.lower_bound(offset);
+ assert(itr != _free_areas.end());
+ assert(itr->first == offset);
+ assert(itr->second == size);
+ auto sizes_itr = _free_sizes.lower_bound(size);
+ assert(sizes_itr != _free_sizes.end());
+ assert(sizes_itr->first == size);
+ assert(sizes_itr->second.contains(offset));
+ assert(_fences.contains(offset));
+ _free_areas.erase(itr);
+ sizes_itr->second.erase(offset);
+ if (sizes_itr->second.empty()) {
+ _free_sizes.erase(sizes_itr);
+ }
+ _fences.erase(offset);
+}
+
}
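Note: the fences correspond to the new add_premmapped_area/remove_premmapped_area API and block merging of free areas across a premmapped-area boundary. A small usage sketch built from the test shown earlier in this diff:

#include <vespa/vespalib/util/file_area_freelist.h>
#include <cassert>

// Each premmapped area starts a fence, so freeing the two adjacent one-unit areas
// at offsets 4 and 5 does not merge them into a single area that alloc(2) could span.
int main() {
    using vespalib::alloc::FileAreaFreeList;
    FileAreaFreeList freelist;
    freelist.add_premmapped_area(4, 1);
    freelist.add_premmapped_area(5, 1);
    assert(freelist.alloc(1) == 4u);
    assert(freelist.alloc(1) == 5u);
    freelist.free(4, 1);
    freelist.free(5, 1);
    assert(freelist.alloc(2) == FileAreaFreeList::bad_offset);  // merge blocked by the fence at offset 5
    freelist.remove_premmapped_area(4, 1);
    freelist.remove_premmapped_area(5, 1);
    return 0;
}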
diff --git a/vespalib/src/vespa/vespalib/util/file_area_freelist.h b/vespalib/src/vespa/vespalib/util/file_area_freelist.h
index 00820c680ed..4982f294289 100644
--- a/vespalib/src/vespa/vespalib/util/file_area_freelist.h
+++ b/vespalib/src/vespa/vespalib/util/file_area_freelist.h
@@ -2,6 +2,7 @@
#pragma once
+#include <vespa/vespalib/stllike/hash_set.h>
#include <cstddef>
#include <cstdint>
#include <limits>
@@ -16,6 +17,7 @@ namespace vespalib::alloc {
class FileAreaFreeList {
std::map<uint64_t, size_t> _free_areas; // map from offset to size
std::map<size_t, std::set<uint64_t>> _free_sizes; // map from size to set of offsets
+ vespalib::hash_set<uint64_t> _fences;
void remove_from_size_set(uint64_t offset, size_t size);
std::pair<uint64_t, size_t> prepare_reuse_area(size_t size);
public:
@@ -23,6 +25,8 @@ public:
~FileAreaFreeList();
uint64_t alloc(size_t size);
void free(uint64_t offset, size_t size);
+ void add_premmapped_area(uint64_t offset, size_t size);
+ void remove_premmapped_area(uint64_t offset, size_t size);
static constexpr uint64_t bad_offset = std::numeric_limits<uint64_t>::max();
};
diff --git a/vespalib/src/vespa/vespalib/util/guard.h b/vespalib/src/vespa/vespalib/util/guard.h
index 32237a59d9a..efd7b8345c9 100644
--- a/vespalib/src/vespa/vespalib/util/guard.h
+++ b/vespalib/src/vespa/vespalib/util/guard.h
@@ -2,8 +2,7 @@
#pragma once
-#include <stdio.h>
-#include <dirent.h>
+#include <cstdio>
#include <unistd.h>
namespace vespalib {
@@ -19,43 +18,43 @@ class FilePointer
{
private:
FILE *_fp;
- FilePointer(const FilePointer &);
- FilePointer &operator=(const FilePointer &);
public:
/**
* @brief Create a FilePointer from a FILE pointer.
*
* @param file the underlying FILE pointer
**/
- explicit FilePointer(FILE *file = NULL) : _fp(file) {}
+ explicit FilePointer(FILE *file = nullptr) noexcept : _fp(file) {}
+ FilePointer(const FilePointer &) = delete;
+ FilePointer &operator=(const FilePointer &) = delete;
/**
* @brief Close the file if it is still open.
**/
~FilePointer() { reset(); }
/**
- * @brief Check whether we have a FILE pointer (not NULL)
+ * @brief Check whether we have a FILE pointer (not nullptr)
*
* @return true if we have an underlying FILE pointer
**/
- bool valid() const { return (_fp != NULL); }
+ bool valid() const noexcept { return (_fp != nullptr); }
/**
* @brief Obtain the internal FILE pointer
*
* @return internal FILE pointer
**/
- FILE *fp() const { return _fp; }
+ FILE *fp() const noexcept { return _fp; }
/**
* @brief Implicit cast to obtain internal FILE pointer
*
* @return internal FILE pointer
**/
- operator FILE*() { return _fp; }
+ operator FILE*() noexcept { return _fp; }
/**
* @brief Take ownership of a new FILE pointer.
*
* The previously owned FILE pointer is closed, if present.
**/
- void reset(FILE *file = NULL) {
+ void reset(FILE *file = nullptr) {
if (valid()) {
fclose(_fp);
}
@@ -68,81 +67,13 @@ public:
*
* @return the released FILE pointer
**/
- FILE *release() {
+ FILE *release() noexcept {
FILE *tmp = _fp;
- _fp = NULL;
+ _fp = nullptr;
return tmp;
}
};
-
-/**
- * @brief A DirPointer wraps a bald DIR pointer inside a guarding object.
- *
- * The underlying directory is closed when the DirPointer object is
- * destructed.
- **/
-class DirPointer
-{
-private:
- DIR *_dp;
- DirPointer(const DirPointer &);
- DirPointer &operator=(const DirPointer &);
-public:
- /**
- * @brief Create a DirPointer from a DIR pointer.
- *
- * @param dir the underlying DIR pointer
- **/
- explicit DirPointer(DIR *dir = NULL) : _dp(dir) {}
- /**
- * Close the directory if it is still open.
- **/
- ~DirPointer() { reset(); }
- /**
- * @brief Check whether we have a DIR pointer (not NULL)
- *
- * @return true if we have an underlying DIR pointer
- **/
- bool valid() const { return (_dp != NULL); }
- /**
- * @brief Obtain the internal DIR pointer
- *
- * @return internal DIR pointer
- **/
- DIR *dp() const { return _dp; }
- /**
- * @brief Implicit cast to obtain internal DIR pointer
- *
- * @return internal DIR pointer
- **/
- operator DIR*() { return _dp; }
- /**
- * @brief Take ownership of a new DIR pointer.
- *
- * The previously owned DIR pointer is closed, if present.
- **/
- void reset(DIR *dir = NULL) {
- if (valid()) {
- closedir(_dp);
- }
- _dp = dir;
- }
- /**
- * @brief Release ownership of the current DIR pointer.
- *
- * The directory will no longer be closed by the destructor.
- *
- * @return the released DIR pointer
- **/
- DIR *release() {
- DIR *tmp = _dp;
- _dp = NULL;
- return tmp;
- }
-};
-
-
/**
* @brief A FileDescriptor wraps a file descriptor inside a guarding object.
*
@@ -153,15 +84,15 @@ class FileDescriptor
{
private:
int _fd;
- FileDescriptor(const FileDescriptor &);
- FileDescriptor &operator=(const FileDescriptor &);
public:
/**
* @brief Create a FileDescriptor from a file descriptor.
*
* @param file the underlying file descriptor
**/
- explicit FileDescriptor(int file = -1) : _fd(file) {}
+ explicit FileDescriptor(int file = -1) noexcept : _fd(file) {}
+ FileDescriptor(const FileDescriptor &) = delete;
+ FileDescriptor &operator=(const FileDescriptor &) = delete;
/**
* @brief Close the file if it is still open.
**/
@@ -171,13 +102,13 @@ public:
*
* @return true if we have an underlying file descriptor
**/
- bool valid() const { return (_fd >= 0); }
+ bool valid() const noexcept { return (_fd >= 0); }
/**
* @brief Obtain the internal file descriptor
*
* @return internal file descriptor
**/
- int fd() const { return _fd; }
+ int fd() const noexcept { return _fd; }
/**
* @brief Take ownership of a new file descriptor.
*
@@ -196,7 +127,7 @@ public:
*
* @return the released file descriptor
**/
- int release() {
+ int release() noexcept {
int tmp = _fd;
_fd = -1;
return tmp;
@@ -216,161 +147,20 @@ class CounterGuard
{
private:
int &_cnt;
- CounterGuard(const CounterGuard &);
- CounterGuard &operator=(const CounterGuard &);
public:
/**
* @brief Increase the value
*
* @param cnt a reference to the value that will be modified
**/
- explicit CounterGuard(int &cnt) : _cnt(cnt) { ++cnt; }
+ explicit CounterGuard(int &cnt) noexcept : _cnt(cnt) { ++cnt; }
+ CounterGuard(const CounterGuard &) = delete;
+ CounterGuard &operator=(const CounterGuard &) = delete;
/**
* @brief Decrease the value
**/
~CounterGuard() { --_cnt; }
};
-
-/**
- * @brief A ValueGuard is used to set a variable to a specific value
- * when the ValueGuard is destructed.
- *
- * This can be used to revert a variable if an exception is thrown.
- * However, you must remember to dismiss the guard if you don't want
- * it to set the value when it goes out of scope.
- **/
-template<typename T>
-class ValueGuard
-{
-private:
- bool _active;
- T &_ref;
- T _value;
-
- ValueGuard(const ValueGuard &);
- ValueGuard &operator=(const ValueGuard &);
-public:
- /**
- * @brief Create a ValueGuard for the given variable.
- *
- * The variable will be reverted to its original value in the destructor.
- *
- * @param ref the variable that will be modified
- **/
- explicit ValueGuard(T &ref) : _active(true), _ref(ref), _value(ref) {}
- /**
- * @brief Create a ValueGuard for the given variable.
- *
- * The variable will be set to the given value in the destructor.
- *
- * @param ref the variable that will be modified
- * @param val the value it will be set to
- **/
- ValueGuard(T &ref, const T &val) : _active(true), _ref(ref), _value(val) {}
- /**
- * @brief Reset the variable.
- *
- * Set the variable to the value defined in the constructor or the
- * update method. If dismiss has been invoked, the variable is not
- * modified.
- **/
- ~ValueGuard() {
- if (_active) {
- _ref = _value;
- }
- }
- /**
- * @brief Dismiss this guard.
- *
- * When a guard has been dismissed, the destructor will not modify
- * the variable. The dismiss method is typically used to indicate
- * that everything went ok, and that we no longer need to protect
- * the variable from exceptions.
- **/
- void dismiss() { _active = false; }
- /// @brief See dismiss
- void deactivate() { dismiss(); }
- /**
- * @brief Update the value the variable will be set to in the
- * destructor.
- *
- * This can be used to set revert points during execution.
- **/
- void update(const T &val) { _value = val; }
- void operator=(const T& val) { update(val); }
-};
-
-
-/**
- * @brief A MaxValueGuard is used to enfore an upper bound on the
- * value of a variable when the MaxValueGuard is destructed.
- *
- * This can be used to revert a variable if an exception is thrown.
- * However, you must remember to dismiss the guard if you don't want
- * it to set the value when it goes out of scope.
- **/
-template<typename T>
-class MaxValueGuard {
- bool _active;
- T &_ref;
- T _value;
-
- MaxValueGuard(const MaxValueGuard &);
- MaxValueGuard &operator=(const MaxValueGuard &);
-public:
- /**
- * @brief Create a MaxValueGuard for the given variable.
- *
- * The variable will be reverted back to its original value in the
- * destructor if it has increased.
- *
- * @param ref the variable that will be modified
- **/
- explicit MaxValueGuard(T &ref) : _active(true), _ref(ref), _value(ref) {}
- /**
- * @brief Create a ValueGuard for the given variable.
- *
- * The given upper bound will be enforced in the destructor.
- *
- * @param ref the variable that will be modified
- * @param val upper bound for the variable
- **/
- MaxValueGuard(T& ref, const T& val) : _active(true), _ref(ref), _value(val) {}
- /**
- * @brief Enforce the upper bound.
- *
- * If the current value of the variable is greater than the upper
- * bound, it is set to the upper bound as defined in the
- * constructor or the update method. If dismiss has been invoked,
- * the variable is not modified.
- **/
- ~MaxValueGuard() {
- if (_active && _ref > _value) {
- _ref = _value;
- }
- }
- /**
- * @brief Dismiss this guard.
- *
- * When a guard is dismissed, the destructor will not modify the
- * variable. The dismiss method is typically used to indicate that
- * everything went ok, and that we no longer need to protect the
- * variable from exceptions.
- **/
- void dismiss() { _active = false; }
- /// @brief See dismiss
- void deactivate() { dismiss(); }
- /**
- * @brief Update the upper bound that will be enforced in the
- * destructor.
- *
- * This can be used to set revert points during execution.
- **/
- void update(const T &val) { _value = val; }
- /// @brief See update.
- void operator=(const T& val) { update(val); }
-};
-
} // namespace vespalib
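
The guard.h changes are a mechanical modernization: NULL becomes nullptr, trivial accessors gain noexcept, copy operations move from the old private-and-undefined idiom to explicit = delete, and the unused DirPointer, ValueGuard and MaxValueGuard classes are dropped. A compact sketch of the resulting pattern on a hypothetical ScopedFd wrapper (not one of the real classes):

#include <fcntl.h>
#include <unistd.h>

class ScopedFd {
    int _fd;
public:
    explicit ScopedFd(int fd = -1) noexcept : _fd(fd) {}
    ScopedFd(const ScopedFd &) = delete;             // was: private, left undefined
    ScopedFd &operator=(const ScopedFd &) = delete;  // was: private, left undefined
    ~ScopedFd() { if (_fd >= 0) ::close(_fd); }
    bool valid() const noexcept { return _fd >= 0; }
    int fd() const noexcept { return _fd; }
    int release() noexcept { int tmp = _fd; _fd = -1; return tmp; }
};

int main() {
    ScopedFd fd(::open("/dev/null", O_RDONLY));
    return fd.valid() ? 0 : 1;
}
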
diff --git a/vespalib/src/vespa/vespalib/util/jsonstream.cpp b/vespalib/src/vespa/vespalib/util/jsonstream.cpp
index e8814ee95f6..86bb8a1a8d1 100644
--- a/vespalib/src/vespa/vespalib/util/jsonstream.cpp
+++ b/vespalib/src/vespa/vespalib/util/jsonstream.cpp
@@ -39,7 +39,7 @@ JsonStream::JsonStream(asciistream& as, bool createIndents)
push({State::ROOT});
}
-JsonStream::~JsonStream() {}
+JsonStream::~JsonStream() = default;
JsonStream&
JsonStream::operator<<(stringref value)
diff --git a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
index 51a639a3c4e..198294b3770 100644
--- a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
+++ b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
@@ -11,26 +11,51 @@
#include <filesystem>
using vespalib::make_string_short::fmt;
+namespace fs = std::filesystem;
namespace vespalib::alloc {
MmapFileAllocator::MmapFileAllocator(const vespalib::string& dir_name)
+ : MmapFileAllocator(dir_name, default_small_limit, default_premmap_size)
+{
+}
+
+MmapFileAllocator::MmapFileAllocator(const vespalib::string& dir_name, uint32_t small_limit, uint32_t premmap_size)
: _dir_name(dir_name),
+ _small_limit(small_limit),
+ _premmap_size(premmap_size),
_file(_dir_name + "/swapfile"),
_end_offset(0),
_allocations(),
- _freelist()
+ _freelist(),
+ _small_allocations(),
+ _small_freelist(),
+ _premmapped_areas()
{
- std::filesystem::create_directories(std::filesystem::path(_dir_name));
+ fs::create_directories(fs::path(_dir_name));
_file.open(O_RDWR | O_CREAT | O_TRUNC, false);
}
MmapFileAllocator::~MmapFileAllocator()
{
+ assert(_small_allocations.empty());
+ assert(_allocations.size() == _premmapped_areas.size());
+ for (auto& area : _premmapped_areas) {
+ auto offset = area.first;
+ auto ptr = area.second;
+ auto itr = _allocations.find(ptr);
+ assert(itr != _allocations.end());
+ assert(itr->first == ptr);
+ assert(itr->second.offset == offset);
+ auto size = itr->second.size;
+ _small_freelist.remove_premmapped_area(offset, size);
+ free_large({ptr, size});
+ }
+ _premmapped_areas.clear();
assert(_allocations.empty());
_file.close();
_file.unlink();
- std::filesystem::remove_all(std::filesystem::path(_dir_name));
+ fs::remove_all(fs::path(_dir_name));
}
uint64_t
@@ -52,11 +77,23 @@ MmapFileAllocator::alloc(size_t sz) const
if (sz == 0) {
return PtrAndSize(); // empty allocation
}
+ static constexpr size_t alignment = 128;
+ sz = (sz + alignment - 1) & -alignment; // round sz to a multiple of alignment
+ if (sz >= _small_limit) {
+ return alloc_large(sz);
+ } else {
+ return alloc_small(sz);
+ }
+}
+
+PtrAndSize
+MmapFileAllocator::alloc_large(size_t sz) const
+{
sz = round_up_to_page_size(sz);
uint64_t offset = alloc_area(sz);
void *buf = mmap(nullptr, sz, PROT_READ | PROT_WRITE, MAP_SHARED, _file.getFileDescriptor(), offset);
if (buf == MAP_FAILED) {
- throw IoException(fmt("Failed mmap(nullptr, %zu, PROT_READ | PROT_WRITE, MAP_SHARED, %s(fd=%d), %lu). Reason given by OS = '%s'",
+ throw IoException(fmt("Failed mmap(nullptr, %zu, PROT_READ | PROT_WRITE, MAP_SHARED, %s(fd=%d), %" PRIu64 "). Reason given by OS = '%s'",
sz, _file.getFilename().c_str(), _file.getFileDescriptor(), offset, getLastErrorString().c_str()),
IoException::getErrorType(errno), VESPA_STRLOC);
}
@@ -73,6 +110,45 @@ MmapFileAllocator::alloc(size_t sz) const
return PtrAndSize(buf, sz);
}
+void*
+MmapFileAllocator::map_premapped_offset_to_ptr(uint64_t offset, size_t size) const
+{
+ auto itr = _premmapped_areas.lower_bound(offset);
+ if (itr == _premmapped_areas.end() || itr->first > offset) {
+ assert(itr != _premmapped_areas.begin());
+ --itr;
+ }
+ auto aitr = _allocations.find(itr->second);
+ assert(aitr != _allocations.end());
+ assert(aitr->first == itr->second);
+ assert(offset >= aitr->second.offset);
+ assert(offset + size <= aitr->second.offset + aitr->second.size);
+ return static_cast<char*>(itr->second) + (offset - aitr->second.offset);
+}
+
+PtrAndSize
+MmapFileAllocator::alloc_small(size_t sz) const
+{
+ uint64_t offset = _small_freelist.alloc(sz);
+ if (offset == FileAreaFreeList::bad_offset) {
+ auto new_premmap = alloc_large(_premmap_size);
+ assert(new_premmap.size() >= _premmap_size);
+ auto itr = _allocations.find(new_premmap.get());
+ assert(itr != _allocations.end());
+ assert(itr->first == new_premmap.get());
+ _small_freelist.add_premmapped_area(itr->second.offset, itr->second.size);
+ auto ins_res = _premmapped_areas.emplace(itr->second.offset, new_premmap.get());
+ assert(ins_res.second);
+ offset = _small_freelist.alloc(sz);
+ assert(offset != FileAreaFreeList::bad_offset);
+ }
+ auto ptr = map_premapped_offset_to_ptr(offset, sz);
+ // Register allocation
+ auto ins_res = _small_allocations.insert(std::make_pair(ptr, SizeAndOffset(sz, offset)));
+ assert(ins_res.second);
+ return {ptr, sz};
+}
+
void
MmapFileAllocator::free(PtrAndSize alloc) const noexcept
{
@@ -81,13 +157,30 @@ MmapFileAllocator::free(PtrAndSize alloc) const noexcept
return; // empty allocation
}
assert(alloc.get() != nullptr);
+ if (alloc.size() >= _small_limit) {
+ free_large(alloc);
+ } else {
+ free_small(alloc);
+ }
+}
+
+uint64_t
+MmapFileAllocator::remove_allocation(PtrAndSize alloc, Allocations& allocations) const noexcept
+{
// Check that matching allocation is registered
- auto itr = _allocations.find(alloc.get());
- assert(itr != _allocations.end());
+ auto itr = allocations.find(alloc.get());
+ assert(itr != allocations.end());
assert(itr->first == alloc.get());
assert(itr->second.size == alloc.size());
auto offset = itr->second.offset;
- _allocations.erase(itr);
+ allocations.erase(itr);
+ return offset;
+}
+
+void
+MmapFileAllocator::free_large(PtrAndSize alloc) const noexcept
+{
+ auto offset = remove_allocation(alloc, _allocations);
int retval = madvise(alloc.get(), alloc.size(), MADV_DONTNEED);
assert(retval == 0);
retval = munmap(alloc.get(), alloc.size());
@@ -95,6 +188,13 @@ MmapFileAllocator::free(PtrAndSize alloc) const noexcept
_freelist.free(offset, alloc.size());
}
+void
+MmapFileAllocator::free_small(PtrAndSize alloc) const noexcept
+{
+ auto offset = remove_allocation(alloc, _small_allocations);
+ _small_freelist.free(offset, alloc.size());
+}
+
size_t
MmapFileAllocator::resize_inplace(PtrAndSize, size_t) const
{
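
alloc() now rounds every request up to a 128-byte multiple and routes it by size: requests at or above _small_limit keep the one-mmap-per-allocation path, smaller ones are carved out of shared premmapped areas. A standalone demo of the rounding and the routing decision, with an illustrative threshold rather than the class member:

#include <cstddef>
#include <cstdio>

static constexpr size_t alignment = 128;

size_t round_up(size_t sz) {
    // Same bit trick as in alloc(); valid because alignment is a power of two.
    return (sz + alignment - 1) & -alignment;
}

int main() {
    constexpr size_t small_limit = 128 * 1024;  // illustrative threshold
    const size_t samples[] = {1, 100, 128, 4097, 200000};
    for (size_t sz : samples) {
        size_t rounded = round_up(sz);
        std::printf("%zu -> %zu (%s)\n", sz, rounded,
                    rounded >= small_limit ? "large" : "small");
    }
}
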
diff --git a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.h b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.h
index 883c7e49848..c79dc8682ba 100644
--- a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.h
+++ b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.h
@@ -7,6 +7,7 @@
#include <vespa/vespalib/io/fileutil.h>
#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/util/size_literals.h>
#include <map>
namespace vespalib::alloc {
@@ -15,6 +16,9 @@ namespace vespalib::alloc {
* Class handling memory allocations backed by one or more files.
* Not reentrant or thread safe. Should not be destructed before all allocations
* have been freed.
+ *
+ * Memory allocations smaller than _small_limit use portions of
+ * premmapped areas to reduce the total number of memory mappings.
*/
class MmapFileAllocator : public MemoryAllocator {
struct SizeAndOffset {
@@ -30,14 +34,29 @@ class MmapFileAllocator : public MemoryAllocator {
{
}
};
+ using Allocations = hash_map<void *, SizeAndOffset>;
const vespalib::string _dir_name;
+ const uint32_t _small_limit;
+ const uint32_t _premmap_size;
mutable File _file;
mutable uint64_t _end_offset;
- mutable hash_map<void *, SizeAndOffset> _allocations;
+ mutable Allocations _allocations;
mutable FileAreaFreeList _freelist;
+ mutable Allocations _small_allocations;
+ mutable FileAreaFreeList _small_freelist;
+ mutable std::map<uint64_t, void*> _premmapped_areas;
uint64_t alloc_area(size_t sz) const;
+ PtrAndSize alloc_large(size_t size) const;
+ PtrAndSize alloc_small(size_t size) const;
+ void free_large(PtrAndSize alloc) const noexcept;
+ void free_small(PtrAndSize alloc) const noexcept;
+ void* map_premapped_offset_to_ptr(uint64_t offset, size_t size) const;
+ uint64_t remove_allocation(PtrAndSize alloc, Allocations& allocations) const noexcept;
public:
+ static constexpr uint32_t default_small_limit = 0;
+ static constexpr uint32_t default_premmap_size = 1_Mi;
MmapFileAllocator(const vespalib::string& dir_name);
+ MmapFileAllocator(const vespalib::string& dir_name, uint32_t small_limit, uint32_t premmap_size);
~MmapFileAllocator();
PtrAndSize alloc(size_t sz) const override;
void free(PtrAndSize alloc) const noexcept override;
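
A construction/usage sketch for the new three-argument constructor declared above. Note that default_small_limit is 0, so the one-argument constructor keeps the previous behaviour (every allocation takes the large path); the directory and sizes below are illustrative:

#include <vespa/vespalib/util/mmap_file_allocator.h>

using vespalib::alloc::MmapFileAllocator;

int main() {
    // small_limit = 128 KiB, premmap_size = 1 MiB (illustrative values)
    MmapFileAllocator allocator("/tmp/swapdir", 128 * 1024, 1024 * 1024);
    auto small = allocator.alloc(4000);       // served from a shared premmapped area
    auto large = allocator.alloc(512 * 1024); // gets its own mmap of the backing file
    allocator.free(small);
    allocator.free(large);                    // everything freed before destruction
}
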
diff --git a/vespalib/src/vespa/vespalib/util/small_vector.h b/vespalib/src/vespa/vespalib/util/small_vector.h
index ba166362d33..0d204897328 100644
--- a/vespalib/src/vespa/vespalib/util/small_vector.h
+++ b/vespalib/src/vespa/vespalib/util/small_vector.h
@@ -212,6 +212,7 @@ public:
}
void push_back(const T &obj) { emplace_back(obj); }
void push_back(T &&obj) { emplace_back(std::move(obj)); }
+ void pop_back() { small_vector::destroy_objects(_data + --_size, 1); }
};
template <typename T, size_t N, size_t M>
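
A small usage sketch of the new pop_back(), which destroys the last element in place and decrements the size, matching std::vector semantics (element type and values are illustrative):

#include <vespa/vespalib/util/small_vector.h>
#include <cassert>
#include <string>

int main() {
    vespalib::SmallVector<std::string, 4> names;
    names.emplace_back("first");
    names.emplace_back("second");
    names.pop_back();              // destroys "second", size drops to 1
    assert(names.size() == 1);
    assert(names[0] == "first");
}
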
diff --git a/vespalib/src/vespa/vespalib/util/stash.cpp b/vespalib/src/vespa/vespalib/util/stash.cpp
index 9e982588294..870340adefe 100644
--- a/vespalib/src/vespa/vespalib/util/stash.cpp
+++ b/vespalib/src/vespa/vespalib/util/stash.cpp
@@ -132,12 +132,11 @@ Stash::get_memory_usage() const
used += chunk->used;
}
for (auto cleanup = _cleanup; cleanup; cleanup = cleanup->next) {
- if (auto memory = dynamic_cast<stash::DeleteMemory *>(cleanup)) {
- allocated += memory->allocated;
- used += memory->allocated;
- }
+ auto extra = cleanup->allocated();
+ allocated += extra;
+ used += extra;
}
- return MemoryUsage(allocated, used, 0, 0);
-};
+ return {allocated, used, 0, 0};
+}
} // namespace vespalib
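
get_memory_usage() no longer needs dynamic_cast: the Cleanup base class (see the stash.h hunk below) gains a virtual allocated() that defaults to 0, and DeleteMemory overrides it. A standalone sketch of that pattern with hypothetical Node/ExternalBlock types:

#include <cstddef>
#include <cstdio>

struct Node {
    Node *next;
    explicit Node(Node *next_in) noexcept : next(next_in) {}
    virtual size_t allocated() const noexcept { return 0; }  // default: no extra memory
    virtual ~Node() = default;
};

struct ExternalBlock final : Node {
    size_t _allocated;
    ExternalBlock(size_t sz, Node *next_in) noexcept : Node(next_in), _allocated(sz) {}
    size_t allocated() const noexcept override { return _allocated; }
};

size_t total_external(const Node *head) {
    size_t sum = 0;
    for (const Node *n = head; n; n = n->next) {
        sum += n->allocated();   // no dynamic_cast, no RTTI dependency
    }
    return sum;
}

int main() {
    Node plain(nullptr);
    ExternalBlock block(4096, &plain);
    std::printf("%zu\n", total_external(&block));  // prints 4096
}
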
diff --git a/vespalib/src/vespa/vespalib/util/stash.h b/vespalib/src/vespa/vespalib/util/stash.h
index 5f2ad27bbcd..2be352e6381 100644
--- a/vespalib/src/vespa/vespalib/util/stash.h
+++ b/vespalib/src/vespa/vespalib/util/stash.h
@@ -15,25 +15,29 @@ struct Cleanup {
Cleanup * const next;
explicit Cleanup(Cleanup *next_in) noexcept : next(next_in) {}
virtual void cleanup() = 0;
+ virtual size_t allocated() const noexcept { return 0; }
protected:
virtual ~Cleanup() = default;
};
// used as header for memory allocated outside the stash
-struct DeleteMemory : public Cleanup {
- explicit DeleteMemory(size_t sz, Cleanup *next_in) noexcept : Cleanup(next_in), allocated(sz) {}
+struct DeleteMemory final : public Cleanup {
+ explicit DeleteMemory(size_t sz, Cleanup *next_in) noexcept : Cleanup(next_in), _allocated(sz) {}
void cleanup() override { free((void*)this); }
- size_t allocated;
+ size_t allocated() const noexcept override { return _allocated; }
+ size_t _allocated;
};
// used as prefix for objects to be destructed
-template<typename T> struct DestructObject : public Cleanup {
+template<typename T>
+struct DestructObject final : public Cleanup {
explicit DestructObject(Cleanup *next_in) noexcept : Cleanup(next_in) {}
void cleanup() override { reinterpret_cast<T*>(this + 1)->~T(); }
};
// used as prefix for arrays to be destructed
-template<typename T> struct DestructArray : public Cleanup {
+template<typename T>
+struct DestructArray final : public Cleanup {
size_t size;
explicit DestructArray(Cleanup *next_in, size_t size_in) noexcept
: Cleanup(next_in), size(size_in) {}