aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--client/go/internal/cli/cmd/cert.go13
-rw-r--r--client/go/internal/cli/cmd/document.go2
-rw-r--r--client/go/internal/vespa/application.go21
-rw-r--r--client/go/internal/vespa/deploy_test.go21
-rw-r--r--client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java2
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java8
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java16
-rw-r--r--config-model/src/main/java/com/yahoo/schema/RankProfile.java9
-rw-r--r--config-model/src/main/java/com/yahoo/schema/Schema.java10
-rw-r--r--config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java5
-rw-r--r--config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java1
-rw-r--r--config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java2
-rw-r--r--config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java7
-rw-r--r--config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java10
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidator.java30
-rw-r--r--config-model/src/main/javacc/SchemaParser.jj16
-rw-r--r--config-model/src/main/resources/schema/content.rnc4
-rw-r--r--config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java59
-rw-r--r--config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java10
-rw-r--r--config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidatorTest.java48
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java124
-rw-r--r--config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java75
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java41
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java31
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java84
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java8
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java54
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionData.java87
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java37
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java70
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionSerializer.java53
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java45
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java15
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java6
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java18
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java51
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java31
-rw-r--r--container-search/abi-spec.json3
-rw-r--r--container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java27
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java1
-rw-r--r--container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java16
-rw-r--r--container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java6
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java2
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java273
-rw-r--r--controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java174
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java8
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java2
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java13
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java10
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java10
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java8
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java6
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java16
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java12
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json30
-rw-r--r--controller-server/src/test/resources/test_runner_services.xml-cd2
-rw-r--r--controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd2
-rw-r--r--dependency-versions/pom.xml2
-rw-r--r--document/src/test/java/com/yahoo/document/DocumentTestCase.java2
-rw-r--r--document/src/tests/serialization/vespadocumentserializer_test.cpp2
-rw-r--r--eval/src/vespa/eval/eval/inline_operation.h28
-rw-r--r--eval/src/vespa/eval/instruction/best_similarity_function.cpp5
-rw-r--r--eval/src/vespa/eval/instruction/dense_dot_product_function.cpp40
-rw-r--r--eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp15
-rw-r--r--eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp31
-rw-r--r--eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp6
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/EmptyFileReferenceData.java55
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java2
-rw-r--r--filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java2
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/Flags.java9
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java14
-rw-r--r--flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java4
-rw-r--r--fnet/src/tests/sync_execute/sync_execute.cpp8
-rw-r--r--fnet/src/vespa/fnet/connection.cpp13
-rw-r--r--fnet/src/vespa/fnet/connection.h2
-rw-r--r--fnet/src/vespa/fnet/transport_thread.cpp37
-rw-r--r--fnet/src/vespa/fnet/transport_thread.h5
-rw-r--r--indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpression.java51
-rw-r--r--indexinglanguage/src/main/javacc/IndexingParser.jj8
-rw-r--r--indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpressionTestCase.java51
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java14
-rw-r--r--messagebus/src/vespa/messagebus/network/rpctarget.cpp9
-rw-r--r--messagebus/src/vespa/messagebus/network/rpctarget.h8
-rw-r--r--messagebus/src/vespa/messagebus/network/rpctargetpool.cpp2
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java3
-rw-r--r--metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java2
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java6
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java15
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java11
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java62
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java1
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java9
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java8
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java10
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java (renamed from node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java)116
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java46
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java42
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java20
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java44
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceChange.java94
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java15
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java26
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java14
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java75
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java47
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java28
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java33
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java3
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java3
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java2
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java3
-rw-r--r--parent/pom.xml15
-rw-r--r--persistence/src/vespa/persistence/spi/clusterstate.cpp2
-rw-r--r--searchcore/src/tests/proton/matching/matching_test.cpp21
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp16
-rw-r--r--searchcore/src/vespa/searchcore/proton/matching/match_tools.h10
-rw-r--r--searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp22
-rw-r--r--searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp10
-rw-r--r--searchlib/src/tests/ranksetup/ranksetup_test.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h10
-rw-r--r--searchlib/src/vespa/searchlib/fef/indexproperties.cpp16
-rw-r--r--searchlib/src/vespa/searchlib/fef/indexproperties.h15
-rw-r--r--searchlib/src/vespa/searchlib/fef/ranksetup.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/fef/ranksetup.h3
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h2
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp21
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h10
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/tensor/distance_calculator.h6
-rw-r--r--storage/src/tests/distributor/btree_bucket_database_test.cpp6
-rw-r--r--storage/src/tests/distributor/bucketdatabasetest.cpp16
-rw-r--r--storage/src/tests/distributor/bucketdatabasetest.h14
-rw-r--r--storage/src/tests/distributor/bucketdbmetricupdatertest.cpp13
-rw-r--r--storage/src/tests/distributor/bucketstateoperationtest.cpp1
-rw-r--r--storage/src/tests/distributor/distributor_bucket_space_test.cpp10
-rw-r--r--storage/src/tests/distributor/distributor_stripe_test_util.cpp103
-rw-r--r--storage/src/tests/distributor/distributor_stripe_test_util.h1
-rw-r--r--storage/src/tests/distributor/garbagecollectiontest.cpp3
-rw-r--r--storage/src/tests/distributor/operationtargetresolvertest.cpp26
-rw-r--r--storage/src/tests/distributor/pendingmessagetrackertest.cpp6
-rw-r--r--storage/src/tests/distributor/simplemaintenancescannertest.cpp8
-rw-r--r--storage/src/tests/distributor/statecheckerstest.cpp55
-rw-r--r--storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp70
-rw-r--r--storage/src/tests/distributor/top_level_distributor_test_util.cpp12
-rw-r--r--storage/src/tests/distributor/top_level_distributor_test_util.h1
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketcopy.h20
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketdatabase.h33
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketinfo.cpp33
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketinfo.h64
-rw-r--r--storage/src/vespa/storage/bucketdb/bucketinfo.hpp99
-rw-r--r--storage/src/vespa/storage/common/distributorcomponent.h7
-rw-r--r--storage/src/vespa/storage/distributor/activecopy.cpp155
-rw-r--r--storage/src/vespa/storage/distributor/activecopy.h43
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.cpp20
-rw-r--r--storage/src/vespa/storage/distributor/distributor_bucket_space.h2
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe.cpp13
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.cpp108
-rw-r--r--storage/src/vespa/storage/distributor/distributor_stripe_component.h37
-rw-r--r--storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp55
-rw-r--r--storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h68
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.cpp67
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemanager.h14
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemetricsset.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/idealstatemetricsset.h4
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h14
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp45
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h58
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp21
-rw-r--r--storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h12
-rw-r--r--storage/src/vespa/storage/distributor/messagetracker.cpp27
-rw-r--r--storage/src/vespa/storage/distributor/messagetracker.h25
-rw-r--r--storage/src/vespa/storage/distributor/nodeinfo.cpp26
-rw-r--r--storage/src/vespa/storage/distributor/nodeinfo.h14
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/putoperation.cpp13
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp8
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp9
-rw-r--r--storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp9
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp5
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp15
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp6
-rw-r--r--storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp2
-rw-r--r--storage/src/vespa/storage/distributor/operationtargetresolver.h27
-rw-r--r--storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp88
-rw-r--r--storage/src/vespa/storage/distributor/operationtargetresolverimpl.h27
-rw-r--r--storage/src/vespa/storage/distributor/pendingmessagetracker.cpp35
-rw-r--r--storage/src/vespa/storage/distributor/pendingmessagetracker.h11
-rw-r--r--storage/src/vespa/storage/distributor/persistencemessagetracker.cpp68
-rw-r--r--storage/src/vespa/storage/distributor/persistencemessagetracker.h5
-rw-r--r--storage/src/vespa/storage/distributor/statechecker.cpp12
-rw-r--r--storage/src/vespa/storage/distributor/statechecker.h54
-rw-r--r--storage/src/vespa/storage/distributor/statecheckers.cpp465
-rw-r--r--storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp1
-rw-r--r--storage/src/vespa/storage/persistence/persistenceutil.h1
-rw-r--r--storage/src/vespa/storage/storageserver/communicationmanager.cpp22
-rw-r--r--storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp2
-rw-r--r--storage/src/vespa/storage/storageutil/distributorstatecache.h8
-rw-r--r--storage/src/vespa/storage/storageutil/utils.h47
-rw-r--r--storage/src/vespa/storage/tools/getidealstate.cpp15
-rw-r--r--storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp15
-rw-r--r--storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h3
-rw-r--r--vdslib/src/tests/distribution/CMakeLists.txt1
-rw-r--r--vdslib/src/tests/distribution/distributiontest.cpp173
-rw-r--r--vdslib/src/tests/distribution/idealnodecalculatorimpltest.cpp35
-rw-r--r--vdslib/src/tests/state/clusterstatetest.cpp20
-rw-r--r--vdslib/src/vespa/vdslib/distribution/CMakeLists.txt1
-rw-r--r--vdslib/src/vespa/vdslib/distribution/distribution.cpp96
-rw-r--r--vdslib/src/vespa/vdslib/distribution/distribution.h80
-rw-r--r--vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h94
-rw-r--r--vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.cpp72
-rw-r--r--vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.h31
-rw-r--r--vdslib/src/vespa/vdslib/state/clusterstate.cpp314
-rw-r--r--vdslib/src/vespa/vdslib/state/clusterstate.h42
-rw-r--r--vdslib/src/vespa/vdslib/state/node.h13
-rw-r--r--vespa-athenz/pom.xml46
-rw-r--r--vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java180
-rw-r--r--vespa-dependencies-enforcer/allowed-maven-dependencies.txt12
-rw-r--r--vespalib/src/tests/guard/guard_test.cpp152
-rw-r--r--vespalib/src/tests/io/fileutil/fileutiltest.cpp83
-rw-r--r--vespalib/src/vespa/vespalib/io/fileutil.cpp302
-rw-r--r--vespalib/src/vespa/vespalib/io/fileutil.h89
-rw-r--r--vespalib/src/vespa/vespalib/stllike/hash_map.cpp1
-rw-r--r--vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp3
-rw-r--r--vespalib/src/vespa/vespalib/stllike/string.hpp4
-rw-r--r--vespalib/src/vespa/vespalib/util/guard.h250
-rw-r--r--vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp5
245 files changed, 3791 insertions, 3908 deletions
diff --git a/client/go/internal/cli/cmd/cert.go b/client/go/internal/cli/cmd/cert.go
index f7320e37626..7fbb357d1db 100644
--- a/client/go/internal/cli/cmd/cert.go
+++ b/client/go/internal/cli/cmd/cert.go
@@ -156,9 +156,16 @@ func doCertAdd(cli *CLI, overwriteCertificate bool, args []string) error {
}
func maybeCopyCertificate(force, ignoreZip bool, cli *CLI, target vespa.Target, pkg vespa.ApplicationPackage) error {
- if pkg.IsZip() && !ignoreZip {
- hint := "Try running 'mvn clean', then 'vespa auth cert add' and finally 'mvn package'"
- return errHint(fmt.Errorf("cannot add certificate to compressed application package: %s", pkg.Path), hint)
+ if pkg.IsZip() {
+ if ignoreZip {
+ cli.printWarning("Cannot verify existence of "+color.CyanString("security/clients.pem")+" since "+pkg.Path+" is compressed",
+ "Deployment to Vespa Cloud requires certificate in application package",
+ "See https://cloud.vespa.ai/en/security/guide")
+ return nil
+ } else {
+ hint := "Try running 'mvn clean', then 'vespa auth cert add' and finally 'mvn package'"
+ return errHint(fmt.Errorf("cannot add certificate to compressed application package: %s", pkg.Path), hint)
+ }
}
if force {
return copyCertificate(cli, target, pkg)
diff --git a/client/go/internal/cli/cmd/document.go b/client/go/internal/cli/cmd/document.go
index c31f8c34d14..1e5d1c30f6e 100644
--- a/client/go/internal/cli/cmd/document.go
+++ b/client/go/internal/cli/cmd/document.go
@@ -171,7 +171,7 @@ https://docs.vespa.ai/en/reference/document-json-format.html#document-operations
When this returns successfully, the document is guaranteed to be visible in any
subsequent get or query operation.
-To feed with high throughput, https://docs.vespa.ai/en/vespa-feed-client.html
+To feed with high throughput, https://docs.vespa.ai/en/reference/vespa-cli/vespa_feed.html
should be used instead of this.`,
Example: `$ vespa document src/test/resources/A-Head-Full-of-Dreams.json`,
DisableAutoGenTag: true,
diff --git a/client/go/internal/vespa/application.go b/client/go/internal/vespa/application.go
index b31dde54d67..b6b5b9427b3 100644
--- a/client/go/internal/vespa/application.go
+++ b/client/go/internal/vespa/application.go
@@ -216,17 +216,28 @@ func copyFile(src *zip.File, dst string) error {
// FindApplicationPackage finds the path to an application package from the zip file or directory zipOrDir. If
// requirePackaging is true, the application package is required to be packaged with mvn package.
+//
+// Package to use is preferred in this order:
+// 1. Given path, if it's a zip
+// 2. target/application
+// 3. target/application.zip
+// 4. src/main/application
+// 5. Given path, if it contains services.xml
func FindApplicationPackage(zipOrDir string, requirePackaging bool) (ApplicationPackage, error) {
if isZip(zipOrDir) {
return ApplicationPackage{Path: zipOrDir}, nil
}
- if util.PathExists(filepath.Join(zipOrDir, "pom.xml")) {
- zip := filepath.Join(zipOrDir, "target", "application.zip")
- if util.PathExists(zip) {
+ // Prefer uncompressed application because this allows us to add security/clients.pem to the package on-demand
+ if path := filepath.Join(zipOrDir, "target", "application"); util.PathExists(path) {
+ return ApplicationPackage{Path: path}, nil
+ }
+ appZip := filepath.Join(zipOrDir, "target", "application.zip")
+ if util.PathExists(filepath.Join(zipOrDir, "pom.xml")) || util.PathExists(appZip) {
+ if util.PathExists(appZip) {
if testZip := filepath.Join(zipOrDir, "target", "application-test.zip"); util.PathExists(testZip) {
- return ApplicationPackage{Path: zip, TestPath: testZip}, nil
+ return ApplicationPackage{Path: appZip, TestPath: testZip}, nil
}
- return ApplicationPackage{Path: zip}, nil
+ return ApplicationPackage{Path: appZip}, nil
}
if requirePackaging {
return ApplicationPackage{}, errors.New("found pom.xml, but target/application.zip does not exist: run 'mvn package' first")
diff --git a/client/go/internal/vespa/deploy_test.go b/client/go/internal/vespa/deploy_test.go
index 39a9f2bcdf2..c68ad750f1a 100644
--- a/client/go/internal/vespa/deploy_test.go
+++ b/client/go/internal/vespa/deploy_test.go
@@ -131,6 +131,11 @@ func TestFindApplicationPackage(t *testing.T) {
existingFile: filepath.Join(dir, "services.xml"),
})
assertFindApplicationPackage(t, dir, pkgFixture{
+ expectedPath: dir,
+ expectedTestPath: dir,
+ existingFiles: []string{filepath.Join(dir, "services.xml"), filepath.Join(dir, "tests", "foo.json")},
+ })
+ assertFindApplicationPackage(t, dir, pkgFixture{
expectedPath: filepath.Join(dir, "src", "main", "application"),
existingFile: filepath.Join(dir, "src", "main", "application") + string(os.PathSeparator),
})
@@ -149,11 +154,17 @@ func TestFindApplicationPackage(t *testing.T) {
existingFiles: []string{filepath.Join(dir, "pom.xml"), filepath.Join(dir, "target", "application.zip")},
requirePackaging: true,
})
- dir2 := t.TempDir()
- assertFindApplicationPackage(t, dir2, pkgFixture{
- expectedPath: dir2,
- expectedTestPath: dir2,
- existingFiles: []string{filepath.Join(dir2, "services.xml"), filepath.Join(dir2, "tests", "foo.json")},
+ assertFindApplicationPackage(t, dir, pkgFixture{
+ expectedPath: filepath.Join(dir, "target", "application.zip"),
+ existingFiles: []string{filepath.Join(dir, "target", "application.zip")},
+ })
+ assertFindApplicationPackage(t, dir, pkgFixture{
+ expectedPath: filepath.Join(dir, "target", "application"),
+ existingFiles: []string{filepath.Join(dir, "target", "application"), filepath.Join(dir, "target", "application.zip")},
+ })
+ zip := filepath.Join(dir, "myapp.zip")
+ assertFindApplicationPackage(t, zip, pkgFixture{
+ expectedPath: zip,
})
}
diff --git a/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java b/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java
index 1ae7f5cdfde..7dd45153353 100644
--- a/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java
+++ b/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java
@@ -14,7 +14,7 @@ public class NearestNeighbor extends QueryChain {
this.nonEmpty = true;
}
- NearestNeighbor annotate(Annotation annotation) {
+ public NearestNeighbor annotate(Annotation annotation) {
this.annotation = annotation;
return this;
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java b/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
index 7ef92bba7e9..20940989618 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/Quota.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.model.api;
+import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
@@ -50,10 +51,13 @@ public class Quota {
public Slime toSlime() {
var slime = new Slime();
- var root = slime.setObject();
+ toSlime(slime.setObject());
+ return slime;
+ }
+
+ public void toSlime(Cursor root) {
maxClusterSize.ifPresent(clusterSize -> root.setLong("clusterSize", clusterSize));
budget.ifPresent(b -> root.setString("budget", b.toPlainString()));
- return slime;
}
public static Quota unlimited() { return UNLIMITED; }
diff --git a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
index 3b715c63105..dbcd1cea2fa 100644
--- a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
+++ b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
@@ -63,6 +63,7 @@ public class MockApplicationPackage implements ApplicationPackage {
private final boolean failOnValidateXml;
private final QueryProfileRegistry queryProfileRegistry;
private final ApplicationMetaData applicationMetaData;
+ private final TenantName tenantName;
private DeploymentSpec deploymentSpec = null;
@@ -70,7 +71,7 @@ public class MockApplicationPackage implements ApplicationPackage {
Map<Path, MockApplicationFile> files,
String schemaDir,
String deploymentSpec, String validationOverrides, boolean failOnValidateXml,
- String queryProfile, String queryProfileType) {
+ String queryProfile, String queryProfileType, TenantName tenantName) {
this.root = root;
this.hostsS = hosts;
this.servicesS = services;
@@ -85,19 +86,20 @@ public class MockApplicationPackage implements ApplicationPackage {
applicationMetaData = new ApplicationMetaData("dir",
0L,
false,
- ApplicationId.from(TenantName.defaultName(),
+ ApplicationId.from(tenantName,
ApplicationName.from(APPLICATION_NAME),
InstanceName.defaultName()),
"checksum",
APPLICATION_GENERATION,
0L);
+ this.tenantName = tenantName;
}
/** Returns the root of this application package relative to the current dir */
protected File root() { return root; }
@Override
- public ApplicationId getApplicationId() { return ApplicationId.from("default", "mock-application", "default"); }
+ public ApplicationId getApplicationId() { return ApplicationId.from(tenantName.value(), "mock-application", "default"); }
@Override
public Reader getServices() {
@@ -246,6 +248,7 @@ public class MockApplicationPackage implements ApplicationPackage {
private boolean failOnValidateXml = false;
private String queryProfile = null;
private String queryProfileType = null;
+ private TenantName tenantName = TenantName.defaultName();
public Builder() {
}
@@ -323,10 +326,15 @@ public class MockApplicationPackage implements ApplicationPackage {
return this;
}
+ public Builder withTenantname(TenantName tenantName) {
+ this.tenantName = tenantName;
+ return this;
+ }
+
public ApplicationPackage build() {
return new MockApplicationPackage(root, hosts, services, schemas, files, schemaDir,
deploymentSpec, validationOverrides, failOnValidateXml,
- queryProfile, queryProfileType);
+ queryProfile, queryProfileType, tenantName);
}
}
diff --git a/config-model/src/main/java/com/yahoo/schema/RankProfile.java b/config-model/src/main/java/com/yahoo/schema/RankProfile.java
index 69f32daef4a..35ef12f077a 100644
--- a/config-model/src/main/java/com/yahoo/schema/RankProfile.java
+++ b/config-model/src/main/java/com/yahoo/schema/RankProfile.java
@@ -100,6 +100,7 @@ public class RankProfile implements Cloneable {
private Double termwiseLimit = null;
private Double postFilterThreshold = null;
private Double approximateThreshold = null;
+ private Double targetHitsMaxAdjustmentFactor = null;
/** The drop limit used to drop hits with rank score less than or equal to this value */
private double rankScoreDropLimit = -Double.MAX_VALUE;
@@ -768,6 +769,7 @@ public class RankProfile implements Cloneable {
public void setTermwiseLimit(double termwiseLimit) { this.termwiseLimit = termwiseLimit; }
public void setPostFilterThreshold(double threshold) { this.postFilterThreshold = threshold; }
public void setApproximateThreshold(double threshold) { this.approximateThreshold = threshold; }
+ public void setTargetHitsMaxAdjustmentFactor(double factor) { this.targetHitsMaxAdjustmentFactor = factor; }
public OptionalDouble getTermwiseLimit() {
if (termwiseLimit != null) return OptionalDouble.of(termwiseLimit);
@@ -789,6 +791,13 @@ public class RankProfile implements Cloneable {
return uniquelyInherited(p -> p.getApproximateThreshold(), l -> l.isPresent(), "approximate-threshold").orElse(OptionalDouble.empty());
}
+ public OptionalDouble getTargetHitsMaxAdjustmentFactor() {
+ if (targetHitsMaxAdjustmentFactor != null) {
+ return OptionalDouble.of(targetHitsMaxAdjustmentFactor);
+ }
+ return uniquelyInherited(p -> p.getTargetHitsMaxAdjustmentFactor(), l -> l.isPresent(), "target-hits-max-adjustment-factor").orElse(OptionalDouble.empty());
+ }
+
/** Whether we should ignore the default rank features. Set to null to use inherited */
public void setIgnoreDefaultRankFeatures(Boolean ignoreDefaultRankFeatures) {
this.ignoreDefaultRankFeatures = ignoreDefaultRankFeatures;
diff --git a/config-model/src/main/java/com/yahoo/schema/Schema.java b/config-model/src/main/java/com/yahoo/schema/Schema.java
index 93bec4975a6..36730a502ea 100644
--- a/config-model/src/main/java/com/yahoo/schema/Schema.java
+++ b/config-model/src/main/java/com/yahoo/schema/Schema.java
@@ -319,16 +319,12 @@ public class Schema implements ImmutableSchema {
return null;
}
- /**
- * @return true if the document has been added.
- */
+ /** Returns true if the document has been added. */
public boolean hasDocument() {
return documentType != null;
}
- /**
- * @return The document in this search.
- */
+ /** Returns the document in this search. */
@Override
public SDDocumentType getDocument() {
return documentType;
@@ -384,7 +380,7 @@ public class Schema implements ImmutableSchema {
}
/**
- * Returns a field defined in one of the documents of this search definition.
+ * Returns a field defined in one of the documents of this schema.
* This does not include the extra fields defined outside the document
* (those accessible through the getExtraField() method).
*
diff --git a/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java b/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java
index 82c0c9d516a..29bd454cc62 100644
--- a/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java
+++ b/config-model/src/main/java/com/yahoo/schema/derived/RawRankProfile.java
@@ -153,6 +153,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
private final double termwiseLimit;
private final OptionalDouble postFilterThreshold;
private final OptionalDouble approximateThreshold;
+ private final OptionalDouble targetHitsMaxAdjustmentFactor;
private final double rankScoreDropLimit;
private final boolean enableNestedMultivalueGrouping;
@@ -197,6 +198,7 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
enableNestedMultivalueGrouping = deployProperties.featureFlags().enableNestedMultivalueGrouping();
postFilterThreshold = compiled.getPostFilterThreshold();
approximateThreshold = compiled.getApproximateThreshold();
+ targetHitsMaxAdjustmentFactor = compiled.getTargetHitsMaxAdjustmentFactor();
keepRankCount = compiled.getKeepRankCount();
rankScoreDropLimit = compiled.getRankScoreDropLimit();
ignoreDefaultRankFeatures = compiled.getIgnoreDefaultRankFeatures();
@@ -429,6 +431,9 @@ public class RawRankProfile implements RankProfilesConfig.Producer {
if (approximateThreshold.isPresent()) {
properties.add(new Pair<>("vespa.matching.global_filter.lower_limit", String.valueOf(approximateThreshold.getAsDouble())));
}
+ if (targetHitsMaxAdjustmentFactor.isPresent()) {
+ properties.add(new Pair<>("vespa.matching.nns.target_hits_max_adjustment_factor", String.valueOf(targetHitsMaxAdjustmentFactor.getAsDouble())));
+ }
if (matchPhaseSettings != null) {
properties.add(new Pair<>("vespa.matchphase.degradation.attribute", matchPhaseSettings.getAttribute()));
properties.add(new Pair<>("vespa.matchphase.degradation.ascendingorder", matchPhaseSettings.getAscending() + ""));
diff --git a/config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java b/config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java
index 2d826e164b7..4c7e7eb28f4 100644
--- a/config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java
+++ b/config-model/src/main/java/com/yahoo/schema/document/ImmutableSDField.java
@@ -101,4 +101,5 @@ public interface ImmutableSDField {
boolean existsIndex(String name);
SummaryField getSummaryField(String name);
boolean hasIndex();
+
}
diff --git a/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java b/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java
index bdecf6332a0..c25d393c8bf 100644
--- a/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java
+++ b/config-model/src/main/java/com/yahoo/schema/parser/ConvertParsedRanking.java
@@ -65,6 +65,8 @@ public class ConvertParsedRanking {
(value -> profile.setPostFilterThreshold(value));
parsed.getApproximateThreshold().ifPresent
(value -> profile.setApproximateThreshold(value));
+ parsed.getTargetHitsMaxAdjustmentFactor().ifPresent
+ (value -> profile.setTargetHitsMaxAdjustmentFactor(value));
parsed.getKeepRankCount().ifPresent
(value -> profile.setKeepRankCount(value));
parsed.getMinHitsPerThread().ifPresent
diff --git a/config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java b/config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java
index 2809ee0c633..1d06b993cdc 100644
--- a/config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java
+++ b/config-model/src/main/java/com/yahoo/schema/parser/ParsedRankProfile.java
@@ -29,6 +29,7 @@ class ParsedRankProfile extends ParsedBlock {
private Double termwiseLimit = null;
private Double postFilterThreshold = null;
private Double approximateThreshold = null;
+ private Double targetHitsMaxAdjustmentFactor = null;
private final List<FeatureList> matchFeatures = new ArrayList<>();
private final List<FeatureList> rankFeatures = new ArrayList<>();
private final List<FeatureList> summaryFeatures = new ArrayList<>();
@@ -65,6 +66,7 @@ class ParsedRankProfile extends ParsedBlock {
Optional<Double> getTermwiseLimit() { return Optional.ofNullable(this.termwiseLimit); }
Optional<Double> getPostFilterThreshold() { return Optional.ofNullable(this.postFilterThreshold); }
Optional<Double> getApproximateThreshold() { return Optional.ofNullable(this.approximateThreshold); }
+ Optional<Double> getTargetHitsMaxAdjustmentFactor() { return Optional.ofNullable(this.targetHitsMaxAdjustmentFactor); }
List<FeatureList> getMatchFeatures() { return List.copyOf(this.matchFeatures); }
List<FeatureList> getRankFeatures() { return List.copyOf(this.rankFeatures); }
List<FeatureList> getSummaryFeatures() { return List.copyOf(this.summaryFeatures); }
@@ -231,4 +233,9 @@ class ParsedRankProfile extends ParsedBlock {
this.approximateThreshold = threshold;
}
+ void setTargetHitsMaxAdjustmentFactor(double factor) {
+ verifyThat(targetHitsMaxAdjustmentFactor == null, "already has target-hits-max-adjustment-factor");
+ this.targetHitsMaxAdjustmentFactor = factor;
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java b/config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java
index 88e84d5289f..985ec8653c7 100644
--- a/config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java
+++ b/config-model/src/main/java/com/yahoo/schema/processing/IndexingInputs.java
@@ -96,11 +96,11 @@ public class IndexingInputs extends Processor {
@Override
protected void doVisit(Expression exp) {
if ( ! (exp instanceof InputExpression)) return;
- String inputField = ((InputExpression)exp).getFieldName();
- if (schema.getField(inputField).hasFullIndexingDocprocRights()) return;
-
- fail(schema, field, "Indexing script refers to field '" + inputField + "' which does not exist " +
- "in document type '" + schema.getDocument().getName() + "', and is not a mutable attribute.");
+ var referencedFieldName = ((InputExpression)exp).getFieldName();
+ var referencedField = schema.getField(referencedFieldName);
+ if (referencedField == null || ! referencedField.hasFullIndexingDocprocRights())
+ fail(schema, field, "Indexing script refers to field '" + referencedFieldName +
+ "' which is neither a field in " + schema.getDocument() + " nor a mutable attribute");
}
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java
index 7d7d0007b5e..2a0839e209d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/Logserver.java
@@ -15,7 +15,6 @@ import java.util.Optional;
*/
public class Logserver extends AbstractService {
- private static final long serialVersionUID = 1L;
private static final String logArchiveDir = "$ROOT/logs/vespa/logarchive";
private String compressionType = "gzip";
@@ -32,7 +31,10 @@ public class Logserver extends AbstractService {
@Override
public void initService(DeployState deployState) {
super.initService(deployState);
- this.compressionType = deployState.featureFlags().logFileCompressionAlgorithm("gzip");
+ // TODO Vespa 9: Change default to zstd everywhere
+ this.compressionType = deployState.isHosted()
+ ? deployState.featureFlags().logFileCompressionAlgorithm("zstd")
+ : deployState.featureFlags().logFileCompressionAlgorithm("gzip");
}
/**
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidator.java b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidator.java
new file mode 100644
index 00000000000..842405e68f9
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidator.java
@@ -0,0 +1,30 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation;
+
+import com.yahoo.config.model.ConfigModelContext;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.vespa.model.VespaModel;
+
+import java.util.logging.Logger;
+
+/**
+ * Validator to check that only the infrastructure tenant can use a non-default application type
+ *
+ * @author mortent
+ */
+public class InfrastructureDeploymentValidator extends Validator {
+
+ private static final Logger log = Logger.getLogger(InfrastructureDeploymentValidator.class.getName());
+
+ @Override
+ public void validate(VespaModel model, DeployState deployState) {
+ // Allow the internally defined tenant owning all infrastructure applications
+ if (ApplicationId.global().tenant().equals(model.applicationPackage().getApplicationId().tenant())) return;
+ ConfigModelContext.ApplicationType applicationType = model.getAdmin().getApplicationType();
+ if (applicationType != ConfigModelContext.ApplicationType.DEFAULT) {
+ log.warning("Application %s is not allowed to use application type %s".formatted(model.applicationPackage().getApplicationId().toFullString(), applicationType));
+ throw new IllegalArgumentException("Tenant is not allowed to override application type");
+ }
+ }
+}
diff --git a/config-model/src/main/javacc/SchemaParser.jj b/config-model/src/main/javacc/SchemaParser.jj
index b2cb258c0ab..42eeabb5ac7 100644
--- a/config-model/src/main/javacc/SchemaParser.jj
+++ b/config-model/src/main/javacc/SchemaParser.jj
@@ -326,6 +326,7 @@ TOKEN :
| < TERMWISE_LIMIT: "termwise-limit" >
| < POST_FILTER_THRESHOLD: "post-filter-threshold" >
| < APPROXIMATE_THRESHOLD: "approximate-threshold" >
+| < TARGET_HITS_MAX_ADJUSTMENT_FACTOR: "target-hits-max-adjustment-factor" >
| < KEEP_RANK_COUNT: "keep-rank-count" >
| < RANK_SCORE_DROP_LIMIT: "rank-score-drop-limit" >
| < CONSTANTS: "constants" >
@@ -1727,6 +1728,7 @@ void rankProfileItem(ParsedSchema schema, ParsedRankProfile profile) : { }
| termwiseLimit(profile)
| postFilterThreshold(profile)
| approximateThreshold(profile)
+ | targetHitsMaxAdjustmentFactor(profile)
| rankFeatures(profile)
| rankProperties(profile)
| secondPhase(profile)
@@ -2190,6 +2192,19 @@ void approximateThreshold(ParsedRankProfile profile) :
}
/**
+ * This rule consumes a target-hits-max-adjustment-factor statement for a rank profile.
+ *
+ * @param profile the rank profile to modify
+ */
+void targetHitsMaxAdjustmentFactor(ParsedRankProfile profile) :
+{
+ double factor;
+}
+{
+ (<TARGET_HITS_MAX_ADJUSTMENT_FACTOR> <COLON> factor = floatValue()) { profile.setTargetHitsMaxAdjustmentFactor(factor); }
+}
+
+/**
* Consumes a rank-properties block of a rank profile. There
* is a little trick within this rule to allow the final rank property
* to skip the terminating newline token.
@@ -2641,6 +2656,7 @@ String identifierWithDash() :
| <SECOND_PHASE>
| <STRUCT_FIELD>
| <SUMMARY_TO>
+ | <TARGET_HITS_MAX_ADJUSTMENT_FACTOR>
| <TERMWISE_LIMIT>
| <UPPER_BOUND>
) { return token.image; }
diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc
index bb0e39a41ab..dff24745778 100644
--- a/config-model/src/main/resources/schema/content.rnc
+++ b/config-model/src/main/resources/schema/content.rnc
@@ -6,11 +6,11 @@ include "searchchains.rnc"
Redundancy = element redundancy {
attribute reply-after { xsd:nonNegativeInteger }? &
- xsd:nonNegativeInteger
+ xsd:integer { minInclusive = "1" maxInclusive = "65534" }
}
MinRedundancy = element min-redundancy {
- xsd:nonNegativeInteger
+ xsd:integer { minInclusive = "1" maxInclusive = "65534" }
}
DistributionType = element distribution {
diff --git a/config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java b/config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java
index 85225f0d255..380b458ea8c 100644
--- a/config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/RankProfileTestCase.java
@@ -459,17 +459,9 @@ public class RankProfileTestCase extends AbstractSchemaTestCase {
}
private void verifyApproximateNearestNeighborThresholdSettings(Double postFilterThreshold, Double approximateThreshold) throws ParseException {
- var rankProfileRegistry = new RankProfileRegistry();
- var props = new TestProperties();
- var queryProfileRegistry = new QueryProfileRegistry();
- var builder = new ApplicationBuilder(rankProfileRegistry, queryProfileRegistry, props);
- builder.addSchema(createSDWithRankProfileThresholds(postFilterThreshold, approximateThreshold));
- builder.build(true);
-
- var schema = builder.getSchema();
- var rankProfile = rankProfileRegistry.get(schema, "my_profile");
- var rawRankProfile = new RawRankProfile(rankProfile, new LargeRankingExpressions(new MockFileRegistry()), queryProfileRegistry,
- new ImportedMlModels(), new AttributeFields(schema), props);
+ var rp = createRankProfile(postFilterThreshold, approximateThreshold, null);
+ var rankProfile = rp.getFirst();
+ var rawRankProfile = rp.getSecond();
if (postFilterThreshold != null) {
assertEquals((double)postFilterThreshold, rankProfile.getPostFilterThreshold().getAsDouble(), 0.000001);
@@ -488,13 +480,52 @@ public class RankProfileTestCase extends AbstractSchemaTestCase {
}
}
- private String createSDWithRankProfileThresholds(Double postFilterThreshold, Double approximateThreshold) {
+ @Test
+ void target_hits_max_adjustment_factor_is_configurable() throws ParseException {
+ verifyTargetHitsMaxAdjustmentFactor(null);
+ verifyTargetHitsMaxAdjustmentFactor(2.0);
+ }
+
+ private void verifyTargetHitsMaxAdjustmentFactor(Double targetHitsMaxAdjustmentFactor) throws ParseException {
+ var rp = createRankProfile(null, null, targetHitsMaxAdjustmentFactor);
+ var rankProfile = rp.getFirst();
+ var rawRankProfile = rp.getSecond();
+ if (targetHitsMaxAdjustmentFactor != null) {
+ assertEquals((double)targetHitsMaxAdjustmentFactor, rankProfile.getTargetHitsMaxAdjustmentFactor().getAsDouble(), 0.000001);
+ assertEquals(String.valueOf(targetHitsMaxAdjustmentFactor), findProperty(rawRankProfile.configProperties(), "vespa.matching.nns.target_hits_max_adjustment_factor").get());
+ } else {
+ assertTrue(rankProfile.getTargetHitsMaxAdjustmentFactor().isEmpty());
+ assertFalse(findProperty(rawRankProfile.configProperties(), "vespa.matching.nns.target_hits_max_adjustment_factor").isPresent());
+ }
+ }
+
+ private Pair<RankProfile, RawRankProfile> createRankProfile(Double postFilterThreshold,
+ Double approximateThreshold,
+ Double targetHitsMaxAdjustmentFactor) throws ParseException {
+ var rankProfileRegistry = new RankProfileRegistry();
+ var props = new TestProperties();
+ var queryProfileRegistry = new QueryProfileRegistry();
+ var builder = new ApplicationBuilder(rankProfileRegistry, queryProfileRegistry, props);
+ builder.addSchema(createSDWithRankProfile(postFilterThreshold, approximateThreshold, targetHitsMaxAdjustmentFactor));
+ builder.build(true);
+
+ var schema = builder.getSchema();
+ var rankProfile = rankProfileRegistry.get(schema, "my_profile");
+ var rawRankProfile = new RawRankProfile(rankProfile, new LargeRankingExpressions(new MockFileRegistry()), queryProfileRegistry,
+ new ImportedMlModels(), new AttributeFields(schema), props);
+ return new Pair<>(rankProfile, rawRankProfile);
+ }
+
+ private String createSDWithRankProfile(Double postFilterThreshold,
+ Double approximateThreshold,
+ Double targetHitsMaxAdjustmentFactor) {
return joinLines(
"search test {",
" document test {}",
" rank-profile my_profile {",
- (postFilterThreshold != null ? (" post-filter-threshold: " + postFilterThreshold) : ""),
- (approximateThreshold != null ? (" approximate-threshold: " + approximateThreshold) : ""),
+ (postFilterThreshold != null ? (" post-filter-threshold: " + postFilterThreshold) : ""),
+ (approximateThreshold != null ? (" approximate-threshold: " + approximateThreshold) : ""),
+ (targetHitsMaxAdjustmentFactor != null ? (" target-hits-max-adjustment-factor: " + targetHitsMaxAdjustmentFactor) : ""),
" }",
"}");
}
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java b/config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java
index 893ee3b1ea4..d420623f233 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/IndexingInputsTestCase.java
@@ -17,29 +17,29 @@ public class IndexingInputsTestCase {
void requireThatExtraFieldInputExtraFieldThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_extra_field.sd",
"For schema 'indexing_extra_field_input_extra_field', field 'bar': Indexing script refers " +
- "to field 'bar' which does not exist in document type " +
- "'indexing_extra_field_input_extra_field', and is not a mutable attribute.");
+ "to field 'bar' which is neither a field in document type " +
+ "'indexing_extra_field_input_extra_field' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputImplicitThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_implicit.sd",
"For schema 'indexing_extra_field_input_implicit', field 'foo': Indexing script refers to " +
- "field 'foo' which does not exist in document type 'indexing_extra_field_input_implicit', and is not a mutable attribute.");
+ "field 'foo' which is neither a field in document type 'indexing_extra_field_input_implicit' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputNullThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_null.sd",
"For schema 'indexing_extra_field_input_null', field 'foo': Indexing script refers to field " +
- "'foo' which does not exist in document type 'indexing_extra_field_input_null', and is not a mutable attribute.");
+ "'foo' which is neither a field in document type 'indexing_extra_field_input_null' nor a mutable attribute");
}
@Test
void requireThatExtraFieldInputSelfThrows() throws IOException, ParseException {
assertBuildFails("src/test/examples/indexing_extra_field_input_self.sd",
"For schema 'indexing_extra_field_input_self', field 'foo': Indexing script refers to field " +
- "'foo' which does not exist in document type 'indexing_extra_field_input_self', and is not a mutable attribute.");
+ "'foo' which is neither a field in document type 'indexing_extra_field_input_self' nor a mutable attribute");
}
}
diff --git a/config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java b/config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java
index 2f53dba7bb4..8db8f0710a0 100644
--- a/config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java
+++ b/config-model/src/test/java/com/yahoo/schema/processing/RankingExpressionWithOnnxTestCase.java
@@ -4,6 +4,8 @@ package com.yahoo.schema.processing;
import com.yahoo.config.application.api.ApplicationFile;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.test.MockApplicationPackage;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.TenantName;
import com.yahoo.io.IOUtils;
import com.yahoo.io.reader.NamedReader;
import com.yahoo.path.Path;
@@ -400,7 +402,7 @@ public class RankingExpressionWithOnnxTestCase {
StoringApplicationPackage(Path applicationPackageWritableRoot, String queryProfile, String queryProfileType) {
super(new File(applicationPackageWritableRoot.toString()),
null, null, List.of(), Map.of(), null,
- null, null, false, queryProfile, queryProfileType);
+ null, null, false, queryProfile, queryProfileType, TenantName.defaultName());
}
@Override
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidatorTest.java
new file mode 100644
index 00000000000..0281d5cd6ee
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/InfrastructureDeploymentValidatorTest.java
@@ -0,0 +1,48 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.application.validation;
+
+import com.yahoo.config.model.NullConfigModelRegistry;
+import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.model.test.MockApplicationPackage;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.model.VespaModel;
+import org.junit.jupiter.api.Test;
+import org.xml.sax.SAXException;
+
+import java.io.IOException;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public class InfrastructureDeploymentValidatorTest {
+
+ @Test
+ public void allows_infrastructure_deployments() {
+ assertDoesNotThrow(() -> runValidator(ApplicationId.global().tenant()));
+ }
+
+ @Test
+ public void prevents_non_infrastructure_deployments() {
+ IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> runValidator(TenantName.defaultName()));
+ assertEquals("Tenant is not allowed to override application type", exception.getMessage());
+ }
+
+ private void runValidator(TenantName tenantName) throws IOException, SAXException {
+ String services = """
+ <services version='1.0' application-type="hosted-infrastructure">
+ <container id='default' version='1.0' />
+ </services>
+ """;
+ var app = new MockApplicationPackage.Builder()
+ .withTenantname(tenantName)
+ .withServices(services)
+ .build();
+ var deployState = new DeployState.Builder().applicationPackage(app).build();
+ var model = new VespaModel(new NullConfigModelRegistry(), deployState);
+
+ var validator = new InfrastructureDeploymentValidator();
+ validator.validate(model, deployState);
+ }
+}
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java
index 627a15aab65..eab1368a2a1 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainer.java
@@ -3,24 +3,29 @@ package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.io.IOUtils;
+import com.yahoo.vespa.config.util.ConfigUtils;
import com.yahoo.vespa.filedistribution.FileDownloader;
+
import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
+import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;
import java.time.Duration;
import java.time.Instant;
-import java.util.Arrays;
+import java.util.Comparator;
import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
-import java.util.stream.Collectors;
import static java.nio.file.Files.readAttributes;
+import static java.util.logging.Level.INFO;
/**
* Deletes file references and url downloads that have not been used for some time.
@@ -35,27 +40,43 @@ class FileReferencesAndDownloadsMaintainer implements Runnable {
private static final File defaultUrlDownloadDir = UrlDownloadRpcServer.downloadDir;
private static final File defaultFileReferencesDownloadDir = FileDownloader.defaultDownloadDirectory;
private static final Duration defaultDurationToKeepFiles = Duration.ofDays(30);
+ private static final int defaultOutdatedFilesToKeep = 20;
private static final Duration interval = Duration.ofMinutes(1);
- private final ScheduledExecutorService executor =
- new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("file references and downloads cleanup"));
+ private final Optional<ScheduledExecutorService> executor;
private final File urlDownloadDir;
private final File fileReferencesDownloadDir;
private final Duration durationToKeepFiles;
+ private final int outDatedFilesToKeep;
FileReferencesAndDownloadsMaintainer() {
- this(defaultFileReferencesDownloadDir, defaultUrlDownloadDir, keepFileReferencesDuration());
+ this(defaultFileReferencesDownloadDir, defaultUrlDownloadDir, keepFileReferencesDuration(),
+ outDatedFilesToKeep(), configServers());
}
- FileReferencesAndDownloadsMaintainer(File fileReferencesDownloadDir, File urlDownloadDir, Duration durationToKeepFiles) {
+ FileReferencesAndDownloadsMaintainer(File fileReferencesDownloadDir,
+ File urlDownloadDir,
+ Duration durationToKeepFiles,
+ int outdatedFilesToKeep,
+ List<String> configServers) {
this.fileReferencesDownloadDir = fileReferencesDownloadDir;
this.urlDownloadDir = urlDownloadDir;
this.durationToKeepFiles = durationToKeepFiles;
- executor.scheduleAtFixedRate(this, interval.toSeconds(), interval.toSeconds(), TimeUnit.SECONDS);
+ this.outDatedFilesToKeep = outdatedFilesToKeep;
+ // Do not run on config servers
+ if (configServers.contains(ConfigUtils.getCanonicalHostName())) {
+ log.log(INFO, "Not running maintainer, since this is on a config server host");
+ executor = Optional.empty();
+ } else {
+ executor = Optional.of(new ScheduledThreadPoolExecutor(1, new DaemonThreadFactory("file references and downloads cleanup")));
+ executor.get().scheduleAtFixedRate(this, interval.toSeconds(), interval.toSeconds(), TimeUnit.SECONDS);
+ }
}
@Override
public void run() {
+ if (executor.isEmpty()) return;
+
try {
deleteUnusedFiles(fileReferencesDownloadDir);
deleteUnusedFiles(urlDownloadDir);
@@ -65,42 +86,62 @@ class FileReferencesAndDownloadsMaintainer implements Runnable {
}
public void close() {
- executor.shutdownNow();
- try {
- if ( ! executor.awaitTermination(10, TimeUnit.SECONDS))
- throw new RuntimeException("Unable to shutdown " + executor + " before timeout");
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
+ executor.ifPresent(ex -> {
+ ex.shutdownNow();
+ try {
+ if (! ex.awaitTermination(10, TimeUnit.SECONDS))
+ throw new RuntimeException("Unable to shutdown " + ex + " before timeout");
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ });
}
private void deleteUnusedFiles(File directory) {
- Instant deleteNotUsedSinceInstant = Instant.now().minus(durationToKeepFiles);
- Set<String> filesOnDisk = new HashSet<>();
+
File[] files = directory.listFiles();
- if (files != null)
- filesOnDisk.addAll(Arrays.stream(files).map(File::getName).collect(Collectors.toSet()));
- log.log(Level.FINE, () -> "Files on disk (in " + directory + "): " + filesOnDisk);
+ if (files == null) return;
+
+ List<File> filesToDelete = filesThatCanBeDeleted(files);
+ filesToDelete.forEach(fileReference -> {
+ if (IOUtils.recursiveDeleteDir(fileReference))
+ log.log(Level.FINE, "Deleted " + fileReference.getAbsolutePath());
+ else
+ log.log(Level.WARNING, "Could not delete " + fileReference.getAbsolutePath());
+ });
+ }
- Set<String> filesToDelete = filesOnDisk
+ private List<File> filesThatCanBeDeleted(File[] files) {
+ Instant deleteNotUsedSinceInstant = Instant.now().minus(durationToKeepFiles);
+
+ Set<File> filesOnDisk = new HashSet<>(List.of(files));
+ log.log(Level.FINE, () -> "Files on disk: " + filesOnDisk);
+ int deleteCount = Math.max(0, filesOnDisk.size() - outDatedFilesToKeep);
+ var canBeDeleted = filesOnDisk
.stream()
- .filter(fileReference -> isFileLastModifiedBefore(new File(directory, fileReference), deleteNotUsedSinceInstant))
- .collect(Collectors.toSet());
- if (filesToDelete.size() > 0) {
- log.log(Level.INFO, "Files that can be deleted in " + directory + " (not used since " + deleteNotUsedSinceInstant + "): " + filesToDelete);
- filesToDelete.forEach(fileReference -> {
- File file = new File(directory, fileReference);
- if (!IOUtils.recursiveDeleteDir(file))
- log.log(Level.WARNING, "Could not delete " + file.getAbsolutePath());
- });
- }
+ .peek(file -> log.log(Level.FINE, () -> file + ":" + fileLastModifiedTime(file.toPath())))
+ .filter(fileReference -> isFileLastModifiedBefore(fileReference, deleteNotUsedSinceInstant))
+ .sorted(Comparator.comparing(fileReference -> fileLastModifiedTime(fileReference.toPath())))
+ .toList();
+
+ // Make sure we keep some files
+ canBeDeleted = canBeDeleted.subList(0, Math.min(canBeDeleted.size(), deleteCount));
+ if (canBeDeleted.size() > 0)
+ log.log(INFO, "Files that can be deleted (not accessed since " + deleteNotUsedSinceInstant +
+ ", will also keep " + outDatedFilesToKeep +
+ " files no matter when last accessed): " + canBeDeleted);
+
+ return canBeDeleted;
}
private boolean isFileLastModifiedBefore(File fileReference, Instant instant) {
- BasicFileAttributes fileAttributes;
+ return fileLastModifiedTime(fileReference.toPath()).isBefore(instant);
+ }
+
+ private static Instant fileLastModifiedTime(Path fileReference) {
try {
- fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class);
- return fileAttributes.lastModifiedTime().toInstant().isBefore(instant);
+ BasicFileAttributes fileAttributes = readAttributes(fileReference, BasicFileAttributes.class);
+ return fileAttributes.lastModifiedTime().toInstant();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
@@ -114,4 +155,21 @@ class FileReferencesAndDownloadsMaintainer implements Runnable {
return defaultDurationToKeepFiles;
}
+ private static int outDatedFilesToKeep() {
+ String env = System.getenv("VESPA_KEEP_FILE_REFERENCES_COUNT");
+ if (env != null && !env.isEmpty())
+ return Integer.parseInt(env);
+ else
+ return defaultOutdatedFilesToKeep;
+ }
+
+ private static List<String> configServers() {
+ String env = System.getenv("VESPA_CONFIGSERVERS");
+ if (env == null || env.isEmpty())
+ return List.of(ConfigUtils.getCanonicalHostName());
+ else {
+ return List.of(env.split(","));
+ }
+ }
+
}
diff --git a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java
index fad021c0119..c41305b4dc8 100644
--- a/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java
+++ b/config-proxy/src/test/java/com/yahoo/vespa/config/proxy/filedistribution/FileReferencesAndDownloadsMaintainerTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.config.proxy.filedistribution;
import com.yahoo.io.IOUtils;
+import com.yahoo.vespa.config.util.ConfigUtils;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -10,6 +11,9 @@ import java.io.File;
import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.IntStream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -19,9 +23,12 @@ import static org.junit.jupiter.api.Assertions.assertNotNull;
*/
public class FileReferencesAndDownloadsMaintainerTest {
+ private static final Duration keepDuration = Duration.ofMinutes(1);
+ private static final int outDatedFilesToKeep = 9;
+
private File cachedFileReferences;
private File cachedDownloads;
- private FileReferencesAndDownloadsMaintainer cachedFilesMaintainer;
+ private FileReferencesAndDownloadsMaintainer maintainer;
@TempDir
public File tempFolder;
@@ -30,22 +37,70 @@ public class FileReferencesAndDownloadsMaintainerTest {
public void setup() throws IOException {
cachedFileReferences = newFolder(tempFolder, "cachedFileReferences");
cachedDownloads = newFolder(tempFolder, "cachedDownloads");
- cachedFilesMaintainer = new FileReferencesAndDownloadsMaintainer(cachedFileReferences, cachedDownloads, Duration.ofMinutes(1));
}
@Test
- void require_old_files_to_be_deleted() throws IOException {
+ void require_old_files_to_be_deleted() {
+ maintainer = new FileReferencesAndDownloadsMaintainer(cachedFileReferences, cachedDownloads, keepDuration, outDatedFilesToKeep,
+ List.of("host1"));
runMaintainerAndAssertFiles(0, 0);
- File fileReference = writeFile(cachedFileReferences, "fileReference");
- File download = writeFile(cachedDownloads, "download");
- runMaintainerAndAssertFiles(1, 1);
+ var fileReferences = writeFiles(20);
+ var downloads = writeDownloads(21);
+ runMaintainerAndAssertFiles(20, 21);
+
+ updateLastModifiedTimestamp(0, 5, fileReferences, downloads);
+ runMaintainerAndAssertFiles(15, 16);
- updateLastModifiedTimeStamp(fileReference, Instant.now().minus(Duration.ofMinutes(10)));
- runMaintainerAndAssertFiles(0, 1);
+ updateLastModifiedTimestamp(6, 20, fileReferences, downloads);
+ // Should keep at least outDatedFilesToKeep file references and downloads even if there are more that are old
+ runMaintainerAndAssertFiles(outDatedFilesToKeep, outDatedFilesToKeep);
+ }
- updateLastModifiedTimeStamp(download, Instant.now().minus(Duration.ofMinutes(10)));
+ @Test
+ void require_no_files_deleted_when_running_on_config_server_host() {
+ maintainer = new FileReferencesAndDownloadsMaintainer(cachedFileReferences, cachedDownloads, keepDuration,
+ outDatedFilesToKeep, List.of(ConfigUtils.getCanonicalHostName()));
runMaintainerAndAssertFiles(0, 0);
+
+ var fileReferences = writeFiles(10);
+ var downloads = writeDownloads(10);
+ runMaintainerAndAssertFiles(10, 10);
+
+ updateLastModifiedTimestamp(0, 10, fileReferences, downloads);
+ runMaintainerAndAssertFiles(10, 10);
+ }
+
+ private void updateLastModifiedTimestamp(int startInclusive, int endExclusive, List<File> fileReferences, List<File> downloads) {
+ IntStream.range(startInclusive, endExclusive).forEach(i -> {
+ Instant instant = Instant.now().minus(keepDuration.plus(Duration.ofMinutes(1)).minus(Duration.ofSeconds(i)));
+ updateLastModifiedTimeStamp(fileReferences.get(i), instant);
+ updateLastModifiedTimeStamp(downloads.get(i), instant);
+ });
+ }
+
+ private List<File> writeFiles(int count) {
+ List<File> files = new ArrayList<>();
+ IntStream.range(0, count).forEach(i -> {
+ try {
+ files.add(writeFile(cachedFileReferences, "fileReference" + i));
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ return files;
+ }
+
+ private List<File> writeDownloads(int count) {
+ List<File> files = new ArrayList<>();
+ IntStream.range(0, count).forEach(i -> {
+ try {
+ files.add(writeFile(cachedDownloads, "download" + i));
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ });
+ return files;
}
private void updateLastModifiedTimeStamp(File file, Instant instant) {
@@ -55,7 +110,7 @@ public class FileReferencesAndDownloadsMaintainerTest {
}
private void runMaintainerAndAssertFiles(int fileReferenceCount, int downloadCount) {
- cachedFilesMaintainer.run();
+ maintainer.run();
File[] fileReferences = cachedFileReferences.listFiles();
assertNotNull(fileReferences);
assertEquals(fileReferenceCount, fileReferences.length);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
index 3502ece9cb7..c7e4022c668 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/ApplicationRepository.java
@@ -167,7 +167,6 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
ConfigserverConfig configserverConfig,
Orchestrator orchestrator,
TesterClient testerClient,
- Zone zone,
HealthCheckerProvider healthCheckers,
Metric metric,
SecretStore secretStore,
@@ -698,10 +697,10 @@ public class ApplicationRepository implements com.yahoo.config.provision.Deploye
Optional<String> applicationPackage = Optional.empty();
Optional<Session> session = getActiveSession(applicationId);
if (session.isPresent()) {
- FileReference applicationPackageReference = session.get().getApplicationPackageReference();
+ Optional<FileReference> applicationPackageReference = session.get().getApplicationPackageReference();
File downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig().fileReferencesDir()));
- if (applicationPackageReference != null && ! fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference))
- applicationPackage = Optional.of(applicationPackageReference.value());
+ if (applicationPackageReference.isPresent() && ! fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference.get()))
+ applicationPackage = Optional.of(applicationPackageReference.get().value());
}
return applicationPackage;
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
index 0acf32d79a7..efa62625159 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/deploy/ZooKeeperClient.java
@@ -96,8 +96,8 @@ public class ZooKeeperClient {
Path zkPath = getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SCHEMAS_DIR);
curator.create(zkPath);
// Ensures that ranking expressions and other files are also written
- writeDir(app.getFile(ApplicationPackage.SEARCH_DEFINITIONS_DIR), zkPath, true);
- writeDir(app.getFile(ApplicationPackage.SCHEMAS_DIR), zkPath, true);
+ writeDir(app.getFile(ApplicationPackage.SEARCH_DEFINITIONS_DIR), zkPath);
+ writeDir(app.getFile(ApplicationPackage.SCHEMAS_DIR), zkPath);
for (NamedReader sd : schemas) {
curator.set(zkPath.append(sd.getName()), Utf8.toBytes(com.yahoo.io.IOUtils.readAll(sd.getReader())));
sd.getReader().close();
@@ -105,7 +105,7 @@ public class ZooKeeperClient {
}
/**
- * Puts some of the application package files into ZK - see write(app).
+ * Writes some application package files into ZK - see write(app).
*
* @param app the application package to use as input.
* @throws java.io.IOException if not able to write to Zookeeper
@@ -118,45 +118,40 @@ public class ZooKeeperClient {
writeFile(app.getFile(Path.fromString(VALIDATION_OVERRIDES.getName())), getZooKeeperAppPath(USERAPP_ZK_SUBPATH));
writeDir(app.getFile(RULES_DIR),
getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(RULES_DIR),
- (path) -> path.getName().endsWith(ApplicationPackage.RULES_NAME_SUFFIX),
- true);
+ (path) -> path.getName().endsWith(ApplicationPackage.RULES_NAME_SUFFIX));
writeDir(app.getFile(QUERY_PROFILES_DIR),
getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(QUERY_PROFILES_DIR),
- xmlFilter, true);
+ xmlFilter);
writeDir(app.getFile(PAGE_TEMPLATES_DIR),
getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(PAGE_TEMPLATES_DIR),
- xmlFilter, true);
+ xmlFilter);
writeDir(app.getFile(Path.fromString(SEARCHCHAINS_DIR)),
getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SEARCHCHAINS_DIR),
- xmlFilter, true);
+ xmlFilter);
writeDir(app.getFile(Path.fromString(DOCPROCCHAINS_DIR)),
getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(DOCPROCCHAINS_DIR),
- xmlFilter, true);
+ xmlFilter);
writeDir(app.getFile(Path.fromString(ROUTINGTABLES_DIR)),
getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(ROUTINGTABLES_DIR),
- xmlFilter, true);
+ xmlFilter);
writeDir(app.getFile(MODELS_GENERATED_REPLICATED_DIR),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(MODELS_GENERATED_REPLICATED_DIR),
- true);
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(MODELS_GENERATED_REPLICATED_DIR));
writeDir(app.getFile(SECURITY_DIR),
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SECURITY_DIR),
- true);
+ getZooKeeperAppPath(USERAPP_ZK_SUBPATH).append(SECURITY_DIR));
}
- private void writeDir(ApplicationFile file, Path zooKeeperAppPath, boolean recurse) throws IOException {
- writeDir(file, zooKeeperAppPath, (__) -> true, recurse);
+ private void writeDir(ApplicationFile file, Path zooKeeperAppPath) throws IOException {
+ writeDir(file, zooKeeperAppPath, (__) -> true);
}
- private void writeDir(ApplicationFile dir, Path path, ApplicationFile.PathFilter filenameFilter, boolean recurse) throws IOException {
+ private void writeDir(ApplicationFile dir, Path path, ApplicationFile.PathFilter filenameFilter) throws IOException {
if ( ! dir.isDirectory()) return;
for (ApplicationFile file : listFiles(dir, filenameFilter)) {
String name = file.getPath().getName();
if (name.startsWith(".")) continue; //.svn , .git ...
if (file.isDirectory()) {
curator.create(path.append(name));
- if (recurse) {
- writeDir(file, path.append(name), filenameFilter, recurse);
- }
+ writeDir(file, path.append(name), filenameFilter);
} else {
writeFile(file, path);
}
@@ -202,9 +197,7 @@ public class ZooKeeperClient {
if (files == null || files.isEmpty()) {
curator.create(getZooKeeperAppPath(USERAPP_ZK_SUBPATH + "/" + userInclude));
}
- writeDir(dir,
- getZooKeeperAppPath(USERAPP_ZK_SUBPATH + "/" + userInclude),
- xmlFilter, true);
+ writeDir(dir, getZooKeeperAppPath(USERAPP_ZK_SUBPATH + "/" + userInclude), xmlFilter);
}
}
@@ -249,7 +242,7 @@ public class ZooKeeperClient {
.forEach(path -> curator.delete(getZooKeeperAppPath(path)));
} catch (Exception e) {
logger.log(Level.WARNING, "Could not clean up in zookeeper: " + Exceptions.toMessageString(e));
- //Might be called in an exception handler before re-throw, so do not throw here.
+ // Might be called in an exception handler before re-throw, so do not throw here.
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java
index da18c4e4fcc..6fe133958f5 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDirectory.java
@@ -24,12 +24,14 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;
import java.time.Clock;
+import java.util.Optional;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.logging.Level;
import java.util.logging.Logger;
import static com.yahoo.yolean.Exceptions.uncheck;
+import static java.util.logging.Level.INFO;
/**
* Global file directory, holding files for file distribution for all deployed applications.
@@ -40,7 +42,6 @@ public class FileDirectory extends AbstractComponent {
private static final Logger log = Logger.getLogger(FileDirectory.class.getName());
private final Locks<FileReference> locks = new Locks<>(1, TimeUnit.MINUTES);
-
private final File root;
@Inject
@@ -67,7 +68,7 @@ public class FileDirectory extends AbstractComponent {
}
}
- static private class Filter implements FilenameFilter {
+ private static class Filter implements FilenameFilter {
@Override
public boolean accept(File dir, String name) {
return !".".equals(name) && !"..".equals(name) ;
@@ -78,17 +79,23 @@ public class FileDirectory extends AbstractComponent {
return root.getAbsolutePath() + "/" + ref.value();
}
- public File getFile(FileReference reference) {
+ public Optional<File> getFile(FileReference reference) {
ensureRootExist();
File dir = new File(getPath(reference));
- if (!dir.exists())
- throw new IllegalArgumentException("File reference '" + reference.value() + "' with absolute path '" + dir.getAbsolutePath() + "' does not exist.");
- if (!dir.isDirectory())
- throw new IllegalArgumentException("File reference '" + reference.value() + "' with absolute path '" + dir.getAbsolutePath() + "' is not a directory.");
- File [] files = dir.listFiles(new Filter());
- if (files == null || files.length == 0)
- throw new IllegalArgumentException("File reference '" + reference.value() + "' with absolute path '" + dir.getAbsolutePath() + " does not contain any files");
- return files[0];
+ if (!dir.exists()) {
+ log.log(INFO, "File reference '" + reference.value() + "' ('" + dir.getAbsolutePath() + "') does not exist.");
+ return Optional.empty();
+ }
+ if (!dir.isDirectory()) {
+ log.log(INFO, "File reference '" + reference.value() + "' ('" + dir.getAbsolutePath() + "') is not a directory.");
+ return Optional.empty();
+ }
+ File[] files = dir.listFiles(new Filter());
+ if (files == null || files.length == 0) {
+ log.log(INFO, "File reference '" + reference.value() + "' ('" + dir.getAbsolutePath() + "') does not contain any files");
+ return Optional.empty();
+ }
+ return Optional.of(files[0]);
}
public File getRoot() { return root; }
@@ -136,7 +143,7 @@ public class FileDirectory extends AbstractComponent {
private void deleteDirRecursively(File dir) {
log.log(Level.FINE, "Will delete dir " + dir);
if ( ! IOUtils.recursiveDeleteDir(dir))
- log.log(Level.INFO, "Failed to delete " + dir);
+ log.log(INFO, "Failed to delete " + dir);
}
// Check if we should add file, it might already exist
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
index 57d57d16d2f..e45c3a8e380 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileServer.java
@@ -12,7 +12,6 @@ import com.yahoo.jrt.StringValue;
import com.yahoo.jrt.Supervisor;
import com.yahoo.jrt.Transport;
import com.yahoo.vespa.config.ConnectionPool;
-import com.yahoo.vespa.filedistribution.EmptyFileReferenceData;
import com.yahoo.vespa.filedistribution.FileDistributionConnectionPool;
import com.yahoo.vespa.filedistribution.FileDownloader;
import com.yahoo.vespa.filedistribution.FileReferenceCompressor;
@@ -20,14 +19,16 @@ import com.yahoo.vespa.filedistribution.FileReferenceData;
import com.yahoo.vespa.filedistribution.FileReferenceDownload;
import com.yahoo.vespa.filedistribution.LazyFileReferenceData;
import com.yahoo.vespa.filedistribution.LazyTemporaryStorageFileReferenceData;
-import com.yahoo.yolean.Exceptions;
+
import java.io.File;
import java.io.IOException;
+import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
+import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@@ -35,6 +36,10 @@ import java.util.logging.Level;
import java.util.logging.Logger;
import static com.yahoo.vespa.config.server.filedistribution.FileDistributionUtil.getOtherConfigServersInCluster;
+import static com.yahoo.vespa.config.server.filedistribution.FileServer.FileApiErrorCodes.NOT_FOUND;
+import static com.yahoo.vespa.config.server.filedistribution.FileServer.FileApiErrorCodes.OK;
+import static com.yahoo.vespa.config.server.filedistribution.FileServer.FileApiErrorCodes.TIMEOUT;
+import static com.yahoo.vespa.config.server.filedistribution.FileServer.FileApiErrorCodes.TRANSFER_FAILED;
import static com.yahoo.vespa.filedistribution.FileReferenceData.CompressionType;
import static com.yahoo.vespa.filedistribution.FileReferenceData.CompressionType.gzip;
import static com.yahoo.vespa.filedistribution.FileReferenceData.Type;
@@ -54,10 +59,11 @@ public class FileServer {
private final List<CompressionType> compressionTypes; // compression types to use, in preferred order
// TODO: Move to filedistribution module, so that it can be used by both clients and servers
- private enum FileApiErrorCodes {
+ enum FileApiErrorCodes {
OK(0, "OK"),
NOT_FOUND(1, "File reference not found"),
- TIMEOUT(2, "Timeout");
+ TIMEOUT(2, "Timeout"),
+ TRANSFER_FAILED(3, "Failed transferring file");
private final int code;
private final String description;
FileApiErrorCodes(int code, String description) {
@@ -103,40 +109,33 @@ public class FileServer {
}
private boolean hasFile(FileReference reference) {
- try {
- return fileDirectory.getFile(reference).exists();
- } catch (IllegalArgumentException e) {
- log.log(Level.FINE, () -> "Failed locating " + reference + ": " + e.getMessage());
- }
+ Optional<File> file = fileDirectory.getFile(reference);
+ if (file.isPresent())
+ return file.get().exists();
+
+ log.log(Level.FINE, () -> "Failed locating " + reference);
return false;
}
FileDirectory getRootDir() { return fileDirectory; }
- void startFileServing(FileReference reference, Receiver target, Set<CompressionType> acceptedCompressionTypes) {
- if ( ! fileDirectory.getFile(reference).exists()) return;
-
- File file = this.fileDirectory.getFile(reference);
- log.log(Level.FINE, () -> "Start serving " + reference + " with file '" + file.getAbsolutePath() + "'");
- FileReferenceData fileData = EmptyFileReferenceData.empty(reference, file.getName());
- try {
- fileData = readFileReferenceData(reference, acceptedCompressionTypes);
+ void startFileServing(FileReference reference, File file, Receiver target, Set<CompressionType> acceptedCompressionTypes) {
+ var absolutePath = file.getAbsolutePath();
+ try (FileReferenceData fileData = fileReferenceData(reference, acceptedCompressionTypes, file)) {
+ log.log(Level.FINE, () -> "Start serving " + reference.value() + " with file '" + absolutePath + "'");
target.receive(fileData, new ReplayStatus(0, "OK"));
- log.log(Level.FINE, () -> "Done serving " + reference.value() + " with file '" + file.getAbsolutePath() + "'");
- } catch (IOException e) {
- String errorDescription = "For" + reference.value() + ": failed reading file '" + file.getAbsolutePath() + "'";
- log.warning(errorDescription + " for sending to '" + target.toString() + "'. " + e.getMessage());
- target.receive(fileData, new ReplayStatus(1, errorDescription));
+ log.log(Level.FINE, () -> "Done serving " + reference.value() + " with file '" + absolutePath + "'");
+ } catch (IOException ioe) {
+ throw new UncheckedIOException("For " + reference.value() + ": failed reading file '" + absolutePath + "'" +
+ " for sending to '" + target.toString() + "'. ", ioe);
} catch (Exception e) {
- log.log(Level.WARNING, "Failed serving " + reference + ": " + Exceptions.toMessageString(e));
- } finally {
- fileData.close();
+ throw new RuntimeException("Failed serving " + reference.value() + " to '" + target + "': ", e);
}
}
- private FileReferenceData readFileReferenceData(FileReference reference, Set<CompressionType> acceptedCompressionTypes) throws IOException {
- File file = this.fileDirectory.getFile(reference);
-
+ private FileReferenceData fileReferenceData(FileReference reference,
+ Set<CompressionType> acceptedCompressionTypes,
+ File file) throws IOException {
if (file.isDirectory()) {
Path tempFile = Files.createTempFile("filereferencedata", reference.value());
CompressionType compressionType = chooseCompressionType(acceptedCompressionTypes);
@@ -172,20 +171,21 @@ public class FileServer {
Set<CompressionType> acceptedCompressionTypes) {
if (Instant.now().isAfter(deadline)) {
log.log(Level.INFO, () -> "Deadline exceeded for request for file reference '" + fileReference + "' from " + client);
- return FileApiErrorCodes.TIMEOUT;
+ return TIMEOUT;
}
- boolean fileExists;
try {
var fileReferenceDownload = new FileReferenceDownload(fileReference, client, downloadFromOtherSourceIfNotFound);
- fileExists = hasFileDownloadIfNeeded(fileReferenceDownload);
- if (fileExists) startFileServing(fileReference, receiver, acceptedCompressionTypes);
- } catch (IllegalArgumentException e) {
- fileExists = false;
+ var file = getFileDownloadIfNeeded(fileReferenceDownload);
+ if (file.isEmpty()) return NOT_FOUND;
+
+ startFileServing(fileReference, file.get(), receiver, acceptedCompressionTypes);
+ } catch (Exception e) {
log.warning("Failed serving file reference '" + fileReference + "', request from " + client + " failed with: " + e.getMessage());
+ return TRANSFER_FAILED;
}
- return (fileExists ? FileApiErrorCodes.OK : FileApiErrorCodes.NOT_FOUND);
+ return OK;
}
/* Choose the first compression type (list is in preferred order) that matches an accepted compression type, or fail */
@@ -198,9 +198,11 @@ public class FileServer {
acceptedCompressionTypes + ", compression types server can use: " + compressionTypes);
}
- boolean hasFileDownloadIfNeeded(FileReferenceDownload fileReferenceDownload) {
+ public Optional<File> getFileDownloadIfNeeded(FileReferenceDownload fileReferenceDownload) {
FileReference fileReference = fileReferenceDownload.fileReference();
- if (hasFile(fileReference)) return true;
+ Optional<File> file = fileDirectory.getFile(fileReference);
+ if (file.isPresent())
+ return file;
if (fileReferenceDownload.downloadFromOtherSourceIfNotFound()) {
log.log(Level.FINE, "File not found, downloading from another source");
@@ -209,13 +211,13 @@ public class FileServer {
FileReferenceDownload newDownload = new FileReferenceDownload(fileReference,
fileReferenceDownload.client(),
false);
- boolean fileExists = downloader.getFile(newDownload).isPresent();
- if ( ! fileExists)
+ file = downloader.getFile(newDownload);
+ if (file.isEmpty())
log.log(Level.INFO, "Failed downloading '" + fileReferenceDownload + "'");
- return fileExists;
+ return file;
} else {
log.log(Level.FINE, "File not found, will not download from another source");
- return false;
+ return Optional.empty();
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
index 22ef6cc2547..031574bec77 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/maintenance/ApplicationPackageMaintainer.java
@@ -66,15 +66,15 @@ public class ApplicationPackageMaintainer extends ConfigServerMaintainer {
Optional<Session> session = applicationRepository.getActiveSession(applicationId);
if (session.isEmpty()) continue; // App might be deleted after call to listApplications() or not activated yet (bootstrap phase)
- FileReference appFileReference = session.get().getApplicationPackageReference();
- if (appFileReference != null) {
+ Optional<FileReference> appFileReference = session.get().getApplicationPackageReference();
+ if (appFileReference.isPresent()) {
long sessionId = session.get().getSessionId();
attempts++;
- if (!fileReferenceExistsOnDisk(downloadDirectory, appFileReference)) {
+ if (!fileReferenceExistsOnDisk(downloadDirectory, appFileReference.get())) {
log.fine(() -> "Downloading application package with file reference " + appFileReference +
" for " + applicationId + " (session " + sessionId + ")");
- FileReferenceDownload download = new FileReferenceDownload(appFileReference,
+ FileReferenceDownload download = new FileReferenceDownload(appFileReference.get(),
this.getClass().getSimpleName(),
false);
if (fileDownloader.getFile(download).isEmpty()) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java
index eee7d6ec63d..d26a22284c0 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java
@@ -518,7 +518,7 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
request.parameters().add(new StringValue(fileData.filename()));
request.parameters().add(new StringValue(fileData.type().name()));
request.parameters().add(new Int64Value(fileData.size()));
- // Only add paramter if not gzip, this is default and old clients will not handle the extra parameter
+ // Only add parameter if not gzip, this is default and old clients will not handle the extra parameter
if (fileData.compressionType() != CompressionType.gzip)
request.parameters().add(new StringValue(fileData.compressionType().name()));
return request;
@@ -532,7 +532,7 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
request.parameters().add(new DataValue(buf));
invokeRpcIfValidConnection(request);
if (request.isError()) {
- throw new IllegalArgumentException("Failed delivering reference '" + ref.value() + "' to " +
+ throw new IllegalArgumentException("Failed delivering part of reference '" + ref.value() + "' to " +
target.toString() + " with error: '" + request.errorMessage() + "'.");
} else {
if (request.returnValues().get(0).asInt32() != 0) {
@@ -550,7 +550,8 @@ public class RpcServer implements Runnable, ConfigActivationListener, TenantList
request.parameters().add(new StringValue(status.getDescription()));
invokeRpcIfValidConnection(request);
if (request.isError()) {
- throw new IllegalArgumentException("Failed delivering reference '" + fileData.fileReference().value() + "' with file '" + fileData.filename() + "' to " +
+ throw new IllegalArgumentException("Failed delivering eof for reference '" + fileData.fileReference().value() +
+ "' with file '" + fileData.filename() + "' to " +
target.toString() + " with error: '" + request.errorMessage() + "'.");
} else {
if (request.returnValues().get(0).asInt32() != 0) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java
index b627fe9ba3b..eb359f9ffc6 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/Session.java
@@ -94,14 +94,10 @@ public abstract class Session implements Comparable<Session> {
* @return log preamble
*/
public String logPre() {
- Optional<ApplicationId> applicationId;
+ Optional<ApplicationId> applicationId = getOptionalApplicationId();
+
// We might not be able to read application id from zookeeper
// e.g. when the app has been deleted. Use tenant name in that case.
- try {
- applicationId = Optional.of(getApplicationId());
- } catch (Exception e) {
- applicationId = Optional.empty();
- }
return applicationId
.filter(appId -> ! appId.equals(ApplicationId.defaultId()))
.map(TenantRepository::logPre)
@@ -116,46 +112,6 @@ public abstract class Session implements Comparable<Session> {
return sessionZooKeeperClient.readActivatedTime();
}
- public void setApplicationId(ApplicationId applicationId) {
- sessionZooKeeperClient.writeApplicationId(applicationId);
- }
-
- void setApplicationPackageReference(FileReference applicationPackageReference) {
- sessionZooKeeperClient.writeApplicationPackageReference(Optional.ofNullable(applicationPackageReference));
- }
-
- public void setVespaVersion(Version version) {
- sessionZooKeeperClient.writeVespaVersion(version);
- }
-
- public void setDockerImageRepository(Optional<DockerImage> dockerImageRepository) {
- sessionZooKeeperClient.writeDockerImageRepository(dockerImageRepository);
- }
-
- public void setAthenzDomain(Optional<AthenzDomain> athenzDomain) {
- sessionZooKeeperClient.writeAthenzDomain(athenzDomain);
- }
-
- public void setQuota(Optional<Quota> quota) {
- sessionZooKeeperClient.writeQuota(quota);
- }
-
- public void setTenantSecretStores(List<TenantSecretStore> tenantSecretStores) {
- sessionZooKeeperClient.writeTenantSecretStores(tenantSecretStores);
- }
-
- public void setOperatorCertificates(List<X509Certificate> operatorCertificates) {
- sessionZooKeeperClient.writeOperatorCertificates(operatorCertificates);
- }
-
- public void setCloudAccount(Optional<CloudAccount> cloudAccount) {
- sessionZooKeeperClient.writeCloudAccount(cloudAccount);
- }
-
- public void setDataplaneTokens(List<DataplaneToken> dataplaneTokens) {
- sessionZooKeeperClient.writeDataplaneTokens(dataplaneTokens);
- }
-
/** Returns application id read from ZooKeeper. Will throw RuntimeException if not found */
public ApplicationId getApplicationId() { return sessionZooKeeperClient.readApplicationId(); }
@@ -168,7 +124,7 @@ public abstract class Session implements Comparable<Session> {
}
}
- public FileReference getApplicationPackageReference() {return sessionZooKeeperClient.readApplicationPackageReference(); }
+ public Optional<FileReference> getApplicationPackageReference() { return sessionZooKeeperClient.readApplicationPackageReference(); }
public Optional<DockerImage> getDockerImageRepository() { return sessionZooKeeperClient.readDockerImageRepository(); }
@@ -202,6 +158,8 @@ public abstract class Session implements Comparable<Session> {
return sessionZooKeeperClient.readDataplaneTokens();
}
+ public SessionZooKeeperClient getSessionZooKeeperClient() { return sessionZooKeeperClient; }
+
private Transaction createSetStatusTransaction(Status status) {
return sessionZooKeeperClient.createWriteStatusTransaction(status);
}
@@ -226,7 +184,7 @@ public abstract class Session implements Comparable<Session> {
return getApplicationPackage().getFile(relativePath);
}
- Optional<ApplicationSet> applicationSet() { return Optional.empty(); };
+ Optional<ApplicationSet> applicationSet() { return Optional.empty(); }
private void markSessionEdited() {
setStatus(Session.Status.NEW);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionData.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionData.java
new file mode 100644
index 00000000000..1fb72e1253e
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionData.java
@@ -0,0 +1,87 @@
+package com.yahoo.vespa.config.server.session;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.FileReference;
+import com.yahoo.config.model.api.Quota;
+import com.yahoo.config.model.api.TenantSecretStore;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.AthenzDomain;
+import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.DataplaneToken;
+import com.yahoo.config.provision.DockerImage;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.config.server.tenant.DataplaneTokenSerializer;
+import com.yahoo.vespa.config.server.tenant.OperatorCertificateSerializer;
+import com.yahoo.vespa.config.server.tenant.TenantSecretStoreSerializer;
+
+import java.io.IOException;
+import java.security.cert.X509Certificate;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Data class for session information, typically parameters supplied in a deployment request that need
+ * to be persisted in ZooKeeper. These will be used when creating a new session based on an existing one.
+ *
+ * @author hmusum
+ */
+public record SessionData(ApplicationId applicationId,
+ Optional<FileReference> applicationPackageReference,
+ Version version,
+ Optional<DockerImage> dockerImageRepository,
+ Optional<AthenzDomain> athenzDomain,
+ Optional<Quota> quota,
+ List<TenantSecretStore> tenantSecretStores,
+ List<X509Certificate> operatorCertificates,
+ Optional<CloudAccount> cloudAccount,
+ List<DataplaneToken> dataplaneTokens) {
+
+ // NOTE: Any state added here MUST also be propagated in com.yahoo.vespa.config.server.deploy.Deployment.prepare()
+ static final String APPLICATION_ID_PATH = "applicationId";
+ static final String APPLICATION_PACKAGE_REFERENCE_PATH = "applicationPackageReference";
+ static final String VERSION_PATH = "version";
+ static final String CREATE_TIME_PATH = "createTime";
+ static final String DOCKER_IMAGE_REPOSITORY_PATH = "dockerImageRepository";
+ static final String ATHENZ_DOMAIN = "athenzDomain";
+ static final String QUOTA_PATH = "quota";
+ static final String TENANT_SECRET_STORES_PATH = "tenantSecretStores";
+ static final String OPERATOR_CERTIFICATES_PATH = "operatorCertificates";
+ static final String CLOUD_ACCOUNT_PATH = "cloudAccount";
+ static final String DATAPLANE_TOKENS_PATH = "dataplaneTokens";
+ static final String SESSION_DATA_PATH = "sessionData";
+
+ public byte[] toJson() {
+ try {
+ Slime slime = new Slime();
+ toSlime(slime.setObject());
+ return SlimeUtils.toJsonBytes(slime);
+ }
+ catch (IOException e) {
+ throw new RuntimeException("Serialization of session data to json failed", e);
+ }
+ }
+
+ private void toSlime(Cursor object) {
+ object.setString(APPLICATION_ID_PATH, applicationId.serializedForm());
+ applicationPackageReference.ifPresent(ref -> object.setString(APPLICATION_PACKAGE_REFERENCE_PATH, ref.value()));
+ object.setString(VERSION_PATH, version.toString());
+ object.setLong(CREATE_TIME_PATH, System.currentTimeMillis());
+ dockerImageRepository.ifPresent(image -> object.setString(DOCKER_IMAGE_REPOSITORY_PATH, image.asString()));
+ athenzDomain.ifPresent(domain -> object.setString(ATHENZ_DOMAIN, domain.value()));
+ quota.ifPresent(q -> q.toSlime(object.setObject(QUOTA_PATH)));
+
+ Cursor tenantSecretStoresArray = object.setArray(TENANT_SECRET_STORES_PATH);
+ TenantSecretStoreSerializer.toSlime(tenantSecretStores, tenantSecretStoresArray);
+
+ Cursor operatorCertificatesArray = object.setArray(OPERATOR_CERTIFICATES_PATH);
+ OperatorCertificateSerializer.toSlime(operatorCertificates, operatorCertificatesArray);
+
+ cloudAccount.ifPresent(account -> object.setString(CLOUD_ACCOUNT_PATH, account.value()));
+
+ Cursor dataplaneTokensArray = object.setArray(DATAPLANE_TOKENS_PATH);
+ DataplaneTokenSerializer.toSlime(dataplaneTokens, dataplaneTokensArray);
+ }
+
+}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
index ae87a0dd182..8d45ac7e8f1 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
@@ -36,6 +36,7 @@ import com.yahoo.vespa.config.server.ConfigServerSpec;
import com.yahoo.vespa.config.server.TimeoutBudget;
import com.yahoo.vespa.config.server.application.ApplicationSet;
import com.yahoo.vespa.config.server.configchange.ConfigChangeActions;
+import com.yahoo.vespa.config.server.deploy.ZooKeeperClient;
import com.yahoo.vespa.config.server.deploy.ZooKeeperDeployer;
import com.yahoo.vespa.config.server.filedistribution.FileDistributionFactory;
import com.yahoo.vespa.config.server.host.HostValidator;
@@ -49,7 +50,9 @@ import com.yahoo.vespa.config.server.tenant.EndpointCertificateMetadataStore;
import com.yahoo.vespa.config.server.tenant.EndpointCertificateRetriever;
import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.curator.Curator;
+import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.model.application.validation.BundleValidator;
import org.xml.sax.SAXException;
import javax.xml.parsers.ParserConfigurationException;
@@ -71,6 +74,8 @@ import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.zip.ZipException;
+import static com.yahoo.vespa.config.server.session.SessionZooKeeperClient.getSessionPath;
+
/**
* A SessionPreparer is responsible for preparing a session given an application package.
*
@@ -90,6 +95,7 @@ public class SessionPreparer {
private final SecretStore secretStore;
private final FlagSource flagSource;
private final ExecutorService executor;
+ private final BooleanFlag writeSessionData;
public SessionPreparer(ModelFactoryRegistry modelFactoryRegistry,
FileDistributionFactory fileDistributionFactory,
@@ -111,6 +117,7 @@ public class SessionPreparer {
this.secretStore = secretStore;
this.flagSource = flagSource;
this.executor = executor;
+ this.writeSessionData = Flags.WRITE_CONFIG_SERVER_SESSION_DATA_AS_ONE_BLOB.bindTo(flagSource);
}
ExecutorService getExecutor() { return executor; }
@@ -335,7 +342,7 @@ public class SessionPreparer {
writeStateToZooKeeper(sessionZooKeeperClient,
preprocessedApplicationPackage,
applicationId,
- filereference,
+ Optional.of(filereference),
dockerImageRepository,
vespaVersion,
logger,
@@ -377,7 +384,7 @@ public class SessionPreparer {
private void writeStateToZooKeeper(SessionZooKeeperClient zooKeeperClient,
ApplicationPackage applicationPackage,
ApplicationId applicationId,
- FileReference fileReference,
+ Optional<FileReference> fileReference,
Optional<DockerImage> dockerImageRepository,
Version vespaVersion,
DeployLogger deployLogger,
@@ -389,20 +396,22 @@ public class SessionPreparer {
List<X509Certificate> operatorCertificates,
Optional<CloudAccount> cloudAccount,
List<DataplaneToken> dataplaneTokens) {
- ZooKeeperDeployer zkDeployer = zooKeeperClient.createDeployer(deployLogger);
+ Path sessionPath = getSessionPath(applicationId.tenant(), zooKeeperClient.sessionId());
+ ZooKeeperDeployer zkDeployer = new ZooKeeperDeployer(new ZooKeeperClient(curator, deployLogger, sessionPath));
try {
zkDeployer.deploy(applicationPackage, fileRegistryMap, allocatedHosts);
- // Note: When changing the below you need to also change similar calls in SessionRepository.createSessionFromExisting()
- zooKeeperClient.writeApplicationId(applicationId);
- zooKeeperClient.writeApplicationPackageReference(Optional.of(fileReference));
- zooKeeperClient.writeVespaVersion(vespaVersion);
- zooKeeperClient.writeDockerImageRepository(dockerImageRepository);
- zooKeeperClient.writeAthenzDomain(athenzDomain);
- zooKeeperClient.writeQuota(quota);
- zooKeeperClient.writeTenantSecretStores(tenantSecretStores);
- zooKeeperClient.writeOperatorCertificates(operatorCertificates);
- zooKeeperClient.writeCloudAccount(cloudAccount);
- zooKeeperClient.writeDataplaneTokens(dataplaneTokens);
+ new SessionSerializer().write(zooKeeperClient,
+ applicationId,
+ fileReference,
+ dockerImageRepository,
+ vespaVersion,
+ athenzDomain,
+ quota,
+ tenantSecretStores,
+ operatorCertificates,
+ cloudAccount,
+ dataplaneTokens,
+ writeSessionData);
} catch (RuntimeException | IOException e) {
zkDeployer.cleanup();
throw new RuntimeException("Error preparing session", e);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
index f82aa405380..1af728919d9 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionRepository.java
@@ -6,7 +6,6 @@ import com.google.common.collect.Multiset;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.concurrent.StripedExecutor;
-import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
@@ -27,7 +26,6 @@ import com.yahoo.vespa.config.server.application.ApplicationSet;
import com.yahoo.vespa.config.server.application.TenantApplications;
import com.yahoo.vespa.config.server.configchange.ConfigChangeActions;
import com.yahoo.vespa.config.server.deploy.TenantFileSystemDirs;
-import com.yahoo.vespa.config.server.filedistribution.FileDirectory;
import com.yahoo.vespa.config.server.filedistribution.FileDistributionFactory;
import com.yahoo.vespa.config.server.http.InvalidApplicationException;
import com.yahoo.vespa.config.server.http.UnknownVespaVersionException;
@@ -41,7 +39,9 @@ import com.yahoo.vespa.config.server.tenant.TenantRepository;
import com.yahoo.vespa.config.server.zookeeper.SessionCounter;
import com.yahoo.vespa.config.server.zookeeper.ZKApplication;
import com.yahoo.vespa.curator.Curator;
+import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.LongFlag;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.UnboundStringFlag;
@@ -127,6 +127,7 @@ public class SessionRepository {
private final ConfigDefinitionRepo configDefinitionRepo;
private final int maxNodeSize;
private final LongFlag expiryTimeFlag;
+ private final BooleanFlag writeSessionData;
public SessionRepository(TenantName tenantName,
TenantApplications applicationRepo,
@@ -168,7 +169,8 @@ public class SessionRepository {
this.modelFactoryRegistry = modelFactoryRegistry;
this.configDefinitionRepo = configDefinitionRepo;
this.maxNodeSize = maxNodeSize;
- expiryTimeFlag = PermanentFlags.CONFIG_SERVER_SESSION_EXPIRY_TIME.bindTo(flagSource);
+ this.expiryTimeFlag = PermanentFlags.CONFIG_SERVER_SESSION_EXPIRY_TIME.bindTo(flagSource);
+ this.writeSessionData = Flags.WRITE_CONFIG_SERVER_SESSION_DATA_AS_ONE_BLOB.bindTo(flagSource);
loadSessions(); // Needs to be done before creating cache below
this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
@@ -266,24 +268,14 @@ public class SessionRepository {
boolean internalRedeploy,
TimeoutBudget timeoutBudget,
DeployLogger deployLogger) {
- ApplicationId existingApplicationId = existingSession.getApplicationId();
+ ApplicationId applicationId = existingSession.getApplicationId();
File existingApp = getSessionAppDir(existingSession.getSessionId());
LocalSession session = createSessionFromApplication(existingApp,
- existingApplicationId,
+ applicationId,
internalRedeploy,
timeoutBudget,
deployLogger);
- // Note: Setters below need to be kept in sync with calls in SessionPreparer.writeStateToZooKeeper()
- session.setApplicationId(existingApplicationId);
- session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
- session.setVespaVersion(existingSession.getVespaVersion());
- session.setDockerImageRepository(existingSession.getDockerImageRepository());
- session.setAthenzDomain(existingSession.getAthenzDomain());
- session.setQuota(existingSession.getQuota());
- session.setTenantSecretStores(existingSession.getTenantSecretStores());
- session.setOperatorCertificates(existingSession.getOperatorCertificates());
- session.setCloudAccount(existingSession.getCloudAccount());
- session.setDataplaneTokens(existingSession.getDataplaneTokens());
+ write(existingSession, session, applicationId);
return session;
}
@@ -534,7 +526,6 @@ public class SessionRepository {
private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
log.log(Level.FINE, () -> "Loading application for " + session);
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
- ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
session.getSessionId(),
sessionZooKeeperClient,
@@ -550,9 +541,9 @@ public class SessionRepository {
modelFactoryRegistry,
configDefinitionRepo);
return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
- sessionZooKeeperClient.readDockerImageRepository(),
- sessionZooKeeperClient.readVespaVersion(),
- applicationPackage,
+ session.getDockerImageRepository(),
+ session.getVespaVersion(),
+ sessionZooKeeperClient.loadApplicationPackage(),
new AllocatedHostsFromAllModels(),
clock.instant()));
}
@@ -578,6 +569,24 @@ public class SessionRepository {
});
}
+ // ---------------- Serialization ----------------------------------------------------------------
+
+ private void write(Session existingSession, LocalSession session, ApplicationId applicationId) {
+ SessionSerializer sessionSerializer = new SessionSerializer();
+ sessionSerializer.write(session.getSessionZooKeeperClient(),
+ applicationId,
+ existingSession.getApplicationPackageReference(),
+ existingSession.getDockerImageRepository(),
+ existingSession.getVespaVersion(),
+ existingSession.getAthenzDomain(),
+ existingSession.getQuota(),
+ existingSession.getTenantSecretStores(),
+ existingSession.getOperatorCertificates(),
+ existingSession.getCloudAccount(),
+ existingSession.getDataplaneTokens(),
+ writeSessionData);
+ }
+
// ---------------- Common stuff ----------------------------------------------------------------
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
@@ -854,23 +863,18 @@ public class SessionRepository {
}
SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
- FileReference fileReference = sessionZKClient.readApplicationPackageReference();
+ var fileReference = sessionZKClient.readApplicationPackageReference();
log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
- if (fileReference == null) return;
+ if (fileReference.isEmpty()) return;
+
+ Optional<File> sessionDir = fileDistributionFactory.fileDirectory().getFile(fileReference.get());
+ // We cannot be guaranteed that the file reference exists (it could be that it has not
+ // been downloaded yet), and e.g. when bootstrapping we cannot throw an exception in that case
+ if (sessionDir.isEmpty()) return;
- File sessionDir;
- FileDirectory fileDirectory = fileDistributionFactory.fileDirectory();
- try {
- sessionDir = fileDirectory.getFile(fileReference);
- } catch (IllegalArgumentException e) {
- // We cannot be guaranteed that the file reference exists (it could be that it has not
- // been downloaded yet), and e.g. when bootstrapping we cannot throw an exception in that case
- log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found");
- return;
- }
ApplicationId applicationId = sessionZKClient.readApplicationId();
log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
- createLocalSession(sessionDir, applicationId, sessionId);
+ createLocalSession(sessionDir.get(), applicationId, sessionId);
}
private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionSerializer.java
new file mode 100644
index 00000000000..1202b2bd08b
--- /dev/null
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionSerializer.java
@@ -0,0 +1,53 @@
+package com.yahoo.vespa.config.server.session;
+
+import com.yahoo.component.Version;
+import com.yahoo.config.FileReference;
+import com.yahoo.config.model.api.Quota;
+import com.yahoo.config.model.api.TenantSecretStore;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.AthenzDomain;
+import com.yahoo.config.provision.CloudAccount;
+import com.yahoo.config.provision.DataplaneToken;
+import com.yahoo.config.provision.DockerImage;
+import com.yahoo.vespa.flags.BooleanFlag;
+
+import java.security.cert.X509Certificate;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Serialization and deserialization of session data to/from ZooKeeper.
+ * @author hmusum
+ */
+public class SessionSerializer {
+
+ void write(SessionZooKeeperClient zooKeeperClient, ApplicationId applicationId,
+ Optional<FileReference> fileReference, Optional<DockerImage> dockerImageRepository,
+ Version vespaVersion, Optional<AthenzDomain> athenzDomain, Optional<Quota> quota,
+ List<TenantSecretStore> tenantSecretStores, List<X509Certificate> operatorCertificates,
+ Optional<CloudAccount> cloudAccount, List<DataplaneToken> dataplaneTokens,
+ BooleanFlag writeSessionData) {
+ zooKeeperClient.writeApplicationId(applicationId);
+ zooKeeperClient.writeApplicationPackageReference(fileReference);
+ zooKeeperClient.writeVespaVersion(vespaVersion);
+ zooKeeperClient.writeDockerImageRepository(dockerImageRepository);
+ zooKeeperClient.writeAthenzDomain(athenzDomain);
+ zooKeeperClient.writeQuota(quota);
+ zooKeeperClient.writeTenantSecretStores(tenantSecretStores);
+ zooKeeperClient.writeOperatorCertificates(operatorCertificates);
+ zooKeeperClient.writeCloudAccount(cloudAccount);
+ zooKeeperClient.writeDataplaneTokens(dataplaneTokens);
+ if (writeSessionData.value())
+ zooKeeperClient.writeSessionData(new SessionData(applicationId,
+ fileReference,
+ vespaVersion,
+ dockerImageRepository,
+ athenzDomain,
+ quota,
+ tenantSecretStores,
+ operatorCertificates,
+ cloudAccount,
+ dataplaneTokens));
+ }
+
+}
\ No newline at end of file
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
index 23b6fe075fa..7d1a7ceae4e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClient.java
@@ -6,7 +6,6 @@ import com.yahoo.component.Version;
import com.yahoo.component.Vtag;
import com.yahoo.config.FileReference;
import com.yahoo.config.application.api.ApplicationPackage;
-import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
import com.yahoo.config.model.api.Quota;
import com.yahoo.config.model.api.TenantSecretStore;
@@ -23,8 +22,6 @@ import com.yahoo.text.Utf8;
import com.yahoo.transaction.Transaction;
import com.yahoo.vespa.config.server.NotFoundException;
import com.yahoo.vespa.config.server.UserConfigDefinitionRepo;
-import com.yahoo.vespa.config.server.deploy.ZooKeeperClient;
-import com.yahoo.vespa.config.server.deploy.ZooKeeperDeployer;
import com.yahoo.vespa.config.server.filedistribution.AddFileInterface;
import com.yahoo.vespa.config.server.filedistribution.MockFileManager;
import com.yahoo.vespa.config.server.tenant.CloudAccountSerializer;
@@ -45,6 +42,18 @@ import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
+import static com.yahoo.vespa.config.server.session.SessionData.APPLICATION_ID_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.APPLICATION_PACKAGE_REFERENCE_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.ATHENZ_DOMAIN;
+import static com.yahoo.vespa.config.server.session.SessionData.CLOUD_ACCOUNT_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.CREATE_TIME_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.DATAPLANE_TOKENS_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.DOCKER_IMAGE_REPOSITORY_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.OPERATOR_CERTIFICATES_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.QUOTA_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.SESSION_DATA_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.TENANT_SECRET_STORES_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.VERSION_PATH;
import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.USER_DEFCONFIGS_ZK_SUBPATH;
import static com.yahoo.vespa.curator.Curator.CompletionWaiter;
import static com.yahoo.yolean.Exceptions.uncheck;
@@ -61,18 +70,6 @@ public class SessionZooKeeperClient {
// NOTE: Any state added here MUST also be propagated in com.yahoo.vespa.config.server.deploy.Deployment.prepare()
- static final String APPLICATION_ID_PATH = "applicationId";
- static final String APPLICATION_PACKAGE_REFERENCE_PATH = "applicationPackageReference";
- private static final String VERSION_PATH = "version";
- private static final String CREATE_TIME_PATH = "createTime";
- private static final String DOCKER_IMAGE_REPOSITORY_PATH = "dockerImageRepository";
- private static final String ATHENZ_DOMAIN = "athenzDomain";
- private static final String QUOTA_PATH = "quota";
- private static final String TENANT_SECRET_STORES_PATH = "tenantSecretStores";
- private static final String OPERATOR_CERTIFICATES_PATH = "operatorCertificates";
- private static final String CLOUD_ACCOUNT_PATH = "cloudAccount";
- private static final String DATAPLANE_TOKENS_PATH = "dataplaneTokens";
-
private final Curator curator;
private final TenantName tenantName;
private final long sessionId;
@@ -180,11 +177,8 @@ public class SessionZooKeeperClient {
reference -> curator.set(applicationPackageReferencePath(), Utf8.toBytes(reference.value())));
}
- FileReference readApplicationPackageReference() {
- Optional<byte[]> data = curator.getData(applicationPackageReferencePath());
- if (data.isEmpty()) return null; // This should not happen.
-
- return new FileReference(Utf8.toString(data.get()));
+ Optional<FileReference> readApplicationPackageReference() {
+ return curator.getData(applicationPackageReferencePath()).map(d -> new FileReference(Utf8.toString(d)));
}
private Path applicationPackageReferencePath() {
@@ -227,6 +221,10 @@ public class SessionZooKeeperClient {
curator.set(versionPath(), Utf8.toBytes(version.toString()));
}
+ public void writeSessionData(SessionData sessionData) {
+ curator.set(sessionPath.append(SESSION_DATA_PATH), sessionData.toJson());
+ }
+
public Version readVespaVersion() {
Optional<byte[]> data = curator.getData(versionPath());
// TODO: Empty version should not be possible any more - verify and remove
@@ -261,11 +259,6 @@ public class SessionZooKeeperClient {
.orElseThrow(() -> new IllegalStateException("Allocated hosts does not exists"));
}
- public ZooKeeperDeployer createDeployer(DeployLogger logger) {
- ZooKeeperClient zkClient = new ZooKeeperClient(curator, logger, sessionPath);
- return new ZooKeeperDeployer(zkClient);
- }
-
public Transaction createWriteStatusTransaction(Session.Status status) {
CuratorTransaction transaction = new CuratorTransaction(curator);
if (curator.exists(sessionStatusPath)) {
@@ -368,7 +361,7 @@ public class SessionZooKeeperClient {
transaction.commit();
}
- private static Path getSessionPath(TenantName tenantName, long sessionId) {
+ static Path getSessionPath(TenantName tenantName, long sessionId) {
return TenantRepository.getSessionsPath(tenantName).append(String.valueOf(sessionId));
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java
index ef41512f979..3b819da6237 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/DataplaneTokenSerializer.java
@@ -54,6 +54,11 @@ public class DataplaneTokenSerializer {
public static Slime toSlime(List<DataplaneToken> dataplaneTokens) {
Slime slime = new Slime();
Cursor root = slime.setArray();
+ toSlime(dataplaneTokens, root);
+ return slime;
+ }
+
+ public static void toSlime(List<DataplaneToken> dataplaneTokens, Cursor root) {
for (DataplaneToken token : dataplaneTokens) {
Cursor cursor = root.addObject();
cursor.setString(ID_FIELD, token.tokenId());
@@ -65,6 +70,6 @@ public class DataplaneTokenSerializer {
val.setString(EXPIRATION_FIELD, v.expiration().map(Instant::toString).orElse("<none>"));
});
}
- return slime;
}
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java
index 232dd2e5fe7..e5a969bb948 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/OperatorCertificateSerializer.java
@@ -1,8 +1,6 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
package com.yahoo.vespa.config.server.tenant;
-import com.yahoo.config.model.api.ApplicationRoles;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
@@ -11,21 +9,28 @@ import com.yahoo.slime.SlimeUtils;
import java.security.cert.X509Certificate;
import java.util.List;
-import java.util.stream.Collectors;
+/**
+ * Serializer for operator certificates.
+ * The certificates are serialized as a list of PEM strings.
+ * @author tokle
+ */
public class OperatorCertificateSerializer {
private final static String certificateField = "certificates";
-
public static Slime toSlime(List<X509Certificate> certificateList) {
Slime slime = new Slime();
var root = slime.setObject();
Cursor array = root.setArray(certificateField);
+ toSlime(certificateList, array);
+ return slime;
+ }
+
+ public static void toSlime(List<X509Certificate> certificateList, Cursor array) {
certificateList.stream()
.map(X509CertificateUtils::toPem)
.forEach(array::addString);
- return slime;
}
public static List<X509Certificate> fromSlime(Inspector object) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java
index 262192ad6c4..b8df5073a3e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantSecretStoreSerializer.java
@@ -30,10 +30,14 @@ public class TenantSecretStoreSerializer {
public static Slime toSlime(List<TenantSecretStore> tenantSecretStores) {
Slime slime = new Slime();
Cursor cursor = slime.setArray();
- tenantSecretStores.forEach(tenantSecretStore -> toSlime(tenantSecretStore, cursor.addObject()));
+ toSlime(tenantSecretStores, cursor);
return slime;
}
+ public static void toSlime(List<TenantSecretStore> tenantSecretStores, Cursor cursor) {
+ tenantSecretStores.forEach(tenantSecretStore -> toSlime(tenantSecretStore, cursor.addObject()));
+ }
+
public static void toSlime(TenantSecretStore tenantSecretStore, Cursor object) {
object.setString(awsIdField, tenantSecretStore.getAwsId());
object.setString(nameField, tenantSecretStore.getName());
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java
index 649d382ddb6..040df208323 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileDirectoryTest.java
@@ -37,9 +37,9 @@ public class FileDirectoryTest {
FileReference foo = createFile("foo");
FileReference bar = createFile("bar");
- assertTrue(fileDirectory.getFile(foo).exists());
+ assertTrue(fileDirectory.getFile(foo).get().exists());
assertEquals("ea315b7acac56246", foo.value());
- assertTrue(fileDirectory.getFile(bar).exists());
+ assertTrue(fileDirectory.getFile(bar).get().exists());
assertEquals("2b8e97f15c854e1d", bar.value());
}
@@ -49,7 +49,7 @@ public class FileDirectoryTest {
File subDirectory = new File(temporaryFolder.getRoot(), subdirName);
createFileInSubDir(subDirectory, "foo", "some content");
FileReference fileReference = fileDirectory.addFile(subDirectory);
- File dir = fileDirectory.getFile(fileReference);
+ File dir = fileDirectory.getFile(fileReference).get();
assertTrue(dir.exists());
assertTrue(new File(dir, "foo").exists());
assertFalse(new File(dir, "doesnotexist").exists());
@@ -58,7 +58,7 @@ public class FileDirectoryTest {
// Change contents of a file, file reference value should change
createFileInSubDir(subDirectory, "foo", "new content");
FileReference fileReference2 = fileDirectory.addFile(subDirectory);
- dir = fileDirectory.getFile(fileReference2);
+ dir = fileDirectory.getFile(fileReference2).get();
assertTrue(new File(dir, "foo").exists());
assertNotEquals(fileReference + " should not be equal to " + fileReference2, fileReference, fileReference2);
assertEquals("e5d4b3fe5ee3ede3", fileReference2.value());
@@ -66,7 +66,7 @@ public class FileDirectoryTest {
// Add a file, should be available and file reference should have another value
createFileInSubDir(subDirectory, "bar", "some other content");
FileReference fileReference3 = fileDirectory.addFile(subDirectory);
- dir = fileDirectory.getFile(fileReference3);
+ dir = fileDirectory.getFile(fileReference3).get();
assertTrue(new File(dir, "foo").exists());
assertTrue(new File(dir, "bar").exists());
assertEquals("894bced3fc9d199b", fileReference3.value());
@@ -78,7 +78,7 @@ public class FileDirectoryTest {
File subDirectory = new File(temporaryFolder.getRoot(), subdirName);
createFileInSubDir(subDirectory, "foo", "some content");
FileReference fileReference = fileDirectory.addFile(subDirectory);
- File dir = fileDirectory.getFile(fileReference);
+ File dir = fileDirectory.getFile(fileReference).get();
assertTrue(dir.exists());
File foo = new File(dir, "foo");
assertTrue(foo.exists());
@@ -90,7 +90,7 @@ public class FileDirectoryTest {
try { Thread.sleep(1000);} catch (InterruptedException e) {/*ignore */} // Needed since we have timestamp resolution of 1 second
Files.delete(Paths.get(fileDirectory.getPath(fileReference)).resolve("subdir").resolve("foo"));
fileReference = fileDirectory.addFile(subDirectory);
- dir = fileDirectory.getFile(fileReference);
+ dir = fileDirectory.getFile(fileReference).get();
File foo2 = new File(dir, "foo");
assertTrue(dir.exists());
assertTrue(foo2.exists());
@@ -107,7 +107,7 @@ public class FileDirectoryTest {
File subDirectory = new File(temporaryFolder.getRoot(), subdirName);
createFileInSubDir(subDirectory, "foo", "some content");
FileReference fileReference = fileDirectory.addFile(subDirectory);
- File dir = fileDirectory.getFile(fileReference);
+ File dir = fileDirectory.getFile(fileReference).get();
assertTrue(dir.exists());
File foo = new File(dir, "foo");
assertTrue(foo.exists());
@@ -119,7 +119,7 @@ public class FileDirectoryTest {
// Add a file that already exists, nothing should happen
createFileInSubDir(subDirectory, "foo", "some content"); // same as before, nothing should happen
FileReference fileReference3 = fileDirectory.addFile(subDirectory);
- dir = fileDirectory.getFile(fileReference3);
+ dir = fileDirectory.getFile(fileReference3).get();
assertTrue(new File(dir, "foo").exists());
assertEquals("bebc5a1aee74223d", fileReference3.value()); // same hash
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
index 49458acd60b..373b39c8365 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/filedistribution/FileServerTest.java
@@ -29,6 +29,7 @@ import static com.yahoo.vespa.filedistribution.FileReferenceData.CompressionType
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
public class FileServerTest {
@@ -60,9 +61,9 @@ public class FileServerTest {
String dir = "123";
assertFalse(fileServer.hasFile(dir));
FileReferenceDownload foo = new FileReferenceDownload(new FileReference(dir), "test");
- assertFalse(fileServer.hasFileDownloadIfNeeded(foo));
+ assertFalse(fileServer.getFileDownloadIfNeeded(foo).isPresent());
writeFile(dir);
- assertTrue(fileServer.hasFileDownloadIfNeeded(foo));
+ assertTrue(fileServer.getFileDownloadIfNeeded(foo).isPresent());
}
@Test
@@ -78,7 +79,9 @@ public class FileServerTest {
File dir = getFileServerRootDir();
IOUtils.writeFile(dir + "/12y/f1", "dummy-data", true);
CompletableFuture<byte []> content = new CompletableFuture<>();
- fileServer.startFileServing(new FileReference("12y"), new FileReceiver(content), Set.of(gzip));
+ FileReference fileReference = new FileReference("12y");
+ var file = fileServer.getFileDownloadIfNeeded(new FileReferenceDownload(fileReference, "test"));
+ fileServer.startFileServing(fileReference, file.get(), new FileReceiver(content), Set.of(gzip));
assertEquals(new String(content.get()), "dummy-data");
}
@@ -89,7 +92,9 @@ public class FileServerTest {
File dir = getFileServerRootDir();
IOUtils.writeFile(dir + "/subdir/12z/f1", "dummy-data-2", true);
CompletableFuture<byte []> content = new CompletableFuture<>();
- fileServer.startFileServing(new FileReference("subdir"), new FileReceiver(content), Set.of(gzip, lz4));
+ FileReference fileReference = new FileReference("subdir");
+ var file = fileServer.getFileDownloadIfNeeded(new FileReferenceDownload(fileReference, "test"));
+ fileServer.startFileServing(fileReference, file.get(), new FileReceiver(content), Set.of(gzip, lz4));
// Decompress with lz4 and check contents
var compressor = new FileReferenceCompressor(FileReferenceData.Type.compressed, lz4);
@@ -130,6 +135,27 @@ public class FileServerTest {
assertEquals(1, fileServer.downloader().connectionPool().getSize());
}
+ @Test
+ public void requireThatErrorsAreHandled() throws IOException, ExecutionException, InterruptedException {
+ File dir = getFileServerRootDir();
+ IOUtils.writeFile(dir + "/12y/f1", "dummy-data", true);
+ CompletableFuture<byte []> content = new CompletableFuture<>();
+ FailingFileReceiver fileReceiver = new FailingFileReceiver(content);
+
+ // Should fail the first time, see FailingFileReceiver
+ FileReference reference = new FileReference("12y");
+ var file = fileServer.getFileDownloadIfNeeded(new FileReferenceDownload(reference, "test"));
+ try {
+ fileServer.startFileServing(reference, file.get(), fileReceiver, Set.of(gzip));
+ fail("Should have failed");
+ } catch (RuntimeException e) {
+ // expected
+ }
+
+ fileServer.startFileServing(reference, file.get(), fileReceiver, Set.of(gzip));
+ assertEquals(new String(content.get()), "dummy-data");
+ }
+
private void writeFile(String dir) throws IOException {
File rootDir = getFileServerRootDir();
IOUtils.createDirectory(rootDir + "/" + dir);
@@ -153,6 +179,23 @@ public class FileServerTest {
}
}
+ private static class FailingFileReceiver implements FileServer.Receiver {
+ final CompletableFuture<byte []> content;
+ int counter = 0;
+ FailingFileReceiver(CompletableFuture<byte []> content) {
+ this.content = content;
+ }
+ @Override
+ public void receive(FileReferenceData fileData, FileServer.ReplayStatus status) {
+ counter++;
+ if (counter <= 1)
+ throw new RuntimeException("Failed to receive file");
+ else {
+ this.content.complete(fileData.content().array());
+ }
+ }
+ }
+
private File getFileServerRootDir() {
return fileServer.getRootDir().getRoot();
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
index 52d5ba16562..0158aa1961d 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
@@ -67,8 +67,8 @@ import java.util.OptionalInt;
import java.util.Set;
import java.util.logging.Level;
+import static com.yahoo.vespa.config.server.session.SessionData.APPLICATION_PACKAGE_REFERENCE_PATH;
import static com.yahoo.vespa.config.server.session.SessionPreparer.PrepareResult;
-import static com.yahoo.vespa.config.server.session.SessionZooKeeperClient.APPLICATION_PACKAGE_REFERENCE_PATH;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java
index 4a7aeafab7e..569b6624815 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionZooKeeperClientTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.config.server.session;
import com.yahoo.cloud.config.ConfigserverConfig;
+import com.yahoo.component.Version;
import com.yahoo.config.FileReference;
import com.yahoo.config.model.api.Quota;
import com.yahoo.config.model.api.TenantSecretStore;
@@ -16,10 +17,13 @@ import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
+
import java.time.Instant;
import java.util.List;
import java.util.Optional;
+import static com.yahoo.vespa.config.server.session.SessionData.APPLICATION_ID_PATH;
+import static com.yahoo.vespa.config.server.session.SessionData.SESSION_DATA_PATH;
import static com.yahoo.vespa.config.server.zookeeper.ZKApplication.SESSIONSTATE_ZK_SUBPATH;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -87,7 +91,7 @@ public class SessionZooKeeperClientTest {
int sessionId = 3;
SessionZooKeeperClient zkc = createSessionZKClient(sessionId);
zkc.writeApplicationId(id);
- Path path = sessionPath(sessionId).append(SessionZooKeeperClient.APPLICATION_ID_PATH);
+ Path path = sessionPath(sessionId).append(APPLICATION_ID_PATH);
assertTrue(curator.exists(path));
assertEquals(id.serializedForm(), Utf8.toString(curator.getData(path).get()));
}
@@ -135,7 +139,7 @@ public class SessionZooKeeperClientTest {
final FileReference testRef = new FileReference("test-ref");
SessionZooKeeperClient zkc = createSessionZKClient(3);
zkc.writeApplicationPackageReference(Optional.of(testRef));
- assertEquals(testRef, zkc.readApplicationPackageReference());
+ assertEquals(testRef, zkc.readApplicationPackageReference().get());
}
@Test
@@ -157,9 +161,30 @@ public class SessionZooKeeperClientTest {
assertEquals(secretStores, zkc.readTenantSecretStores());
}
+ @Test
+ public void require_that_session_data_is_written_to_zk() {
+ int sessionId = 2;
+ SessionZooKeeperClient zkc = createSessionZKClient(sessionId);
+ zkc.writeSessionData(new SessionData(ApplicationId.defaultId(),
+ Optional.of(new FileReference("foo")),
+ Version.fromString("8.195.1"),
+ Optional.empty(),
+ Optional.empty(),
+ Optional.empty(),
+ List.of(),
+ List.of(),
+ Optional.empty(),
+ List.of()));
+ Path path = sessionPath(sessionId).append(SESSION_DATA_PATH);
+ assertTrue(curator.exists(path));
+ String data = Utf8.toString(curator.getData(path).get());
+ assertTrue(data.contains("{\"applicationId\":\"default:default:default\",\"applicationPackageReference\":\"foo\",\"version\":\"8.195.1\",\"createTime\":"));
+ assertTrue(data.contains(",\"tenantSecretStores\":[],\"operatorCertificates\":[],\"dataplaneTokens\":[]}"));
+ }
+
private void assertApplicationIdParse(long sessionId, String idString, String expectedIdString) {
SessionZooKeeperClient zkc = createSessionZKClient(sessionId);
- Path path = sessionPath(sessionId).append(SessionZooKeeperClient.APPLICATION_ID_PATH);
+ Path path = sessionPath(sessionId).append(APPLICATION_ID_PATH);
curator.set(path, Utf8.toBytes(idString));
assertEquals(expectedIdString, zkc.readApplicationId().serializedForm());
}
diff --git a/container-search/abi-spec.json b/container-search/abi-spec.json
index 0f440957dfd..cdb660f294a 100644
--- a/container-search/abi-spec.json
+++ b/container-search/abi-spec.json
@@ -6981,12 +6981,14 @@
"public java.lang.Integer getMinHitsPerThread()",
"public java.lang.Double getPostFilterThreshold()",
"public java.lang.Double getApproximateThreshold()",
+ "public java.lang.Double getTargetHitsMaxAdjustmentFactor()",
"public void setTermwiselimit(double)",
"public void setNumThreadsPerSearch(int)",
"public void setNumSearchPartitions(int)",
"public void setMinHitsPerThread(int)",
"public void setPostFilterThreshold(double)",
"public void setApproximateThreshold(double)",
+ "public void setTargetHitsMaxAdjustmentFactor(double)",
"public void prepare(com.yahoo.search.query.ranking.RankProperties)",
"public com.yahoo.search.query.ranking.Matching clone()",
"public boolean equals(java.lang.Object)",
@@ -7000,6 +7002,7 @@
"public static final java.lang.String MINHITSPERTHREAD",
"public static final java.lang.String POST_FILTER_THRESHOLD",
"public static final java.lang.String APPROXIMATE_THRESHOLD",
+ "public static final java.lang.String TARGET_HITS_MAX_ADJUSTMENT_FACTOR",
"public java.lang.Double termwiseLimit"
]
},
diff --git a/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java b/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java
index 625a8bcb6da..c86c21d677f 100644
--- a/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java
+++ b/container-search/src/main/java/com/yahoo/search/dispatch/ReconfigurableDispatcher.java
@@ -1,20 +1,17 @@
package com.yahoo.search.dispatch;
import com.yahoo.component.ComponentId;
+import com.yahoo.component.annotation.Inject;
import com.yahoo.config.subscription.ConfigSubscriber;
+import com.yahoo.container.QrConfig;
import com.yahoo.container.handler.VipStatus;
-import com.yahoo.messagebus.network.rpc.SlobrokConfigSubscriber;
import com.yahoo.vespa.config.search.DispatchConfig;
import com.yahoo.vespa.config.search.DispatchNodesConfig;
import com.yahoo.yolean.UncheckedInterruptedException;
-import java.util.Objects;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
-import static java.util.Objects.requireNonNull;
-
/**
* @author jonmv
*/
@@ -22,10 +19,20 @@ public class ReconfigurableDispatcher extends Dispatcher {
private final ConfigSubscriber subscriber;
- public ReconfigurableDispatcher(ComponentId clusterId, DispatchConfig dispatchConfig, VipStatus vipStatus) {
+ @Inject
+ public ReconfigurableDispatcher(ComponentId clusterId, DispatchConfig dispatchConfig, QrConfig qrConfig, VipStatus vipStatus) {
super(clusterId, dispatchConfig, new DispatchNodesConfig.Builder().build(), vipStatus);
this.subscriber = new ConfigSubscriber();
- this.subscriber.subscribe(this::updateWithNewConfig, DispatchNodesConfig.class, clusterId.stringValue());
+ CountDownLatch configured = new CountDownLatch(1);
+ this.subscriber.subscribe(config -> { updateWithNewConfig(config); configured.countDown(); },
+ DispatchNodesConfig.class, configId(clusterId, qrConfig));
+ try {
+ if ( ! configured.await(1, TimeUnit.MINUTES))
+ throw new IllegalStateException("timed out waiting for initial dispatch nodes config for " + clusterId.getName());
+ }
+ catch (InterruptedException e) {
+ throw new UncheckedInterruptedException("interrupted waiting for initial dispatch nodes config for " + clusterId.getName(), e);
+ }
}
@Override
@@ -34,4 +41,8 @@ public class ReconfigurableDispatcher extends Dispatcher {
super.deconstruct();
}
+ private static String configId(ComponentId clusterId, QrConfig qrConfig) {
+ return qrConfig.clustername() + "/component/" + clusterId.getName();
+ }
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java b/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java
index 800b3a1ba89..99d6959441a 100644
--- a/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java
+++ b/container-search/src/main/java/com/yahoo/search/query/properties/QueryProperties.java
@@ -91,6 +91,7 @@ public class QueryProperties extends Properties {
addDualCasedRM(map, Matching.MINHITSPERTHREAD, GetterSetter.of(query -> query.getRanking().getMatching().getMinHitsPerThread(), (query, value) -> query.getRanking().getMatching().setMinHitsPerThread(asInteger(value, 0))));
addDualCasedRM(map, Matching.POST_FILTER_THRESHOLD, GetterSetter.of(query -> query.getRanking().getMatching().getPostFilterThreshold(), (query, value) -> query.getRanking().getMatching().setPostFilterThreshold(asDouble(value, 1.0))));
addDualCasedRM(map, Matching.APPROXIMATE_THRESHOLD, GetterSetter.of(query -> query.getRanking().getMatching().getApproximateThreshold(), (query, value) -> query.getRanking().getMatching().setApproximateThreshold(asDouble(value, 0.05))));
+ addDualCasedRM(map, Matching.TARGET_HITS_MAX_ADJUSTMENT_FACTOR, GetterSetter.of(query -> query.getRanking().getMatching().getTargetHitsMaxAdjustmentFactor(), (query, value) -> query.getRanking().getMatching().setTargetHitsMaxAdjustmentFactor(asDouble(value, 20.0))));
map.put(CompoundName.fromComponents(Ranking.RANKING, Ranking.MATCH_PHASE, MatchPhase.ATTRIBUTE), GetterSetter.of(query -> query.getRanking().getMatchPhase().getAttribute(), (query, value) -> query.getRanking().getMatchPhase().setAttribute(asString(value, null))));
map.put(CompoundName.fromComponents(Ranking.RANKING, Ranking.MATCH_PHASE, MatchPhase.ASCENDING), GetterSetter.of(query -> query.getRanking().getMatchPhase().getAscending(), (query, value) -> query.getRanking().getMatchPhase().setAscending(asBoolean(value, false))));
diff --git a/container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java b/container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java
index 35fbd52f967..4d21f32d16d 100644
--- a/container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java
+++ b/container-search/src/main/java/com/yahoo/search/query/ranking/Matching.java
@@ -24,6 +24,7 @@ public class Matching implements Cloneable {
public static final String MINHITSPERTHREAD = "minHitsPerThread";
public static final String POST_FILTER_THRESHOLD = "postFilterThreshold";
public static final String APPROXIMATE_THRESHOLD = "approximateThreshold";
+ public static final String TARGET_HITS_MAX_ADJUSTMENT_FACTOR = "targetHitsMaxAdjustmentFactor";
static {
argumentType =new QueryProfileType(Ranking.MATCHING);
@@ -35,6 +36,7 @@ public class Matching implements Cloneable {
argumentType.addField(new FieldDescription(MINHITSPERTHREAD, "integer"));
argumentType.addField(new FieldDescription(POST_FILTER_THRESHOLD, "double"));
argumentType.addField(new FieldDescription(APPROXIMATE_THRESHOLD, "double"));
+ argumentType.addField(new FieldDescription(TARGET_HITS_MAX_ADJUSTMENT_FACTOR, "double"));
argumentType.freeze();
}
@@ -46,6 +48,7 @@ public class Matching implements Cloneable {
private Integer minHitsPerThread = null;
private Double postFilterThreshold = null;
private Double approximateThreshold = null;
+ private Double targetHitsMaxAdjustmentFactor = null;
public Double getTermwiseLimit() { return termwiseLimit; }
public Integer getNumThreadsPerSearch() { return numThreadsPerSearch; }
@@ -53,6 +56,7 @@ public class Matching implements Cloneable {
public Integer getMinHitsPerThread() { return minHitsPerThread; }
public Double getPostFilterThreshold() { return postFilterThreshold; }
public Double getApproximateThreshold() { return approximateThreshold; }
+ public Double getTargetHitsMaxAdjustmentFactor() { return targetHitsMaxAdjustmentFactor; }
public void setTermwiselimit(double value) {
if ((value < 0.0) || (value > 1.0)) {
@@ -75,6 +79,9 @@ public class Matching implements Cloneable {
public void setApproximateThreshold(double threshold) {
approximateThreshold = threshold;
}
+ public void setTargetHitsMaxAdjustmentFactor(double factor) {
+ targetHitsMaxAdjustmentFactor = factor;
+ }
/** Internal operation - DO NOT USE */
public void prepare(RankProperties rankProperties) {
@@ -97,6 +104,9 @@ public class Matching implements Cloneable {
if (approximateThreshold != null) {
rankProperties.put("vespa.matching.global_filter.lower_limit", String.valueOf(approximateThreshold));
}
+ if (targetHitsMaxAdjustmentFactor != null) {
+ rankProperties.put("vespa.matching.nns.target_hits_max_adjustment_factor", String.valueOf(targetHitsMaxAdjustmentFactor));
+ }
}
@Override
@@ -119,12 +129,14 @@ public class Matching implements Cloneable {
Objects.equals(numSearchPartitions, matching.numSearchPartitions) &&
Objects.equals(minHitsPerThread, matching.minHitsPerThread) &&
Objects.equals(postFilterThreshold, matching.postFilterThreshold) &&
- Objects.equals(approximateThreshold, matching.approximateThreshold);
+ Objects.equals(approximateThreshold, matching.approximateThreshold) &&
+ Objects.equals(targetHitsMaxAdjustmentFactor, matching.targetHitsMaxAdjustmentFactor);
}
@Override
public int hashCode() {
- return Objects.hash(termwiseLimit, numThreadsPerSearch, numSearchPartitions, minHitsPerThread, postFilterThreshold, approximateThreshold);
+ return Objects.hash(termwiseLimit, numThreadsPerSearch, numSearchPartitions, minHitsPerThread,
+ postFilterThreshold, approximateThreshold, targetHitsMaxAdjustmentFactor);
}
}
diff --git a/container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java b/container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java
index e3a1eb18a33..37d0e9e1072 100644
--- a/container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java
+++ b/container-search/src/test/java/com/yahoo/search/query/MatchingTestCase.java
@@ -20,6 +20,7 @@ public class MatchingTestCase {
assertNull(query.getRanking().getMatching().getMinHitsPerThread());
assertNull(query.getRanking().getMatching().getPostFilterThreshold());
assertNull(query.getRanking().getMatching().getApproximateThreshold());
+ assertNull(query.getRanking().getMatching().getTargetHitsMaxAdjustmentFactor());
}
@Test
@@ -30,13 +31,15 @@ public class MatchingTestCase {
"&ranking.matching.numSearchPartitions=13" +
"&ranking.matching.minHitsPerThread=3" +
"&ranking.matching.postFilterThreshold=0.8" +
- "&ranking.matching.approximateThreshold=0.3");
+ "&ranking.matching.approximateThreshold=0.3" +
+ "&ranking.matching.targetHitsMaxAdjustmentFactor=2.5");
assertEquals(Double.valueOf(0.7), query.getRanking().getMatching().getTermwiseLimit());
assertEquals(Integer.valueOf(17), query.getRanking().getMatching().getNumThreadsPerSearch());
assertEquals(Integer.valueOf(13), query.getRanking().getMatching().getNumSearchPartitions());
assertEquals(Integer.valueOf(3), query.getRanking().getMatching().getMinHitsPerThread());
assertEquals(Double.valueOf(0.8), query.getRanking().getMatching().getPostFilterThreshold());
assertEquals(Double.valueOf(0.3), query.getRanking().getMatching().getApproximateThreshold());
+ assertEquals(Double.valueOf(2.5), query.getRanking().getMatching().getTargetHitsMaxAdjustmentFactor());
query.prepare();
assertEquals("0.7", query.getRanking().getProperties().get("vespa.matching.termwise_limit").get(0));
@@ -45,6 +48,7 @@ public class MatchingTestCase {
assertEquals("3", query.getRanking().getProperties().get("vespa.matching.minhitsperthread").get(0));
assertEquals("0.8", query.getRanking().getProperties().get("vespa.matching.global_filter.upper_limit").get(0));
assertEquals("0.3", query.getRanking().getProperties().get("vespa.matching.global_filter.lower_limit").get(0));
+ assertEquals("2.5", query.getRanking().getProperties().get("vespa.matching.nns.target_hits_max_adjustment_factor").get(0));
}
@Test
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java
index 7403f0a1b01..fbf3a5d9a03 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/FlagsTarget.java
@@ -142,7 +142,7 @@ public interface FlagsTarget {
var fetchVector = new FetchVector();
if (!flagDimensions.contains(CLOUD)) fetchVector = fetchVector.with(CLOUD, cloud.value());
if (!flagDimensions.contains(ENVIRONMENT)) fetchVector = fetchVector.with(ENVIRONMENT, virtualZoneId.environment().value());
- if (!flagDimensions.contains(SYSTEM)) fetchVector = fetchVector.with(SYSTEM, system.value());
+ fetchVector = fetchVector.with(SYSTEM, system.value());
if (!flagDimensions.contains(ZONE_ID)) fetchVector = fetchVector.with(ZONE_ID, virtualZoneId.value());
return fetchVector.isEmpty() ? data : data.partialResolve(fetchVector);
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java
index 577769baf1e..c6f1d96ed43 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchive.java
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.systemflags.v1;
+import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
@@ -18,8 +19,10 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.text.JSON;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagId;
+import com.yahoo.vespa.flags.json.Condition;
import com.yahoo.vespa.flags.json.DimensionHelper;
import com.yahoo.vespa.flags.json.FlagData;
+import com.yahoo.vespa.flags.json.RelationalCondition;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
import java.io.BufferedInputStream;
@@ -49,6 +52,7 @@ import java.util.zip.ZipOutputStream;
import static com.yahoo.config.provision.CloudName.AWS;
import static com.yahoo.config.provision.CloudName.GCP;
import static com.yahoo.config.provision.CloudName.YAHOO;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.SYSTEM;
import static com.yahoo.yolean.Exceptions.uncheck;
/**
@@ -82,8 +86,8 @@ public class SystemFlagsDataArchive {
String name = entry.getName();
if (!entry.isDirectory() && name.startsWith("flags/")) {
Path filePath = Paths.get(name);
- String rawData = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8);
- addFile(builder, rawData, filePath, zoneRegistry, true);
+ String fileContent = new String(zipIn.readAllBytes(), StandardCharsets.UTF_8);
+ builder.maybeAddFile(filePath, fileContent, zoneRegistry, true);
}
}
return builder.build();
@@ -92,7 +96,7 @@ public class SystemFlagsDataArchive {
}
}
- public static SystemFlagsDataArchive fromDirectory(Path directory, ZoneRegistry zoneRegistry, boolean forceAddFiles) {
+ public static SystemFlagsDataArchive fromDirectory(Path directory, ZoneRegistry zoneRegistry, boolean simulateInController) {
Path root = directory.toAbsolutePath();
Path flagsDirectory = directory.resolve("flags");
if (!Files.isDirectory(flagsDirectory)) {
@@ -103,8 +107,8 @@ public class SystemFlagsDataArchive {
directoryStream.forEach(path -> {
Path relativePath = root.relativize(path.toAbsolutePath());
if (Files.isRegularFile(path)) {
- String rawData = uncheck(() -> Files.readString(path, StandardCharsets.UTF_8));
- addFile(builder, rawData, relativePath, zoneRegistry, forceAddFiles);
+ String fileContent = uncheck(() -> Files.readString(path, StandardCharsets.UTF_8));
+ builder.maybeAddFile(relativePath, fileContent, zoneRegistry, simulateInController);
}
});
return builder.build();
@@ -168,114 +172,119 @@ public class SystemFlagsDataArchive {
return files.getOrDefault(flagId, Map.of()).containsKey(filename);
}
- private static void addFile(Builder builder, String rawData, Path filePath, ZoneRegistry zoneRegistry, boolean force) {
- String filename = filePath.getFileName().toString();
-
- if (filename.startsWith("."))
- return; // Ignore files starting with '.'
-
- if (!force && !FlagsTarget.filenameForSystem(filename, zoneRegistry.system()))
- return; // Ignore files for other systems
-
- FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString());
- FlagData flagData;
- if (rawData.isBlank()) {
- flagData = new FlagData(directoryDeducedFlagId);
- } else {
- Set<ZoneId> zones = force ? Stream.concat(Stream.of(ZoneId.ofVirtualControllerZone()),
- zoneRegistry.zones().all().zones().stream().map(ZoneApi::getVirtualId))
- .collect(Collectors.toSet())
- : Set.of();
- String normalizedRawData = normalizeJson(rawData, zones);
- flagData = FlagData.deserialize(normalizedRawData);
- if (!directoryDeducedFlagId.equals(flagData.id())) {
- throw new FlagValidationException("Flag data file with flag id '%s' in directory for '%s'"
- .formatted(flagData.id(), directoryDeducedFlagId.toString()));
- }
-
- String serializedData = flagData.serializeToJson();
- if (!JSON.equals(serializedData, normalizedRawData)) {
- throw new FlagValidationException("""
- %s contains unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
- %s
- but deserializing this ended up with:
- %s
- These fields may be spelled wrong, or remove them?
- See https://git.ouroath.com/vespa/hosted-feature-flags for more info on the JSON syntax
- """.formatted(filePath, normalizedRawData, serializedData));
- }
- }
-
- if (builder.hasFile(filename, flagData)) {
- throw new FlagValidationException("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!"
- .formatted(filePath, flagData.id()));
- }
-
- builder.addFile(filename, flagData);
- }
-
- static String normalizeJson(String json, Set<ZoneId> zones) {
- JsonNode root = uncheck(() -> mapper.readTree(json));
- removeCommentsRecursively(root);
- removeNullRuleValues(root);
- verifyValues(root, zones);
- return root.toString();
- }
-
- private static void verifyValues(JsonNode root, Set<ZoneId> zones) {
- var cursor = new JsonAccessor(root);
- cursor.get("rules").forEachArrayElement(rule -> rule.get("conditions").forEachArrayElement(condition -> {
- FetchVector.Dimension dimension = DimensionHelper
- .fromWire(condition.get("dimension")
- .asString()
- .orElseThrow(() -> new FlagValidationException("Invalid dimension in condition: " + condition)));
- switch (dimension) {
- case APPLICATION_ID -> validateStringValues(condition, ApplicationId::fromSerializedForm);
- case CONSOLE_USER_EMAIL -> validateStringValues(condition, email -> {});
- case CLOUD -> validateStringValues(condition, cloud -> {
- if (!Set.of(YAHOO, AWS, GCP).contains(CloudName.from(cloud)))
- throw new FlagValidationException("Unknown cloud: " + cloud);
- });
- case CLUSTER_ID -> validateStringValues(condition, ClusterSpec.Id::from);
- case CLUSTER_TYPE -> validateStringValues(condition, ClusterSpec.Type::from);
- case ENVIRONMENT -> validateStringValues(condition, Environment::from);
- case HOSTNAME -> validateStringValues(condition, HostName::of);
- case NODE_TYPE -> validateStringValues(condition, NodeType::valueOf);
- case SYSTEM -> validateStringValues(condition, system -> {
+ private static void validateSystems(FlagData flagData) throws FlagValidationException {
+ flagData.rules().forEach(rule -> rule.conditions().forEach(condition -> {
+ if (condition.dimension() == SYSTEM) {
+ validateConditionValues(condition, system -> {
if (!SystemName.hostedVespa().contains(SystemName.from(system)))
throw new FlagValidationException("Unknown system: " + system);
});
- case TENANT_ID -> validateStringValues(condition, TenantName::from);
- case VESPA_VERSION -> validateStringValues(condition, versionString -> {
- if (Version.fromString(versionString).getMajor() < 8)
- throw new FlagValidationException("Major Vespa version must be at least 8: " + versionString);
- });
- case ZONE_ID -> validateStringValues(condition, zoneIdString -> {
- ZoneId zoneId = ZoneId.from(zoneIdString);
- if (!zones.isEmpty() && !zones.contains(zoneId))
- throw new FlagValidationException("Unknown zone: " + zoneIdString);
- });
}
}));
}
- private static void validateStringValues(JsonAccessor condition, Consumer<String> valueValidator) {
- condition.get("values").forEachArrayElement(conditionValue -> {
- String value = conditionValue.asString()
- .orElseThrow(() -> {
- String dimension = condition.get("dimension").asString().orElseThrow();
- String type = condition.get("type").asString().orElseThrow();
- return new FlagValidationException("Non-string %s in %s condition: %s".formatted(
- dimension, type, conditionValue));
- });
+ private static void validateForSystem(FlagData flagData, ZoneRegistry zoneRegistry, boolean inController) throws FlagValidationException {
+ Set<ZoneId> zones = inController ?
+ zoneRegistry.zonesIncludingSystem().all().zones().stream().map(ZoneApi::getVirtualId).collect(Collectors.toSet()) :
+ null;
+
+ flagData.rules().forEach(rule -> rule.conditions().forEach(condition -> {
+ int force_switch_expression_dummy = switch (condition.type()) {
+ case RELATIONAL -> switch (condition.dimension()) {
+ case APPLICATION_ID, CLOUD, CLUSTER_ID, CLUSTER_TYPE, CONSOLE_USER_EMAIL, ENVIRONMENT,
+ HOSTNAME, NODE_TYPE, SYSTEM, TENANT_ID, ZONE_ID ->
+ throw new FlagValidationException(condition.type().toWire() + " " +
+ DimensionHelper.toWire(condition.dimension()) +
+ " condition is not supported");
+ case VESPA_VERSION -> {
+ RelationalCondition rCond = RelationalCondition.create(condition.toCreateParams());
+ Version version = Version.fromString(rCond.relationalPredicate().rightOperand());
+ if (version.getMajor() < 8)
+ throw new FlagValidationException("Major Vespa version must be at least 8: " + version);
+ yield 0;
+ }
+ };
+
+ case WHITELIST, BLACKLIST -> switch (condition.dimension()) {
+ case APPLICATION_ID -> validateConditionValues(condition, ApplicationId::fromSerializedForm);
+ case CONSOLE_USER_EMAIL -> validateConditionValues(condition, email -> {
+ if (!email.contains("@"))
+ throw new FlagValidationException("Invalid email address: " + email);
+ });
+ case CLOUD -> validateConditionValues(condition, cloud -> {
+ if (!Set.of(YAHOO, AWS, GCP).contains(CloudName.from(cloud)))
+ throw new FlagValidationException("Unknown cloud: " + cloud);
+ });
+ case CLUSTER_ID -> validateConditionValues(condition, ClusterSpec.Id::from);
+ case CLUSTER_TYPE -> validateConditionValues(condition, ClusterSpec.Type::from);
+ case ENVIRONMENT -> validateConditionValues(condition, Environment::from);
+ case HOSTNAME -> validateConditionValues(condition, HostName::of);
+ case NODE_TYPE -> validateConditionValues(condition, NodeType::valueOf);
+ case SYSTEM -> throw new IllegalStateException("Flag data contains system dimension");
+ case TENANT_ID -> validateConditionValues(condition, TenantName::from);
+ case VESPA_VERSION -> throw new FlagValidationException(condition.type().toWire() + " " +
+ DimensionHelper.toWire(condition.dimension()) +
+ " condition is not supported");
+ case ZONE_ID -> validateConditionValues(condition, zoneIdString -> {
+ ZoneId zoneId = ZoneId.from(zoneIdString);
+ if (inController && !zones.contains(zoneId))
+ throw new FlagValidationException("Unknown zone: " + zoneIdString);
+ });
+ };
+ };
+ }));
+ }
+
+ private static int validateConditionValues(Condition condition, Consumer<String> valueValidator) {
+ condition.toCreateParams().values().forEach(value -> {
try {
valueValidator.accept(value);
} catch (IllegalArgumentException e) {
- String dimension = condition.get("dimension").asString().orElseThrow();
- String type = condition.get("type").asString().orElseThrow();
+ String dimension = DimensionHelper.toWire(condition.dimension());
+ String type = condition.type().toWire();
throw new FlagValidationException("Invalid %s '%s' in %s condition: %s".formatted(dimension, value, type, e.getMessage()));
}
});
+
+ return 0; // dummy to force switch expression
+ }
+
+ private static FlagData parseFlagData(FlagId flagId, String fileContent, ZoneRegistry zoneRegistry, boolean inController) {
+ if (fileContent.isBlank()) return new FlagData(flagId);
+
+ final JsonNode root;
+ try {
+ root = mapper.readTree(fileContent);
+ } catch (JsonProcessingException e) {
+ throw new FlagValidationException("Invalid JSON: " + e.getMessage());
+ }
+
+ removeCommentsRecursively(root);
+ removeNullRuleValues(root);
+ String normalizedRawData = root.toString();
+ FlagData flagData = FlagData.deserialize(normalizedRawData);
+
+ if (!flagId.equals(flagData.id()))
+ throw new FlagValidationException("Flag ID specified in file (%s) doesn't match the directory name (%s)"
+ .formatted(flagData.id(), flagId.toString()));
+
+ String serializedData = flagData.serializeToJson();
+ if (!JSON.equals(serializedData, normalizedRawData))
+ throw new FlagValidationException("""
+ Unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
+ %s
+ but deserializing this ended up with:
+ %s
+ These fields may be spelled wrong, or remove them?
+ See https://git.ouroath.com/vespa/hosted-feature-flags for more info on the JSON syntax
+ """.formatted(normalizedRawData, serializedData));
+
+ validateSystems(flagData);
+ flagData = flagData.partialResolve(new FetchVector().with(SYSTEM, zoneRegistry.system().value()));
+
+ validateForSystem(flagData, zoneRegistry, inController);
+
+ return flagData;
}
private static void removeCommentsRecursively(JsonNode node) {
@@ -312,56 +321,46 @@ public class SystemFlagsDataArchive {
public Builder() {}
- public Builder addFile(String filename, FlagData data) {
- files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data);
- return this;
- }
+ boolean maybeAddFile(Path filePath, String fileContent, ZoneRegistry zoneRegistry, boolean inController) {
+ String filename = filePath.getFileName().toString();
- public boolean hasFile(String filename, FlagData data) {
- return files.containsKey(data.id()) && files.get(data.id()).containsKey(filename);
- }
-
- public SystemFlagsDataArchive build() {
- Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>();
- files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map)));
- return new SystemFlagsDataArchive(copy);
- }
+ if (filename.startsWith("."))
+ return false; // Ignore files starting with '.'
- }
+ if (!inController && !FlagsTarget.filenameForSystem(filename, zoneRegistry.system()))
+ return false; // Ignore files for other systems
- private static class JsonAccessor {
- private final JsonNode jsonNode;
+ FlagId directoryDeducedFlagId = new FlagId(filePath.getName(filePath.getNameCount()-2).toString());
- public JsonAccessor(JsonNode jsonNode) {
- this.jsonNode = jsonNode;
- }
+ if (hasFile(filename, directoryDeducedFlagId))
+ throw new FlagValidationException("Flag data file in '%s' contains redundant flag data for id '%s' already set in another directory!"
+ .formatted(filePath, directoryDeducedFlagId));
- public JsonAccessor get(String fieldName) {
- if (jsonNode == null) {
- return this;
- } else {
- return new JsonAccessor(jsonNode.get(fieldName));
+ final FlagData flagData;
+ try {
+ flagData = parseFlagData(directoryDeducedFlagId, fileContent, zoneRegistry, inController);
+ } catch (FlagValidationException e) {
+ throw new FlagValidationException("In file " + filePath + ": " + e.getMessage());
}
- }
- public Optional<String> asString() {
- return jsonNode != null && jsonNode.isTextual() ? Optional.of(jsonNode.textValue()) : Optional.empty();
+ addFile(filename, flagData);
+ return true;
}
- public void forEachArrayElement(Consumer<JsonAccessor> consumer) {
- if (jsonNode != null && jsonNode.isArray()) {
- jsonNode.forEach(jsonNodeElement -> consumer.accept(new JsonAccessor(jsonNodeElement)));
- }
+ public Builder addFile(String filename, FlagData data) {
+ files.computeIfAbsent(data.id(), k -> new TreeMap<>()).put(filename, data);
+ return this;
}
- /** Returns true if this (JsonNode) is a string and equal to value. */
- public boolean isEqualTo(String value) {
- return jsonNode != null && jsonNode.isTextual() && Objects.equals(jsonNode.textValue(), value);
+ public boolean hasFile(String filename, FlagId id) {
+ return files.containsKey(id) && files.get(id).containsKey(filename);
}
- @Override
- public String toString() {
- return jsonNode == null ? "undefined" : jsonNode.toString();
+ public SystemFlagsDataArchive build() {
+ Map<FlagId, Map<String, FlagData>> copy = new TreeMap<>();
+ files.forEach((flagId, map) -> copy.put(flagId, new TreeMap<>(map)));
+ return new SystemFlagsDataArchive(copy);
}
+
}
}
diff --git a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java
index 2d0374dc888..759f21579d4 100644
--- a/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java
+++ b/controller-api/src/test/java/com/yahoo/vespa/hosted/controller/api/systemflags/v1/SystemFlagsDataArchiveTest.java
@@ -14,6 +14,7 @@ import com.yahoo.vespa.athenz.api.AthenzService;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagId;
import com.yahoo.vespa.flags.RawFlag;
+import com.yahoo.vespa.flags.json.Condition;
import com.yahoo.vespa.flags.json.FlagData;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
import org.junit.jupiter.api.Test;
@@ -28,10 +29,12 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
+import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.Optional;
import java.util.Set;
+import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -78,11 +81,11 @@ public class SystemFlagsDataArchiveTest {
can_serialize_and_deserialize_archive(true);
}
- private void can_serialize_and_deserialize_archive(boolean forceAddFiles) throws IOException {
+ private void can_serialize_and_deserialize_archive(boolean simulateInController) throws IOException {
File tempFile = File.createTempFile("serialized-flags-archive", null, temporaryFolder);
try (OutputStream out = new BufferedOutputStream(new FileOutputStream(tempFile))) {
- var archive = fromDirectory("system-flags", forceAddFiles);
- if (forceAddFiles)
+ var archive = fromDirectory("system-flags", simulateInController);
+ if (simulateInController)
archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
archive.toZip(out);
}
@@ -98,9 +101,9 @@ public class SystemFlagsDataArchiveTest {
retrieves_correct_flag_data_for_target(true);
}
- private void retrieves_correct_flag_data_for_target(boolean forceAddFiles) {
- var archive = fromDirectory("system-flags", forceAddFiles);
- if (forceAddFiles)
+ private void retrieves_correct_flag_data_for_target(boolean simulateInController) {
+ var archive = fromDirectory("system-flags", simulateInController);
+ if (simulateInController)
archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
assertArchiveReturnsCorrectTestFlagDataForTarget(archive);
}
@@ -111,9 +114,9 @@ public class SystemFlagsDataArchiveTest {
supports_multi_level_flags_directory(true);
}
- private void supports_multi_level_flags_directory(boolean forceAddFiles) {
- var archive = fromDirectory("system-flags-multi-level", forceAddFiles);
- if (forceAddFiles)
+ private void supports_multi_level_flags_directory(boolean simulateInController) {
+ var archive = fromDirectory("system-flags-multi-level", simulateInController);
+ if (simulateInController)
archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
assertFlagDataHasValue(archive, MY_TEST_FLAG, mainControllerTarget, "default");
}
@@ -124,9 +127,9 @@ public class SystemFlagsDataArchiveTest {
duplicated_flagdata_is_detected(true);
}
- private void duplicated_flagdata_is_detected(boolean forceAddFiles) {
+ private void duplicated_flagdata_is_detected(boolean simulateInController) {
Throwable exception = assertThrows(FlagValidationException.class, () -> {
- fromDirectory("system-flags-multi-level-with-duplicated-flagdata", forceAddFiles);
+ fromDirectory("system-flags-multi-level-with-duplicated-flagdata", simulateInController);
});
assertTrue(exception.getMessage().contains("contains redundant flag data for id 'my-test-flag' already set in another directory!"));
}
@@ -137,9 +140,9 @@ public class SystemFlagsDataArchiveTest {
empty_files_are_handled_as_no_flag_data_for_target(true);
}
- private void empty_files_are_handled_as_no_flag_data_for_target(boolean forceAddFiles) {
- var archive = fromDirectory("system-flags", forceAddFiles);
- if (forceAddFiles)
+ private void empty_files_are_handled_as_no_flag_data_for_target(boolean simulateInController) {
+ var archive = fromDirectory("system-flags", simulateInController);
+ if (simulateInController)
archive.validateAllFilesAreForTargets(Set.of(mainControllerTarget, prodUsWestCfgTarget));
assertNoFlagData(archive, FLAG_WITH_EMPTY_DATA, mainControllerTarget);
assertFlagDataHasValue(archive, FLAG_WITH_EMPTY_DATA, prodUsWestCfgTarget, "main.prod.us-west-1");
@@ -187,7 +190,7 @@ public class SystemFlagsDataArchiveTest {
fromDirectory("system-flags-with-unknown-field-name", true);
});
assertEquals("""
- flags/my-test-flag/main.prod.us-west-1.json contains unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
+ In file flags/my-test-flag/main.prod.us-west-1.json: Unknown non-comment fields or rules with null values: after removing any comment fields the JSON is:
{"id":"my-test-flag","rules":[{"condition":[{"type":"whitelist","dimension":"hostname","values":["foo.com"]}],"value":"default"}]}
but deserializing this ended up with:
{"id":"my-test-flag","rules":[{"value":"default"}]}
@@ -218,6 +221,7 @@ public class SystemFlagsDataArchiveTest {
void remove_comments_and_null_value_in_rules() {
assertTrue(JSON.equals("""
{
+ "id": "foo",
"rules": [
{
"conditions": [
@@ -249,8 +253,9 @@ public class SystemFlagsDataArchiveTest {
}
]
}""",
- SystemFlagsDataArchive.normalizeJson("""
+ normalizeJson("""
{
+ "id": "foo",
"comment": "bar",
"rules": [
{
@@ -289,83 +294,91 @@ public class SystemFlagsDataArchiveTest {
"value": true
}
]
- }""", Set.of(ZoneId.from("prod.us-west-1")))));
+ }""")));
+ }
+
+ private static String normalizeJson(String json) {
+ SystemFlagsDataArchive.Builder builder = new SystemFlagsDataArchive.Builder();
+ assertTrue(builder.maybeAddFile(Path.of("flags/temporary/foo/default.json"), json, createZoneRegistryMock(), true));
+ List<FlagData> flagData = builder.build().flagData(prodUsWestCfgTarget);
+ assertEquals(1, flagData.size());
+ return JSON.canonical(flagData.get(0).serializeToJson());
}
@Test
void normalize_json_succeed_on_valid_values() {
- normalizeJson("application", "\"a:b:c\"");
- normalizeJson("cloud", "\"yahoo\"");
- normalizeJson("cloud", "\"aws\"");
- normalizeJson("cloud", "\"gcp\"");
- normalizeJson("cluster-id", "\"some-id\"");
- normalizeJson("cluster-type", "\"admin\"");
- normalizeJson("cluster-type", "\"container\"");
- normalizeJson("cluster-type", "\"content\"");
- normalizeJson("console-user-email", "\"name@domain.com\"");
- normalizeJson("environment", "\"prod\"");
- normalizeJson("environment", "\"staging\"");
- normalizeJson("environment", "\"test\"");
- normalizeJson("hostname", "\"2080046-v6-11.ostk.bm2.prod.gq1.yahoo.com\"");
- normalizeJson("node-type", "\"tenant\"");
- normalizeJson("node-type", "\"host\"");
- normalizeJson("node-type", "\"config\"");
- normalizeJson("node-type", "\"host\"");
- normalizeJson("system", "\"main\"");
- normalizeJson("system", "\"public\"");
- normalizeJson("tenant", "\"vespa\"");
- normalizeJson("vespa-version", "\"8.201.13\"");
- normalizeJson("zone", "\"prod.us-west-1\"", Set.of(ZoneId.from("prod.us-west-1")));
- }
-
- private void normalizeJson(String dimension, String jsonValue) {
- normalizeJson(dimension, jsonValue, Set.of());
- }
-
- private void normalizeJson(String dimension, String jsonValue, Set<ZoneId> zones) {
- SystemFlagsDataArchive.normalizeJson("""
+ addFile(Condition.Type.WHITELIST, "application", "a:b:c");
+ addFile(Condition.Type.WHITELIST, "cloud", "yahoo");
+ addFile(Condition.Type.WHITELIST, "cloud", "aws");
+ addFile(Condition.Type.WHITELIST, "cloud", "gcp");
+ addFile(Condition.Type.WHITELIST, "cluster-id", "some-id");
+ addFile(Condition.Type.WHITELIST, "cluster-type", "admin");
+ addFile(Condition.Type.WHITELIST, "cluster-type", "container");
+ addFile(Condition.Type.WHITELIST, "cluster-type", "content");
+ addFile(Condition.Type.WHITELIST, "console-user-email", "name@domain.com");
+ addFile(Condition.Type.WHITELIST, "environment", "prod");
+ addFile(Condition.Type.WHITELIST, "environment", "staging");
+ addFile(Condition.Type.WHITELIST, "environment", "test");
+ addFile(Condition.Type.WHITELIST, "hostname", "2080046-v6-11.ostk.bm2.prod.gq1.yahoo.com");
+ addFile(Condition.Type.WHITELIST, "node-type", "tenant");
+ addFile(Condition.Type.WHITELIST, "node-type", "host");
+ addFile(Condition.Type.WHITELIST, "node-type", "config");
+ addFile(Condition.Type.WHITELIST, "node-type", "host");
+ addFile(Condition.Type.WHITELIST, "system", "main");
+ addFile(Condition.Type.WHITELIST, "system", "public");
+ addFile(Condition.Type.WHITELIST, "tenant", "vespa");
+ addFile(Condition.Type.RELATIONAL, "vespa-version", ">=8.201.13");
+ addFile(Condition.Type.WHITELIST, "zone", "prod.us-west-1");
+ }
+
+ private void addFile(Condition.Type type, String dimension, String jsonValue) {
+ SystemFlagsDataArchive.Builder builder = new SystemFlagsDataArchive.Builder();
+
+ String valuesField = type == Condition.Type.RELATIONAL ?
+ "\"predicate\": \"%s\"".formatted(jsonValue) :
+ "\"values\": [ \"%s\" ]".formatted(jsonValue);
+
+ assertTrue(builder.maybeAddFile(Path.of("flags/temporary/foo/default.json"), """
{
"id": "foo",
"rules": [
{
"conditions": [
{
- "type": "whitelist",
+ "type": "%s",
"dimension": "%s",
- "values": [ %s ]
+ %s
}
],
"value": true
}
]
}
- """.formatted(dimension, jsonValue), zones);
+ """.formatted(type.toWire(), dimension, valuesField),
+ createZoneRegistryMock(),
+ true));
}
@Test
void normalize_json_fail_on_invalid_values() {
- failNormalizeJson("application", "\"a.b.c\"", "Invalid application 'a.b.c' in whitelist condition: Application ids must be on the form tenant:application:instance, but was a.b.c");
- failNormalizeJson("cloud", "\"foo\"", "Unknown cloud: foo");
- // failNormalizeJson("cluster-id", ... any String is valid
- failNormalizeJson("cluster-type", "\"foo\"", "Invalid cluster-type 'foo' in whitelist condition: Illegal cluster type 'foo'");
- failNormalizeJson("console-user-email", "123", "Non-string console-user-email in whitelist condition: 123");
- failNormalizeJson("environment", "\"foo\"", "Invalid environment 'foo' in whitelist condition: 'foo' is not a valid environment identifier");
- failNormalizeJson("hostname", "\"not:a:hostname\"", "Invalid hostname 'not:a:hostname' in whitelist condition: hostname must match '(([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.?', but got: 'not:a:hostname'");
- failNormalizeJson("node-type", "\"footype\"", "Invalid node-type 'footype' in whitelist condition: No enum constant com.yahoo.config.provision.NodeType.footype");
- failNormalizeJson("system", "\"bar\"", "Invalid system 'bar' in whitelist condition: 'bar' is not a valid system");
- failNormalizeJson("tenant", "123", "Non-string tenant in whitelist condition: 123");
- failNormalizeJson("vespa-version", "\"not-a-version\"", "Invalid vespa-version 'not-a-version' in whitelist condition: Invalid version component in 'not-a-version'");
- failNormalizeJson("zone", "\"dev.%illegal\"", Set.of(ZoneId.from("prod.example-region")), "Invalid zone 'dev.%illegal' in whitelist condition: region name must match '[a-z]([a-z0-9-]*[a-z0-9])*', but got: '%illegal'");
- failNormalizeJson("zone", "\"dev.non-existing-zone\"", Set.of(ZoneId.from("prod.example-region")), "Unknown zone: dev.non-existing-zone");
- }
-
- private void failNormalizeJson(String dimension, String jsonValue, String expectedExceptionMessage) {
- failNormalizeJson(dimension, jsonValue, Set.of(), expectedExceptionMessage);
- }
-
- private void failNormalizeJson(String dimension, String jsonValue, Set<ZoneId> zones, String expectedExceptionMessage) {
+ failAddFile(Condition.Type.WHITELIST, "application", "a.b.c", "In file flags/temporary/foo/default.json: Invalid application 'a.b.c' in whitelist condition: Application ids must be on the form tenant:application:instance, but was a.b.c");
+ failAddFile(Condition.Type.WHITELIST, "cloud", "foo", "In file flags/temporary/foo/default.json: Unknown cloud: foo");
+ // cluster-id: any String is valid
+ failAddFile(Condition.Type.WHITELIST, "cluster-type", "foo", "In file flags/temporary/foo/default.json: Invalid cluster-type 'foo' in whitelist condition: Illegal cluster type 'foo'");
+ failAddFile(Condition.Type.WHITELIST, "console-user-email", "not-valid-email-address", "In file flags/temporary/foo/default.json: Invalid email address: not-valid-email-address");
+ failAddFile(Condition.Type.WHITELIST, "environment", "foo", "In file flags/temporary/foo/default.json: Invalid environment 'foo' in whitelist condition: 'foo' is not a valid environment identifier");
+ failAddFile(Condition.Type.WHITELIST, "hostname", "not:a:hostname", "In file flags/temporary/foo/default.json: Invalid hostname 'not:a:hostname' in whitelist condition: hostname must match '(([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9-]{0,61}[A-Za-z0-9])\\.?', but got: 'not:a:hostname'");
+ failAddFile(Condition.Type.WHITELIST, "node-type", "footype", "In file flags/temporary/foo/default.json: Invalid node-type 'footype' in whitelist condition: No enum constant com.yahoo.config.provision.NodeType.footype");
+ failAddFile(Condition.Type.WHITELIST, "system", "bar", "In file flags/temporary/foo/default.json: Invalid system 'bar' in whitelist condition: 'bar' is not a valid system");
+ failAddFile(Condition.Type.WHITELIST, "tenant", "a tenant", "In file flags/temporary/foo/default.json: Invalid tenant 'a tenant' in whitelist condition: tenant name must match '[a-zA-Z0-9_-]{1,256}', but got: 'a tenant'");
+ failAddFile(Condition.Type.WHITELIST, "vespa-version", "not-a-version", "In file flags/temporary/foo/default.json: whitelist vespa-version condition is not supported");
+ failAddFile(Condition.Type.RELATIONAL, "vespa-version", ">7.1.2", "In file flags/temporary/foo/default.json: Major Vespa version must be at least 8: 7.1.2");
+ failAddFile(Condition.Type.WHITELIST, "zone", "dev.%illegal", "In file flags/temporary/foo/default.json: Invalid zone 'dev.%illegal' in whitelist condition: region name must match '[a-z]([a-z0-9-]*[a-z0-9])*', but got: '%illegal'");
+ }
+
+ private void failAddFile(Condition.Type type, String dimension, String jsonValue, String expectedExceptionMessage) {
try {
- normalizeJson(dimension, jsonValue, zones);
+ addFile(type, dimension, jsonValue);
fail();
} catch (RuntimeException e) {
assertEquals(expectedExceptionMessage, e.getMessage());
@@ -380,8 +393,8 @@ public class SystemFlagsDataArchiveTest {
assertFlagDataHasValue(archive, MY_TEST_FLAG, prodUsWestCfgTarget, "main.prod.us-west-1");
}
- private SystemFlagsDataArchive fromDirectory(String testDirectory, boolean forceAddFiles) {
- return SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/" + testDirectory), createZoneRegistryMock(), forceAddFiles);
+ private SystemFlagsDataArchive fromDirectory(String testDirectory, boolean simulateInController) {
+ return SystemFlagsDataArchive.fromDirectory(Paths.get("src/test/resources/" + testDirectory), createZoneRegistryMock(), simulateInController);
}
@SuppressWarnings("unchecked") // workaround for mocking a method for generic return type
@@ -396,12 +409,21 @@ public class SystemFlagsDataArchiveTest {
when(registryMock.systemZone()).thenReturn(zoneApi);
when(registryMock.getConfigServerVipUri(any())).thenReturn(URI.create("http://localhost:8080/"));
when(registryMock.getConfigServerHttpsIdentity(any())).thenReturn(new AthenzService("domain", "servicename"));
+ ZoneList zones = mockZoneList("prod.us-west-1", "prod.us-east-3");
+ when(registryMock.zones()).thenReturn(zones);
+ ZoneList zonesIncludingSystem = mockZoneList("prod.us-west-1", "prod.us-east-3", "prod.controller");
+ when(registryMock.zonesIncludingSystem()).thenReturn(zonesIncludingSystem);
+ return registryMock;
+ }
+
+ @SuppressWarnings("unchecked") // workaround for mocking a method for generic return type
+ private static ZoneList mockZoneList(String... zones) {
ZoneList zoneListMock = mock(ZoneList.class);
when(zoneListMock.reachable()).thenReturn(zoneListMock);
when(zoneListMock.all()).thenReturn(zoneListMock);
- when(zoneListMock.zones()).thenReturn((List)List.of(new SimpleZone("prod.us-west-1"), new SimpleZone("prod.us-east-3")));
- when(registryMock.zones()).thenReturn(zoneListMock);
- return registryMock;
+ List<? extends ZoneApi> zoneList = Stream.of(zones).map(SimpleZone::new).toList();
+ when(zoneListMock.zones()).thenReturn((List) zoneList);
+ return zoneListMock;
}
private static void assertArchiveReturnsCorrectTestFlagDataForTarget(SystemFlagsDataArchive archive) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java
index c426c27418d..58c3b4da5e4 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/OsController.java
@@ -11,6 +11,7 @@ import com.yahoo.vespa.hosted.controller.versions.OsVersionStatus;
import com.yahoo.vespa.hosted.controller.versions.OsVersionTarget;
import java.time.Instant;
+import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
@@ -161,9 +162,12 @@ public record OsController(Controller controller) {
/** Remove certifications for non-existent OS versions */
public void removeStaleCertifications(OsVersionStatus currentStatus) {
try (Mutex lock = curator().lockCertifiedOsVersions()) {
- Set<OsVersion> knownVersions = currentStatus.versions().keySet();
+ Optional<OsVersion> minKnownVersion = currentStatus.versions().keySet().stream()
+ .filter(v -> !v.version().isEmpty())
+ .min(Comparator.naturalOrder());
+ if (minKnownVersion.isEmpty()) return;
Set<CertifiedOsVersion> certifiedVersions = new HashSet<>(readCertified());
- if (certifiedVersions.removeIf(cv -> !knownVersions.contains(cv.osVersion()))) {
+ if (certifiedVersions.removeIf(cv -> cv.osVersion().version().isBefore(minKnownVersion.get().version()))) {
curator().writeCertifiedOsVersions(certifiedVersions);
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java
index 56b7ffd01f9..9d909cb5ebf 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/SystemApplication.java
@@ -29,7 +29,7 @@ public enum SystemApplication {
configServerHost(InfrastructureApplication.CONFIG_SERVER_HOST),
configServer(InfrastructureApplication.CONFIG_SERVER),
proxyHost(InfrastructureApplication.PROXY_HOST),
- proxy(InfrastructureApplication.PROXY, proxyHost, configServer),
+ proxy(InfrastructureApplication.PROXY, configServer),
tenantHost(InfrastructureApplication.TENANT_HOST);
/** The tenant owning all system applications */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java
index b4c9b2ebd57..120c0a89f45 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackage.java
@@ -97,7 +97,9 @@ public class TestPackage {
keyPair = null;
this.certificate = null;
}
- this.applicationPackageStream = new ApplicationPackageStream(inZip, () -> __ -> false, () -> new Replacer() {
+ boolean isEnclave = isPublicSystem &&
+ !spec.cloudAccount(cloud, id.application().instance(), id.type().zone()).isUnspecified();
+ this.applicationPackageStream = new ApplicationPackageStream(inZip, () -> name -> name.endsWith(".xml"), () -> new Replacer() {
// Initially skips all declared entries, ensuring they're generated and appended after all input entries.
final Map<String, UnaryOperator<InputStream>> entries = new HashMap<>();
@@ -127,7 +129,7 @@ public class TestPackage {
__ -> new ByteArrayInputStream(servicesXml( ! isPublicSystem,
certificateValidFrom != null,
hasLegacyTests,
- testerResourcesFor(id.type().zone(), spec.requireInstance(id.application().instance())),
+ testerResourcesFor(id.type().zone(), spec.requireInstance(id.application().instance()), isEnclave),
testerApp)));
entries.put(deploymentFile,
@@ -225,7 +227,7 @@ public class TestPackage {
return new TestSummary(problems, suites);
}
- static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) {
+ static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec, boolean isEnclave) {
NodeResources nodeResources = spec.steps().stream()
.filter(step -> step.concerns(zone.environment()))
.findFirst()
@@ -233,6 +235,7 @@ public class TestPackage {
.map(NodeResources::fromLegacyName)
.orElse(zone.region().value().matches("^(aws|gcp)-.*") ? DEFAULT_TESTER_RESOURCES_CLOUD
: DEFAULT_TESTER_RESOURCES);
+ if (isEnclave) nodeResources = nodeResources.with(NodeResources.Architecture.x86_64);
return nodeResources.with(NodeResources.DiskSpeed.any);
}
@@ -245,8 +248,8 @@ public class TestPackage {
// Of the remaining memory, split 50/50 between Surefire running the tests and the rest
int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2);
- String resourceString = Text.format("<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>",
- resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name());
+ String resourceString = Text.format("<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\" architecture=\"%s\"/>",
+ resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name(), resources.architecture().name());
String runtimeProviderClass = config.runtimeProviderClass();
String tenantCdBundle = config.tenantCdBundle();
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
index 998b72665d7..00c9bd165a9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainer.java
@@ -271,7 +271,7 @@ public class ResourceMeterMaintainer extends ControllerMaintainer {
private Metric.Context getMetricContext(ApplicationId applicationId, ZoneId zoneId) {
return metric.createContext(Map.of(
- "tenant", applicationId.tenant().value(),
+ "tenantName", applicationId.tenant().value(),
"applicationId", applicationId.toFullString(),
"zoneId", zoneId.value()
));
@@ -279,7 +279,7 @@ public class ResourceMeterMaintainer extends ControllerMaintainer {
private Metric.Context getMetricContext(ResourceSnapshot snapshot) {
return metric.createContext(Map.of(
- "tenant", snapshot.getApplicationId().tenant().value(),
+ "tenantName", snapshot.getApplicationId().tenant().value(),
"applicationId", snapshot.getApplicationId().toFullString(),
"zoneId", snapshot.getZoneId().value(),
"architecture", snapshot.resources().architecture()
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
index 43c8e7c9469..c526b335c90 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
@@ -23,7 +23,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.application.Change;
-import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.ConvergenceSummary;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentStatus;
@@ -53,7 +52,6 @@ import java.util.Locale;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Optional;
-import java.util.SortedMap;
import java.util.stream.Stream;
import static com.yahoo.config.application.api.DeploymentSpec.UpgradePolicy.canary;
@@ -109,10 +107,10 @@ class JobControllerApiHandlerHelper {
int limit = limitStr.map(Integer::parseInt).orElse(Integer.MAX_VALUE);
toSlime(cursor.setArray("runs"), runs.values(), application, limit, baseUriForJobType);
- controller.applications().decideCloudAccountOf(new DeploymentId(id.application(),
- runs.lastEntry().getValue().id().job().type().zone()), // Urgh, must use a job with actual zone.
- application.deploymentSpec())
- .ifPresent(cloudAccount -> cursor.setObject("enclave").setString("cloudAccount", cloudAccount.value()));
+ Optional.ofNullable(runs.lastEntry())
+ .map(entry -> new DeploymentId(id.application(), entry.getValue().id().job().type().zone())) // Urgh, must use a job with actual zone.
+ .flatMap(deployment -> controller.applications().decideCloudAccountOf(deployment, application.deploymentSpec()))
+ .ifPresent(cloudAccount -> cursor.setObject("enclave").setString("cloudAccount", cloudAccount.value()));
return new SlimeJsonResponse(slime);
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java
index f529d81bf32..4c61fe7c77d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/TestPackageTest.java
@@ -148,8 +148,8 @@ public class TestPackageTest {
"components/foo-tests.jar",
"artifacts/key"),
bundlePackage.keySet());
- assertEquals(Map.of(),
- unzip(bundleTests.asApplicationPackage().truncatedPackage().zippedContent()));
+ assertEquals(Set.of("deployment.xml", "services.xml"),
+ unzip(bundleTests.asApplicationPackage().truncatedPackage().zippedContent()).keySet());
}
@Test
@@ -221,10 +221,10 @@ public class TestPackageTest {
</deployment>
""");
- NodeResources firstResources = TestPackage.testerResourcesFor(ZoneId.from("prod", "gcp-us-west-1"), spec.requireInstance("first"));
- assertEquals(TestPackage.DEFAULT_TESTER_RESOURCES_CLOUD, firstResources);
+ NodeResources firstResources = TestPackage.testerResourcesFor(ZoneId.from("prod", "gcp-us-west-1"), spec.requireInstance("first"), true);
+ assertEquals(TestPackage.DEFAULT_TESTER_RESOURCES_CLOUD.with(NodeResources.Architecture.x86_64), firstResources);
- NodeResources secondResources = TestPackage.testerResourcesFor(ZoneId.from("prod", "us-west-1"), spec.requireInstance("second"));
+ NodeResources secondResources = TestPackage.testerResourcesFor(ZoneId.from("prod", "us-west-1"), spec.requireInstance("second"), false);
assertEquals(6, secondResources.vcpu(), 1e-9);
assertEquals(16, secondResources.memoryGb(), 1e-9);
assertEquals(100, secondResources.diskGb(), 1e-9);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
index 63d479d4c6c..dbb7f80df0e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ZoneRegistryMock.java
@@ -61,8 +61,8 @@ public class ZoneRegistryMock extends AbstractComponent implements ZoneRegistry
public ZoneRegistryMock(SystemName system) {
this.system = system;
if (system.isPublic()) {
- this.zones = List.of(ZoneApiMock.fromId("test.us-east-1"),
- ZoneApiMock.fromId("staging.us-east-3"),
+ this.zones = List.of(ZoneApiMock.newBuilder().withId("test.us-east-1").withCloud("aws").withCloudNativeAvailabilityZone("use1-az4").build(),
+ ZoneApiMock.newBuilder().withId("staging.us-east-3").withCloud("aws").withCloudNativeAvailabilityZone("use3-az1").build(),
ZoneApiMock.newBuilder().withId("prod.aws-us-east-1c").withCloud("aws").withCloudNativeAvailabilityZone("use1-az2").build(),
ZoneApiMock.newBuilder().withId("prod.aws-eu-west-1a").withCloud("aws").withCloudNativeAvailabilityZone("euw1-az3").build(),
ZoneApiMock.newBuilder().withId("dev.aws-us-east-1c").withCloud("aws").withCloudNativeAvailabilityZone("use1-az2").build());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java
index af535abce26..6f4052bf0ef 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OsVersionStatusUpdaterTest.java
@@ -68,12 +68,14 @@ public class OsVersionStatusUpdaterTest {
.filter(osVersion -> !osVersion.version().isEmpty())
.collect(Collectors.toSet());
List<OsVersion> versionsToCertify = new ArrayList<>(knownVersions);
- versionsToCertify.addAll(List.of(new OsVersion(Version.fromString("95.0.1"), cloud),
- new OsVersion(Version.fromString("98.0.2"), cloud)));
+ OsVersion futureVersion = new OsVersion(Version.fromString("98.0.2"), cloud); // Keep future version
+ versionsToCertify.addAll(List.of(new OsVersion(Version.fromString("3.11"), cloud),
+ futureVersion));
for (OsVersion version : versionsToCertify) {
tester.controller().os().certify(version.version(), version.cloud(), Version.fromString("1.2.3"));
}
- assertEquals(knownVersions.size() + 2, certifiedOsVersions(tester).size());
+ knownVersions.add(futureVersion);
+ assertEquals(knownVersions.size() + 1, certifiedOsVersions(tester).size());
statusUpdater.maintain();
assertEquals(knownVersions, certifiedOsVersions(tester));
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
index 8196aa48197..fac05fc125f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
@@ -81,7 +81,7 @@ public class ResourceMeterMaintainerTest {
assertEquals(1.72,
(Double) metrics.getMetric(context ->
z1.value().equals(context.get("zoneId")) &&
- app1.tenant().value().equals(context.get("tenant")),
+ app1.tenant().value().equals(context.get("tenantName")),
"metering.cost.hourly").get(),
Double.MIN_VALUE);
}
@@ -109,8 +109,8 @@ public class ResourceMeterMaintainerTest {
assertEquals(tester.clock().millis() / 1000, metrics.getMetric("metering_last_reported"));
assertEquals(2224.0d, (Double) metrics.getMetric("metering_total_reported"), Double.MIN_VALUE);
- assertEquals(24d, (Double) metrics.getMetric(context -> "tenant1".equals(context.get("tenant")), "metering.vcpu").get(), Double.MIN_VALUE);
- assertEquals(40d, (Double) metrics.getMetric(context -> "tenant2".equals(context.get("tenant")), "metering.vcpu").get(), Double.MIN_VALUE);
+ assertEquals(24d, (Double) metrics.getMetric(context -> "tenant1".equals(context.get("tenantName")), "metering.vcpu").get(), Double.MIN_VALUE);
+ assertEquals(40d, (Double) metrics.getMetric(context -> "tenant2".equals(context.get("tenantName")), "metering.vcpu").get(), Double.MIN_VALUE);
// Metering is not refreshed
assertFalse(resourceClient.hasRefreshedMaterializedView());
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java
index a7360f3d2d8..d3d66715202 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/SystemUpgraderTest.java
@@ -324,9 +324,9 @@ public class SystemUpgraderTest {
for (var zone : List.of(zone1, zone2)) {
systemUpgrader.maintain();
completeUpgrade(List.of(SystemApplication.tenantHost,
- SystemApplication.proxyHost,
- SystemApplication.configServerHost),
- version2, zone);
+ SystemApplication.proxyHost,
+ SystemApplication.configServerHost),
+ version2, zone);
completeUpgrade(SystemApplication.configServer, version2, zone);
systemUpgrader.maintain();
completeUpgrade(SystemApplication.proxy, version2, zone);
@@ -341,12 +341,12 @@ public class SystemUpgraderTest {
for (var zone : List.of(zone2, zone1)) {
systemUpgrader.maintain();
completeUpgrade(List.of(SystemApplication.tenantHost,
- SystemApplication.configServerHost,
- SystemApplication.proxy),
- version1, zone);
+ SystemApplication.configServerHost,
+ SystemApplication.proxyHost,
+ SystemApplication.proxy),
+ version1, zone);
convergeServices(SystemApplication.proxy, zone);
- List<SystemApplication> lastToDowngrade = List.of(SystemApplication.configServer,
- SystemApplication.proxyHost);
+ List<SystemApplication> lastToDowngrade = List.of(SystemApplication.configServer);
assertWantedVersion(lastToDowngrade, version2, zone);
// ... and then configserver and proxyhost
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
index 93937bdc4af..905330c6daf 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
@@ -14,13 +14,10 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RevisionId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TestReport;
-import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
-import com.yahoo.vespa.hosted.controller.notification.Notification.Type;
-import com.yahoo.vespa.hosted.controller.notification.NotificationSource;
import org.junit.jupiter.api.Test;
import java.io.ByteArrayOutputStream;
@@ -34,6 +31,7 @@ import java.util.List;
import java.util.Optional;
import static com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException.ErrorCode.INVALID_APPLICATION_PACKAGE;
+import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.applicationPackage;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.devAwsUsEast2a;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.devUsEast1;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.productionUsCentral1;
@@ -42,8 +40,6 @@ import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.pro
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.stagingTest;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.systemTest;
import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.testUsCentral1;
-import static com.yahoo.vespa.hosted.controller.deployment.DeploymentContext.applicationPackage;
-import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.deploymentFailed;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.installationFailed;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.invalidApplication;
import static com.yahoo.vespa.hosted.controller.deployment.RunStatus.running;
@@ -208,16 +204,18 @@ public class JobControllerApiHandlerHelperTest {
void testEnclave() {
var cloudAccount = CloudAccount.from("aws:123456789012");
var applicationPackage = new ApplicationPackageBuilder()
+ .cloudAccount(cloudAccount.value())
.stagingTest()
.systemTest()
- .region("aws-us-east-1c", cloudAccount.value())
+ .region("aws-us-east-1c")
.build();
var tester = new DeploymentTester(new ControllerTester(SystemName.Public));
tester.controllerTester().flagSource().withListFlag(PermanentFlags.CLOUD_ACCOUNTS.id(), List.of(cloudAccount.value()), String.class);
- tester.controllerTester().zoneRegistry().configureCloudAccount(cloudAccount, ZoneId.from("prod.aws-us-east-1c"));
+ tester.controllerTester().zoneRegistry().configureCloudAccount(cloudAccount, systemTest.zone(), stagingTest.zone(), ZoneId.from("prod.aws-us-east-1c"));
var app = tester.newDeploymentContext();
app.submit(applicationPackage).deploy();
+ assertEquals(Optional.of(cloudAccount), tester.controllerTester().configServer().cloudAccount(app.deploymentIdIn(systemTest.zone())));
assertResponse(JobControllerApiHandlerHelper.overviewResponse(tester.controller(), app.application().id(), URI.create("https://some.url:43/root/")), "overview-enclave.json");
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json
index 3673c1bdf07..9d82ed97849 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-enclave.json
@@ -5,11 +5,11 @@
"steps": [
{
"type": "instance",
- "dependencies": [ ],
+ "dependencies": [],
"declared": true,
"instance": "default",
"readyAt": 0,
- "deploying": { },
+ "deploying": {},
"latestVersions": {
"platform": {
"platform": "6.1.0",
@@ -21,7 +21,7 @@
"upgrade": false
}
],
- "blockers": [ ]
+ "blockers": []
},
"application": {
"application": {
@@ -42,21 +42,24 @@
}
}
],
- "blockers": [ ]
+ "blockers": []
}
},
"delayCause": null
},
{
"type": "test",
- "dependencies": [ ],
+ "dependencies": [],
"declared": true,
"instance": "default",
"readyAt": 0,
"jobName": "staging-test",
"url": "https://some.url:43/instance/default/job/staging-test",
"environment": "staging",
- "toRun": [ ],
+ "toRun": [],
+ "enclave": {
+ "cloudAccount": "aws:123456789012"
+ },
"runs": [
{
"id": 1,
@@ -137,14 +140,17 @@
},
{
"type": "test",
- "dependencies": [ ],
+ "dependencies": [],
"declared": true,
"instance": "default",
"readyAt": 0,
"jobName": "system-test",
"url": "https://some.url:43/instance/default/job/system-test",
"environment": "test",
- "toRun": [ ],
+ "toRun": [],
+ "enclave": {
+ "cloudAccount": "aws:123456789012"
+ },
"runs": [
{
"id": 1,
@@ -209,11 +215,7 @@
},
{
"type": "deployment",
- "dependencies": [
- 0,
- 1,
- 2
- ],
+ "dependencies": [0, 1, 2],
"declared": true,
"instance": "default",
"readyAt": 1600000000000,
@@ -228,7 +230,7 @@
"sourceUrl": "repository1/tree/commit1",
"commit": "commit1"
},
- "toRun": [ ],
+ "toRun": [],
"enclave": {
"cloudAccount": "aws:123456789012"
},
diff --git a/controller-server/src/test/resources/test_runner_services.xml-cd b/controller-server/src/test/resources/test_runner_services.xml-cd
index 4bf3a78801d..35ad0d31577 100644
--- a/controller-server/src/test/resources/test_runner_services.xml-cd
+++ b/controller-server/src/test/resources/test_runner_services.xml-cd
@@ -33,7 +33,7 @@
</component>
<nodes count="1">
- <resources vcpu="2.00" memory="12.00Gb" disk="75.00Gb" disk-speed="fast" storage-type="local"/>
+ <resources vcpu="2.00" memory="12.00Gb" disk="75.00Gb" disk-speed="fast" storage-type="local" architecture="any"/>
</nodes>
</container>
</services>
diff --git a/controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd b/controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd
index 526fd12965b..91317f1490c 100644
--- a/controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd
+++ b/controller-server/src/test/resources/test_runner_services_with_legacy_tests.xml-cd
@@ -34,7 +34,7 @@
<nodes count="1">
<jvm allocated-memory="17%"/>
- <resources vcpu="2.00" memory="12.00Gb" disk="75.00Gb" disk-speed="fast" storage-type="local"/>
+ <resources vcpu="2.00" memory="12.00Gb" disk="75.00Gb" disk-speed="fast" storage-type="local" architecture="any"/>
</nodes>
</container>
</services>
diff --git a/dependency-versions/pom.xml b/dependency-versions/pom.xml
index 5f75b042722..074cb2190da 100644
--- a/dependency-versions/pom.xml
+++ b/dependency-versions/pom.xml
@@ -110,7 +110,7 @@
<org.json.vespa.version>20230227</org.json.vespa.version>
<org.lz4.vespa.version>1.8.0</org.lz4.vespa.version>
<prometheus.client.vespa.version>0.6.0</prometheus.client.vespa.version>
- <protobuf.vespa.version>3.21.7</protobuf.vespa.version>
+ <protobuf.vespa.version>3.24.0</protobuf.vespa.version>
<spifly.vespa.version>1.3.6</spifly.vespa.version>
<surefire.vespa.version>3.0.0-M9</surefire.vespa.version>
<wiremock.vespa.version>2.35.0</wiremock.vespa.version>
diff --git a/document/src/test/java/com/yahoo/document/DocumentTestCase.java b/document/src/test/java/com/yahoo/document/DocumentTestCase.java
index 33b77cb1878..e5f6453c581 100644
--- a/document/src/test/java/com/yahoo/document/DocumentTestCase.java
+++ b/document/src/test/java/com/yahoo/document/DocumentTestCase.java
@@ -42,7 +42,7 @@ import static org.junit.Assert.fail;
/**
* Test for Document and all its features, including (de)serialization.
*
- * @author <a href="thomasg@yahoo-inc.com>Thomas Gundersen</a>
+ * @author Thomas Gundersen
* @author bratseth
*/
public class DocumentTestCase extends DocumentTestCaseBase {
diff --git a/document/src/tests/serialization/vespadocumentserializer_test.cpp b/document/src/tests/serialization/vespadocumentserializer_test.cpp
index 1839005d720..03878f43e4b 100644
--- a/document/src/tests/serialization/vespadocumentserializer_test.cpp
+++ b/document/src/tests/serialization/vespadocumentserializer_test.cpp
@@ -686,7 +686,7 @@ void deserializeAndCheck(const string &file_name, FieldValueT &value,
const string &field_name) {
File file(file_name);
file.open(File::READONLY);
- vector<char> content(file.stat()._size);
+ vector<char> content(file.getFileSize());
size_t r = file.read(&content[0], content.size(), 0);
ASSERT_EQUAL(content.size(), r);
diff --git a/eval/src/vespa/eval/eval/inline_operation.h b/eval/src/vespa/eval/eval/inline_operation.h
index 9b862b59e37..910fa9cffaa 100644
--- a/eval/src/vespa/eval/eval/inline_operation.h
+++ b/eval/src/vespa/eval/eval/inline_operation.h
@@ -4,6 +4,7 @@
#include "operation.h"
#include <vespa/vespalib/util/typify.h>
+#include <cblas.h>
#include <cmath>
namespace vespalib::eval::operation {
@@ -148,4 +149,31 @@ void apply_op2_vec_vec(D *dst, const A *a, const B *b, size_t n, OP2 &&f) {
//-----------------------------------------------------------------------------
+template <typename LCT, typename RCT>
+struct DotProduct {
+ static double apply(const LCT * lhs, const RCT * rhs, size_t count) {
+ double result = 0.0;
+ for (size_t i = 0; i < count; ++i) {
+ result += lhs[i] * rhs[i];
+ }
+ return result;
+ }
+};
+
+template <>
+struct DotProduct<float,float> {
+ static float apply(const float * lhs, const float * rhs, size_t count) {
+ return cblas_sdot(count, lhs, 1, rhs, 1);
+ }
+};
+
+template <>
+struct DotProduct<double,double> {
+ static double apply(const double * lhs, const double * rhs, size_t count) {
+ return cblas_ddot(count, lhs, 1, rhs, 1);
+ }
+};
+
+//-----------------------------------------------------------------------------
+
}
diff --git a/eval/src/vespa/eval/instruction/best_similarity_function.cpp b/eval/src/vespa/eval/instruction/best_similarity_function.cpp
index 964f27a4564..415a08d0d93 100644
--- a/eval/src/vespa/eval/instruction/best_similarity_function.cpp
+++ b/eval/src/vespa/eval/instruction/best_similarity_function.cpp
@@ -1,10 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "best_similarity_function.h"
-#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/inline_operation.h>
#include <vespa/eval/eval/value.h>
#include <vespa/vespalib/util/binary_hamming_distance.h>
-#include <cblas.h>
namespace vespalib::eval {
@@ -22,7 +21,7 @@ struct BestSimParam {
struct UseDotProduct {
static float calc(const float *pri, const float *sec, size_t size) {
- return cblas_sdot(size, pri, 1, sec, 1);
+ return DotProduct<float,float>::apply(pri, sec, size);
}
};
diff --git a/eval/src/vespa/eval/instruction/dense_dot_product_function.cpp b/eval/src/vespa/eval/instruction/dense_dot_product_function.cpp
index a2048707685..de9e029f377 100644
--- a/eval/src/vespa/eval/instruction/dense_dot_product_function.cpp
+++ b/eval/src/vespa/eval/instruction/dense_dot_product_function.cpp
@@ -1,9 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "dense_dot_product_function.h"
-#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/inline_operation.h>
#include <vespa/eval/eval/value.h>
-#include <cblas.h>
namespace vespalib::eval {
@@ -16,26 +15,7 @@ template <typename LCT, typename RCT>
void my_dot_product_op(InterpretedFunction::State &state, uint64_t) {
auto lhs_cells = state.peek(1).cells().typify<LCT>();
auto rhs_cells = state.peek(0).cells().typify<RCT>();
- double result = 0.0;
- const LCT *lhs = lhs_cells.cbegin();
- const RCT *rhs = rhs_cells.cbegin();
- for (size_t i = 0; i < lhs_cells.size(); ++i) {
- result += ((*lhs++) * (*rhs++));
- }
- state.pop_pop_push(state.stash.create<DoubleValue>(result));
-}
-
-void my_cblas_double_dot_product_op(InterpretedFunction::State &state, uint64_t) {
- auto lhs_cells = state.peek(1).cells().typify<double>();
- auto rhs_cells = state.peek(0).cells().typify<double>();
- double result = cblas_ddot(lhs_cells.size(), lhs_cells.cbegin(), 1, rhs_cells.cbegin(), 1);
- state.pop_pop_push(state.stash.create<DoubleValue>(result));
-}
-
-void my_cblas_float_dot_product_op(InterpretedFunction::State &state, uint64_t) {
- auto lhs_cells = state.peek(1).cells().typify<float>();
- auto rhs_cells = state.peek(0).cells().typify<float>();
- double result = cblas_sdot(lhs_cells.size(), lhs_cells.cbegin(), 1, rhs_cells.cbegin(), 1);
+ double result = DotProduct<LCT,RCT>::apply(lhs_cells.cbegin(), rhs_cells.cbegin(), lhs_cells.size());
state.pop_pop_push(state.stash.create<DoubleValue>(result));
}
@@ -44,19 +24,6 @@ struct MyDotProductOp {
static auto invoke() { return my_dot_product_op<LCT,RCT>; }
};
-InterpretedFunction::op_function my_select(CellType lct, CellType rct) {
- if (lct == rct) {
- if (lct == CellType::DOUBLE) {
- return my_cblas_double_dot_product_op;
- }
- if (lct == CellType::FLOAT) {
- return my_cblas_float_dot_product_op;
- }
- }
- using MyTypify = TypifyCellType;
- return typify_invoke<2,MyTypify,MyDotProductOp>(lct, rct);
-}
-
} // namespace <unnamed>
DenseDotProductFunction::DenseDotProductFunction(const TensorFunction &lhs_in,
@@ -68,7 +35,8 @@ DenseDotProductFunction::DenseDotProductFunction(const TensorFunction &lhs_in,
InterpretedFunction::Instruction
DenseDotProductFunction::compile_self(const ValueBuilderFactory &, Stash &) const
{
- auto op = my_select(lhs().result_type().cell_type(), rhs().result_type().cell_type());
+ auto op = typify_invoke<2,TypifyCellType,MyDotProductOp>(lhs().result_type().cell_type(),
+ rhs().result_type().cell_type());
return InterpretedFunction::Instruction(op);
}
diff --git a/eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp b/eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp
index 8bfa4b07980..47e1dbb58ed 100644
--- a/eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp
+++ b/eval/src/vespa/eval/instruction/mixed_112_dot_product.cpp
@@ -5,7 +5,6 @@
#include <vespa/vespalib/util/typify.h>
#include <vespa/vespalib/util/require.h>
#include <vespa/eval/eval/visit_stuff.h>
-#include <cblas.h>
#include <algorithm>
#include <optional>
@@ -17,14 +16,6 @@ using namespace instruction;
namespace {
-template <typename CT> double my_dot_product(const CT * lhs, const CT * rhs, size_t count);
-template <> double my_dot_product<double>(const double * lhs, const double * rhs, size_t count) {
- return cblas_ddot(count, lhs, 1, rhs, 1);
-}
-template <> double my_dot_product<float>(const float * lhs, const float * rhs, size_t count) {
- return cblas_sdot(count, lhs, 1, rhs, 1);
-}
-
template <typename T, size_t N>
ConstArrayRef<const T *> as_ccar(std::array<T *, N> &array) {
return {array.data(), array.size()};
@@ -54,10 +45,11 @@ double my_mixed_112_dot_product_fallback(const Value::Index &a_idx, const Value:
auto outer = a_idx.create_view({});
auto model = c_idx.create_view({&single_dim[0], 1});
outer->lookup({});
+ using dot_product = DotProduct<CT,CT>;
while (outer->next_result(as_car(c_addr_ref[0]), a_space)) {
model->lookup(as_ccar(c_addr_ref));
if (model->next_result({}, c_space)) {
- result += my_dot_product<CT>(b_cells, c_cells + (c_space * dense_size), dense_size) * a_cells[a_space];
+ result += dot_product::apply(b_cells, c_cells + (c_space * dense_size), dense_size) * a_cells[a_space];
}
}
return result;
@@ -70,11 +62,12 @@ double my_fast_mixed_112_dot_product(const FastAddrMap *a_map, const FastAddrMap
{
double result = 0.0;
const auto &a_labels = a_map->labels();
+ using dot_product = DotProduct<CT,CT>;
for (size_t a_space = 0; a_space < a_labels.size(); ++a_space) {
if (a_cells[a_space] != 0.0) { // handle pseudo-sparse input
auto c_space = c_map->lookup_singledim(a_labels[a_space]);
if (c_space != FastAddrMap::npos()) {
- result += my_dot_product<CT>(b_cells, c_cells + (c_space * dense_size), dense_size) * a_cells[a_space];
+ result += dot_product::apply(b_cells, c_cells + (c_space * dense_size), dense_size) * a_cells[a_space];
}
}
}
diff --git a/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp b/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp
index 248f909fcf5..5880a90a2cd 100644
--- a/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp
+++ b/eval/src/vespa/eval/instruction/mixed_inner_product_function.cpp
@@ -1,9 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "mixed_inner_product_function.h"
-#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/inline_operation.h>
#include <vespa/eval/eval/value.h>
-#include <cblas.h>
namespace vespalib::eval {
@@ -12,31 +11,6 @@ using namespace operation;
namespace {
-template <typename LCT, typename RCT>
-struct MyDotProduct {
- static double apply(const LCT * lhs, const RCT * rhs, size_t count) {
- double result = 0.0;
- for (size_t i = 0; i < count; ++i) {
- result += lhs[i] * rhs[i];
- }
- return result;
- }
-};
-
-template <>
-struct MyDotProduct<double,double> {
- static double apply(const double * lhs, const double * rhs, size_t count) {
- return cblas_ddot(count, lhs, 1, rhs, 1);
- }
-};
-
-template <>
-struct MyDotProduct<float,float> {
- static float apply(const float * lhs, const float * rhs, size_t count) {
- return cblas_sdot(count, lhs, 1, rhs, 1);
- }
-};
-
struct MixedInnerProductParam {
ValueType res_type;
size_t vector_size;
@@ -66,8 +40,9 @@ void my_mixed_inner_product_op(InterpretedFunction::State &state, uint64_t param
ArrayRef<OCT> out_cells = state.stash.create_uninitialized_array<OCT>(num_output_cells);
const MCT *m_cp = m_cells.begin();
const VCT *v_cp = v_cells.begin();
+ using dot_product = DotProduct<MCT,VCT>;
for (OCT &out : out_cells) {
- out = MyDotProduct<MCT,VCT>::apply(m_cp, v_cp, param.vector_size);
+ out = dot_product::apply(m_cp, v_cp, param.vector_size);
m_cp += param.vector_size;
}
assert(m_cp == m_cells.end());
diff --git a/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp b/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp
index a76eaa38925..41017bc3687 100644
--- a/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp
+++ b/eval/src/vespa/eval/instruction/sum_max_dot_product_function.cpp
@@ -1,9 +1,8 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "sum_max_dot_product_function.h"
-#include <vespa/eval/eval/operation.h>
+#include <vespa/eval/eval/inline_operation.h>
#include <vespa/eval/eval/value.h>
-#include <cblas.h>
namespace vespalib::eval {
@@ -16,11 +15,12 @@ void my_sum_max_dot_product_op(InterpretedFunction::State &state, uint64_t dp_si
double result = 0.0;
auto query_cells = state.peek(1).cells().typify<float>();
auto document_cells = state.peek(0).cells().typify<float>();
+ using dot_product = DotProduct<float,float>;
if ((query_cells.size() > 0) && (document_cells.size() > 0)) {
for (const float *query = query_cells.begin(); query < query_cells.end(); query += dp_size) {
float max_dp = aggr::Max<float>::null_value();
for (const float *document = document_cells.begin(); document < document_cells.end(); document += dp_size) {
- max_dp = aggr::Max<float>::combine(max_dp, cblas_sdot(dp_size, query, 1, document, 1));
+ max_dp = aggr::Max<float>::combine(max_dp, dot_product::apply(query, document, dp_size));
}
result += max_dp;
}
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/EmptyFileReferenceData.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/EmptyFileReferenceData.java
deleted file mode 100644
index ea8461b42f3..00000000000
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/EmptyFileReferenceData.java
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.filedistribution;
-
-import com.yahoo.config.FileReference;
-
-import java.nio.ByteBuffer;
-
-public class EmptyFileReferenceData extends FileReferenceData {
-
- private final byte[] content;
- private final long xxhash;
- private int contentRead = 0;
-
- private EmptyFileReferenceData(FileReference fileReference, String filename, Type type, byte[] content, long xxhash) {
- super(fileReference, filename, type, CompressionType.gzip);
- this.content = content;
- this.xxhash = xxhash;
- }
-
- public static FileReferenceData empty(FileReference fileReference, String filename) {
- return new EmptyFileReferenceData(fileReference, filename, FileReferenceData.Type.file, new byte[0], 0);
- }
-
- public ByteBuffer content() {
- return ByteBuffer.wrap(content);
- }
-
- @Override
- public int nextContent(ByteBuffer bb) {
- if (contentRead >= content.length) {
- return -1;
- } else {
- int left = content.length - contentRead;
- int size = Math.min(bb.remaining(), left);
- bb.put(content, contentRead, size);
- contentRead += size;
- return size;
- }
- }
-
- @Override
- public long xxhash() {
- return xxhash;
- }
-
- @Override
- public long size() {
- return content.length;
- }
-
- @Override
- public void close() {
- // no-op
- }
-}
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
index b37fe02226b..a567a3bc4b3 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReceiver.java
@@ -243,7 +243,7 @@ public class FileReceiver {
synchronized (sessions) {
if (sessions.containsKey(sessionId)) {
retval = 1;
- log.severe("Session id " + sessionId + " already exist, impossible. Request from(" + req.target() + ")");
+ log.severe("Session id " + sessionId + " already exist, impossible. Request from " + req.target());
} else {
try {
sessions.put(sessionId, new Session(downloadDirectory, sessionId, reference,
diff --git a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java
index 3f83cbea506..87f45db5221 100644
--- a/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java
+++ b/filedistribution/src/main/java/com/yahoo/vespa/filedistribution/FileReferenceData.java
@@ -10,7 +10,7 @@ import java.nio.ByteBuffer;
*
* @author hmusum
*/
-public abstract class FileReferenceData {
+public abstract class FileReferenceData implements AutoCloseable {
public enum Type { file, compressed }
public enum CompressionType { gzip, lz4, zstd }
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 3fd2b1856e6..d5eadf45b08 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -301,7 +301,7 @@ public class Flags {
HOSTNAME);
public static final UnboundBooleanFlag ALLOW_MORE_THAN_ONE_CONTENT_GROUP_DOWN = defineFeatureFlag(
- "allow-more-than-one-content-group-down", false, List.of("hmusum"), "2023-04-14", "2023-08-15",
+ "allow-more-than-one-content-group-down", true, List.of("hmusum"), "2023-04-14", "2023-09-01",
"Whether to enable possible configuration of letting more than one content group down",
"Takes effect at redeployment",
APPLICATION_ID);
@@ -318,13 +318,8 @@ public class Flags {
"Takes effect on next run of CertPoolMaintainer"
);
- public static final UnboundStringFlag CONTAINER_IMAGE_PULL_IO_MAX = defineStringFlag(
- "container-image-pull-io-max", "", List.of("freva"), "2023-08-04", "2023-09-15",
- "The value (excluding the device name) of io.max cgroup used by container image pull, e.g. 'wiops=100', or 'wbps=10000 riops=20', or empty for unlimited",
- "Takes effect at next host-admin tick");
-
public static final UnboundBooleanFlag ENABLE_THE_ONE_THAT_SHOULD_NOT_BE_NAMED = defineFeatureFlag(
- "enable-the-one-that-should-not-be-named", false, List.of("hmusum"), "2023-05-08", "2023-08-15",
+ "enable-the-one-that-should-not-be-named", false, List.of("hmusum"), "2023-05-08", "2023-09-15",
"Whether to enable the one program that should not be named",
"Takes effect at next host-admin tick");
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
index 619dd39ca47..b50e71154eb 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/PermanentFlags.java
@@ -21,6 +21,7 @@ import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME;
import static com.yahoo.vespa.flags.FetchVector.Dimension.NODE_TYPE;
import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID;
import static com.yahoo.vespa.flags.FetchVector.Dimension.VESPA_VERSION;
+import static com.yahoo.vespa.flags.FetchVector.Dimension.ZONE_ID;
/**
* Definition for permanent feature flags
@@ -351,13 +352,20 @@ public class PermanentFlags {
"Takes effect immediately",
TENANT_ID);
- public static final UnboundIntFlag KEEP_FILE_REFERENCES_ON_TENANT_NODES = defineIntFlag(
- "keep-file-references-on-tenant-nodes", 30,
+ public static final UnboundIntFlag KEEP_FILE_REFERENCES_DAYS = defineIntFlag(
+ "keep-file-references-days", 30,
"How many days to keep file references on tenant nodes (based on last modification time)",
"Takes effect on restart of Docker container",
APPLICATION_ID
);
+ public static final UnboundIntFlag KEEP_FILE_REFERENCES_COUNT = defineIntFlag(
+ "keep-file-references-count", 20,
+ "How many file references to keep on tenant nodes (no matter what last modification time is)",
+ "Takes effect on restart of Docker container",
+ ZONE_ID, APPLICATION_ID
+ );
+
public static final UnboundIntFlag ENDPOINT_CONNECTION_TTL = defineIntFlag(
"endpoint-connection-ttl", 45,
"Time to live for connections to endpoints in seconds",
@@ -377,7 +385,7 @@ public class PermanentFlags {
"Takes effect immediately");
public static final UnboundBooleanFlag DROP_CACHES = defineFeatureFlag(
- "drop-caches", false,
+ "drop-caches", true,
"Drop caches on tenant hosts",
"Takes effect on next tick",
// The application ID is the exclusive application ID associated with the host,
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
index 749f6830870..031b61c8e7e 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/json/RelationalCondition.java
@@ -68,6 +68,10 @@ public class RelationalCondition implements Condition {
return fetchVector.getValue(dimension).map(predicate::test).orElse(false);
}
+ public RelationalPredicate relationalPredicate() {
+ return relationalPredicate;
+ }
+
@Override
public WireCondition toWire() {
var condition = new WireCondition();
diff --git a/fnet/src/tests/sync_execute/sync_execute.cpp b/fnet/src/tests/sync_execute/sync_execute.cpp
index 5d2f4097ab4..0dd65b08874 100644
--- a/fnet/src/tests/sync_execute/sync_execute.cpp
+++ b/fnet/src/tests/sync_execute/sync_execute.cpp
@@ -17,6 +17,8 @@ TEST("sync execute") {
DoIt exe2;
DoIt exe3;
DoIt exe4;
+ DoIt exe5;
+ DoIt exe6;
FNET_Transport transport;
ASSERT_TRUE(transport.execute(&exe1));
ASSERT_TRUE(transport.Start());
@@ -26,14 +28,16 @@ TEST("sync execute") {
ASSERT_TRUE(exe2.gate.getCount() == 0u);
ASSERT_TRUE(transport.execute(&exe3));
transport.ShutDown(false);
- ASSERT_TRUE(!transport.execute(&exe4));
+ uint32_t expect_cnt_4 = transport.execute(&exe4) ? 0 : 1;
transport.sync();
transport.WaitFinished();
+ ASSERT_TRUE(!transport.execute(&exe5));
transport.sync();
ASSERT_TRUE(exe1.gate.getCount() == 0u);
ASSERT_TRUE(exe2.gate.getCount() == 0u);
ASSERT_TRUE(exe3.gate.getCount() == 0u);
- ASSERT_TRUE(exe4.gate.getCount() == 1u);
+ ASSERT_TRUE(exe4.gate.getCount() == expect_cnt_4);
+ ASSERT_TRUE(exe5.gate.getCount() == 1u);
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/fnet/src/vespa/fnet/connection.cpp b/fnet/src/vespa/fnet/connection.cpp
index a770561344f..314fc7517e5 100644
--- a/fnet/src/vespa/fnet/connection.cpp
+++ b/fnet/src/vespa/fnet/connection.cpp
@@ -526,7 +526,7 @@ FNET_Connection::FNET_Connection(FNET_TransportThread *owner,
FNET_Connection::~FNET_Connection()
{
- assert(!_resolve_handler.load());
+ assert(!_resolve_handler);
_num_connections.fetch_sub(1, std::memory_order_relaxed);
}
@@ -541,7 +541,7 @@ FNET_Connection::Init()
// initiate async resolve
if (IsClient()) {
_resolve_handler = std::make_shared<ResolveHandler>(this);
- Owner()->owner().resolve_async(GetSpec(), _resolve_handler.load());
+ Owner()->owner().resolve_async(GetSpec(), _resolve_handler);
}
return true;
}
@@ -555,12 +555,11 @@ FNET_Connection::server_adapter()
bool
FNET_Connection::handle_add_event()
{
- std::shared_ptr<ResolveHandler> resolve_handler = _resolve_handler.exchange({});
- if (resolve_handler) {
+ if (_resolve_handler) {
auto tweak = [this](vespalib::SocketHandle &handle) { return Owner()->tune(handle); };
- _socket = Owner()->owner().create_client_crypto_socket(resolve_handler->address.connect(tweak), vespalib::SocketSpec(GetSpec()));
+ _socket = Owner()->owner().create_client_crypto_socket(_resolve_handler->address.connect(tweak), vespalib::SocketSpec(GetSpec()));
_ioc_socket_fd = _socket->get_fd();
- resolve_handler.reset();
+ _resolve_handler.reset();
}
return (_socket && (_socket->get_fd() >= 0));
}
@@ -681,7 +680,7 @@ FNET_Connection::Sync()
void
FNET_Connection::Close()
{
- _resolve_handler.store({});
+ _resolve_handler.reset();
detach_selector();
SetState(FNET_CLOSED);
_ioc_socket_fd = -1;
diff --git a/fnet/src/vespa/fnet/connection.h b/fnet/src/vespa/fnet/connection.h
index 8fe78cba8c2..0db71db14e0 100644
--- a/fnet/src/vespa/fnet/connection.h
+++ b/fnet/src/vespa/fnet/connection.h
@@ -71,7 +71,7 @@ private:
void handle_result(vespalib::SocketAddress result) override;
~ResolveHandler();
};
- using ResolveHandlerSP = std::atomic<std::shared_ptr<ResolveHandler>>;
+ using ResolveHandlerSP = std::shared_ptr<ResolveHandler>;
FNET_IPacketStreamer *_streamer; // custom packet streamer
FNET_IServerAdapter *_serverAdapter; // only on server side
vespalib::CryptoSocket::UP _socket; // socket for this conn
diff --git a/fnet/src/vespa/fnet/transport_thread.cpp b/fnet/src/vespa/fnet/transport_thread.cpp
index 0b0df02c04c..217738b7364 100644
--- a/fnet/src/vespa/fnet/transport_thread.cpp
+++ b/fnet/src/vespa/fnet/transport_thread.cpp
@@ -119,7 +119,7 @@ FNET_TransportThread::PostEvent(FNET_ControlPacket *cpacket,
size_t qLen;
{
std::unique_lock<std::mutex> guard(_lock);
- if (IsShutDown()) {
+ if (_reject_events) {
guard.unlock();
DiscardEvent(cpacket, context);
return false;
@@ -243,7 +243,8 @@ FNET_TransportThread::FNET_TransportThread(FNET_Transport &owner_in)
_started(false),
_shutdown(false),
_finished(false),
- _detaching()
+ _detaching(),
+ _reject_events(false)
{
trapsigpipe();
}
@@ -384,9 +385,9 @@ FNET_TransportThread::ShutDown(bool waitFinished)
bool wasEmpty = false;
{
std::lock_guard<std::mutex> guard(_lock);
- if (!IsShutDown()) {
+ if (!should_shut_down()) {
_shutdown.store(true, std::memory_order_relaxed);
- wasEmpty = _queue.IsEmpty_NoLock();
+ wasEmpty = _queue.IsEmpty_NoLock();
}
}
if (wasEmpty) {
@@ -503,7 +504,7 @@ FNET_TransportThread::handle_event(FNET_IOComponent &ctx, bool read, bool write)
bool
FNET_TransportThread::EventLoopIteration() {
- if (!IsShutDown()) {
+ if (!should_shut_down()) {
int msTimeout = vespalib::count_ms(time_tools().event_timeout());
// obtain I/O events
_selector.poll(msTimeout);
@@ -530,7 +531,7 @@ FNET_TransportThread::EventLoopIteration() {
FlushDeleteList();
} // -- END OF MAIN EVENT LOOP --
- if (!IsShutDown())
+ if (!should_shut_down())
return true;
if (is_finished())
return false;
@@ -552,10 +553,22 @@ FNET_TransportThread::checkTimedoutComponents(vespalib::duration timeout) {
void
FNET_TransportThread::endEventLoop() {
+ // close and remove all I/O Components
+ FNET_IOComponent *component = _componentsHead;
+ while (component != nullptr) {
+ assert(component == _componentsHead);
+ FNET_IOComponent *tmp = component;
+ component = component->_ioc_next;
+ RemoveComponent(tmp);
+ tmp->Close();
+ tmp->internal_subref();
+ }
+
// flush event queue
{
std::lock_guard<std::mutex> guard(_lock);
_queue.FlushPackets_NoLock(&_myQueue);
+ _reject_events = true;
}
// discard remaining events
@@ -569,16 +582,6 @@ FNET_TransportThread::endEventLoop() {
}
}
- // close and remove all I/O Components
- FNET_IOComponent *component = _componentsHead;
- while (component != nullptr) {
- assert(component == _componentsHead);
- FNET_IOComponent *tmp = component;
- component = component->_ioc_next;
- RemoveComponent(tmp);
- tmp->Close();
- tmp->internal_subref();
- }
assert(_componentsHead == nullptr &&
_componentsTail == nullptr &&
_timeOutHead == nullptr &&
@@ -588,7 +591,7 @@ FNET_TransportThread::endEventLoop() {
{
std::lock_guard<std::mutex> guard(_shutdownLock);
- _finished.store(true, std::memory_order_relaxed);
+ _finished.store(true, std::memory_order_release);
_shutdownCond.notify_all();
}
diff --git a/fnet/src/vespa/fnet/transport_thread.h b/fnet/src/vespa/fnet/transport_thread.h
index 6047d4e3482..c7ada472501 100644
--- a/fnet/src/vespa/fnet/transport_thread.h
+++ b/fnet/src/vespa/fnet/transport_thread.h
@@ -52,6 +52,7 @@ private:
std::atomic<bool> _shutdown; // should stop event loop ?
std::atomic<bool> _finished; // event loop stopped ?
std::set<FNET_IServerAdapter*> _detaching; // server adapters being detached
+ bool _reject_events; // the transport thread does not want any more events
/**
* Add an IOComponent to the list of components. This operation is
@@ -169,12 +170,12 @@ private:
**/
bool EventLoopIteration();
- bool IsShutDown() const noexcept {
+ [[nodiscard]] bool should_shut_down() const noexcept {
return _shutdown.load(std::memory_order_relaxed);
}
[[nodiscard]] bool is_finished() const noexcept {
- return _finished.load(std::memory_order_relaxed);
+ return _finished.load(std::memory_order_acquire);
}
public:
diff --git a/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpression.java b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpression.java
new file mode 100644
index 00000000000..c8106148630
--- /dev/null
+++ b/indexinglanguage/src/main/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpression.java
@@ -0,0 +1,51 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.indexinglanguage.expressions;
+
+import com.yahoo.document.DataType;
+import com.yahoo.document.datatypes.LongFieldValue;
+import java.time.Instant;
+
+/**
+ * Converts an ISO-8601 formatted date string to Unix epoch time in seconds.
+ *
+ * @author bergum
+ */
+
+public class ToEpochSecondExpression extends Expression {
+ public ToEpochSecondExpression() {
+ super(DataType.STRING); //only accept string input
+ }
+
+ @Override
+ protected void doExecute(ExecutionContext context) {
+ String inputString = String.valueOf(context.getValue());
+ long epochTime = Instant.parse(inputString).getEpochSecond();
+ context.setValue(new LongFieldValue(epochTime));
+ }
+
+ @Override
+ protected void doVerify(VerificationContext context) {
+ context.setValueType(createdOutputType());
+ }
+
+ @Override
+ public DataType createdOutputType() {
+ return DataType.LONG;
+ }
+
+ @Override
+ public String toString() {
+ return "to_epoch_second";
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ return obj instanceof ToEpochSecondExpression;
+ }
+
+ @Override
+ public int hashCode() {
+ return getClass().hashCode();
+ }
+
+}
diff --git a/indexinglanguage/src/main/javacc/IndexingParser.jj b/indexinglanguage/src/main/javacc/IndexingParser.jj
index a039ad137ee..d559d9b7260 100644
--- a/indexinglanguage/src/main/javacc/IndexingParser.jj
+++ b/indexinglanguage/src/main/javacc/IndexingParser.jj
@@ -198,6 +198,7 @@ TOKEN :
<TO_INT: "to_int"> |
<TO_LONG: "to_long"> |
<TO_POS: "to_pos"> |
+ <TO_EPOCH_SECOND: "to_epoch_second"> |
<TO_STRING: "to_string"> |
<TO_WSET: "to_wset"> |
<TO_BOOL: "to_bool"> |
@@ -338,6 +339,7 @@ Expression value() :
val = toIntExp() |
val = toLongExp() |
val = toPosExp() |
+ val = toEpochSecondExp() |
val = toStringExp() |
val = toWsetExp() |
val = toBoolExp() |
@@ -713,6 +715,12 @@ Expression toPosExp() : { }
{ return new ToPositionExpression(); }
}
+Expression toEpochSecondExp() : { }
+{
+ ( <TO_EPOCH_SECOND> )
+ { return new ToEpochSecondExpression(); }
+}
+
Expression toStringExp() : { }
{
( <TO_STRING> )
diff --git a/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpressionTestCase.java b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpressionTestCase.java
new file mode 100644
index 00000000000..7203afcc1a0
--- /dev/null
+++ b/indexinglanguage/src/test/java/com/yahoo/vespa/indexinglanguage/expressions/ToEpochSecondExpressionTestCase.java
@@ -0,0 +1,51 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.indexinglanguage.expressions;
+
+import com.yahoo.document.DataType;
+import com.yahoo.document.datatypes.FieldValue;
+import com.yahoo.document.datatypes.LongFieldValue;
+import com.yahoo.document.datatypes.StringFieldValue;
+import com.yahoo.vespa.indexinglanguage.SimpleTestAdapter;
+import org.junit.Test;
+
+import static com.yahoo.vespa.indexinglanguage.expressions.ExpressionAssert.assertVerify;
+import static com.yahoo.vespa.indexinglanguage.expressions.ExpressionAssert.assertVerifyThrows;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+
+public class ToEpochSecondExpressionTestCase {
+ @Test
+ public void requireThatHashCodeAndEqualsAreImplemented() {
+ Expression exp = new ToEpochSecondExpression();
+ assertFalse(exp.equals(new Object()));
+ assertEquals(exp, new ToEpochSecondExpression());
+ assertEquals(exp.hashCode(), new ToEpochSecondExpression().hashCode());
+ }
+
+ @Test
+ public void requireThatExpressionCanBeVerified() {
+ Expression exp = new ToEpochSecondExpression();
+ assertVerify(DataType.STRING, exp, DataType.LONG);
+ assertVerifyThrows(DataType.INT, exp, "Expected string input, got int.");
+ assertVerifyThrows(null, exp, "Expected string input, got null.");
+ }
+
+ @Test
+ public void requireThatValueIsConvertedWithMs() {
+ ExecutionContext ctx = new ExecutionContext(new SimpleTestAdapter());
+ ctx.setValue(new StringFieldValue("2023-12-24T17:00:43.000Z")).execute(new ToEpochSecondExpression());
+ FieldValue val = ctx.getValue();
+ assertTrue(val instanceof LongFieldValue);
+ assertEquals(1703437243L, ((LongFieldValue)val).getLong());
+ }
+
+ @Test
+ public void requireThatValueIsConverted() {
+ ExecutionContext ctx = new ExecutionContext(new SimpleTestAdapter());
+ ctx.setValue(new StringFieldValue("2023-12-24T17:00:43Z")).execute(new ToEpochSecondExpression());
+ FieldValue val = ctx.getValue();
+ assertTrue(val instanceof LongFieldValue);
+ assertEquals(1703437243L, ((LongFieldValue)val).getLong());
+ }
+}
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java b/messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java
index ad06aed43ec..53fd49dcf58 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/SourceSession.java
@@ -34,7 +34,7 @@ public final class SourceSession implements ReplyHandler, MessageBus.SendBlocked
void dec() { count --; }
boolean enough() { return count > 5; }
}
- private static ThreadLocal<Counter> sendBlockedRecurseLevel = ThreadLocal.withInitial(Counter::new);
+ private static final ThreadLocal<Counter> sendBlockedRecurseLevel = ThreadLocal.withInitial(Counter::new);
/**
* The default constructor requires values for all final member variables
@@ -139,13 +139,15 @@ public final class SourceSession implements ReplyHandler, MessageBus.SendBlocked
if (closed) {
return new Result(ErrorCode.SEND_QUEUE_CLOSED, "Source session is closed.");
}
- if (throttlePolicy != null && ! throttlePolicy.canSend(message, pendingCount)) {
- return new Result(ErrorCode.SEND_QUEUE_FULL,
- "Too much pending data (" + pendingCount + " messages).");
- }
- message.pushHandler(replyHandler);
if (throttlePolicy != null) {
+ if (! throttlePolicy.canSend(message, pendingCount)) {
+ return new Result(ErrorCode.SEND_QUEUE_FULL,
+ "Too much pending data (" + pendingCount + " messages).");
+ }
+ message.pushHandler(replyHandler);
throttlePolicy.processMessage(message);
+ } else {
+ message.pushHandler(replyHandler);
}
++pendingCount;
}
diff --git a/messagebus/src/vespa/messagebus/network/rpctarget.cpp b/messagebus/src/vespa/messagebus/network/rpctarget.cpp
index 9c6ca9dff69..d7f3e77c6fd 100644
--- a/messagebus/src/vespa/messagebus/network/rpctarget.cpp
+++ b/messagebus/src/vespa/messagebus/network/rpctarget.cpp
@@ -5,8 +5,8 @@
namespace mbus {
-RPCTarget::RPCTarget(const string &spec, FRT_Supervisor &orb) :
- _lock(),
+RPCTarget::RPCTarget(const string &spec, FRT_Supervisor &orb, ctor_tag)
+ : _lock(),
_orb(orb),
_name(spec),
_target(*_orb.GetTarget(spec.c_str())),
@@ -48,6 +48,7 @@ RPCTarget::resolveVersion(duration timeout, RPCTarget::IVersionHandler &handler)
handler.handleVersion(_version.get());
} else if (shouldInvoke) {
FRT_RPCRequest *req = _orb.AllocRPCRequest();
+ req->getStash().create<SP>(shared_from_this());
req->SetMethodName("mbus.getVersion");
_target.InvokeAsync(req, vespalib::to_s(timeout), this);
}
@@ -67,8 +68,9 @@ RPCTarget::isValid() const
}
void
-RPCTarget::RequestDone(FRT_RPCRequest *req)
+RPCTarget::RequestDone(FRT_RPCRequest *raw_req)
{
+ auto req = vespalib::ref_counted<FRT_RPCRequest>::internal_attach(raw_req);
HandlerList handlers;
{
std::lock_guard guard(_lock);
@@ -94,7 +96,6 @@ RPCTarget::RequestDone(FRT_RPCRequest *req)
_state = (_version.get() ? VERSION_RESOLVED : VERSION_NOT_RESOLVED);
}
_cond.notify_all();
- req->internal_subref();
}
} // namespace mbus
diff --git a/messagebus/src/vespa/messagebus/network/rpctarget.h b/messagebus/src/vespa/messagebus/network/rpctarget.h
index fffffae64f7..77fcef5f48f 100644
--- a/messagebus/src/vespa/messagebus/network/rpctarget.h
+++ b/messagebus/src/vespa/messagebus/network/rpctarget.h
@@ -13,7 +13,7 @@ namespace mbus {
* target. Instances of this class are returned by {@link RPCService}, and
* cached by {@link RPCTargetPool}.
*/
-class RPCTarget : public FRT_IRequestWait {
+class RPCTarget : public FRT_IRequestWait, public std::enable_shared_from_this<RPCTarget> {
public:
/**
* Declares a version handler used when resolving the version of a target.
@@ -58,6 +58,7 @@ private:
Version_UP _version;
HandlerList _versionHandlers;
+ struct ctor_tag {};
public:
/**
* Convenience typedefs.
@@ -72,7 +73,10 @@ public:
* @param spec The connection spec of this target.
* @param orb The FRT supervisor to use when connecting to target.
*/
- RPCTarget(const string &name, FRT_Supervisor &orb);
+ RPCTarget(const string &name, FRT_Supervisor &orb, ctor_tag);
+ static SP create(const string &name, FRT_Supervisor &orb) {
+ return std::make_shared<RPCTarget>(name, orb, ctor_tag{});
+ }
/**
* Destructor. Subrefs the contained FRT target.
diff --git a/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp b/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp
index b403c65f863..db09b127114 100644
--- a/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp
+++ b/messagebus/src/vespa/messagebus/network/rpctargetpool.cpp
@@ -97,7 +97,7 @@ RPCTargetPool::getTarget(FRT_Supervisor &orb, const RPCServiceAddress &address)
std::vector<RPCTarget::SP> targets;
targets.reserve(_numTargetsPerSpec);
for (size_t i(0); i < _numTargetsPerSpec; i++) {
- targets.push_back(std::make_shared<RPCTarget>(spec, orb));
+ targets.push_back(RPCTarget::create(spec, orb));
}
_targets.insert(TargetMap::value_type(spec, Entry(std::move(targets), currentTime)));
return _targets.find(spec)->second.getTarget(guard, currentTime);
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java b/metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java
index cb3a13acb85..515b06de2d8 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/DefaultMetrics.java
@@ -56,6 +56,7 @@ public class DefaultMetrics {
addStorageMetrics(metrics);
addDistributorMetrics(metrics);
addClusterControllerMetrics(metrics);
+ addSentinelMetrics(metrics);
addOtherMetrics(metrics);
return Collections.unmodifiableSet(metrics);
}
@@ -154,7 +155,7 @@ public class DefaultMetrics {
private static void addSentinelMetrics(Set<Metric> metrics) {
// Metrics needed for alerting
- addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS, EnumSet.of(sum, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS, EnumSet.of(max, sum, last)); // TODO: Vespa 9: Remove last, sum?
}
private static void addOtherMetrics(Set<Metric> metrics) {
diff --git a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
index f9e37f4a85b..bc8567b8bf5 100644
--- a/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
+++ b/metrics/src/main/java/ai/vespa/metrics/set/VespaMetricSet.java
@@ -62,7 +62,7 @@ public class VespaMetricSet {
Set<Metric> metrics = new LinkedHashSet<>();
addMetric(metrics, SentinelMetrics.SENTINEL_RESTARTS.count());
- addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS, EnumSet.of(sum, last)); // TODO: Vespa 9: Remove last
+ addMetric(metrics, SentinelMetrics.SENTINEL_TOTAL_RESTARTS, EnumSet.of(max, sum, last)); // TODO: Vespa 9: Remove last, sum?
addMetric(metrics, SentinelMetrics.SENTINEL_UPTIME.last());
addMetric(metrics, SentinelMetrics.SENTINEL_RUNNING, EnumSet.of(count, last));
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
index 264035b86a1..fa933e9622a 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/ContainerOperations.java
@@ -36,7 +36,7 @@ public class ContainerOperations {
public ContainerOperations(ContainerEngine containerEngine, Cgroup cgroup, FileSystem fileSystem, Timer timer) {
this.containerEngine = Objects.requireNonNull(containerEngine);
- this.imageDownloader = new ContainerImageDownloader(containerEngine);
+ this.imageDownloader = new ContainerImageDownloader(containerEngine, timer);
this.imagePruner = new ContainerImagePruner(containerEngine, timer);
this.containerStatsCollector = new ContainerStatsCollector(containerEngine, cgroup, fileSystem);
}
@@ -62,8 +62,8 @@ public class ContainerOperations {
}
/** Pull image asynchronously. Returns true if image is still downloading and false if download is complete */
- public boolean pullImageAsyncIfNeeded(TaskContext context, DockerImage dockerImage, RegistryCredentials registryCredentials) {
- return !imageDownloader.get(context, dockerImage, registryCredentials);
+ public boolean pullImageAsyncIfNeeded(TaskContext context, DockerImage dockerImage, RegistryCredentialsProvider credentialsProvider) {
+ return !imageDownloader.get(context, dockerImage, credentialsProvider);
}
/** Executes a command inside container identified by given context. Does NOT throw on non-zero exit code */
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java
index 1e37e080528..d3327bf5148 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloader.java
@@ -3,10 +3,13 @@ package com.yahoo.vespa.hosted.node.admin.container.image;
import com.yahoo.concurrent.DaemonThreadFactory;
import com.yahoo.config.provision.DockerImage;
+import com.yahoo.jdisc.Timer;
import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
import com.yahoo.vespa.hosted.node.admin.container.ContainerEngine;
-import com.yahoo.vespa.hosted.node.admin.container.RegistryCredentials;
+import com.yahoo.vespa.hosted.node.admin.container.RegistryCredentialsProvider;
+import java.time.Duration;
+import java.time.Instant;
import java.util.Collections;
import java.util.HashSet;
import java.util.Objects;
@@ -26,13 +29,15 @@ public class ContainerImageDownloader {
private static final Logger LOG = Logger.getLogger(ContainerImageDownloader.class.getName());
private final ContainerEngine containerEngine;
+ private final Timer timer;
private final ExecutorService executorService = Executors.newSingleThreadExecutor(
new DaemonThreadFactory("container-image-downloader")); // Download one image at a time
private final Set<DockerImage> pendingDownloads = Collections.synchronizedSet(new HashSet<>());
- public ContainerImageDownloader(ContainerEngine containerEngine) {
+ public ContainerImageDownloader(ContainerEngine containerEngine, Timer timer) {
this.containerEngine = Objects.requireNonNull(containerEngine);
+ this.timer = Objects.requireNonNull(timer);
}
/**
@@ -40,12 +45,14 @@ public class ContainerImageDownloader {
*
* @return true if the image download has completed.
*/
- public boolean get(TaskContext context, DockerImage image, RegistryCredentials registryCredentials) {
+ public boolean get(TaskContext context, DockerImage image, RegistryCredentialsProvider credentialsProvider) {
if (pendingDownloads.contains(image)) return false;
if (containerEngine.hasImage(context, image)) return true;
executorService.submit(() -> {
try {
- containerEngine.pullImage(context, image, registryCredentials);
+ Instant start = timer.currentTime();
+ containerEngine.pullImage(context, image, credentialsProvider.get());
+ LOG.log(Level.INFO, "Downloaded container image " + image + " in " + Duration.between(start, timer.currentTime()));
} catch (RuntimeException e) {
LOG.log(Level.SEVERE, "Failed to download container image " + image, e);
} finally {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java
index 07a8d545178..e9dbfa0c524 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/container/metrics/Metrics.java
@@ -102,6 +102,17 @@ public class Metrics {
}
}
+ public void deleteMetricByName(String application, String metricName, DimensionType type) {
+ synchronized (monitor) {
+ Optional.ofNullable(metrics.get(type))
+ .map(m -> m.get(application))
+ .map(ApplicationMetrics::metricsByDimensions)
+ .ifPresent(dims ->
+ dims.values().forEach(metrics -> metrics.remove(metricName))
+ );
+ }
+ }
+
Map<Dimensions, Map<String, MetricValue>> getOrCreateApplicationMetrics(String application, DimensionType type) {
return metrics.computeIfAbsent(type, m -> new HashMap<>())
.computeIfAbsent(application, app -> new ApplicationMetrics())
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index 4c17bfbe039..466ee65fcc1 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -17,11 +17,9 @@ import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.reports.DropDocumentsReport;
import com.yahoo.vespa.hosted.node.admin.configserver.orchestrator.Orchestrator;
-import com.yahoo.vespa.hosted.node.admin.configserver.orchestrator.OrchestratorException;
import com.yahoo.vespa.hosted.node.admin.container.Container;
import com.yahoo.vespa.hosted.node.admin.container.ContainerOperations;
import com.yahoo.vespa.hosted.node.admin.container.ContainerResources;
-import com.yahoo.vespa.hosted.node.admin.container.RegistryCredentials;
import com.yahoo.vespa.hosted.node.admin.container.RegistryCredentialsProvider;
import com.yahoo.vespa.hosted.node.admin.maintenance.ContainerWireguardTask;
import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
@@ -431,9 +429,8 @@ public class NodeAgentImpl implements NodeAgent {
NodeSpec node = context.node();
if (node.wantedDockerImage().equals(container.map(c -> c.image()))) return false;
- RegistryCredentials credentials = registryCredentialsProvider.get();
return node.wantedDockerImage()
- .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, credentials))
+ .map(image -> containerOperations.pullImageAsyncIfNeeded(context, image, registryCredentialsProvider))
.orElse(false);
}
@@ -486,18 +483,21 @@ public class NodeAgentImpl implements NodeAgent {
lastNode = node;
}
+ // Run this here and now, even though we may immediately remove the container below.
+ // This ensures these maintainers are run even if something fails or returns early.
+ // These maintainers should also run immediately after starting the container (see below).
+ container.filter(c -> c.state().isRunning())
+ .ifPresent(c -> runImportantContainerMaintainers(context, c));
+
switch (node.state()) {
- case ready:
- case reserved:
- case failed:
- case inactive:
- case parked:
+ case ready, reserved, failed, inactive, parked -> {
storageMaintainer.syncLogs(context, true);
+ if (node.state() == NodeState.reserved) downloadImageIfNeeded(context, container);
removeContainerIfNeededUpdateContainerState(context, container);
updateNodeRepoWithCurrentAttributes(context, Optional.empty());
stopServicesIfNeeded(context);
- break;
- case active:
+ }
+ case active -> {
storageMaintainer.syncLogs(context, true);
storageMaintainer.cleanDiskIfFull(context);
storageMaintainer.handleCoreDumpsForContainer(context, container, false);
@@ -513,13 +513,11 @@ public class NodeAgentImpl implements NodeAgent {
containerState = STARTING;
container = Optional.of(startContainer(context));
containerState = UNKNOWN;
+ runImportantContainerMaintainers(context, container.get());
} else {
container = Optional.of(updateContainerIfNeeded(context, container.get()));
}
- aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
- final Optional<Container> finalContainer = container;
- wireguardTasks.forEach(task -> task.converge(context, finalContainer.get().id()));
startServicesIfNeeded(context);
resumeNodeIfNeeded(context);
if (healthChecker.isPresent()) {
@@ -550,11 +548,8 @@ public class NodeAgentImpl implements NodeAgent {
orchestrator.resume(context.hostname().value());
suspendedInOrchestrator = false;
}
- break;
- case provisioned:
- nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
- break;
- case dirty:
+ }
+ case dirty -> {
removeContainerIfNeededUpdateContainerState(context, container);
context.log(logger, "State is " + node.state() + ", will delete application storage and mark node as ready");
credentialsMaintainers.forEach(maintainer -> maintainer.clearCredentials(context));
@@ -562,12 +557,16 @@ public class NodeAgentImpl implements NodeAgent {
storageMaintainer.archiveNodeStorage(context);
updateNodeRepoWithCurrentAttributes(context, Optional.empty());
nodeRepository.setNodeState(context.hostname().value(), NodeState.ready);
- break;
- default:
- throw ConvergenceException.ofError("UNKNOWN STATE " + node.state().name());
+ }
+ default -> throw ConvergenceException.ofError("Unexpected state " + node.state().name());
}
}
+ private void runImportantContainerMaintainers(NodeAgentContext context, Container runningContainer) {
+ aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
+ wireguardTasks.forEach(task -> task.converge(context, runningContainer.id()));
+ }
+
private static void logChangesToNodeSpec(NodeAgentContext context, NodeSpec lastNode, NodeSpec node) {
StringBuilder builder = new StringBuilder();
appendIfDifferent(builder, "state", lastNode, node, NodeSpec::state);
@@ -609,23 +608,8 @@ public class NodeAgentImpl implements NodeAgent {
if (context.node().state() != NodeState.active) return;
context.log(logger, "Ask Orchestrator for permission to suspend node");
- try {
- orchestrator.suspend(context.hostname().value());
- suspendedInOrchestrator = true;
- } catch (OrchestratorException e) {
- // Ensure the ACLs are up to date: The reason we're unable to suspend may be because some other
- // node is unable to resume because the ACL rules of SOME Docker container is wrong...
- // Same can happen with stale WireGuard config, so update that too
- try {
- aclMaintainer.ifPresent(maintainer -> maintainer.converge(context));
- wireguardTasks.forEach(task -> getContainer(context).ifPresent(c -> task.converge(context, c.id())));
- } catch (RuntimeException suppressed) {
- logger.log(Level.WARNING, "Suppressing ACL update failure: " + suppressed);
- e.addSuppressed(suppressed);
- }
-
- throw e;
- }
+ orchestrator.suspend(context.hostname().value());
+ suspendedInOrchestrator = true;
}
protected void writeContainerData(NodeAgentContext context, ContainerData containerData) { }
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java
index 665bb4b8bbc..78fc4b151c7 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/task/util/file/UnixUser.java
@@ -11,6 +11,7 @@ import java.util.Objects;
public class UnixUser {
public static final UnixUser ROOT = new UnixUser("root", 0, "root", 0);
+ public static final UnixUser VESPA = new UnixUser("vespa", 1000, "vespa", 1000);
private final String name;
private final int uid;
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java
index 9fd14e7e665..7f002eee315 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/container/image/ContainerImageDownloaderTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.node.admin.container.image;
import com.yahoo.config.provision.DockerImage;
+import com.yahoo.jdisc.test.TestTimer;
import com.yahoo.vespa.hosted.node.admin.component.TaskContext;
import com.yahoo.vespa.hosted.node.admin.component.TestTaskContext;
import com.yahoo.vespa.hosted.node.admin.container.ContainerEngineMock;
@@ -21,15 +22,15 @@ public class ContainerImageDownloaderTest {
@Timeout(5_000)
void test_download() {
ContainerEngineMock podman = new ContainerEngineMock().asyncImageDownload(true);
- ContainerImageDownloader downloader = new ContainerImageDownloader(podman);
+ ContainerImageDownloader downloader = new ContainerImageDownloader(podman, new TestTimer());
TaskContext context = new TestTaskContext();
DockerImage image = DockerImage.fromString("registry.example.com/repo/vespa:7.42");
- assertFalse(downloader.get(context, image, RegistryCredentials.none), "Download started");
- assertFalse(downloader.get(context, image, RegistryCredentials.none), "Download pending");
+ assertFalse(downloader.get(context, image, () -> RegistryCredentials.none), "Download started");
+ assertFalse(downloader.get(context, image, () -> RegistryCredentials.none), "Download pending");
podman.completeDownloadOf(image);
boolean downloadCompleted;
- while (!(downloadCompleted = downloader.get(context, image, RegistryCredentials.none))) ;
+ while (!(downloadCompleted = downloader.get(context, image, () -> RegistryCredentials.none))) ;
assertTrue(downloadCompleted, "Download completed");
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
index 0913e1d040a..ef4d6d849f6 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
@@ -487,20 +487,6 @@ public class NodeAgentImplTest {
}
@Test
- void provisionedNodeIsMarkedAsReady() {
- final NodeSpec node = nodeBuilder(NodeState.provisioned)
- .wantedDockerImage(dockerImage)
- .build();
-
- NodeAgentContext context = createContext(node);
- NodeAgentImpl nodeAgent = makeNodeAgent(null, false);
- when(nodeRepository.getOptionalNode(hostName)).thenReturn(Optional.of(node));
-
- nodeAgent.doConverge(context);
- verify(nodeRepository, times(1)).setNodeState(eq(hostName), eq(NodeState.ready));
- }
-
- @Test
void testRestartDeadContainerAfterNodeAdminRestart() {
final NodeSpec node = nodeBuilder(NodeState.active)
.currentDockerImage(dockerImage).wantedDockerImage(dockerImage)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
index 602314bed96..eafaed2a217 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/NodeRepository.java
@@ -95,8 +95,7 @@ public class NodeRepository extends AbstractComponent {
metricsDb,
orchestrator,
config.useCuratorClientCache(),
- zone.environment().isProduction() && !zone.cloud().dynamicProvisioning() && !zone.system().isCd() ? 1 : 0,
- config.nodeCacheSize());
+ zone.environment().isProduction() && !zone.cloud().dynamicProvisioning() && !zone.system().isCd() ? 1 : 0);
}
/**
@@ -116,15 +115,14 @@ public class NodeRepository extends AbstractComponent {
MetricsDb metricsDb,
Orchestrator orchestrator,
boolean useCuratorClientCache,
- int spareCount,
- long nodeCacheSize) {
+ int spareCount) {
if (provisionServiceProvider.getHostProvisioner().isPresent() != zone.cloud().dynamicProvisioning())
throw new IllegalArgumentException(String.format(
"dynamicProvisioning property must be 1-to-1 with availability of HostProvisioner, was: dynamicProvisioning=%s, hostProvisioner=%s",
zone.cloud().dynamicProvisioning(), provisionServiceProvider.getHostProvisioner().map(__ -> "present").orElse("empty")));
this.flagSource = flagSource;
- this.db = new CuratorDb(flavors, curator, clock, useCuratorClientCache, nodeCacheSize);
+ this.db = new CuratorDb(flavors, curator, clock, useCuratorClientCache);
this.zone = zone;
this.clock = clock;
this.applications = new Applications(db);
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
index 1ca81df824b..796bc2eeb92 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/applications/Cluster.java
@@ -208,6 +208,16 @@ public class Cluster {
return minimum(ClusterModel.minScalingDuration(clusterSpec), totalDuration.dividedBy(completedEventCount));
}
+ /** The predicted time this cluster will stay in each resource configuration (including the scaling duration). */
+ public Duration allocationDuration(ClusterSpec clusterSpec) {
+ if (scalingEvents.size() < 2) return Duration.ofHours(12); // Default
+
+ long totalDurationMs = 0;
+ for (int i = 1; i < scalingEvents().size(); i++)
+ totalDurationMs += scalingEvents().get(i).at().toEpochMilli() - scalingEvents().get(i - 1).at().toEpochMilli();
+ return Duration.ofMillis(totalDurationMs / (scalingEvents.size() - 1));
+ }
+
private static Duration minimum(Duration smallestAllowed, Duration duration) {
if (duration.minus(smallestAllowed).isNegative())
return smallestAllowed;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
index c19d76efb35..8069c9c089b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableClusterResources.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocatableResources.java
@@ -10,13 +10,14 @@ import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
+import java.time.Duration;
import java.util.List;
import java.util.Optional;
/**
* @author bratseth
*/
-public class AllocatableClusterResources {
+public class AllocatableResources {
/** The node count in the cluster */
private final int nodes;
@@ -32,9 +33,9 @@ public class AllocatableClusterResources {
private final double fulfilment;
/** Fake allocatable resources from requested capacity */
- public AllocatableClusterResources(ClusterResources requested,
- ClusterSpec clusterSpec,
- NodeRepository nodeRepository) {
+ public AllocatableResources(ClusterResources requested,
+ ClusterSpec clusterSpec,
+ NodeRepository nodeRepository) {
this.nodes = requested.nodes();
this.groups = requested.groups();
this.realResources = nodeRepository.resourcesCalculator().requestToReal(requested.nodeResources(), nodeRepository.exclusiveAllocation(clusterSpec), false);
@@ -43,7 +44,7 @@ public class AllocatableClusterResources {
this.fulfilment = 1;
}
- public AllocatableClusterResources(NodeList nodes, NodeRepository nodeRepository) {
+ public AllocatableResources(NodeList nodes, NodeRepository nodeRepository) {
this.nodes = nodes.size();
this.groups = (int)nodes.stream().map(node -> node.allocation().get().membership().cluster().group()).distinct().count();
this.realResources = averageRealResourcesOf(nodes.asList(), nodeRepository); // Average since we average metrics over nodes
@@ -52,10 +53,10 @@ public class AllocatableClusterResources {
this.fulfilment = 1;
}
- public AllocatableClusterResources(ClusterResources realResources,
- NodeResources advertisedResources,
- ClusterResources idealResources,
- ClusterSpec clusterSpec) {
+ public AllocatableResources(ClusterResources realResources,
+ NodeResources advertisedResources,
+ ClusterResources idealResources,
+ ClusterSpec clusterSpec) {
this.nodes = realResources.nodes();
this.groups = realResources.groups();
this.realResources = realResources.nodeResources();
@@ -64,12 +65,12 @@ public class AllocatableClusterResources {
this.fulfilment = fulfilment(realResources, idealResources);
}
- private AllocatableClusterResources(int nodes,
- int groups,
- NodeResources realResources,
- NodeResources advertisedResources,
- ClusterSpec clusterSpec,
- double fulfilment) {
+ private AllocatableResources(int nodes,
+ int groups,
+ NodeResources realResources,
+ NodeResources advertisedResources,
+ ClusterSpec clusterSpec,
+ double fulfilment) {
this.nodes = nodes;
this.groups = groups;
this.realResources = realResources;
@@ -79,16 +80,16 @@ public class AllocatableClusterResources {
}
/** Returns this with the redundant node or group removed from counts. */
- public AllocatableClusterResources withoutRedundancy() {
+ public AllocatableResources withoutRedundancy() {
int groupSize = nodes / groups;
int nodesAdjustedForRedundancy = nodes > 1 ? (groups == 1 ? nodes - 1 : nodes - groupSize) : nodes;
int groupsAdjustedForRedundancy = nodes > 1 ? (groups == 1 ? 1 : groups - 1) : groups;
- return new AllocatableClusterResources(nodesAdjustedForRedundancy,
- groupsAdjustedForRedundancy,
- realResources,
- advertisedResources,
- clusterSpec,
- fulfilment);
+ return new AllocatableResources(nodesAdjustedForRedundancy,
+ groupsAdjustedForRedundancy,
+ realResources,
+ advertisedResources,
+ clusterSpec,
+ fulfilment);
}
/**
@@ -112,6 +113,7 @@ public class AllocatableClusterResources {
public ClusterSpec clusterSpec() { return clusterSpec; }
+ /** Returns the standard cost of these resources, in dollars per hour */
public double cost() { return nodes * advertisedResources.cost(); }
/**
@@ -128,11 +130,22 @@ public class AllocatableClusterResources {
return (vcpuFulfilment + memoryGbFulfilment + diskGbFulfilment) / 3;
}
- public boolean preferableTo(AllocatableClusterResources other) {
- if (this.fulfilment < 1 || other.fulfilment < 1) // always fulfil as much as possible
- return this.fulfilment > other.fulfilment;
+ public boolean preferableTo(AllocatableResources other, ClusterModel model) {
+ if (other.fulfilment() < 1 || this.fulfilment() < 1) // always fulfil as much as possible
+ return this.fulfilment() > other.fulfilment();
- return this.cost() < other.cost(); // otherwise, prefer lower cost
+ return this.cost() * toHours(model.allocationDuration()) + this.costChangingFrom(model)
+ <
+ other.cost() * toHours(model.allocationDuration()) + other.costChangingFrom(model);
+ }
+
+ private double toHours(Duration duration) {
+ return duration.toMillis() / 3600000.0;
+ }
+
+ /** The estimated cost of changing from the given current resources to this. */
+ public double costChangingFrom(ClusterModel model) {
+ return new ResourceChange(model, this).cost();
}
@Override
@@ -154,12 +167,13 @@ public class AllocatableClusterResources {
.withBandwidthGbps(sum.bandwidthGbps() / nodes.size());
}
- public static Optional<AllocatableClusterResources> from(ClusterResources wantedResources,
- ApplicationId applicationId,
- ClusterSpec clusterSpec,
- Limits applicationLimits,
- List<NodeResources> availableRealHostResources,
- NodeRepository nodeRepository) {
+ public static Optional<AllocatableResources> from(ClusterResources wantedResources,
+ ApplicationId applicationId,
+ ClusterSpec clusterSpec,
+ Limits applicationLimits,
+ List<NodeResources> availableRealHostResources,
+ ClusterModel model,
+ NodeRepository nodeRepository) {
var systemLimits = nodeRepository.nodeResourceLimits();
boolean exclusive = nodeRepository.exclusiveAllocation(clusterSpec);
if (! exclusive) {
@@ -193,8 +207,8 @@ public class AllocatableClusterResources {
}
else { // Return the cheapest flavor satisfying the requested resources, if any
NodeResources cappedWantedResources = applicationLimits.cap(wantedResources.nodeResources());
- Optional<AllocatableClusterResources> best = Optional.empty();
- Optional<AllocatableClusterResources> bestDisregardingDiskLimit = Optional.empty();
+ Optional<AllocatableResources> best = Optional.empty();
+ Optional<AllocatableResources> bestDisregardingDiskLimit = Optional.empty();
for (Flavor flavor : nodeRepository.flavors().getFlavors()) {
// Flavor decide resources: Real resources are the worst case real resources we'll get if we ask for these advertised resources
NodeResources advertisedResources = nodeRepository.resourcesCalculator().advertisedResourcesOf(flavor);
@@ -216,18 +230,18 @@ public class AllocatableClusterResources {
if ( ! between(applicationLimits.min().nodeResources(), applicationLimits.max().nodeResources(), advertisedResources)) continue;
if ( ! systemLimits.isWithinRealLimits(realResources, applicationId, clusterSpec)) continue;
- var candidate = new AllocatableClusterResources(wantedResources.with(realResources),
- advertisedResources,
- wantedResources,
- clusterSpec);
+ var candidate = new AllocatableResources(wantedResources.with(realResources),
+ advertisedResources,
+ wantedResources,
+ clusterSpec);
if ( ! systemLimits.isWithinAdvertisedDiskLimits(advertisedResources, clusterSpec)) { // TODO: Remove when disk limit is enforced
- if (bestDisregardingDiskLimit.isEmpty() || candidate.preferableTo(bestDisregardingDiskLimit.get())) {
+ if (bestDisregardingDiskLimit.isEmpty() || candidate.preferableTo(bestDisregardingDiskLimit.get(), model)) {
bestDisregardingDiskLimit = Optional.of(candidate);
}
continue;
}
- if (best.isEmpty() || candidate.preferableTo(best.get())) {
+ if (best.isEmpty() || candidate.preferableTo(best.get(), model)) {
best = Optional.of(candidate);
}
}
@@ -237,13 +251,13 @@ public class AllocatableClusterResources {
}
}
- private static AllocatableClusterResources calculateAllocatableResources(ClusterResources wantedResources,
- NodeRepository nodeRepository,
- ApplicationId applicationId,
- ClusterSpec clusterSpec,
- Limits applicationLimits,
- boolean exclusive,
- boolean bestCase) {
+ private static AllocatableResources calculateAllocatableResources(ClusterResources wantedResources,
+ NodeRepository nodeRepository,
+ ApplicationId applicationId,
+ ClusterSpec clusterSpec,
+ Limits applicationLimits,
+ boolean exclusive,
+ boolean bestCase) {
var systemLimits = nodeRepository.nodeResourceLimits();
var advertisedResources = nodeRepository.resourcesCalculator().realToRequest(wantedResources.nodeResources(), exclusive, bestCase);
advertisedResources = systemLimits.enlargeToLegal(advertisedResources, applicationId, clusterSpec, exclusive, true); // Ask for something legal
@@ -255,10 +269,10 @@ public class AllocatableClusterResources {
advertisedResources = advertisedResources.with(NodeResources.StorageType.remote);
realResources = nodeRepository.resourcesCalculator().requestToReal(advertisedResources, exclusive, bestCase);
}
- return new AllocatableClusterResources(wantedResources.with(realResources),
- advertisedResources,
- wantedResources,
- clusterSpec);
+ return new AllocatableResources(wantedResources.with(realResources),
+ advertisedResources,
+ wantedResources,
+ clusterSpec);
}
/** Returns true if the given resources could be allocated on any of the given host flavors */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
index 42bb16005ee..f650d8ec269 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/AllocationOptimizer.java
@@ -5,7 +5,6 @@ import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.provisioning.NodeResourceLimits;
import java.util.Optional;
@@ -35,21 +34,20 @@ public class AllocationOptimizer {
* @return the best allocation, if there are any possible legal allocations, fulfilling the target
* fully or partially, within the limits
*/
- public Optional<AllocatableClusterResources> findBestAllocation(Load loadAdjustment,
- AllocatableClusterResources current,
- ClusterModel clusterModel,
- Limits limits) {
+ public Optional<AllocatableResources> findBestAllocation(Load loadAdjustment,
+ ClusterModel model,
+ Limits limits) {
if (limits.isEmpty())
limits = Limits.of(new ClusterResources(minimumNodes, 1, NodeResources.unspecified()),
new ClusterResources(maximumNodes, maximumNodes, NodeResources.unspecified()),
IntRange.empty());
else
- limits = atLeast(minimumNodes, limits).fullySpecified(current.clusterSpec(), nodeRepository, clusterModel.application().id());
- Optional<AllocatableClusterResources> bestAllocation = Optional.empty();
+ limits = atLeast(minimumNodes, limits).fullySpecified(model.current().clusterSpec(), nodeRepository, model.application().id());
+ Optional<AllocatableResources> bestAllocation = Optional.empty();
var availableRealHostResources = nodeRepository.zone().cloud().dynamicProvisioning()
? nodeRepository.flavors().getFlavors().stream().map(flavor -> flavor.resources()).toList()
: nodeRepository.nodes().list().hosts().stream().map(host -> host.flavor().resources())
- .map(hostResources -> maxResourcesOf(hostResources, clusterModel))
+ .map(hostResources -> maxResourcesOf(hostResources, model))
.toList();
for (int groups = limits.min().groups(); groups <= limits.max().groups(); groups++) {
for (int nodes = limits.min().nodes(); nodes <= limits.max().nodes(); nodes++) {
@@ -58,15 +56,16 @@ public class AllocationOptimizer {
var resources = new ClusterResources(nodes,
groups,
nodeResourcesWith(nodes, groups,
- limits, loadAdjustment, current, clusterModel));
- var allocatableResources = AllocatableClusterResources.from(resources,
- clusterModel.application().id(),
- current.clusterSpec(),
- limits,
- availableRealHostResources,
- nodeRepository);
+ limits, loadAdjustment, model));
+ var allocatableResources = AllocatableResources.from(resources,
+ model.application().id(),
+ model.current().clusterSpec(),
+ limits,
+ availableRealHostResources,
+ model,
+ nodeRepository);
if (allocatableResources.isEmpty()) continue;
- if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get()))
+ if (bestAllocation.isEmpty() || allocatableResources.get().preferableTo(bestAllocation.get(), model))
bestAllocation = allocatableResources;
}
}
@@ -74,8 +73,8 @@ public class AllocationOptimizer {
}
/** Returns the max resources of a host one node may allocate. */
- private NodeResources maxResourcesOf(NodeResources hostResources, ClusterModel clusterModel) {
- if (nodeRepository.exclusiveAllocation(clusterModel.clusterSpec())) return hostResources;
+ private NodeResources maxResourcesOf(NodeResources hostResources, ClusterModel model) {
+ if (nodeRepository.exclusiveAllocation(model.clusterSpec())) return hostResources;
// static, shared hosts: Allocate at most half of the host cpu to simplify management
return hostResources.withVcpu(hostResources.vcpu() / 2);
}
@@ -88,9 +87,8 @@ public class AllocationOptimizer {
int groups,
Limits limits,
Load loadAdjustment,
- AllocatableClusterResources current,
- ClusterModel clusterModel) {
- var loadWithTarget = clusterModel.loadAdjustmentWith(nodes, groups, loadAdjustment);
+ ClusterModel model) {
+ var loadWithTarget = model.loadAdjustmentWith(nodes, groups, loadAdjustment);
// Leave some headroom above the ideal allocation to avoid immediately needing to scale back up
if (loadAdjustment.cpu() < 1 && (1.0 - loadWithTarget.cpu()) < headroomRequiredToScaleDown)
@@ -100,11 +98,11 @@ public class AllocationOptimizer {
if (loadAdjustment.disk() < 1 && (1.0 - loadWithTarget.disk()) < headroomRequiredToScaleDown)
loadAdjustment = loadAdjustment.withDisk(Math.min(1.0, loadAdjustment.disk() * (1.0 + headroomRequiredToScaleDown)));
- loadWithTarget = clusterModel.loadAdjustmentWith(nodes, groups, loadAdjustment);
+ loadWithTarget = model.loadAdjustmentWith(nodes, groups, loadAdjustment);
- var scaled = loadWithTarget.scaled(current.realResources().nodeResources());
+ var scaled = loadWithTarget.scaled(model.current().realResources().nodeResources());
var nonScaled = limits.isEmpty() || limits.min().nodeResources().isUnspecified()
- ? current.advertisedResources().nodeResources()
+ ? model.current().advertisedResources().nodeResources()
: limits.min().nodeResources(); // min=max for non-scaled
return nonScaled.withVcpu(scaled.vcpu()).withMemoryGb(scaled.memoryGb()).withDiskGb(scaled.diskGb());
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
index 32b59319a88..b5f86be68f6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaler.java
@@ -54,40 +54,40 @@ public class Autoscaler {
}
private Autoscaling autoscale(Application application, Cluster cluster, NodeList clusterNodes, Limits limits) {
- ClusterModel clusterModel = new ClusterModel(nodeRepository,
- application,
- clusterNodes.not().retired().clusterSpec(),
- cluster,
- clusterNodes,
- nodeRepository.metricsDb(),
- nodeRepository.clock());
- if (clusterModel.isEmpty()) return Autoscaling.empty();
+ var model = new ClusterModel(nodeRepository,
+ application,
+ clusterNodes.not().retired().clusterSpec(),
+ cluster,
+ clusterNodes,
+ new AllocatableResources(clusterNodes.not().retired(), nodeRepository),
+ nodeRepository.metricsDb(),
+ nodeRepository.clock());
+ if (model.isEmpty()) return Autoscaling.empty();
if (! limits.isEmpty() && cluster.minResources().equals(cluster.maxResources()))
- return Autoscaling.dontScale(Autoscaling.Status.unavailable, "Autoscaling is not enabled", clusterModel);
+ return Autoscaling.dontScale(Autoscaling.Status.unavailable, "Autoscaling is not enabled", model);
- if ( ! clusterModel.isStable(nodeRepository))
- return Autoscaling.dontScale(Status.waiting, "Cluster change in progress", clusterModel);
+ if ( ! model.isStable(nodeRepository))
+ return Autoscaling.dontScale(Status.waiting, "Cluster change in progress", model);
- var current = new AllocatableClusterResources(clusterNodes.not().retired(), nodeRepository);
- var loadAdjustment = clusterModel.loadAdjustment();
+ var loadAdjustment = model.loadAdjustment();
// Ensure we only scale down if we'll have enough headroom to not scale up again given a small load increase
- var target = allocationOptimizer.findBestAllocation(loadAdjustment, current, clusterModel, limits);
+ var target = allocationOptimizer.findBestAllocation(loadAdjustment, model, limits);
if (target.isEmpty())
- return Autoscaling.dontScale(Status.insufficient, "No allocations are possible within configured limits", clusterModel);
+ return Autoscaling.dontScale(Status.insufficient, "No allocations are possible within configured limits", model);
- if (! worthRescaling(current.realResources(), target.get().realResources())) {
+ if (! worthRescaling(model.current().realResources(), target.get().realResources())) {
if (target.get().fulfilment() < 0.9999999)
- return Autoscaling.dontScale(Status.insufficient, "Configured limits prevents ideal scaling of this cluster", clusterModel);
- else if ( ! clusterModel.safeToScaleDown() && clusterModel.idealLoad().any(v -> v < 1.0))
- return Autoscaling.dontScale(Status.ideal, "Cooling off before considering to scale down", clusterModel);
+ return Autoscaling.dontScale(Status.insufficient, "Configured limits prevents ideal scaling of this cluster", model);
+ else if ( ! model.safeToScaleDown() && model.idealLoad().any(v -> v < 1.0))
+ return Autoscaling.dontScale(Status.ideal, "Cooling off before considering to scale down", model);
else
- return Autoscaling.dontScale(Status.ideal, "Cluster is ideally scaled (within configured limits)", clusterModel);
+ return Autoscaling.dontScale(Status.ideal, "Cluster is ideally scaled (within configured limits)", model);
}
- return Autoscaling.scaleTo(target.get().advertisedResources(), clusterModel);
+ return Autoscaling.scaleTo(target.get().advertisedResources(), model);
}
/** Returns true if it is worthwhile to make the given resource change, false if it is too insignificant */
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java
index 0c86108b36c..fad280d6c29 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/Autoscaling.java
@@ -120,25 +120,25 @@ public class Autoscaling {
}
/** Creates an autoscaling conclusion which does not change the current allocation for a specified reason. */
- public static Autoscaling dontScale(Status status, String description, ClusterModel clusterModel) {
+ public static Autoscaling dontScale(Status status, String description, ClusterModel model) {
return new Autoscaling(status,
description,
Optional.empty(),
- clusterModel.at(),
- clusterModel.peakLoad(),
- clusterModel.idealLoad(),
- clusterModel.metrics());
+ model.at(),
+ model.peakLoad(),
+ model.idealLoad(),
+ model.metrics());
}
/** Creates an autoscaling conclusion to scale. */
- public static Autoscaling scaleTo(ClusterResources target, ClusterModel clusterModel) {
+ public static Autoscaling scaleTo(ClusterResources target, ClusterModel model) {
return new Autoscaling(Status.rescaling,
"Rescaling initiated due to load changes",
Optional.of(target),
- clusterModel.at(),
- clusterModel.peakLoad(),
- clusterModel.idealLoad(),
- clusterModel.metrics());
+ model.at(),
+ model.peakLoad(),
+ model.idealLoad(),
+ model.metrics());
}
public enum Status {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
index 0d64d4fbb10..8976dd9ff08 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModel.java
@@ -50,6 +50,7 @@ public class ClusterModel {
private final Application application;
private final ClusterSpec clusterSpec;
private final Cluster cluster;
+ private final AllocatableResources current;
private final CpuModel cpu = new CpuModel();
private final MemoryModel memory = new MemoryModel();
@@ -63,6 +64,7 @@ public class ClusterModel {
private final Clock clock;
private final Duration scalingDuration;
+ private final Duration allocationDuration;
private final ClusterTimeseries clusterTimeseries;
private final ClusterNodesTimeseries nodeTimeseries;
private final Instant at;
@@ -77,6 +79,7 @@ public class ClusterModel {
ClusterSpec clusterSpec,
Cluster cluster,
NodeList clusterNodes,
+ AllocatableResources current,
MetricsDb metricsDb,
Clock clock) {
this.nodeRepository = nodeRepository;
@@ -84,8 +87,10 @@ public class ClusterModel {
this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = clusterNodes;
+ this.current = current;
this.clock = clock;
this.scalingDuration = cluster.scalingDuration(clusterSpec);
+ this.allocationDuration = cluster.allocationDuration(clusterSpec);
this.clusterTimeseries = metricsDb.getClusterTimeseries(application.id(), cluster.id());
this.nodeTimeseries = new ClusterNodesTimeseries(scalingDuration(), cluster, nodes, metricsDb);
this.at = clock.instant();
@@ -95,8 +100,10 @@ public class ClusterModel {
Application application,
ClusterSpec clusterSpec,
Cluster cluster,
+ AllocatableResources current,
Clock clock,
Duration scalingDuration,
+ Duration allocationDuration,
ClusterTimeseries clusterTimeseries,
ClusterNodesTimeseries nodeTimeseries) {
this.nodeRepository = nodeRepository;
@@ -104,9 +111,11 @@ public class ClusterModel {
this.clusterSpec = clusterSpec;
this.cluster = cluster;
this.nodes = NodeList.of();
+ this.current = current;
this.clock = clock;
this.scalingDuration = scalingDuration;
+ this.allocationDuration = allocationDuration;
this.clusterTimeseries = clusterTimeseries;
this.nodeTimeseries = nodeTimeseries;
this.at = clock.instant();
@@ -114,6 +123,7 @@ public class ClusterModel {
public Application application() { return application; }
public ClusterSpec clusterSpec() { return clusterSpec; }
+ public AllocatableResources current() { return current; }
private ClusterNodesTimeseries nodeTimeseries() { return nodeTimeseries; }
private ClusterTimeseries clusterTimeseries() { return clusterTimeseries; }
@@ -127,6 +137,27 @@ public class ClusterModel {
/** Returns the predicted duration of a rescaling of this cluster */
public Duration scalingDuration() { return scalingDuration; }
+ /**
+ * Returns the predicted duration of a resource change in this cluster,
+ * until we, or the application, changes it again.
+ */
+ public Duration allocationDuration() { return allocationDuration; }
+
+ public boolean isContent() {
+ return clusterSpec.type().isContent();
+ }
+
+ /** Returns the predicted duration of data redistribution in this cluster. */
+ public Duration redistributionDuration() {
+ if (! isContent()) return Duration.ofMinutes(0);
+ return scalingDuration(); // TODO: Estimate separately
+ }
+
+ /** Returns the predicted duration of replacing all the nodes in this cluster. */
+ public Duration nodeReplacementDuration() {
+ return Duration.ofMinutes(5); // TODO: Estimate?
+ }
+
/** Returns the average of the peak load measurement in each dimension, from each node. */
public Load peakLoad() {
return nodeTimeseries().peakLoad();
@@ -137,6 +168,10 @@ public class ClusterModel {
return loadWith(nodeCount(), groupCount());
}
+ public boolean isExclusive() {
+ return nodeRepository.exclusiveAllocation(clusterSpec);
+ }
+
/** Returns the relative load adjustment that should be made to this cluster given available measurements. */
public Load loadAdjustment() {
if (nodeTimeseries().measurementsPerNode() < 0.5) return Load.one(); // Don't change based on very little data
@@ -237,16 +272,15 @@ public class ClusterModel {
private Load adjustQueryDependentIdealLoadByBcpGroupInfo(Load ideal) {
double currentClusterTotalVcpuPerGroup = nodes.not().retired().first().get().resources().vcpu() * groupSize();
-
double targetQueryRateToHandle = ( canRescaleWithinBcpDeadline() ? averageQueryRate().orElse(0)
: cluster.bcpGroupInfo().queryRate() )
* cluster.bcpGroupInfo().growthRateHeadroom() * trafficShiftHeadroom();
- double neededTotalVcpPerGroup = cluster.bcpGroupInfo().cpuCostPerQuery() * targetQueryRateToHandle / groupCount() +
+ double neededTotalVcpuPerGroup = cluster.bcpGroupInfo().cpuCostPerQuery() * targetQueryRateToHandle / groupCount() +
( 1 - cpu.queryFraction()) * cpu.idealLoad() *
(clusterSpec.type().isContainer() ? 1 : groupSize());
-
- double cpuAdjustment = neededTotalVcpPerGroup / currentClusterTotalVcpuPerGroup;
- return ideal.withCpu(peakLoad().cpu() / cpuAdjustment);
+ // Max 1: Only use bcp group info if it indicates that we need to scale *up*
+ double cpuAdjustment = Math.max(1.0, neededTotalVcpuPerGroup / currentClusterTotalVcpuPerGroup);
+ return ideal.withCpu(ideal.cpu() / cpuAdjustment);
}
private boolean hasScaledIn(Duration period) {
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceChange.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceChange.java
new file mode 100644
index 00000000000..7a26a217e61
--- /dev/null
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/autoscale/ResourceChange.java
@@ -0,0 +1,94 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.provision.autoscale;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.NodeResources;
+
+import java.time.Duration;
+
+/**
+ * A resource change.
+ *
+ * @author bratseth
+ */
+public class ResourceChange {
+
+ private final AllocatableResources from, to;
+ private final ClusterModel model;
+
+ public ResourceChange(ClusterModel model, AllocatableResources to) {
+ this.from = model.current();
+ this.to = to;
+ this.model = model;
+ }
+
+ /** Returns the estimated total cost of this resource change (coming in addition to the "to" resource cost). */
+ public double cost() {
+ if (model.isContent()) {
+ if (requiresNodeReplacement()) return toHours(model.redistributionDuration()) * from.cost();
+ return toHours(model.redistributionDuration()) * from.advertisedResources().cost() * nodesToRetire();
+ }
+ else {
+ if (requiresNodeReplacement()) return toHours(model.nodeReplacementDuration()) * from.cost();
+ return 0;
+ }
+ }
+
+ private boolean requiresRedistribution() {
+ if ( ! model.clusterSpec().type().isContent()) return false;
+ if (from.nodes() != to.nodes()) return true;
+ if (from.groups() != to.groups()) return true;
+ if (requiresNodeReplacement()) return true;
+ return false;
+ }
+
+ /**
+ * Returns the estimated number of nodes that will be retired by this change,
+ * given that it is a content cluster and no node replacement is necessary.
+ * This may not be perfectly correct when the change also alters the group layout.
+ */
+ private int nodesToRetire() {
+ return Math.max(0, from.nodes() - to.nodes());
+ }
+
+ /** Returns true if the *existing* nodes of this cluster need to be replaced in this change. */
+ private boolean requiresNodeReplacement() {
+ var fromNodes = from.advertisedResources().nodeResources();
+ var toNodes = to.advertisedResources().nodeResources();
+
+ if (model.isExclusive()) {
+ return ! fromNodes.equals(toNodes);
+ }
+ else {
+ if ( ! fromNodes.justNonNumbers().equalsWhereSpecified(toNodes.justNonNumbers())) return true;
+ if ( ! canInPlaceResize()) return true;
+ return false;
+ }
+ }
+
+ private double toHours(Duration duration) {
+ return duration.toMillis() / 3600000.0;
+ }
+
+ private boolean canInPlaceResize() {
+ return canInPlaceResize(from.nodes(), from.advertisedResources().nodeResources(),
+ to.nodes(), to.advertisedResources().nodeResources(),
+ model.clusterSpec().type(), model.isExclusive(), from.groups() != to.groups());
+ }
+
+ public static boolean canInPlaceResize(int fromCount, NodeResources fromResources,
+ int toCount, NodeResources toResources,
+ ClusterSpec.Type type, boolean exclusive, boolean hasTopologyChange) {
+ if (exclusive) return false; // exclusive resources must match the host
+
+ // Never allow in-place resize when also changing topology or decreasing cluster size
+ if (hasTopologyChange || toCount < fromCount) return false;
+
+ // Do not allow increasing cluster size and decreasing node resources at the same time for content nodes
+ if (type.isContent() && toCount > fromCount && !toResources.satisfies(fromResources.justNumbers()))
+ return false;
+
+ return true;
+ }
+
+}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
index 92f86325cf7..6a01a2bcd18 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/AutoscalingMaintainer.java
@@ -16,7 +16,7 @@ import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Applications;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
-import com.yahoo.vespa.hosted.provision.autoscale.AllocatableClusterResources;
+import com.yahoo.vespa.hosted.provision.autoscale.AllocatableResources;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaler;
import com.yahoo.vespa.hosted.provision.autoscale.Autoscaling;
import com.yahoo.vespa.hosted.provision.autoscale.NodeMetricSnapshot;
@@ -87,7 +87,7 @@ public class AutoscalingMaintainer extends NodeRepositoryMaintainer {
NodeList clusterNodes = nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId);
cluster = updateCompletion(cluster, clusterNodes);
- var current = new AllocatableClusterResources(clusterNodes.not().retired(), nodeRepository()).advertisedResources();
+ var current = new AllocatableResources(clusterNodes.not().retired(), nodeRepository()).advertisedResources();
// Autoscale unless an autoscaling is already in progress
Autoscaling autoscaling = null;
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
index c388273b1a6..43a135a7e04 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDb.java
@@ -88,8 +88,8 @@ public class CuratorDb {
/** Simple cache for deserialized node objects, based on their ZK node version. */
private final Cache<Path, Pair<Integer, Node>> cachedNodes = CacheBuilder.newBuilder().recordStats().build();
- public CuratorDb(NodeFlavors flavors, Curator curator, Clock clock, boolean useCache, long nodeCacheSize) {
- this.nodeSerializer = new NodeSerializer(flavors, nodeCacheSize);
+ public CuratorDb(NodeFlavors flavors, Curator curator, Clock clock, boolean useCache) {
+ this.nodeSerializer = new NodeSerializer(flavors);
this.db = new CachingCurator(curator, root, useCache);
this.clock = clock;
this.provisionIndexCounter = new CuratorCounter(curator, root.append("provisionIndexCounter"));
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
index 7e82ef55917..df39a0230b6 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializer.java
@@ -134,7 +134,7 @@ public class NodeSerializer {
// ---------------- Serialization ----------------------------------------------------
- public NodeSerializer(NodeFlavors flavors, long cacheSize) {
+ public NodeSerializer(NodeFlavors flavors) {
this.flavors = flavors;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 8a39f309935..5ce5bc8abd0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -98,10 +98,7 @@ public class CapacityPolicies {
Architecture architecture = adminClusterArchitecture(applicationId);
if (nodeRepository.exclusiveAllocation(clusterSpec)) {
- var resources = legacySmallestExclusiveResources(); //TODO: use 8Gb as default when no apps are using 4Gb
- return versioned(clusterSpec, Map.of(new Version(0), resources,
- new Version(8, 182, 12), resources.with(architecture),
- new Version(8, 187), smallestExclusiveResources().with(architecture)));
+ return smallestExclusiveResources().with(architecture);
}
if (clusterSpec.id().value().equals("cluster-controllers")) {
@@ -131,8 +128,7 @@ public class CapacityPolicies {
// 1.32 fits floor(8/1.32) = 6 cluster controllers on each 8Gb host, and each will have
// 1.32-(0.7+0.6)*(1.32/8) = 1.1 Gb real memory given current taxes.
if (architecture == Architecture.x86_64)
- return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.14, 10, 0.3),
- new Version(8, 129, 4), new NodeResources(0.25, 1.32, 10, 0.3)));
+ return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.32, 10, 0.3)));
else
// arm64 nodes need more memory
return versioned(clusterSpec, Map.of(new Version(0), new NodeResources(0.25, 1.50, 10, 0.3)));
@@ -159,13 +155,6 @@ public class CapacityPolicies {
}
// The lowest amount of resources that can be exclusive allocated (i.e. a matching host flavor for this exists)
- private NodeResources legacySmallestExclusiveResources() {
- return (zone.cloud().name().equals(CloudName.GCP))
- ? new NodeResources(1, 4, 50, 0.3)
- : new NodeResources(0.5, 4, 50, 0.3);
- }
-
- // The lowest amount of resources that can be exclusive allocated (i.e. a matching host flavor for this exists)
private NodeResources smallestExclusiveResources() {
return (zone.cloud().name().equals(CloudName.GCP))
? new NodeResources(2, 8, 50, 0.3)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
index 3d0c1069584..a67a513550a 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeRepositoryProvisioner.java
@@ -23,7 +23,7 @@ import com.yahoo.vespa.hosted.provision.NodeList;
import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
-import com.yahoo.vespa.hosted.provision.autoscale.AllocatableClusterResources;
+import com.yahoo.vespa.hosted.provision.autoscale.AllocatableResources;
import com.yahoo.vespa.hosted.provision.autoscale.AllocationOptimizer;
import com.yahoo.vespa.hosted.provision.autoscale.ClusterModel;
import com.yahoo.vespa.hosted.provision.autoscale.Limits;
@@ -182,12 +182,12 @@ public class NodeRepositoryProvisioner implements Provisioner {
.not().retired()
.not().removable();
boolean firstDeployment = nodes.isEmpty();
- AllocatableClusterResources currentResources =
+ var current =
firstDeployment // start at min, preserve current resources otherwise
- ? new AllocatableClusterResources(initialResourcesFrom(requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
- : new AllocatableClusterResources(nodes, nodeRepository);
- var clusterModel = new ClusterModel(nodeRepository, application, clusterSpec, cluster, nodes, nodeRepository.metricsDb(), nodeRepository.clock());
- return within(Limits.of(requested), currentResources, firstDeployment, clusterModel);
+ ? new AllocatableResources(initialResourcesFrom(requested, clusterSpec, application.id()), clusterSpec, nodeRepository)
+ : new AllocatableResources(nodes, nodeRepository);
+ var model = new ClusterModel(nodeRepository, application, clusterSpec, cluster, nodes, current, nodeRepository.metricsDb(), nodeRepository.clock());
+ return within(Limits.of(requested), model, firstDeployment);
}
private ClusterResources initialResourcesFrom(Capacity requested, ClusterSpec clusterSpec, ApplicationId applicationId) {
@@ -197,21 +197,19 @@ public class NodeRepositoryProvisioner implements Provisioner {
/** Make the minimal adjustments needed to the current resources to stay within the limits */
private ClusterResources within(Limits limits,
- AllocatableClusterResources current,
- boolean firstDeployment,
- ClusterModel clusterModel) {
+ ClusterModel model,
+ boolean firstDeployment) {
if (limits.min().equals(limits.max())) return limits.min();
// Don't change current deployments that are still legal
- if (! firstDeployment && current.advertisedResources().isWithin(limits.min(), limits.max()))
- return current.advertisedResources();
+ if (! firstDeployment && model.current().advertisedResources().isWithin(limits.min(), limits.max()))
+ return model.current().advertisedResources();
// Otherwise, find an allocation that preserves the current resources as well as possible
return allocationOptimizer.findBestAllocation(Load.one(),
- current,
- clusterModel,
+ model,
limits)
- .orElseThrow(() -> newNoAllocationPossible(current.clusterSpec(), limits))
+ .orElseThrow(() -> newNoAllocationPossible(model.current().clusterSpec(), limits))
.advertisedResources();
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
index cea0608013d..77f37cadc0b 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
@@ -6,6 +6,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.autoscale.ResourceChange;
import java.time.Duration;
import java.util.Map;
@@ -162,16 +163,11 @@ public interface NodeSpec {
@Override
public boolean canResize(NodeResources currentNodeResources, NodeResources currentSpareHostResources,
ClusterSpec.Type type, boolean hasTopologyChange, int currentClusterSize) {
- if (exclusive) return false; // exclusive resources must match the host
- // Never allow in-place resize when also changing topology or decreasing cluster size
- if (hasTopologyChange || count < currentClusterSize) return false;
+ return ResourceChange.canInPlaceResize(currentClusterSize, currentNodeResources, count, requestedNodeResources,
+ type, exclusive, hasTopologyChange)
+ &&
+ currentSpareHostResources.add(currentNodeResources.justNumbers()).satisfies(requestedNodeResources);
- // Do not allow increasing cluster size and decreasing node resources at the same time for content nodes
- if (type.isContent() && count > currentClusterSize && !requestedNodeResources.satisfies(currentNodeResources.justNumbers()))
- return false;
-
- // Otherwise, allowed as long as the host can satisfy the new requested resources
- return currentSpareHostResources.add(currentNodeResources.justNumbers()).satisfies(requestedNodeResources);
}
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
index e3f67721eb5..90cf37aa876 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/MockNodeRepository.java
@@ -96,7 +96,7 @@ public class MockNodeRepository extends NodeRepository {
new MemoryMetricsDb(Clock.fixed(Instant.ofEpochMilli(123), ZoneId.of("Z"))),
new OrchestratorMock(),
true,
- 0, 1000);
+ 0);
this.flavors = flavors;
defaultCloudAccount = zone.cloud().account();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
index 49702a7d4c1..bf714cd9df1 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/NodeRepositoryTester.java
@@ -54,7 +54,7 @@ public class NodeRepositoryTester {
new MemoryMetricsDb(clock),
new OrchestratorMock(),
true,
- 0, 1000);
+ 0);
}
public NodeRepository nodeRepository() { return nodeRepository; }
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
index 1ed3c13cfff..f64e50310bb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/RealDataScenarioTest.java
@@ -133,7 +133,7 @@ public class RealDataScenarioTest {
}
private static void initFromZk(NodeRepository nodeRepository, Path pathToZkSnapshot) {
- NodeSerializer nodeSerializer = new NodeSerializer(nodeRepository.flavors(), 1000);
+ NodeSerializer nodeSerializer = new NodeSerializer(nodeRepository.flavors());
AtomicBoolean nodeNext = new AtomicBoolean(false);
Pattern zkNodePathPattern = Pattern.compile(".?/provision/v1/nodes/[a-z0-9.-]+\\.(com|cloud).?");
Consumer<String> consumer = input -> {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
index d33857d1a1e..4e19d04ffac 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingTest.java
@@ -6,6 +6,7 @@ import com.yahoo.config.provision.ClusterInfo;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.IntRange;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeResources.DiskSpeed;
@@ -18,6 +19,7 @@ import com.yahoo.vespa.hosted.provision.provisioning.DynamicProvisioningTester;
import org.junit.Test;
import java.time.Duration;
+import java.util.List;
import java.util.Optional;
import static com.yahoo.config.provision.NodeResources.DiskSpeed.fast;
@@ -88,7 +90,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(7));
fixture.loader().applyCpuLoad(0.1f, 10);
fixture.tester().assertResources("Scaling cpu down since usage has gone down significantly",
- 6, 1, 1.1, 9.8, 390.2,
+ 9, 1, 1.0, 6.5, 243.9,
fixture.autoscale());
}
@@ -173,7 +175,7 @@ public class AutoscalingTest {
fixture.setScalingDuration(Duration.ofHours(12)); // Fixture sets last completion to be 1 day into the past
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling up (only) since resource usage is too high",
- 8, 1, 7.1, 9.3, 75.4,
+ 5, 1, 11.7, 15.4, 132.0,
fixture.autoscale());
}
@@ -185,7 +187,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(1.0, 0.1, 1.0), 10);
fixture.tester().assertResources("Scaling cpu and disk up and memory down",
- 7, 1, 8.2, 4.0, 88.0,
+ 5, 1, 11.7, 4.0, 132.0,
fixture.autoscale());
}
@@ -208,7 +210,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up since peak resource usage is too high",
- 8, 1, 4.3, 7.4, 29.0,
+ 5, 1, 7.1, 12.3, 50.7,
fixture.autoscale());
}
@@ -232,7 +234,7 @@ public class AutoscalingTest {
fixture.loader().applyCpuLoad(0.70, 1);
fixture.loader().applyCpuLoad(0.01, 100);
fixture.tester().assertResources("Scaling up cpu since peak resource usage is too high",
- 8, 1, 4.3, 7.7, 34.3,
+ 5, 1, 7.1, 12.8, 60.0,
fixture.autoscale());
}
@@ -393,11 +395,10 @@ public class AutoscalingTest {
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
- fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
- fixture.loader().applyCpuLoad(0.4, 240);
+ fixture.loader().applyCpuLoad(0.5, 240);
fixture.tester().assertResources("Scaling cpu up",
- 6, 6, 5.0, 7.4, 22.3,
+ 6, 6, 4.5, 7.4, 22.3,
fixture.autoscale());
}
@@ -460,7 +461,7 @@ public class AutoscalingTest {
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyCpuLoad(1.0, 120);
fixture.tester().assertResources("Suggesting above capacity limit",
- 8, 1, 6.2, 7.4, 29.0,
+ 5, 1, 10.2, 12.3, 50.7,
fixture.tester().suggest(fixture.applicationId, fixture.clusterSpec.id(), min, min));
}
@@ -593,13 +594,12 @@ public class AutoscalingTest {
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
- fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
Duration timePassed = fixture.loader().addCpuMeasurements(0.25, 120);
fixture.tester().clock().advance(timePassed.negated());
fixture.loader().addLoadMeasurements(10, t -> t == 0 ? 200.0 : 100.0, t -> 10.0);
- fixture.tester().assertResources("Scaling up cpu, others down, changing to 1 group is cheaper",
- 7, 1, 3.2, 43.3, 129.8,
+ fixture.tester().assertResources("Changing to 1 group is cheaper",
+ 7, 1, 2.5, 43.3, 129.8,
fixture.autoscale());
}
@@ -650,11 +650,10 @@ public class AutoscalingTest {
.initialResources(Optional.of(now))
.capacity(Capacity.from(min, max))
.build();
- fixture.setScalingDuration(Duration.ofHours(6));
fixture.tester().clock().advance(Duration.ofDays(2));
fixture.loader().applyLoad(new Load(0.16, 0.02, 0.5), 120);
fixture.tester().assertResources("Scaling down memory",
- 7, 1, 2.5, 4.0, 80.2,
+ 6, 1, 2.1, 4.0, 96.2,
fixture.autoscale());
}
@@ -710,16 +709,16 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since we assume we need 2x cpu for growth when no scaling time data",
- 8, 1, 1.6, 7.4, 29.0,
+ 5, 1, 2.6, 12.3, 50.7,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofHours(8));
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> 100.0 + (t < 50 ? t : 100 - t), t -> 0.0);
fixture.tester.clock().advance(timeAdded.negated());
- fixture.loader().addCpuMeasurements(0.25, 200);
+ fixture.loader().addCpuMeasurements(0.20, 200);
fixture.tester().assertResources("Scale down since observed growth is slower than scaling time",
- 8, 1, 1.2, 7.4, 29.0,
+ 5, 1, 1.6, 12.3, 50.7,
fixture.autoscale());
fixture.setScalingDuration(Duration.ofHours(8));
@@ -730,7 +729,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.25, 200);
fixture.tester().assertResources("Scale up since observed growth is faster than scaling time",
- 8, 1, 1.5, 7.4, 29.0,
+ 5, 1, 2.4, 12.3, 50.7,
fixture.autoscale());
}
@@ -747,7 +746,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.7, 200);
fixture.tester().assertResources("Scale up slightly since observed growth is faster than scaling time, but we are not confident",
- 8, 1, 1.3, 7.4, 29.0,
+ 5, 1, 2.2, 12.3, 50.7,
fixture.autoscale());
}
@@ -766,16 +765,16 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester.assertResources("Query and write load is equal -> scale up somewhat",
- 8, 1, 1.8, 7.4, 29.0,
+ 5, 1, 2.9, 12.3, 50.7,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
timeAdded = fixture.loader().addLoadMeasurements(100, t -> t == 0 ? 800.0 : 400.0, t -> 100.0);
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
- // TODO: Ackhually, we scale down here - why?
+ // TODO: Ackhually, we scale up less here - why?
fixture.tester().assertResources("Query load is 4x write load -> scale up more",
- 8, 1, 1.4, 7.4, 29.0,
+ 5, 1, 2.2, 12.3, 50.7,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -783,7 +782,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Write load is 10x query load -> scale down",
- 6, 1, 1.1, 10.0, 40.5,
+ 5, 1, 1.3, 12.3, 50.7,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -791,7 +790,7 @@ public class AutoscalingTest {
fixture.tester.clock().advance(timeAdded.negated());
fixture.loader().addCpuMeasurements(0.4, 200);
fixture.tester().assertResources("Query only -> larger",
- 8, 1, 2.1, 7.4, 29.0,
+ 5, 1, 3.5, 12.3, 50.7,
fixture.autoscale());
fixture.tester().clock().advance(Duration.ofDays(2));
@@ -954,4 +953,32 @@ public class AutoscalingTest {
.build();
}
+ @Test
+ public void change_not_requiring_node_replacement_is_preferred() {
+ var min = new ClusterResources(5, 1, new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote));
+ var max = new ClusterResources(6, 1, new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote));
+
+ List<Flavor> flavors = List.of(new Flavor("arm_16", new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote, NodeResources.Architecture.arm64)),
+ new Flavor("x86_16", new NodeResources( 16, 64, 200, 1, DiskSpeed.fast, StorageType.remote, NodeResources.Architecture.x86_64)));
+ var fixture = DynamicProvisioningTester.fixture()
+ .clusterType(ClusterSpec.Type.container)
+ .hostFlavors(flavors)
+ .awsZone(false, Environment.prod)
+ .capacity(Capacity.from(min, max))
+ .initialResources(Optional.of(min.with(min.nodeResources().with(NodeResources.Architecture.x86_64))))
+ .build();
+ var nodes = fixture.nodes().not().retired().asList();
+ assertEquals(5, nodes.size());
+ assertEquals(NodeResources.Architecture.x86_64, nodes.get(0).resources().architecture());
+
+ fixture.tester().clock().advance(Duration.ofHours(5));
+ fixture.loader().applyCpuLoad(0.27, 10); // trigger rescaling, but don't cause fulfilment < 1
+ var autoscaling = fixture.autoscale();
+ fixture.deploy(Capacity.from(autoscaling.resources().get()));
+ nodes = fixture.nodes().not().retired().asList();
+ assertEquals(6, nodes.size());
+ assertEquals("We stay with x86 even though the first matching flavor is arm",
+ NodeResources.Architecture.x86_64, nodes.get(0).resources().architecture());
+ }
+
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
index 379dbb27d87..be7bc3c44a8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/AutoscalingUsingBcpGroupInfoTest.java
@@ -32,7 +32,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 4.0, 7.4, 29.0,
+ 8, 1, 3.4, 7.4, 29.0,
fixture.autoscale());
// Higher query rate
@@ -40,7 +40,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 8.0, 7.4, 29.0,
+ 8, 1, 6.8, 7.4, 29.0,
fixture.autoscale());
// Higher headroom
@@ -48,7 +48,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 4.8, 7.4, 29.0,
+ 8, 1, 4.0, 7.4, 29.0,
fixture.autoscale());
// Higher per query cost
@@ -56,7 +56,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 6.0, 7.4, 29.0,
+ 8, 1, 5.1, 7.4, 29.0,
fixture.autoscale());
// Bcp elsewhere is 0 - use local only
@@ -85,7 +85,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 10.5, 43.2, 190.0,
+ 3, 3, 11.7, 43.2, 190.0,
fixture.autoscale());
// Higher query rate
@@ -93,7 +93,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 20.9, 43.2, 190.0,
+ 3, 3, 23.1, 43.2, 190.0,
fixture.autoscale());
// Higher headroom
@@ -101,7 +101,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 12.4, 43.2, 190.0,
+ 3, 3, 13.8, 43.2, 190.0,
fixture.autoscale());
// Higher per query cost
@@ -109,7 +109,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 3, 3, 15.7, 43.2, 190.0,
+ 3, 3, 17.4, 43.2, 190.0,
fixture.autoscale());
}
@@ -127,7 +127,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 4.0, 16.0, 40.8,
+ 4, 1, 8.0, 16.0, 40.8,
fixture.autoscale());
// Higher query rate (mem and disk changes are due to being assigned larger hosts where we get less overhead share
@@ -135,7 +135,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.1, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 8.0, 16.0, 40.8,
+ 7, 1, 8.0, 16.0, 40.8,
fixture.autoscale());
// Higher headroom
@@ -143,7 +143,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.3, 0.3));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 5, 1, 8.0, 16.0, 40.8,
+ 8, 1, 4.0, 16.0, 40.8,
fixture.autoscale());
// Higher per query cost
@@ -151,7 +151,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 6, 1, 8.0, 16.0, 40.8,
+ 10, 1, 4.0, 16.0, 40.8,
fixture.autoscale());
}
@@ -173,7 +173,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(100, 1.1, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("No need for traffic shift headroom",
- 2, 1, 2.0, 16.0, 40.8,
+ 3, 1, 4.0, 16.0, 40.8,
fixture.autoscale());
}
@@ -186,7 +186,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.store(new BcpGroupInfo(200, 1.3, 0.45));
fixture.loader().addCpuMeasurements(0.7f, 10);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 14.2, 7.4, 29.0,
+ 8, 1, 11.9, 7.4, 29.0,
fixture.autoscale());
// Some local traffic
@@ -196,7 +196,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration1.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 10.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 6.9, 7.4, 29.0,
+ 8, 1, 6.8, 7.4, 29.0,
fixture.autoscale());
// Enough local traffic to get half the votes
@@ -206,7 +206,7 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.tester().clock().advance(duration2.negated());
fixture.loader().addQueryRateMeasurements(10, __ -> 50.0);
fixture.tester().assertResources("Scaling up cpu using bcp group cpu info",
- 8, 1, 2.9, 7.4, 29.0,
+ 8, 1, 3.0, 7.4, 29.0,
fixture.autoscale());
// Mostly local
@@ -270,6 +270,21 @@ public class AutoscalingUsingBcpGroupInfoTest {
fixture.autoscale());
}
+ @Test
+ public void test_autoscaling_containers_with_some_local_traffic() {
+ var fixture = DynamicProvisioningTester.fixture().clusterType(ClusterSpec.Type.container).awsProdSetup(true).build();
+
+ // Some local traffic
+ fixture.tester().clock().advance(Duration.ofDays(2));
+ fixture.store(new BcpGroupInfo(200, 1.9, 0.01));
+ Duration duration1 = fixture.loader().addCpuMeasurements(0.58f, 10);
+ fixture.tester().clock().advance(duration1.negated());
+ fixture.loader().addQueryRateMeasurements(10, __ -> 10.0);
+ fixture.tester().assertResources("Not scaling down due to group info, even though it contains much evidence queries are cheap",
+ 3, 1, 4.0, 16.0, 40.8,
+ fixture.autoscale());
+ }
+
/** Tests with varying BCP group info parameters. */
@Test
public void test_autoscaling_metrics() {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
index ec084014a6a..f07d52a4a7f 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/ClusterModelTest.java
@@ -5,17 +5,12 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
-import com.yahoo.config.provision.Zone;
import com.yahoo.test.ManualClock;
-import com.yahoo.vespa.curator.mock.MockCurator;
-import com.yahoo.vespa.hosted.provision.NodeRepository;
import com.yahoo.vespa.hosted.provision.applications.Application;
import com.yahoo.vespa.hosted.provision.applications.Cluster;
import com.yahoo.vespa.hosted.provision.applications.Status;
import com.yahoo.vespa.hosted.provision.provisioning.ProvisioningTester;
-import com.yahoo.vespa.hosted.provision.testutils.MockNodeRepository;
import org.junit.Test;
import java.time.Duration;
@@ -36,10 +31,10 @@ public class ClusterModelTest {
public void unit_adjustment_should_cause_no_change() {
var model = clusterModelWithNoData(); // 5 nodes, 1 group
assertEquals(Load.one(), model.loadAdjustment());
- var target = model.loadAdjustment().scaled(resources());
+ var target = model.loadAdjustment().scaled(nodeResources());
int testingNodes = 5 - 1;
int currentNodes = 5 - 1;
- assertEquals(resources(), model.loadWith(testingNodes, 1).scaled(Load.one().divide(model.loadWith(currentNodes, 1)).scaled(target)));
+ assertEquals(nodeResources(), model.loadWith(testingNodes, 1).scaled(Load.one().divide(model.loadWith(currentNodes, 1)).scaled(target)));
}
@Test
@@ -91,16 +86,23 @@ public class ClusterModelTest {
ManualClock clock = new ManualClock();
Application application = Application.empty(ApplicationId.from("t1", "a1", "i1"));
ClusterSpec clusterSpec = clusterSpec();
- Cluster cluster = cluster(resources());
+ Cluster cluster = cluster();
application = application.with(cluster);
- return new ClusterModel(new ProvisioningTester.Builder().build().nodeRepository(),
+ var nodeRepository = new ProvisioningTester.Builder().build().nodeRepository();
+ return new ClusterModel(nodeRepository,
application.with(status),
- clusterSpec, cluster, clock, Duration.ofMinutes(10),
+ clusterSpec, cluster,
+ new AllocatableResources(clusterResources(), clusterSpec, nodeRepository),
+ clock, Duration.ofMinutes(10), Duration.ofMinutes(5),
timeseries(cluster,100, queryRate, writeRate, clock),
ClusterNodesTimeseries.empty());
}
- private NodeResources resources() {
+ private ClusterResources clusterResources() {
+ return new ClusterResources(5, 1, nodeResources());
+ }
+
+ private NodeResources nodeResources() {
return new NodeResources(1, 10, 100, 1);
}
@@ -111,10 +113,10 @@ public class ClusterModelTest {
.build();
}
- private Cluster cluster(NodeResources resources) {
+ private Cluster cluster() {
return Cluster.create(ClusterSpec.Id.from("test"),
false,
- Capacity.from(new ClusterResources(5, 1, resources)));
+ Capacity.from(clusterResources()));
}
/** Creates the given number of measurements, spaced 5 minutes between, using the given function */
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
index 33d3d3d50dc..78feba14fbf 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/autoscale/Fixture.java
@@ -5,17 +5,14 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Capacity;
import com.yahoo.config.provision.Cloud;
-import com.yahoo.config.provision.ClusterInfo;
import com.yahoo.config.provision.ClusterResources;
import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
-import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
-import com.yahoo.vespa.curator.mock.MockCurator;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.flags.custom.HostResources;
@@ -29,7 +26,6 @@ import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsHostResourcesCalcu
import com.yahoo.vespa.hosted.provision.autoscale.awsnodes.AwsNodeTypes;
import com.yahoo.vespa.hosted.provision.provisioning.DynamicProvisioningTester;
import com.yahoo.vespa.hosted.provision.provisioning.HostResourcesCalculator;
-import com.yahoo.vespa.hosted.provision.testutils.MockNodeRepository;
import java.time.Duration;
import java.util.Arrays;
@@ -72,9 +68,9 @@ public class Fixture {
return tester().nodeRepository().applications().get(applicationId).orElse(Application.empty(applicationId));
}
- public AllocatableClusterResources currentResources() {
- return new AllocatableClusterResources(tester.nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId()),
- tester.nodeRepository());
+ public AllocatableResources currentResources() {
+ return new AllocatableResources(tester.nodeRepository().nodes().list(Node.State.active).owner(applicationId).cluster(clusterId()),
+ tester.nodeRepository());
}
public Cluster cluster() {
@@ -89,6 +85,7 @@ public class Fixture {
clusterSpec,
cluster(),
nodes(),
+ new AllocatableResources(nodes(), tester.nodeRepository()),
tester.nodeRepository().metricsDb(),
tester.nodeRepository().clock());
}
@@ -180,6 +177,7 @@ public class Fixture {
new NodeResources(100, 1000, 1000, 1, NodeResources.DiskSpeed.any)));
HostResourcesCalculator resourceCalculator = new DynamicProvisioningTester.MockHostResourcesCalculator(zone);
final InMemoryFlagSource flagSource = new InMemoryFlagSource();
+ boolean reversedFlavorOrder = false;
int hostCount = 0;
public Fixture.Builder zone(Zone zone) {
@@ -228,12 +226,16 @@ public class Fixture {
public Fixture.Builder awsSetup(boolean allowHostSharing, Environment environment) {
return this.awsHostFlavors()
.awsResourceCalculator()
- .zone(new Zone(Cloud.builder().dynamicProvisioning(true)
- .allowHostSharing(allowHostSharing)
- .build(),
- SystemName.Public,
- environment,
- RegionName.from("aws-eu-west-1a")));
+ .awsZone(allowHostSharing, environment);
+ }
+
+ public Fixture.Builder awsZone(boolean allowHostSharing, Environment environment) {
+ return zone(new Zone(Cloud.builder().dynamicProvisioning(true)
+ .allowHostSharing(allowHostSharing)
+ .build(),
+ SystemName.Public,
+ environment,
+ RegionName.from("aws-eu-west-1a")));
}
public Fixture.Builder vespaVersion(Version version) {
@@ -246,6 +248,11 @@ public class Fixture {
return this;
}
+ public Fixture.Builder hostFlavors(List<Flavor> hostFlavors) {
+ this.hostFlavors = hostFlavors;
+ return this;
+ }
+
/** Adds the host resources available on AWS. */
public Fixture.Builder awsHostFlavors() {
this.hostFlavors = AwsNodeTypes.asFlavors();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java
index 523feeeb303..eedf4946e3a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/CapacityCheckerTester.java
@@ -78,8 +78,7 @@ public class CapacityCheckerTester {
new MemoryMetricsDb(clock),
new OrchestratorMock(),
true,
- 0,
- 1000);
+ 0);
}
private void updateCapacityChecker() {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
index 8aaf0eb20e7..3145675325b 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ScalingSuggestionsMaintainerTest.java
@@ -75,7 +75,7 @@ public class ScalingSuggestionsMaintainerTest {
assertEquals("8 nodes with [vcpu: 3.2, memory: 4.5 Gb, disk: 10.0 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app1, cluster1, tester).resources().get().toString());
- assertEquals("8 nodes with [vcpu: 3.6, memory: 4.7 Gb, disk: 14.2 Gb, bandwidth: 0.1 Gbps, architecture: any]",
+ assertEquals("7 nodes with [vcpu: 4.1, memory: 5.3 Gb, disk: 16.5 Gb, bandwidth: 0.1 Gbps, architecture: any]",
suggestionOf(app2, cluster2, tester).resources().get().toString());
// Utilization goes way down
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java
index a5ac2be72ee..6d67f39d9bb 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/SpareCapacityMaintainerTest.java
@@ -273,8 +273,7 @@ public class SpareCapacityMaintainerTest {
new MemoryMetricsDb(clock),
new OrchestratorMock(),
true,
- 1,
- 1000);
+ 1);
deployer = new MockDeployer(nodeRepository);
maintainer = new SpareCapacityMaintainer(deployer, nodeRepository, metric, Duration.ofDays(1), maxIterations);
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
index c0d6ab90f06..e755f3c3cfc 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/CuratorDbTest.java
@@ -24,7 +24,7 @@ public class CuratorDbTest {
private final Curator curator = new MockCurator();
private final CuratorDb zkClient = new CuratorDb(
- FlavorConfigBuilder.createDummies("default"), curator, Clock.systemUTC(), true, 1000);
+ FlavorConfigBuilder.createDummies("default"), curator, Clock.systemUTC(), true);
@Test
public void can_read_stored_host_information() throws Exception {
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
index 56f03423ad2..6e2d1e7fcd6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/persistence/NodeSerializerTest.java
@@ -60,7 +60,7 @@ import static org.junit.Assert.assertTrue;
public class NodeSerializerTest {
private final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", "large", "ugccloud-container", "arm64", "gpu");
- private final NodeSerializer nodeSerializer = new NodeSerializer(nodeFlavors, 1000);
+ private final NodeSerializer nodeSerializer = new NodeSerializer(nodeFlavors);
private final ManualClock clock = new ManualClock();
@Test
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index bca48b19ccf..60dd9ce59ef 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -128,8 +128,7 @@ public class ProvisioningTester {
new MemoryMetricsDb(clock),
orchestrator,
true,
- spareCount,
- 1000);
+ spareCount);
this.provisioner = new NodeRepositoryProvisioner(nodeRepository, zone, provisionServiceProvider, new MockMetric());
this.capacityPolicies = new CapacityPolicies(nodeRepository);
this.provisionLogger = new InMemoryProvisionLogger();
diff --git a/parent/pom.xml b/parent/pom.xml
index df20b94ec79..56c896d57cc 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -1161,6 +1161,21 @@
<artifactId>checker-qual</artifactId>
<version>3.30.0</version>
</dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client-apache-v2</artifactId>
+ <version>1.43.3</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client</artifactId>
+ <version>1.43.3</version>
+ </dependency>
+ <dependency>
+ <groupId>com.google.auth</groupId>
+ <artifactId>google-auth-library-oauth2-http</artifactId>
+ <version>1.19.0</version>
+ </dependency>
</dependencies>
</dependencyManagement>
diff --git a/persistence/src/vespa/persistence/spi/clusterstate.cpp b/persistence/src/vespa/persistence/spi/clusterstate.cpp
index ad5039fade1..e6708192d47 100644
--- a/persistence/src/vespa/persistence/spi/clusterstate.cpp
+++ b/persistence/src/vespa/persistence/spi/clusterstate.cpp
@@ -97,7 +97,7 @@ void ClusterState::serialize(vespalib::nbostream& o) const {
assert(_distribution);
assert(_state);
vespalib::asciistream tmp;
- _state->serialize(tmp, false);
+ _state->serialize(tmp);
o << tmp.str() << _nodeIndex;
o << _distribution->serialize();
}
diff --git a/searchcore/src/tests/proton/matching/matching_test.cpp b/searchcore/src/tests/proton/matching/matching_test.cpp
index b59384f1493..6ef462f80c4 100644
--- a/searchcore/src/tests/proton/matching/matching_test.cpp
+++ b/searchcore/src/tests/proton/matching/matching_test.cpp
@@ -1135,12 +1135,12 @@ TEST("require that docsum matcher can extract matching elements from single attr
EXPECT_EQUAL(list[1], 3u);
}
-struct GlobalFilterParamsFixture {
+struct AttributeBlueprintParamsFixture {
BlueprintFactory factory;
search::fef::test::IndexEnvironment index_env;
RankSetup rank_setup;
Properties rank_properties;
- GlobalFilterParamsFixture(double lower_limit, double upper_limit)
+ AttributeBlueprintParamsFixture(double lower_limit, double upper_limit, double target_hits_max_adjustment_factor)
: factory(),
index_env(),
rank_setup(factory, index_env),
@@ -1148,32 +1148,37 @@ struct GlobalFilterParamsFixture {
{
rank_setup.set_global_filter_lower_limit(lower_limit);
rank_setup.set_global_filter_upper_limit(upper_limit);
+ rank_setup.set_target_hits_max_adjustment_factor(target_hits_max_adjustment_factor);
}
- void set_query_properties(vespalib::stringref lower_limit, vespalib::stringref upper_limit) {
+ void set_query_properties(vespalib::stringref lower_limit, vespalib::stringref upper_limit,
+ vespalib::stringref target_hits_max_adjustment_factor) {
rank_properties.add(GlobalFilterLowerLimit::NAME, lower_limit);
rank_properties.add(GlobalFilterUpperLimit::NAME, upper_limit);
+ rank_properties.add(TargetHitsMaxAdjustmentFactor::NAME, target_hits_max_adjustment_factor);
}
AttributeBlueprintParams extract(uint32_t active_docids = 9, uint32_t docid_limit = 10) const {
- return MatchToolsFactory::extract_global_filter_params(rank_setup, rank_properties, active_docids, docid_limit);
+ return MatchToolsFactory::extract_attribute_blueprint_params(rank_setup, rank_properties, active_docids, docid_limit);
}
};
-TEST_F("global filter params are extracted from rank profile", GlobalFilterParamsFixture(0.2, 0.8))
+TEST_F("attribute blueprint params are extracted from rank profile", AttributeBlueprintParamsFixture(0.2, 0.8, 5.0))
{
auto params = f.extract();
EXPECT_EQUAL(0.2, params.global_filter_lower_limit);
EXPECT_EQUAL(0.8, params.global_filter_upper_limit);
+ EXPECT_EQUAL(5.0, params.target_hits_max_adjustment_factor);
}
-TEST_F("global filter params are extracted from query", GlobalFilterParamsFixture(0.2, 0.8))
+TEST_F("attribute blueprint params are extracted from query", AttributeBlueprintParamsFixture(0.2, 0.8, 5.0))
{
- f.set_query_properties("0.15", "0.75");
+ f.set_query_properties("0.15", "0.75", "3.0");
auto params = f.extract();
EXPECT_EQUAL(0.15, params.global_filter_lower_limit);
EXPECT_EQUAL(0.75, params.global_filter_upper_limit);
+ EXPECT_EQUAL(3.0, params.target_hits_max_adjustment_factor);
}
-TEST_F("global filter params are scaled with active hit ratio", GlobalFilterParamsFixture(0.2, 0.8))
+TEST_F("global filter params are scaled with active hit ratio", AttributeBlueprintParamsFixture(0.2, 0.8, 5.0))
{
auto params = f.extract(5, 10);
EXPECT_EQUAL(0.12, params.global_filter_lower_limit);
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp b/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
index c7cbdc29689..a353d4816f6 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_tools.cpp
@@ -176,11 +176,11 @@ MatchToolsFactory(QueryLimiter & queryLimiter,
const search::IDocumentMetaStoreContext::IReadGuard::SP * metaStoreReadGuard,
bool is_search)
: _queryLimiter(queryLimiter),
- _global_filter_params(extract_global_filter_params(rankSetup, rankProperties, metaStore.getNumActiveLids(), searchContext.getDocIdLimit())),
+ _attribute_blueprint_params(extract_attribute_blueprint_params(rankSetup, rankProperties, metaStore.getNumActiveLids(), searchContext.getDocIdLimit())),
_query(),
_match_limiter(),
_queryEnv(indexEnv, attributeContext, rankProperties, searchContext.getIndexes()),
- _requestContext(doom, attributeContext, _queryEnv, _queryEnv.getObjectStore(), _global_filter_params, metaStoreReadGuard),
+ _requestContext(doom, attributeContext, _queryEnv, _queryEnv.getObjectStore(), _attribute_blueprint_params, metaStoreReadGuard),
_mdl(),
_rankSetup(rankSetup),
_featureOverrides(featureOverrides),
@@ -203,8 +203,8 @@ MatchToolsFactory(QueryLimiter & queryLimiter,
_query.fetchPostings();
if (is_search) {
_query.handle_global_filter(searchContext.getDocIdLimit(),
- _global_filter_params.global_filter_lower_limit,
- _global_filter_params.global_filter_upper_limit,
+ _attribute_blueprint_params.global_filter_lower_limit,
+ _attribute_blueprint_params.global_filter_upper_limit,
thread_bundle, trace);
}
_query.freeze();
@@ -324,18 +324,20 @@ MatchToolsFactory::get_feature_rename_map() const
}
AttributeBlueprintParams
-MatchToolsFactory::extract_global_filter_params(const RankSetup& rank_setup, const Properties& rank_properties,
- uint32_t active_docids, uint32_t docid_limit)
+MatchToolsFactory::extract_attribute_blueprint_params(const RankSetup& rank_setup, const Properties& rank_properties,
+ uint32_t active_docids, uint32_t docid_limit)
{
double lower_limit = GlobalFilterLowerLimit::lookup(rank_properties, rank_setup.get_global_filter_lower_limit());
double upper_limit = GlobalFilterUpperLimit::lookup(rank_properties, rank_setup.get_global_filter_upper_limit());
+ double target_hits_max_adjustment_factor = TargetHitsMaxAdjustmentFactor::lookup(rank_properties, rank_setup.get_target_hits_max_adjustment_factor());
// Note that we count the reserved docid 0 as active.
// This ensures that when searchable-copies=1, the ratio is 1.0.
double active_hit_ratio = std::min(active_docids + 1, docid_limit) / static_cast<double>(docid_limit);
return {lower_limit * active_hit_ratio,
- upper_limit * active_hit_ratio};
+ upper_limit * active_hit_ratio,
+ target_hits_max_adjustment_factor};
}
AttributeOperationTask::AttributeOperationTask(const RequestContext & requestContext,
diff --git a/searchcore/src/vespa/searchcore/proton/matching/match_tools.h b/searchcore/src/vespa/searchcore/proton/matching/match_tools.h
index db30ea8d2b2..681690d4c36 100644
--- a/searchcore/src/vespa/searchcore/proton/matching/match_tools.h
+++ b/searchcore/src/vespa/searchcore/proton/matching/match_tools.h
@@ -121,7 +121,7 @@ private:
using IIndexEnvironment = search::fef::IIndexEnvironment;
using IDiversifier = search::queryeval::IDiversifier;
QueryLimiter & _queryLimiter;
- AttributeBlueprintParams _global_filter_params;
+ AttributeBlueprintParams _attribute_blueprint_params;
Query _query;
MaybeMatchPhaseLimiter::UP _match_limiter;
std::unique_ptr<RangeQueryLocator> _rangeLocator;
@@ -177,15 +177,15 @@ public:
const StringStringMap & get_feature_rename_map() const;
/**
- * Extracts global filter parameters from the rank-profile and query.
+ * Extracts attribute blueprint parameters from the rank-profile and query.
*
- * These parameters are expected to be in the range [0.0, 1.0], which matches the range of the estimated hit ratio of the query.
+ * The global filter parameters are expected to be in the range [0.0, 1.0], which matches the range of the estimated hit ratio of the query.
* When searchable-copies > 1, we must scale the parameters to match the effective range of the estimated hit ratio.
* This is done by multiplying with the active hit ratio (active docids / docid limit).
*/
static AttributeBlueprintParams
- extract_global_filter_params(const RankSetup& rank_setup, const Properties& rank_properties,
- uint32_t active_docids, uint32_t docid_limit);
+ extract_attribute_blueprint_params(const RankSetup& rank_setup, const Properties& rank_properties,
+ uint32_t active_docids, uint32_t docid_limit);
};
}
diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
index 6ca7d298ee2..0475f8462fc 100644
--- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
+++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
@@ -1320,15 +1320,16 @@ public:
return *_query_tensor;
}
- std::unique_ptr<NearestNeighborBlueprint> make_blueprint(bool approximate = true, double global_filter_lower_limit = 0.05) {
+ std::unique_ptr<NearestNeighborBlueprint> make_blueprint(bool approximate = true,
+ double global_filter_lower_limit = 0.05,
+ double target_hits_max_adjustment_factor = 20.0) {
search::queryeval::FieldSpec field("foo", 0, 0);
auto bp = std::make_unique<NearestNeighborBlueprint>(
field,
std::make_unique<DistanceCalculator>(this->as_dense_tensor(),
create_query_tensor(vec_2d(17, 42))),
- 3, approximate, 5,
- 100100.25,
- global_filter_lower_limit, 1.0, _no_doom.get_doom());
+ 3, approximate, 5, 100100.25,
+ global_filter_lower_limit, 1.0, target_hits_max_adjustment_factor, _no_doom.get_doom());
EXPECT_EQUAL(11u, bp->getState().estimate().estHits);
EXPECT_EQUAL(100100.25 * 100100.25, bp->get_distance_threshold());
return bp;
@@ -1362,6 +1363,19 @@ TEST_F("NN blueprint handles empty filter (post-filtering)", NearestNeighborBlue
EXPECT_EQUAL(NNBA::INDEX_TOP_K, bp->get_algorithm());
}
+TEST_F("NN blueprint adjustment of targetHits is bound (post-filtering)", NearestNeighborBlueprintFixture)
+{
+ auto bp = f.make_blueprint(true, 0.05, 3.5);
+ auto empty_filter = GlobalFilter::create();
+ bp->set_global_filter(*empty_filter, 0.2);
+ // targetHits is adjusted based on the estimated hit ratio of the query,
+ // but bound by target-hits-max-adjustment-factor
+ EXPECT_EQUAL(3u, bp->get_target_hits());
+ EXPECT_EQUAL(10u, bp->get_adjusted_target_hits());
+ EXPECT_EQUAL(10u, bp->getState().estimate().estHits);
+ EXPECT_EQUAL(NNBA::INDEX_TOP_K, bp->get_algorithm());
+}
+
TEST_F("NN blueprint handles strong filter (pre-filtering)", NearestNeighborBlueprintFixture)
{
auto bp = f.make_blueprint();
diff --git a/searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp b/searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp
index b9599a0c75d..f3545499231 100644
--- a/searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp
+++ b/searchlib/src/tests/queryeval/nearest_neighbor/nearest_neighbor_test.cpp
@@ -126,11 +126,12 @@ SimpleResult find_matches(Fixture &env, const Value &qtv, double threshold = std
auto dff = search::tensor::make_distance_function_factory(DistanceMetric::Euclidean, qtv.cells().type);
auto df = dff->for_query_vector(qtv.cells());
threshold = df->convert_threshold(threshold);
- DistanceCalculator dist_calc(attr, std::move(df));
NearestNeighborDistanceHeap dh(2);
dh.set_distance_threshold(threshold);
const GlobalFilter &filter = *env._global_filter;
- auto search = NearestNeighborIterator::create(strict, tfmd, dist_calc, dh, filter);
+ auto search = NearestNeighborIterator::create(strict, tfmd,
+ std::make_unique<DistanceCalculator>(attr, qtv),
+ dh, filter);
if (strict) {
return SimpleResult().searchStrict(*search, attr.getNumDocs());
} else {
@@ -253,10 +254,11 @@ std::vector<feature_t> get_rawscores(Fixture &env, const Value &qtv) {
auto &tfmd = *(md->resolveTermField(0));
auto &attr = *(env._attr);
auto dff = search::tensor::make_distance_function_factory(DistanceMetric::Euclidean, qtv.cells().type);
- DistanceCalculator dist_calc(attr, dff->for_query_vector(qtv.cells()));
NearestNeighborDistanceHeap dh(2);
auto dummy_filter = GlobalFilter::create();
- auto search = NearestNeighborIterator::create(strict, tfmd, dist_calc, dh, *dummy_filter);
+ auto search = NearestNeighborIterator::create(strict, tfmd,
+ std::make_unique<DistanceCalculator>(attr, qtv),
+ dh, *dummy_filter);
uint32_t limit = attr.getNumDocs();
uint32_t docid = 1;
search->initRange(docid, limit);
diff --git a/searchlib/src/tests/ranksetup/ranksetup_test.cpp b/searchlib/src/tests/ranksetup/ranksetup_test.cpp
index 50d9d36f575..f708df0a862 100644
--- a/searchlib/src/tests/ranksetup/ranksetup_test.cpp
+++ b/searchlib/src/tests/ranksetup/ranksetup_test.cpp
@@ -533,6 +533,9 @@ void RankSetupTest::testRankSetup()
env.getProperties().add(mutate::on_second_phase::Operation::NAME, "=7");
env.getProperties().add(mutate::on_summary::Attribute::NAME, "c");
env.getProperties().add(mutate::on_summary::Operation::NAME, "-=2");
+ env.getProperties().add(matching::GlobalFilterLowerLimit::NAME, "0.3");
+ env.getProperties().add(matching::GlobalFilterUpperLimit::NAME, "0.7");
+ env.getProperties().add(matching::TargetHitsMaxAdjustmentFactor::NAME, "5.0");
RankSetup rs(_factory, env);
EXPECT_FALSE(rs.has_match_features());
@@ -571,7 +574,9 @@ void RankSetupTest::testRankSetup()
EXPECT_EQUAL(rs.getMutateOnSecondPhase()._operation, "=7");
EXPECT_EQUAL(rs.getMutateOnSummary()._attribute, "c");
EXPECT_EQUAL(rs.getMutateOnSummary()._operation, "-=2");
-
+ EXPECT_EQUAL(rs.get_global_filter_lower_limit(), 0.3);
+ EXPECT_EQUAL(rs.get_global_filter_upper_limit(), 0.7);
+ EXPECT_EQUAL(rs.get_target_hits_max_adjustment_factor(), 5.0);
}
bool
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
index be631be6dca..453b7b321b9 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_factory.cpp
@@ -842,14 +842,16 @@ public:
}
try {
auto calc = tensor::DistanceCalculator::make_with_validation(_attr, *query_tensor);
+ const auto& params = getRequestContext().get_attribute_blueprint_params();
setResult(std::make_unique<queryeval::NearestNeighborBlueprint>(_field,
std::move(calc),
n.get_target_num_hits(),
n.get_allow_approximate(),
n.get_explore_additional_hits(),
n.get_distance_threshold(),
- getRequestContext().get_attribute_blueprint_params().global_filter_lower_limit,
- getRequestContext().get_attribute_blueprint_params().global_filter_upper_limit,
+ params.global_filter_lower_limit,
+ params.global_filter_upper_limit,
+ params.target_hits_max_adjustment_factor,
getRequestContext().getDoom()));
} catch (const vespalib::IllegalArgumentException& ex) {
return fail_nearest_neighbor_term(n, ex.getMessage());
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h
index 39f58c5382e..64213235c23 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_blueprint_params.h
@@ -13,17 +13,21 @@ struct AttributeBlueprintParams
{
double global_filter_lower_limit;
double global_filter_upper_limit;
+ double target_hits_max_adjustment_factor;
AttributeBlueprintParams(double global_filter_lower_limit_in,
- double global_filter_upper_limit_in)
+ double global_filter_upper_limit_in,
+ double target_hits_max_adjustment_factor_in)
: global_filter_lower_limit(global_filter_lower_limit_in),
- global_filter_upper_limit(global_filter_upper_limit_in)
+ global_filter_upper_limit(global_filter_upper_limit_in),
+ target_hits_max_adjustment_factor(target_hits_max_adjustment_factor_in)
{
}
AttributeBlueprintParams()
: AttributeBlueprintParams(fef::indexproperties::matching::GlobalFilterLowerLimit::DEFAULT_VALUE,
- fef::indexproperties::matching::GlobalFilterUpperLimit::DEFAULT_VALUE)
+ fef::indexproperties::matching::GlobalFilterUpperLimit::DEFAULT_VALUE,
+ fef::indexproperties::matching::TargetHitsMaxAdjustmentFactor::DEFAULT_VALUE)
{
}
};
diff --git a/searchlib/src/vespa/searchlib/fef/indexproperties.cpp b/searchlib/src/vespa/searchlib/fef/indexproperties.cpp
index 8be44ce0a0c..7871e66970e 100644
--- a/searchlib/src/vespa/searchlib/fef/indexproperties.cpp
+++ b/searchlib/src/vespa/searchlib/fef/indexproperties.cpp
@@ -422,6 +422,22 @@ GlobalFilterUpperLimit::lookup(const Properties &props, double defaultValue)
return lookupDouble(props, NAME, defaultValue);
}
+const vespalib::string TargetHitsMaxAdjustmentFactor::NAME("vespa.matching.nns.target_hits_max_adjustment_factor");
+
+const double TargetHitsMaxAdjustmentFactor::DEFAULT_VALUE(20.0);
+
+double
+TargetHitsMaxAdjustmentFactor::lookup(const Properties& props)
+{
+ return lookup(props, DEFAULT_VALUE);
+}
+
+double
+TargetHitsMaxAdjustmentFactor::lookup(const Properties& props, double defaultValue)
+{
+ return lookupDouble(props, NAME, defaultValue);
+}
+
} // namespace matching
namespace softtimeout {
diff --git a/searchlib/src/vespa/searchlib/fef/indexproperties.h b/searchlib/src/vespa/searchlib/fef/indexproperties.h
index f538e7bef2e..4f38a27d3fe 100644
--- a/searchlib/src/vespa/searchlib/fef/indexproperties.h
+++ b/searchlib/src/vespa/searchlib/fef/indexproperties.h
@@ -313,6 +313,21 @@ namespace matching {
static double lookup(const Properties &props);
static double lookup(const Properties &props, double defaultValue);
};
+
+ /**
+ * Property to control the auto-adjustment of targetHits in a nearestNeighbor search using HNSW index with post-filtering.
+ *
+ * The targetHits is auto-adjusted in an effort to expose targetHits hits to first-phase ranking after post-filtering:
+ * adjustedTargetHits = min(targetHits / estimatedHitRatio, targetHits * targetHitsMaxAdjustmentFactor).
+ *
+ * This property ensures an upper bound of adjustedTargetHits, avoiding that the search in the HNSW index takes too long.
+ **/
+ struct TargetHitsMaxAdjustmentFactor {
+ static const vespalib::string NAME;
+ static const double DEFAULT_VALUE;
+ static double lookup(const Properties &props);
+ static double lookup(const Properties &props, double defaultValue);
+ };
}
namespace softtimeout {
diff --git a/searchlib/src/vespa/searchlib/fef/ranksetup.cpp b/searchlib/src/vespa/searchlib/fef/ranksetup.cpp
index 823e39199df..9d4e547feef 100644
--- a/searchlib/src/vespa/searchlib/fef/ranksetup.cpp
+++ b/searchlib/src/vespa/searchlib/fef/ranksetup.cpp
@@ -68,6 +68,7 @@ RankSetup::RankSetup(const BlueprintFactory &factory, const IIndexEnvironment &i
_softTimeoutTailCost(0.1),
_global_filter_lower_limit(0.0),
_global_filter_upper_limit(1.0),
+ _target_hits_max_adjustment_factor(20.0),
_mutateOnMatch(),
_mutateOnFirstPhase(),
_mutateOnSecondPhase(),
@@ -121,6 +122,7 @@ RankSetup::configure()
setSoftTimeoutTailCost(softtimeout::TailCost::lookup(_indexEnv.getProperties()));
set_global_filter_lower_limit(matching::GlobalFilterLowerLimit::lookup(_indexEnv.getProperties()));
set_global_filter_upper_limit(matching::GlobalFilterUpperLimit::lookup(_indexEnv.getProperties()));
+ set_target_hits_max_adjustment_factor(matching::TargetHitsMaxAdjustmentFactor::lookup(_indexEnv.getProperties()));
_mutateOnMatch._attribute = mutate::on_match::Attribute::lookup(_indexEnv.getProperties());
_mutateOnMatch._operation = mutate::on_match::Operation::lookup(_indexEnv.getProperties());
_mutateOnFirstPhase._attribute = mutate::on_first_phase::Attribute::lookup(_indexEnv.getProperties());
diff --git a/searchlib/src/vespa/searchlib/fef/ranksetup.h b/searchlib/src/vespa/searchlib/fef/ranksetup.h
index 832b86d042a..72432c2ed8a 100644
--- a/searchlib/src/vespa/searchlib/fef/ranksetup.h
+++ b/searchlib/src/vespa/searchlib/fef/ranksetup.h
@@ -76,6 +76,7 @@ private:
double _softTimeoutTailCost;
double _global_filter_lower_limit;
double _global_filter_upper_limit;
+ double _target_hits_max_adjustment_factor;
MutateOperation _mutateOnMatch;
MutateOperation _mutateOnFirstPhase;
MutateOperation _mutateOnSecondPhase;
@@ -393,6 +394,8 @@ public:
double get_global_filter_lower_limit() const { return _global_filter_lower_limit; }
void set_global_filter_upper_limit(double v) { _global_filter_upper_limit = v; }
double get_global_filter_upper_limit() const { return _global_filter_upper_limit; }
+ void set_target_hits_max_adjustment_factor(double v) { _target_hits_max_adjustment_factor = v; }
+ double get_target_hits_max_adjustment_factor() const { return _target_hits_max_adjustment_factor; }
/**
* This method may be used to indicate that certain features
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
index 87ddb8b6edc..a70f387100b 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.cpp
@@ -43,6 +43,7 @@ NearestNeighborBlueprint::NearestNeighborBlueprint(const queryeval::FieldSpec& f
double distance_threshold,
double global_filter_lower_limit,
double global_filter_upper_limit,
+ double target_hits_max_adjustment_factor,
const vespalib::Doom& doom)
: ComplexLeafBlueprint(field),
_distance_calc(std::move(distance_calc)),
@@ -55,6 +56,7 @@ NearestNeighborBlueprint::NearestNeighborBlueprint(const queryeval::FieldSpec& f
_distance_threshold(std::numeric_limits<double>::max()),
_global_filter_lower_limit(global_filter_lower_limit),
_global_filter_upper_limit(global_filter_upper_limit),
+ _target_hits_max_adjustment_factor(target_hits_max_adjustment_factor),
_distance_heap(target_hits),
_found_hits(),
_algorithm(Algorithm::EXACT),
@@ -95,8 +97,10 @@ NearestNeighborBlueprint::set_global_filter(const GlobalFilter &global_filter, d
} else { // post-filtering case
// The goal is to expose 'targetHits' hits to first-phase ranking.
// We try to achieve this by adjusting targetHits based on the estimated hit ratio of the query before post-filtering.
+ // However, this is bound by 'target-hits-max-adjustment-factor' to limit the cost of searching the HNSW index.
if (estimated_hit_ratio > 0.0) {
- _adjusted_target_hits = static_cast<double>(_target_hits) / estimated_hit_ratio;
+ _adjusted_target_hits = std::min(static_cast<double>(_target_hits) / estimated_hit_ratio,
+ static_cast<double>(_target_hits) * _target_hits_max_adjustment_factor);
}
}
if (_algorithm != Algorithm::EXACT_FALLBACK) {
@@ -133,7 +137,8 @@ NearestNeighborBlueprint::createLeafSearch(const search::fef::TermFieldMatchData
default:
;
}
- return NearestNeighborIterator::create(strict, tfmd, *_distance_calc,
+ return NearestNeighborIterator::create(strict, tfmd,
+ std::make_unique<search::tensor::DistanceCalculator>(_attr_tensor, _query_tensor),
_distance_heap, *_global_filter);
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h
index f88cdd5adb1..174f0b23125 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_blueprint.h
@@ -38,6 +38,7 @@ private:
double _distance_threshold;
double _global_filter_lower_limit;
double _global_filter_upper_limit;
+ double _target_hits_max_adjustment_factor;
mutable NearestNeighborDistanceHeap _distance_heap;
std::vector<search::tensor::NearestNeighborIndex::Neighbor> _found_hits;
Algorithm _algorithm;
@@ -55,6 +56,7 @@ public:
double distance_threshold,
double global_filter_lower_limit,
double global_filter_upper_limit,
+ double target_hits_max_adjustment_factor,
const vespalib::Doom& doom);
NearestNeighborBlueprint(const NearestNeighborBlueprint&) = delete;
NearestNeighborBlueprint& operator=(const NearestNeighborBlueprint&) = delete;
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp
index 92c9a21db83..a71a8e6a49a 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.cpp
@@ -23,9 +23,8 @@ template <bool strict, bool has_filter>
class NearestNeighborImpl : public NearestNeighborIterator
{
public:
-
NearestNeighborImpl(Params params_in)
- : NearestNeighborIterator(params_in),
+ : NearestNeighborIterator(std::move(params_in)),
_lastScore(0.0)
{
}
@@ -53,7 +52,7 @@ public:
}
void doUnpack(uint32_t docId) override {
- double score = params().distance_calc.function().to_rawscore(_lastScore);
+ double score = params().distance_calc->function().to_rawscore(_lastScore);
params().tfmd.setRawScore(docId, score);
params().distanceHeap.used(_lastScore);
}
@@ -62,7 +61,7 @@ public:
private:
double computeDistance(uint32_t docId, double limit) {
- return params().distance_calc.calc_with_limit(docId, limit);
+ return params().distance_calc->calc_with_limit(docId, limit);
}
double _lastScore;
@@ -75,14 +74,14 @@ namespace {
template <bool has_filter>
std::unique_ptr<NearestNeighborIterator>
-resolve_strict(bool strict, const NearestNeighborIterator::Params &params)
+resolve_strict(bool strict, NearestNeighborIterator::Params params)
{
if (strict) {
using NNI = NearestNeighborImpl<true, has_filter>;
- return std::make_unique<NNI>(params);
+ return std::make_unique<NNI>(std::move(params));
} else {
using NNI = NearestNeighborImpl<false, has_filter>;
- return std::make_unique<NNI>(params);
+ return std::make_unique<NNI>(std::move(params));
}
}
@@ -92,15 +91,15 @@ std::unique_ptr<NearestNeighborIterator>
NearestNeighborIterator::create(
bool strict,
fef::TermFieldMatchData &tfmd,
- const search::tensor::DistanceCalculator &distance_calc,
+ std::unique_ptr<search::tensor::DistanceCalculator> distance_calc,
NearestNeighborDistanceHeap &distanceHeap,
const GlobalFilter &filter)
{
- Params params(tfmd, distance_calc, distanceHeap, filter);
+ Params params(tfmd, std::move(distance_calc), distanceHeap, filter);
if (filter.is_active()) {
- return resolve_strict<true>(strict, params);
+ return resolve_strict<true>(strict, std::move(params));
} else {
- return resolve_strict<false>(strict, params);
+ return resolve_strict<false>(strict, std::move(params));
}
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h
index fe3f8d51d06..884f0f2f3eb 100644
--- a/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h
+++ b/searchlib/src/vespa/searchlib/queryeval/nearest_neighbor_iterator.h
@@ -24,29 +24,29 @@ public:
struct Params {
fef::TermFieldMatchData &tfmd;
- const search::tensor::DistanceCalculator &distance_calc;
+ std::unique_ptr<search::tensor::DistanceCalculator> distance_calc;
NearestNeighborDistanceHeap &distanceHeap;
const GlobalFilter &filter;
Params(fef::TermFieldMatchData &tfmd_in,
- const search::tensor::DistanceCalculator &distance_calc_in,
+ std::unique_ptr<search::tensor::DistanceCalculator> distance_calc_in,
NearestNeighborDistanceHeap &distanceHeap_in,
const GlobalFilter &filter_in)
: tfmd(tfmd_in),
- distance_calc(distance_calc_in),
+ distance_calc(std::move(distance_calc_in)),
distanceHeap(distanceHeap_in),
filter(filter_in)
{}
};
NearestNeighborIterator(Params params_in)
- : _params(params_in)
+ : _params(std::move(params_in))
{}
static std::unique_ptr<NearestNeighborIterator> create(
bool strict,
fef::TermFieldMatchData &tfmd,
- const search::tensor::DistanceCalculator &distance_calc,
+ std::unique_ptr<search::tensor::DistanceCalculator> distance_calc,
NearestNeighborDistanceHeap &distanceHeap,
const GlobalFilter &filter);
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp b/searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp
index 5759b4b74ea..f65c7103540 100644
--- a/searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/distance_calculator.cpp
@@ -30,14 +30,6 @@ DistanceCalculator::DistanceCalculator(const tensor::ITensorAttribute& attr_tens
assert(_dist_fun);
}
-DistanceCalculator::DistanceCalculator(const tensor::ITensorAttribute& attr_tensor,
- BoundDistanceFunction::UP function_in)
- : _attr_tensor(attr_tensor),
- _query_tensor(nullptr),
- _dist_fun(std::move(function_in))
-{
-}
-
DistanceCalculator::~DistanceCalculator() = default;
namespace {
diff --git a/searchlib/src/vespa/searchlib/tensor/distance_calculator.h b/searchlib/src/vespa/searchlib/tensor/distance_calculator.h
index b65f4ff1868..f44bc0d33cf 100644
--- a/searchlib/src/vespa/searchlib/tensor/distance_calculator.h
+++ b/searchlib/src/vespa/searchlib/tensor/distance_calculator.h
@@ -29,12 +29,6 @@ public:
DistanceCalculator(const tensor::ITensorAttribute& attr_tensor,
const vespalib::eval::Value& query_tensor_in);
- /**
- * Only used by unit tests where ownership of query tensor and distance function is handled outside.
- */
- DistanceCalculator(const tensor::ITensorAttribute& attr_tensor,
- BoundDistanceFunction::UP function_in);
-
~DistanceCalculator();
const tensor::ITensorAttribute& attribute_tensor() const { return _attr_tensor; }
diff --git a/storage/src/tests/distributor/btree_bucket_database_test.cpp b/storage/src/tests/distributor/btree_bucket_database_test.cpp
index 14d5a4142a8..40575cacfba 100644
--- a/storage/src/tests/distributor/btree_bucket_database_test.cpp
+++ b/storage/src/tests/distributor/btree_bucket_database_test.cpp
@@ -19,15 +19,15 @@ using document::BucketId;
namespace {
-BucketCopy BC(uint32_t node_idx, uint32_t state) {
+BucketCopy BC(uint16_t node_idx, uint32_t state) {
api::BucketInfo info(0x123, state, state);
- return BucketCopy(0, node_idx, info);
+ return {0, node_idx, info};
}
BucketInfo BI(uint32_t node_idx, uint32_t state) {
BucketInfo bi;
- bi.addNode(BC(node_idx, state), toVector<uint16_t>(0));
+ bi.addNode(BC(node_idx, state), {0});
return bi;
}
diff --git a/storage/src/tests/distributor/bucketdatabasetest.cpp b/storage/src/tests/distributor/bucketdatabasetest.cpp
index fcc64e0cccf..032b8ad8a9c 100644
--- a/storage/src/tests/distributor/bucketdatabasetest.cpp
+++ b/storage/src/tests/distributor/bucketdatabasetest.cpp
@@ -1,9 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "bucketdatabasetest.h"
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/vespalib/util/benchmark_timer.h>
#include <chrono>
-#include <iomanip>
#include <algorithm>
namespace storage::distributor {
@@ -16,21 +16,21 @@ void BucketDatabaseTest::SetUp() {
namespace {
-BucketCopy BC(uint32_t nodeIdx) {
+BucketCopy BC(uint16_t nodeIdx) {
return BucketCopy(0, nodeIdx, api::BucketInfo());
}
-BucketInfo BI(uint32_t nodeIdx) {
+BucketInfo BI(uint16_t nodeIdx) {
BucketInfo bi;
- bi.addNode(BC(nodeIdx), toVector<uint16_t>(0));
+ bi.addNode(BC(nodeIdx), {0});
return bi;
}
-BucketInfo BI3(uint32_t node0, uint32_t node1, uint32_t node2) {
+BucketInfo BI3(uint16_t node0, uint16_t node1, uint16_t node2) {
BucketInfo bi;
- bi.addNode(BC(node0), toVector<uint16_t>(node0, node1, node2));
- bi.addNode(BC(node1), toVector<uint16_t>(node0, node1, node2));
- bi.addNode(BC(node2), toVector<uint16_t>(node0, node1, node2));
+ bi.addNode(BC(node0), {node0, node1, node2});
+ bi.addNode(BC(node1), {node0, node1, node2});
+ bi.addNode(BC(node2), {node0, node1, node2});
return bi;
}
diff --git a/storage/src/tests/distributor/bucketdatabasetest.h b/storage/src/tests/distributor/bucketdatabasetest.h
index 33f914f8fd2..f24a62728d3 100644
--- a/storage/src/tests/distributor/bucketdatabasetest.h
+++ b/storage/src/tests/distributor/bucketdatabasetest.h
@@ -2,7 +2,6 @@
#pragma once
#include <vespa/storage/bucketdb/bucketdatabase.h>
-#include <vespa/storage/storageutil/utils.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <functional>
@@ -11,19 +10,14 @@ namespace storage::distributor {
struct BucketDatabaseTest : public ::testing::TestWithParam<std::shared_ptr<BucketDatabase>> {
void SetUp() override ;
- std::string doFindParents(const std::vector<document::BucketId>& ids,
- const document::BucketId& searchId);
- std::string doFindAll(const std::vector<document::BucketId>& ids,
- const document::BucketId& searchId);
+ std::string doFindParents(const std::vector<document::BucketId>& ids, const document::BucketId& searchId);
+ std::string doFindAll(const std::vector<document::BucketId>& ids, const document::BucketId& searchId);
document::BucketId doCreate(const std::vector<document::BucketId>& ids,
- uint32_t minBits,
- const document::BucketId& wantedId);
+ uint32_t minBits, const document::BucketId& wantedId);
BucketDatabase& db() noexcept { return *GetParam(); }
- using UBoundFunc = std::function<
- document::BucketId(const BucketDatabase&,
- const document::BucketId&)>;
+ using UBoundFunc = std::function<document::BucketId(const BucketDatabase&, const document::BucketId&)>;
void doTestUpperBound(const UBoundFunc& f);
};
diff --git a/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp b/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
index 4d04e3ca51a..57a7fb529be 100644
--- a/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
+++ b/storage/src/tests/distributor/bucketdbmetricupdatertest.cpp
@@ -202,7 +202,7 @@ TEST_F(BucketDBMetricUpdaterTest, buckets_with_varying_trustedness) {
{
BucketInfo info(makeInfo(100, 200));
info.resetTrusted();
- BucketDatabase::Entry e(document::BucketId(16, 3), info);
+ BucketDatabase::Entry e(document::BucketId(16, 3), std::move(info));
metricUpdater.visit(e, 2);
}
metricUpdater.completeRound(false);
@@ -233,7 +233,7 @@ TEST_F(BucketDBMetricUpdaterTest, pick_largest_copy_if_no_trusted) {
// No trusted copies, so must pick second copy.
BucketInfo info(makeInfo(100, 200));
info.resetTrusted();
- BucketDatabase::Entry e(document::BucketId(16, 2), info);
+ BucketDatabase::Entry e(document::BucketId(16, 2), std::move(info));
metricUpdater.visit(e, 2);
metricUpdater.completeRound(false);
metricUpdater.getLastCompleteStats().propagateMetrics(ims, dms);
@@ -270,7 +270,7 @@ BucketDBMetricUpdaterTest::visitBucketWith2Copies1Trusted(BucketDBMetricUpdater&
BucketInfo info;
addNode(info, 0, 100);
addNode(info, 1, 101); // Note different checksums => #trusted = 1
- BucketDatabase::Entry e(document::BucketId(16, 1), info);
+ BucketDatabase::Entry e(document::BucketId(16, 1), std::move(info));
metricUpdater.visit(e, 2);
}
@@ -281,18 +281,17 @@ BucketDBMetricUpdaterTest::visitBucketWith2CopiesBothTrusted(BucketDBMetricUpdat
BucketInfo info;
addNode(info, 0, 200);
addNode(info, 2, 200);
- BucketDatabase::Entry e(document::BucketId(16, 2), info);
+ BucketDatabase::Entry e(document::BucketId(16, 2), std::move(info));
metricUpdater.visit(e, 2);
}
// Single replica on node 2.
void
-BucketDBMetricUpdaterTest::visitBucketWith1Copy(
- BucketDBMetricUpdater& metricUpdater)
+BucketDBMetricUpdaterTest::visitBucketWith1Copy(BucketDBMetricUpdater& metricUpdater)
{
BucketInfo info;
addNode(info, 2, 100);
- BucketDatabase::Entry e(document::BucketId(16, 1), info);
+ BucketDatabase::Entry e(document::BucketId(16, 1), std::move(info));
metricUpdater.visit(e, 2);
}
diff --git a/storage/src/tests/distributor/bucketstateoperationtest.cpp b/storage/src/tests/distributor/bucketstateoperationtest.cpp
index 42ee4675e26..c9fab0b37e5 100644
--- a/storage/src/tests/distributor/bucketstateoperationtest.cpp
+++ b/storage/src/tests/distributor/bucketstateoperationtest.cpp
@@ -3,6 +3,7 @@
#include <tests/distributor/distributor_stripe_test_util.h>
#include <vespa/storage/distributor/operations/idealstate/setbucketstateoperation.h>
#include <vespa/storage/distributor/top_level_distributor.h>
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/vespalib/gtest/gtest.h>
#include "dummy_cluster_context.h"
diff --git a/storage/src/tests/distributor/distributor_bucket_space_test.cpp b/storage/src/tests/distributor/distributor_bucket_space_test.cpp
index 41e0dafdaaf..00bc803e81c 100644
--- a/storage/src/tests/distributor/distributor_bucket_space_test.cpp
+++ b/storage/src/tests/distributor/distributor_bucket_space_test.cpp
@@ -100,19 +100,19 @@ DistributorBucketSpaceTest::CountVector
DistributorBucketSpaceTest::count_service_layer_buckets(const std::vector<BucketId>& buckets)
{
CountVector result(3);
- std::vector<uint16_t> ideal_nodes;
for (auto& bucket : buckets) {
- auto &ideal_nodes_bundle = bucket_space.get_ideal_service_layer_nodes_bundle(bucket);
+ const auto & ideal_nodes_bundle = bucket_space.get_ideal_service_layer_nodes_bundle(bucket);
for (uint32_t i = 0; i < 3; ++i) {
+ IdealServiceLayerNodesBundle::ConstNodesRef ideal_nodes;
switch (i) {
case 0:
- ideal_nodes = ideal_nodes_bundle.get_available_nodes();
+ ideal_nodes = ideal_nodes_bundle.available_nodes();
break;
case 1:
- ideal_nodes = ideal_nodes_bundle.get_available_nonretired_nodes();
+ ideal_nodes = ideal_nodes_bundle.available_nonretired_nodes();
break;
case 2:
- ideal_nodes = ideal_nodes_bundle.get_available_nonretired_or_maintenance_nodes();
+ ideal_nodes = ideal_nodes_bundle.available_nonretired_or_maintenance_nodes();
break;
default:
;
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.cpp b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
index 7a64eda28ff..5babde49380 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.cpp
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.cpp
@@ -10,6 +10,7 @@
#include <vespa/storage/distributor/distributormetricsset.h>
#include <vespa/storage/distributor/ideal_state_total_metrics.h>
#include <vespa/storage/distributor/node_supported_features_repo.h>
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/text/stringtokenizer.h>
#include <vespa/vespalib/stllike/hash_map.hpp>
@@ -40,34 +41,22 @@ DistributorStripeTestUtil::createLinks()
_node = std::make_unique<TestDistributorApp>(_config.getConfigId());
_metrics = std::make_shared<DistributorMetricSet>();
_ideal_state_metrics = std::make_shared<IdealStateMetricSet>();
- _stripe = std::make_unique<DistributorStripe>(_node->getComponentRegister(),
- *_metrics,
- *_ideal_state_metrics,
- _node->node_identity(),
- _messageSender,
- *this,
- _done_initializing);
+ _stripe = std::make_unique<DistributorStripe>(_node->getComponentRegister(), *_metrics, *_ideal_state_metrics,
+ _node->node_identity(), _messageSender, *this, _done_initializing);
}
void
-DistributorStripeTestUtil::setup_stripe(int redundancy,
- int nodeCount,
- const std::string& systemState,
- uint32_t earlyReturn,
- bool requirePrimaryToBeWritten)
+DistributorStripeTestUtil::setup_stripe(int redundancy, int nodeCount, const std::string& systemState,
+ uint32_t earlyReturn, bool requirePrimaryToBeWritten)
{
setup_stripe(redundancy, nodeCount, lib::ClusterStateBundle(lib::ClusterState(systemState)), earlyReturn, requirePrimaryToBeWritten);
}
void
-DistributorStripeTestUtil::setup_stripe(int redundancy,
- int node_count,
- const lib::ClusterStateBundle& state,
- uint32_t early_return,
- bool require_primary_to_be_written)
+DistributorStripeTestUtil::setup_stripe(int redundancy, int node_count, const lib::ClusterStateBundle& state,
+ uint32_t early_return, bool require_primary_to_be_written)
{
- lib::Distribution::DistributionConfigBuilder config(
- lib::Distribution::getDefaultDistributionConfig(redundancy, node_count).get());
+ lib::Distribution::DistributionConfigBuilder config(lib::Distribution::getDefaultDistributionConfig(redundancy, node_count).get());
config.redundancy = redundancy;
config.initialRedundancy = early_return;
config.ensurePrimaryPersisted = require_primary_to_be_written;
@@ -93,8 +82,7 @@ DistributorStripeTestUtil::setup_stripe(int redundancy,
void
DistributorStripeTestUtil::set_redundancy(uint32_t redundancy)
{
- auto distribution = std::make_shared<lib::Distribution>(
- lib::Distribution::getDefaultDistributionConfig(redundancy, 100));
+ auto distribution = std::make_shared<lib::Distribution>(lib::Distribution::getDefaultDistributionConfig(redundancy, 100));
// Same rationale for not triggering a full distribution change as
// in setup_stripe() above
_node->getComponentRegister().setDistribution(distribution);
@@ -217,8 +205,7 @@ DistributorStripeTestUtil::getIdealStr(document::BucketId id, const lib::Cluster
}
std::vector<uint16_t> nodes;
- getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, id, nodes);
+ getDistribution().getIdealNodes(lib::NodeType::STORAGE, state, id, nodes, "uim");
std::sort(nodes.begin(), nodes.end());
std::ostringstream ost;
ost << id << ": " << dumpVector(nodes);
@@ -226,8 +213,7 @@ DistributorStripeTestUtil::getIdealStr(document::BucketId id, const lib::Cluster
}
void
-DistributorStripeTestUtil::addIdealNodes(const lib::ClusterState& state,
- const document::BucketId& id)
+DistributorStripeTestUtil::addIdealNodes(const lib::ClusterState& state, const document::BucketId& id)
{
BucketDatabase::Entry entry = getBucket(id);
@@ -236,15 +222,11 @@ DistributorStripeTestUtil::addIdealNodes(const lib::ClusterState& state,
}
std::vector<uint16_t> res;
- getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, id, res);
+ getDistribution().getIdealNodes(lib::NodeType::STORAGE, state, id, res, "uim");
for (uint32_t i = 0; i < res.size(); ++i) {
- if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() !=
- lib::State::MAINTENANCE)
- {
- entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)),
- toVector<uint16_t>(0));
+ if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() != lib::State::MAINTENANCE) {
+ entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)), {0});
}
}
@@ -292,10 +274,7 @@ DistributorStripeTestUtil::addNodesToBucketDB(const document::Bucket& bucket, co
}
uint16_t idx = atoi(tok2[0].data());
- BucketCopy node(
- 0,
- idx,
- info);
+ BucketCopy node(0, idx, info);
// Allow user to manually override trusted and active.
if (tok3.size() > flagsIdx && tok3[flagsIdx] == "t") {
@@ -309,44 +288,32 @@ DistributorStripeTestUtil::addNodesToBucketDB(const document::Bucket& bucket, co
}
void
-DistributorStripeTestUtil::addNodesToBucketDB(const document::BucketId& id,
- const std::string& nodeStr)
-{
+DistributorStripeTestUtil::addNodesToBucketDB(const document::BucketId& id, const std::string& nodeStr) {
addNodesToBucketDB(document::Bucket(makeBucketSpace(), id), nodeStr);
}
void
-DistributorStripeTestUtil::removeFromBucketDB(const document::BucketId& id)
-{
+DistributorStripeTestUtil::removeFromBucketDB(const document::BucketId& id) {
getBucketDatabase().remove(id);
}
void
-DistributorStripeTestUtil::addIdealNodes(const document::BucketId& id)
-{
+DistributorStripeTestUtil::addIdealNodes(const document::BucketId& id) {
// TODO STRIPE roundabout way of getting state bundle..!
addIdealNodes(*operation_context().cluster_state_bundle().getBaselineClusterState(), id);
}
void
-DistributorStripeTestUtil::insertBucketInfo(document::BucketId id,
- uint16_t node,
- uint32_t checksum,
- uint32_t count,
- uint32_t size,
- bool trusted,
- bool active)
+DistributorStripeTestUtil::insertBucketInfo(document::BucketId id, uint16_t node, uint32_t checksum,
+ uint32_t count, uint32_t size, bool trusted, bool active)
{
api::BucketInfo info(checksum, count, size);
insertBucketInfo(id, node, info, trusted, active);
}
void
-DistributorStripeTestUtil::insertBucketInfo(document::BucketId id,
- uint16_t node,
- const api::BucketInfo& info,
- bool trusted,
- bool active)
+DistributorStripeTestUtil::insertBucketInfo(document::BucketId id, uint16_t node, const api::BucketInfo& info,
+ bool trusted, bool active)
{
BucketDatabase::Entry entry = getBucketDatabase().get(id);
if (!entry.valid()) {
@@ -358,9 +325,7 @@ DistributorStripeTestUtil::insertBucketInfo(document::BucketId id,
info2.setActive();
}
BucketCopy copy(operation_context().generate_unique_timestamp(), node, info2);
-
- entry->addNode(copy.setTrusted(trusted), toVector<uint16_t>(0));
-
+ entry->addNode(copy.setTrusted(trusted), {0});
getBucketDatabase().update(entry);
}
@@ -371,9 +336,7 @@ DistributorStripeTestUtil::dumpBucket(const document::BucketId& bid)
}
void
-DistributorStripeTestUtil::sendReply(Operation& op,
- int idx,
- api::ReturnCode::Result result)
+DistributorStripeTestUtil::sendReply(Operation& op, int idx, api::ReturnCode::Result result)
{
if (idx == -1) {
idx = _sender.commands().size() - 1;
@@ -387,20 +350,17 @@ DistributorStripeTestUtil::sendReply(Operation& op,
}
BucketDatabase::Entry
-DistributorStripeTestUtil::getBucket(const document::Bucket& bucket) const
-{
+DistributorStripeTestUtil::getBucket(const document::Bucket& bucket) const {
return getBucketDatabase(bucket.getBucketSpace()).get(bucket.getBucketId());
}
BucketDatabase::Entry
-DistributorStripeTestUtil::getBucket(const document::BucketId& bId) const
-{
+DistributorStripeTestUtil::getBucket(const document::BucketId& bId) const {
return getBucketDatabase().get(bId);
}
void
-DistributorStripeTestUtil::disableBucketActivationInConfig(bool disable)
-{
+DistributorStripeTestUtil::disableBucketActivationInConfig(bool disable) {
ConfigBuilder builder;
builder.disableBucketActivation = disable;
configure_stripe(builder);
@@ -437,14 +397,12 @@ DistributorStripeTestUtil::doc_selection_parser() const {
}
DistributorMetricSet&
-DistributorStripeTestUtil::metrics()
-{
+DistributorStripeTestUtil::metrics() {
return *_metrics;
}
bool
-DistributorStripeTestUtil::tick()
-{
+DistributorStripeTestUtil::tick() {
return _stripe->tick();
}
@@ -553,8 +511,7 @@ DistributorStripeTestUtil::getBucketSpaces() const
void
DistributorStripeTestUtil::enable_cluster_state(vespalib::stringref state)
{
- getBucketDBUpdater().simulate_cluster_state_bundle_activation(
- lib::ClusterStateBundle(lib::ClusterState(state)));
+ getBucketDBUpdater().simulate_cluster_state_bundle_activation(lib::ClusterStateBundle(lib::ClusterState(state)));
}
void
diff --git a/storage/src/tests/distributor/distributor_stripe_test_util.h b/storage/src/tests/distributor/distributor_stripe_test_util.h
index 9963b2c96b4..272301bf4a6 100644
--- a/storage/src/tests/distributor/distributor_stripe_test_util.h
+++ b/storage/src/tests/distributor/distributor_stripe_test_util.h
@@ -7,6 +7,7 @@
#include <tests/common/teststorageapp.h>
#include <vespa/storage/common/hostreporter/hostinfo.h>
#include <vespa/storage/distributor/stripe_host_info_notifier.h>
+#include <vespa/storage/storageutil/utils.h>
namespace storage {
diff --git a/storage/src/tests/distributor/garbagecollectiontest.cpp b/storage/src/tests/distributor/garbagecollectiontest.cpp
index c2f4836f4cb..9b5056f2066 100644
--- a/storage/src/tests/distributor/garbagecollectiontest.cpp
+++ b/storage/src/tests/distributor/garbagecollectiontest.cpp
@@ -71,8 +71,7 @@ struct GarbageCollectionOperationTest : Test, DistributorStripeTestUtil {
std::shared_ptr<GarbageCollectionOperation> create_op() {
auto op = std::make_shared<GarbageCollectionOperation>(
- dummy_cluster_context, BucketAndNodes(makeDocumentBucket(_bucket_id),
- toVector<uint16_t>(0, 1)));
+ dummy_cluster_context, BucketAndNodes(makeDocumentBucket(_bucket_id), {0, 1}));
op->setIdealStateManager(&getIdealStateManager());
return op;
}
diff --git a/storage/src/tests/distributor/operationtargetresolvertest.cpp b/storage/src/tests/distributor/operationtargetresolvertest.cpp
index 2d41b0f4d32..19ca81e933f 100644
--- a/storage/src/tests/distributor/operationtargetresolvertest.cpp
+++ b/storage/src/tests/distributor/operationtargetresolvertest.cpp
@@ -3,7 +3,6 @@
#include <tests/distributor/distributor_stripe_test_util.h>
#include <vespa/config/helper/configgetter.h>
#include <vespa/config/helper/configgetter.hpp>
-#include <vespa/document/config/config-documenttypes.h>
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/document/test/make_bucket_space.h>
#include <vespa/document/test/make_document_bucket.h>
@@ -14,7 +13,6 @@
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h>
#include <vespa/vespalib/gtest/gtest.h>
using document::BucketId;
@@ -112,14 +110,10 @@ struct TestTargets {
} // anonymous
BucketInstanceList
-OperationTargetResolverTest::getInstances(const BucketId& id,
- bool stripToRedundancy)
+OperationTargetResolverTest::getInstances(const BucketId& id, bool stripToRedundancy)
{
- lib::IdealNodeCalculatorImpl idealNodeCalc;
auto &bucketSpaceRepo(operation_context().bucket_space_repo());
auto &distributorBucketSpace(bucketSpaceRepo.get(makeBucketSpace()));
- idealNodeCalc.setDistribution(distributorBucketSpace.getDistribution());
- idealNodeCalc.setClusterState(distributorBucketSpace.getClusterState());
OperationTargetResolverImpl resolver(
distributorBucketSpace, distributorBucketSpace.getBucketDatabase(), 16,
distributorBucketSpace.getDistribution().getRedundancy(),
@@ -142,24 +136,6 @@ TEST_F(OperationTargetResolverTest, simple) {
.sendsTo(BucketId(16, 0), 0);
}
-TEST_F(OperationTargetResolverTest, multiple_nodes) {
- setup_stripe(1, 2, "storage:2 distributor:1");
-
- auto &bucketSpaceRepo(operation_context().bucket_space_repo());
- auto &distributorBucketSpace(bucketSpaceRepo.get(makeBucketSpace()));
- for (int i = 0; i < 100; ++i) {
- addNodesToBucketDB(BucketId(16, i), "0=0,1=0");
-
- lib::IdealNodeCalculatorImpl idealNodeCalc;
- idealNodeCalc.setDistribution(distributorBucketSpace.getDistribution());
- idealNodeCalc.setClusterState(distributorBucketSpace.getClusterState());
- lib::IdealNodeList idealNodes(
- idealNodeCalc.getIdealStorageNodes(BucketId(16, i)));
- uint16_t expectedNode = idealNodes[0].getIndex();
- MY_ASSERT_THAT(BucketId(32, i)).sendsTo(BucketId(16, i), expectedNode);
- }
-}
-
TEST_F(OperationTargetResolverTest, choose_ideal_state_when_many_copies) {
setup_stripe(2, 4, "storage:4 distributor:1");
addNodesToBucketDB(BucketId(16, 0), "0=0,1=0,2=0,3=0"); // ideal nodes: 1, 3
diff --git a/storage/src/tests/distributor/pendingmessagetrackertest.cpp b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
index 3bfa1027a82..8277281206d 100644
--- a/storage/src/tests/distributor/pendingmessagetrackertest.cpp
+++ b/storage/src/tests/distributor/pendingmessagetrackertest.cpp
@@ -162,10 +162,14 @@ TEST_F(PendingMessageTrackerTest, simple) {
clock.setAbsoluteTimeInSeconds(1);
PendingMessageTracker tracker(compReg, 0);
+ std::ostringstream dummy; // Enable time tracking
+ tracker.reportStatus(dummy, framework::HttpUrlPath("/pendingmessages?order=bucket"));
+
auto remove = std::make_shared<api::RemoveCommand>(
makeDocumentBucket(document::BucketId(16, 1234)),
document::DocumentId("id:footype:testdoc:n=1234:foo"), 1001);
remove->setAddress(makeStorageAddress(0));
+
tracker.insert(remove);
{
@@ -238,6 +242,8 @@ TEST_F(PendingMessageTrackerTest, multiple_messages) {
compReg.setClock(clock);
clock.setAbsoluteTimeInSeconds(1);
PendingMessageTracker tracker(compReg, 0);
+ std::ostringstream dummy; // Enable time tracking
+ tracker.reportStatus(dummy, framework::HttpUrlPath("/pendingmessages?order=bucket"));
insertMessages(tracker);
diff --git a/storage/src/tests/distributor/simplemaintenancescannertest.cpp b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
index b5dc72d995b..3d3c58ba842 100644
--- a/storage/src/tests/distributor/simplemaintenancescannertest.cpp
+++ b/storage/src/tests/distributor/simplemaintenancescannertest.cpp
@@ -82,7 +82,7 @@ TEST_F(SimpleMaintenanceScannerTest, prioritize_single_bucket) {
TEST_F(SimpleMaintenanceScannerTest, prioritize_single_bucket_alt_bucket_space) {
document::BucketSpace bucketSpace(4);
_bucketSpaceRepo->add(bucketSpace, std::make_unique<DistributorBucketSpace>());
- _scanner->reset();
+ (void)_scanner->fetch_and_reset();
addBucketToDb(bucketSpace, 1);
std::string expected("PrioritizedBucket(Bucket(BucketSpace(0x0000000000000004), BucketId(0x4000000000000001)), pri VERY_HIGH)\n");
@@ -148,7 +148,7 @@ TEST_F(SimpleMaintenanceScannerTest, reset) {
ASSERT_TRUE(scanEntireDatabase(0));
EXPECT_EQ(expected, _priorityDb->toString());
- _scanner->reset();
+ (void)_scanner->fetch_and_reset();
ASSERT_TRUE(scanEntireDatabase(3));
expected = "PrioritizedBucket(Bucket(BucketSpace(0x0000000000000001), BucketId(0x4000000000000001)), pri VERY_HIGH)\n"
@@ -180,7 +180,7 @@ TEST_F(SimpleMaintenanceScannerTest, pending_maintenance_operation_statistics) {
EXPECT_EQ(expected, stringifyGlobalPendingStats(stats));
}
- _scanner->reset();
+ (void)_scanner->fetch_and_reset();
{
const auto & stats = _scanner->getPendingMaintenanceStats();
EXPECT_EQ(expectedEmpty, stringifyGlobalPendingStats(stats));
@@ -301,7 +301,7 @@ TEST_F(SimpleMaintenanceScannerTest, merge_pending_maintenance_stats) {
TEST_F(SimpleMaintenanceScannerTest, empty_bucket_db_is_immediately_done_by_default) {
auto res = _scanner->scanNext();
EXPECT_TRUE(res.isDone());
- _scanner->reset();
+ (void)_scanner->fetch_and_reset();
res = _scanner->scanNext();
EXPECT_TRUE(res.isDone());
}
diff --git a/storage/src/tests/distributor/statecheckerstest.cpp b/storage/src/tests/distributor/statecheckerstest.cpp
index 16854cd63c6..13c982f5a77 100644
--- a/storage/src/tests/distributor/statecheckerstest.cpp
+++ b/storage/src/tests/distributor/statecheckerstest.cpp
@@ -7,6 +7,7 @@
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/storage/distributor/top_level_bucket_db_updater.h>
#include <vespa/storage/distributor/top_level_distributor.h>
+#include <vespa/storage/distributor/activecopy.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/storage/distributor/distributor_stripe.h>
#include <vespa/storage/distributor/operations/idealstate/mergeoperation.h>
@@ -1383,9 +1384,8 @@ std::string StateCheckersTest::testGarbageCollection(
getBucketDatabase().update(e);
NodeMaintenanceStatsTracker statsTracker;
- StateChecker::Context c(node_context(), operation_context(),
- getDistributorBucketSpace(), statsTracker,
- makeDocumentBucket(e.getBucketId()));
+ StateChecker::Context c(node_context(), operation_context(), getDistributorBucketSpace(),
+ statsTracker, makeDocumentBucket(e.getBucketId()));
getClock().setAbsoluteTimeInSeconds(nowTimestamp);
return testStateChecker(checker, c, false, PendingMessage(), includePriority, includeSchedulingPri);
}
@@ -1394,38 +1394,29 @@ TEST_F(StateCheckersTest, garbage_collection) {
// BucketId(17, 0) has id (and thus 'hash') 0x4400000000000000. With a
// check interval modulo of 3600, this implies a start point of 848.
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(900, 3600 + 847, 3600));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(900, 3600 + 847, 3600));
- EXPECT_EQ("[Needs garbage collection: Last check at 900, current time 4448, "
- "configured interval 3600]",
+ EXPECT_EQ("[Needs garbage collection: Last check at 900, current time 4448, configured interval 3600]",
testGarbageCollection(900, 3600 + 848, 3600));
- EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 3600]",
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, configured interval 3600]",
testGarbageCollection(3, 4000, 3600));
// GC start point 3648.
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3, 3647, 8000));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3, 3647, 8000));
- EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 3600]",
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, configured interval 3600]",
testGarbageCollection(3, 4000, 3600));
// GC explicitly disabled.
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3, 4000, 0));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3, 4000, 0));
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3, 3, 1));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3, 3, 1));
- EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, "
- "configured interval 300] (pri 200)",
+ EXPECT_EQ("[Needs garbage collection: Last check at 3, current time 4000, configured interval 300] (pri 200)",
testGarbageCollection(3, 4000, 300, 1, true));
- EXPECT_EQ("NO OPERATIONS GENERATED",
- testGarbageCollection(3850, 4000, 300, 1));
+ EXPECT_EQ("NO OPERATIONS GENERATED", testGarbageCollection(3850, 4000, 300, 1));
}
TEST_F(StateCheckersTest, gc_ops_are_prioritized_with_low_priority_category) {
@@ -1597,11 +1588,12 @@ TEST_F(StateCheckersTest, context_populates_ideal_state_containers) {
StateChecker::Context c(node_context(), operation_context(),
getDistributorBucketSpace(), statsTracker, makeDocumentBucket({17, 0}));
- ASSERT_THAT(c.idealState, ElementsAre(1, 3));
- // TODO replace with UnorderedElementsAre once we can build gmock without issues
- std::vector<uint16_t> ideal_state(c.unorderedIdealState.begin(), c.unorderedIdealState.end());
- std::sort(ideal_state.begin(), ideal_state.end());
- ASSERT_THAT(ideal_state, ElementsAre(1, 3));
+ ASSERT_EQ(2, c.idealState().size());
+ ASSERT_EQ(1, c.idealState()[0]);
+ ASSERT_EQ(3, c.idealState()[1]);
+ for (uint16_t node : c.idealState()) {
+ ASSERT_TRUE(c.idealStateBundle.is_nonretired_or_maintenance(node));
+ }
}
namespace {
@@ -1616,8 +1608,7 @@ public:
explicit StateCheckerRunner(StateCheckersTest& fixture);
~StateCheckerRunner();
- StateCheckerRunner& addToDb(const document::BucketId& bid,
- const std::string& bucketInfo)
+ StateCheckerRunner& addToDb(const document::BucketId& bid, const std::string& bucketInfo)
{
_fixture.addNodesToBucketDB(bid, bucketInfo);
return *this;
@@ -1652,8 +1643,7 @@ public:
Checker checker;
StateChecker::Context c(_fixture.node_context(), _fixture.operation_context(),
_fixture.getDistributorBucketSpace(), _statsTracker, makeDocumentBucket(bid));
- _result = _fixture.testStateChecker(
- checker, c, false, StateCheckersTest::PendingMessage(), false);
+ _result = _fixture.testStateChecker(checker, c, false, StateCheckersTest::PendingMessage(), false);
}
const std::string& result() const { return _result; }
@@ -1749,4 +1739,9 @@ TEST_F(StateCheckersTest, stats_updates_for_maximum_time_since_gc_run) {
EXPECT_EQ(runner.stats().max_observed_time_since_last_gc(), 1900s);
}
+TEST(ActiveCopyTest, control_size) {
+ EXPECT_EQ(12, sizeof(ActiveCopy));
+ EXPECT_EQ(64, sizeof(IdealServiceLayerNodesBundle));
+}
+
}
diff --git a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
index 567e0a947da..7eb9dfe6269 100644
--- a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
+++ b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
@@ -65,12 +65,9 @@ public:
close();
}
- std::shared_ptr<RequestBucketInfoReply> make_fake_bucket_reply(
- const lib::ClusterState& state,
- const RequestBucketInfoCommand& cmd,
- int storageIndex,
- uint32_t bucketCount,
- uint32_t invalidBucketCount = 0)
+ std::shared_ptr<RequestBucketInfoReply>
+ make_fake_bucket_reply(const lib::ClusterState& state, const RequestBucketInfoCommand& cmd,
+ int storageIndex, uint32_t bucketCount,uint32_t invalidBucketCount = 0)
{
auto sreply = std::make_shared<RequestBucketInfoReply>(cmd);
sreply->setAddress(storage_address(storageIndex));
@@ -84,19 +81,14 @@ public:
}
std::vector<uint16_t> nodes;
- distributor_bucket_space(bucket).getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, bucket, nodes);
+ distributor_bucket_space(bucket).getDistribution().getIdealNodes(lib::NodeType::STORAGE, state, bucket, nodes, "uim");
for (uint32_t j = 0; j < nodes.size(); ++j) {
if (nodes[j] == storageIndex) {
if (i >= bucketCount) {
- vec.push_back(api::RequestBucketInfoReply::Entry(
- document::BucketId(16, i),
- api::BucketInfo()));
+ vec.emplace_back(document::BucketId(16, i), api::BucketInfo());
} else {
- vec.push_back(api::RequestBucketInfoReply::Entry(
- document::BucketId(16, i),
- api::BucketInfo(10,1,1)));
+ vec.emplace_back(document::BucketId(16, i), api::BucketInfo(10,1,1));
}
}
}
@@ -105,45 +97,34 @@ public:
return sreply;
}
- void fake_bucket_reply(const lib::ClusterState &state,
- const api::StorageCommand &cmd,
- uint32_t bucket_count,
- uint32_t invalid_bucket_count = 0)
+ void fake_bucket_reply(const lib::ClusterState &state, const api::StorageCommand &cmd,
+ uint32_t bucket_count, uint32_t invalid_bucket_count = 0)
{
ASSERT_EQ(cmd.getType(), MessageType::REQUESTBUCKETINFO);
const api::StorageMessageAddress& address(*cmd.getAddress());
bucket_db_updater().onRequestBucketInfoReply(
- make_fake_bucket_reply(state,
- dynamic_cast<const RequestBucketInfoCommand &>(cmd),
- address.getIndex(),
- bucket_count,
- invalid_bucket_count));
+ make_fake_bucket_reply(state, dynamic_cast<const RequestBucketInfoCommand &>(cmd),
+ address.getIndex(), bucket_count, invalid_bucket_count));
}
- void fake_bucket_reply(const lib::ClusterState &state,
- const api::StorageCommand &cmd,
- uint32_t bucket_count,
+ void fake_bucket_reply(const lib::ClusterState &state, const api::StorageCommand &cmd, uint32_t bucket_count,
const std::function<void(api::RequestBucketInfoReply&)>& reply_decorator)
{
ASSERT_EQ(cmd.getType(), MessageType::REQUESTBUCKETINFO);
const api::StorageMessageAddress& address(*cmd.getAddress());
- auto reply = make_fake_bucket_reply(state,
- dynamic_cast<const RequestBucketInfoCommand &>(cmd),
- address.getIndex(),
- bucket_count, 0);
+ auto reply = make_fake_bucket_reply(state, dynamic_cast<const RequestBucketInfoCommand &>(cmd),
+ address.getIndex(), bucket_count, 0);
reply_decorator(*reply);
bucket_db_updater().onRequestBucketInfoReply(reply);
}
- void send_fake_reply_for_single_bucket_request(
- const api::RequestBucketInfoCommand& rbi)
+ void send_fake_reply_for_single_bucket_request(const api::RequestBucketInfoCommand& rbi)
{
ASSERT_EQ(size_t(1), rbi.getBuckets().size());
const document::BucketId& bucket(rbi.getBuckets()[0]);
auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
- reply->getBucketInfo().push_back(
- api::RequestBucketInfoReply::Entry(bucket, api::BucketInfo(20, 10, 12, 50, 60, true, true)));
+ reply->getBucketInfo().emplace_back(bucket, api::BucketInfo(20, 10, 12, 50, 60, true, true));
stripe_of_bucket(bucket).bucket_db_updater().onRequestBucketInfoReply(reply);
}
@@ -154,15 +135,11 @@ public:
}
std::vector<uint16_t> nodes;
- distributor_bucket_space(id).getDistribution().getIdealNodes(
- lib::NodeType::STORAGE, state, document::BucketId(id), nodes);
+ distributor_bucket_space(id).getDistribution().getIdealNodes(lib::NodeType::STORAGE, state, document::BucketId(id), nodes, "uim");
if (nodes.size() != entry->getNodeCount()) {
- return vespalib::make_string("Bucket Id %s has %d nodes in "
- "ideal state, but has only %d in DB",
- id.toString().c_str(),
- (int)nodes.size(),
- (int)entry->getNodeCount());
+ return vespalib::make_string("Bucket Id %s has %d nodes in ideal state, but has only %d in DB",
+ id.toString().c_str(), (int)nodes.size(), (int)entry->getNodeCount());
}
for (uint32_t i = 0; i<nodes.size(); i++) {
@@ -175,10 +152,7 @@ public:
}
if (!found) {
- return vespalib::make_string(
- "Bucket Id %s has no copy from node %d",
- id.toString().c_str(),
- nodes[i]);
+ return vespalib::make_string("Bucket Id %s has no copy from node %d", id.toString().c_str(), nodes[i]);
}
}
@@ -188,13 +162,11 @@ public:
struct OrderByIncreasingNodeIndex {
template <typename T>
bool operator()(const T& lhs, const T& rhs) {
- return (lhs->getAddress()->getIndex()
- < rhs->getAddress()->getIndex());
+ return (lhs->getAddress()->getIndex() < rhs->getAddress()->getIndex());
}
};
- void sort_sent_messages_by_index(DistributorMessageSenderStub& sender,
- size_t sortFromOffset = 0)
+ void sort_sent_messages_by_index(DistributorMessageSenderStub& sender, size_t sortFromOffset = 0)
{
std::sort(sender.commands().begin() + sortFromOffset,
sender.commands().end(),
diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.cpp b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
index 9677ea568e8..6bbe7a47da2 100644
--- a/storage/src/tests/distributor/top_level_distributor_test_util.cpp
+++ b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
@@ -10,6 +10,7 @@
#include <vespa/storage/distributor/distributor_stripe_pool.h>
#include <vespa/storage/distributor/distributor_stripe_thread.h>
#include <vespa/storage/distributor/distributor_total_metrics.h>
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/storage/common/bucket_stripe_utils.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/text/stringtokenizer.h>
@@ -187,7 +188,7 @@ TopLevelDistributorTestUtil::get_ideal_str(document::BucketId id, const lib::Clu
return id.toString();
}
std::vector<uint16_t> nodes;
- _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, nodes);
+ _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, nodes, "uim");
std::sort(nodes.begin(), nodes.end());
std::ostringstream ost;
ost << id << ": " << dumpVector(nodes);
@@ -205,14 +206,11 @@ TopLevelDistributorTestUtil::add_ideal_nodes(const lib::ClusterState& state, con
std::vector<uint16_t> res;
assert(_component.get());
- _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, res);
+ _component->getDistribution()->getIdealNodes(lib::NodeType::STORAGE, state, id, res, "uim");
for (uint32_t i = 0; i < res.size(); ++i) {
- if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() !=
- lib::State::MAINTENANCE)
- {
- entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)),
- toVector<uint16_t>(0));
+ if (state.getNodeState(lib::Node(lib::NodeType::STORAGE, res[i])).getState() != lib::State::MAINTENANCE) {
+ entry->addNode(BucketCopy(0, res[i], api::BucketInfo(1,1,1)), toVector<uint16_t>(0));
}
}
diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.h b/storage/src/tests/distributor/top_level_distributor_test_util.h
index cd5db7c8f80..51700848733 100644
--- a/storage/src/tests/distributor/top_level_distributor_test_util.h
+++ b/storage/src/tests/distributor/top_level_distributor_test_util.h
@@ -7,7 +7,6 @@
#include <tests/common/teststorageapp.h>
#include <vespa/storage/common/hostreporter/hostinfo.h>
#include <vespa/storage/frameworkimpl/component/distributorcomponentregisterimpl.h>
-#include <vespa/storage/storageutil/utils.h>
#include <vespa/storageapi/message/state.h>
#include <vespa/storageframework/defaultimplementation/clock/fakeclock.h>
diff --git a/storage/src/vespa/storage/bucketdb/bucketcopy.h b/storage/src/vespa/storage/bucketdb/bucketcopy.h
index e8d1db1d824..ca629a6cd8e 100644
--- a/storage/src/vespa/storage/bucketdb/bucketcopy.h
+++ b/storage/src/vespa/storage/bucketdb/bucketcopy.h
@@ -7,10 +7,10 @@ namespace storage {
class BucketCopy {
private:
- uint64_t _timestamp;
+ uint64_t _timestamp;
api::BucketInfo _info;
- uint16_t _flags;
- uint16_t _node;
+ uint16_t _flags;
+ uint16_t _node;
public:
static const int TRUSTED = 1;
@@ -18,9 +18,7 @@ public:
BucketCopy() noexcept
: _timestamp(0), _flags(0), _node(0xffff) {}
- BucketCopy(uint64_t timestamp,
- uint16_t nodeIdx,
- const api::BucketInfo& info) noexcept
+ BucketCopy(uint64_t timestamp, uint16_t nodeIdx, const api::BucketInfo& info) noexcept
: _timestamp(timestamp),
_info(info),
_flags(0),
@@ -76,16 +74,14 @@ public:
_info.setActive(setactive);
}
- bool consistentWith(const BucketCopy& other,
- bool countInvalidAsConsistent = false) const noexcept
- {
+ bool consistentWith(const BucketCopy& other) const noexcept {
// If both are valid, check checksum and doc count.
if (valid() && other.valid()) {
return (getChecksum() == other.getChecksum()
&& getDocumentCount() == other.getDocumentCount());
}
- return countInvalidAsConsistent;
+ return false;
}
void print(std::ostream&, bool verbose, const std::string& indent) const;
@@ -93,9 +89,7 @@ public:
std::string toString() const;
bool operator==(const BucketCopy& other) const noexcept {
- return
- getBucketInfo() == other.getBucketInfo() &&
- _flags == other._flags;
+ return (getBucketInfo() == other.getBucketInfo()) && (_flags == other._flags);
}
};
diff --git a/storage/src/vespa/storage/bucketdb/bucketdatabase.h b/storage/src/vespa/storage/bucketdb/bucketdatabase.h
index 4e0b727036a..d3fdce8f0d8 100644
--- a/storage/src/vespa/storage/bucketdb/bucketdatabase.h
+++ b/storage/src/vespa/storage/bucketdb/bucketdatabase.h
@@ -22,26 +22,29 @@ public:
BucketInfoType _info;
public:
- EntryBase() : _bucketId(0) {} // Invalid entry
- EntryBase(const document::BucketId& bId, const BucketInfoType& bucketInfo)
- : _bucketId(bId), _info(bucketInfo) {}
- EntryBase(const document::BucketId& bId, BucketInfoType&& bucketInfo)
- : _bucketId(bId), _info(std::move(bucketInfo)) {}
- explicit EntryBase(const document::BucketId& bId) : _bucketId(bId) {}
-
- bool operator==(const EntryBase& other) const {
+ EntryBase() noexcept : _bucketId(0), _info() {} // Invalid entry
+ EntryBase(const document::BucketId& bId, BucketInfoType&& bucketInfo) noexcept
+ : _bucketId(bId),
+ _info(std::move(bucketInfo))
+ {}
+ explicit EntryBase(const document::BucketId& bId) noexcept : _bucketId(bId), _info() {}
+ EntryBase(EntryBase &&) noexcept = default;
+ EntryBase & operator=(EntryBase &&) noexcept = default;
+ EntryBase(const EntryBase &) = default;
+ EntryBase & operator=(const EntryBase &) = default;
+ bool operator==(const EntryBase& other) const noexcept {
return (_bucketId == other._bucketId && _info == other._info);
}
- bool valid() const { return (_bucketId.getRawId() != 0); }
+ bool valid() const noexcept { return (_bucketId.getRawId() != 0); }
std::string toString() const;
- const document::BucketId& getBucketId() const { return _bucketId; }
- const BucketInfoType& getBucketInfo() const { return _info; }
- BucketInfoType& getBucketInfo() { return _info; }
- BucketInfoType* operator->() { return &_info; }
- const BucketInfoType* operator->() const { return &_info; }
+ const document::BucketId& getBucketId() const noexcept { return _bucketId; }
+ const BucketInfoType& getBucketInfo() const noexcept { return _info; }
+ BucketInfoType& getBucketInfo() noexcept { return _info; }
+ BucketInfoType* operator->() noexcept { return &_info; }
+ const BucketInfoType* operator->() const noexcept { return &_info; }
- static EntryBase createInvalid() {
+ static EntryBase createInvalid() noexcept {
return EntryBase();
}
};
diff --git a/storage/src/vespa/storage/bucketdb/bucketinfo.cpp b/storage/src/vespa/storage/bucketdb/bucketinfo.cpp
index dcf49b4d022..a8c21efa793 100644
--- a/storage/src/vespa/storage/bucketdb/bucketinfo.cpp
+++ b/storage/src/vespa/storage/bucketdb/bucketinfo.cpp
@@ -9,9 +9,9 @@ namespace storage {
template class BucketInfoBase<std::vector<BucketCopy>>;
template class BucketInfoBase<vespalib::ConstArrayRef<BucketCopy>>;
-BucketInfo::BucketInfo() : BucketInfoBase() {}
+BucketInfo::BucketInfo() noexcept : BucketInfoBase() {}
-BucketInfo::BucketInfo(uint32_t lastGarbageCollection, std::vector<BucketCopy> nodes)
+BucketInfo::BucketInfo(uint32_t lastGarbageCollection, std::vector<BucketCopy> nodes) noexcept
: BucketInfoBase(lastGarbageCollection, std::move(nodes))
{}
@@ -23,7 +23,7 @@ BucketInfo::BucketInfo(BucketInfo&&) noexcept = default;
BucketInfo& BucketInfo::operator=(BucketInfo&&) noexcept = default;
void
-BucketInfo::updateTrusted() {
+BucketInfo::updateTrusted() noexcept {
if (validAndConsistent()) {
for (uint32_t i = 0; i < _nodes.size(); i++) {
_nodes[i].setTrusted();
@@ -51,7 +51,7 @@ BucketInfo::updateTrusted() {
}
void
-BucketInfo::resetTrusted() {
+BucketInfo::resetTrusted() noexcept {
for (uint32_t i = 0; i < _nodes.size(); i++) {
_nodes[i].clearTrusted();
}
@@ -63,10 +63,10 @@ namespace {
struct Sorter {
const std::vector<uint16_t>& _order;
- Sorter(const std::vector<uint16_t>& recommendedOrder) :
+ Sorter(const std::vector<uint16_t>& recommendedOrder) noexcept :
_order(recommendedOrder) {}
- bool operator() (const BucketCopy& a, const BucketCopy& b) {
+ bool operator() (const BucketCopy& a, const BucketCopy& b) noexcept {
int order_a = -1;
for (uint32_t i = 0; i < _order.size(); i++) {
if (_order[i] == a.getNode()) {
@@ -119,8 +119,7 @@ BucketInfo::addNodes(const std::vector<BucketCopy>& newCopies,
if (found) {
if (found->getTimestamp() < newCopies[i].getTimestamp()) {
- found->setBucketInfo(newCopies[i].getTimestamp(),
- newCopies[i].getBucketInfo());
+ found->setBucketInfo(newCopies[i].getTimestamp(), newCopies[i].getBucketInfo());
}
} else {
_nodes.push_back(newCopies[i]);
@@ -135,19 +134,15 @@ BucketInfo::addNodes(const std::vector<BucketCopy>& newCopies,
}
void
-BucketInfo::addNode(const BucketCopy& newCopy,
- const std::vector<uint16_t>& recommendedOrder)
+BucketInfo::addNode(const BucketCopy& newCopy, const std::vector<uint16_t>& recommendedOrder)
{
- addNodes(toVector<BucketCopy>(newCopy),
- recommendedOrder);
+ addNodes(toVector<BucketCopy>(newCopy), recommendedOrder);
}
bool
BucketInfo::removeNode(unsigned short node, TrustedUpdate update)
{
- for (std::vector<BucketCopy>::iterator iter = _nodes.begin();
- iter != _nodes.end();
- iter++) {
+ for (auto iter = _nodes.begin(); iter != _nodes.end(); iter++) {
if (iter->getNode() == node) {
_nodes.erase(iter);
if (update == TrustedUpdate::UPDATE) {
@@ -162,11 +157,9 @@ BucketInfo::removeNode(unsigned short node, TrustedUpdate update)
BucketCopy*
BucketInfo::getNodeInternal(uint16_t node)
{
- for (std::vector<BucketCopy>::iterator iter = _nodes.begin();
- iter != _nodes.end();
- iter++) {
- if (iter->getNode() == node) {
- return &*iter;
+ for (BucketCopy & copy : _nodes) {
+ if (copy.getNode() == node) {
+ return &copy;
}
}
return 0;
diff --git a/storage/src/vespa/storage/bucketdb/bucketinfo.h b/storage/src/vespa/storage/bucketdb/bucketinfo.h
index 57ebf505a50..9c024c31fd3 100644
--- a/storage/src/vespa/storage/bucketdb/bucketinfo.h
+++ b/storage/src/vespa/storage/bucketdb/bucketinfo.h
@@ -25,15 +25,15 @@ protected:
uint32_t _lastGarbageCollection;
NodeSeq _nodes;
public:
- BucketInfoBase()
+ BucketInfoBase() noexcept
: _lastGarbageCollection(0),
_nodes()
{}
- BucketInfoBase(uint32_t lastGarbageCollection, const NodeSeq& nodes)
+ BucketInfoBase(uint32_t lastGarbageCollection, const NodeSeq& nodes) noexcept
: _lastGarbageCollection(lastGarbageCollection),
_nodes(nodes)
{}
- BucketInfoBase(uint32_t lastGarbageCollection, NodeSeq&& nodes)
+ BucketInfoBase(uint32_t lastGarbageCollection, NodeSeq&& nodes) noexcept
: _lastGarbageCollection(lastGarbageCollection),
_nodes(std::move(nodes))
{}
@@ -47,28 +47,28 @@ public:
/**
* @return Returns the last time when this bucket was "garbage collected".
*/
- uint32_t getLastGarbageCollectionTime() const { return _lastGarbageCollection; }
+ uint32_t getLastGarbageCollectionTime() const noexcept { return _lastGarbageCollection; }
/** True if the bucket contains no documents and is consistent. */
- bool emptyAndConsistent() const;
+ bool emptyAndConsistent() const noexcept;
/**
Check that all copies have complete bucket information and are
consistent with eachother.
*/
- bool validAndConsistent() const;
+ bool validAndConsistent() const noexcept;
/**
* True if the bucket contains at least one invalid copy
*/
- bool hasInvalidCopy() const;
+ bool hasInvalidCopy() const noexcept;
/**
* Returns the number of trusted nodes this entry has.
*/
- uint16_t getTrustedCount() const;
+ uint16_t getTrustedCount() const noexcept;
- bool hasTrusted() const {
+ bool hasTrusted() const noexcept {
return getTrustedCount() != 0;
}
@@ -78,14 +78,14 @@ public:
* @param countInCompleteAsInconsistent If false, nodes that are incomplete
* are always counted as consistent with complete nodes.
*/
- bool consistentNodes(bool countInvalidAsConsistent = false) const;
+ bool consistentNodes() const noexcept;
void print(std::ostream&, bool verbose, const std::string& indent) const;
/**
* Returns the bucket copy struct for the given node, null if nonexisting
*/
- const BucketCopy* getNode(uint16_t node) const;
+ const BucketCopy* getNode(uint16_t node) const noexcept;
/**
* Returns the number of nodes this entry has.
@@ -95,7 +95,7 @@ public:
/**
* Returns a list of the nodes this entry has.
*/
- std::vector<uint16_t> getNodes() const;
+ std::vector<uint16_t> getNodes() const noexcept;
/**
Returns a reference to the node with the given index in the node
@@ -117,14 +117,25 @@ public:
std::string toString() const;
- uint32_t getHighestDocumentCount() const;
- uint32_t getHighestTotalDocumentSize() const;
- uint32_t getHighestMetaCount() const;
- uint32_t getHighestUsedFileSize() const;
-
- bool hasRecentlyCreatedEmptyCopy() const;
-
- bool operator==(const BucketInfoBase& other) const;
+ uint32_t getHighestDocumentCount() const noexcept;
+ uint32_t getHighestMetaCount() const noexcept;
+ uint32_t getHighestUsedFileSize() const noexcept;
+ struct Highest {
+ Highest() noexcept : _documentCount(0),_totalDocumentSize(0),_metaCount(0),_usedFileSize(0) {}
+ void update(const BucketCopy & n) noexcept {
+ _documentCount = std::max(_documentCount, n.getDocumentCount());
+ _totalDocumentSize = std::max(_totalDocumentSize, n.getTotalDocumentSize());
+ _metaCount = std::max(_metaCount, n.getMetaCount());
+ _usedFileSize = std::max(_usedFileSize, n.getUsedFileSize());
+ }
+ uint32_t _documentCount;
+ uint32_t _totalDocumentSize;
+ uint32_t _metaCount;
+ uint32_t _usedFileSize;
+ };
+ Highest getHighest() const noexcept;
+ bool hasRecentlyCreatedEmptyCopy() const noexcept;
+ bool operator==(const BucketInfoBase& other) const noexcept;
};
template <typename NodeSeq>
@@ -140,8 +151,8 @@ public:
class BucketInfo : public BucketInfoBase<std::vector<BucketCopy>> {
public:
- BucketInfo();
- BucketInfo(uint32_t lastGarbageCollection, std::vector<BucketCopy> nodes);
+ BucketInfo() noexcept;
+ BucketInfo(uint32_t lastGarbageCollection, std::vector<BucketCopy> nodes) noexcept;
~BucketInfo();
BucketInfo(const BucketInfo&);
@@ -152,20 +163,20 @@ public:
/**
* Sets the last time the bucket was "garbage collected".
*/
- void setLastGarbageCollectionTime(uint32_t timestamp) {
+ void setLastGarbageCollectionTime(uint32_t timestamp) noexcept {
_lastGarbageCollection = timestamp;
}
/**
Update trusted flags if bucket is now complete and consistent.
*/
- void updateTrusted();
+ void updateTrusted() noexcept;
/**
Removes any historical information on trustedness, and sets the bucket copies to
trusted if they are now complete and consistent.
*/
- void resetTrusted();
+ void resetTrusted() noexcept;
/**
Adds the given node.
@@ -184,8 +195,7 @@ public:
/**
Simplified API for the common case of inserting one node. See addNodes().
*/
- void addNode(const BucketCopy& newCopy,
- const std::vector<uint16_t>& recommendedOrder);
+ void addNode(const BucketCopy& newCopy, const std::vector<uint16_t>& recommendedOrder);
/**
Updates bucket information for a node. Does nothing if the node
diff --git a/storage/src/vespa/storage/bucketdb/bucketinfo.hpp b/storage/src/vespa/storage/bucketdb/bucketinfo.hpp
index b7e8c5925c5..f8dbff38a99 100644
--- a/storage/src/vespa/storage/bucketdb/bucketinfo.hpp
+++ b/storage/src/vespa/storage/bucketdb/bucketinfo.hpp
@@ -9,16 +9,18 @@
namespace storage {
template <typename NodeSeq>
-std::string BucketInfoBase<NodeSeq>::toString() const {
+std::string
+BucketInfoBase<NodeSeq>::toString() const {
std::ostringstream ost;
print(ost, true, "");
return ost.str();
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::emptyAndConsistent() const {
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- if (!_nodes[i].empty()) {
+bool
+BucketInfoBase<NodeSeq>::emptyAndConsistent() const noexcept {
+ for (const auto & n : _nodes) {
+ if (!n.empty()) {
return false;
}
}
@@ -26,9 +28,10 @@ bool BucketInfoBase<NodeSeq>::emptyAndConsistent() const {
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::validAndConsistent() const {
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- if (!_nodes[i].valid()) {
+bool
+BucketInfoBase<NodeSeq>::validAndConsistent() const noexcept {
+ for (const auto & n : _nodes) {
+ if (!n.valid()) {
return false;
}
}
@@ -36,9 +39,10 @@ bool BucketInfoBase<NodeSeq>::validAndConsistent() const {
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::hasInvalidCopy() const {
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- if (!_nodes[i].valid()) {
+bool
+BucketInfoBase<NodeSeq>::hasInvalidCopy() const noexcept {
+ for (const auto & n : _nodes){
+ if (!n.valid()) {
return true;
}
}
@@ -46,10 +50,11 @@ bool BucketInfoBase<NodeSeq>::hasInvalidCopy() const {
}
template <typename NodeSeq>
-uint16_t BucketInfoBase<NodeSeq>::getTrustedCount() const {
+uint16_t
+BucketInfoBase<NodeSeq>::getTrustedCount() const noexcept {
uint32_t trustedCount = 0;
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- if (_nodes[i].trusted()) {
+ for (const auto & n : _nodes) {
+ if (n.trusted()) {
trustedCount++;
}
}
@@ -57,11 +62,11 @@ uint16_t BucketInfoBase<NodeSeq>::getTrustedCount() const {
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::consistentNodes(bool countInvalidAsConsistent) const {
+bool
+BucketInfoBase<NodeSeq>::consistentNodes() const noexcept {
int compareIndex = 0;
for (uint32_t i = 1; i < _nodes.size(); i++) {
- if (!_nodes[i].consistentWith(_nodes[compareIndex],
- countInvalidAsConsistent)) return false;
+ if (!_nodes[i].consistentWith(_nodes[compareIndex])) return false;
}
return true;
}
@@ -90,14 +95,16 @@ struct ReplicaMetadata {
};
};
-constexpr bool is_majority(size_t n, size_t m) {
+constexpr bool
+is_majority(size_t n, size_t m) noexcept {
return (n >= (m / 2) + 1);
}
}
template <typename NodeSeq>
-api::BucketInfo BucketInfoBase<NodeSeq>::majority_consistent_bucket_info() const noexcept {
+api::BucketInfo
+BucketInfoBase<NodeSeq>::majority_consistent_bucket_info() const noexcept {
if (_nodes.size() < 3) {
return {};
}
@@ -116,7 +123,8 @@ api::BucketInfo BucketInfoBase<NodeSeq>::majority_consistent_bucket_info() const
}
template <typename NodeSeq>
-void BucketInfoBase<NodeSeq>::print(std::ostream& out, bool verbose, const std::string& indent) const {
+void
+BucketInfoBase<NodeSeq>::print(std::ostream& out, bool verbose, const std::string& indent) const {
if (_nodes.size() == 0) {
out << "no nodes";
}
@@ -129,7 +137,8 @@ void BucketInfoBase<NodeSeq>::print(std::ostream& out, bool verbose, const std::
}
template <typename NodeSeq>
-const BucketCopy* BucketInfoBase<NodeSeq>::getNode(uint16_t node) const {
+const BucketCopy*
+BucketInfoBase<NodeSeq>::getNode(uint16_t node) const noexcept {
for (const auto& n : _nodes) {
if (n.getNode() == node) {
return &n;
@@ -139,54 +148,61 @@ const BucketCopy* BucketInfoBase<NodeSeq>::getNode(uint16_t node) const {
}
template <typename NodeSeq>
-std::vector<uint16_t> BucketInfoBase<NodeSeq>::getNodes() const {
+std::vector<uint16_t>
+BucketInfoBase<NodeSeq>::getNodes() const noexcept {
std::vector<uint16_t> result;
- for (uint32_t i = 0; i < _nodes.size(); i++) {
- result.emplace_back(_nodes[i].getNode());
+ result.reserve(_nodes.size());
+ for (const auto & n : _nodes) {
+ result.emplace_back(n.getNode());
}
return result;
}
template <typename NodeSeq>
-uint32_t BucketInfoBase<NodeSeq>::getHighestDocumentCount() const {
- uint32_t highest = 0;
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- highest = std::max(highest, _nodes[i].getDocumentCount());
+BucketInfoBase<NodeSeq>::Highest
+BucketInfoBase<NodeSeq>::getHighest() const noexcept {
+ Highest highest;
+ for (const auto & n : _nodes) {
+ highest.update(n);
}
return highest;
}
template <typename NodeSeq>
-uint32_t BucketInfoBase<NodeSeq>::getHighestTotalDocumentSize() const {
+uint32_t
+BucketInfoBase<NodeSeq>::getHighestDocumentCount() const noexcept {
uint32_t highest = 0;
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- highest = std::max(highest, _nodes[i].getTotalDocumentSize());
+ for (const auto & n : _nodes) {
+ highest = std::max(highest, n.getDocumentCount());
}
return highest;
}
template <typename NodeSeq>
-uint32_t BucketInfoBase<NodeSeq>::getHighestMetaCount() const {
+uint32_t
+BucketInfoBase<NodeSeq>::getHighestMetaCount() const noexcept {
uint32_t highest = 0;
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- highest = std::max(highest, _nodes[i].getMetaCount());
+ for (const auto & n : _nodes) {
+ highest = std::max(highest, n.getMetaCount());
}
return highest;
}
template <typename NodeSeq>
-uint32_t BucketInfoBase<NodeSeq>::getHighestUsedFileSize() const {
+uint32_t
+BucketInfoBase<NodeSeq>::getHighestUsedFileSize() const noexcept {
uint32_t highest = 0;
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- highest = std::max(highest, _nodes[i].getUsedFileSize());
+ for (const auto & n : _nodes) {
+ highest = std::max(highest, n.getUsedFileSize());
}
return highest;
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::hasRecentlyCreatedEmptyCopy() const {
- for (uint32_t i = 0; i < _nodes.size(); ++i) {
- if (_nodes[i].wasRecentlyCreated()) {
+bool
+BucketInfoBase<NodeSeq>::hasRecentlyCreatedEmptyCopy() const noexcept {
+ for (const auto & n : _nodes) {
+ if (n.wasRecentlyCreated()) {
return true;
}
}
@@ -194,7 +210,8 @@ bool BucketInfoBase<NodeSeq>::hasRecentlyCreatedEmptyCopy() const {
}
template <typename NodeSeq>
-bool BucketInfoBase<NodeSeq>::operator==(const BucketInfoBase<NodeSeq>& other) const {
+bool
+BucketInfoBase<NodeSeq>::operator==(const BucketInfoBase<NodeSeq>& other) const noexcept {
if (_nodes.size() != other._nodes.size()) {
return false;
}
@@ -210,6 +227,6 @@ bool BucketInfoBase<NodeSeq>::operator==(const BucketInfoBase<NodeSeq>& other) c
}
return true;
-};
+}
}
diff --git a/storage/src/vespa/storage/common/distributorcomponent.h b/storage/src/vespa/storage/common/distributorcomponent.h
index 06bb49a6090..6542bf2ddfe 100644
--- a/storage/src/vespa/storage/common/distributorcomponent.h
+++ b/storage/src/vespa/storage/common/distributorcomponent.h
@@ -34,13 +34,6 @@
namespace storage {
-namespace bucketdb {
- class DistrBucketDatabase;
-}
-namespace lib {
- class IdealNodeCalculator;
-}
-
using DistributorConfig = vespa::config::content::core::internal::InternalStorDistributormanagerType;
using VisitorConfig = vespa::config::content::core::internal::InternalStorVisitordispatcherType;
diff --git a/storage/src/vespa/storage/distributor/activecopy.cpp b/storage/src/vespa/storage/distributor/activecopy.cpp
index c46e9868cc8..4e3ef4f88ee 100644
--- a/storage/src/vespa/storage/distributor/activecopy.cpp
+++ b/storage/src/vespa/storage/distributor/activecopy.cpp
@@ -1,49 +1,34 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "activecopy.h"
-
-#include <vespa/storage/storageutil/utils.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <algorithm>
#include <cassert>
namespace std {
- template<typename T>
- std::ostream& operator<<(std::ostream& out, const std::vector<T>& v) {
- out << "[";
- for (uint32_t i=0; i<v.size(); ++i) {
- out << "\n " << v[i];
- }
- if (!v.empty()) {
- out << "\n";
- }
- return out << "]";
+
+template<typename T>
+std::ostream& operator<<(std::ostream& out, const std::vector<T>& v) {
+ out << "[";
+ for (uint32_t i=0; i<v.size(); ++i) {
+ out << "\n " << v[i];
+ }
+ if (!v.empty()) {
+ out << "\n";
}
+ return out << "]";
+}
+
}
namespace storage::distributor {
-ActiveCopy::ActiveCopy(uint16_t node, const BucketDatabase::Entry& e, const std::vector<uint16_t>& idealState)
- : _nodeIndex(node),
- _ideal(0xffff)
-{
- const BucketCopy* copy = e->getNode(node);
- assert(copy != nullptr);
- _doc_count = copy->getDocumentCount();
- _ready = copy->ready();
- _active = copy->active();
- for (uint32_t i=0; i<idealState.size(); ++i) {
- if (idealState[i] == node) {
- _ideal = i;
- break;
- }
- }
-}
+using IndexList = lib::Distribution::IndexList;
vespalib::string
ActiveCopy::getReason() const {
- if (_ready && (_doc_count > 0) && (_ideal < 0xffff)) {
+ if (_ready && (_doc_count > 0) && valid_ideal()) {
vespalib::asciistream ost;
ost << "copy is ready, has " << _doc_count
<< " docs and ideal state priority " << _ideal;
@@ -54,7 +39,7 @@ ActiveCopy::getReason() const {
return ost.str();
} else if (_ready) {
return "copy is ready";
- } else if ((_doc_count > 0) && (_ideal < 0xffff)) {
+ } else if ((_doc_count > 0) && valid_ideal()) {
vespalib::asciistream ost;
ost << "copy has " << _doc_count << " docs and ideal state priority " << _ideal;
return ost.str();
@@ -64,7 +49,7 @@ ActiveCopy::getReason() const {
return ost.str();
} else if (_active) {
return "copy is already active";
- } else if (_ideal < 0xffff) {
+ } else if (valid_ideal()) {
vespalib::asciistream ost;
ost << "copy is ideal state priority " << _ideal;
return ost.str();
@@ -82,7 +67,7 @@ operator<<(std::ostream& out, const ActiveCopy & e) {
if (e._doc_count > 0) {
out << ", doc_count " << e._doc_count;
}
- if (e._ideal < 0xffff) {
+ if (e.valid_ideal()) {
out << ", ideal pri " << e._ideal;
}
out << ")";
@@ -91,66 +76,66 @@ operator<<(std::ostream& out, const ActiveCopy & e) {
namespace {
- struct ActiveStateOrder {
- bool operator()(const ActiveCopy & e1, const ActiveCopy & e2) {
- if (e1._ready != e2._ready) {
- return e1._ready;
- }
- if (e1._doc_count != e2._doc_count) {
- return e1._doc_count > e2._doc_count;
- }
- if (e1._ideal != e2._ideal) {
- return e1._ideal < e2._ideal;
- }
- if (e1._active != e2._active) {
- return e1._active;
- }
- return e1._nodeIndex < e2._nodeIndex;
- }
- };
-
- std::vector<uint16_t>
- buildValidNodeIndexList(BucketDatabase::Entry& e) {
- std::vector<uint16_t> result;
- result.reserve(e->getNodeCount());
- for (uint32_t i=0, n=e->getNodeCount(); i < n; ++i) {
- const BucketCopy& cp = e->getNodeRef(i);
- if (!cp.valid()) {
- continue;
- }
+IndexList
+buildValidNodeIndexList(const BucketDatabase::Entry& e) {
+ IndexList result;
+ result.reserve(e->getNodeCount());
+ for (uint32_t i=0, n=e->getNodeCount(); i < n; ++i) {
+ const BucketCopy& cp = e->getNodeRef(i);
+ if (cp.valid()) {
result.push_back(cp.getNode());
}
- return result;
}
+ return result;
+}
- std::vector<ActiveCopy>
- buildNodeList(BucketDatabase::Entry& e,
- const std::vector<uint16_t>& nodeIndexes,
- const std::vector<uint16_t>& idealState)
- {
- std::vector<ActiveCopy> result;
- result.reserve(nodeIndexes.size());
- for (uint16_t nodeIndex : nodeIndexes) {
- result.emplace_back(nodeIndex, e, idealState);
- }
- return result;
+using SmallActiveCopyList = vespalib::SmallVector<ActiveCopy, 2>;
+static_assert(sizeof(SmallActiveCopyList) == 40);
+
+SmallActiveCopyList
+buildNodeList(const BucketDatabase::Entry& e,vespalib::ConstArrayRef<uint16_t> nodeIndexes, const IdealServiceLayerNodesBundle::Node2Index & idealState)
+{
+ SmallActiveCopyList result;
+ result.reserve(nodeIndexes.size());
+ for (uint16_t nodeIndex : nodeIndexes) {
+ const BucketCopy *copy = e->getNode(nodeIndex);
+ assert(copy);
+ result.emplace_back(nodeIndex, *copy, idealState.lookup(nodeIndex));
}
+ return result;
}
+}
+
+struct ActiveStateOrder {
+ bool operator()(const ActiveCopy & e1, const ActiveCopy & e2) noexcept {
+ if (e1._ready != e2._ready) {
+ return e1._ready;
+ }
+ if (e1._doc_count != e2._doc_count) {
+ return e1._doc_count > e2._doc_count;
+ }
+ if (e1._ideal != e2._ideal) {
+ return e1._ideal < e2._ideal;
+ }
+ if (e1._active != e2._active) {
+ return e1._active;
+ }
+ return e1.nodeIndex() < e2.nodeIndex();
+ }
+};
+
ActiveList
-ActiveCopy::calculate(const std::vector<uint16_t>& idealState,
- const lib::Distribution& distribution,
- BucketDatabase::Entry& e,
- uint32_t max_activation_inhibited_out_of_sync_groups)
+ActiveCopy::calculate(const Node2Index & idealState, const lib::Distribution& distribution,
+ const BucketDatabase::Entry& e, uint32_t max_activation_inhibited_out_of_sync_groups)
{
- std::vector<uint16_t> validNodesWithCopy = buildValidNodeIndexList(e);
+ IndexList validNodesWithCopy = buildValidNodeIndexList(e);
if (validNodesWithCopy.empty()) {
return ActiveList();
}
- using IndexList = std::vector<uint16_t>;
std::vector<IndexList> groups;
if (distribution.activePerGroup()) {
- groups = distribution.splitNodesIntoLeafGroups(std::move(validNodesWithCopy));
+ groups = distribution.splitNodesIntoLeafGroups(validNodesWithCopy);
} else {
groups.push_back(std::move(validNodesWithCopy));
}
@@ -162,7 +147,7 @@ ActiveCopy::calculate(const std::vector<uint16_t>& idealState,
: api::BucketInfo()); // Invalid by default
uint32_t inhibited_groups = 0;
for (const auto& group_nodes : groups) {
- std::vector<ActiveCopy> entries = buildNodeList(e, group_nodes, idealState);
+ SmallActiveCopyList entries = buildNodeList(e, group_nodes, idealState);
auto best = std::min_element(entries.begin(), entries.end(), ActiveStateOrder());
if ((groups.size() > 1) &&
(inhibited_groups < max_activation_inhibited_out_of_sync_groups) &&
@@ -180,24 +165,22 @@ ActiveCopy::calculate(const std::vector<uint16_t>& idealState,
}
void
-ActiveList::print(std::ostream& out, bool verbose,
- const std::string& indent) const
+ActiveList::print(std::ostream& out, bool verbose, const std::string& indent) const
{
out << "[";
if (verbose) {
for (size_t i=0; i<_v.size(); ++i) {
- out << "\n" << indent << " "
- << _v[i]._nodeIndex << " " << _v[i].getReason();
+ out << "\n" << indent << " " << _v[i].nodeIndex() << " " << _v[i].getReason();
}
if (!_v.empty()) {
out << "\n" << indent;
}
} else {
if (!_v.empty()) {
- out << _v[0]._nodeIndex;
+ out << _v[0].nodeIndex();
}
for (size_t i=1; i<_v.size(); ++i) {
- out << " " << _v[i]._nodeIndex;
+ out << " " << _v[i].nodeIndex();
}
}
out << "]";
@@ -207,7 +190,7 @@ bool
ActiveList::contains(uint16_t node) const noexcept
{
for (const auto& candidate : _v) {
- if (node == candidate._nodeIndex) {
+ if (node == candidate.nodeIndex()) {
return true;
}
}
diff --git a/storage/src/vespa/storage/distributor/activecopy.h b/storage/src/vespa/storage/distributor/activecopy.h
index 258fe3cdf16..91dfb3f0bd0 100644
--- a/storage/src/vespa/storage/distributor/activecopy.h
+++ b/storage/src/vespa/storage/distributor/activecopy.h
@@ -2,25 +2,43 @@
#pragma once
+#include "ideal_service_layer_nodes_bundle.h"
#include <vespa/storage/bucketdb/bucketdatabase.h>
namespace storage::lib { class Distribution; }
namespace storage::distributor {
class ActiveList;
+struct ActiveStateOrder;
-struct ActiveCopy {
- constexpr ActiveCopy() noexcept : _nodeIndex(-1), _ideal(-1), _doc_count(0), _ready(false), _active(false) { }
- ActiveCopy(uint16_t node, const BucketDatabase::Entry& e, const std::vector<uint16_t>& idealState);
+class ActiveCopy {
+ using Index = IdealServiceLayerNodesBundle::Index;
+ using Node2Index = IdealServiceLayerNodesBundle::Node2Index;
+public:
+ constexpr ActiveCopy() noexcept
+ : _nodeIndex(Index::invalid()),
+ _ideal(Index::invalid()),
+ _doc_count(0),
+ _ready(false),
+ _active(false)
+ { }
+ ActiveCopy(uint16_t node, const BucketCopy & copy, uint16_t ideal) noexcept
+ : _nodeIndex(node),
+ _ideal(ideal),
+ _doc_count(copy.getDocumentCount()),
+ _ready(copy.ready()),
+ _active(copy.active())
+ { }
vespalib::string getReason() const;
friend std::ostream& operator<<(std::ostream& out, const ActiveCopy& e);
- static ActiveList calculate(const std::vector<uint16_t>& idealState,
- const lib::Distribution&,
- BucketDatabase::Entry&,
- uint32_t max_activation_inhibited_out_of_sync_groups);
-
+ static ActiveList calculate(const Node2Index & idealState, const lib::Distribution&,
+ const BucketDatabase::Entry&, uint32_t max_activation_inhibited_out_of_sync_groups);
+ uint16_t nodeIndex() const noexcept { return _nodeIndex; }
+private:
+ friend ActiveStateOrder;
+ bool valid_ideal() const noexcept { return _ideal < Index::invalid(); }
uint16_t _nodeIndex;
uint16_t _ideal;
uint32_t _doc_count;
@@ -29,18 +47,17 @@ struct ActiveCopy {
};
class ActiveList : public vespalib::Printable {
- std::vector<ActiveCopy> _v;
-
public:
- ActiveList() {}
- ActiveList(std::vector<ActiveCopy>&& v) : _v(std::move(v)) { }
+ ActiveList() noexcept {}
+ ActiveList(std::vector<ActiveCopy>&& v) noexcept : _v(std::move(v)) { }
- ActiveCopy& operator[](size_t i) noexcept { return _v[i]; }
const ActiveCopy& operator[](size_t i) const noexcept { return _v[i]; }
[[nodiscard]] bool contains(uint16_t) const noexcept;
[[nodiscard]] bool empty() const noexcept { return _v.empty(); }
size_t size() const noexcept { return _v.size(); }
void print(std::ostream&, bool verbose, const std::string& indent) const override;
+private:
+ std::vector<ActiveCopy> _v;
};
}
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
index 5969ccad4cb..7ba9c67b156 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.cpp
@@ -108,9 +108,7 @@ DistributorBucketSpace::owns_bucket_in_state(
}
bool
-DistributorBucketSpace::owns_bucket_in_state(
- const lib::ClusterState& clusterState,
- document::BucketId bucket) const
+DistributorBucketSpace::owns_bucket_in_state(const lib::ClusterState& clusterState, document::BucketId bucket) const
{
return owns_bucket_in_state(*_distribution, clusterState, bucket);
}
@@ -123,9 +121,9 @@ setup_ideal_nodes_bundle(IdealServiceLayerNodesBundle& ideal_nodes_bundle,
const lib::ClusterState& cluster_state,
document::BucketId bucket)
{
- ideal_nodes_bundle.set_available_nodes(distribution.getIdealStorageNodes(cluster_state, bucket, up_states));
- ideal_nodes_bundle.set_available_nonretired_nodes(distribution.getIdealStorageNodes(cluster_state, bucket, nonretired_up_states));
- ideal_nodes_bundle.set_available_nonretired_or_maintenance_nodes(distribution.getIdealStorageNodes(cluster_state, bucket, nonretired_or_maintenance_up_states));
+ ideal_nodes_bundle.set_nodes(distribution.getIdealStorageNodes(cluster_state, bucket, up_states),
+ distribution.getIdealStorageNodes(cluster_state, bucket, nonretired_up_states),
+ distribution.getIdealStorageNodes(cluster_state, bucket, nonretired_or_maintenance_up_states));
}
/*
@@ -152,16 +150,16 @@ DistributorBucketSpace::get_ideal_service_layer_nodes_bundle(document::BucketId
setup_ideal_nodes_bundle(ideal_nodes_bundle, *_distribution, *_clusterState, bucket);
return ideal_nodes_bundle;
}
- document::BucketId lookup_bucket(is_split_group_bucket(bucket) ? bucket.getUsedBits() : _distribution_bits, bucket.getId());
+ document::BucketId lookup_bucket(_distribution_bits, bucket.getId());
auto itr = _ideal_nodes.find(lookup_bucket);
if (itr != _ideal_nodes.end()) {
- return itr->second;
+ return *itr->second;
}
- IdealServiceLayerNodesBundle ideal_nodes_bundle;
- setup_ideal_nodes_bundle(ideal_nodes_bundle, *_distribution, *_clusterState, lookup_bucket);
+ auto ideal_nodes_bundle = std::make_unique<IdealServiceLayerNodesBundle>();
+ setup_ideal_nodes_bundle(*ideal_nodes_bundle, *_distribution, *_clusterState, lookup_bucket);
auto insres = _ideal_nodes.insert(std::make_pair(lookup_bucket, std::move(ideal_nodes_bundle)));
assert(insres.second);
- return insres.first->second;
+ return *insres.first->second;
}
BucketOwnershipFlags
diff --git a/storage/src/vespa/storage/distributor/distributor_bucket_space.h b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
index f38556a664c..a66f0e5e983 100644
--- a/storage/src/vespa/storage/distributor/distributor_bucket_space.h
+++ b/storage/src/vespa/storage/distributor/distributor_bucket_space.h
@@ -41,7 +41,7 @@ class DistributorBucketSpace {
std::shared_ptr<const lib::ClusterState> _pending_cluster_state;
std::vector<bool> _available_nodes;
mutable vespalib::hash_map<document::BucketId, BucketOwnershipFlags, document::BucketId::hash> _ownerships;
- mutable vespalib::hash_map<document::BucketId, IdealServiceLayerNodesBundle, document::BucketId::hash> _ideal_nodes;
+ mutable vespalib::hash_map<document::BucketId, std::unique_ptr<IdealServiceLayerNodesBundle>, document::BucketId::hash> _ideal_nodes;
void clear();
void enumerate_available_nodes();
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.cpp b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
index ede85c036b3..243b3c5ecb2 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.cpp
@@ -314,7 +314,7 @@ DistributorStripe::enterRecoveryMode()
{
LOG(debug, "Entering recovery mode");
_schedulingMode = MaintenanceScheduler::RECOVERY_SCHEDULING_MODE;
- _scanner->reset();
+ (void)_scanner->fetch_and_reset(); // Just drop accumulated stats on the floor.
// We enter recovery mode due to cluster state or distribution config changes.
// Until we have completed a new DB scan round, we don't know the state of our
// newly owned buckets and must not report stats for these out to the cluster
@@ -604,11 +604,9 @@ PerNodeBucketSpacesStats
toBucketSpacesStats(const NodeMaintenanceStatsTracker &maintenanceStats)
{
PerNodeBucketSpacesStats result;
- for (const auto &nodeEntry : maintenanceStats.perNodeStats()) {
- for (const auto &bucketSpaceEntry : nodeEntry.second) {
- auto bucketSpace = document::FixedBucketSpaces::to_string(bucketSpaceEntry.first);
- result[nodeEntry.first][bucketSpace] = toBucketSpaceStats(bucketSpaceEntry.second);
- }
+ for (const auto &entry : maintenanceStats.perNodeStats()) {
+ auto bucketSpace = document::FixedBucketSpaces::to_string(entry.first.bucketSpace());
+ result[entry.first.node()][bucketSpace] = toBucketSpaceStats(entry.second);
}
return result;
}
@@ -643,7 +641,7 @@ DistributorStripe::updateInternalMetricsForCompletedScan()
_bucketDBMetricUpdater.completeRound();
_bucketDbStats = _bucketDBMetricUpdater.getLastCompleteStats();
- _maintenanceStats = _scanner->getPendingMaintenanceStats();
+ _maintenanceStats = _scanner->fetch_and_reset();
auto new_space_stats = toBucketSpacesStats(_maintenanceStats.perNodeStats);
if (merge_no_longer_pending_edge(_bucketSpacesStats, new_space_stats)) {
_must_send_updated_host_info = true;
@@ -684,7 +682,6 @@ DistributorStripe::scanNextBucket()
updateInternalMetricsForCompletedScan();
leaveRecoveryMode();
send_updated_host_info_if_required();
- _scanner->reset();
} else {
const auto &distribution(_bucketSpaceRepo->get(scanResult.getBucketSpace()).getDistribution());
_bucketDBMetricUpdater.visit(scanResult.getEntry(), distribution.getRedundancy());
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
index 9a5fd595b1d..47b89b2dd19 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.cpp
@@ -5,6 +5,7 @@
#include "distributor_bucket_space.h"
#include "pendingmessagetracker.h"
#include "storage_node_up_states.h"
+#include <vespa/storage/storageutil/utils.h>
#include <vespa/storageframework/generic/clock/clock.h>
#include <vespa/document/select/parser.h>
#include <vespa/vdslib/state/cluster_state_bundle.h>
@@ -17,12 +18,11 @@ using document::BucketSpace;
namespace storage::distributor {
-DistributorStripeComponent::DistributorStripeComponent(
- DistributorStripeInterface& distributor,
- DistributorBucketSpaceRepo& bucketSpaceRepo,
- DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
- DistributorComponentRegister& compReg,
- const std::string& name)
+DistributorStripeComponent::DistributorStripeComponent(DistributorStripeInterface& distributor,
+ DistributorBucketSpaceRepo& bucketSpaceRepo,
+ DistributorBucketSpaceRepo& readOnlyBucketSpaceRepo,
+ DistributorComponentRegister& compReg,
+ const std::string& name)
: storage::DistributorComponent(compReg, name),
_distributor(distributor),
_bucketSpaceRepo(bucketSpaceRepo),
@@ -44,30 +44,6 @@ DistributorStripeComponent::sendUp(const api::StorageMessage::SP& msg)
_distributor.getMessageSender().sendUp(msg);
}
-void
-DistributorStripeComponent::enumerateUnavailableNodes(
- std::vector<uint16_t>& unavailableNodes,
- const lib::ClusterState& s,
- const document::Bucket& bucket,
- const std::vector<BucketCopy>& candidates) const
-{
- const auto* up_states = storage_node_up_states();
- for (uint32_t i = 0; i < candidates.size(); ++i) {
- const BucketCopy& copy(candidates[i]);
- const lib::NodeState& ns(
- s.getNodeState(lib::Node(lib::NodeType::STORAGE, copy.getNode())));
- if (!ns.getState().oneOf(up_states)) {
- LOG(debug,
- "Trying to add a bucket copy to %s whose node is marked as "
- "down in the cluster state: %s. Ignoring it since no zombies "
- "are allowed!",
- bucket.toString().c_str(),
- copy.toString().c_str());
- unavailableNodes.emplace_back(copy.getNode());
- }
- }
-}
-
namespace {
/**
@@ -78,18 +54,19 @@ class UpdateBucketDatabaseProcessor : public BucketDatabase::EntryUpdateProcesso
const std::vector<BucketCopy>& _changed_nodes;
std::vector<uint16_t> _ideal_nodes;
bool _reset_trusted;
+ using ConstNodesRef = IdealServiceLayerNodesBundle::ConstNodesRef;
public:
- UpdateBucketDatabaseProcessor(const framework::Clock& clock, const std::vector<BucketCopy>& changed_nodes, std::vector<uint16_t> ideal_nodes, bool reset_trusted);
+ UpdateBucketDatabaseProcessor(const framework::Clock& clock, const std::vector<BucketCopy>& changed_nodes, ConstNodesRef ideal_nodes, bool reset_trusted);
~UpdateBucketDatabaseProcessor() override;
BucketDatabase::Entry create_entry(const document::BucketId& bucket) const override;
bool process_entry(BucketDatabase::Entry &entry) const override;
};
-UpdateBucketDatabaseProcessor::UpdateBucketDatabaseProcessor(const framework::Clock& clock, const std::vector<BucketCopy>& changed_nodes, std::vector<uint16_t> ideal_nodes, bool reset_trusted)
+UpdateBucketDatabaseProcessor::UpdateBucketDatabaseProcessor(const framework::Clock& clock, const std::vector<BucketCopy>& changed_nodes, ConstNodesRef ideal_nodes, bool reset_trusted)
: BucketDatabase::EntryUpdateProcessor(),
_clock(clock),
_changed_nodes(changed_nodes),
- _ideal_nodes(std::move(ideal_nodes)),
+ _ideal_nodes(ideal_nodes.cbegin(), ideal_nodes.cend()),
_reset_trusted(reset_trusted)
{
}
@@ -97,8 +74,7 @@ UpdateBucketDatabaseProcessor::UpdateBucketDatabaseProcessor(const framework::Cl
UpdateBucketDatabaseProcessor::~UpdateBucketDatabaseProcessor() = default;
BucketDatabase::Entry
-UpdateBucketDatabaseProcessor::create_entry(const document::BucketId &bucket) const
-{
+UpdateBucketDatabaseProcessor::create_entry(const document::BucketId &bucket) const {
return BucketDatabase::Entry(bucket, BucketInfo());
}
@@ -125,21 +101,16 @@ UpdateBucketDatabaseProcessor::process_entry(BucketDatabase::Entry &entry) const
}
void
-DistributorStripeComponent::update_bucket_database(
- const document::Bucket& bucket,
- const std::vector<BucketCopy>& changed_nodes,
- uint32_t update_flags)
+DistributorStripeComponent::update_bucket_database(const document::Bucket& bucket,
+ const std::vector<BucketCopy>& changed_nodes, uint32_t update_flags)
{
auto &bucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
assert(!(bucket.getBucketId() == document::BucketId()));
BucketOwnership ownership(bucketSpace.check_ownership_in_pending_and_current_state(bucket.getBucketId()));
if (!ownership.isOwned()) {
- LOG(debug,
- "Trying to add %s to database that we do not own according to "
- "cluster state '%s' - ignoring!",
- bucket.toString().c_str(),
- ownership.getNonOwnedState().toString().c_str());
+ LOG(debug, "Trying to add %s to database that we do not own according to cluster state '%s' - ignoring!",
+ bucket.toString().c_str(), ownership.getNonOwnedState().toString().c_str());
return;
}
@@ -168,7 +139,7 @@ DistributorStripeComponent::update_bucket_database(
UpdateBucketDatabaseProcessor processor(getClock(),
found_down_node ? up_nodes : changed_nodes,
- bucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).get_available_nodes(),
+ bucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).available_nodes(),
(update_flags & DatabaseUpdate::RESET_TRUSTED) != 0);
bucketSpace.getBucketDatabase().process_update(bucket.getBucketId(), processor, (update_flags & DatabaseUpdate::CREATE_IF_NONEXISTING) != 0);
@@ -184,8 +155,7 @@ DistributorStripeComponent::node_address(uint16_t node_index) const noexcept
// Implements DistributorStripeOperationContext
void
-DistributorStripeComponent::remove_nodes_from_bucket_database(const document::Bucket& bucket,
- const std::vector<uint16_t>& nodes)
+DistributorStripeComponent::remove_nodes_from_bucket_database(const document::Bucket& bucket, const std::vector<uint16_t>& nodes)
{
auto &bucketSpace(_bucketSpaceRepo.get(bucket.getBucketSpace()));
BucketDatabase::Entry dbentry = bucketSpace.getBucketDatabase().get(bucket.getBucketId());
@@ -193,21 +163,15 @@ DistributorStripeComponent::remove_nodes_from_bucket_database(const document::Bu
if (dbentry.valid()) {
for (uint32_t i = 0; i < nodes.size(); ++i) {
if (dbentry->removeNode(nodes[i])) {
- LOG(debug,
- "Removed node %d from bucket %s. %u copies remaining",
- nodes[i],
- bucket.toString().c_str(),
- dbentry->getNodeCount());
+ LOG(debug, "Removed node %d from bucket %s. %u copies remaining",
+ nodes[i], bucket.toString().c_str(), dbentry->getNodeCount());
}
}
if (dbentry->getNodeCount() != 0) {
bucketSpace.getBucketDatabase().update(dbentry);
} else {
- LOG(debug,
- "After update, bucket %s now has no copies. "
- "Removing from database.",
- bucket.toString().c_str());
+ LOG(debug, "After update, bucket %s now has no copies. Removing from database.", bucket.toString().c_str());
bucketSpace.getBucketDatabase().remove(bucket.getBucketId());
}
@@ -218,7 +182,6 @@ document::BucketId
DistributorStripeComponent::make_split_bit_constrained_bucket_id(const document::DocumentId& doc_id) const
{
document::BucketId id(getBucketIdFactory().getBucketId(doc_id));
-
id.setUsedBits(_distributor.getConfig().getMinimalBucketSplit());
return id.stripUnused();
}
@@ -239,28 +202,18 @@ DistributorStripeComponent::get_sibling(const document::BucketId& bid) const
zeroBucket = document::BucketId(1, 0);
oneBucket = document::BucketId(1, 1);
} else {
- document::BucketId joinedBucket = document::BucketId(
- bid.getUsedBits() - 1,
- bid.getId());
-
- zeroBucket = document::BucketId(
- bid.getUsedBits(),
- joinedBucket.getId());
-
+ document::BucketId joinedBucket = document::BucketId(bid.getUsedBits() - 1,bid.getId());
+ zeroBucket = document::BucketId(bid.getUsedBits(), joinedBucket.getId());
uint64_t hiBit = 1;
hiBit <<= (bid.getUsedBits() - 1);
- oneBucket = document::BucketId(
- bid.getUsedBits(),
- joinedBucket.getId() | hiBit);
+ oneBucket = document::BucketId(bid.getUsedBits(), joinedBucket.getId() | hiBit);
}
return (zeroBucket == bid) ? oneBucket : zeroBucket;
}
bool
-DistributorStripeComponent::has_pending_message(uint16_t node_index,
- const document::Bucket& bucket,
- uint32_t message_type) const
+DistributorStripeComponent::has_pending_message(uint16_t node_index, const document::Bucket& bucket, uint32_t message_type) const
{
const auto& sender = static_cast<const DistributorStripeMessageSender&>(getDistributor());
return sender.getPendingMessageTracker().hasPendingMessage(node_index, bucket, message_type);
@@ -275,8 +228,7 @@ DistributorStripeComponent::cluster_state_bundle() const
bool
DistributorStripeComponent::storage_node_is_up(document::BucketSpace bucket_space, uint32_t node_index) const
{
- const lib::NodeState& ns = cluster_state_bundle().getDerivedClusterState(bucket_space)->getNodeState(
- lib::Node(lib::NodeType::STORAGE, node_index));
+ const auto & ns = cluster_state_bundle().getDerivedClusterState(bucket_space)->getNodeState(lib::Node(lib::NodeType::STORAGE, node_index));
return ns.getState().oneOf(storage_node_up_states());
}
@@ -294,4 +246,14 @@ DistributorStripeComponent::parse_selection(const vespalib::string& selection) c
return parser.parse(selection);
}
+void
+DistributorStripeComponent::update_bucket_database(const document::Bucket& bucket, const BucketCopy& changed_node, uint32_t update_flags) {
+ update_bucket_database(bucket, toVector<BucketCopy>(changed_node),update_flags);
+}
+
+void
+DistributorStripeComponent::remove_node_from_bucket_database(const document::Bucket& bucket, uint16_t node_index) {
+ remove_nodes_from_bucket_database(bucket, toVector<uint16_t>(node_index));
+}
+
}
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe_component.h b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
index 5bcf9eec76d..8fd439992f7 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe_component.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe_component.h
@@ -8,7 +8,6 @@
#include "operationowner.h"
#include "statechecker.h"
#include <vespa/storage/common/distributorcomponent.h>
-#include <vespa/storage/storageutil/utils.h>
#include <vespa/storageapi/messageapi/storagecommand.h>
#include <vespa/storageapi/buckets/bucketinfo.h>
@@ -68,37 +67,25 @@ public:
/**
* Simple API for the common case of modifying a single node.
*/
- void update_bucket_database(const document::Bucket& bucket,
- const BucketCopy& changed_node,
- uint32_t update_flags) override {
- update_bucket_database(bucket,
- toVector<BucketCopy>(changed_node),
- update_flags);
- }
-
+ void update_bucket_database(const document::Bucket& bucket, const BucketCopy& changed_node, uint32_t update_flags) override;
/**
* Adds the given copies to the bucket database.
*/
- void update_bucket_database(const document::Bucket& bucket,
- const std::vector<BucketCopy>& changed_nodes,
- uint32_t update_flags) override;
+ void update_bucket_database(const document::Bucket& bucket, const std::vector<BucketCopy>& changed_nodes, uint32_t update_flags) override;
/**
* Removes a copy from the given bucket from the bucket database.
* If the resulting bucket is empty afterwards, removes the entire
* bucket entry from the bucket database.
*/
- void remove_node_from_bucket_database(const document::Bucket& bucket, uint16_t node_index) override {
- remove_nodes_from_bucket_database(bucket, toVector<uint16_t>(node_index));
- }
+ void remove_node_from_bucket_database(const document::Bucket& bucket, uint16_t node_index) override;
/**
* Removes the given bucket copies from the bucket database.
* If the resulting bucket is empty afterwards, removes the entire
* bucket entry from the bucket database.
*/
- void remove_nodes_from_bucket_database(const document::Bucket& bucket,
- const std::vector<uint16_t>& nodes) override;
+ void remove_nodes_from_bucket_database(const document::Bucket& bucket, const std::vector<uint16_t>& nodes) override;
const DistributorBucketSpaceRepo& bucket_space_repo() const noexcept override {
return _bucketSpaceRepo;
@@ -129,9 +116,7 @@ public:
const DistributorConfiguration& distributor_config() const noexcept override {
return getDistributor().getConfig();
}
- void send_inline_split_if_bucket_too_large(document::BucketSpace bucket_space,
- const BucketDatabase::Entry& entry,
- uint8_t pri) override {
+ void send_inline_split_if_bucket_too_large(document::BucketSpace bucket_space, const BucketDatabase::Entry& entry, uint8_t pri) override {
getDistributor().checkBucketForSplit(bucket_space, entry, pri);
}
OperationRoutingSnapshot read_snapshot_for_bucket(const document::Bucket& bucket) const override {
@@ -143,9 +128,7 @@ public:
const PendingMessageTracker& pending_message_tracker() const noexcept override {
return getDistributor().getPendingMessageTracker();
}
- bool has_pending_message(uint16_t node_index,
- const document::Bucket& bucket,
- uint32_t message_type) const override;
+ bool has_pending_message(uint16_t node_index, const document::Bucket& bucket, uint32_t message_type) const override;
const lib::ClusterState* pending_cluster_state_or_null(const document::BucketSpace& bucket_space) const override {
return getDistributor().pendingClusterStateOrNull(bucket_space);
}
@@ -171,15 +154,7 @@ public:
std::unique_ptr<document::select::Node> parse_selection(const vespalib::string& selection) const override;
private:
- void enumerateUnavailableNodes(
- std::vector<uint16_t>& unavailableNodes,
- const lib::ClusterState& s,
- const document::Bucket& bucket,
- const std::vector<BucketCopy>& candidates) const;
DistributorStripeInterface& _distributor;
-
-protected:
-
DistributorBucketSpaceRepo& _bucketSpaceRepo;
DistributorBucketSpaceRepo& _readOnlyBucketSpaceRepo;
};
diff --git a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp
index 0d37219356e..1ce5e5c589f 100644
--- a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp
+++ b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.cpp
@@ -1,19 +1,60 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "ideal_service_layer_nodes_bundle.h"
-#include <vespa/vdslib/distribution/idealnodecalculator.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
namespace storage::distributor {
-IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle() noexcept
- : _available_nodes(),
- _available_nonretired_nodes(),
- _available_nonretired_or_maintenance_nodes()
-{
+namespace {
+constexpr size_t BUILD_HASH_LIMIT = 32;
}
-IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle(IdealServiceLayerNodesBundle &&) noexcept = default;
+struct IdealServiceLayerNodesBundle::LookupMap : public vespalib::hash_map<uint16_t, Index> {
+ using Parent = vespalib::hash_map<uint16_t, Index>;
+ using Parent::Parent;
+};
+IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle() noexcept = default;
+IdealServiceLayerNodesBundle::IdealServiceLayerNodesBundle(IdealServiceLayerNodesBundle &&) noexcept = default;
IdealServiceLayerNodesBundle::~IdealServiceLayerNodesBundle() = default;
+void
+IdealServiceLayerNodesBundle::set_nodes(ConstNodesRef nodes,
+ ConstNodesRef nonretired_nodes,
+ ConstNodesRef nonretired_or_maintenance_nodes)
+{
+ _nodes.clear();
+ _nodes.reserve(nodes.size() + nonretired_nodes.size() + nonretired_or_maintenance_nodes.size());
+ std::for_each(nodes.cbegin(), nodes.cend(), [this](uint16_t n) { _nodes.emplace_back(n); });
+ _available_sz = nodes.size();
+ std::for_each(nonretired_nodes.cbegin(), nonretired_nodes.cend(), [this](uint16_t n) { _nodes.emplace_back(n); });
+ _nonretired_sz = nonretired_nodes.size();
+ std::for_each(nonretired_or_maintenance_nodes.cbegin(), nonretired_or_maintenance_nodes.cend(), [this](uint16_t n) { _nodes.emplace_back(n); });
+
+ if (nonretired_or_maintenance_nodes.size() > BUILD_HASH_LIMIT) {
+ _nonretired_or_maintenance_node_2_index = std::make_unique<LookupMap>(nonretired_or_maintenance_nodes.size());
+ for (uint16_t i(0); i < nonretired_or_maintenance_nodes.size(); i++) {
+ _nonretired_or_maintenance_node_2_index->insert(std::make_pair(nonretired_or_maintenance_nodes[i], Index(i)));
+ }
+ }
+}
+
+IdealServiceLayerNodesBundle::Index
+IdealServiceLayerNodesBundle::ConstNodesRef2Index::lookup(uint16_t node) const noexcept {
+ for (uint16_t i(0); i < _idealState.size(); i++) {
+ if (node == _idealState[i]) return Index(i);
+ }
+ return Index::invalid();
+}
+
+IdealServiceLayerNodesBundle::Index
+IdealServiceLayerNodesBundle::nonretired_or_maintenance_index(uint16_t node) const noexcept {
+ if (_nonretired_or_maintenance_node_2_index) {
+ const auto found = _nonretired_or_maintenance_node_2_index->find(node);
+ return (found != _nonretired_or_maintenance_node_2_index->end()) ? found->second : Index::invalid();
+ } else {
+ return ConstNodesRef2Index(available_nonretired_or_maintenance_nodes()).lookup(node);
+ }
+}
+
}
diff --git a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h
index 929ec7aadc1..1fce5bf0813 100644
--- a/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h
+++ b/storage/src/vespa/storage/distributor/ideal_service_layer_nodes_bundle.h
@@ -1,8 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vector>
-#include <cstdint>
+#include <vespa/vespalib/util/small_vector.h>
namespace storage::distributor {
@@ -10,28 +9,63 @@ namespace storage::distributor {
* Bundle of ideal service layer nodes for a bucket.
*/
class IdealServiceLayerNodesBundle {
- std::vector<uint16_t> _available_nodes;
- std::vector<uint16_t> _available_nonretired_nodes;
- std::vector<uint16_t> _available_nonretired_or_maintenance_nodes;
public:
+ using ConstNodesRef = vespalib::ConstArrayRef<uint16_t>;
+ class Index {
+ public:
+ constexpr explicit Index(uint16_t index) noexcept : _index(index) {}
+ constexpr bool valid() const noexcept {
+ return _index < MAX_INDEX;
+ }
+ constexpr operator uint16_t () const noexcept { return _index; }
+ static constexpr Index invalid() noexcept { return Index(MAX_INDEX); }
+ private:
+ static constexpr uint16_t MAX_INDEX = 0xffff;
+ uint16_t _index;
+ };
+ struct Node2Index {
+ virtual ~Node2Index() = default;
+ virtual Index lookup(uint16_t node) const noexcept = 0;
+ };
+ class NonRetiredOrMaintenance2Index final : public Node2Index {
+ public:
+ NonRetiredOrMaintenance2Index(const IdealServiceLayerNodesBundle & idealState) noexcept : _idealState(idealState) {}
+ Index lookup(uint16_t node) const noexcept override {
+ return _idealState.nonretired_or_maintenance_index(node);
+ }
+ private:
+ const IdealServiceLayerNodesBundle & _idealState;
+ };
+ class ConstNodesRef2Index final : public Node2Index {
+ public:
+ ConstNodesRef2Index(ConstNodesRef idealState) noexcept : _idealState(idealState) {}
+ Index lookup(uint16_t node) const noexcept override;
+ private:
+ ConstNodesRef _idealState;
+ };
IdealServiceLayerNodesBundle() noexcept;
IdealServiceLayerNodesBundle(IdealServiceLayerNodesBundle &&) noexcept;
~IdealServiceLayerNodesBundle();
- void set_available_nodes(std::vector<uint16_t> available_nodes) {
- _available_nodes = std::move(available_nodes);
+ void set_nodes(ConstNodesRef nodes, ConstNodesRef nonretired_nodes, ConstNodesRef nonretired_or_maintenance_nodes);
+ ConstNodesRef available_nodes() const noexcept { return {_nodes.data(), _available_sz}; }
+ ConstNodesRef available_nonretired_nodes() const noexcept { return {_nodes.data() + _available_sz, _nonretired_sz}; }
+ ConstNodesRef available_nonretired_or_maintenance_nodes() const noexcept {
+ uint16_t offset = _available_sz + _nonretired_sz;
+ return {_nodes.data() + offset, _nodes.size() - offset};
}
- void set_available_nonretired_nodes(std::vector<uint16_t> available_nonretired_nodes) {
- _available_nonretired_nodes = std::move(available_nonretired_nodes);
- }
- void set_available_nonretired_or_maintenance_nodes(std::vector<uint16_t> available_nonretired_or_maintenance_nodes) {
- _available_nonretired_or_maintenance_nodes = std::move(available_nonretired_or_maintenance_nodes);
- }
- std::vector<uint16_t> get_available_nodes() const { return _available_nodes; }
- std::vector<uint16_t> get_available_nonretired_nodes() const { return _available_nonretired_nodes; }
- std::vector<uint16_t> get_available_nonretired_or_maintenance_nodes() const {
- return _available_nonretired_or_maintenance_nodes;
+ bool is_nonretired_or_maintenance(uint16_t node) const noexcept {
+ return nonretired_or_maintenance_index(node) != Index::invalid();
}
+ NonRetiredOrMaintenance2Index nonretired_or_maintenance_to_index() const noexcept { return {*this}; }
+ ConstNodesRef2Index available_to_index() const noexcept { return {available_nodes()}; }
+private:
+ struct LookupMap;
+ Index nonretired_or_maintenance_index(uint16_t node) const noexcept;
+ vespalib::SmallVector<uint16_t,16> _nodes;
+ std::unique_ptr<LookupMap> _nonretired_or_maintenance_node_2_index;
+ uint16_t _available_sz;
+ uint16_t _nonretired_sz;
};
}
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.cpp b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
index cad141e76ed..bc928ca3d41 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.cpp
@@ -10,10 +10,9 @@
#include <vespa/storageapi/message/persistence.h>
#include <vespa/document/bucket/fixed_bucket_spaces.h>
#include <vespa/vespalib/util/assert.h>
-#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/log/log.h>
-LOG_SETUP(".distributor.operation.queue");
+LOG_SETUP(".distributor.idealstatemanager");
using document::BucketSpace;
using storage::lib::Node;
@@ -21,10 +20,9 @@ using storage::lib::NodeType;
namespace storage::distributor {
-IdealStateManager::IdealStateManager(
- const DistributorNodeContext& node_ctx,
- DistributorStripeOperationContext& op_ctx,
- IdealStateMetricSet& metrics)
+IdealStateManager::IdealStateManager(const DistributorNodeContext& node_ctx,
+ DistributorStripeOperationContext& op_ctx,
+ IdealStateMetricSet& metrics)
: _metrics(metrics),
_stateCheckers(),
_splitBucketStateChecker(nullptr),
@@ -56,9 +54,7 @@ IdealStateManager::fillParentAndChildBuckets(StateChecker::Context& c)
{
c.db.getAll(c.getBucketId(), c.entries);
if (c.entries.empty()) {
- LOG(spam,
- "Did not find bucket %s in bucket database",
- c.bucket.toString().c_str());
+ LOG(spam, "Did not find bucket %s in bucket database", c.bucket.toString().c_str());
}
}
void
@@ -85,8 +81,7 @@ namespace {
* overwriting if already explicitly set.
*/
bool
-canOverwriteResult(const StateChecker::Result& existing,
- const StateChecker::Result& candidate)
+canOverwriteResult(const StateChecker::Result& existing, const StateChecker::Result& candidate)
{
return (!existing.getPriority().requiresMaintenance()
&& candidate.getPriority().requiresMaintenance());
@@ -101,9 +96,7 @@ IdealStateManager::runStateCheckers(StateChecker::Context& c) const
// We go through _all_ active state checkers so that statistics can be
// collected across all checkers, not just the ones that are highest pri.
for (const auto & checker : _stateCheckers) {
- if (!operation_context().distributor_config().stateCheckerIsActive(
- checker->getName()))
- {
+ if (!operation_context().distributor_config().stateCheckerIsActive(checker->getName())) {
LOG(spam, "Skipping state checker %s", checker->getName());
continue;
}
@@ -116,7 +109,8 @@ IdealStateManager::runStateCheckers(StateChecker::Context& c) const
return highestPri;
}
-void IdealStateManager::verify_only_live_nodes_in_context(const StateChecker::Context& c) const {
+void
+IdealStateManager::verify_only_live_nodes_in_context(const StateChecker::Context& c) const {
if (_has_logged_phantom_replica_warning) {
return;
}
@@ -125,11 +119,8 @@ void IdealStateManager::verify_only_live_nodes_in_context(const StateChecker::Co
const auto& state = c.systemState.getNodeState(lib::Node(lib::NodeType::STORAGE, index));
// Only nodes in Up, Initializing or Retired should ever be present in the DB.
if (!state.getState().oneOf("uir")) {
- LOG(error, "%s in bucket DB is on node %u, which is in unavailable state %s. "
- "Current cluster state is '%s'",
- c.entry.getBucketId().toString().c_str(),
- index,
- state.getState().toString().c_str(),
+ LOG(error, "%s in bucket DB is on node %u, which is in unavailable state %s. Current cluster state is '%s'",
+ c.entry.getBucketId().toString().c_str(), index, state.getState().toString().c_str(),
c.systemState.toString().c_str());
ASSERT_ONCE_OR_LOG(false, "Bucket DB contains replicas on unavailable node", 10000);
_has_logged_phantom_replica_warning = true;
@@ -138,9 +129,7 @@ void IdealStateManager::verify_only_live_nodes_in_context(const StateChecker::Co
}
StateChecker::Result
-IdealStateManager::generateHighestPriority(
- const document::Bucket& bucket,
- NodeMaintenanceStatsTracker& statsTracker) const
+IdealStateManager::generateHighestPriority(const document::Bucket& bucket, NodeMaintenanceStatsTracker& statsTracker) const
{
auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
@@ -159,9 +148,7 @@ IdealStateManager::generateHighestPriority(
}
MaintenancePriorityAndType
-IdealStateManager::prioritize(
- const document::Bucket& bucket,
- NodeMaintenanceStatsTracker& statsTracker) const
+IdealStateManager::prioritize(const document::Bucket& bucket, NodeMaintenanceStatsTracker& statsTracker) const
{
StateChecker::Result generated(generateHighestPriority(bucket, statsTracker));
MaintenancePriority priority(generated.getPriority());
@@ -172,8 +159,7 @@ IdealStateManager::prioritize(
}
IdealStateOperation::SP
-IdealStateManager::generateInterceptingSplit(BucketSpace bucketSpace,
- const BucketDatabase::Entry& e,
+IdealStateManager::generateInterceptingSplit(BucketSpace bucketSpace, const BucketDatabase::Entry& e,
api::StorageMessage::Priority pri)
{
NodeMaintenanceStatsTracker statsTracker;
@@ -199,18 +185,15 @@ MaintenanceOperation::SP
IdealStateManager::generate(const document::Bucket& bucket) const
{
NodeMaintenanceStatsTracker statsTracker;
- IdealStateOperation::SP op(
- generateHighestPriority(bucket, statsTracker).createOperation());
+ IdealStateOperation::SP op(generateHighestPriority(bucket, statsTracker).createOperation());
if (op.get()) {
- op->setIdealStateManager(
- const_cast<IdealStateManager*>(this));
+ op->setIdealStateManager(const_cast<IdealStateManager*>(this));
}
return op;
}
std::vector<MaintenanceOperation::SP>
-IdealStateManager::generateAll(const document::Bucket &bucket,
- NodeMaintenanceStatsTracker& statsTracker) const
+IdealStateManager::generateAll(const document::Bucket &bucket, NodeMaintenanceStatsTracker& statsTracker) const
{
auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket.getBucketSpace());
StateChecker::Context c(node_context(), operation_context(), distributorBucketSpace, statsTracker, bucket);
@@ -234,15 +217,11 @@ IdealStateManager::generateAll(const document::Bucket &bucket,
}
void
-IdealStateManager::getBucketStatus(
- BucketSpace bucketSpace,
- const BucketDatabase::ConstEntryRef& entry,
- NodeMaintenanceStatsTracker& statsTracker,
- std::ostream& out) const
+IdealStateManager::getBucketStatus(BucketSpace bucketSpace, const BucketDatabase::ConstEntryRef& entry,
+ NodeMaintenanceStatsTracker& statsTracker, std::ostream& out) const
{
document::Bucket bucket(bucketSpace, entry.getBucketId());
- std::vector<MaintenanceOperation::SP> operations(
- generateAll(bucket, statsTracker));
+ std::vector<MaintenanceOperation::SP> operations(generateAll(bucket, statsTracker));
if (operations.empty()) {
out << entry.getBucketId() << " : ";
} else {
@@ -261,13 +240,15 @@ IdealStateManager::getBucketStatus(
out << "[" << entry->toString() << "]<br>\n";
}
-void IdealStateManager::dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const {
+void
+IdealStateManager::dump_bucket_space_db_status(document::BucketSpace bucket_space, std::ostream& out) const {
StatusBucketVisitor proc(*this, bucket_space, out);
auto& distributorBucketSpace = _op_ctx.bucket_space_repo().get(bucket_space);
distributorBucketSpace.getBucketDatabase().for_each_upper_bound(proc);
}
-void IdealStateManager::getBucketStatus(std::ostream& out) const {
+void
+IdealStateManager::getBucketStatus(std::ostream& out) const {
LOG(debug, "Dumping bucket database valid at cluster state version %u",
operation_context().cluster_state_bundle().getVersion());
diff --git a/storage/src/vespa/storage/distributor/idealstatemanager.h b/storage/src/vespa/storage/distributor/idealstatemanager.h
index 0c9e3ffa1c6..39a662e4a81 100644
--- a/storage/src/vespa/storage/distributor/idealstatemanager.h
+++ b/storage/src/vespa/storage/distributor/idealstatemanager.h
@@ -49,18 +49,14 @@ public:
MaintenanceOperation::SP generate(const document::Bucket& bucket) const override;
// MaintenanceOperationGenerator
- std::vector<MaintenanceOperation::SP> generateAll(
- const document::Bucket& bucket,
- NodeMaintenanceStatsTracker& statsTracker) const override;
+ std::vector<MaintenanceOperation::SP> generateAll(const document::Bucket& bucket, NodeMaintenanceStatsTracker& statsTracker) const override;
/**
* If the given bucket is too large, generate a split operation for it,
* with higher priority than the given one.
*/
- IdealStateOperation::SP generateInterceptingSplit(
- document::BucketSpace bucketSpace,
- const BucketDatabase::Entry& e,
- api::StorageMessage::Priority pri);
+ IdealStateOperation::SP generateInterceptingSplit(document::BucketSpace bucketSpace, const BucketDatabase::Entry& e,
+ api::StorageMessage::Priority pri);
IdealStateMetricSet& getMetrics() noexcept { return _metrics; }
@@ -78,9 +74,7 @@ private:
void verify_only_live_nodes_in_context(const StateChecker::Context& c) const;
static void fillParentAndChildBuckets(StateChecker::Context& c);
static void fillSiblingBucket(StateChecker::Context& c);
- StateChecker::Result generateHighestPriority(
- const document::Bucket& bucket,
- NodeMaintenanceStatsTracker& statsTracker) const;
+ StateChecker::Result generateHighestPriority(const document::Bucket& bucket, NodeMaintenanceStatsTracker& statsTracker) const;
StateChecker::Result runStateCheckers(StateChecker::Context& c) const;
static BucketDatabase::Entry* getEntryForPrimaryBucket(StateChecker::Context& c);
diff --git a/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp b/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp
index d50b2004bf2..ea345176dd0 100644
--- a/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp
+++ b/storage/src/vespa/storage/distributor/idealstatemetricsset.cpp
@@ -134,7 +134,7 @@ IdealStateMetricSet::IdealStateMetricSet()
IdealStateMetricSet::~IdealStateMetricSet() = default;
-void IdealStateMetricSet::setPendingOperations(vespalib::ConstArrayRef<uint64_t> newMetrics) {
+void IdealStateMetricSet::setPendingOperations(std::span<uint64_t, IdealStateOperation::OPERATION_COUNT> newMetrics) {
for (uint32_t i = 0; i < IdealStateOperation::OPERATION_COUNT; i++) {
operations[i]->pending.set(newMetrics[i]);
}
diff --git a/storage/src/vespa/storage/distributor/idealstatemetricsset.h b/storage/src/vespa/storage/distributor/idealstatemetricsset.h
index 0bbc13d061a..e51e58ba3a4 100644
--- a/storage/src/vespa/storage/distributor/idealstatemetricsset.h
+++ b/storage/src/vespa/storage/distributor/idealstatemetricsset.h
@@ -5,7 +5,7 @@
#include <vespa/metrics/valuemetric.h>
#include <vespa/metrics/countmetric.h>
#include <vespa/storage/distributor/operations/idealstate/idealstateoperation.h>
-#include <vespa/vespalib/util/arrayref.h>
+#include <span>
namespace storage::distributor {
@@ -62,7 +62,7 @@ public:
IdealStateMetricSet();
~IdealStateMetricSet() override;
- void setPendingOperations(vespalib::ConstArrayRef<uint64_t> newMetrics);
+ void setPendingOperations(std::span<uint64_t, IdealStateOperation::OPERATION_COUNT> newMetrics);
};
} // storage::distributor
diff --git a/storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h b/storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h
index e4ccb6d88ad..b894ec9a1cd 100644
--- a/storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h
+++ b/storage/src/vespa/storage/distributor/maintenance/maintenancescanner.h
@@ -23,12 +23,20 @@ public:
static ScanResult createDone() { return ScanResult(true); }
static ScanResult createNotDone(document::BucketSpace bucketSpace, BucketDatabase::Entry entry) {
- return ScanResult(bucketSpace, entry);
+ return ScanResult(bucketSpace, std::move(entry));
}
private:
- explicit ScanResult(bool done) : _done(done), _bucketSpace(document::BucketSpace::invalid()), _entry() {}
- ScanResult(document::BucketSpace bucketSpace, const BucketDatabase::Entry& e) : _done(false), _bucketSpace(bucketSpace), _entry(e) {}
+ explicit ScanResult(bool done) noexcept
+ : _done(done),
+ _bucketSpace(document::BucketSpace::invalid()),
+ _entry()
+ {}
+ ScanResult(document::BucketSpace bucketSpace, BucketDatabase::Entry e) noexcept
+ : _done(false),
+ _bucketSpace(bucketSpace),
+ _entry(std::move(e))
+ {}
};
virtual ScanResult scanNext() = 0;
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
index 7ac99f5712f..b10f5abd0f1 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.cpp
@@ -1,32 +1,42 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "node_maintenance_stats_tracker.h"
+#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/stllike/hash_map_equal.hpp>
#include <ostream>
namespace storage::distributor {
const NodeMaintenanceStats NodeMaintenanceStatsTracker::_emptyNodeMaintenanceStats;
-namespace {
+NodeMaintenanceStats &
+NodeMaintenanceStatsTracker::stats(uint16_t node, document::BucketSpace bucketSpace) {
+ return _node_stats[BucketSpaceAndNode(node, bucketSpace)];
+}
-void
-merge_bucket_spaces_stats(NodeMaintenanceStatsTracker::BucketSpacesStats& dest,
- const NodeMaintenanceStatsTracker::BucketSpacesStats& src)
-{
- for (const auto& entry : src) {
- auto bucket_space = entry.first;
- dest[bucket_space].merge(entry.second);
- }
+const NodeMaintenanceStats &
+NodeMaintenanceStatsTracker::stats(uint16_t node, document::BucketSpace bucketSpace) const noexcept {
+ auto nodeItr = _node_stats.find(BucketSpaceAndNode(node, bucketSpace));
+ return (nodeItr != _node_stats.end()) ? nodeItr->second : _emptyNodeMaintenanceStats;
}
+const NodeMaintenanceStats&
+NodeMaintenanceStatsTracker::forNode(uint16_t node, document::BucketSpace bucketSpace) const noexcept {
+ return stats(node, bucketSpace);
+}
+
+bool
+NodeMaintenanceStatsTracker::operator==(const NodeMaintenanceStatsTracker& rhs) const noexcept {
+ return ((_node_stats == rhs._node_stats) &&
+ (_max_observed_time_since_last_gc == rhs._max_observed_time_since_last_gc));
}
void
NodeMaintenanceStatsTracker::merge(const NodeMaintenanceStatsTracker& rhs)
{
for (const auto& entry : rhs._node_stats) {
- auto node_index = entry.first;
- merge_bucket_spaces_stats(_node_stats[node_index], entry.second);
+ auto key = entry.first;
+ _node_stats[key].merge(entry.second);
}
_max_observed_time_since_last_gc = std::max(_max_observed_time_since_last_gc,
rhs._max_observed_time_since_last_gc);
@@ -45,13 +55,24 @@ operator<<(std::ostream& os, const NodeMaintenanceStats& stats)
return os;
}
-NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker()
+NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker() noexcept
: _node_stats(),
_total_stats(),
_max_observed_time_since_last_gc(0)
{}
+NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker(NodeMaintenanceStatsTracker &&) noexcept = default;
+NodeMaintenanceStatsTracker & NodeMaintenanceStatsTracker::operator =(NodeMaintenanceStatsTracker &&) noexcept = default;
+NodeMaintenanceStatsTracker::NodeMaintenanceStatsTracker(const NodeMaintenanceStatsTracker &) = default;
NodeMaintenanceStatsTracker::~NodeMaintenanceStatsTracker() = default;
+void
+NodeMaintenanceStatsTracker::reset(size_t nodes) {
+ _node_stats.clear();
+ _node_stats.resize(nodes);
+ _total_stats = NodeMaintenanceStats();
+ _max_observed_time_since_last_gc = vespalib::duration::zero();
+}
+
}
diff --git a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
index 3818dd4bacb..a5cb12de9a4 100644
--- a/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
+++ b/storage/src/vespa/storage/distributor/maintenance/node_maintenance_stats_tracker.h
@@ -1,9 +1,9 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <unordered_map>
#include <vespa/document/bucket/bucketspace.h>
#include <vespa/vespalib/util/time.h>
+#include <vespa/vespalib/stllike/hash_map.h>
namespace storage::distributor {
@@ -51,8 +51,23 @@ std::ostream& operator<<(std::ostream&, const NodeMaintenanceStats&);
class NodeMaintenanceStatsTracker
{
public:
- using BucketSpacesStats = std::unordered_map<document::BucketSpace, NodeMaintenanceStats, document::BucketSpace::hash>;
- using PerNodeStats = std::unordered_map<uint16_t, BucketSpacesStats>;
+ class BucketSpaceAndNode {
+ public:
+ BucketSpaceAndNode(uint16_t node_in, document::BucketSpace bucketSpace_in) noexcept
+ : _bucketSpace(bucketSpace_in),
+ _node(node_in)
+ {}
+ uint32_t hash() const noexcept { return (uint32_t(_node) << 2) | (_bucketSpace.getId() & 0x3); }
+ bool operator == (const BucketSpaceAndNode & b) const noexcept {
+ return (_bucketSpace == b._bucketSpace) && (_node == b._node);
+ }
+ document::BucketSpace bucketSpace() const noexcept { return _bucketSpace; }
+ uint16_t node() const noexcept { return _node; }
+ private:
+ document::BucketSpace _bucketSpace;
+ uint16_t _node;
+ };
+ using PerNodeStats = vespalib::hash_map<BucketSpaceAndNode, NodeMaintenanceStats>;
private:
PerNodeStats _node_stats;
@@ -61,32 +76,39 @@ private:
static const NodeMaintenanceStats _emptyNodeMaintenanceStats;
+ NodeMaintenanceStats & stats(uint16_t node, document::BucketSpace bucketSpace);
+ const NodeMaintenanceStats & stats(uint16_t node, document::BucketSpace bucketSpace) const noexcept;
public:
- NodeMaintenanceStatsTracker();
+ NodeMaintenanceStatsTracker() noexcept;
+ NodeMaintenanceStatsTracker(NodeMaintenanceStatsTracker &&) noexcept;
+ NodeMaintenanceStatsTracker & operator =(NodeMaintenanceStatsTracker &&) noexcept;
+ NodeMaintenanceStatsTracker(const NodeMaintenanceStatsTracker &);
~NodeMaintenanceStatsTracker();
+ void reset(size_t nodes);
+ size_t numNodes() const { return _node_stats.size(); }
void incMovingOut(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].movingOut;
+ ++stats(node, bucketSpace).movingOut;
++_total_stats.movingOut;
}
void incSyncing(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].syncing;
+ ++stats(node, bucketSpace).syncing;
++_total_stats.syncing;
}
void incCopyingIn(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].copyingIn;
+ ++stats(node, bucketSpace).copyingIn;
++_total_stats.copyingIn;
}
void incCopyingOut(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].copyingOut;
+ ++stats(node, bucketSpace).copyingOut;
++_total_stats.copyingOut;
}
void incTotal(uint16_t node, document::BucketSpace bucketSpace) {
- ++_node_stats[node][bucketSpace].total;
+ ++stats(node, bucketSpace).total;
++_total_stats.total;
}
@@ -98,18 +120,9 @@ public:
* Returned statistics for a given node index and bucket space, or all zero statistics
* if none have been recorded yet
*/
- const NodeMaintenanceStats& forNode(uint16_t node, document::BucketSpace bucketSpace) const {
- auto nodeItr = _node_stats.find(node);
- if (nodeItr != _node_stats.end()) {
- auto bucketSpaceItr = nodeItr->second.find(bucketSpace);
- if (bucketSpaceItr != nodeItr->second.end()) {
- return bucketSpaceItr->second;
- }
- }
- return _emptyNodeMaintenanceStats;
- }
+ const NodeMaintenanceStats& forNode(uint16_t node, document::BucketSpace bucketSpace) const noexcept;
- const PerNodeStats& perNodeStats() const {
+ const PerNodeStats& perNodeStats() const noexcept {
return _node_stats;
}
@@ -124,10 +137,7 @@ public:
return _max_observed_time_since_last_gc;
}
- bool operator==(const NodeMaintenanceStatsTracker& rhs) const {
- return ((_node_stats == rhs._node_stats) &&
- (_max_observed_time_since_last_gc == rhs._max_observed_time_since_last_gc));
- }
+ bool operator==(const NodeMaintenanceStatsTracker& rhs) const noexcept;
void merge(const NodeMaintenanceStatsTracker& rhs);
};
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
index afcbef32584..ab27f2d2e43 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.cpp
@@ -41,11 +41,20 @@ SimpleMaintenanceScanner::PendingMaintenanceStats::merge(const PendingMaintenanc
perNodeStats.merge(rhs.perNodeStats);
}
-SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats() = default;
+SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats() noexcept = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::~PendingMaintenanceStats() = default;
SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats(const PendingMaintenanceStats &) = default;
+SimpleMaintenanceScanner::PendingMaintenanceStats::PendingMaintenanceStats(PendingMaintenanceStats &&) noexcept = default;
SimpleMaintenanceScanner::PendingMaintenanceStats &
-SimpleMaintenanceScanner::PendingMaintenanceStats::operator = (const PendingMaintenanceStats &) = default;
+SimpleMaintenanceScanner::PendingMaintenanceStats::operator = (PendingMaintenanceStats &&) noexcept = default;
+
+SimpleMaintenanceScanner::PendingMaintenanceStats
+SimpleMaintenanceScanner::PendingMaintenanceStats::fetch_and_reset() {
+ PendingMaintenanceStats prev = std::move(*this);
+ global = GlobalMaintenanceStats();
+ perNodeStats.reset(prev.perNodeStats.numNodes());
+ return prev;
+}
MaintenanceScanner::ScanResult
SimpleMaintenanceScanner::scanNext()
@@ -64,16 +73,16 @@ SimpleMaintenanceScanner::scanNext()
countBucket(_bucketSpaceItr->first, entry.getBucketInfo());
prioritizeBucket(document::Bucket(_bucketSpaceItr->first, entry.getBucketId()));
_bucketCursor = entry.getBucketId();
- return ScanResult::createNotDone(_bucketSpaceItr->first, entry);
+ return ScanResult::createNotDone(_bucketSpaceItr->first, std::move(entry));
}
}
-void
-SimpleMaintenanceScanner::reset()
+SimpleMaintenanceScanner::PendingMaintenanceStats
+SimpleMaintenanceScanner::fetch_and_reset()
{
_bucketCursor = document::BucketId();
_bucketSpaceItr = _bucketSpaceRepo.begin();
- _pendingMaintenance = PendingMaintenanceStats();
+ return _pendingMaintenance.fetch_and_reset();
}
void
diff --git a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
index 7af61815c31..3d1a57a6422 100644
--- a/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
+++ b/storage/src/vespa/storage/distributor/maintenance/simplemaintenancescanner.h
@@ -23,11 +23,14 @@ public:
void merge(const GlobalMaintenanceStats& rhs) noexcept;
};
struct PendingMaintenanceStats {
- PendingMaintenanceStats();
+ PendingMaintenanceStats() noexcept;
PendingMaintenanceStats(const PendingMaintenanceStats &);
- PendingMaintenanceStats &operator = (const PendingMaintenanceStats &);
+ PendingMaintenanceStats &operator = (const PendingMaintenanceStats &) = delete;
+ PendingMaintenanceStats(PendingMaintenanceStats &&) noexcept;
+ PendingMaintenanceStats &operator = (PendingMaintenanceStats &&) noexcept;
~PendingMaintenanceStats();
- GlobalMaintenanceStats global;
+ [[nodiscard]] PendingMaintenanceStats fetch_and_reset();
+ GlobalMaintenanceStats global;
NodeMaintenanceStatsTracker perNodeStats;
void merge(const PendingMaintenanceStats& rhs);
@@ -50,11 +53,12 @@ public:
~SimpleMaintenanceScanner() override;
ScanResult scanNext() override;
- void reset();
+ [[nodiscard]] PendingMaintenanceStats fetch_and_reset();
// TODO: move out into own interface!
void prioritizeBucket(const document::Bucket &id);
+ // TODO Only for testing
const PendingMaintenanceStats& getPendingMaintenanceStats() const noexcept {
return _pendingMaintenance;
}
diff --git a/storage/src/vespa/storage/distributor/messagetracker.cpp b/storage/src/vespa/storage/distributor/messagetracker.cpp
index 8830e5ecabc..842238aa24c 100644
--- a/storage/src/vespa/storage/distributor/messagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/messagetracker.cpp
@@ -3,6 +3,7 @@
#include "messagetracker.h"
#include <vespa/storageapi/messageapi/bucketcommand.h>
#include <vespa/storageapi/messageapi/bucketreply.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
#include <cinttypes>
#include <vespa/log/log.h>
@@ -19,10 +20,11 @@ MessageTracker::~MessageTracker() = default;
void
MessageTracker::flushQueue(MessageSender& sender)
{
- for (uint32_t i = 0; i < _commandQueue.size(); i++) {
- _commandQueue[i]._msg->setAddress(api::StorageMessageAddress::create(_cluster_ctx.cluster_name_ptr(), lib::NodeType::STORAGE, _commandQueue[i]._target));
- _sentMessages[_commandQueue[i]._msg->getMsgId()] = _commandQueue[i]._target;
- sender.sendCommand(_commandQueue[i]._msg);
+ _sentMessages.resize(_sentMessages.size() + _commandQueue.size());
+ for (const auto & toSend : _commandQueue) {
+ toSend._msg->setAddress(api::StorageMessageAddress::create(_cluster_ctx.cluster_name_ptr(), lib::NodeType::STORAGE, toSend._target));
+ _sentMessages[toSend._msg->getMsgId()] = toSend._target;
+ sender.sendCommand(toSend._msg);
}
_commandQueue.clear();
@@ -31,21 +33,14 @@ MessageTracker::flushQueue(MessageSender& sender)
uint16_t
MessageTracker::handleReply(api::BucketReply& reply)
{
- std::map<uint64_t, uint16_t>::iterator found = _sentMessages.find(reply.getMsgId());
- if (found == _sentMessages.end()) {
+ const auto found = _sentMessages.find(reply.getMsgId());
+ if (found == _sentMessages.end()) [[unlikely]] {
LOG(warning, "Received reply %" PRIu64 " for callback which we have no recollection of", reply.getMsgId());
return (uint16_t)-1;
- } else {
- uint16_t node = found->second;
- _sentMessages.erase(found);
- return node;
}
-}
-
-bool
-MessageTracker::finished()
-{
- return _sentMessages.empty();
+ uint16_t node = found->second;
+ _sentMessages.erase(found);
+ return node;
}
}
diff --git a/storage/src/vespa/storage/distributor/messagetracker.h b/storage/src/vespa/storage/distributor/messagetracker.h
index 73e2461eb7a..a0234f425a0 100644
--- a/storage/src/vespa/storage/distributor/messagetracker.h
+++ b/storage/src/vespa/storage/distributor/messagetracker.h
@@ -4,8 +4,7 @@
#include <vespa/storage/common/cluster_context.h>
#include <vespa/storage/common/messagesender.h>
#include <vespa/vespalib/stllike/string.h>
-#include <vector>
-#include <map>
+#include <vespa/vespalib/stllike/hash_map.h>
namespace storage::api {
class BucketCommand;
@@ -18,16 +17,17 @@ class MessageTracker {
public:
class ToSend {
public:
- ToSend(std::shared_ptr<api::BucketCommand> msg, uint16_t target) noexcept :
- _msg(std::move(msg)), _target(target) {};
+ ToSend(std::shared_ptr<api::BucketCommand> msg, uint16_t target) noexcept
+ : _msg(std::move(msg)), _target(target)
+ {}
std::shared_ptr<api::BucketCommand> _msg;
uint16_t _target;
};
MessageTracker(const ClusterContext& cluster_context);
- MessageTracker(MessageTracker&&) = default;
- MessageTracker& operator=(MessageTracker&&) = delete;
+ MessageTracker(MessageTracker&&) noexcept = default;
+ MessageTracker& operator=(MessageTracker&&) noexcept = delete;
MessageTracker(const MessageTracker &) = delete;
MessageTracker& operator=(const MessageTracker&) = delete;
~MessageTracker();
@@ -35,6 +35,9 @@ public:
void queueCommand(std::shared_ptr<api::BucketCommand> msg, uint16_t target) {
_commandQueue.emplace_back(std::move(msg), target);
}
+ void reserve_more_commands(size_t sz) {
+ _commandQueue.reserve(_commandQueue.size() + sz);
+ }
void flushQueue(MessageSender& sender);
@@ -46,13 +49,15 @@ public:
/**
Returns true if all messages sent have been received.
*/
- bool finished();
+ bool finished() const noexcept {
+ return _sentMessages.empty();
+ }
protected:
- std::vector<ToSend> _commandQueue;
+ std::vector<ToSend> _commandQueue;
// Keeps track of which node a message was sent to.
- std::map<uint64_t, uint16_t> _sentMessages;
- const ClusterContext& _cluster_ctx;
+ vespalib::hash_map<uint64_t, uint16_t> _sentMessages;
+ const ClusterContext& _cluster_ctx;
};
}
diff --git a/storage/src/vespa/storage/distributor/nodeinfo.cpp b/storage/src/vespa/storage/distributor/nodeinfo.cpp
index 6bb1949d606..3e645f57393 100644
--- a/storage/src/vespa/storage/distributor/nodeinfo.cpp
+++ b/storage/src/vespa/storage/distributor/nodeinfo.cpp
@@ -5,14 +5,16 @@
namespace storage::distributor {
-NodeInfo::NodeInfo(const framework::Clock& clock)
+NodeInfo::NodeInfo(const framework::Clock& clock) noexcept
: _clock(clock) {}
-uint32_t NodeInfo::getPendingCount(uint16_t idx) const {
+uint32_t
+NodeInfo::getPendingCount(uint16_t idx) const {
return getNode(idx)._pending;
}
-bool NodeInfo::isBusy(uint16_t idx) const {
+bool
+NodeInfo::isBusy(uint16_t idx) const {
const SingleNodeInfo& info = getNode(idx);
if (info._busyUntilTime.time_since_epoch().count() != 0) {
if (_clock.getMonotonicTime() > info._busyUntilTime) {
@@ -25,15 +27,18 @@ bool NodeInfo::isBusy(uint16_t idx) const {
return false;
}
-void NodeInfo::setBusy(uint16_t idx, vespalib::duration for_duration) {
+void
+NodeInfo::setBusy(uint16_t idx, vespalib::duration for_duration) {
getNode(idx)._busyUntilTime = _clock.getMonotonicTime() + for_duration;
}
-void NodeInfo::incPending(uint16_t idx) {
+void
+NodeInfo::incPending(uint16_t idx) {
getNode(idx)._pending++;
}
-void NodeInfo::decPending(uint16_t idx) {
+void
+NodeInfo::decPending(uint16_t idx) {
SingleNodeInfo& info = getNode(idx);
if (info._pending > 0) {
@@ -41,12 +46,14 @@ void NodeInfo::decPending(uint16_t idx) {
}
}
-void NodeInfo::clearPending(uint16_t idx) {
+void
+NodeInfo::clearPending(uint16_t idx) {
SingleNodeInfo& info = getNode(idx);
info._pending = 0;
}
-NodeInfo::SingleNodeInfo& NodeInfo::getNode(uint16_t idx) {
+NodeInfo::SingleNodeInfo&
+NodeInfo::getNode(uint16_t idx) {
const auto index_lbound = static_cast<size_t>(idx) + 1;
while (_nodes.size() < index_lbound) {
_nodes.emplace_back();
@@ -55,7 +62,8 @@ NodeInfo::SingleNodeInfo& NodeInfo::getNode(uint16_t idx) {
return _nodes[idx];
}
-const NodeInfo::SingleNodeInfo& NodeInfo::getNode(uint16_t idx) const {
+const NodeInfo::SingleNodeInfo&
+NodeInfo::getNode(uint16_t idx) const {
const auto index_lbound = static_cast<size_t>(idx) + 1;
while (_nodes.size() < index_lbound) {
_nodes.emplace_back();
diff --git a/storage/src/vespa/storage/distributor/nodeinfo.h b/storage/src/vespa/storage/distributor/nodeinfo.h
index 7f0716d7804..446739ca7e9 100644
--- a/storage/src/vespa/storage/distributor/nodeinfo.h
+++ b/storage/src/vespa/storage/distributor/nodeinfo.h
@@ -17,30 +17,24 @@ namespace storage::distributor {
class NodeInfo {
public:
- explicit NodeInfo(const framework::Clock& clock);
-
+ explicit NodeInfo(const framework::Clock& clock) noexcept;
uint32_t getPendingCount(uint16_t idx) const;
-
bool isBusy(uint16_t idx) const;
-
void setBusy(uint16_t idx, vespalib::duration for_duration);
-
void incPending(uint16_t idx);
-
void decPending(uint16_t idx);
-
void clearPending(uint16_t idx);
private:
struct SingleNodeInfo {
- SingleNodeInfo() : _pending(0), _busyUntilTime() {}
+ SingleNodeInfo() noexcept : _pending(0), _busyUntilTime() {}
- uint32_t _pending;
+ uint32_t _pending;
mutable vespalib::steady_time _busyUntilTime;
};
mutable std::vector<SingleNodeInfo> _nodes;
- const framework::Clock& _clock;
+ const framework::Clock& _clock;
const SingleNodeInfo& getNode(uint16_t idx) const;
SingleNodeInfo& getNode(uint16_t idx);
diff --git a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
index 8c6fdb314f3..854e7d15f82 100644
--- a/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/putoperation.cpp
@@ -10,7 +10,6 @@
#include <vespa/storage/distributor/storage_node_up_states.h>
#include <vespa/storageapi/message/persistence.h>
#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h>
#include <vespa/vdslib/state/clusterstate.h>
#include <algorithm>
@@ -67,13 +66,11 @@ PutOperation::insertDatabaseEntryAndScheduleCreateBucket(const OperationTargetLi
assert(!multipleBuckets);
(void) multipleBuckets;
BucketDatabase::Entry entry(_bucket_space.getBucketDatabase().get(lastBucket));
- std::vector<uint16_t> idealState(
- _bucket_space.get_ideal_service_layer_nodes_bundle(lastBucket).get_available_nodes());
- active = ActiveCopy::calculate(idealState, _bucket_space.getDistribution(), entry,
+ active = ActiveCopy::calculate(_bucket_space.get_ideal_service_layer_nodes_bundle(lastBucket).available_to_index(), _bucket_space.getDistribution(), entry,
_op_ctx.distributor_config().max_activation_inhibited_out_of_sync_groups());
LOG(debug, "Active copies for bucket %s: %s", entry.getBucketId().toString().c_str(), active.toString().c_str());
for (uint32_t i=0; i<active.size(); ++i) {
- BucketCopy copy(*entry->getNode(active[i]._nodeIndex));
+ BucketCopy copy(*entry->getNode(active[i].nodeIndex()));
copy.setActive(true);
entry->updateNode(copy);
}
@@ -211,11 +208,11 @@ void PutOperation::start_direct_put_dispatch(DistributorStripeMessageSender& sen
}
if (!createBucketBatch.empty()) {
- _tracker.queueMessageBatch(createBucketBatch);
+ _tracker.queueMessageBatch(std::move(createBucketBatch));
}
std::vector<PersistenceMessageTracker::ToSend> putBatch;
-
+ putBatch.reserve(targets.size());
// Now send PUTs
for (const auto& target : targets) {
sendPutToBucketOnNode(_msg->getBucket().getBucketSpace(), target.getBucketId(),
@@ -223,7 +220,7 @@ void PutOperation::start_direct_put_dispatch(DistributorStripeMessageSender& sen
}
if (!putBatch.empty()) {
- _tracker.queueMessageBatch(putBatch);
+ _tracker.queueMessageBatch(std::move(putBatch));
} else {
const char* error = "Can't store document: No storage nodes available";
LOG(debug, "%s", error);
diff --git a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
index dd6e1e93791..5f52a8208fc 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removelocationoperation.cpp
@@ -79,13 +79,11 @@ RemoveLocationOperation::onStart(DistributorStripeMessageSender& sender)
std::vector<uint16_t> nodes = e->getNodes();
for (uint32_t i = 0; i < nodes.size(); i++) {
- std::shared_ptr<api::RemoveLocationCommand> command(
- new api::RemoveLocationCommand(
- _msg->getDocumentSelection(),
- document::Bucket(_msg->getBucket().getBucketSpace(), e.getBucketId())));
+ auto command = std::make_shared<api::RemoveLocationCommand>(_msg->getDocumentSelection(),
+ document::Bucket(_msg->getBucket().getBucketSpace(), e.getBucketId()));
copyMessageSettings(*_msg, *command);
- _tracker.queueCommand(command, nodes[i]);
+ _tracker.queueCommand(std::move(command), nodes[i]);
sent = true;
}
}
diff --git a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
index 96182b0744f..42d8e318f47 100644
--- a/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/removeoperation.cpp
@@ -65,9 +65,7 @@ void RemoveOperation::start_conditional_remove(DistributorStripeMessageSender& s
void RemoveOperation::start_direct_remove_dispatch(DistributorStripeMessageSender& sender) {
LOG(spam, "Started remove on document %s", _msg->getDocumentId().toString().c_str());
- document::BucketId bucketId(
- _node_ctx.bucket_id_factory().getBucketId(
- _msg->getDocumentId()));
+ document::BucketId bucketId(_node_ctx.bucket_id_factory().getBucketId(_msg->getDocumentId()));
std::vector<BucketDatabase::Entry> entries;
_bucket_space.getBucketDatabase().getParents(bucketId, entries);
@@ -79,8 +77,7 @@ void RemoveOperation::start_direct_remove_dispatch(DistributorStripeMessageSende
messages.reserve(e->getNodeCount());
for (uint32_t i = 0; i < e->getNodeCount(); i++) {
auto command = std::make_shared<api::RemoveCommand>(document::Bucket(_msg->getBucket().getBucketSpace(), e.getBucketId()),
- _msg->getDocumentId(),
- _msg->getTimestamp());
+ _msg->getDocumentId(), _msg->getTimestamp());
copyMessageSettings(*_msg, *command);
command->getTrace().setLevel(_msg->getTrace().getLevel());
@@ -90,7 +87,7 @@ void RemoveOperation::start_direct_remove_dispatch(DistributorStripeMessageSende
sent = true;
}
- _tracker.queueMessageBatch(messages);
+ _tracker.queueMessageBatch(std::move(messages));
}
if (!sent) {
diff --git a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
index 8988f2589ce..f43a6092372 100644
--- a/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/external/updateoperation.cpp
@@ -106,19 +106,18 @@ UpdateOperation::onStart(DistributorStripeMessageSender& sender)
const std::vector<uint16_t>& nodes = entry->getNodes();
std::vector<MessageTracker::ToSend> messages;
+ messages.reserve(nodes.size());
for (uint16_t node : nodes) {
- auto command = std::make_shared<api::UpdateCommand>(
- document::Bucket(_msg->getBucket().getBucketSpace(), entry.getBucketId()),
- _msg->getUpdate(),
- _msg->getTimestamp());
+ auto command = std::make_shared<api::UpdateCommand>(document::Bucket(_msg->getBucket().getBucketSpace(), entry.getBucketId()),
+ _msg->getUpdate(), _msg->getTimestamp());
copyMessageSettings(*_msg, *command);
command->setOldTimestamp(_msg->getOldTimestamp());
command->setCondition(_msg->getCondition());
messages.emplace_back(std::move(command), node);
}
- _tracker.queueMessageBatch(messages);
+ _tracker.queueMessageBatch(std::move(messages));
}
_tracker.flushQueue(sender);
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
index 5599f9fb51e..2e6d0e95ec9 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/garbagecollectionoperation.cpp
@@ -87,7 +87,7 @@ void GarbageCollectionOperation::send_current_phase_remove_locations(Distributor
command->setPriority((_phase != Phase::WriteRemovesPhase)
? _priority
: _manager->operation_context().distributor_config().default_external_feed_priority());
- _tracker.queueCommand(command, nodes[i]);
+ _tracker.queueCommand(std::move(command), nodes[i]);
}
_tracker.flushQueue(sender);
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
index 0e9873f3434..616c4962dca 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/joinoperation.cpp
@@ -81,12 +81,11 @@ JoinOperation::enqueueJoinMessagePerTargetNode(
return false;
}
for (const auto& node : nodeToBuckets) {
- std::shared_ptr<api::JoinBucketsCommand> msg(
- new api::JoinBucketsCommand(getBucket()));
+ auto msg = std::make_shared<api::JoinBucketsCommand>(getBucket());
msg->getSourceBuckets() = node.second;
msg->setTimeout(MAX_TIMEOUT);
setCommandMeta(*msg);
- _tracker.queueCommand(msg, node.first);
+ _tracker.queueCommand(std::move(msg), node.first);
}
return true;
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
index e46ccebffba..7bec6bbe53a 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/removebucketoperation.cpp
@@ -24,16 +24,11 @@ RemoveBucketOperation::onStartInternal(DistributorStripeMessageSender& sender)
uint16_t node = getNodes()[i];
const BucketCopy* copy(entry->getNode(node));
if (!copy) {
- LOG(debug, "Node %u was removed between scheduling remove "
- "operation and starting it; not sending DeleteBucket to it",
- node);
+ LOG(debug, "Node %u was removed between scheduling remove operation and starting it; not sending DeleteBucket to it", node);
continue;
}
- LOG(debug, "Sending DeleteBucket for %s to node %u",
- getBucketId().toString().c_str(),
- node);
- std::shared_ptr<api::DeleteBucketCommand> msg(
- new api::DeleteBucketCommand(getBucket()));
+ LOG(debug, "Sending DeleteBucket for %s to node %u", getBucketId().toString().c_str(), node);
+ auto msg = std::make_shared<api::DeleteBucketCommand>(getBucket());
setCommandMeta(*msg);
msg->setBucketInfo(copy->getBucketInfo());
msgs.push_back(std::make_pair(node, msg));
@@ -42,8 +37,8 @@ RemoveBucketOperation::onStartInternal(DistributorStripeMessageSender& sender)
_ok = true;
if (!getNodes().empty()) {
_manager->operation_context().remove_nodes_from_bucket_database(getBucket(), getNodes());
- for (uint32_t i = 0; i < msgs.size(); ++i) {
- _tracker.queueCommand(msgs[i].second, msgs[i].first);
+ for (auto & msg : msgs) {
+ _tracker.queueCommand(std::move(msg.second), msg.first);
}
_tracker.flushQueue(sender);
}
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
index 00906d22ea4..9547bee6583 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/setbucketstateoperation.cpp
@@ -26,11 +26,9 @@ SetBucketStateOperation::enqueueSetBucketStateCommand(uint16_t node, bool active
active
? api::SetBucketStateCommand::ACTIVE
: api::SetBucketStateCommand::INACTIVE);
- LOG(debug, "Enqueuing %s for %s to node %u",
- active ? "Activate" : "Deactivate",
- getBucketId().toString().c_str(), node);
+ LOG(debug, "Enqueuing %s for %s to node %u", active ? "Activate" : "Deactivate", getBucketId().toString().c_str(), node);
setCommandMeta(*msg);
- _tracker.queueCommand(msg, node);
+ _tracker.queueCommand(std::move(msg), node);
}
bool
diff --git a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
index 8e64fb227a7..d704a42e96b 100644
--- a/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
+++ b/storage/src/vespa/storage/distributor/operations/idealstate/splitoperation.cpp
@@ -35,7 +35,7 @@ SplitOperation::onStart(DistributorStripeMessageSender& sender)
msg->setMinByteSize(_splitSize);
msg->setTimeout(MAX_TIMEOUT);
setCommandMeta(*msg);
- _tracker.queueCommand(msg, entry->getNodeRef(i).getNode());
+ _tracker.queueCommand(std::move(msg), entry->getNodeRef(i).getNode());
_ok = true;
}
diff --git a/storage/src/vespa/storage/distributor/operationtargetresolver.h b/storage/src/vespa/storage/distributor/operationtargetresolver.h
index 5e3c4a73f66..2de477d03e5 100644
--- a/storage/src/vespa/storage/distributor/operationtargetresolver.h
+++ b/storage/src/vespa/storage/distributor/operationtargetresolver.h
@@ -15,23 +15,23 @@ namespace storage::distributor {
class OperationTarget : public vespalib::AsciiPrintable
{
document::Bucket _bucket;
- lib::Node _node;
- bool _newCopy;
+ lib::Node _node;
+ bool _newCopy;
public:
- OperationTarget() : _newCopy(true) {}
- OperationTarget(const document::Bucket& bucket, const lib::Node& node, bool newCopy)
+ OperationTarget() noexcept : _newCopy(true) {}
+ OperationTarget(const document::Bucket& bucket, const lib::Node& node, bool newCopy) noexcept
: _bucket(bucket), _node(node), _newCopy(newCopy) {}
- document::BucketId getBucketId() const { return _bucket.getBucketId(); }
- document::Bucket getBucket() const { return _bucket; }
- const lib::Node& getNode() const { return _node; }
- bool isNewCopy() const { return _newCopy; }
+ document::BucketId getBucketId() const noexcept { return _bucket.getBucketId(); }
+ document::Bucket getBucket() const noexcept { return _bucket; }
+ const lib::Node& getNode() const noexcept { return _node; }
+ bool isNewCopy() const noexcept { return _newCopy; }
- bool operator==(const OperationTarget& o) const {
+ bool operator==(const OperationTarget& o) const noexcept {
return (_bucket == o._bucket && _node == o._node && _newCopy == o._newCopy);
}
- bool operator!=(const OperationTarget& o) const {
+ bool operator!=(const OperationTarget& o) const noexcept {
return !(operator==(o));
}
@@ -40,13 +40,13 @@ public:
class OperationTargetList : public std::vector<OperationTarget> {
public:
- bool hasAnyNewCopies() const {
+ bool hasAnyNewCopies() const noexcept {
for (size_t i=0; i<size(); ++i) {
if (operator[](i).isNewCopy()) return true;
}
return false;
}
- bool hasAnyExistingCopies() const {
+ bool hasAnyExistingCopies() const noexcept {
for (size_t i=0; i<size(); ++i) {
if (!operator[](i).isNewCopy()) return true;
}
@@ -63,8 +63,7 @@ public:
PUT
};
- virtual OperationTargetList getTargets(OperationType type,
- const document::BucketId& id) = 0;
+ virtual OperationTargetList getTargets(OperationType type, const document::BucketId& id) = 0;
};
}
diff --git a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp
index 6a9d7e0e6da..eb08cf51f43 100644
--- a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp
+++ b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.cpp
@@ -9,23 +9,8 @@
namespace storage::distributor {
-namespace {
-
-lib::IdealNodeList
-make_node_list(const std::vector<uint16_t>& nodes)
-{
- lib::IdealNodeList list;
- for (auto node : nodes) {
- list.push_back(lib::Node(lib::NodeType::STORAGE, node));
- }
- return list;
-}
-
-}
-
-BucketInstance::BucketInstance(
- const document::BucketId& id, const api::BucketInfo& info,
- lib::Node node, uint16_t idealLocationPriority, bool trusted, bool exist)
+BucketInstance::BucketInstance(const document::BucketId& id, const api::BucketInfo& info, lib::Node node,
+ uint16_t idealLocationPriority, bool trusted, bool exist) noexcept
: _bucket(id), _info(info), _node(node),
_idealLocationPriority(idealLocationPriority), _trusted(trusted), _exist(exist)
{
@@ -39,32 +24,25 @@ BucketInstance::print(vespalib::asciistream& out, const PrintProperties&) const
std::ostringstream ost;
ost << std::hex << _bucket.getId();
- out << "(" << ost.str() << ", "
- << infoString << ", node " << _node.getIndex()
- << ", ideal " << _idealLocationPriority
- << (_trusted ? ", trusted" : "")
- << (_exist ? "" : ", new copy")
- << ")";
+ out << "(" << ost.str() << ", " << infoString << ", node " << _node.getIndex() << ", ideal " << _idealLocationPriority
+ << (_trusted ? ", trusted" : "") << (_exist ? "" : ", new copy") << ")";
}
bool
BucketInstanceList::contains(lib::Node node) const {
- for (uint32_t i=0; i<_instances.size(); ++i) {
- if (_instances[i]._node == node) return true;
+ for (const auto & instance : _instances) {
+ if (instance._node == node) return true;
}
return false;
}
void
-BucketInstanceList::add(BucketDatabase::Entry& e,
- const lib::IdealNodeList& idealState)
+BucketInstanceList::add(const BucketDatabase::Entry& e, const IdealServiceLayerNodesBundle::Node2Index & idealState)
{
for (uint32_t i = 0; i < e.getBucketInfo().getNodeCount(); ++i) {
const BucketCopy& copy(e.getBucketInfo().getNodeRef(i));
lib::Node node(lib::NodeType::STORAGE, copy.getNode());
- _instances.push_back(BucketInstance(
- e.getBucketId(), copy.getBucketInfo(), node,
- idealState.indexOf(node), copy.trusted()));
+ _instances.emplace_back(e.getBucketId(), copy.getBucketInfo(), node, idealState.lookup(copy.getNode()), copy.trusted(), true);
}
}
@@ -73,9 +51,9 @@ BucketInstanceList::populate(const document::BucketId& specificId, const Distrib
{
std::vector<BucketDatabase::Entry> entries;
db.getParents(specificId, entries);
- for (uint32_t i=0; i<entries.size(); ++i) {
- lib::IdealNodeList idealNodes(make_node_list(distributor_bucket_space.get_ideal_service_layer_nodes_bundle(entries[i].getBucketId()).get_available_nonretired_or_maintenance_nodes()));
- add(entries[i], idealNodes);
+ for (const auto & entry : entries) {
+ auto node2Index = distributor_bucket_space.get_ideal_service_layer_nodes_bundle(entry.getBucketId()).nonretired_or_maintenance_to_index();
+ add(entry, node2Index);
}
}
@@ -102,40 +80,33 @@ BucketInstanceList::limitToRedundancyCopies(uint16_t redundancy)
}
document::BucketId
-BucketInstanceList::leastSpecificLeafBucketInSubtree(
- const document::BucketId& candidateId,
- const document::BucketId& mostSpecificId,
- const BucketDatabase& db) const
+BucketInstanceList::leastSpecificLeafBucketInSubtree(const document::BucketId& candidateId,
+ const document::BucketId& mostSpecificId,
+ const BucketDatabase& db)
{
assert(candidateId.contains(mostSpecificId));
document::BucketId treeNode = candidateId;
// treeNode may reach at most 58 bits since buckets at 58 bits by definition
// cannot have any children.
while (db.childCount(treeNode) != 0) {
- treeNode = document::BucketId(treeNode.getUsedBits() + 1,
- mostSpecificId.getRawId());
+ treeNode = document::BucketId(treeNode.getUsedBits() + 1, mostSpecificId.getRawId());
}
assert(treeNode.contains(mostSpecificId));
return treeNode;
}
void
-BucketInstanceList::extendToEnoughCopies(
- const DistributorBucketSpace& distributor_bucket_space,
- const BucketDatabase& db,
- const document::BucketId& targetIfNonPreExisting,
- const document::BucketId& mostSpecificId)
+BucketInstanceList::extendToEnoughCopies(const DistributorBucketSpace& distributor_bucket_space, const BucketDatabase& db,
+ const document::BucketId& targetIfNonPreExisting, const document::BucketId& mostSpecificId)
{
- document::BucketId newTarget(_instances.empty() ? targetIfNonPreExisting
- : _instances[0]._bucket);
+ document::BucketId newTarget(_instances.empty() ? targetIfNonPreExisting : _instances[0]._bucket);
newTarget = leastSpecificLeafBucketInSubtree(newTarget, mostSpecificId, db);
- lib::IdealNodeList idealNodes(make_node_list(distributor_bucket_space.get_ideal_service_layer_nodes_bundle(newTarget).get_available_nonretired_nodes()));
+ const auto & idealNodes = distributor_bucket_space.get_ideal_service_layer_nodes_bundle(newTarget).available_nonretired_nodes();
for (uint32_t i=0; i<idealNodes.size(); ++i) {
- if (!contains(idealNodes[i])) {
- _instances.push_back(BucketInstance(
- newTarget, api::BucketInfo(), idealNodes[i],
- i, false, false));
+ lib::Node node(lib::NodeType::STORAGE, idealNodes[i]);
+ if (!contains(node)) {
+ _instances.emplace_back(newTarget, api::BucketInfo(), node, i, false, false);
}
}
}
@@ -145,7 +116,7 @@ BucketInstanceList::createTargets(document::BucketSpace bucketSpace)
{
OperationTargetList result;
for (const auto& bi : _instances) {
- result.push_back(OperationTarget(document::Bucket(bucketSpace, bi._bucket), bi._node, !bi._exist));
+ result.emplace_back(document::Bucket(bucketSpace, bi._bucket), bi._node, !bi._exist);
}
return result;
}
@@ -188,22 +159,17 @@ struct InstanceOrder {
} // anonymous
BucketInstanceList
-OperationTargetResolverImpl::getAllInstances(OperationType type,
- const document::BucketId& id)
+OperationTargetResolverImpl::getAllInstances(OperationType type, const document::BucketId& id)
{
BucketInstanceList instances;
if (type == PUT) {
instances.populate(id, _distributor_bucket_space, _bucketDatabase);
instances.sort(InstanceOrder());
instances.removeNodeDuplicates();
- instances.extendToEnoughCopies(
- _distributor_bucket_space,
- _bucketDatabase,
- _bucketDatabase.getAppropriateBucket(_minUsedBucketBits, id),
- id);
+ instances.extendToEnoughCopies(_distributor_bucket_space, _bucketDatabase,
+ _bucketDatabase.getAppropriateBucket(_minUsedBucketBits, id), id);
} else {
- throw vespalib::IllegalArgumentException(
- "Unsupported operation type given", VESPA_STRLOC);
+ throw vespalib::IllegalArgumentException("Unsupported operation type given", VESPA_STRLOC);
}
return instances;
}
diff --git a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h
index 9ff65475fa4..b76388da9bc 100644
--- a/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h
+++ b/storage/src/vespa/storage/distributor/operationtargetresolverimpl.h
@@ -3,8 +3,8 @@
#pragma once
#include "operationtargetresolver.h"
+#include "ideal_service_layer_nodes_bundle.h"
#include <vespa/storage/bucketdb/bucketdatabase.h>
-#include <vespa/vdslib/distribution/idealnodecalculator.h>
#include <algorithm>
namespace storage::distributor {
@@ -19,11 +19,11 @@ struct BucketInstance : public vespalib::AsciiPrintable {
bool _trusted;
bool _exist;
- BucketInstance() : _idealLocationPriority(0xffff),
- _trusted(false), _exist(false) {}
+ BucketInstance() noexcept
+ : _idealLocationPriority(0xffff), _trusted(false), _exist(false) {}
BucketInstance(const document::BucketId& id, const api::BucketInfo& info,
lib::Node node, uint16_t idealLocationPriority, bool trusted,
- bool exist = true);
+ bool exist) noexcept;
void print(vespalib::asciistream& out, const PrintProperties&) const override;
};
@@ -42,10 +42,10 @@ class BucketInstanceList : public vespalib::AsciiPrintable {
* Postconditions:
* <return value>.contains(mostSpecificId)
*/
- document::BucketId leastSpecificLeafBucketInSubtree(
- const document::BucketId& candidateId,
- const document::BucketId& mostSpecificId,
- const BucketDatabase& db) const;
+ static document::BucketId
+ leastSpecificLeafBucketInSubtree(const document::BucketId& candidateId,
+ const document::BucketId& mostSpecificId,
+ const BucketDatabase& db);
public:
void add(const BucketInstance& instance) { _instances.push_back(instance); }
@@ -65,7 +65,7 @@ public:
const document::BucketId& mostSpecificId);
void populate(const document::BucketId&, const DistributorBucketSpace&, BucketDatabase&);
- void add(BucketDatabase::Entry& e, const lib::IdealNodeList& idealState);
+ void add(const BucketDatabase::Entry& e, const IdealServiceLayerNodesBundle::Node2Index & idealState);
template <typename Order>
void sort(const Order& order) {
@@ -79,9 +79,9 @@ public:
class OperationTargetResolverImpl : public OperationTargetResolver {
const DistributorBucketSpace& _distributor_bucket_space;
- BucketDatabase& _bucketDatabase;
- uint32_t _minUsedBucketBits;
- uint16_t _redundancy;
+ BucketDatabase& _bucketDatabase;
+ uint32_t _minUsedBucketBits;
+ uint16_t _redundancy;
document::BucketSpace _bucketSpace;
public:
@@ -97,8 +97,7 @@ public:
_bucketSpace(bucketSpace)
{}
- BucketInstanceList getAllInstances(OperationType type,
- const document::BucketId& id);
+ BucketInstanceList getAllInstances(OperationType type, const document::BucketId& id);
BucketInstanceList getInstances(OperationType type, const document::BucketId& id) {
BucketInstanceList result(getAllInstances(type, id));
result.limitToRedundancyCopies(_redundancy);
diff --git a/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp b/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp
index 5b8fa6b69e3..7b3cdacf702 100644
--- a/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/pendingmessagetracker.cpp
@@ -17,6 +17,7 @@ PendingMessageTracker::PendingMessageTracker(framework::ComponentRegister& cr, u
_nodeInfo(_component.getClock()),
_nodeBusyDuration(60s),
_deferred_read_tasks(),
+ _trackTime(false),
_lock()
{
_component.registerStatusPage(*this);
@@ -69,6 +70,13 @@ pairAsRange(Pair pair)
return PairAsRange<Pair>(std::move(pair));
}
+document::Bucket
+getBucket(const api::StorageMessage & msg) {
+ return (msg.getType() != api::MessageType::REQUESTBUCKETINFO)
+ ? msg.getBucket()
+ : document::Bucket(msg.getBucket().getBucketSpace(), dynamic_cast<const api::RequestBucketInfoCommand&>(msg).super_bucket_id());
+}
+
}
std::vector<uint64_t>
@@ -91,17 +99,19 @@ PendingMessageTracker::clearMessagesForNode(uint16_t node)
void
PendingMessageTracker::insert(const std::shared_ptr<api::StorageMessage>& msg)
{
- std::lock_guard guard(_lock);
if (msg->getAddress()) {
// TODO STRIPE reevaluate if getBucket() on RequestBucketInfo msgs should transparently return superbucket..!
- document::Bucket bucket = (msg->getType() != api::MessageType::REQUESTBUCKETINFO)
- ? msg->getBucket()
- : document::Bucket(msg->getBucket().getBucketSpace(),
- dynamic_cast<api::RequestBucketInfoCommand&>(*msg).super_bucket_id());
- _messages.emplace(currentTime(), msg->getType().getId(), msg->getPriority(), msg->getMsgId(),
- bucket, msg->getAddress()->getIndex());
-
- _nodeInfo.incPending(msg->getAddress()->getIndex());
+ document::Bucket bucket = getBucket(*msg);
+ {
+ // We will not start tracking time until we have been asked for html at least once.
+ // Time tracking is only used for presenting pending messages for debugging.
+ TimePoint now = (_trackTime.load(std::memory_order_relaxed)) ? currentTime() : TimePoint();
+ std::lock_guard guard(_lock);
+ _messages.emplace(now, msg->getType().getId(), msg->getPriority(), msg->getMsgId(),
+ bucket, msg->getAddress()->getIndex());
+
+ _nodeInfo.incPending(msg->getAddress()->getIndex());
+ }
LOG(debug, "Sending message %s with id %" PRIu64 " to %s",
msg->toString().c_str(), msg->getMsgId(), msg->getAddress()->toString().c_str());
@@ -111,15 +121,13 @@ PendingMessageTracker::insert(const std::shared_ptr<api::StorageMessage>& msg)
document::Bucket
PendingMessageTracker::reply(const api::StorageReply& r)
{
- std::unique_lock guard(_lock);
document::Bucket bucket;
-
LOG(debug, "Got reply: %s", r.toString().c_str());
uint64_t msgId = r.getMsgId();
+ std::unique_lock guard(_lock);
MessagesByMsgId& msgs = boost::multi_index::get<0>(_messages);
MessagesByMsgId::iterator iter = msgs.find(msgId);
-
if (iter != msgs.end()) {
bucket = iter->bucket;
_nodeInfo.decPending(r.getAddress()->getIndex());
@@ -127,7 +135,6 @@ PendingMessageTracker::reply(const api::StorageReply& r)
if (code == api::ReturnCode::BUSY || code == api::ReturnCode::TIMEOUT) {
_nodeInfo.setBusy(r.getAddress()->getIndex(), _nodeBusyDuration);
}
- LOG(debug, "Erased message with id %" PRIu64 " for bucket %s", msgId, bucket.toString().c_str());
msgs.erase(msgId);
auto deferred_tasks = get_deferred_ops_if_bucket_writes_drained(bucket);
// Deferred tasks may try to send messages, which in turn will invoke the PendingMessageTracker.
@@ -139,6 +146,7 @@ PendingMessageTracker::reply(const api::StorageReply& r)
for (auto& task : deferred_tasks) {
task->run(TaskRunState::OK);
}
+ LOG(debug, "Erased message with id %" PRIu64 " for bucket %s", msgId, bucket.toString().c_str());
}
return bucket;
@@ -328,6 +336,7 @@ PendingMessageTracker::getStatusPerNode(std::ostream& out) const
void
PendingMessageTracker::reportHtmlStatus(std::ostream& out, const framework::HttpUrlPath& path) const
{
+ _trackTime.store(true, std::memory_order_relaxed);
if (!path.hasAttribute("order")) {
getStatusStartPage(out);
} else if (path.getAttribute("order") == "bucket") {
diff --git a/storage/src/vespa/storage/distributor/pendingmessagetracker.h b/storage/src/vespa/storage/distributor/pendingmessagetracker.h
index fb672d5ee31..4b5655d3f3c 100644
--- a/storage/src/vespa/storage/distributor/pendingmessagetracker.h
+++ b/storage/src/vespa/storage/distributor/pendingmessagetracker.h
@@ -178,11 +178,12 @@ private:
document::Bucket::hash
>;
- Messages _messages;
- framework::Component _component;
- NodeInfo _nodeInfo;
- vespalib::duration _nodeBusyDuration;
- DeferredBucketTaskMap _deferred_read_tasks;
+ Messages _messages;
+ framework::Component _component;
+ NodeInfo _nodeInfo;
+ vespalib::duration _nodeBusyDuration;
+ DeferredBucketTaskMap _deferred_read_tasks;
+ mutable std::atomic<bool> _trackTime;
// Since distributor is currently single-threaded, this will only
// contend when status page is being accessed. It is, however, required
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
index a30663bde2f..a4295613fd2 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.cpp
@@ -65,9 +65,7 @@ PersistenceMessageTrackerImpl::fail(MessageSender& sender, const api::ReturnCode
}
uint16_t
-PersistenceMessageTrackerImpl::receiveReply(
- MessageSender& sender,
- api::BucketInfoReply& reply)
+PersistenceMessageTrackerImpl::receiveReply(MessageSender& sender, api::BucketInfoReply& reply)
{
uint16_t node = handleReply(reply);
@@ -79,9 +77,7 @@ PersistenceMessageTrackerImpl::receiveReply(
}
void
-PersistenceMessageTrackerImpl::revert(
- MessageSender& sender,
- const std::vector<BucketNodePair>& revertNodes)
+PersistenceMessageTrackerImpl::revert(MessageSender& sender, const std::vector<BucketNodePair>& revertNodes)
{
if (_revertTimestamp != 0) {
// Since we're reverting, all received bucket info is voided.
@@ -101,15 +97,18 @@ PersistenceMessageTrackerImpl::revert(
}
void
-PersistenceMessageTrackerImpl::queueMessageBatch(const std::vector<MessageTracker::ToSend>& messages) {
+PersistenceMessageTrackerImpl::queueMessageBatch(std::vector<MessageTracker::ToSend> messages) {
_messageBatches.emplace_back();
- for (const auto & message : messages) {
+ auto & batch = _messageBatches.back();
+ batch.reserve(messages.size());
+ reserve_more_commands(messages.size());
+ for (auto & message : messages) {
if (_reply) {
message._msg->getTrace().setLevel(_reply->getTrace().getLevel());
}
- _messageBatches.back().push_back(message._msg->getMsgId());
- queueCommand(message._msg, message._target);
+ batch.push_back(message._msg->getMsgId());
+ queueCommand(std::move(message._msg), message._target);
}
}
@@ -153,24 +152,18 @@ PersistenceMessageTrackerImpl::canSendReplyEarly() const
}
void
-PersistenceMessageTrackerImpl::addBucketInfoFromReply(
- uint16_t node,
- const api::BucketInfoReply& reply)
+PersistenceMessageTrackerImpl::addBucketInfoFromReply(uint16_t node, const api::BucketInfoReply& reply)
{
document::Bucket bucket(reply.getBucket());
const api::BucketInfo& bucketInfo(reply.getBucketInfo());
if (reply.hasBeenRemapped()) {
LOG(debug, "Bucket %s: Received remapped bucket info %s from node %d",
- bucket.toString().c_str(),
- bucketInfo.toString().c_str(),
- node);
+ bucket.toString().c_str(), bucketInfo.toString().c_str(), node);
_remapBucketInfo[bucket].emplace_back(_op_ctx.generate_unique_timestamp(), node, bucketInfo);
} else {
LOG(debug, "Bucket %s: Received bucket info %s from node %d",
- bucket.toString().c_str(),
- bucketInfo.toString().c_str(),
- node);
+ bucket.toString().c_str(), bucketInfo.toString().c_str(), node);
_bucketInfo[bucket].emplace_back(_op_ctx.generate_unique_timestamp(), node, bucketInfo);
}
}
@@ -179,17 +172,12 @@ void
PersistenceMessageTrackerImpl::logSuccessfulReply(uint16_t node, const api::BucketInfoReply& reply) const
{
LOG(spam, "Bucket %s: Received successful reply %s",
- reply.getBucketId().toString().c_str(),
- reply.toString().c_str());
+ reply.getBucketId().toString().c_str(), reply.toString().c_str());
if (!reply.getBucketInfo().valid()) {
- LOG(error,
- "Reply %s from node %d contained invalid bucket "
- "information %s. This is a bug! Please report "
- "this to the Vespa team",
- reply.toString().c_str(),
- node,
- reply.getBucketInfo().toString().c_str());
+ LOG(error, "Reply %s from node %d contained invalid bucket information %s. This is a bug! "
+ "Please report this to the Vespa team",
+ reply.toString().c_str(), node, reply.getBucketInfo().toString().c_str());
}
}
@@ -233,12 +221,8 @@ void
PersistenceMessageTrackerImpl::updateFailureResult(const api::BucketInfoReply& reply)
{
LOG(debug, "Bucket %s: Received failed reply %s with result %s",
- reply.getBucketId().toString().c_str(),
- reply.toString().c_str(),
- reply.getResult().toString().c_str());
- if (reply.getResult().getResult() >
- _reply->getResult().getResult())
- {
+ reply.getBucketId().toString().c_str(), reply.toString().c_str(), reply.getResult().toString().c_str());
+ if (reply.getResult().getResult() > _reply->getResult().getResult()) {
_reply->setResult(reply.getResult());
}
@@ -246,12 +230,9 @@ PersistenceMessageTrackerImpl::updateFailureResult(const api::BucketInfoReply& r
}
void
-PersistenceMessageTrackerImpl::handleCreateBucketReply(
- api::BucketInfoReply& reply,
- uint16_t node)
+PersistenceMessageTrackerImpl::handleCreateBucketReply(api::BucketInfoReply& reply, uint16_t node)
{
- LOG(spam, "Received CreateBucket reply for %s from node %u",
- reply.getBucketId().toString().c_str(), node);
+ LOG(spam, "Received CreateBucket reply for %s from node %u", reply.getBucketId().toString().c_str(), node);
if (!reply.getResult().success()
&& reply.getResult().getResult() != api::ReturnCode::EXISTS)
{
@@ -268,9 +249,7 @@ PersistenceMessageTrackerImpl::handleCreateBucketReply(
}
void
-PersistenceMessageTrackerImpl::handlePersistenceReply(
- api::BucketInfoReply& reply,
- uint16_t node)
+PersistenceMessageTrackerImpl::handlePersistenceReply(api::BucketInfoReply& reply, uint16_t node)
{
++_n_persistence_replies_total;
if (reply.getBucketInfo().valid()) {
@@ -295,10 +274,7 @@ PersistenceMessageTrackerImpl::transfer_trace_state_to_reply()
}
void
-PersistenceMessageTrackerImpl::updateFromReply(
- MessageSender& sender,
- api::BucketInfoReply& reply,
- uint16_t node)
+PersistenceMessageTrackerImpl::updateFromReply(MessageSender& sender, api::BucketInfoReply& reply, uint16_t node)
{
_trace.addChild(reply.steal_trace());
diff --git a/storage/src/vespa/storage/distributor/persistencemessagetracker.h b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
index 923ecf45649..9b06547dd98 100644
--- a/storage/src/vespa/storage/distributor/persistencemessagetracker.h
+++ b/storage/src/vespa/storage/distributor/persistencemessagetracker.h
@@ -8,7 +8,6 @@
#include <vespa/storageapi/messageapi/bucketinfocommand.h>
#include <vespa/storageapi/messageapi/bucketinforeply.h>
-
namespace storage::distributor {
struct PersistenceMessageTracker {
@@ -16,7 +15,7 @@ struct PersistenceMessageTracker {
using ToSend = MessageTracker::ToSend;
virtual void fail(MessageSender&, const api::ReturnCode&) = 0;
- virtual void queueMessageBatch(const std::vector<ToSend>&) = 0;
+ virtual void queueMessageBatch(std::vector<ToSend> messages) = 0;
virtual uint16_t receiveReply(MessageSender&, api::BucketInfoReply&) = 0;
virtual std::shared_ptr<api::BucketInfoReply>& getReply() = 0;
virtual void updateFromReply(MessageSender&, api::BucketInfoReply&, uint16_t node) = 0;
@@ -65,7 +64,7 @@ public:
have at most (messages.size() - initial redundancy) messages left in the
queue and have it's first message be done.
*/
- void queueMessageBatch(const std::vector<MessageTracker::ToSend>& messages) override;
+ void queueMessageBatch(std::vector<MessageTracker::ToSend> messages) override;
private:
using MessageBatch = std::vector<uint64_t>;
diff --git a/storage/src/vespa/storage/distributor/statechecker.cpp b/storage/src/vespa/storage/distributor/statechecker.cpp
index 27a60b73716..cd8b6e934d4 100644
--- a/storage/src/vespa/storage/distributor/statechecker.cpp
+++ b/storage/src/vespa/storage/distributor/statechecker.cpp
@@ -51,13 +51,11 @@ public:
StateChecker::Result
StateChecker::Result::noMaintenanceNeeded()
{
- return Result(std::unique_ptr<ResultImpl>());
+ return Result({});
}
StateChecker::Result
-StateChecker::Result::createStoredResult(
- IdealStateOperation::UP operation,
- MaintenancePriority::Priority priority)
+StateChecker::Result::createStoredResult(IdealStateOperation::UP operation, MaintenancePriority::Priority priority)
{
return Result(std::make_unique<StoredResultImpl>(std::move(operation), MaintenancePriority(priority)));
}
@@ -74,15 +72,13 @@ StateChecker::Context::Context(const DistributorNodeContext& node_ctx_in,
distributorConfig(op_ctx_in.distributor_config()),
distribution(distributorBucketSpace.getDistribution()),
gcTimeCalculator(op_ctx_in.bucket_id_hasher(), distributorConfig.getGarbageCollectionInterval()),
+ idealStateBundle(distributorBucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId())),
node_ctx(node_ctx_in),
op_ctx(op_ctx_in),
db(distributorBucketSpace.getBucketDatabase()),
stats(statsTracker),
merges_inhibited_in_bucket_space(distributorBucketSpace.merges_inhibited())
-{
- idealState = distributorBucketSpace.get_ideal_service_layer_nodes_bundle(bucket.getBucketId()).get_available_nonretired_or_maintenance_nodes();
- unorderedIdealState.insert(idealState.begin(), idealState.end());
-}
+{ }
StateChecker::Context::~Context() = default;
diff --git a/storage/src/vespa/storage/distributor/statechecker.h b/storage/src/vespa/storage/distributor/statechecker.h
index 830e05676be..d120b5e62d7 100644
--- a/storage/src/vespa/storage/distributor/statechecker.h
+++ b/storage/src/vespa/storage/distributor/statechecker.h
@@ -2,6 +2,7 @@
#pragma once
#include "bucketgctimecalculator.h"
+#include "ideal_service_layer_nodes_bundle.h"
#include <vespa/storage/distributor/maintenance/maintenancepriority.h>
#include <vespa/storage/distributor/operations/idealstate/idealstateoperation.h>
#include <vespa/storage/common/storagecomponent.h>
@@ -63,27 +64,21 @@ public:
std::vector<BucketDatabase::Entry> entries;
// Common
- const lib::ClusterState& systemState;
- const lib::ClusterState* pending_cluster_state; // nullptr if no state is pending.
- const DistributorConfiguration& distributorConfig;
- const lib::Distribution& distribution;
- BucketGcTimeCalculator gcTimeCalculator;
-
- // Separate ideal state into ordered sequence and unordered set, as we
- // need to both know the actual order (activation prioritization etc) as
- // well as have the ability to quickly check if a node is in an ideal
- // location.
- std::vector<uint16_t> idealState;
- vespalib::hash_set<uint16_t> unorderedIdealState;
-
- const DistributorNodeContext& node_ctx;
- const DistributorStripeOperationContext& op_ctx;
- const BucketDatabase& db;
- NodeMaintenanceStatsTracker& stats;
- const bool merges_inhibited_in_bucket_space;
-
- const BucketDatabase::Entry& getSiblingEntry() const noexcept {
- return siblingEntry;
+ const lib::ClusterState & systemState;
+ const lib::ClusterState * pending_cluster_state; // nullptr if no state is pending.
+ const DistributorConfiguration & distributorConfig;
+ const lib::Distribution & distribution;
+ BucketGcTimeCalculator gcTimeCalculator;
+ const IdealServiceLayerNodesBundle & idealStateBundle;
+ const DistributorNodeContext & node_ctx;
+ const DistributorStripeOperationContext & op_ctx;
+ const BucketDatabase & db;
+ NodeMaintenanceStatsTracker & stats;
+ const bool merges_inhibited_in_bucket_space;
+
+ const BucketDatabase::Entry& getSiblingEntry() const noexcept { return siblingEntry; }
+ IdealServiceLayerNodesBundle::ConstNodesRef idealState() const noexcept {
+ return idealStateBundle.available_nonretired_or_maintenance_nodes();
}
document::Bucket getBucket() const noexcept { return bucket; }
@@ -107,28 +102,19 @@ public:
std::unique_ptr<ResultImpl> _impl;
public:
IdealStateOperation::UP createOperation() {
- return (_impl
- ? _impl->createOperation()
- : IdealStateOperation::UP());
+ return (_impl ? _impl->createOperation() : IdealStateOperation::UP());
}
MaintenancePriority getPriority() const {
- return (_impl
- ? _impl->getPriority()
- : MaintenancePriority());
+ return (_impl ? _impl->getPriority() : MaintenancePriority());
}
MaintenanceOperation::Type getType() const {
- return (_impl
- ? _impl->getType()
- : MaintenanceOperation::OPERATION_COUNT);
-
+ return (_impl ? _impl->getType() : MaintenanceOperation::OPERATION_COUNT);
}
static Result noMaintenanceNeeded();
- static Result createStoredResult(
- IdealStateOperation::UP operation,
- MaintenancePriority::Priority priority);
+ static Result createStoredResult(IdealStateOperation::UP operation, MaintenancePriority::Priority priority);
private:
explicit Result(std::unique_ptr<ResultImpl> impl)
: _impl(std::move(impl))
diff --git a/storage/src/vespa/storage/distributor/statecheckers.cpp b/storage/src/vespa/storage/distributor/statecheckers.cpp
index fe1f4422c45..478faa38232 100644
--- a/storage/src/vespa/storage/distributor/statecheckers.cpp
+++ b/storage/src/vespa/storage/distributor/statecheckers.cpp
@@ -27,9 +27,7 @@ SplitBucketStateChecker::validForSplit(Context& c)
{
// Can't split if we have no nodes.
if (c.entry->getNodeCount() == 0) {
- LOG(spam,
- "Can't split bucket %s, since it has no copies",
- c.bucket.toString().c_str());
+ LOG(spam, "Can't split bucket %s, since it has no copies", c.bucket.toString().c_str());
return false;
}
@@ -44,38 +42,30 @@ SplitBucketStateChecker::validForSplit(Context& c)
double
SplitBucketStateChecker::getBucketSizeRelativeToMax(Context& c)
{
- const BucketInfo& info(c.entry.getBucketInfo());
- const uint32_t highestDocumentCount(info.getHighestDocumentCount());
- const uint32_t highestTotalDocumentSize(info.getHighestTotalDocumentSize());
- const uint32_t highestMetaCount(info.getHighestMetaCount());
- const uint32_t highestUsedFileSize(info.getHighestUsedFileSize());
+ auto highest = c.entry.getBucketInfo().getHighest();
- if (highestDocumentCount < 2) {
+ if (highest._documentCount < 2) {
return 0;
}
double byteSplitRatio = 0;
if (c.distributorConfig.getSplitSize() > 0) {
- byteSplitRatio = static_cast<double>(highestTotalDocumentSize)
- / c.distributorConfig.getSplitSize();
+ byteSplitRatio = static_cast<double>(highest._totalDocumentSize) / c.distributorConfig.getSplitSize();
}
double docSplitRatio = 0;
if (c.distributorConfig.getSplitCount() > 0) {
- docSplitRatio = static_cast<double>(highestDocumentCount)
- / c.distributorConfig.getSplitCount();
+ docSplitRatio = static_cast<double>(highest._documentCount) / c.distributorConfig.getSplitCount();
}
double fileSizeRatio = 0;
if (c.distributorConfig.getSplitSize() > 0) {
- fileSizeRatio = static_cast<double>(highestUsedFileSize)
- / (2 * c.distributorConfig.getSplitSize());
+ fileSizeRatio = static_cast<double>(highest._usedFileSize) / (2 * c.distributorConfig.getSplitSize());
}
double metaSplitRatio = 0;
if (c.distributorConfig.getSplitCount() > 0) {
- metaSplitRatio = static_cast<double>(highestMetaCount)
- / (2 * c.distributorConfig.getSplitCount());
+ metaSplitRatio = static_cast<double>(highest._metaCount) / (2 * c.distributorConfig.getSplitCount());
}
return std::max(std::max(byteSplitRatio, docSplitRatio),
@@ -83,47 +73,31 @@ SplitBucketStateChecker::getBucketSizeRelativeToMax(Context& c)
}
StateChecker::Result
-SplitBucketStateChecker::generateMinimumBucketSplitOperation(
- Context& c)
+SplitBucketStateChecker::generateMinimumBucketSplitOperation(Context& c)
{
- auto so = std::make_unique<SplitOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), c.entry->getNodes()),
- c.distributorConfig.getMinimalBucketSplit(),
- 0,
- 0);
+ auto so = std::make_unique<SplitOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), c.entry->getNodes()),
+ c.distributorConfig.getMinimalBucketSplit(), 0, 0);
so->setPriority(c.distributorConfig.getMaintenancePriorities().splitDistributionBits);
- so->setDetailedReason(
- "[Splitting bucket because the current system size requires "
- "a higher minimum split bit]");
+ so->setDetailedReason("[Splitting bucket because the current system size requires a higher minimum split bit]");
return Result::createStoredResult(std::move(so), MaintenancePriority::MEDIUM);
}
StateChecker::Result
-SplitBucketStateChecker::generateMaxSizeExceededSplitOperation(
- Context& c)
+SplitBucketStateChecker::generateMaxSizeExceededSplitOperation(Context& c)
{
- auto so = std::make_unique<SplitOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), c.entry->getNodes()),
- 58,
- c.distributorConfig.getSplitCount(),
- c.distributorConfig.getSplitSize());
+ auto so = std::make_unique<SplitOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), c.entry->getNodes()), 58,
+ c.distributorConfig.getSplitCount(), c.distributorConfig.getSplitSize());
so->setPriority(c.distributorConfig.getMaintenancePriorities().splitLargeBucket);
- const BucketInfo& info(c.entry.getBucketInfo());
+ auto highest = c.entry.getBucketInfo().getHighest();
vespalib::asciistream ost;
ost << "[Splitting bucket because its maximum size ("
- << info.getHighestTotalDocumentSize()
- << " b, "
- << info.getHighestDocumentCount()
- << " docs, "
- << info.getHighestMetaCount()
- << " meta, "
- << info.getHighestUsedFileSize()
- << " b total"
+ << highest._totalDocumentSize << " b, "
+ << highest._documentCount << " docs, "
+ << highest._metaCount << " meta, "
+ << highest._usedFileSize << " b total"
<< ") is higher than the configured limit of ("
<< c.distributorConfig.getSplitSize()
<< ", " << c.distributorConfig.getSplitCount() << ")]";
@@ -159,9 +133,10 @@ JoinBucketsStateChecker::isFirstSibling(const document::BucketId& bucketId)
namespace {
+using ConstNodesRef = IdealServiceLayerNodesBundle::ConstNodesRef;
+
bool
-equalNodeSet(const std::vector<uint16_t>& idealState,
- const BucketDatabase::Entry& dbEntry)
+equalNodeSet(ConstNodesRef idealState, const BucketDatabase::Entry& dbEntry)
{
if (idealState.size() != dbEntry->getNodeCount()) {
return false;
@@ -179,12 +154,10 @@ equalNodeSet(const std::vector<uint16_t>& idealState,
bool
bucketAndSiblingReplicaLocationsEqualIdealState(const StateChecker::Context& context)
{
- if (!equalNodeSet(context.idealState, context.entry)) {
+ if (!equalNodeSet(context.idealState(), context.entry)) {
return false;
}
- std::vector<uint16_t> siblingIdealState(
- context.distribution.getIdealStorageNodes(
- context.systemState, context.siblingBucket));
+ std::vector<uint16_t> siblingIdealState = context.distribution.getIdealStorageNodes(context.systemState, context.siblingBucket);
if (!equalNodeSet(siblingIdealState, context.siblingEntry)) {
return false;
}
@@ -204,6 +177,42 @@ inconsistentJoinIsAllowed(const StateChecker::Context& context)
&& bucketAndSiblingReplicaLocationsEqualIdealState(context));
}
+bool
+isInconsistentlySplit(const StateChecker::Context& c)
+{
+ return (c.entries.size() > 1);
+}
+
+// We don't want to invoke joins on buckets that have more replicas than
+// required. This is in particular because joins cause ideal states to change
+// for the target buckets and trigger merges. Since the removal of the non-
+// ideal replicas is done by the DeleteBuckets state-checker, it will become
+// preempted by potential follow-up joins unless we explicitly avoid these.
+bool
+contextBucketHasTooManyReplicas(const StateChecker::Context& c)
+{
+ return (c.entry->getNodeCount() > c.distribution.getRedundancy());
+}
+
+bool
+bucketAtDistributionBitLimit(const document::BucketId& bucket, const StateChecker::Context& c)
+{
+ return (bucket.getUsedBits() <= std::max(uint32_t(c.systemState.getDistributionBitCount()),
+ c.distributorConfig.getMinimalBucketSplit()));
+}
+
+bool
+legalBucketSplitLevel(const document::BucketId& bucket, const StateChecker::Context& c)
+{
+ return bucket.getUsedBits() >= c.distributorConfig.getMinimalBucketSplit();
+}
+
+bool
+bucketHasMultipleChildren(const document::BucketId& bucket, const StateChecker::Context& c)
+{
+ return c.db.childCount(bucket) > 1;
+}
+
} // anon ns
bool
@@ -213,41 +222,29 @@ JoinBucketsStateChecker::siblingsAreInSync(const Context& context)
const auto& siblingEntry(context.siblingEntry);
if (entry->getNodeCount() != siblingEntry->getNodeCount()) {
- LOG(spam,
- "Not joining bucket %s because sibling bucket %s had different "
- "node count",
- context.bucket.toString().c_str(),
- context.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because sibling bucket %s had different node count",
+ context.bucket.toString().c_str(), context.siblingBucket.toString().c_str());
return false;
}
bool siblingsCoLocated = true;
for (uint32_t i = 0; i < entry->getNodeCount(); ++i) {
- if (entry->getNodeRef(i).getNode()
- != siblingEntry->getNodeRef(i).getNode())
- {
+ if (entry->getNodeRef(i).getNode() != siblingEntry->getNodeRef(i).getNode()) {
siblingsCoLocated = false;
break;
}
}
if (!siblingsCoLocated && !inconsistentJoinIsAllowed(context)) {
- LOG(spam,
- "Not joining bucket %s because sibling bucket %s "
- "does not have the same node set, or inconsistent joins cannot be "
- "performed either due to config or because replicas were not in "
- "their ideal location",
- context.bucket.toString().c_str(),
- context.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because sibling bucket %s does not have the same node set, or inconsistent "
+ "joins cannot be performed either due to config or because replicas were not in their ideal location",
+ context.bucket.toString().c_str(), context.siblingBucket.toString().c_str());
return false;
}
if (!entry->validAndConsistent() || !siblingEntry->validAndConsistent()) {
- LOG(spam,
- "Not joining bucket %s because it or %s is out of sync "
- "and syncing it may cause it to become too large",
- context.bucket.toString().c_str(),
- context.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because it or %s is out of sync and syncing it may cause it to become too large",
+ context.bucket.toString().c_str(), context.siblingBucket.toString().c_str());
return false;
}
@@ -275,58 +272,27 @@ JoinBucketsStateChecker::singleBucketJoinIsEnabled(const Context& c)
return c.distributorConfig.getEnableJoinForSiblingLessBuckets();
}
-namespace {
-
-// We don't want to invoke joins on buckets that have more replicas than
-// required. This is in particular because joins cause ideal states to change
-// for the target buckets and trigger merges. Since the removal of the non-
-// ideal replicas is done by the DeleteBuckets state-checker, it will become
-// preempted by potential follow-up joins unless we explicitly avoid these.
-bool
-contextBucketHasTooManyReplicas(const StateChecker::Context& c)
-{
- return (c.entry->getNodeCount() > c.distribution.getRedundancy());
-}
-
-bool
-bucketAtDistributionBitLimit(const document::BucketId& bucket, const StateChecker::Context& c)
-{
- return (bucket.getUsedBits() <= std::max(
- uint32_t(c.systemState.getDistributionBitCount()),
- c.distributorConfig.getMinimalBucketSplit()));
-}
-
-}
-
bool
JoinBucketsStateChecker::shouldJoin(const Context& c)
{
if (c.entry->getNodeCount() == 0) {
- LOG(spam, "Not joining bucket %s because it has no nodes",
- c.bucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because it has no nodes", c.bucket.toString().c_str());
return false;
}
if (contextBucketHasTooManyReplicas(c)) {
- LOG(spam, "Not joining %s because it has too high replication level",
- c.bucket.toString().c_str());
+ LOG(spam, "Not joining %s because it has too high replication level", c.bucket.toString().c_str());
return false;
}
if (c.distributorConfig.getJoinSize() == 0 && c.distributorConfig.getJoinCount() == 0) {
- LOG(spam, "Not joining bucket %s because join is disabled",
- c.bucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because join is disabled", c.bucket.toString().c_str());
return false;
}
if (bucketAtDistributionBitLimit(c.getBucketId(), c)) {
- LOG(spam,
- "Not joining bucket %s because it is below the min split "
- "count (config: %u, cluster state: %u, bucket has: %u)",
- c.bucket.toString().c_str(),
- c.distributorConfig.getMinimalBucketSplit(),
- c.systemState.getDistributionBitCount(),
- c.getBucketId().getUsedBits());
+ LOG(spam, "Not joining bucket %s because it is below the min split count (config: %u, cluster state: %u, bucket has: %u)",
+ c.bucket.toString().c_str(), c.distributorConfig.getMinimalBucketSplit(), c.systemState.getDistributionBitCount(), c.getBucketId().getUsedBits());
return false;
}
@@ -336,11 +302,8 @@ JoinBucketsStateChecker::shouldJoin(const Context& c)
if (c.getSiblingEntry().valid()) {
if (!isFirstSibling(c.getBucketId())) {
- LOG(spam,
- "Not joining bucket %s because it is the second sibling of "
- "%s and not the first",
- c.bucket.toString().c_str(),
- c.siblingBucket.toString().c_str());
+ LOG(spam, "Not joining bucket %s because it is the second sibling of %s and not the first",
+ c.bucket.toString().c_str(), c.siblingBucket.toString().c_str());
return false;
}
if (!siblingsAreInSync(c)) {
@@ -402,22 +365,6 @@ JoinBucketsStateChecker::smallEnoughToJoin(const Context& c)
return true;
}
-namespace {
-
-bool
-legalBucketSplitLevel(const document::BucketId& bucket, const StateChecker::Context& c)
-{
- return bucket.getUsedBits() >= c.distributorConfig.getMinimalBucketSplit();
-}
-
-bool
-bucketHasMultipleChildren(const document::BucketId& bucket, const StateChecker::Context& c)
-{
- return c.db.childCount(bucket) > 1;
-}
-
-}
-
document::Bucket
JoinBucketsStateChecker::computeJoinBucket(const Context& c)
{
@@ -463,24 +410,13 @@ JoinBucketsStateChecker::check(Context& c) const
sourceBuckets.push_back(c.getBucketId());
}
sourceBuckets.push_back(c.getBucketId());
- auto op = std::make_unique<JoinOperation>(
- c.node_ctx,
- BucketAndNodes(joinedBucket, c.entry->getNodes()),
- sourceBuckets);
+ auto op = std::make_unique<JoinOperation>(c.node_ctx, BucketAndNodes(joinedBucket, c.entry->getNodes()), sourceBuckets);
op->setPriority(c.distributorConfig.getMaintenancePriorities().joinBuckets);
vespalib::asciistream ost;
- ost << "[Joining buckets "
- << sourceBuckets[1].toString()
- << " and " << sourceBuckets[0].toString()
- << " because their size ("
- << getTotalUsedFileSize(c)
- << " bytes, "
- << getTotalMetaCount(c)
- << " docs) is less than the configured limit of ("
- << c.distributorConfig.getJoinSize()
- << ", "
- << c.distributorConfig.getJoinCount()
- << ")";
+ ost << "[Joining buckets " << sourceBuckets[1].toString() << " and " << sourceBuckets[0].toString()
+ << " because their size (" << getTotalUsedFileSize(c) << " bytes, "
+ << getTotalMetaCount(c) << " docs) is less than the configured limit of ("
+ << c.distributorConfig.getJoinSize() << ", " << c.distributorConfig.getJoinCount() << ")";
op->setDetailedReason(ost.str());
@@ -516,8 +452,7 @@ vespalib::string
SplitInconsistentStateChecker::getReason(const document::BucketId& bucketId, const std::vector<BucketDatabase::Entry>& entries)
{
vespalib::asciistream reason;
- reason << "[Bucket is inconsistently split (list includes "
- << vespalib::hex << "0x" << bucketId.getId();
+ reason << "[Bucket is inconsistently split (list includes " << vespalib::hex << "0x" << bucketId.getId();
for (uint32_t i = 0, found = 0; i < entries.size() && found < 3; i++) {
if (!(entries[i].getBucketId() == bucketId)) {
@@ -530,24 +465,11 @@ SplitInconsistentStateChecker::getReason(const document::BucketId& bucketId, con
reason << " and " << vespalib::dec << entries.size() - 4 << " others";
}
- reason << ") Splitting it to improve the problem (max used bits "
- << vespalib::dec
- << getHighestUsedBits(entries)
- << ")]";
+ reason << ") Splitting it to improve the problem (max used bits " << vespalib::dec << getHighestUsedBits(entries) << ")]";
return reason.str();
}
-namespace {
-
-bool
-isInconsistentlySplit(const StateChecker::Context& c)
-{
- return (c.entries.size() > 1);
-}
-
-}
-
StateChecker::Result
SplitInconsistentStateChecker::check(Context& c) const
{
@@ -559,12 +481,8 @@ SplitInconsistentStateChecker::check(Context& c) const
return Result::noMaintenanceNeeded();
}
- auto op = std::make_unique<SplitOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), c.entry->getNodes()),
- getHighestUsedBits(c.entries),
- 0,
- 0);
+ auto op = std::make_unique<SplitOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), c.entry->getNodes()),
+ getHighestUsedBits(c.entries), 0, 0);
op->setPriority(c.distributorConfig.getMaintenancePriorities().splitInconsistentBucket);
op->setDetailedReason(getReason(c.getBucketId(), c.entries));
@@ -573,24 +491,24 @@ SplitInconsistentStateChecker::check(Context& c) const
namespace {
-bool containsMaintenanceNode(const std::vector<uint16_t>& ideal, const StateChecker::Context& c)
+bool
+containsMaintenanceNode(ConstNodesRef ideal, const StateChecker::Context& c)
{
for (uint16_t n : ideal) {
- lib::Node node(lib::NodeType::STORAGE, n);
- if (c.systemState.getNodeState(node).getState() == lib::State::MAINTENANCE) {
+ if (c.systemState.getNodeState(lib::Node(lib::NodeType::STORAGE, n)).getState() == lib::State::MAINTENANCE) {
return true;
}
}
return false;
}
-bool ideal_node_is_unavailable_in_pending_state(const StateChecker::Context& c) {
+bool
+ideal_node_is_unavailable_in_pending_state(const StateChecker::Context& c) {
if (!c.pending_cluster_state) {
return false;
}
- for (uint16_t n : c.idealState) {
- lib::Node node(lib::NodeType::STORAGE, n);
- if (!c.pending_cluster_state->getNodeState(node).getState().oneOf("uir")){
+ for (uint16_t n : c.idealState()) {
+ if (!c.pending_cluster_state->getNodeState(lib::Node(lib::NodeType::STORAGE, n)).getState().oneOf("uir")){
return true;
}
}
@@ -598,9 +516,7 @@ bool ideal_node_is_unavailable_in_pending_state(const StateChecker::Context& c)
}
bool
-consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(
- const std::vector<uint16_t>& idealNodes,
- const BucketInfo& entry)
+consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(ConstNodesRef idealNodes, const BucketInfo& entry)
{
api::BucketInfo info;
for (uint32_t i=0, n=entry.getNodeCount(); i<n; ++i) {
@@ -634,17 +550,8 @@ consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(
class MergeNodes
{
public:
- MergeNodes()
- : _reason(), _nodes(), _problemFlags(0), _priority(255)
- {}
-
- explicit MergeNodes(const BucketDatabase::Entry& entry)
- : _reason(), _nodes(), _problemFlags(0), _priority(255)
- {
- for (uint16_t i = 0; i < entry->getNodeCount(); i++) {
- addNode(entry->getNodeRef(i).getNode());
- }
- }
+ MergeNodes() noexcept;
+ explicit MergeNodes(const BucketDatabase::Entry& entry);
MergeNodes(MergeNodes && rhs) noexcept = default;
MergeNodes & operator =(MergeNodes && rhs) noexcept = delete;
MergeNodes(const MergeNodes & rhs) = delete;
@@ -652,40 +559,16 @@ public:
~MergeNodes();
- void operator+=(const MergeNodes& other) {
- _reason << other._reason.str();
- _problemFlags |= other._problemFlags;
- _nodes.insert(_nodes.end(), other._nodes.begin(), other._nodes.end());
- updatePriority(other._priority);
- }
-
- bool shouldMerge() const {
+ bool shouldMerge() const noexcept {
return _problemFlags != 0;
}
- void markMoveToIdealLocation(uint16_t node, uint8_t msgPriority) {
- _reason << "[Moving bucket to ideal node " << node << "]";
- addProblem(NON_IDEAL_LOCATION);
- addNode(node);
- updatePriority(msgPriority);
- }
-
- void markOutOfSync(const StateChecker::Context& c, uint8_t msgPriority) {
- _reason << "[Synchronizing buckets with different checksums "
- << c.entry->toString()
- << "]";
- addProblem(OUT_OF_SYNC);
- updatePriority(msgPriority);
- }
-
- void markMissingReplica(uint16_t node, uint8_t msgPriority) {
- _reason << "[Adding missing node " << node << "]";
- addProblem(MISSING_REPLICA);
- addNode(node);
- updatePriority(msgPriority);
- }
+ void operator+=(const MergeNodes& other);
+ void markMoveToIdealLocation(uint16_t node, uint8_t msgPriority);
+ void markOutOfSync(const StateChecker::Context& c, uint8_t msgPriority);
+ void markMissingReplica(uint16_t node, uint8_t msgPriority);
- bool needsMoveOnly() const {
+ bool needsMoveOnly() const noexcept {
return _problemFlags == NON_IDEAL_LOCATION;
}
@@ -698,11 +581,11 @@ public:
std::string reason() const { return _reason.str(); }
private:
- void updatePriority(uint8_t pri) {
+ void updatePriority(uint8_t pri) noexcept {
_priority = std::min(pri, _priority);
}
- void addProblem(uint8_t newProblem) {
+ void addProblem(uint8_t newProblem) noexcept {
_problemFlags |= newProblem;
}
@@ -713,16 +596,68 @@ private:
};
vespalib::asciistream _reason;
std::vector<uint16_t> _nodes;
- uint8_t _problemFlags;
- uint8_t _priority;
+ uint8_t _problemFlags;
+ uint8_t _priority;
};
+MergeNodes::MergeNodes() noexcept
+ : _reason(),
+ _nodes(),
+ _problemFlags(0),
+ _priority(255)
+{}
+
+MergeNodes::MergeNodes(const BucketDatabase::Entry& entry)
+ : _reason(),
+ _nodes(),
+ _problemFlags(0),
+ _priority(255)
+{
+ _nodes.reserve(entry->getNodeCount());
+ for (uint16_t i = 0; i < entry->getNodeCount(); i++) {
+ addNode(entry->getNodeRef(i).getNode());
+ }
+}
+
MergeNodes::~MergeNodes() = default;
+
+void
+MergeNodes::operator+=(const MergeNodes& other) {
+ _reason << other._reason.str();
+ _problemFlags |= other._problemFlags;
+ _nodes.reserve(_nodes.size() + other._nodes.size());
+ _nodes.insert(_nodes.end(), other._nodes.begin(), other._nodes.end());
+ updatePriority(other._priority);
+}
+
+void
+MergeNodes::markMoveToIdealLocation(uint16_t node, uint8_t msgPriority) {
+ _reason << "[Moving bucket to ideal node " << node << "]";
+ addProblem(NON_IDEAL_LOCATION);
+ addNode(node);
+ updatePriority(msgPriority);
+}
+
+void
+MergeNodes::markOutOfSync(const StateChecker::Context& c, uint8_t msgPriority) {
+ _reason << "[Synchronizing buckets with different checksums " << c.entry->toString() << "]";
+ addProblem(OUT_OF_SYNC);
+ updatePriority(msgPriority);
+}
+
+void
+MergeNodes::markMissingReplica(uint16_t node, uint8_t msgPriority) {
+ _reason << "[Adding missing node " << node << "]";
+ addProblem(MISSING_REPLICA);
+ addNode(node);
+ updatePriority(msgPriority);
+}
+
bool
-presentInIdealState(const StateChecker::Context& c, uint16_t node)
+presentInIdealState(const StateChecker::Context& c, uint16_t node) noexcept
{
- return c.unorderedIdealState.find(node) != c.unorderedIdealState.end();
+ return c.idealStateBundle.is_nonretired_or_maintenance(node);
}
void
@@ -730,7 +665,7 @@ addStatisticsForNonIdealNodes(const StateChecker::Context& c, bool missingReplic
{
// Common case is that ideal state == actual state with no missing replicas.
// If so, do nothing.
- if (!missingReplica && (c.idealState.size() == c.entry->getNodeCount())) {
+ if (!missingReplica && (c.idealState().size() == c.entry->getNodeCount())) {
return;
}
for (uint32_t j = 0; j < c.entry->getNodeCount(); ++j) {
@@ -745,6 +680,9 @@ addStatisticsForNonIdealNodes(const StateChecker::Context& c, bool missingReplic
}
}
+MergeNodes checkForNodesMissingFromIdealState(StateChecker::Context& c) __attribute__((noinline));
+MergeNodes checkIfBucketsAreOutOfSyncAndNeedMerging(StateChecker::Context& c) __attribute__((noinline));
+
MergeNodes
checkForNodesMissingFromIdealState(StateChecker::Context& c)
{
@@ -753,26 +691,23 @@ checkForNodesMissingFromIdealState(StateChecker::Context& c)
// Check if we need to add copies to get to ideal state.
if (!c.entry->emptyAndConsistent()) {
bool hasMissingReplica = false;
- for (uint32_t i = 0; i < c.idealState.size(); i++) {
+ for (uint16_t node : c.idealState()) {
bool found = false;
for (uint32_t j = 0; j < c.entry->getNodeCount(); j++) {
- if (c.entry->getNodeRef(j).getNode() == c.idealState[i]) {
+ if (c.entry->getNodeRef(j).getNode() == node) {
found = true;
break;
}
}
if (!found) {
- const DistributorConfiguration::MaintenancePriorities& mp(
- c.distributorConfig.getMaintenancePriorities());
- if (c.idealState.size() > c.entry->getNodeCount()) {
- ret.markMissingReplica(c.idealState[i],
- mp.mergeTooFewCopies);
+ const auto & mp = c.distributorConfig.getMaintenancePriorities();
+ if (c.idealState().size() > c.entry->getNodeCount()) {
+ ret.markMissingReplica(node, mp.mergeTooFewCopies);
} else {
- ret.markMoveToIdealLocation(c.idealState[i],
- mp.mergeMoveToIdealNode);
+ ret.markMoveToIdealLocation(node,mp.mergeMoveToIdealNode);
}
- c.stats.incCopyingIn(c.idealState[i], c.getBucketSpace());
+ c.stats.incCopyingIn(node, c.getBucketSpace());
hasMissingReplica = true;
}
}
@@ -795,12 +730,8 @@ MergeNodes
checkIfBucketsAreOutOfSyncAndNeedMerging(StateChecker::Context& c)
{
MergeNodes ret;
- if (!consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(
- c.idealState,
- c.entry.getBucketInfo()))
- {
- auto pri(c.distributorConfig.getMaintenancePriorities()
- .mergeOutOfSyncCopies);
+ if (!consistentApartFromEmptyBucketsInNonIdealLocationAndInvalidEntries(c.idealState(),c.entry.getBucketInfo())) {
+ auto pri(c.distributorConfig.getMaintenancePriorities().mergeOutOfSyncCopies);
ret.markOutOfSync(c, pri);
addStatisticsForOutOfSyncCopies(c);
}
@@ -839,7 +770,7 @@ SynchronizeAndMoveStateChecker::check(Context& c) const
if (isInconsistentlySplit(c)) {
return Result::noMaintenanceNeeded();
}
- if (containsMaintenanceNode(c.idealState, c)) {
+ if (containsMaintenanceNode(c.idealState(), c)) {
return Result::noMaintenanceNeeded();
}
if (ideal_node_is_unavailable_in_pending_state(c)) {
@@ -863,8 +794,7 @@ SynchronizeAndMoveStateChecker::check(Context& c) const
if ((c.getBucketSpace() == document::FixedBucketSpaces::default_space())
|| !c.distributorConfig.prioritize_global_bucket_merges())
{
- schedPri = (result.needsMoveOnly() ? MaintenancePriority::LOW
- : MaintenancePriority::MEDIUM);
+ schedPri = (result.needsMoveOnly() ? MaintenancePriority::LOW : MaintenancePriority::MEDIUM);
op->setPriority(result.priority());
} else {
// Since the default bucket space has a dependency on the global bucket space,
@@ -876,10 +806,8 @@ SynchronizeAndMoveStateChecker::check(Context& c) const
return Result::createStoredResult(std::move(op), schedPri);
} else {
- LOG(spam, "Bucket %s: No need for merge, as bucket is in consistent state "
- "(or inconsistent buckets are empty) %s",
- c.bucket.toString().c_str(),
- c.entry->toString().c_str());
+ LOG(spam, "Bucket %s: No need for merge, as bucket is in consistent state (or inconsistent buckets are empty) %s",
+ c.bucket.toString().c_str(), c.entry->toString().c_str());
return Result::noMaintenanceNeeded();
}
}
@@ -894,7 +822,7 @@ DeleteExtraCopiesStateChecker::bucketHasNoData(const Context& c)
bool
DeleteExtraCopiesStateChecker::copyIsInIdealState(const BucketCopy& cp, const Context& c)
{
- return hasItem(c.idealState, cp.getNode());
+ return c.idealStateBundle.is_nonretired_or_maintenance(cp.getNode());
}
bool
@@ -910,9 +838,7 @@ DeleteExtraCopiesStateChecker::addToRemoveSet(
std::vector<uint16_t>& removedCopies,
vespalib::asciistream& reasons)
{
- reasons << "[Removing " << reasonForRemoval
- << " from node " << copyToRemove.getNode()
- << ']';
+ reasons << "[Removing " << reasonForRemoval << " from node " << copyToRemove.getNode() << ']';
removedCopies.push_back(copyToRemove.getNode());
}
@@ -980,7 +906,7 @@ DeleteExtraCopiesStateChecker::check(Context& c) const
}
// Maintain symmetry with merge; don't try to mess with nodes that have an
// ideal copy on a node set in maintenance mode.
- if (containsMaintenanceNode(c.idealState, c)) {
+ if (containsMaintenanceNode(c.idealState(), c)) {
return Result::noMaintenanceNeeded();
}
@@ -988,8 +914,7 @@ DeleteExtraCopiesStateChecker::check(Context& c) const
std::vector<uint16_t> removedCopies;
if (bucketHasNoData(c)) {
- reasons << "[Removing all copies since bucket is empty:"
- << c.entry->toString() << "]";
+ reasons << "[Removing all copies since bucket is empty:" << c.entry->toString() << "]";
for (uint32_t j = 0, cnt = c.entry->getNodeCount(); j < cnt; ++j) {
removedCopies.push_back(c.entry->getNodeRef(j).getNode());
@@ -1003,9 +928,7 @@ DeleteExtraCopiesStateChecker::check(Context& c) const
}
if (!removedCopies.empty()) {
- auto ro = std::make_unique<RemoveBucketOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), removedCopies));
+ auto ro = std::make_unique<RemoveBucketOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), removedCopies));
ro->setPriority(c.distributorConfig.getMaintenancePriorities().deleteBucketCopy);
ro->setDetailedReason(reasons.str());
@@ -1019,7 +942,7 @@ bool
BucketStateStateChecker::shouldSkipActivationDueToMaintenance(const ActiveList& activeNodes, const Context& c)
{
for (uint32_t i = 0; i < activeNodes.size(); ++i) {
- const auto node_index = activeNodes[i]._nodeIndex;
+ const auto node_index = activeNodes[i].nodeIndex();
const BucketCopy* cp(c.entry->getNode(node_index));
if (!cp || cp->active()) {
continue;
@@ -1029,7 +952,7 @@ BucketStateStateChecker::shouldSkipActivationDueToMaintenance(const ActiveList&
// If copy is not ready, we don't want to activate it if a node
// is set in maintenance. Doing so would imply that we want proton
// to start background indexing.
- return containsMaintenanceNode(c.idealState, c);
+ return containsMaintenanceNode(c.idealState(), c);
} // else: activation does not imply indexing, so we can safely do it at any time.
}
}
@@ -1057,9 +980,9 @@ BucketStateStateChecker::check(Context& c) const
return Result::noMaintenanceNeeded();
}
- ActiveList activeNodes(
- ActiveCopy::calculate(c.idealState, c.distribution, c.entry,
- c.distributorConfig.max_activation_inhibited_out_of_sync_groups()));
+ ActiveList activeNodes = ActiveCopy::calculate(c.idealStateBundle.nonretired_or_maintenance_to_index(),
+ c.distribution, c.entry,
+ c.distributorConfig.max_activation_inhibited_out_of_sync_groups());
if (activeNodes.empty()) {
return Result::noMaintenanceNeeded();
}
@@ -1070,13 +993,12 @@ BucketStateStateChecker::check(Context& c) const
vespalib::asciistream reason;
std::vector<uint16_t> operationNodes;
for (uint32_t i=0; i<activeNodes.size(); ++i) {
- const BucketCopy* cp = c.entry->getNode(activeNodes[i]._nodeIndex);
+ const BucketCopy* cp = c.entry->getNode(activeNodes[i].nodeIndex());
if (cp == nullptr || cp->active()) {
continue;
}
- operationNodes.push_back(activeNodes[i]._nodeIndex);
- reason << "[Setting node " << activeNodes[i]._nodeIndex << " as active: "
- << activeNodes[i].getReason() << "]";
+ operationNodes.push_back(activeNodes[i].nodeIndex());
+ reason << "[Setting node " << activeNodes[i].nodeIndex() << " as active: " << activeNodes[i].getReason() << "]";
}
// Deactivate all copies that are currently marked as active.
@@ -1087,7 +1009,7 @@ BucketStateStateChecker::check(Context& c) const
}
bool shouldBeActive = false;
for (uint32_t j=0; j<activeNodes.size(); ++j) {
- if (activeNodes[j]._nodeIndex == cp.getNode()) {
+ if (activeNodes[j].nodeIndex() == cp.getNode()) {
shouldBeActive = true;
}
}
@@ -1103,12 +1025,9 @@ BucketStateStateChecker::check(Context& c) const
std::vector<uint16_t> activeNodeIndexes;
for (uint32_t i=0; i<activeNodes.size(); ++i) {
- activeNodeIndexes.push_back(activeNodes[i]._nodeIndex);
+ activeNodeIndexes.push_back(activeNodes[i].nodeIndex());
}
- auto op = std::make_unique<SetBucketStateOperation>(
- c.node_ctx,
- BucketAndNodes(c.getBucket(), operationNodes),
- activeNodeIndexes);
+ auto op = std::make_unique<SetBucketStateOperation>(c.node_ctx, BucketAndNodes(c.getBucket(), operationNodes), activeNodeIndexes);
// If activeNodes > 1, we're dealing with a active-per-leaf group case and
// we currently always send high pri activations.
@@ -1134,7 +1053,7 @@ GarbageCollectionStateChecker::needs_garbage_collection(const Context& c, vespal
if (c.entry->getNodeCount() == 0) {
return false;
}
- if (containsMaintenanceNode(c.idealState, c)) {
+ if (containsMaintenanceNode(c.idealState(), c)) {
return false;
}
std::chrono::seconds lastRunAt(c.entry->getLastGarbageCollectionTime());
diff --git a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
index 09e7d370a98..4c8e51908b0 100644
--- a/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
+++ b/storage/src/vespa/storage/distributor/stripe_bucket_db_updater.cpp
@@ -767,6 +767,7 @@ StripeBucketDBUpdater::MergingNodeRemover::merge(storage::BucketDatabase::Merger
}
std::vector<BucketCopy> remainingCopies;
+ remainingCopies.reserve(e->getNodeCount());
for (uint16_t i = 0; i < e->getNodeCount(); i++) {
const uint16_t node_idx = e->getNodeRef(i).getNode();
if (storage_node_is_available(node_idx)) {
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.h b/storage/src/vespa/storage/persistence/persistenceutil.h
index c3fcb68ddc8..4bd0222bb9e 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.h
+++ b/storage/src/vespa/storage/persistence/persistenceutil.h
@@ -10,7 +10,6 @@
#include <vespa/persistence/spi/result.h>
#include <vespa/persistence/spi/context.h>
#include <vespa/vespalib/io/fileutil.h>
-#include <vespa/storage/storageutil/utils.h>
namespace storage::api {
class StorageMessage;
diff --git a/storage/src/vespa/storage/storageserver/communicationmanager.cpp b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
index 362717f70e6..95ed9188422 100644
--- a/storage/src/vespa/storage/storageserver/communicationmanager.cpp
+++ b/storage/src/vespa/storage/storageserver/communicationmanager.cpp
@@ -272,7 +272,8 @@ CommunicationManager::~CommunicationManager()
LOG(debug, "Deleting link %s.", toString().c_str());
}
-void CommunicationManager::onClose()
+void
+CommunicationManager::onClose()
{
// Avoid getting config during shutdown
_configFetcher.reset();
@@ -328,7 +329,8 @@ CommunicationManager::configureMessageBusLimits(const CommunicationManagerConfig
: cfg.mbusContentNodeMaxPendingSize);
}
-void CommunicationManager::configure(std::unique_ptr<CommunicationManagerConfig> config)
+void
+CommunicationManager::configure(std::unique_ptr<CommunicationManagerConfig> config)
{
// Only allow dynamic (live) reconfiguration of message bus limits.
if (_mbus) {
@@ -494,8 +496,7 @@ CommunicationManager::sendMessageBusMessage(const std::shared_ptr<api::StorageCo
}
bool
-CommunicationManager::sendCommand(
- const std::shared_ptr<api::StorageCommand> & msg)
+CommunicationManager::sendCommand(const std::shared_ptr<api::StorageCommand> & msg)
{
if (!msg->getAddress()) {
LOGBP(warning, "Got command without address of type %s in CommunicationManager::sendCommand",
@@ -570,9 +571,8 @@ CommunicationManager::serializeNodeState(const api::GetNodeStateReply& gns, std:
}
void
-CommunicationManager::sendDirectRPCReply(
- RPCRequestWrapper& request,
- const std::shared_ptr<api::StorageReply>& reply)
+CommunicationManager::sendDirectRPCReply(RPCRequestWrapper& request,
+ const std::shared_ptr<api::StorageReply>& reply)
{
std::string_view requestName(request.getMethodName()); // TODO non-name based dispatch
// TODO rework this entire dispatch mechanism :D
@@ -616,9 +616,8 @@ CommunicationManager::sendDirectRPCReply(
}
void
-CommunicationManager::sendMessageBusReply(
- StorageTransportContext& context,
- const std::shared_ptr<api::StorageReply>& reply)
+CommunicationManager::sendMessageBusReply(StorageTransportContext& context,
+ const std::shared_ptr<api::StorageReply>& reply)
{
// Using messagebus for communication.
mbus::Reply::UP replyUP;
@@ -653,8 +652,7 @@ CommunicationManager::sendMessageBusReply(
}
bool
-CommunicationManager::sendReply(
- const std::shared_ptr<api::StorageReply>& reply)
+CommunicationManager::sendReply(const std::shared_ptr<api::StorageReply>& reply)
{
// Relaxed load since we're not doing any dependent reads that aren't
// already covered by some other form of explicit synchronization.
diff --git a/storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp b/storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp
index 8c994991b9b..ea049493348 100644
--- a/storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp
+++ b/storage/src/vespa/storage/storageserver/rpc/slime_cluster_state_bundle_codec.cpp
@@ -44,7 +44,7 @@ OutputBuf::~OutputBuf() = default;
vespalib::string serialize_state(const lib::ClusterState& state) {
vespalib::asciistream as;
- state.serialize(as, false);
+ state.serialize(as);
return as.str();
}
diff --git a/storage/src/vespa/storage/storageutil/distributorstatecache.h b/storage/src/vespa/storage/storageutil/distributorstatecache.h
index 8c4d07e39bf..0652a980e3a 100644
--- a/storage/src/vespa/storage/storageutil/distributorstatecache.h
+++ b/storage/src/vespa/storage/storageutil/distributorstatecache.h
@@ -1,6 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+
#include <vespa/vdslib/state/clusterstate.h>
#include <vespa/vdslib/distribution/distribution.h>
@@ -9,9 +10,7 @@ namespace storage {
class DistributorStateCache
{
public:
- DistributorStateCache(
- const lib::Distribution& distr,
- const lib::ClusterState& state)
+ DistributorStateCache(const lib::Distribution& distr, const lib::ClusterState& state)
: _distribution(distr),
_state(state),
_distrBitMask(0xffffffffffffffffull),
@@ -22,8 +21,7 @@ public:
_distrBitMask >>= (64 - state.getDistributionBitCount());
}
- uint16_t getOwner(const document::BucketId& bid,
- const char* upStates = "ui")
+ uint16_t getOwner(const document::BucketId& bid, const char* upStates = "ui")
{
uint64_t distributionBits = bid.getRawId() & _distrBitMask;
diff --git a/storage/src/vespa/storage/storageutil/utils.h b/storage/src/vespa/storage/storageutil/utils.h
index debb7e71ace..3d3f5b85d71 100644
--- a/storage/src/vespa/storage/storageutil/utils.h
+++ b/storage/src/vespa/storage/storageutil/utils.h
@@ -1,7 +1,7 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vector>
+#include <vespa/vespalib/util/arrayref.h>
#include <sstream>
namespace storage {
@@ -10,50 +10,55 @@ namespace storage {
* Creates a vector of the given type with one entry in it.
*/
template<class A>
-std::vector<A> toVector(A entry) {
+std::vector<A>
+toVector(A entry) {
std::vector<A> entries;
entries.push_back(entry);
return entries;
-};
+}
/**
* Creates a vector of the given type with two entries in it.
*/
template<class A>
-std::vector<A> toVector(A entry, A entry2) {
+std::vector<A>
+toVector(A entry, A entry2) {
std::vector<A> entries;
entries.push_back(entry);
entries.push_back(entry2);
return entries;
-};
+}
/**
* Creates a vector of the given type with two entries in it.
*/
template<class A>
-std::vector<A> toVector(A entry, A entry2, A entry3) {
+std::vector<A>
+toVector(A entry, A entry2, A entry3) {
std::vector<A> entries;
entries.push_back(entry);
entries.push_back(entry2);
entries.push_back(entry3);
return entries;
-};
+}
/**
* Creates a vector of the given type with two entries in it.
*/
template<class A>
-std::vector<A> toVector(A entry, A entry2, A entry3, A entry4) {
+std::vector<A>
+toVector(A entry, A entry2, A entry3, A entry4) {
std::vector<A> entries;
entries.push_back(entry);
entries.push_back(entry2);
entries.push_back(entry3);
entries.push_back(entry4);
return entries;
-};
+}
template<class A>
-std::string dumpVector(const std::vector<A>& vec) {
+std::string
+dumpVector(const std::vector<A>& vec) {
std::ostringstream ost;
for (uint32_t i = 0; i < vec.size(); ++i) {
if (!ost.str().empty()) {
@@ -65,27 +70,5 @@ std::string dumpVector(const std::vector<A>& vec) {
return ost.str();
}
-template<class A>
-bool hasItem(const std::vector<A>& vec, A entry) {
- for (uint32_t i = 0; i < vec.size(); ++i) {
- if (vec[i] == entry) {
- return true;
- }
- }
-
- return false;
-}
-
-template<typename T>
-struct ConfigReader : public T::Subscriber, public T
-{
- T& config; // Alter to inherit T to simplify but kept this for compatability
-
- ConfigReader(const std::string& configId) : config(*this) {
- T::subscribe(configId, *this);
- }
- void configure(const T& c) { config = c; }
-};
-
}
diff --git a/storage/src/vespa/storage/tools/getidealstate.cpp b/storage/src/vespa/storage/tools/getidealstate.cpp
index 8b120924aaa..9e80517f4f7 100644
--- a/storage/src/vespa/storage/tools/getidealstate.cpp
+++ b/storage/src/vespa/storage/tools/getidealstate.cpp
@@ -64,18 +64,13 @@ Options::Options(int argc, const char* const* argv)
Options::~Options() {}
-void processBucket(const lib::Distribution& distribution,
- const lib::ClusterState& clusterState,
- const std::string& upStates,
- const document::BucketId& bucket)
+void processBucket(const lib::Distribution& distribution, const lib::ClusterState& clusterState,
+ const std::string& upStates, const document::BucketId& bucket)
{
std::ostringstream ost;
- std::vector<uint16_t> storageNodes(distribution.getIdealStorageNodes(
- clusterState, bucket, upStates.c_str()));
- uint16_t distributorNode(distribution.getIdealDistributorNode(
- clusterState, bucket, upStates.c_str()));
- ost << bucket << " distributor: " << distributorNode
- << ", storage:";
+ std::vector<uint16_t> storageNodes(distribution.getIdealStorageNodes(clusterState, bucket, upStates.c_str()));
+ uint16_t distributorNode(distribution.getIdealDistributorNode(clusterState, bucket, upStates.c_str()));
+ ost << bucket << " distributor: " << distributorNode << ", storage:";
for (uint32_t i=0; i<storageNodes.size(); ++i) {
ost << " " << storageNodes[i];
}
diff --git a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp
index 9b7c4919403..4cc32a2fc3d 100644
--- a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp
+++ b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.cpp
@@ -4,17 +4,14 @@
namespace storage::framework {
-HtmlStatusReporter::HtmlStatusReporter(vespalib::stringref id,
- vespalib::stringref name)
+HtmlStatusReporter::HtmlStatusReporter(vespalib::stringref id, vespalib::stringref name)
: StatusReporter(id, name)
-{
-}
+{ }
HtmlStatusReporter::~HtmlStatusReporter() = default;
void
-HtmlStatusReporter::reportHtmlHeader(std::ostream& out,
- const HttpUrlPath& path) const
+HtmlStatusReporter::reportHtmlHeader(std::ostream& out, const HttpUrlPath& path) const
{
out << "<html>\n"
<< "<head>\n"
@@ -26,8 +23,7 @@ HtmlStatusReporter::reportHtmlHeader(std::ostream& out,
}
void
-HtmlStatusReporter::reportHtmlFooter(std::ostream& out,
- const HttpUrlPath&) const
+HtmlStatusReporter::reportHtmlFooter(std::ostream& out, const HttpUrlPath&) const
{
out << "</body>\n</html>\n";
}
@@ -39,8 +35,7 @@ HtmlStatusReporter::getReportContentType(const HttpUrlPath&) const
}
bool
-HtmlStatusReporter::reportStatus(std::ostream& out,
- const HttpUrlPath& path) const
+HtmlStatusReporter::reportStatus(std::ostream& out, const HttpUrlPath& path) const
{
if (!isValidStatusRequest()) return false;
reportHtmlHeader(out, path);
diff --git a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h
index 4ffba20a3fa..ee3d65b0de3 100644
--- a/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h
+++ b/storage/src/vespa/storageframework/generic/status/htmlstatusreporter.h
@@ -29,8 +29,7 @@ struct HtmlStatusReporter : public StatusReporter {
* some code in the <head></head> part of the HTML, such as javascript
* functions.
*/
- virtual void reportHtmlHeaderAdditions(std::ostream&,
- const HttpUrlPath&) const {}
+ virtual void reportHtmlHeaderAdditions(std::ostream&, const HttpUrlPath&) const {}
/**
* Write a default HTML header. It writes the start of an HTML
diff --git a/vdslib/src/tests/distribution/CMakeLists.txt b/vdslib/src/tests/distribution/CMakeLists.txt
index c4ae8b0291c..3f3be1e1cad 100644
--- a/vdslib/src/tests/distribution/CMakeLists.txt
+++ b/vdslib/src/tests/distribution/CMakeLists.txt
@@ -3,7 +3,6 @@ vespa_add_library(vdslib_testdistribution
SOURCES
distributiontest.cpp
grouptest.cpp
- idealnodecalculatorimpltest.cpp
DEPENDS
vdslib
GTest::GTest
diff --git a/vdslib/src/tests/distribution/distributiontest.cpp b/vdslib/src/tests/distribution/distributiontest.cpp
index b5c756aece9..ce07711a069 100644
--- a/vdslib/src/tests/distribution/distributiontest.cpp
+++ b/vdslib/src/tests/distribution/distributiontest.cpp
@@ -5,7 +5,6 @@
#include <vespa/config/subscription/configuri.h>
#include <vespa/fastos/file.h>
#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/distribution/idealnodecalculator.h>
#include <vespa/vdslib/state/clusterstate.h>
#include <vespa/vdslib/state/random.h>
#include <vespa/vespalib/data/slime/slime.h>
@@ -53,9 +52,7 @@ TEST(DistributionTest, test_verify_java_distributions)
long maxBucket = 1;
long mask = 0;
- for (uint32_t distributionBits = 0; distributionBits <= 32;
- ++distributionBits)
- {
+ for (uint32_t distributionBits = 0; distributionBits <= 32; ++distributionBits) {
state.setDistributionBitCount(distributionBits);
RandomGen randomizer(distributionBits);
for (uint32_t bucketIndex = 0; bucketIndex < 64; ++bucketIndex) {
@@ -66,11 +63,8 @@ TEST(DistributionTest, test_verify_java_distributions)
bucketId = randomizer.nextUint64();
}
document::BucketId bucket(distributionBits, bucketId);
- for (uint32_t redundancy = 1;
- redundancy <= distr.getRedundancy(); ++redundancy)
- {
- int distributorIndex = distr.getIdealDistributorNode(
- state, bucket, "uim");
+ for (uint32_t redundancy = 1; redundancy <= distr.getRedundancy(); ++redundancy) {
+ int distributorIndex = distr.getIdealDistributorNode(state, bucket, "uim");
of << distributionBits << " " << (bucketId & mask)
<< " " << redundancy << " " << distributorIndex << "\n";
}
@@ -89,6 +83,51 @@ TEST(DistributionTest, test_verify_java_distributions)
namespace {
+/**
+* A list of ideal nodes, sorted in preferred order. Wraps a vector to hide
+* unneeded details, and make it easily printable.
+*/
+class IdealNodeList : public document::Printable {
+public:
+ IdealNodeList() noexcept;
+ ~IdealNodeList();
+
+ void push_back(const Node& node) {
+ _idealNodes.push_back(node);
+ }
+
+ const Node& operator[](uint32_t i) const noexcept { return _idealNodes[i]; }
+ uint32_t size() const noexcept { return _idealNodes.size(); }
+ bool contains(const Node& n) const noexcept {
+ return indexOf(n) != 0xffff;
+ }
+ uint16_t indexOf(const Node& n) const noexcept {
+ for (uint16_t i=0; i<_idealNodes.size(); ++i) {
+ if (n == _idealNodes[i]) return i;
+ }
+ return 0xffff;
+ }
+
+ void print(std::ostream& out, bool, const std::string &) const override;
+private:
+ std::vector<Node> _idealNodes;
+};
+
+IdealNodeList::IdealNodeList() noexcept = default;
+IdealNodeList::~IdealNodeList() = default;
+
+void
+IdealNodeList::print(std::ostream& out, bool , const std::string &) const
+{
+ out << "[";
+ for (uint32_t i=0; i<_idealNodes.size(); ++i) {
+ if (i != 0) out << ", ";
+ out << _idealNodes[i];
+ }
+ out << "]";
+}
+
+
struct ExpectedResult {
ExpectedResult() { }
ExpectedResult(const ExpectedResult &) = default;
@@ -102,22 +141,16 @@ struct ExpectedResult {
};
void
-verifyJavaDistribution(const vespalib::string& name,
- const ClusterState& state,
- const Distribution& distribution,
- const NodeType& nodeType,
- uint16_t redundancy,
- uint16_t nodeCount,
- vespalib::stringref upStates,
- const std::vector<ExpectedResult> results)
+verifyJavaDistribution(const vespalib::string& name, const ClusterState& state, const Distribution& distribution,
+ const NodeType& nodeType, uint16_t redundancy, uint16_t nodeCount,
+ vespalib::stringref upStates, const std::vector<ExpectedResult> results)
{
(void) nodeCount;
for (uint32_t i=0, n=results.size(); i<n; ++i) {
std::string testId = name + " " + results[i].bucket.toString();
try {
std::vector<uint16_t> nvect;
- distribution.getIdealNodes(nodeType, state, results[i].bucket,
- nvect, upStates.data(), redundancy);
+ distribution.getIdealNodes(nodeType, state, results[i].bucket, nvect, upStates.data(), redundancy);
IdealNodeList nodes;
for (uint32_t j=0, m=nvect.size(); j<m; ++j) {
nodes.push_back(Node(nodeType, nvect[j]));
@@ -155,8 +188,7 @@ auto readFile(const std::string & filename) {
TEST(DistributionTest, test_verify_java_distributions_2)
{
- vespalib::DirectoryList files(
- vespalib::listDirectory("distribution/testdata"));
+ vespalib::DirectoryList files(vespalib::listDirectory("distribution/testdata"));
for (uint32_t i=0, n=files.size(); i<n; ++i) {
size_t pos = files[i].find(".java.results");
if (pos == vespalib::string::npos || pos + 13 != files[i].size()) {
@@ -189,8 +221,7 @@ TEST(DistributionTest, test_verify_java_distributions_2)
ClusterState cs(c["cluster-state"].asString().make_string());
std::string distConfig(c["distribution"].asString().make_string());
Distribution d(distConfig);
- const NodeType& nt(
- NodeType::get(c["node-type"].asString().make_string()));
+ const NodeType& nt(NodeType::get(c["node-type"].asString().make_string()));
uint32_t redundancy(c["redundancy"].asLong());
uint32_t nodeCount(c["node-count"].asLong());
vespalib::string upStates(c["up-states"].asString().make_string());
@@ -209,8 +240,7 @@ TEST(DistributionTest, test_verify_java_distributions_2)
}
results.push_back(result);
}
- verifyJavaDistribution(name, cs, d, nt, redundancy, nodeCount,
- upStates, results);
+ verifyJavaDistribution(name, cs, d, nt, redundancy, nodeCount, upStates, results);
//std::cerr << name << ": Verified " << results.size() << " tests.\n";
}
}
@@ -223,8 +253,7 @@ TEST(DistributionTest, test_unchanged_distribution)
std::ifstream in("distribution/testdata/41-distributordistribution");
for (unsigned i = 0; i < 64_Ki; i++) {
- uint16_t node = distr.getIdealDistributorNode(
- state, document::BucketId(16, i), "u");
+ uint16_t node = distr.getIdealDistributorNode(state, document::BucketId(16, i), "u");
char buf[100];
in.getline(buf, 100);
@@ -272,9 +301,7 @@ struct MyTest {
document::BucketId bucket(16, i);
std::vector<uint16_t> nodes;
ClusterState clusterState(_state);
- _distribution->getIdealNodes(
- *_nodeType, clusterState, bucket, nodes,
- _upStates, _redundancy);
+ _distribution->getIdealNodes(*_nodeType, clusterState, bucket, nodes, _upStates, _redundancy);
for (uint32_t j=0; j<nodes.size(); ++j) {
++result[nodes[j]];
}
@@ -293,8 +320,7 @@ MyTest::MyTest()
{ }
MyTest::~MyTest() = default;
-std::vector<uint16_t> createNodeCountList(const std::string& source,
- std::vector<uint16_t>& vals) {
+std::vector<uint16_t> createNodeCountList(const std::string& source, std::vector<uint16_t>& vals) {
std::vector<uint16_t> result(vals.size(), 0);
vespalib::StringTokenizer st(source, " ");
for (uint32_t i=0; i<st.size(); ++i) {
@@ -375,15 +401,9 @@ TEST(DistributionTest, testHighSplitBit)
document::BucketId bid1 = document::BucketId(bits, base);
document::BucketId bid2 = document::BucketId(bits, base);
- std::vector<uint16_t> nodes1 =
- distr.getIdealStorageNodes(state,
- bid1,
- "u");
+ std::vector<uint16_t> nodes1 = distr.getIdealStorageNodes(state, bid1, "u");
- std::vector<uint16_t> nodes2 =
- distr.getIdealStorageNodes(state,
- bid2,
- "u");
+ std::vector<uint16_t> nodes2 = distr.getIdealStorageNodes(state, bid2, "u");
ost1 << bid1 << " vs. " << bid2 << ": ";
ost2 << bid1 << " vs. " << bid2 << ": ";
@@ -424,16 +444,14 @@ TEST(DistributionTest, test_distribution)
s1 << "storage:" << n << std::endl;
ClusterState systemState(s1.str());
- Distribution distr(
- Distribution::getDefaultDistributionConfig(3, n));
+ Distribution distr(Distribution::getDefaultDistributionConfig(3, n));
std::vector<std::pair<uint64_t, std::vector<uint16_t> > > _distribution(b);
std::vector<int> _nodeCount(n, 0);
for (int i = 0; i < b; i++) {
_distribution[i].first = i;
- _distribution[i].second = distr.getIdealStorageNodes(
- systemState, document::BucketId(26, i));
+ _distribution[i].second = distr.getIdealStorageNodes(systemState, document::BucketId(26, i));
sort(_distribution[i].second.begin(), _distribution[i].second.end());
auto unique_nodes = std::distance(_distribution[i].second.begin(), unique(_distribution[i].second.begin(), _distribution[i].second.end()));
_distribution[i].second.resize(unique_nodes);
@@ -469,9 +487,7 @@ TEST(DistributionTest, test_move)
{
ClusterState systemState("storage:3");
document::BucketId bucket(16, 0x8b4f67ae);
-
Distribution distr(Distribution::getDefaultDistributionConfig(2, 3));
-
res = distr.getIdealStorageNodes(systemState, bucket);
EXPECT_EQ(size_t(2), res.size());
}
@@ -479,11 +495,8 @@ TEST(DistributionTest, test_move)
std::vector<uint16_t> res2;
{
ClusterState systemState("storage:4");
-
Distribution distr(Distribution::getDefaultDistributionConfig(2, 4));
-
document::BucketId bucket(16, 0x8b4f67ae);
-
res2 = distr.getIdealStorageNodes(systemState, bucket);
EXPECT_EQ(size_t(2), res2.size());
}
@@ -506,8 +519,7 @@ TEST(DistributionTest, test_move_constraints)
std::vector<std::vector<uint16_t> > initBuckets(10000);
for (unsigned i = 0; i < initBuckets.size(); i++) {
- initBuckets[i] = distr.getIdealStorageNodes(
- clusterState, document::BucketId(16, i));
+ initBuckets[i] = distr.getIdealStorageNodes(clusterState, document::BucketId(16, i));
sort(initBuckets[i].begin(), initBuckets[i].end());
}
@@ -517,8 +529,7 @@ TEST(DistributionTest, test_move_constraints)
ClusterState systemState("storage:11 .10.s:d");
for (unsigned i = 0; i < addedDownBuckets.size(); i++) {
- addedDownBuckets[i] = distr.getIdealStorageNodes(
- systemState, document::BucketId(16, i));
+ addedDownBuckets[i] = distr.getIdealStorageNodes(systemState, document::BucketId(16, i));
sort(addedDownBuckets[i].begin(), addedDownBuckets[i].end());
}
for (unsigned i = 0; i < initBuckets.size(); i++) {
@@ -541,15 +552,14 @@ TEST(DistributionTest, test_move_constraints)
ClusterState systemState("storage:10 .0.s:d");
for (unsigned i = 0; i < removed0Buckets.size(); i++) {
- removed0Buckets[i] = distr.getIdealStorageNodes(
- systemState, document::BucketId(16, i));
+ removed0Buckets[i] = distr.getIdealStorageNodes(systemState, document::BucketId(16, i));
sort(removed0Buckets[i].begin(), removed0Buckets[i].end());
}
for (unsigned i = 0; i < initBuckets.size(); i++) {
std::vector<uint16_t> movedAway;
set_difference(initBuckets[i].begin(), initBuckets[i].end(),
- removed0Buckets[i].begin(), removed0Buckets[i].end(),
- back_inserter(movedAway));
+ removed0Buckets[i].begin(), removed0Buckets[i].end(),
+ back_inserter(movedAway));
if (movedAway.size() > 0) {
if (movedAway[0] != 0) {
std::cerr << i << ": ";
@@ -572,15 +582,14 @@ TEST(DistributionTest, test_move_constraints)
ClusterState systemState("storage:11");
for (unsigned i = 0; i < added10Buckets.size(); i++) {
- added10Buckets[i] = distr.getIdealStorageNodes(
- systemState, document::BucketId(16, i));
+ added10Buckets[i] = distr.getIdealStorageNodes(systemState, document::BucketId(16, i));
sort(added10Buckets[i].begin(), added10Buckets[i].end());
}
for (unsigned i = 0; i < initBuckets.size(); i++) {
std::vector<uint16_t> movedInto;
std::set_difference(added10Buckets[i].begin(), added10Buckets[i].end(),
- initBuckets[i].begin(), initBuckets[i].end(),
- std::inserter(movedInto, movedInto.begin()));
+ initBuckets[i].begin(), initBuckets[i].end(),
+ std::inserter(movedInto, movedInto.begin()));
if (movedInto.size() > 0) {
ASSERT_EQ((size_t)1, movedInto.size());
EXPECT_EQ((uint16_t)10, movedInto[0]);
@@ -601,11 +610,9 @@ TEST(DistributionTest, test_distribution_bits)
for (unsigned i = 0; i < 100; i++) {
int val = rand();
- uint32_t index = distr.getIdealDistributorNode(
- state1, document::BucketId(19, val), "u");
+ uint32_t index = distr.getIdealDistributorNode(state1, document::BucketId(19, val), "u");
ost1 << index << " ";
- index = distr.getIdealDistributorNode(
- state2, document::BucketId(19, val), "u");
+ index = distr.getIdealDistributorNode(state2, document::BucketId(19, val), "u");
ost2 << index << " ";
}
@@ -620,10 +627,8 @@ TEST(DistributionTest, test_redundancy_hierarchical_distribution)
Distribution distr2(Distribution::getDefaultDistributionConfig(2, 10));
for (unsigned i = 0; i < 100; i++) {
- uint16_t d1 = distr1.getIdealDistributorNode(
- state, document::BucketId(16, i), "u");
- uint16_t d2 = distr2.getIdealDistributorNode(
- state, document::BucketId(16, i), "u");
+ uint16_t d1 = distr1.getIdealDistributorNode(state, document::BucketId(16, i), "u");
+ uint16_t d2 = distr2.getIdealDistributorNode(state, document::BucketId(16, i), "u");
EXPECT_EQ(d1, d2);
}
}
@@ -653,20 +658,17 @@ TEST(DistributionTest, test_hierarchical_distribution)
ClusterState state("distributor:6 storage:6");
for (uint32_t i = 0; i < 3; ++i) {
- EXPECT_EQ(
- vespalib::string("rack0"),
- distr.getNodeGraph().getGroupForNode(i)->getName());
+ EXPECT_EQ(vespalib::string("rack0"),
+ distr.getNodeGraph().getGroupForNode(i)->getName());
}
for (uint32_t i = 3; i < 6; ++i) {
- EXPECT_EQ(
- vespalib::string("rack1"),
- distr.getNodeGraph().getGroupForNode(i)->getName());
+ EXPECT_EQ(vespalib::string("rack1"),
+ distr.getNodeGraph().getGroupForNode(i)->getName());
}
std::vector<int> mainNode(6);
for (uint32_t i=0; i<100; ++i) {
- std::vector<uint16_t> nodes = distr.getIdealStorageNodes(
- state, document::BucketId(16, i), "u");
+ std::vector<uint16_t> nodes = distr.getIdealStorageNodes(state, document::BucketId(16, i), "u");
ASSERT_EQ((size_t) 4, nodes.size());
EXPECT_LT(nodes[0], mainNode.size());
++mainNode[nodes[0]];
@@ -710,8 +712,7 @@ TEST(DistributionTest, test_group_capacity)
int group0count = 0;
int group1count = 0;
for (uint32_t i = 0; i < 1000; i++) {
- std::vector<uint16_t> nodes = distr.getIdealStorageNodes(
- state, document::BucketId(16, i), "u");
+ std::vector<uint16_t> nodes = distr.getIdealStorageNodes(state, document::BucketId(16, i), "u");
if (nodes[0] == 0 || nodes[0] == 1 || nodes[0] == 2) {
group0count++;
}
@@ -794,14 +795,12 @@ TEST(DistributionTest, test_hierarchical_no_redistribution)
EXPECT_EQ(numBuckets, v.size());
v.clear();
- state.setNodeState(Node(NodeType::STORAGE, 0),
- NodeState(NodeType::STORAGE, State::DOWN));
+ state.setNodeState(Node(NodeType::STORAGE, 0),NodeState(NodeType::STORAGE, State::DOWN));
std::vector< std::vector<uint16_t> > distr2(4);
for (size_t i = 0; i < numBuckets; i++) {
- nodes = distribution.getIdealStorageNodes(
- state, document::BucketId(16, i), "u");
+ nodes = distribution.getIdealStorageNodes(state, document::BucketId(16, i), "u");
for (uint16_t j=0; j<nodes.size(); ++j) {
ASSERT_TRUE(0 != nodes[j]);
distr2[nodes[j]].push_back(i);
@@ -1010,7 +1009,7 @@ group[2].nodes[1].retired false
auto nodes_of = [&](uint32_t bucket){
std::vector<uint16_t> actual;
- distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, bucket), actual);
+ distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, bucket), actual, "uim");
return actual;
};
@@ -1071,9 +1070,13 @@ TEST(DistributionTest, DISABLED_benchmark_ideal_state_for_many_groups) {
std::vector<uint16_t> actual;
uint32_t bucket = 0;
auto min_time = vespalib::BenchmarkTimer::benchmark([&]{
- distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, (bucket++ & 0xffffU)), actual);
+ distr.getIdealNodes(NodeType::STORAGE, state, document::BucketId(16, (bucket++ & 0xffffU)), actual, "uim");
}, 5.0);
fprintf(stderr, "%.10f seconds\n", min_time);
}
+TEST(DistributionTest, control_size_of_IndexList) {
+ EXPECT_EQ(24u, sizeof(Distribution::IndexList));
+}
+
}
diff --git a/vdslib/src/tests/distribution/idealnodecalculatorimpltest.cpp b/vdslib/src/tests/distribution/idealnodecalculatorimpltest.cpp
deleted file mode 100644
index 4159491097c..00000000000
--- a/vdslib/src/tests/distribution/idealnodecalculatorimpltest.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/config-stor-distribution.h>
-#include <vespa/vdslib/distribution/idealnodecalculatorimpl.h>
-#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/state/clusterstate.h>
-#include <vespa/vespalib/gtest/gtest.h>
-
-namespace storage::lib {
-
-/**
- * Class is just a wrapper for distribution, so little needs to be tested. Just
- * that:
- *
- * - get ideal nodes calls gets propagated correctly.
- * - Changes in distribution/cluster state is picked up.
- */
-
-TEST(IdealNodeCalculatorImplTest, test_normal_usage)
-{
- ClusterState state("storage:10");
- Distribution distr(Distribution::getDefaultDistributionConfig(3, 10));
- IdealNodeCalculatorImpl impl;
- IdealNodeCalculatorConfigurable& configurable(impl);
- IdealNodeCalculator& calc(impl);
- configurable.setDistribution(distr);
- configurable.setClusterState(state);
-
- std::string expected("[storage.8, storage.9, storage.6]");
- EXPECT_EQ(
- expected,
- calc.getIdealStorageNodes(document::BucketId(16, 5)).toString());
-}
-
-}
diff --git a/vdslib/src/tests/state/clusterstatetest.cpp b/vdslib/src/tests/state/clusterstatetest.cpp
index a08ec007f55..0b278177453 100644
--- a/vdslib/src/tests/state/clusterstatetest.cpp
+++ b/vdslib/src/tests/state/clusterstatetest.cpp
@@ -13,10 +13,10 @@ using ::testing::ContainsRegex;
namespace storage::lib {
-#define VERIFY3(source, result, type, typestr) { \
+#define VERIFY3(source, result, typestr) { \
vespalib::asciistream ost; \
try { \
- state->serialize(ost, type); \
+ state->serialize(ost); \
} catch (std::exception& e) { \
FAIL() << ("Failed to serialize system state " \
+ state->toString(true) + " in " + std::string(typestr) \
@@ -26,24 +26,18 @@ namespace storage::lib {
vespalib::string(typestr) + " \"" + ost.str() + "\"") << state->toString(true); \
}
-#define VERIFY2(serialized, result, testOld, testNew) { \
+#define VERIFY2(serialized, result) { \
std::unique_ptr<ClusterState> state; \
try { \
state.reset(new ClusterState(serialized)); \
} catch (std::exception& e) { \
- FAIL() << ("Failed to parse '" + std::string(serialized) \
- + "': " + e.what()); \
+ FAIL() << ("Failed to parse '" + std::string(serialized) + "': " + e.what()); \
} \
- if (testOld) VERIFY3(serialized, result, true, "Old") \
- if (testNew) VERIFY3(serialized, result, false, "New") \
+ VERIFY3(serialized, result, "New") \
}
-#define VERIFYSAMEOLD(serialized) VERIFY2(serialized, serialized, true, false)
-#define VERIFYOLD(serialized, result) VERIFY2(serialized, result, true, false)
-#define VERIFYSAMENEW(serialized) VERIFY2(serialized, serialized, false, true)
-#define VERIFYNEW(serialized, result) VERIFY2(serialized, result, false, true)
-#define VERIFYSAME(serialized) VERIFY2(serialized, serialized, true, true)
-#define VERIFY(serialized, result) VERIFY2(serialized, result, true, true)
+#define VERIFYSAMENEW(serialized) VERIFY2(serialized, serialized)
+#define VERIFYNEW(serialized, result) VERIFY2(serialized, result)
#define VERIFY_FAIL(serialized, error) { \
try{ \
diff --git a/vdslib/src/vespa/vdslib/distribution/CMakeLists.txt b/vdslib/src/vespa/vdslib/distribution/CMakeLists.txt
index 0d9342291e8..58ec94eec9c 100644
--- a/vdslib/src/vespa/vdslib/distribution/CMakeLists.txt
+++ b/vdslib/src/vespa/vdslib/distribution/CMakeLists.txt
@@ -4,7 +4,6 @@ vespa_add_library(vdslib_distribution OBJECT
distribution.cpp
distribution_config_util.cpp
group.cpp
- idealnodecalculatorimpl.cpp
redundancygroupdistribution.cpp
DEPENDS
)
diff --git a/vdslib/src/vespa/vdslib/distribution/distribution.cpp b/vdslib/src/vespa/vdslib/distribution/distribution.cpp
index 637a5089822..ee022b1779a 100644
--- a/vdslib/src/vespa/vdslib/distribution/distribution.cpp
+++ b/vdslib/src/vespa/vdslib/distribution/distribution.cpp
@@ -9,8 +9,8 @@
#include <vespa/config/print/asciiconfigwriter.h>
#include <vespa/config/print/asciiconfigreader.hpp>
#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
#include <vespa/config-stor-distribution.h>
-#include <algorithm>
#include <cmath>
#include <cassert>
@@ -20,16 +20,19 @@ LOG_SETUP(".vdslib.distribution");
namespace storage::lib {
namespace {
- std::vector<uint32_t> getDistributionBitMasks() {
- std::vector<uint32_t> masks;
- masks.resize(32 + 1);
- uint32_t mask = 0;
- for (uint32_t i=0; i<=32; ++i) {
- masks[i] = mask;
- mask = (mask << 1) | 1;
- }
- return masks;
+
+std::vector<uint32_t>
+getDistributionBitMasks() {
+ std::vector<uint32_t> masks;
+ masks.resize(32 + 1);
+ uint32_t mask = 0;
+ for (uint32_t i=0; i<=32; ++i) {
+ masks[i] = mask;
+ mask = (mask << 1) | 1;
}
+ return masks;
+}
+
}
VESPA_IMPLEMENT_EXCEPTION(NoDistributorsAvailableException, vespalib::Exception);
@@ -65,8 +68,8 @@ Distribution::Distribution(const Distribution& d)
configure(*reader.read());
}
-Distribution::ConfigWrapper::ConfigWrapper(std::unique_ptr<DistributionConfig> cfg) :
- _cfg(std::move(cfg))
+Distribution::ConfigWrapper::ConfigWrapper(std::unique_ptr<DistributionConfig> cfg) noexcept
+ : _cfg(std::move(cfg))
{ }
Distribution::ConfigWrapper::~ConfigWrapper() = default;
@@ -150,8 +153,7 @@ Distribution::configure(const vespa::config::content::StorDistributionConfig& co
if ( ! nodeGraph) {
throw vespalib::IllegalStateException(
"Got config that didn't seem to specify even a root group. Must "
- "have a root group at minimum:\n"
- + _serialized, VESPA_STRLOC);
+ "have a root group at minimum:\n" + _serialized, VESPA_STRLOC);
}
nodeGraph->calculateDistributionHashValues();
_nodeGraph = std::move(nodeGraph);
@@ -161,14 +163,11 @@ Distribution::configure(const vespa::config::content::StorDistributionConfig& co
_ensurePrimaryPersisted = config.ensurePrimaryPersisted;
_readyCopies = config.readyCopies;
_activePerGroup = config.activePerLeafGroup;
- _distributorAutoOwnershipTransferOnWholeGroupDown
- = config.distributorAutoOwnershipTransferOnWholeGroupDown;
+ _distributorAutoOwnershipTransferOnWholeGroupDown = config.distributorAutoOwnershipTransferOnWholeGroupDown;
}
uint32_t
-Distribution::getGroupSeed(
- const document::BucketId& bucket, const ClusterState& clusterState,
- const Group& group) const
+Distribution::getGroupSeed(const document::BucketId& bucket, const ClusterState& clusterState, const Group& group) const
{
uint32_t seed(static_cast<uint32_t>(bucket.getRawId())
& _distributionBitMasks[clusterState.getDistributionBitCount()]);
@@ -177,8 +176,7 @@ Distribution::getGroupSeed(
}
uint32_t
-Distribution::getDistributorSeed(
- const document::BucketId& bucket, const ClusterState& state) const
+Distribution::getDistributorSeed(const document::BucketId& bucket, const ClusterState& state) const
{
uint32_t seed(static_cast<uint32_t>(bucket.getRawId())
& _distributionBitMasks[state.getDistributionBitCount()]);
@@ -186,8 +184,7 @@ Distribution::getDistributorSeed(
}
uint32_t
-Distribution::getStorageSeed(
- const document::BucketId& bucket, const ClusterState& state) const
+Distribution::getStorageSeed(const document::BucketId& bucket, const ClusterState& state) const
{
uint32_t seed(static_cast<uint32_t>(bucket.getRawId())
& _distributionBitMasks[state.getDistributionBitCount()]);
@@ -262,11 +259,8 @@ namespace {
}
void
-Distribution::getIdealGroups(const document::BucketId& bucket,
- const ClusterState& clusterState,
- const Group& parent,
- uint16_t redundancy,
- std::vector<ResultGroup>& results) const
+Distribution::getIdealGroups(const document::BucketId& bucket, const ClusterState& clusterState, const Group& parent,
+ uint16_t redundancy, std::vector<ResultGroup>& results) const
{
if (parent.isLeafGroup()) {
results.emplace_back(parent, redundancy);
@@ -300,15 +294,12 @@ Distribution::getIdealGroups(const document::BucketId& bucket,
// This should never happen. Config should verify that each group
// has enough groups beneath them.
assert(group._group != nullptr);
- getIdealGroups(bucket, clusterState, *group._group,
- redundancyArray[i], results);
+ getIdealGroups(bucket, clusterState, *group._group, redundancyArray[i], results);
}
}
const Group*
-Distribution::getIdealDistributorGroup(const document::BucketId& bucket,
- const ClusterState& clusterState,
- const Group& parent) const
+Distribution::getIdealDistributorGroup(const document::BucketId& bucket, const ClusterState& clusterState, const Group& parent) const
{
if (parent.isLeafGroup()) {
return &parent;
@@ -357,12 +348,8 @@ Distribution::allDistributorsDown(const Group& g, const ClusterState& cs)
}
void
-Distribution::getIdealNodes(const NodeType& nodeType,
- const ClusterState& clusterState,
- const document::BucketId& bucket,
- std::vector<uint16_t>& resultNodes,
- const char* upStates,
- uint16_t redundancy) const
+Distribution::getIdealNodes(const NodeType& nodeType, const ClusterState& clusterState, const document::BucketId& bucket,
+ std::vector<uint16_t>& resultNodes, const char* upStates, uint16_t redundancy) const
{
if (redundancy == DEFAULT_REDUNDANCY) redundancy = _redundancy;
resultNodes.clear();
@@ -388,8 +375,7 @@ Distribution::getIdealNodes(const NodeType& nodeType,
const Group* group(getIdealDistributorGroup(bucket, clusterState, *_nodeGraph));
if (group == nullptr) {
vespalib::asciistream ss;
- ss << "There is no legal distributor target in state with version "
- << clusterState.getVersion();
+ ss << "There is no legal distributor target in state with version " << clusterState.getVersion();
throw NoDistributorsAvailableException(ss.str(), VESPA_STRLOC);
}
_groupDistribution.push_back(ResultGroup(*group, 1));
@@ -459,9 +445,7 @@ Distribution::getDefaultDistributionConfig(uint16_t redundancy, uint16_t nodeCou
}
std::vector<uint16_t>
-Distribution::getIdealStorageNodes(
- const ClusterState& state, const document::BucketId& bucket,
- const char* upStates) const
+Distribution::getIdealStorageNodes(const ClusterState& state, const document::BucketId& bucket, const char* upStates) const
{
std::vector<uint16_t> nodes;
getIdealNodes(NodeType::STORAGE, state, bucket, nodes, upStates);
@@ -469,28 +453,23 @@ Distribution::getIdealStorageNodes(
}
uint16_t
-Distribution::getIdealDistributorNode(
- const ClusterState& state,
- const document::BucketId& bucket,
- const char* upStates) const
+Distribution::getIdealDistributorNode(const ClusterState& state, const document::BucketId& bucket, const char* upStates) const
{
std::vector<uint16_t> nodes;
getIdealNodes(NodeType::DISTRIBUTOR, state, bucket, nodes, upStates);
assert(nodes.size() <= 1);
if (nodes.empty()) {
vespalib::asciistream ss;
- ss << "There is no legal distributor target in state with version "
- << state.getVersion();
+ ss << "There is no legal distributor target in state with version " << state.getVersion();
throw NoDistributorsAvailableException(ss.str(), VESPA_STRLOC);
}
return nodes[0];
}
std::vector<Distribution::IndexList>
-Distribution::splitNodesIntoLeafGroups(IndexList nodeList) const
+Distribution::splitNodesIntoLeafGroups(vespalib::ConstArrayRef<uint16_t> nodeList) const
{
- std::vector<IndexList> result;
- std::map<uint16_t, IndexList> nodes;
+ vespalib::hash_map<uint16_t, IndexList> nodes(nodeList.size());
for (auto node : nodeList) {
const Group* group((node < _node2Group.size()) ? _node2Group[node] : nullptr);
if (group == nullptr) {
@@ -500,9 +479,16 @@ Distribution::splitNodesIntoLeafGroups(IndexList nodeList) const
nodes[group->getIndex()].push_back(node);
}
}
+ std::vector<uint16_t> sorted;
+ sorted.reserve(nodes.size());
+ for (const auto & entry : nodes) {
+ sorted.push_back(entry.first);
+ }
+ std::sort(sorted.begin(), sorted.end());
+ std::vector<IndexList> result;
result.reserve(nodes.size());
- for (auto & node : nodes) {
- result.emplace_back(std::move(node.second));
+ for (uint16_t groupId : sorted) {
+ result.emplace_back(std::move(nodes.find(groupId)->second));
}
return result;
}
diff --git a/vdslib/src/vespa/vdslib/distribution/distribution.h b/vdslib/src/vespa/vdslib/distribution/distribution.h
index 355b87884c1..8cf93b01630 100644
--- a/vdslib/src/vespa/vdslib/distribution/distribution.h
+++ b/vdslib/src/vespa/vdslib/distribution/distribution.h
@@ -12,6 +12,7 @@
#include <vespa/document/bucket/bucketid.h>
#include <vespa/vdslib/state/nodetype.h>
#include <vespa/vespalib/util/exception.h>
+#include <vespa/vespalib/util/small_vector.h>
namespace vespa::config::content::internal {
class InternalStorDistributionType;
@@ -38,9 +39,9 @@ private:
uint16_t _redundancy;
uint16_t _initialRedundancy;
uint16_t _readyCopies;
- bool _activePerGroup;
- bool _ensurePrimaryPersisted;
- bool _distributorAutoOwnershipTransferOnWholeGroupDown;
+ bool _activePerGroup;
+ bool _ensurePrimaryPersisted;
+ bool _distributorAutoOwnershipTransferOnWholeGroupDown;
vespalib::string _serialized;
struct ResultGroup {
@@ -50,7 +51,7 @@ private:
ResultGroup(const Group& group, uint16_t redundancy) noexcept
: _group(&group), _redundancy(redundancy) {}
- bool operator<(const ResultGroup& other) const {
+ bool operator<(const ResultGroup& other) const noexcept {
return _group->getIndex() < other._group->getIndex();
}
};
@@ -59,32 +60,23 @@ private:
* Get seed to use for ideal state algorithm's random number generator
* to decide which hierarchical group we should pick.
*/
- uint32_t getGroupSeed(
- const document::BucketId&, const ClusterState&,
- const Group&) const;
+ uint32_t getGroupSeed(const document::BucketId&, const ClusterState&, const Group&) const;
/**
* Get seed to use for ideal state algorithm's random number generator
* to decide which distributor node this bucket should be mapped to.
*/
- uint32_t getDistributorSeed(
- const document::BucketId&, const ClusterState&) const;
+ uint32_t getDistributorSeed(const document::BucketId&, const ClusterState&) const;
/**
* Get seed to use for ideal state algorithm's random number generator
* to decide which storage node this bucket should be mapped to.
*/
- uint32_t getStorageSeed(
- const document::BucketId&, const ClusterState&) const;
+ uint32_t getStorageSeed(const document::BucketId&, const ClusterState&) const;
- void getIdealGroups(const document::BucketId& bucket,
- const ClusterState& clusterState,
- const Group& parent,
- uint16_t redundancy,
- std::vector<ResultGroup>& results) const;
+ void getIdealGroups(const document::BucketId& bucket, const ClusterState& clusterState, const Group& parent,
+ uint16_t redundancy, std::vector<ResultGroup>& results) const;
- const Group* getIdealDistributorGroup(const document::BucketId& bucket,
- const ClusterState& clusterState,
- const Group& parent) const;
+ const Group* getIdealDistributorGroup(const document::BucketId& bucket, const ClusterState& clusterState, const Group& parent) const;
/**
* Since distribution object may be used often in ideal state calculations
@@ -97,9 +89,9 @@ private:
public:
class ConfigWrapper {
public:
- ConfigWrapper(ConfigWrapper && rhs) = default;
- ConfigWrapper & operator = (ConfigWrapper && rhs) = default;
- ConfigWrapper(std::unique_ptr<DistributionConfig> cfg);
+ ConfigWrapper(ConfigWrapper && rhs) noexcept = default;
+ ConfigWrapper & operator = (ConfigWrapper && rhs) noexcept = default;
+ ConfigWrapper(std::unique_ptr<DistributionConfig> cfg) noexcept;
~ConfigWrapper();
const DistributionConfig & get() const { return *_cfg; }
private:
@@ -114,33 +106,26 @@ public:
Distribution& operator=(const Distribution&) = delete;
- const vespalib::string& serialize() const { return _serialized; }
+ const vespalib::string& serialize() const noexcept { return _serialized; }
- const Group& getNodeGraph() const { return *_nodeGraph; }
- uint16_t getRedundancy() const { return _redundancy; }
- uint16_t getInitialRedundancy() const { return _initialRedundancy; }
- uint16_t getReadyCopies() const { return _readyCopies; }
- bool ensurePrimaryPersisted() const { return _ensurePrimaryPersisted; }
- bool distributorAutoOwnershipTransferOnWholeGroupDown() const
- { return _distributorAutoOwnershipTransferOnWholeGroupDown; }
- bool activePerGroup() const { return _activePerGroup; }
+ const Group& getNodeGraph() const noexcept { return *_nodeGraph; }
+ uint16_t getRedundancy() const noexcept { return _redundancy; }
+ uint16_t getInitialRedundancy() const noexcept { return _initialRedundancy; }
+ uint16_t getReadyCopies() const noexcept { return _readyCopies; }
+ bool ensurePrimaryPersisted() const noexcept { return _ensurePrimaryPersisted; }
+ bool distributorAutoOwnershipTransferOnWholeGroupDown() const noexcept { return _distributorAutoOwnershipTransferOnWholeGroupDown; }
+ bool activePerGroup() const noexcept { return _activePerGroup; }
- bool operator==(const Distribution& o) const
- { return (_serialized == o._serialized); }
- bool operator!=(const Distribution& o) const
- { return (_serialized != o._serialized); }
+ bool operator==(const Distribution& o) const noexcept { return (_serialized == o._serialized); }
+ bool operator!=(const Distribution& o) const noexcept { return (_serialized != o._serialized); }
void print(std::ostream& out, bool, const std::string&) const override;
/** Simplified wrapper for getIdealNodes() */
- std::vector<uint16_t> getIdealStorageNodes(
- const ClusterState&, const document::BucketId&,
- const char* upStates = "uim") const;
+ std::vector<uint16_t> getIdealStorageNodes(const ClusterState&, const document::BucketId&, const char* upStates = "uim") const;
/** Simplified wrapper for getIdealNodes() */
- uint16_t getIdealDistributorNode(
- const ClusterState&, const document::BucketId&,
- const char* upStates = "uim") const;
+ uint16_t getIdealDistributorNode(const ClusterState&, const document::BucketId&, const char* upStates = "uim") const;
/**
* @throws TooFewBucketBitsInUseException If distribution bit count is
@@ -149,25 +134,22 @@ public:
* in any upstate.
*/
enum { DEFAULT_REDUNDANCY = 0xffff };
- void getIdealNodes(const NodeType&, const ClusterState&,
- const document::BucketId&, std::vector<uint16_t>& nodes,
- const char* upStates = "uim",
- uint16_t redundancy = DEFAULT_REDUNDANCY) const;
+ void getIdealNodes(const NodeType&, const ClusterState&, const document::BucketId&, std::vector<uint16_t>& nodes,
+ const char* upStates, uint16_t redundancy = DEFAULT_REDUNDANCY) const;
/**
* Unit tests can use this function to get raw config for this class to use
* with a really simple setup with no hierarchical grouping. This function
* should not be used by any production code.
*/
- static ConfigWrapper getDefaultDistributionConfig(
- uint16_t redundancy = 2, uint16_t nodeCount = 10);
+ static ConfigWrapper getDefaultDistributionConfig(uint16_t redundancy = 2, uint16_t nodeCount = 10);
/**
* Utility function used by distributor to split copies into groups to
* handle active per group feature.
*/
- using IndexList = std::vector<uint16_t>;
- std::vector<IndexList> splitNodesIntoLeafGroups(IndexList nodes) const;
+ using IndexList = vespalib::SmallVector<uint16_t, 4>;
+ std::vector<IndexList> splitNodesIntoLeafGroups(vespalib::ConstArrayRef<uint16_t> nodes) const;
static bool allDistributorsDown(const Group&, const ClusterState&);
};
diff --git a/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h b/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h
deleted file mode 100644
index bc42df1b49c..00000000000
--- a/vdslib/src/vespa/vdslib/distribution/idealnodecalculator.h
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
- * An interface to implement for a calculator calcuting ideal state. It should
- * be easy to wrap this calculator in a cache. Thus options that seldom change,
- * are taken in as set parameters, such that existing cache can be invalidated.
- */
-#pragma once
-
-#include <vespa/document/bucket/bucketid.h>
-#include <vespa/document/util/printable.h>
-#include <vespa/vdslib/state/node.h>
-#include <vector>
-#include <memory>
-
-namespace storage::lib {
-
-class Distribution;
-class ClusterState;
-
-/**
- * A list of ideal nodes, sorted in preferred order. Wraps a vector to hide
- * unneeded details, and make it easily printable.
- */
-class IdealNodeList : public document::Printable {
- std::vector<Node> _idealNodes;
-
-public:
- IdealNodeList();
- ~IdealNodeList();
-
- void push_back(const Node& node) {
- _idealNodes.push_back(node);
- }
-
- const Node& operator[](uint32_t i) const { return _idealNodes[i]; }
- uint32_t size() const { return _idealNodes.size(); }
- bool contains(const Node& n) const {
- for (uint32_t i=0; i<_idealNodes.size(); ++i) {
- if (n == _idealNodes[i]) return true;
- }
- return false;
- }
- uint16_t indexOf(const Node& n) const {
- for (uint16_t i=0; i<_idealNodes.size(); ++i) {
- if (n == _idealNodes[i]) return i;
- }
- return 0xffff;
- }
-
- void print(std::ostream& out, bool, const std::string &) const override;
-};
-
-/**
- * Simple interface to use for those who needs to calculate ideal nodes.
- */
-class IdealNodeCalculator {
-public:
- using SP = std::shared_ptr<IdealNodeCalculator>;
- enum UpStates {
- UpInit,
- UpInitMaintenance,
- UP_STATE_COUNT
- };
-
- virtual ~IdealNodeCalculator() = default;
-
- virtual IdealNodeList getIdealNodes(const NodeType&,
- const document::BucketId&,
- UpStates upStates = UpInit) const = 0;
-
- // Wrapper functions to make prettier call if nodetype is given.
- IdealNodeList getIdealDistributorNodes(const document::BucketId& bucket,
- UpStates upStates = UpInit) const
- { return getIdealNodes(NodeType::DISTRIBUTOR, bucket, upStates); }
- IdealNodeList getIdealStorageNodes(const document::BucketId& bucket,
- UpStates upStates = UpInit) const
- { return getIdealNodes(NodeType::STORAGE, bucket, upStates); }
-};
-
-
-/**
- * More complex interface that provides a way to alter needed settings not
- * provided in the function call itself.
- */
-class IdealNodeCalculatorConfigurable : public IdealNodeCalculator
-{
-public:
- using SP = std::shared_ptr<IdealNodeCalculatorConfigurable>;
-
- virtual void setDistribution(const Distribution&) = 0;
- virtual void setClusterState(const ClusterState&) = 0;
-};
-
-}
diff --git a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.cpp b/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.cpp
deleted file mode 100644
index da34ec4526a..00000000000
--- a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "idealnodecalculatorimpl.h"
-#include "distribution.h"
-#include <vespa/vespalib/util/exceptions.h>
-#include <ostream>
-#include <cassert>
-
-namespace storage::lib {
-
-IdealNodeList::IdealNodeList() = default;
-IdealNodeList::~IdealNodeList() = default;
-
-void
-IdealNodeList::print(std::ostream& out, bool , const std::string &) const
-{
- out << "[";
- for (uint32_t i=0; i<_idealNodes.size(); ++i) {
- if (i != 0) out << ", ";
- out << _idealNodes[i];
- }
- out << "]";
-}
-
-IdealNodeCalculatorImpl::IdealNodeCalculatorImpl()
- : _distribution(0),
- _clusterState(0)
-{
- initUpStateMapping();
-}
-
-IdealNodeCalculatorImpl::~IdealNodeCalculatorImpl() = default;
-
-void
-IdealNodeCalculatorImpl::setDistribution(const Distribution& d) {
- _distribution = &d;
-}
-void
-IdealNodeCalculatorImpl::setClusterState(const ClusterState& cs) {
- _clusterState = &cs;
-}
-
-IdealNodeList
-IdealNodeCalculatorImpl::getIdealNodes(const NodeType& nodeType,
- const document::BucketId& bucket,
- UpStates upStates) const
-{
- assert(_clusterState != 0);
- assert(_distribution != 0);
- std::vector<uint16_t> nodes;
- _distribution->getIdealNodes(nodeType, *_clusterState, bucket, nodes, _upStates[upStates]);
- IdealNodeList list;
- for (uint32_t i=0; i<nodes.size(); ++i) {
- list.push_back(Node(nodeType, nodes[i]));
- }
- return list;
-}
-
-void
-IdealNodeCalculatorImpl::initUpStateMapping() {
- _upStates.clear();
- _upStates.resize(UP_STATE_COUNT);
- _upStates[UpInit] = "ui";
- _upStates[UpInitMaintenance] = "uim";
- for (uint32_t i=0; i<_upStates.size(); ++i) {
- if (_upStates[i] == 0) throw vespalib::IllegalStateException(
- "Failed to initialize up state. Code likely not updated "
- "after another upstate was added.", VESPA_STRLOC);
- }
-}
-
-}
diff --git a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.h b/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.h
deleted file mode 100644
index 9b36f1094fd..00000000000
--- a/vdslib/src/vespa/vdslib/distribution/idealnodecalculatorimpl.h
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
- * A cache for an ideal nodes implementation. Making it cheap for localized
- * access, regardless of real implementation.
- */
-#pragma once
-
-#include "idealnodecalculator.h"
-
-namespace storage::lib {
-
-class IdealNodeCalculatorImpl : public IdealNodeCalculatorConfigurable {
- std::vector<const char*> _upStates;
- const Distribution* _distribution;
- const ClusterState* _clusterState;
-
-public:
- IdealNodeCalculatorImpl();
- ~IdealNodeCalculatorImpl();
-
- void setDistribution(const Distribution& d) override;
- void setClusterState(const ClusterState& cs) override;
-
- IdealNodeList getIdealNodes(const NodeType& nodeType,
- const document::BucketId& bucket,
- UpStates upStates) const override;
-private:
- void initUpStateMapping();
-};
-
-}
diff --git a/vdslib/src/vespa/vdslib/state/clusterstate.cpp b/vdslib/src/vespa/vdslib/state/clusterstate.cpp
index 6a319af68ef..f4314a6624b 100644
--- a/vdslib/src/vespa/vdslib/state/clusterstate.cpp
+++ b/vdslib/src/vespa/vdslib/state/clusterstate.cpp
@@ -7,7 +7,10 @@
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
+#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/stllike/hash_map_equal.hpp>
#include <sstream>
+#include <cassert>
#include <vespa/log/log.h>
LOG_SETUP(".vdslib.state.cluster");
@@ -24,9 +27,9 @@ namespace storage::lib {
ClusterState::ClusterState()
: Printable(),
_version(0),
+ _nodeCount(),
_clusterState(&State::DOWN),
_nodeStates(),
- _nodeCount(2),
_description(),
_distributionBits(16)
{ }
@@ -41,14 +44,10 @@ struct NodeData {
NodeData() : empty(true), node(NodeType::STORAGE, 0), ost() {}
- void addTo(std::map<Node, NodeState>& nodeStates,
- std::vector<uint16_t>& nodeCount)
- {
+ void addTo(ClusterState::NodeMap & nodeStates, ClusterState::NodeCounts & nodeCount) {
if (!empty) {
NodeState state(ost.str(), &node.getType());
- if (state != NodeState(node.getType(), State::UP)
- || state.getDescription().size() > 0)
- {
+ if ((state != NodeState(node.getType(), State::UP)) || (state.getDescription().size() > 0)) {
nodeStates.insert(std::make_pair(node, state));
}
if (nodeCount[node.getType()] <= node.getIndex()) {
@@ -63,9 +62,9 @@ struct NodeData {
ClusterState::ClusterState(const vespalib::string& serialized)
: Printable(),
_version(0),
+ _nodeCount(),
_clusterState(&State::UP),
_nodeStates(),
- _nodeCount(2),
_description(),
_distributionBits(16)
{
@@ -74,13 +73,13 @@ ClusterState::ClusterState(const vespalib::string& serialized)
NodeData nodeData;
vespalib::string lastAbsolutePath;
- for (vespalib::StringTokenizer::Iterator it = st.begin(); it != st.end(); ++it) {
- vespalib::string::size_type index = it->find(':');
+ for (const auto & token : st) {
+ vespalib::string::size_type index = token.find(':');
if (index == vespalib::string::npos) {
- throw IllegalArgumentException("Token " + *it + " does not contain ':': " + serialized, VESPA_STRLOC);
+ throw IllegalArgumentException("Token " + token + " does not contain ':': " + serialized, VESPA_STRLOC);
}
- vespalib::string key = it->substr(0, index);
- vespalib::stringref value = it->substr(index + 1);
+ vespalib::string key = token.substr(0, index);
+ vespalib::stringref value = token.substr(index + 1);
if (key.size() > 0 && key[0] == '.') {
if (lastAbsolutePath == "") {
throw IllegalArgumentException("The first path in system state string needs to be absolute", VESPA_STRLOC);
@@ -111,7 +110,9 @@ ClusterState::parse(vespalib::stringref key, vespalib::stringref value, NodeData
break;
case 'b':
if (key == "bits") {
- _distributionBits = atoi(value.data());
+ uint32_t numBits = atoi(value.data());
+ assert(numBits <= 64);
+ _distributionBits = numBits;
return true;
}
break;
@@ -138,7 +139,7 @@ ClusterState::parse(vespalib::stringref key, vespalib::stringref value, NodeData
bool
ClusterState::parseSorD(vespalib::stringref key, vespalib::stringref value, NodeData & nodeData) {
- const NodeType* nodeType(0);
+ const NodeType* nodeType = nullptr;
vespalib::string::size_type dot = key.find('.');
vespalib::stringref type(dot == vespalib::string::npos
? key : key.substr(0, dot));
@@ -147,10 +148,9 @@ ClusterState::parseSorD(vespalib::stringref key, vespalib::stringref value, Node
} else if (type == "distributor") {
nodeType = &NodeType::DISTRIBUTOR;
}
- if (nodeType == 0) return false;
+ if (nodeType == nullptr) return false;
if (dot == vespalib::string::npos) { // Entry that set node counts
- uint16_t nodeCount = 0;
- nodeCount = atoi(value.data());
+ uint16_t nodeCount = atoi(value.data());
if (nodeCount > _nodeCount[*nodeType] ) {
_nodeCount[*nodeType] = nodeCount;
@@ -158,12 +158,9 @@ ClusterState::parseSorD(vespalib::stringref key, vespalib::stringref value, Node
return true;
}
vespalib::string::size_type dot2 = key.find('.', dot + 1);
- Node node;
- if (dot2 == vespalib::string::npos) {
- node = Node(*nodeType, atoi(key.substr(dot + 1).data()));
- } else {
- node = Node(*nodeType, atoi(key.substr(dot + 1, dot2 - dot - 1).data()));
- }
+ Node node(*nodeType, (dot2 == vespalib::string::npos)
+ ? atoi(key.substr(dot + 1).data())
+ : atoi(key.substr(dot + 1, dot2 - dot - 1).data()));
if (node.getIndex() >= _nodeCount[*nodeType]) {
vespalib::asciistream ost;
@@ -183,70 +180,69 @@ ClusterState::parseSorD(vespalib::stringref key, vespalib::stringref value, Node
return true;
}
+struct SeparatorPrinter {
+ bool first;
+ SeparatorPrinter() : first(true) {}
+ const char * toString() {
+ if (first) {
+ first = false;
+ return "";
+ }
+ return " ";
+ }
+};
+
namespace {
- struct SeparatorPrinter {
- bool first;
- SeparatorPrinter() : first(true) {}
- const char * toString() {
- if (first) {
- first = false;
- return "";
+
+void
+serialize_node(vespalib::asciistream & out, const Node & node, const NodeState & state) {
+ vespalib::asciistream prefix;
+ prefix << "." << node.getIndex() << ".";
+ vespalib::asciistream ost;
+ state.serialize(ost, prefix.str(), false);
+ vespalib::stringref content = ost.str();
+ if ( !content.empty()) {
+ out << " " << content;
+ }
+}
+
+}
+
+void
+ClusterState::serialize_nodes(vespalib::asciistream & out, SeparatorPrinter & sep, const NodeType & nodeType,
+ const std::vector<NodeStatePair> & nodeStates) const
+{
+ uint16_t nodeCount = getNodeCount(nodeType);
+ if (nodeCount > 0) {
+ out << sep.toString() << nodeType.serialize() << ":" << nodeCount;
+ for (const auto & entry : nodeStates) {
+ if (entry.first.getType() == nodeType) {
+ serialize_node(out, entry.first, entry.second);
}
- return " ";
}
- };
+ }
}
void
-ClusterState::serialize(vespalib::asciistream & out, bool ignoreNewFeatures) const
+ClusterState::serialize(vespalib::asciistream & out) const
{
SeparatorPrinter sep;
- if (!ignoreNewFeatures && _version != 0) {
+ if (_version != 0) {
out << sep.toString() << "version:" << _version;
}
- if (!ignoreNewFeatures && *_clusterState != State::UP) {
+ if (*_clusterState != State::UP) {
out << sep.toString() << "cluster:" << _clusterState->serialize();
}
- if (!ignoreNewFeatures && _distributionBits != 16) {
+ if (_distributionBits != 16) {
out << sep.toString() << "bits:" << _distributionBits;
}
- uint16_t distCount = getNodeCount(NodeType::DISTRIBUTOR);
- if (ignoreNewFeatures || distCount > 0) {
- out << sep.toString() << "distributor:" << distCount;
- for (std::map<Node, NodeState>::const_iterator it =
- _nodeStates.begin();
- it != _nodeStates.end(); ++it)
- {
- if (it->first.getType() != NodeType::DISTRIBUTOR) continue;
- vespalib::asciistream prefix;
- prefix << "." << it->first.getIndex() << ".";
- vespalib::asciistream ost;
- it->second.serialize(ost, prefix.str(), false);
- vespalib::stringref content = ost.str();
- if (content.size() > 0) {
- out << " " << content;
- }
- }
- }
- uint16_t storCount = getNodeCount(NodeType::STORAGE);
- if (ignoreNewFeatures || storCount > 0) {
- out << sep.toString() << "storage:" << storCount;
- for (std::map<Node, NodeState>::const_iterator it =
- _nodeStates.begin();
- it != _nodeStates.end(); ++it)
- {
- if (it->first.getType() != NodeType::STORAGE) continue;
- vespalib::asciistream prefix;
- prefix << "." << it->first.getIndex() << ".";
- vespalib::asciistream ost;
- it->second.serialize(ost, prefix.str(), false);
- vespalib::stringref content = ost.str();
- if ( !content.empty()) {
- out << " " << content;
- }
- }
- }
+ if ((getNodeCount(NodeType::DISTRIBUTOR) + getNodeCount(NodeType::STORAGE)) == 0u) return;
+
+ std::vector<NodeStatePair> nodeStates(_nodeStates.cbegin(), _nodeStates.cend());
+ std::sort(nodeStates.begin(), nodeStates.end(), [](const NodeStatePair &a, const NodeStatePair &b) { return a.first < b.first; });
+ serialize_nodes(out, sep, NodeType::DISTRIBUTOR, nodeStates);
+ serialize_nodes(out, sep, NodeType::STORAGE, nodeStates);
}
bool
@@ -265,12 +261,6 @@ ClusterState::operator!=(const ClusterState& other) const noexcept
return !(*this == other);
}
-uint16_t
-ClusterState::getNodeCount(const NodeType& type) const noexcept
-{
- return _nodeCount[type];
-}
-
namespace {
[[noreturn]] void throwUnknownType(const Node & node) __attribute__((noinline));
void throwUnknownType(const Node & node) {
@@ -282,7 +272,7 @@ const NodeState&
ClusterState::getNodeState(const Node& node) const
{
// If it actually has an entry in map, return that
- auto it = _nodeStates.find(node);
+ const auto it = _nodeStates.find(node);
if (it != _nodeStates.end()) return it->second;
// If beyond node count, the node is down.
@@ -307,9 +297,7 @@ void
ClusterState::setClusterState(const State& state)
{
if (!state.validClusterState()) {
- throw vespalib::IllegalStateException(
- state.toString(true) + " is not a legal cluster state",
- VESPA_STRLOC);
+ throw vespalib::IllegalStateException(state.toString(true) + " is not a legal cluster state", VESPA_STRLOC);
}
_clusterState = &state;
}
@@ -319,17 +307,12 @@ ClusterState::setNodeState(const Node& node, const NodeState& state)
{
state.verifySupportForNodeType(node.getType());
if (node.getIndex() >= _nodeCount[node.getType()]) {
- for (uint32_t i = _nodeCount[node.getType()]; i < node.getIndex(); ++i)
- {
- _nodeStates.insert(std::make_pair(
- Node(node.getType(), i),
- NodeState(node.getType(), State::DOWN)));
+ for (uint32_t i = _nodeCount[node.getType()]; i < node.getIndex(); ++i) {
+ _nodeStates.insert(std::make_pair(Node(node.getType(), i), NodeState(node.getType(), State::DOWN)));
}
_nodeCount[node.getType()] = node.getIndex() + 1;
}
- if (state == NodeState(node.getType(), State::UP)
- && state.getDescription().size() == 0)
- {
+ if ((state == NodeState(node.getType(), State::UP)) && state.getDescription().empty()) {
_nodeStates.erase(node);
} else {
_nodeStates.insert(std::make_pair(node, state));
@@ -339,32 +322,34 @@ ClusterState::setNodeState(const Node& node, const NodeState& state)
}
void
-ClusterState::print(std::ostream& out, bool verbose,
- const std::string&) const
+ClusterState::print(std::ostream& out, bool verbose, const std::string&) const
{
(void) verbose;
vespalib::asciistream tmp;
- serialize(tmp, false);
+ serialize(tmp);
out << tmp.str();
}
void
ClusterState::removeExtraElements()
{
+ removeExtraElements(NodeType::STORAGE);
+ removeExtraElements(NodeType::DISTRIBUTOR);
+}
+
+void
+ClusterState::removeExtraElements(const NodeType & type)
+{
// Simplify the system state by removing the last indexes if the nodes
// are down.
- for (uint32_t i=0; i<2; ++i) {
- const NodeType& type(i == 0 ? NodeType::STORAGE
- : NodeType::DISTRIBUTOR);
- for (int32_t index = _nodeCount[type]; index >= 0; --index) {
- Node node(type, index - 1);
- std::map<Node, NodeState>::iterator it(_nodeStates.find(node));
- if (it == _nodeStates.end()) break;
- if (it->second.getState() != State::DOWN) break;
- if (it->second.getDescription() != "") break;
- _nodeStates.erase(it);
- --_nodeCount[type];
- }
+ for (int32_t index = _nodeCount[type]; index >= 0; --index) {
+ Node node(type, index - 1);
+ const auto it = _nodeStates.find(node);
+ if (it == _nodeStates.end()) break;
+ if (it->second.getState() != State::DOWN) break;
+ if (it->second.getDescription() != "") break;
+ _nodeStates.erase(it);
+ --_nodeCount[type];
}
}
@@ -413,90 +398,89 @@ void
ClusterState::printStateGroupwise(std::ostream& out, const Distribution& dist,
bool verbose, const std::string& indent) const
{
- out << "ClusterState(Version: " << _version << ", Cluster state: "
- << _clusterState->toString(true) << ", Distribution bits: "
- << _distributionBits << ") {";
+ out << "ClusterState(Version: " << _version << ", Cluster state: " << _clusterState->toString(true)
+ << ", Distribution bits: " << _distributionBits << ") {";
printStateGroupwise(out, dist.getNodeGraph(), verbose, indent + " ", true);
out << "\n" << indent << "}";
}
namespace {
- template<typename T>
- std::string getNumberSpec(const std::vector<T>& numbers) {
- std::ostringstream ost;
- bool first = true;
- uint32_t firstInRange = numbers.size() == 0 ? 0 : numbers[0];;
- uint32_t lastInRange = firstInRange;
- for (uint32_t i=1; i<=numbers.size(); ++i) {
- if (i < numbers.size() && numbers[i] == lastInRange + 1) {
- ++lastInRange;
+
+template<typename T>
+std::string getNumberSpec(const std::vector<T>& numbers) {
+ std::ostringstream ost;
+ bool first = true;
+    uint32_t firstInRange = numbers.size() == 0 ? 0 : numbers[0];
+ uint32_t lastInRange = firstInRange;
+ for (uint32_t i=1; i<=numbers.size(); ++i) {
+ if (i < numbers.size() && numbers[i] == lastInRange + 1) {
+ ++lastInRange;
+ } else {
+ if (first) {
+ first = false;
} else {
- if (first) {
- first = false;
- } else {
- ost << ",";
- }
- if (firstInRange == lastInRange) {
- ost << firstInRange;
- } else {
- ost << firstInRange << "-" << lastInRange;
- }
- if (i < numbers.size()) {
- firstInRange = lastInRange = numbers[i];
- }
+ ost << ",";
+ }
+ if (firstInRange == lastInRange) {
+ ost << firstInRange;
+ } else {
+ ost << firstInRange << "-" << lastInRange;
+ }
+ if (i < numbers.size()) {
+ firstInRange = lastInRange = numbers[i];
}
}
- return ost.str();
}
+ return ost.str();
+}
+
+}
+
+size_t
+ClusterState::printStateGroupwise(std::ostream& out, const Group& group, bool verbose,
+ const std::string& indent, const NodeType& nodeType) const
+{
+ NodeState defState(nodeType, State::UP);
+ size_t printed = 0;
+ for (uint16_t nodeId : group.getNodes()) {
+ Node node(nodeType, nodeId);
+ const NodeState& state(getNodeState(node));
+ if (state != defState) {
+ out << "\n" << indent << " " << node << ": ";
+ state.print(out, verbose, indent + " ");
+ printed++;
+ }
+ }
+ return printed;
}
void
-ClusterState::printStateGroupwise(std::ostream& out, const Group& group,
- bool verbose, const std::string& indent,
- bool rootGroup) const
+ClusterState::printStateGroupwise(std::ostream& out, const Group& group, bool verbose,
+ const std::string& indent, bool rootGroup) const
{
if (rootGroup) {
out << "\n" << indent << "Top group";
} else {
- out << "\n" << indent << "Group " << group.getIndex() << ": "
- << group.getName();
+ out << "\n" << indent << "Group " << group.getIndex() << ": " << group.getName();
if (group.getCapacity() != 1.0) {
out << ", capacity " << group.getCapacity();
}
}
out << ".";
if (group.isLeafGroup()) {
- out << " " << group.getNodes().size() << " node"
- << (group.getNodes().size() != 1 ? "s" : "") << " ["
- << getNumberSpec(group.getNodes()) << "] {";
- bool printedAny = false;
- for (uint32_t j=0; j<2; ++j) {
- const NodeType& nodeType(
- j == 0 ? NodeType::DISTRIBUTOR : NodeType::STORAGE);
- NodeState defState(nodeType, State::UP);
- for (uint32_t i=0; i<group.getNodes().size(); ++i) {
- Node node(nodeType, group.getNodes()[i]);
- const NodeState& state(getNodeState(node));
- if (state != defState) {
- out << "\n" << indent << " " << node << ": ";
- state.print(out, verbose, indent + " ");
- printedAny = true;
- }
- }
- }
- if (!printedAny) {
+ out << " " << group.getNodes().size() << " node" << (group.getNodes().size() != 1 ? "s" : "")
+ << " [" << getNumberSpec(group.getNodes()) << "] {";
+ size_t printed = printStateGroupwise(out, group, verbose, indent, NodeType::DISTRIBUTOR) +
+ printStateGroupwise(out, group, verbose, indent, NodeType::STORAGE);
+ if (printed == 0) {
out << "\n" << indent << " All nodes in group up and available.";
}
} else {
- const std::map<uint16_t, Group*>& children(group.getSubGroups());
- out << " " << children.size() << " branch"
- << (children.size() != 1 ? "es" : "") << " with distribution "
- << group.getDistributionSpec() << " {";
- for (std::map<uint16_t, Group*>::const_iterator it = children.begin();
- it != children.end(); ++it)
- {
- printStateGroupwise(out, *it->second, verbose,
- indent + " ", false);
+ const auto & children(group.getSubGroups());
+ out << " " << children.size() << " branch" << (children.size() != 1 ? "es" : "")
+ << " with distribution " << group.getDistributionSpec() << " {";
+ for (const auto & child : children) {
+        printStateGroupwise(out, *child.second, verbose, indent + "  ", false);
}
}
out << "\n" << indent << "}";
diff --git a/vdslib/src/vespa/vdslib/state/clusterstate.h b/vdslib/src/vespa/vdslib/state/clusterstate.h
index 44af81f52ce..90ec7c1aa65 100644
--- a/vdslib/src/vespa/vdslib/state/clusterstate.h
+++ b/vdslib/src/vespa/vdslib/state/clusterstate.h
@@ -10,26 +10,21 @@
#include "node.h"
#include "nodestate.h"
-#include <map>
+#include <vespa/vespalib/stllike/hash_map.h>
+#include <array>
namespace storage::lib {
class Distribution;
class Group;
struct NodeData;
+struct SeparatorPrinter;
class ClusterState : public document::Printable {
- uint32_t _version;
- const State* _clusterState;
- std::map<Node, NodeState> _nodeStates;
- std::vector<uint16_t> _nodeCount;
- vespalib::string _description;
- uint16_t _distributionBits;
-
- void getTextualDifference(std::ostringstream& builder, const NodeType& type,
- const ClusterState& other) const;
-
public:
+ using NodeStatePair = std::pair<Node, NodeState>;
+ using NodeMap = vespalib::hash_map<Node, NodeState>;
+ using NodeCounts = std::array<uint16_t, 2>;
using CSP = std::shared_ptr<const ClusterState>;
using SP = std::shared_ptr<ClusterState>;
using UP = std::unique_ptr<ClusterState>;
@@ -43,17 +38,17 @@ public:
~ClusterState();
std::string getTextualDifference(const ClusterState& other) const;
- void serialize(vespalib::asciistream & out, bool ignoreNewFeatures) const;
+ void serialize(vespalib::asciistream & out) const;
bool operator==(const ClusterState& other) const noexcept;
bool operator!=(const ClusterState& other) const noexcept;
- uint32_t getVersion() const { return _version; }
+ uint32_t getVersion() const noexcept { return _version; }
/**
* Returns the smallest number above the highest node index found of the
* given type that is not down.
*/
- uint16_t getNodeCount(const NodeType& type) const noexcept;
+ uint16_t getNodeCount(const NodeType& type) const noexcept { return _nodeCount[type]; }
uint16_t getDistributionBitCount() const noexcept { return _distributionBits; }
const State& getClusterState() const noexcept { return *_clusterState; }
const NodeState& getNodeState(const Node& node) const;
@@ -65,9 +60,7 @@ public:
void print(std::ostream& out, bool verbose, const std::string& indent) const override;
- void printStateGroupwise(std::ostream& out,
- const Distribution&, bool verbose = false,
- const std::string& indent = "") const;
+ void printStateGroupwise(std::ostream& out, const Distribution&, bool verbose, const std::string& indent) const;
private:
// Preconditions: `key` and `value` MUST point into null-terminated strings.
@@ -75,9 +68,18 @@ private:
// Preconditions: `key` and `value` MUST point into null-terminated strings.
bool parseSorD(vespalib::stringref key, vespalib::stringref value, NodeData & nodeData);
void removeExtraElements();
- void printStateGroupwise(std::ostream& out, const Group&, bool verbose,
- const std::string& indent, bool rootGroup) const;
-
+ void removeExtraElements(const NodeType& type);
+ void printStateGroupwise(std::ostream& out, const Group&, bool verbose, const std::string& indent, bool rootGroup) const;
+ void getTextualDifference(std::ostringstream& builder, const NodeType& type, const ClusterState& other) const;
+ size_t printStateGroupwise(std::ostream& out, const Group&, bool verbose, const std::string& indent, const NodeType& type) const;
+ void serialize_nodes(vespalib::asciistream & out, SeparatorPrinter & sep, const NodeType & nodeType,
+ const std::vector<NodeStatePair> & nodeStates) const;
+ uint32_t _version;
+ NodeCounts _nodeCount;
+ const State* _clusterState;
+ NodeMap _nodeStates;
+ vespalib::string _description;
+ uint16_t _distributionBits;
};
}
diff --git a/vdslib/src/vespa/vdslib/state/node.h b/vdslib/src/vespa/vdslib/state/node.h
index 2e33e54c638..49c8f0e641b 100644
--- a/vdslib/src/vespa/vdslib/state/node.h
+++ b/vdslib/src/vespa/vdslib/state/node.h
@@ -13,24 +13,25 @@ namespace storage::lib {
class Node {
const NodeType* _type;
- uint16_t _index;
+ uint16_t _index;
public:
Node() noexcept : _type(&NodeType::STORAGE), _index(0) { }
Node(const NodeType& type, uint16_t index) noexcept
: _type(&type), _index(index) { }
- const NodeType& getType() const { return *_type; }
- uint16_t getIndex() const { return _index; }
+ const NodeType& getType() const noexcept { return *_type; }
+ uint16_t getIndex() const noexcept { return _index; }
+ uint32_t hash() const noexcept { return (_index << 1) | *_type; }
- bool operator==(const Node& other) const {
+ bool operator==(const Node& other) const noexcept {
return (other._index == _index && *other._type == *_type);
}
- bool operator!=(const Node& other) const {
+ bool operator!=(const Node& other) const noexcept {
return (other._index != _index || *other._type != *_type);
}
- bool operator<(const Node& n) const {
+ bool operator<(const Node& n) const noexcept {
if (*_type != *n._type) return (*_type < *n._type);
return (_index < n._index);
}
diff --git a/vespa-athenz/pom.xml b/vespa-athenz/pom.xml
index 7c3c982af84..66b369f00fe 100644
--- a/vespa-athenz/pom.xml
+++ b/vespa-athenz/pom.xml
@@ -275,6 +275,52 @@
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client-apache-v2</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpcore</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.google.http-client</groupId>
+ <artifactId>google-http-client</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpcore</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.httpcomponents</groupId>
+ <artifactId>httpclient</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
+ <dependency>
+ <groupId>com.google.auth</groupId>
+ <artifactId>google-auth-library-oauth2-http</artifactId>
+ <exclusions>
+ <exclusion>
+ <groupId>com.google.guava</groupId>
+ <artifactId>guava</artifactId>
+ </exclusion>
+ </exclusions>
+ </dependency>
</dependencies>
<build>
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java
new file mode 100644
index 00000000000..bbdc3c2b372
--- /dev/null
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/gcp/GcpCredentials.java
@@ -0,0 +1,180 @@
+package com.yahoo.vespa.athenz.gcp;
+
+import com.google.api.client.http.apache.v2.ApacheHttpTransport;
+import com.google.auth.http.HttpTransportFactory;
+import com.google.auth.oauth2.ExternalAccountCredentials;
+import com.yahoo.security.token.TokenDomain;
+import com.yahoo.security.token.TokenGenerator;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.Slime;
+import com.yahoo.slime.SlimeUtils;
+import com.yahoo.vespa.athenz.api.AthenzDomain;
+import com.yahoo.vespa.athenz.identity.ServiceIdentityProvider;
+import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
+import org.apache.http.impl.client.HttpClientBuilder;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
+import java.util.Objects;
+
+public class GcpCredentials {
+ private static final TokenDomain domain = TokenDomain.of("athenz-gcp-oauth2-nonce");
+
+ final private InputStream tokenApiStream;
+ private final HttpTransportFactory httpTransportFactory;
+
+ private GcpCredentials(Builder builder) {
+ String clientId = builder.athenzDomain.getName() + ".gcp";
+ String audience = String.format("//iam.googleapis.com/projects/%s/locations/global/workloadIdentityPools/%s/providers/%s",
+ builder.projectNumber, builder.workloadPoolName, builder.workloadProviderName);
+ String serviceUrl = String.format("https://iamcredentials.googleapis.com/v1/projects/-/serviceAccounts/%s@%s.iam.gserviceaccount.com:generateAccessToken",
+ builder.serviceAccountName, builder.projectName);
+ String scope = URLEncoder.encode(generateIdTokenScope(builder.athenzDomain.getName(), builder.role), StandardCharsets.UTF_8);
+ String redirectUri = URLEncoder.encode(generateRedirectUri(clientId, builder.redirectURISuffix), StandardCharsets.UTF_8);
+ String tokenUrl = String.format("%s/oauth2/auth?response_type=id_token&client_id=%s&redirect_uri=%s&scope=%s&nonce=%s&keyType=EC&fullArn=true&output=json",
+ builder.ztsUrl, clientId, redirectUri, scope, TokenGenerator.generateToken(domain, "", 32).secretTokenString());
+
+ tokenApiStream = createTokenAPIStream(audience, serviceUrl, tokenUrl, builder.tokenLifetimeSeconds);
+ SSLConnectionSocketFactory sslConnectionSocketFactory = new SSLConnectionSocketFactory(builder.identityProvider.getIdentitySslContext());
+ HttpClientBuilder httpClientBuilder = ApacheHttpTransport.newDefaultHttpClientBuilder()
+ .setSSLSocketFactory(sslConnectionSocketFactory);
+ httpTransportFactory = () -> new ApacheHttpTransport(httpClientBuilder.build());
+ }
+
+ public ExternalAccountCredentials getCredential() throws IOException {
+ return ExternalAccountCredentials.fromStream(tokenApiStream, httpTransportFactory);
+ }
+
+ private InputStream createTokenAPIStream(final String audience, final String serviceUrl, final String tokenUrl,
+ int tokenLifetimeSeconds) {
+
+ Slime root = new Slime();
+ Cursor c = root.setObject();
+
+ c.setString("type", "external_account");
+ c.setString("audience", audience);
+ c.setString("subject_token_type", "urn:ietf:params:oauth:token-type:jwt");
+ c.setString("token_url", "https://sts.googleapis.com/v1/token");
+
+ c.setString("service_account_impersonation_url", serviceUrl);
+ Cursor sai = c.setObject("service_account_impersonation");
+ sai.setLong("token_lifetime_seconds", tokenLifetimeSeconds);
+
+ Cursor credentialSource = c.setObject("credential_source");
+ credentialSource.setString("url", tokenUrl);
+
+ Cursor credentialSourceFormat = credentialSource.setObject("format");
+ credentialSourceFormat.setString("type", "json");
+ credentialSourceFormat.setString("subject_token_field_name", "id_token");
+
+ try {
+ return new ByteArrayInputStream(SlimeUtils.toJsonBytes(root));
+ } catch (IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private static String generateIdTokenScope(final String domainName, String roleName) {
+ StringBuilder scope = new StringBuilder(256);
+ scope.append("openid");
+ scope.append(' ').append(domainName).append(":role.").append(roleName);
+ return scope.toString();
+ }
+
+ private static String generateRedirectUri(final String clientId, String uriSuffix) {
+ int idx = clientId.lastIndexOf('.');
+ if (idx == -1) {
+ return "";
+ }
+ final String dashDomain = clientId.substring(0, idx).replace('.', '-');
+ final String service = clientId.substring(idx + 1);
+ return "https://" + service + "." + dashDomain + "." + uriSuffix;
+ }
+
+
+ public static class Builder {
+ private String ztsUrl;
+ private ServiceIdentityProvider identityProvider;
+ private String redirectURISuffix;
+ private AthenzDomain athenzDomain;
+ private String role;
+ private String projectName;
+ private String projectNumber;
+ private String serviceAccountName;
+
+ private int tokenLifetimeSeconds = 3600; // default to 1 hour lifetime
+ private String workloadPoolName = "athenz";
+ private String workloadProviderName = "athenz";
+
+ public GcpCredentials build() {
+ Objects.requireNonNull(ztsUrl);
+ Objects.requireNonNull(identityProvider);
+ Objects.requireNonNull(redirectURISuffix);
+ Objects.requireNonNull(athenzDomain);
+ Objects.requireNonNull(role);
+ Objects.requireNonNull(projectName);
+ Objects.requireNonNull(projectNumber);
+ Objects.requireNonNull(serviceAccountName);
+
+ return new GcpCredentials(this);
+ }
+
+ public Builder setZtsUrl(String ztsUrl) {
+ this.ztsUrl = ztsUrl;
+ return this;
+ }
+
+ public Builder identityProvider(ServiceIdentityProvider provider) {
+ this.identityProvider = provider;
+ return this;
+ }
+
+ public Builder redirectURISuffix(String redirectURISuffix) {
+ this.redirectURISuffix = redirectURISuffix;
+ return this;
+ }
+
+ public Builder athenzDomain(AthenzDomain athenzDomain) {
+ this.athenzDomain = athenzDomain;
+ return this;
+ }
+
+ public Builder role(String gcpRole) {
+ this.role = gcpRole;
+ return this;
+ }
+
+ public Builder projectName(String projectName) {
+ this.projectName = projectName;
+ return this;
+ }
+
+ public Builder projectNumber(String projectNumber) {
+ this.projectNumber = projectNumber;
+ return this;
+ }
+
+ public Builder serviceAccountName(String serviceAccountName) {
+ this.serviceAccountName = serviceAccountName;
+ return this;
+ }
+
+ public Builder tokenLifetimeSeconds(int tokenLifetimeSeconds) {
+ this.tokenLifetimeSeconds = tokenLifetimeSeconds;
+ return this;
+ }
+
+ public Builder workloadPoolName(String workloadPoolName) {
+ this.workloadPoolName = workloadPoolName;
+ return this;
+ }
+
+ public Builder workloadProviderName(String workloadProviderName) {
+ this.workloadProviderName = workloadProviderName;
+ return this;
+ }
+ }
+}
diff --git a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
index 7684e3ea2ae..581ccd1d317 100644
--- a/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
+++ b/vespa-dependencies-enforcer/allowed-maven-dependencies.txt
@@ -24,13 +24,20 @@ com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.15.2
com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.15.2
com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.15.2
com.github.spotbugs:spotbugs-annotations:3.1.9
+com.google.auth:google-auth-library-credentials:1.19.0
+com.google.auth:google-auth-library-oauth2-http:1.19.0
+com.google.auto.value:auto-value-annotations:1.10.1
com.google.code.findbugs:jsr305:3.0.2
+com.google.code.gson:gson:2.10
com.google.errorprone:error_prone_annotations:2.18.0
com.google.guava:failureaccess:1.0.1
com.google.guava:guava:32.1.1-jre
+com.google.http-client:google-http-client:1.43.3
+com.google.http-client:google-http-client-apache-v2:1.43.3
+com.google.http-client:google-http-client-gson:1.42.3
com.google.inject:guice:4.2.3:no_aop
com.google.j2objc:j2objc-annotations:2.8
-com.google.protobuf:protobuf-java:3.21.7
+com.google.protobuf:protobuf-java:3.24.0
com.ibm.icu:icu4j:70.1
com.intellij:annotations:9.0.4
com.microsoft.onnxruntime:onnxruntime:1.13.1
@@ -53,6 +60,7 @@ commons-io:commons-io:2.11.0
commons-logging:commons-logging:1.2
io.airlift:airline:0.9
io.dropwizard.metrics:metrics-core:3.2.5
+io.grpc:grpc-context:1.27.2
io.jsonwebtoken:jjwt-api:0.11.5
io.jsonwebtoken:jjwt-impl:0.11.5
io.jsonwebtoken:jjwt-jackson:0.11.5
@@ -67,6 +75,8 @@ io.netty:netty-transport:4.1.94.Final
io.netty:netty-transport-classes-epoll:4.1.94.Final
io.netty:netty-transport-native-epoll:4.1.94.Final
io.netty:netty-transport-native-unix-common:4.1.94.Final
+io.opencensus:opencensus-api:0.31.1
+io.opencensus:opencensus-contrib-http-util:0.31.1
io.prometheus:simpleclient:0.6.0
io.prometheus:simpleclient_common:0.6.0
javax.annotation:javax.annotation-api:1.2
diff --git a/vespalib/src/tests/guard/guard_test.cpp b/vespalib/src/tests/guard/guard_test.cpp
index 9e5e7e55cc6..c61c4874eff 100644
--- a/vespalib/src/tests/guard/guard_test.cpp
+++ b/vespalib/src/tests/guard/guard_test.cpp
@@ -7,20 +7,7 @@
using namespace vespalib;
-class Test : public TestApp
-{
-public:
- void testFilePointer();
- void testFileDescriptor();
- void testDirPointer();
- void testValueGuard();
- void testMaxValueGuard();
- void testCounterGuard();
- int Main() override;
-};
-
-void
-Test::testFilePointer()
+TEST("testFilePointer")
{
{
FilePointer file(fopen("bogus", "r"));
@@ -72,8 +59,7 @@ Test::testFilePointer()
}
}
-void
-Test::testFileDescriptor()
+TEST("testFileDescriptor")
{
{
FileDescriptor file(open("bogus", O_RDONLY));
@@ -126,124 +112,7 @@ Test::testFileDescriptor()
}
}
-void
-Test::testDirPointer()
-{
- {
- DirPointer dir(opendir("bogus"));
- EXPECT_TRUE(!dir.valid());
- }
- {
- DirPointer dir(opendir(TEST_PATH("").c_str()));
- EXPECT_TRUE(dir.valid());
-
- dirent *de;
- bool foundGuardCpp = false;
- while ((de = readdir(dir)) != NULL) {
- if (strcmp(de->d_name, "guard_test.cpp") == 0) {
- foundGuardCpp = true;
- }
- }
- EXPECT_TRUE(foundGuardCpp);
- }
- {
- DIR *dp = NULL;
- {
- DirPointer dir(opendir("."));
- EXPECT_TRUE(dir.valid());
- dp = dir;
- }
- EXPECT_TRUE(dp != NULL);
- // EXPECT_TRUE(readdir(dp) == NULL);
- }
- {
- DirPointer dir(opendir("."));
- EXPECT_TRUE(dir.valid());
- dir.reset(opendir("."));
- EXPECT_TRUE(dir.valid());
-
- DIR *ref = dir.dp();
- DIR *dp = dir.release();
- EXPECT_TRUE(dp != NULL);
- EXPECT_TRUE(dp == ref);
- EXPECT_TRUE(!dir.valid());
- EXPECT_TRUE(dir.dp() == NULL);
- closedir(dp);
- }
-}
-
-void
-Test::testValueGuard()
-{
- int value = 10;
- {
- ValueGuard<int> guard(value);
- value = 20;
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 10);
- {
- ValueGuard<int> guard(value, 50);
- value = 20;
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 50);
- {
- ValueGuard<int> guard(value);
- value = 20;
- guard.update(100);
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 100);
- {
- ValueGuard<int> guard(value);
- value = 20;
- guard.dismiss();
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 20);
-}
-
-void
-Test::testMaxValueGuard()
-{
- int value = 10;
- {
- MaxValueGuard<int> guard(value);
- value = 20;
- EXPECT_TRUE(value == 20);
- }
- EXPECT_TRUE(value == 10);
- {
- MaxValueGuard<int> guard(value);
- value = 5;
- EXPECT_TRUE(value == 5);
- }
- EXPECT_TRUE(value == 5);
- {
- MaxValueGuard<int> guard(value, 50);
- value = 100;
- EXPECT_TRUE(value == 100);
- }
- EXPECT_TRUE(value == 50);
- {
- MaxValueGuard<int> guard(value);
- value = 200;
- guard.update(100);
- EXPECT_TRUE(value == 200);
- }
- EXPECT_TRUE(value == 100);
- {
- MaxValueGuard<int> guard(value);
- value = 200;
- guard.dismiss();
- EXPECT_TRUE(value == 200);
- }
- EXPECT_TRUE(value == 200);
-}
-
-void
-Test::testCounterGuard()
+TEST("testCounterGuard")
{
int cnt = 10;
{
@@ -254,17 +123,4 @@ Test::testCounterGuard()
EXPECT_TRUE(cnt == 10);
}
-int
-Test::Main()
-{
- TEST_INIT("guard_test");
- testFilePointer();
- testFileDescriptor();
- testDirPointer();
- testValueGuard();
- testMaxValueGuard();
- testCounterGuard();
- TEST_DONE();
-}
-
-TEST_APPHOOK(Test)
+TEST_MAIN() { TEST_RUN_ALL(); }
\ No newline at end of file
diff --git a/vespalib/src/tests/io/fileutil/fileutiltest.cpp b/vespalib/src/tests/io/fileutil/fileutiltest.cpp
index 0948d18304e..93803c1fe9e 100644
--- a/vespalib/src/tests/io/fileutil/fileutiltest.cpp
+++ b/vespalib/src/tests/io/fileutil/fileutiltest.cpp
@@ -102,15 +102,6 @@ TEST("require that vespalib::File::open works")
ASSERT_TRUE(fileExists("mydir/myfile"));
f.unlink();
}
- // Opening with direct IO support works.
- {
- File f("mydir/myfile");
- f.open(File::CREATE | File::DIRECTIO, false);
- ASSERT_TRUE(fileExists("mydir/myfile"));
- if (!f.isOpenWithDirectIO()) {
- std::cerr << "This platform does not support direct IO\n";
- }
- }
// Opening plain file works
{
File f("myfile");
@@ -126,16 +117,6 @@ TEST("require that vespalib::File::open works")
//std::cerr << e.what() << "\n";
EXPECT_EQUAL(IoException::ILLEGAL_PATH, e.getType());
}
- // Test opening already open file
- {
- std::unique_ptr<File> f(new File("myfile"));
- f->open(File::CREATE, false);
- f->closeFileWhenDestructed(false);
- File f2(f->getFileDescriptor(), "myfile");
- f.reset();
- ASSERT_TRUE(f2.isOpen());
- f2.write(" ", 1, 0);
- }
// Test reopening file in same object
{
File f("myfile");
@@ -161,29 +142,6 @@ TEST("require that vespalib::File::isOpen works")
ASSERT_TRUE(!f.isOpen());
}
-TEST("require that vespalib::File::stat works")
-{
- std::filesystem::remove(std::filesystem::path("myfile"));
- std::filesystem::remove_all(std::filesystem::path("mydir"));
- EXPECT_EQUAL(false, fileExists("myfile"));
- EXPECT_EQUAL(false, fileExists("mydir"));
- std::filesystem::create_directory(std::filesystem::path("mydir"));
- File f("myfile");
- f.open(File::CREATE, false);
- f.write("foobar", 6, 0);
-
- FileInfo info = f.stat();
- EXPECT_EQUAL(6, info._size);
- EXPECT_EQUAL(true, info._plainfile);
- EXPECT_EQUAL(false, info._directory);
-
- EXPECT_EQUAL(6, f.getFileSize());
- f.close();
-
- EXPECT_EQUAL(true, fileExists("myfile"));
- EXPECT_EQUAL(true, fileExists("mydir"));
-}
-
TEST("require that vespalib::File::resize works")
{
std::filesystem::remove(std::filesystem::path("myfile"));
@@ -204,47 +162,6 @@ TEST("require that vespalib::File::resize works")
EXPECT_EQUAL(std::string("foo"), std::string(&vec[0], 3));
}
-TEST("require that copy constructor and assignment for vespalib::File works")
-{
- // Copy file not opened.
- {
- File f("myfile");
- File f2(f);
- EXPECT_EQUAL(f.getFilename(), f2.getFilename());
- }
- // Copy file opened
- {
- File f("myfile");
- f.open(File::CREATE);
- File f2(f);
- EXPECT_EQUAL(f.getFilename(), f2.getFilename());
- ASSERT_TRUE(f2.isOpen());
- ASSERT_TRUE(!f.isOpen());
- }
- // Assign file opened to another file opened
- {
- File f("myfile");
- f.open(File::CREATE);
- int fd = f.getFileDescriptor();
- File f2("targetfile");
- f2.open(File::CREATE);
- f = f2;
- EXPECT_EQUAL(std::string("targetfile"), f2.getFilename());
- EXPECT_EQUAL(f.getFilename(), f2.getFilename());
- ASSERT_TRUE(!f2.isOpen());
- ASSERT_TRUE(f.isOpen());
- try{
- File f3(fd, "myfile");
- f3.closeFileWhenDestructed(false); // Already closed
- f3.write("foo", 3, 0);
- TEST_FATAL("This file descriptor should have been closed");
- } catch (IoException& e) {
- //std::cerr << e.what() << "\n";
- EXPECT_EQUAL(IoException::INTERNAL_FAILURE, e.getType());
- }
- }
-}
-
TEST("require that we can read all data written to file")
{
// Write text into a file.
diff --git a/vespalib/src/vespa/vespalib/io/fileutil.cpp b/vespalib/src/vespa/vespalib/io/fileutil.cpp
index 6c169ab8d98..cb478f0f225 100644
--- a/vespalib/src/vespa/vespalib/io/fileutil.cpp
+++ b/vespalib/src/vespa/vespalib/io/fileutil.cpp
@@ -5,7 +5,6 @@
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/util/size_literals.h>
#include <vespa/vespalib/util/stringfmt.h>
-#include <ostream>
#include <cassert>
#include <filesystem>
#include <dirent.h>
@@ -16,35 +15,34 @@
#include <vespa/log/log.h>
LOG_SETUP(".vespalib.io.fileutil");
+namespace fs = std::filesystem;
+
namespace vespalib {
namespace {
- FileInfo::UP
- processStat(struct stat& filestats, bool result, stringref path) {
- FileInfo::UP resval;
- if (result) {
- resval.reset(new FileInfo);
- resval->_plainfile = S_ISREG(filestats.st_mode);
- resval->_directory = S_ISDIR(filestats.st_mode);
- resval->_symlink = S_ISLNK(filestats.st_mode);
- resval->_size = filestats.st_size;
- } else if (errno != ENOENT) {
- asciistream ost;
- ost << "An IO error occured while statting '" << path << "'. "
- << "errno(" << errno << "): " << getErrorString(errno);
- throw IoException(ost.str(), IoException::getErrorType(errno),
- VESPA_STRLOC);
- }
- LOG(debug, "stat(%s): Existed? %s, Plain file? %s, Directory? %s, "
- "Size: %" PRIu64,
- string(path).c_str(),
- resval.get() ? "true" : "false",
- resval.get() && resval->_plainfile ? "true" : "false",
- resval.get() && resval->_directory ? "true" : "false",
- resval.get() ? resval->_size : 0);
- return resval;
+FileInfo::UP
+processStat(struct stat& filestats, bool result, stringref path) {
+ FileInfo::UP resval;
+ if (result) {
+ resval = std::make_unique<FileInfo>();
+ resval->_plainfile = S_ISREG(filestats.st_mode);
+ resval->_directory = S_ISDIR(filestats.st_mode);
+ resval->_size = filestats.st_size;
+ } else if (errno != ENOENT) {
+ asciistream ost;
+ ost << "An IO error occured while statting '" << path << "'. "
+ << "errno(" << errno << "): " << getErrorString(errno);
+ throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
+ LOG(debug, "stat(%s): Existed? %s, Plain file? %s, Directory? %s, Size: %" PRIu64,
+ string(path).c_str(),
+ resval.get() ? "true" : "false",
+ resval.get() && resval->_plainfile ? "true" : "false",
+ resval.get() && resval->_directory ? "true" : "false",
+ resval.get() ? resval->_size : 0);
+ return resval;
+}
string
safeStrerror(int errnum)
@@ -54,167 +52,61 @@ safeStrerror(int errnum)
}
-bool
-FileInfo::operator==(const FileInfo& fi) const
-{
- return (_size == fi._size && _plainfile == fi._plainfile
- && _directory == fi._directory);
-}
-
-std::ostream&
-operator<<(std::ostream& out, const FileInfo& info)
-{
- out << "FileInfo(size: " << info._size;
- if (info._plainfile) out << ", plain file";
- if (info._directory) out << ", directory";
- out << ")";
- return out;
-}
-
File::File(stringref filename)
: _fd(-1),
- _flags(0),
- _filename(filename),
- _close(true),
- _fileReads(0),
- _fileWrites(0)
-{
-}
-
-File::File(int fileDescriptor, stringref filename)
- : _fd(fileDescriptor),
- _flags(0),
- _filename(filename),
- _close(true),
- _fileReads(0),
- _fileWrites(0)
-{
-}
+ _filename(filename)
+{ }
File::~File()
{
- if (_close && _fd != -1) close();
-}
-
-File::File(File& f)
- : _fd(f._fd),
- _flags(f._flags),
- _filename(f._filename),
- _close(f._close),
- _fileReads(f._fileReads),
- _fileWrites(f._fileWrites)
-{
- f._fd = -1;
- f._flags = 0;
- f._close = true;
- f._fileReads = 0;
- f._fileWrites = 0;
-}
-
-File&
-File::operator=(File& f)
-{
- if (_close && _fd != -1) close();
- _fd = f._fd;
- _flags = f._flags;
- _filename = f._filename;
- _close = f._close;
- _fileReads = f._fileReads;
- _fileWrites = f._fileWrites;
- f._fd = -1;
- f._flags = 0;
- f._close = true;
- f._fileReads = 0;
- f._fileWrites = 0;
- return *this;
-}
-
-void
-File::setFilename(stringref filename)
-{
- if (_filename == filename) return;
- if (_close && _fd != -1) close();
- _filename = filename;
- _fd = -1;
- _flags = 0;
- _close = true;
+ if (_fd != -1) close();
}
namespace {
- int openAndCreateDirsIfMissing(const string & filename, int flags,
- bool createDirsIfMissing)
+int openAndCreateDirsIfMissing(const string & filename, int flags, bool createDirsIfMissing)
+{
+ int fd = ::open(filename.c_str(), flags, 0644);
+ if (fd < 0 && errno == ENOENT && ((flags & O_CREAT) != 0)
+ && createDirsIfMissing)
{
- int fd = ::open(filename.c_str(), flags, 0644);
- if (fd < 0 && errno == ENOENT && ((flags & O_CREAT) != 0)
- && createDirsIfMissing)
- {
- auto pos = filename.rfind('/');
- if (pos != string::npos) {
- string path(filename.substr(0, pos));
- std::filesystem::create_directories(std::filesystem::path(path));
- LOG(spam, "open(%s, %d): Retrying open after creating parent "
- "directories.", filename.c_str(), flags);
- fd = ::open(filename.c_str(), flags, 0644);
- }
+ auto pos = filename.rfind('/');
+ if (pos != string::npos) {
+ string path(filename.substr(0, pos));
+ fs::create_directories(fs::path(path));
+ LOG(spam, "open(%s, %d): Retrying open after creating parent directories.", filename.c_str(), flags);
+ fd = ::open(filename.c_str(), flags, 0644);
}
- return fd;
}
+ return fd;
+}
}
void
File::open(int flags, bool autoCreateDirectories) {
if ((flags & File::READONLY) != 0) {
if ((flags & File::CREATE) != 0) {
- throw IllegalArgumentException(
- "Cannot use READONLY and CREATE options at the same time",
- VESPA_STRLOC);
+ throw IllegalArgumentException("Cannot use READONLY and CREATE options at the same time", VESPA_STRLOC);
}
if ((flags & File::TRUNC) != 0) {
- throw IllegalArgumentException(
- "Cannot use READONLY and TRUNC options at the same time",
- VESPA_STRLOC);
+ throw IllegalArgumentException("Cannot use READONLY and TRUNC options at the same time", VESPA_STRLOC);
}
if (autoCreateDirectories) {
- throw IllegalArgumentException(
- "No point in auto-creating directories on read only access",
- VESPA_STRLOC);
+ throw IllegalArgumentException("No point in auto-creating directories on read only access", VESPA_STRLOC);
}
}
int openflags = ((flags & File::READONLY) != 0 ? O_RDONLY : O_RDWR)
| ((flags & File::CREATE) != 0 ? O_CREAT : 0)
-#ifdef __linux__
- | ((flags & File::DIRECTIO) != 0 ? O_DIRECT : 0)
-#endif
| ((flags & File::TRUNC) != 0 ? O_TRUNC: 0);
int fd = openAndCreateDirsIfMissing(_filename, openflags, autoCreateDirectories);
-#ifdef __linux__
- if (fd < 0 && ((flags & File::DIRECTIO) != 0)) {
- openflags = (openflags ^ O_DIRECT);
- flags = (flags ^ DIRECTIO);
- LOG(debug, "open(%s, %d): Retrying without direct IO due to failure "
- "opening with errno(%d): %s",
- _filename.c_str(), flags, errno, safeStrerror(errno).c_str());
- fd = openAndCreateDirsIfMissing(_filename, openflags, autoCreateDirectories);
- }
-#endif
if (fd < 0) {
asciistream ost;
- ost << "open(" << _filename << ", 0x"
- << hex << flags << dec << "): Failed, errno(" << errno
- << "): " << safeStrerror(errno);
+ ost << "open(" << _filename << ", 0x" << hex << flags << dec
+ << "): Failed, errno(" << errno << "): " << safeStrerror(errno);
throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
- _flags = flags;
- if (_close && _fd != -1) close();
+ if (_fd != -1) close();
_fd = fd;
- LOG(debug, "open(%s, %d). File opened with file descriptor %d.",
- _filename.c_str(), flags, fd);
-}
-
-void
-File::closeFileWhenDestructed(bool closeOnDestruct)
-{
- _close = closeOnDestruct;
+ LOG(debug, "open(%s, %d). File opened with file descriptor %d.", _filename.c_str(), flags, fd);
}
FileInfo
@@ -226,13 +118,11 @@ File::stat() const
result = processStat(filestats, fstat(_fd, &filestats) == 0, _filename);
assert(result.get()); // The file must exist in a file instance
} else {
- result = processStat(filestats,
- ::stat(_filename.c_str(), &filestats) == 0,
- _filename);
+ result = processStat(filestats, ::stat(_filename.c_str(), &filestats) == 0, _filename);
// If the file does not exist yet, act like it does. It will
// probably be created when opened.
- if (result.get() == 0) {
- result.reset(new FileInfo());
+ if ( ! result) {
+ result = std::make_unique<FileInfo>();
result->_size = 0;
result->_directory = false;
result->_plainfile = true;
@@ -246,69 +136,32 @@ File::resize(off_t size)
{
if (ftruncate(_fd, size) != 0) {
asciistream ost;
- ost << "resize(" << _filename << ", " << size << "): Failed, errno("
- << errno << "): " << safeStrerror(errno);
+ ost << "resize(" << _filename << ", " << size << "): Failed, errno(" << errno << "): " << safeStrerror(errno);
throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
- LOG(debug, "resize(%s): Resized to %" PRIu64 " bytes.",
- _filename.c_str(), size);
-}
-
-void
-File::verifyDirectIO(uint64_t buf, size_t bufsize, off_t offset) const
-{
- if (offset % 512 != 0) {
- LOG(error,
- "Access to file %s failed because offset %" PRIu64 " wasn't 512-byte "
- "aligned. Buffer memory address was %" PRIx64 ", length %zu",
- _filename.c_str(), static_cast<uint64_t>(offset), buf, bufsize);
- assert(false);
- }
- if (buf % 512 != 0) {
- LOG(error,
- "Access to file %s failed because buffer memory address %" PRIx64 " "
- "wasn't 512-byte aligned. Offset was %" PRIu64 ", length %zu",
- _filename.c_str(), buf, static_cast<uint64_t>(offset), bufsize);
- assert(false);
- }
- if (bufsize % 512 != 0) {
- LOG(error,
- "Access to file %s failed because buffer size %zu wasn't 512-byte "
- "aligned. Buffer memory address was %" PRIx64 ", offset %" PRIu64,
- _filename.c_str(), bufsize, buf, static_cast<uint64_t>(offset));
- assert(false);
- }
+ LOG(debug, "resize(%s): Resized to %" PRIu64 " bytes.", _filename.c_str(), size);
}
off_t
File::write(const void *buf, size_t bufsize, off_t offset)
{
- ++_fileWrites;
size_t left = bufsize;
- LOG(debug, "write(%s): Writing %zu bytes at offset %" PRIu64 ".",
- _filename.c_str(), bufsize, offset);
-
- if (_flags & DIRECTIO) {
- verifyDirectIO((uint64_t)buf, bufsize, offset);
- }
+ LOG(debug, "write(%s): Writing %zu bytes at offset %" PRIu64 ".", _filename.c_str(), bufsize, offset);
while (left > 0) {
ssize_t written = ::pwrite(_fd, buf, left, offset);
if (written > 0) {
- LOG(spam, "write(%s): Wrote %zd bytes at offset %" PRIu64 ".",
- _filename.c_str(), written, offset);
+ LOG(spam, "write(%s): Wrote %zd bytes at offset %" PRIu64 ".", _filename.c_str(), written, offset);
left -= written;
buf = ((const char*) buf) + written;
offset += written;
} else if (written == 0) {
- LOG(spam, "write(%s): Wrote %zd bytes at offset %" PRIu64 ".",
- _filename.c_str(), written, offset);
+ LOG(spam, "write(%s): Wrote %zd bytes at offset %" PRIu64 ".", _filename.c_str(), written, offset);
assert(false); // Can this happen?
} else if (errno != EINTR && errno != EAGAIN) {
asciistream ost;
- ost << "write(" << _fd << ", " << buf
- << ", " << left << ", " << offset << "), Failed, errno("
- << errno << "): " << safeStrerror(errno);
+ ost << "write(" << _fd << ", " << buf << ", " << left << ", " << offset
+ << "), Failed, errno(" << errno << "): " << safeStrerror(errno);
throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
}
@@ -318,37 +171,23 @@ File::write(const void *buf, size_t bufsize, off_t offset)
size_t
File::read(void *buf, size_t bufsize, off_t offset) const
{
- ++_fileReads;
size_t remaining = bufsize;
- LOG(debug, "read(%s): Reading %zu bytes from offset %" PRIu64 ".",
- _filename.c_str(), bufsize, offset);
-
- if (_flags & DIRECTIO) {
- verifyDirectIO((uint64_t)buf, bufsize, offset);
- }
+ LOG(debug, "read(%s): Reading %zu bytes from offset %" PRIu64 ".", _filename.c_str(), bufsize, offset);
while (remaining > 0) {
ssize_t bytesread = ::pread(_fd, buf, remaining, offset);
if (bytesread > 0) {
- LOG(spam, "read(%s): Read %zd bytes from offset %" PRIu64 ".",
- _filename.c_str(), bytesread, offset);
+ LOG(spam, "read(%s): Read %zd bytes from offset %" PRIu64 ".", _filename.c_str(), bytesread, offset);
remaining -= bytesread;
buf = ((char*) buf) + bytesread;
offset += bytesread;
- if (((_flags & DIRECTIO) != 0) && ((bytesread % 512) != 0) && (offset == getFileSize())) {
- LOG(spam, "read(%s): Found EOF. Directio read to unaligned file end at offset %" PRIu64 ".",
- _filename.c_str(), offset);
- break;
- }
} else if (bytesread == 0) { // EOF
- LOG(spam, "read(%s): Found EOF. Zero bytes read from offset %" PRIu64 ".",
- _filename.c_str(), offset);
+ LOG(spam, "read(%s): Found EOF. Zero bytes read from offset %" PRIu64 ".", _filename.c_str(), offset);
break;
} else if (errno != EINTR && errno != EAGAIN) {
asciistream ost;
ost << "read(" << _fd << ", " << buf << ", " << remaining << ", "
- << offset << "): Failed, errno(" << errno << "): "
- << safeStrerror(errno);
+ << offset << "): Failed, errno(" << errno << "): " << safeStrerror(errno);
throw IoException(ost.str(), IoException::getErrorType(errno), VESPA_STRLOC);
}
}
@@ -433,13 +272,7 @@ bool
File::unlink()
{
close();
- return std::filesystem::remove(std::filesystem::path(_filename));
-}
-
-namespace {
-
- uint32_t diskAlignmentSize = 4_Ki;
-
+ return fs::remove(fs::path(_filename));
}
DirectoryList
@@ -465,16 +298,6 @@ listDirectory(const string & path)
return result;
}
-MallocAutoPtr
-getAlignedBuffer(size_t size)
-{
- void *ptr;
- int result = posix_memalign(&ptr, diskAlignmentSize, size);
- assert(result == 0);
- (void)result;
- return MallocAutoPtr(ptr);
-}
-
string dirname(stringref name)
{
size_t found = name.rfind('/');
@@ -517,8 +340,7 @@ getOpenErrorString(const int osError, stringref filename)
{
asciistream os;
string dirName(dirname(filename));
- os << "error=" << osError << "(\"" <<
- getErrorString(osError) << "\") fileStat";
+ os << "error=" << osError << "(\"" << getErrorString(osError) << "\") fileStat";
addStat(os, filename);
os << " dirStat";
addStat(os, dirName);
diff --git a/vespalib/src/vespa/vespalib/io/fileutil.h b/vespalib/src/vespa/vespalib/io/fileutil.h
index 4de36daa85f..148317a7edf 100644
--- a/vespalib/src/vespa/vespalib/io/fileutil.h
+++ b/vespalib/src/vespa/vespalib/io/fileutil.h
@@ -43,14 +43,10 @@ struct FileInfo {
bool _plainfile;
bool _directory;
- bool _symlink;
off_t _size;
- bool operator==(const FileInfo&) const;
};
-std::ostream& operator<<(std::ostream&, const FileInfo&);
-
/**
* @brief A File instance is used to access a single open file.
*
@@ -61,74 +57,44 @@ std::ostream& operator<<(std::ostream&, const FileInfo&);
*/
class File {
private:
- int _fd;
- int _flags;
- vespalib::string _filename;
- bool _close;
- mutable int _fileReads; // Tracks number of file reads done on this file
- mutable int _fileWrites; // Tracks number of file writes done in this file
+ int _fd;
+ string _filename;
+ void sync();
/**
- * Verify that direct I/O alignment preconditions hold. Triggers assertion
- * failure on violations.
+ * Get information about the current file. If file is opened, file descriptor
+ * will be used for stat. If file is not open, and the file does not exist
+ * yet, you will get fileinfo describing an empty file.
*/
- void verifyDirectIO(uint64_t buf, size_t bufsize, off_t offset) const;
-
+ FileInfo stat() const;
public:
using UP = std::unique_ptr<File>;
/**
* If failing to open file using direct IO it will retry using cached IO.
*/
- enum Flag { READONLY = 1, CREATE = 2, DIRECTIO = 4, TRUNC = 8 };
+ enum Flag { READONLY = 1, CREATE = 2, TRUNC = 8 };
/** Create a file instance, without opening the file. */
- File(vespalib::stringref filename);
-
- /** Create a file instance of an already open file. */
- File(int fileDescriptor, vespalib::stringref filename);
-
- /** Copying a file instance, moves any open file descriptor. */
- File(File& f);
- File& operator=(File& f);
+ File(stringref filename);
/** Closes the file if not instructed to do otherwise. */
- virtual ~File();
+ ~File();
- /**
- * Make this instance point at another file.
- * Closes the old file it it was open.
- */
- void setFilename(vespalib::stringref filename);
+ const string& getFilename() const { return _filename; }
- const vespalib::string& getFilename() const { return _filename; }
-
- virtual void open(int flags, bool autoCreateDirectories = false);
+ void open(int flags, bool autoCreateDirectories = false);
bool isOpen() const { return (_fd != -1); }
- bool isOpenWithDirectIO() const { return ((_flags & DIRECTIO) != 0); }
-
- /**
- * Whether or not file should be closed when this instance is destructed.
- * By default it will be closed.
- */
- void closeFileWhenDestructed(bool close);
- virtual int getFileDescriptor() const { return _fd; }
-
- /**
- * Get information about the current file. If file is opened, file descriptor
- * will be used for stat. If file is not open, and the file does not exist
- * yet, you will get fileinfo describing an empty file.
- */
- virtual FileInfo stat() const;
+ int getFileDescriptor() const { return _fd; }
/**
* Get the filesize of a file, specified by a file descriptor.
*
* @throw IoException If we failed to stat the file.
*/
- virtual off_t getFileSize() const { return stat()._size; }
+ off_t getFileSize() const { return stat()._size; }
/**
* Resize the currently open file to a given size,
@@ -138,7 +104,7 @@ public:
* @param size new size of file
* @throw IoException If we failed to resize the file.
*/
- virtual void resize(off_t size);
+ void resize(off_t size);
/**
* Writes data to file.
@@ -152,7 +118,7 @@ public:
* @throw IoException If we failed to write to the file.
* @return Always return bufsize.
*/
- virtual off_t write(const void *buf, size_t bufsize, off_t offset);
+ off_t write(const void *buf, size_t bufsize, off_t offset);
/**
* Read characters from a file.
@@ -167,7 +133,7 @@ public:
* @return The number of bytes actually read. If less than
* bufsize, this indicates that EOF was reached.
*/
- virtual size_t read(void *buf, size_t bufsize, off_t offset) const;
+ size_t read(void *buf, size_t bufsize, off_t offset) const;
/**
* Read the file into a string.
@@ -177,7 +143,7 @@ public:
* @throw IoException If we failed to read from file.
* @return The content of the file.
*/
- vespalib::string readAll() const;
+ string readAll() const;
/**
* Read a file into a string.
@@ -188,7 +154,7 @@ public:
* @throw IoException If we failed to read from file.
* @return The content of the file.
*/
- static vespalib::string readAll(vespalib::stringref path);
+ static string readAll(stringref path);
/**
* Sync file or directory.
@@ -198,24 +164,17 @@ public:
*
* @throw IoException If we failed to sync the file.
*/
- static void sync(vespalib::stringref path);
-
- virtual void sync();
- virtual bool close();
- virtual bool unlink();
+ static void sync(stringref path);
- int getFileReadCount() const { return _fileReads; }
- int getFileWriteCount() const { return _fileWrites; }
+ bool close();
+ bool unlink();
};
/**
* List the contents of the given directory.
*/
-using DirectoryList = std::vector<vespalib::string>;
-extern DirectoryList listDirectory(const vespalib::string & path);
-
-extern MallocAutoPtr getAlignedBuffer(size_t size);
-
+using DirectoryList = std::vector<string>;
+extern DirectoryList listDirectory(const string & path);
string dirname(stringref name);
string getOpenErrorString(const int osError, stringref name);
diff --git a/vespalib/src/vespa/vespalib/stllike/hash_map.cpp b/vespalib/src/vespa/vespalib/stllike/hash_map.cpp
index abb88fe674f..50a3d73fe12 100644
--- a/vespalib/src/vespa/vespalib/stllike/hash_map.cpp
+++ b/vespalib/src/vespa/vespalib/stllike/hash_map.cpp
@@ -16,6 +16,7 @@ VESPALIB_HASH_MAP_INSTANTIATE(vespalib::string, double);
VESPALIB_HASH_MAP_INSTANTIATE(int64_t, int32_t);
VESPALIB_HASH_MAP_INSTANTIATE(int64_t, uint32_t);
VESPALIB_HASH_MAP_INSTANTIATE(int32_t, uint32_t);
+VESPALIB_HASH_MAP_INSTANTIATE(uint16_t, uint16_t);
VESPALIB_HASH_MAP_INSTANTIATE(uint16_t, uint32_t);
VESPALIB_HASH_MAP_INSTANTIATE(uint32_t, int32_t);
VESPALIB_HASH_MAP_INSTANTIATE(uint32_t, uint32_t);
diff --git a/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp b/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp
index 6d5b7ed8b05..77e46bbf9e8 100644
--- a/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp
+++ b/vespalib/src/vespa/vespalib/stllike/hash_set_insert.hpp
@@ -9,7 +9,7 @@ namespace vespalib {
template<typename K, typename H, typename EQ, typename M>
template<typename InputIterator>
hash_set<K, H, EQ, M>::hash_set(InputIterator first, InputIterator last)
- : _ht(0)
+ : _ht(last - first)
{
insert(first, last);
}
@@ -18,7 +18,6 @@ template<typename K, typename H, typename EQ, typename M>
template<typename InputIt>
void
hash_set<K, H, EQ, M>::insert(InputIt first, InputIt last) {
- _ht.resize(last - first + capacity());
for (; first < last; first++) {
insert(*first);
}
diff --git a/vespalib/src/vespa/vespalib/stllike/string.hpp b/vespalib/src/vespa/vespalib/stllike/string.hpp
index e0144ab6f85..3438c6b641a 100644
--- a/vespalib/src/vespa/vespalib/stllike/string.hpp
+++ b/vespalib/src/vespa/vespalib/stllike/string.hpp
@@ -17,8 +17,10 @@ void
small_string<StackSize>::_reserveBytes(size_type newBufferSize) noexcept {
if (isAllocated()) {
_buf = (char *) realloc(_buf, newBufferSize);
+ assert(_buf);
} else {
char *tmp = (char *) malloc(newBufferSize);
+ assert(tmp);
memcpy(tmp, _stack, _sz);
tmp[_sz] = '\0';
_buf = tmp;
@@ -96,6 +98,7 @@ void small_string<StackSize>::init_slower(const void *s) noexcept
{
_bufferSize = _sz+1;
_buf = (char *) malloc(_bufferSize);
+ assert(_buf);
memcpy(_buf, s, _sz);
_buf[_sz] = '\0';
}
@@ -105,6 +108,7 @@ void small_string<StackSize>::appendAlloc(const void * s, size_type addSz) noexc
{
size_type newBufferSize = roundUp2inN(_sz+addSz+1);
char * buf = (char *) malloc(newBufferSize);
+ assert(buf);
memcpy(buf, buffer(), _sz);
if (isAllocated()) {
free(_buf);
diff --git a/vespalib/src/vespa/vespalib/util/guard.h b/vespalib/src/vespa/vespalib/util/guard.h
index 32237a59d9a..efd7b8345c9 100644
--- a/vespalib/src/vespa/vespalib/util/guard.h
+++ b/vespalib/src/vespa/vespalib/util/guard.h
@@ -2,8 +2,7 @@
#pragma once
-#include <stdio.h>
-#include <dirent.h>
+#include <cstdio>
#include <unistd.h>
namespace vespalib {
@@ -19,43 +18,43 @@ class FilePointer
{
private:
FILE *_fp;
- FilePointer(const FilePointer &);
- FilePointer &operator=(const FilePointer &);
public:
/**
* @brief Create a FilePointer from a FILE pointer.
*
* @param file the underlying FILE pointer
**/
- explicit FilePointer(FILE *file = NULL) : _fp(file) {}
+ explicit FilePointer(FILE *file = nullptr) noexcept : _fp(file) {}
+ FilePointer(const FilePointer &) = delete;
+ FilePointer &operator=(const FilePointer &) = delete;
/**
* @brief Close the file if it is still open.
**/
~FilePointer() { reset(); }
/**
- * @brief Check whether we have a FILE pointer (not NULL)
+ * @brief Check whether we have a FILE pointer (not nullptr)
*
* @return true if we have an underlying FILE pointer
**/
- bool valid() const { return (_fp != NULL); }
+ bool valid() const noexcept { return (_fp != nullptr); }
/**
* @brief Obtain the internal FILE pointer
*
* @return internal FILE pointer
**/
- FILE *fp() const { return _fp; }
+ FILE *fp() const noexcept { return _fp; }
/**
* @brief Implicit cast to obtain internal FILE pointer
*
* @return internal FILE pointer
**/
- operator FILE*() { return _fp; }
+ operator FILE*() noexcept { return _fp; }
/**
* @brief Take ownership of a new FILE pointer.
*
* The previously owned FILE pointer is closed, if present.
**/
- void reset(FILE *file = NULL) {
+ void reset(FILE *file = nullptr) {
if (valid()) {
fclose(_fp);
}
@@ -68,81 +67,13 @@ public:
*
* @return the released FILE pointer
**/
- FILE *release() {
+ FILE *release() noexcept {
FILE *tmp = _fp;
- _fp = NULL;
+ _fp = nullptr;
return tmp;
}
};
-
-/**
- * @brief A DirPointer wraps a bald DIR pointer inside a guarding object.
- *
- * The underlying directory is closed when the DirPointer object is
- * destructed.
- **/
-class DirPointer
-{
-private:
- DIR *_dp;
- DirPointer(const DirPointer &);
- DirPointer &operator=(const DirPointer &);
-public:
- /**
- * @brief Create a DirPointer from a DIR pointer.
- *
- * @param dir the underlying DIR pointer
- **/
- explicit DirPointer(DIR *dir = NULL) : _dp(dir) {}
- /**
- * Close the directory if it is still open.
- **/
- ~DirPointer() { reset(); }
- /**
- * @brief Check whether we have a DIR pointer (not NULL)
- *
- * @return true if we have an underlying DIR pointer
- **/
- bool valid() const { return (_dp != NULL); }
- /**
- * @brief Obtain the internal DIR pointer
- *
- * @return internal DIR pointer
- **/
- DIR *dp() const { return _dp; }
- /**
- * @brief Implicit cast to obtain internal DIR pointer
- *
- * @return internal DIR pointer
- **/
- operator DIR*() { return _dp; }
- /**
- * @brief Take ownership of a new DIR pointer.
- *
- * The previously owned DIR pointer is closed, if present.
- **/
- void reset(DIR *dir = NULL) {
- if (valid()) {
- closedir(_dp);
- }
- _dp = dir;
- }
- /**
- * @brief Release ownership of the current DIR pointer.
- *
- * The directory will no longer be closed by the destructor.
- *
- * @return the released DIR pointer
- **/
- DIR *release() {
- DIR *tmp = _dp;
- _dp = NULL;
- return tmp;
- }
-};
-
-
/**
* @brief A FileDescriptor wraps a file descriptor inside a guarding object.
*
@@ -153,15 +84,15 @@ class FileDescriptor
{
private:
int _fd;
- FileDescriptor(const FileDescriptor &);
- FileDescriptor &operator=(const FileDescriptor &);
public:
/**
* @brief Create a FileDescriptor from a file descriptor.
*
* @param file the underlying file descriptor
**/
- explicit FileDescriptor(int file = -1) : _fd(file) {}
+ explicit FileDescriptor(int file = -1) noexcept : _fd(file) {}
+ FileDescriptor(const FileDescriptor &) = delete;
+ FileDescriptor &operator=(const FileDescriptor &) = delete;
/**
* @brief Close the file if it is still open.
**/
@@ -171,13 +102,13 @@ public:
*
* @return true if we have an underlying file descriptor
**/
- bool valid() const { return (_fd >= 0); }
+ bool valid() const noexcept { return (_fd >= 0); }
/**
* @brief Obtain the internal file descriptor
*
* @return internal file descriptor
**/
- int fd() const { return _fd; }
+ int fd() const noexcept { return _fd; }
/**
* @brief Take ownership of a new file descriptor.
*
@@ -196,7 +127,7 @@ public:
*
* @return the released file descriptor
**/
- int release() {
+ int release() noexcept {
int tmp = _fd;
_fd = -1;
return tmp;
@@ -216,161 +147,20 @@ class CounterGuard
{
private:
int &_cnt;
- CounterGuard(const CounterGuard &);
- CounterGuard &operator=(const CounterGuard &);
public:
/**
* @brief Increase the value
*
* @param cnt a reference to the value that will be modified
**/
- explicit CounterGuard(int &cnt) : _cnt(cnt) { ++cnt; }
+ explicit CounterGuard(int &cnt) noexcept : _cnt(cnt) { ++cnt; }
+ CounterGuard(const CounterGuard &) = delete;
+ CounterGuard &operator=(const CounterGuard &) = delete;
/**
* @brief Decrease the value
**/
~CounterGuard() { --_cnt; }
};
-
-/**
- * @brief A ValueGuard is used to set a variable to a specific value
- * when the ValueGuard is destructed.
- *
- * This can be used to revert a variable if an exception is thrown.
- * However, you must remember to dismiss the guard if you don't want
- * it to set the value when it goes out of scope.
- **/
-template<typename T>
-class ValueGuard
-{
-private:
- bool _active;
- T &_ref;
- T _value;
-
- ValueGuard(const ValueGuard &);
- ValueGuard &operator=(const ValueGuard &);
-public:
- /**
- * @brief Create a ValueGuard for the given variable.
- *
- * The variable will be reverted to its original value in the destructor.
- *
- * @param ref the variable that will be modified
- **/
- explicit ValueGuard(T &ref) : _active(true), _ref(ref), _value(ref) {}
- /**
- * @brief Create a ValueGuard for the given variable.
- *
- * The variable will be set to the given value in the destructor.
- *
- * @param ref the variable that will be modified
- * @param val the value it will be set to
- **/
- ValueGuard(T &ref, const T &val) : _active(true), _ref(ref), _value(val) {}
- /**
- * @brief Reset the variable.
- *
- * Set the variable to the value defined in the constructor or the
- * update method. If dismiss has been invoked, the variable is not
- * modified.
- **/
- ~ValueGuard() {
- if (_active) {
- _ref = _value;
- }
- }
- /**
- * @brief Dismiss this guard.
- *
- * When a guard has been dismissed, the destructor will not modify
- * the variable. The dismiss method is typically used to indicate
- * that everything went ok, and that we no longer need to protect
- * the variable from exceptions.
- **/
- void dismiss() { _active = false; }
- /// @brief See dismiss
- void deactivate() { dismiss(); }
- /**
- * @brief Update the value the variable will be set to in the
- * destructor.
- *
- * This can be used to set revert points during execution.
- **/
- void update(const T &val) { _value = val; }
- void operator=(const T& val) { update(val); }
-};
-
-
-/**
- * @brief A MaxValueGuard is used to enfore an upper bound on the
- * value of a variable when the MaxValueGuard is destructed.
- *
- * This can be used to revert a variable if an exception is thrown.
- * However, you must remember to dismiss the guard if you don't want
- * it to set the value when it goes out of scope.
- **/
-template<typename T>
-class MaxValueGuard {
- bool _active;
- T &_ref;
- T _value;
-
- MaxValueGuard(const MaxValueGuard &);
- MaxValueGuard &operator=(const MaxValueGuard &);
-public:
- /**
- * @brief Create a MaxValueGuard for the given variable.
- *
- * The variable will be reverted back to its original value in the
- * destructor if it has increased.
- *
- * @param ref the variable that will be modified
- **/
- explicit MaxValueGuard(T &ref) : _active(true), _ref(ref), _value(ref) {}
- /**
- * @brief Create a ValueGuard for the given variable.
- *
- * The given upper bound will be enforced in the destructor.
- *
- * @param ref the variable that will be modified
- * @param val upper bound for the variable
- **/
- MaxValueGuard(T& ref, const T& val) : _active(true), _ref(ref), _value(val) {}
- /**
- * @brief Enforce the upper bound.
- *
- * If the current value of the variable is greater than the upper
- * bound, it is set to the upper bound as defined in the
- * constructor or the update method. If dismiss has been invoked,
- * the variable is not modified.
- **/
- ~MaxValueGuard() {
- if (_active && _ref > _value) {
- _ref = _value;
- }
- }
- /**
- * @brief Dismiss this guard.
- *
- * When a guard is dismissed, the destructor will not modify the
- * variable. The dismiss method is typically used to indicate that
- * everything went ok, and that we no longer need to protect the
- * variable from exceptions.
- **/
- void dismiss() { _active = false; }
- /// @brief See dismiss
- void deactivate() { dismiss(); }
- /**
- * @brief Update the upper bound that will be enforced in the
- * destructor.
- *
- * This can be used to set revert points during execution.
- **/
- void update(const T &val) { _value = val; }
- /// @brief See update.
- void operator=(const T& val) { update(val); }
-};
-
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
index 51a639a3c4e..f711d3d8685 100644
--- a/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
+++ b/vespalib/src/vespa/vespalib/util/mmap_file_allocator.cpp
@@ -11,6 +11,7 @@
#include <filesystem>
using vespalib::make_string_short::fmt;
+namespace fs = std::filesystem;
namespace vespalib::alloc {
@@ -21,7 +22,7 @@ MmapFileAllocator::MmapFileAllocator(const vespalib::string& dir_name)
_allocations(),
_freelist()
{
- std::filesystem::create_directories(std::filesystem::path(_dir_name));
+ fs::create_directories(fs::path(_dir_name));
_file.open(O_RDWR | O_CREAT | O_TRUNC, false);
}
@@ -30,7 +31,7 @@ MmapFileAllocator::~MmapFileAllocator()
assert(_allocations.empty());
_file.close();
_file.unlink();
- std::filesystem::remove_all(std::filesystem::path(_dir_name));
+ fs::remove_all(fs::path(_dir_name));
}
uint64_t