-rw-r--r--  client/go/.gitignore | 1
-rw-r--r--  client/go/Makefile | 93
-rw-r--r--  client/go/README.md | 6
-rw-r--r--  client/go/cmd/api_key.go | 12
-rw-r--r--  client/go/cmd/api_key_test.go | 8
-rw-r--r--  client/go/cmd/cert.go | 24
-rw-r--r--  client/go/cmd/cert_test.go | 17
-rw-r--r--  client/go/cmd/clone.go | 2
-rw-r--r--  client/go/cmd/command_tester.go | 13
-rw-r--r--  client/go/cmd/config.go | 167
-rw-r--r--  client/go/cmd/config_test.go | 34
-rw-r--r--  client/go/cmd/curl.go | 143
-rw-r--r--  client/go/cmd/curl_test.go | 53
-rw-r--r--  client/go/cmd/deploy.go | 65
-rw-r--r--  client/go/cmd/deploy_test.go | 11
-rw-r--r--  client/go/cmd/document.go | 10
-rw-r--r--  client/go/cmd/helpers.go | 48
-rw-r--r--  client/go/cmd/man.go | 2
-rw-r--r--  client/go/cmd/query.go | 2
-rw-r--r--  client/go/cmd/query_test.go | 11
-rw-r--r--  client/go/cmd/root.go | 3
-rw-r--r--  client/go/cmd/version.go | 3
-rw-r--r--  client/go/cmd/version_test.go | 2
-rw-r--r--  client/go/go.mod | 1
-rw-r--r--  client/go/go.sum | 2
-rw-r--r--  client/go/vespa/deploy.go | 2
-rw-r--r--  client/go/vespa/target.go | 1
-rw-r--r--  client/src/main/java/ai/vespa/client/dsl/Annotation.java | 4
-rw-r--r--  client/src/main/java/ai/vespa/client/dsl/GeoLocation.java | 44
-rw-r--r--  client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java | 52
-rw-r--r--  client/src/main/java/ai/vespa/client/dsl/Q.java | 26
-rw-r--r--  client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy | 40
-rw-r--r--  config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java | 7
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java | 42
-rw-r--r--  config-model/src/main/java/com/yahoo/vespa/model/container/Container.java | 9
-rw-r--r--  config-model/src/test/java/com/yahoo/searchdefinition/processing/RankProfileSearchFixture.java | 5
-rw-r--r--  config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java | 3
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/GetConfigRequest.java | 7
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/PayloadChecksum.java | 31
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/PayloadChecksums.java | 4
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequestV3.java | 23
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequest.java | 8
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java | 8
-rw-r--r--  config/src/main/java/com/yahoo/vespa/config/protocol/SlimeRequestData.java | 10
-rw-r--r--  config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java | 4
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java | 5
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpConfigRequest.java | 4
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java | 47
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java | 8
-rw-r--r--  configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java | 13
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java | 4
-rw-r--r--  configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java | 56
-rw-r--r--  container-core/src/main/java/com/yahoo/restapi/ByteArrayResponse.java | 26
-rw-r--r--  container-core/src/main/java/com/yahoo/restapi/StringResponse.java | 18
-rw-r--r--  container-core/src/main/resources/configdefinitions/container.qr.def | 5
-rw-r--r--  container-disc/src/main/java/com/yahoo/container/jdisc/SystemInfoProvider.java | 10
-rw-r--r--  container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java | 4
-rw-r--r--  container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java | 7
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java | 25
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java | 50
-rw-r--r--  controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java | 1
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java | 17
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java (renamed from controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackage.java) | 7
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiff.java | 112
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java (renamed from controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageValidator.java) | 3
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparator.java | 246
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReader.java (renamed from controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java) | 34
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java | 2
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java | 52
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java | 11
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java | 8
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java | 8
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java | 23
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java | 4
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java | 64
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java | 18
-rw-r--r--  controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java | 7
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiffTest.java | 128
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageTest.java (renamed from controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageTest.java) | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparatorTest.java | 112
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReaderTest.java (renamed from controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReaderTest.java) | 10
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java | 6
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java | 11
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationStoreMock.java | 55
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgraderTest.java | 10
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java | 3
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java | 15
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java | 15
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-aws-us-east-2a-runs.json | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-overview.json | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-user-instance.json | 4
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiTest.java | 67
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java | 19
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json | 9
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java | 2
-rw-r--r--  controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java | 2
-rw-r--r--  eval/src/vespa/eval/eval/llvm/addr_to_symbol.cpp | 11
-rw-r--r--  flags/src/main/java/com/yahoo/vespa/flags/Flags.java | 17
-rw-r--r--  hosted-zone-api/abi-spec.json | 32
-rw-r--r--  hosted-zone-api/src/main/java/ai/vespa/cloud/Cluster.java | 19
-rw-r--r--  hosted-zone-api/src/main/java/ai/vespa/cloud/Node.java | 34
-rw-r--r--  hosted-zone-api/src/main/java/ai/vespa/cloud/SystemInfo.java | 17
-rw-r--r--  hosted-zone-api/src/main/java/ai/vespa/cloud/Zone.java | 2
-rw-r--r--  hosted-zone-api/src/test/java/ai/vespa/cloud/SystemInfoTest.java | 21
-rw-r--r--  model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModel.java | 1
-rw-r--r--  model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModels.java | 99
-rw-r--r--  model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java | 58
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/RealConfigServerClients.java | 5
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/Event.java | 54
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java | 28
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java | 24
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java | 23
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java | 12
-rw-r--r--  node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java | 13
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java | 3
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/ContainerTester.java | 4
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/NodeRepoMock.java | 59
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java | 2
-rw-r--r--  node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java | 56
-rw-r--r--  node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java | 4
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java | 15
-rw-r--r--  node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java | 19
-rw-r--r--  searchcore/CMakeLists.txt | 1
-rw-r--r--  searchcore/src/apps/vespa-feed-bm/CMakeLists.txt | 34
-rw-r--r--  searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp | 1133
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/CMakeLists.txt | 42
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp | 384
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h | 79
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.cpp (renamed from searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.cpp) | 20
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.h (renamed from searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.h) | 7
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp | 48
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h | 71
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_feed.cpp | 195
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_feed.h | 57
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.cpp (renamed from searchcore/src/apps/vespa-feed-bm/bm_message_bus.cpp) | 6
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.h (renamed from searchcore/src/apps/vespa-feed-bm/bm_message_bus.h) | 4
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp | 738
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_node.h | 53
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_range.h | 24
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.cpp (renamed from searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.h (renamed from searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.cpp (renamed from searchcore/src/apps/vespa-feed-bm/bm_storage_link.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.h (renamed from searchcore/src/apps/vespa-feed-bm/bm_storage_link.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bm_storage_link_context.h (renamed from searchcore/src/apps/vespa-feed-bm/bm_storage_link_context.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.cpp (renamed from searchcore/src/apps/vespa-feed-bm/bucket_info_queue.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.h (renamed from searchcore/src/apps/vespa-feed-bm/bucket_info_queue.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/bucket_selector.h | 28
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.cpp (renamed from searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.h (renamed from searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/i_bm_feed_handler.h (renamed from searchcore/src/apps/vespa-feed-bm/i_bm_feed_handler.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/pending_tracker.cpp (renamed from searchcore/src/apps/vespa-feed-bm/pending_tracker.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/pending_tracker.h (renamed from searchcore/src/apps/vespa-feed-bm/pending_tracker.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.cpp (renamed from searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.h (renamed from searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.cpp (renamed from searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.cpp) | 8
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.h (renamed from searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.h) | 3
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.cpp (renamed from searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.h (renamed from searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.cpp (renamed from searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.h (renamed from searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.cpp (renamed from searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.h (renamed from searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.h) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.cpp (renamed from searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.cpp) | 2
-rw-r--r--  searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.h (renamed from searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.h) | 2
-rw-r--r--  searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp | 26
-rw-r--r--  searchlib/src/tests/tensor/hnsw_saver/hnsw_save_load_test.cpp | 5
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp | 21
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp | 12
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/hnsw_index.h | 2
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.cpp | 65
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.h | 37
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h | 9
-rw-r--r--  searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index_loader.h | 23
-rw-r--r--  slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.cpp | 8
-rw-r--r--  slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.h | 2
-rw-r--r--  slobrok/src/vespa/slobrok/server/rpchooks.cpp | 111
-rw-r--r--  slobrok/src/vespa/slobrok/server/rpchooks.h | 6
-rw-r--r--  slobrok/src/vespa/slobrok/server/sbenv.cpp | 10
-rw-r--r--  storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp | 84
-rw-r--r--  storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp | 1771
-rw-r--r--  storage/src/tests/distributor/top_level_distributor_test.cpp | 16
-rw-r--r--  storage/src/tests/distributor/top_level_distributor_test_util.cpp | 61
-rw-r--r--  storage/src/tests/distributor/top_level_distributor_test_util.h | 17
-rw-r--r--  storage/src/vespa/storage/distributor/distributor_stripe.h | 1
-rw-r--r--  storage/src/vespa/storage/distributor/top_level_distributor.cpp | 1
-rw-r--r--  storage/src/vespa/storage/distributor/top_level_distributor.h | 5
-rw-r--r--  vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java | 10
214 files changed, 6485 insertions, 1938 deletions
diff --git a/client/go/.gitignore b/client/go/.gitignore
index b35a2cef362..eb679add05e 100644
--- a/client/go/.gitignore
+++ b/client/go/.gitignore
@@ -1,4 +1,5 @@
bin/
+dist/
share/
!Makefile
!build/
diff --git a/client/go/Makefile b/client/go/Makefile
index 3297b628cb2..17748d765c8 100644
--- a/client/go/Makefile
+++ b/client/go/Makefile
@@ -1,23 +1,104 @@
# Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+# The version to release. Defaults to the current tag or revision.
+# Use env VERSION=X.Y.Z make ... to override
+VERSION ?= $(shell git describe --tags 2> /dev/null | sed -E "s/^vespa-|-1$$//g")
+DEVEL_VERSION := $(shell echo "0.0.0-`git rev-parse --short HEAD`")
+ifeq ($(VERSION),)
+ VERSION = $(DEVEL_VERSION)
+endif
+
BIN ?= $(CURDIR)/bin
SHARE ?= $(CURDIR)/share
-# When building a new release the build system should set the VERSION
-# environment variable to version being built
-VERSION ?= $(shell echo "0.0.0-`git rev-parse --short HEAD`")
+DIST ?= $(CURDIR)/dist
+
+GO_FLAGS := -ldflags "-X github.com/vespa-engine/vespa/client/go/build.Version=$(VERSION)"
+GIT_ROOT := $(shell git rev-parse --show-toplevel)
+DIST_TARGETS := dist-mac dist-linux dist-win32 dist-win64
all: test checkfmt install
+#
+# Dist targets
+#
+
+# Bump the version of the vespa-cli formula and create a pull request to the Homebrew repository.
+#
+# Example:
+#
+# $ git checkout vespa-X.Y.Z-1
+# $ make dist-homebrew
+dist-homebrew: dist-version
+ brew bump-formula-pr --tag vespa-$(VERSION)-1 --version $(VERSION) vespa-cli
+
+# Create a GitHub release draft for all platforms. Note that this only creates a
+# draft, which is not publicly visible until it's explicitly published.
+#
+# Once the release has been created, this prints a URL to the release draft.
+#
+# This requires the GitHub CLI to be installed: brew install gh
+#
+# Example:
+#
+# $ git checkout vespa-X.Y.Z-1
+# $ make dist-github
+dist-github: dist
+ gh release create v$(VERSION) --repo vespa-engine/vespa --notes-file $(CURDIR)/README.md --draft --title "Vespa CLI $(VERSION)" \
+ $(DIST)/vespa-cli_$(VERSION)_sha256sums.txt \
+ $(DIST)/vespa-cli_$(VERSION)_*.{zip,tar.gz}
+
+#
+# Cross-platform build targets
+#
+
+dist: $(DIST_TARGETS) dist-sha256sums
+
+dist-mac: GOOS=darwin
+dist-mac: GOARCH=amd64
+
+dist-linux: GOOS=linux
+dist-linux: GOARCH=amd64
+
+dist-win32: GOOS=windows
+dist-win32: GOARCH=386
+
+dist-win64: GOOS=windows
+dist-win64: GOARCH=amd64
+
+$(DIST_TARGETS): DIST_NAME=vespa-cli_$(VERSION)_$(GOOS)_$(GOARCH)
+$(DIST_TARGETS): dist-version manpages
+$(DIST_TARGETS):
+ mkdir -p $(DIST)/$(DIST_NAME)/bin
+ env GOOS=$(GOOS) GOARCH=$(GOARCH) go build -o $(DIST)/$(DIST_NAME)/bin $(GO_FLAGS) ./...
+ cp -a $(GIT_ROOT)/LICENSE $(DIST)/$(DIST_NAME)
+ if [ "$(GOOS)" = "windows" ]; then \
+ cd $(DIST) && zip -r $(DIST)/$(DIST_NAME).zip $(DIST_NAME); \
+ else \
+ cp -a share $(DIST)/$(DIST_NAME); \
+ tar -czvf $(DIST)/$(DIST_NAME).tar.gz -C $(DIST) $(DIST_NAME); \
+ fi
+
+dist-sha256sums:
+ cd $(DIST) && sha256sum vespa-cli_$(VERSION)_*.{zip,tar.gz} > vespa-cli_$(VERSION)_sha256sums.txt
+
+dist-version:
+ifeq ($(VERSION),$(DEVEL_VERSION))
+ $(error Invalid release version: $(VERSION). Try 'git checkout vespa-X.Y.Z-1' or 'env VERSION=X.Y.Z make ...')
+endif
+
+#
+# Development targets
+#
+
install:
- env GOBIN=$(BIN) \
- go install -ldflags "-X github.com/vespa-engine/vespa/client/go/build.Version=$(VERSION)" ./...
+ env GOBIN=$(BIN) go install $(GO_FLAGS) ./...
manpages: install
mkdir -p $(SHARE)/man/man1
$(BIN)/vespa man $(SHARE)/man/man1
clean:
- rm -rf $(BIN) $(SHARE)
+ rm -rf $(BIN) $(SHARE) $(DIST)
test:
go test ./...
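
Note on the build flags above: -ldflags "-X github.com/vespa-engine/vespa/client/go/build.Version=$(VERSION)" works by overwriting a package-level string variable at link time. A minimal sketch of such a build package follows; it is not part of this diff, the package path is taken from the flag, and the default value is an assumed placeholder.

    // Package build holds values stamped in at build time.
    package build

    // Version is the CLI version. The Go linker overwrites it when the Makefile
    // passes -X github.com/vespa-engine/vespa/client/go/build.Version=<version>;
    // the default below (assumed) marks a development build.
    var Version = "0.0.0-devel"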
diff --git a/client/go/README.md b/client/go/README.md
new file mode 100644
index 00000000000..7b5b222503c
--- /dev/null
+++ b/client/go/README.md
@@ -0,0 +1,6 @@
+The command-line tool for Vespa.ai.
+
+Use it on Vespa instances running locally, remotely or in the cloud.
+Prefer the web service APIs to this tool in production.
+
+Vespa documentation: https://docs.vespa.ai
diff --git a/client/go/cmd/api_key.go b/client/go/cmd/api_key.go
index c94faa0d5e3..90cbdbc5bc1 100644
--- a/client/go/cmd/api_key.go
+++ b/client/go/cmd/api_key.go
@@ -7,7 +7,6 @@ import (
"fmt"
"io/ioutil"
"log"
- "path/filepath"
"github.com/spf13/cobra"
"github.com/vespa-engine/vespa/client/go/util"
@@ -29,16 +28,17 @@ var apiKeyCmd = &cobra.Command{
DisableAutoGenTag: true,
Args: cobra.ExactArgs(0),
Run: func(cmd *cobra.Command, args []string) {
- configDir := configDir("")
- if configDir == "" {
- return
- }
app, err := vespa.ApplicationFromString(getApplication())
if err != nil {
fatalErr(err, "Could not parse application")
return
}
- apiKeyFile := filepath.Join(configDir, app.Tenant+".api-key.pem")
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
+ return
+ }
+ apiKeyFile := cfg.APIKeyPath(app.Tenant)
if util.PathExists(apiKeyFile) && !overwriteKey {
printErrHint(fmt.Errorf("File %s already exists", apiKeyFile), "Use -f to overwrite it")
printPublicKey(apiKeyFile, app.Tenant)
diff --git a/client/go/cmd/api_key_test.go b/client/go/cmd/api_key_test.go
index 0e50fd6d669..c00f520aa25 100644
--- a/client/go/cmd/api_key_test.go
+++ b/client/go/cmd/api_key_test.go
@@ -11,13 +11,13 @@ import (
)
func TestAPIKey(t *testing.T) {
- configDir := t.TempDir()
- keyFile := configDir + "/.vespa/t1.api-key.pem"
+ homeDir := t.TempDir()
+ keyFile := homeDir + "/.vespa/t1.api-key.pem"
- out := execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, configDir: configDir}, t, nil)
+ out := execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, homeDir: homeDir}, t, nil)
assert.True(t, strings.HasPrefix(out, "Success: API private key written to "+keyFile+"\n"))
- out = execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, configDir: configDir}, t, nil)
+ out = execute(command{args: []string{"api-key", "-a", "t1.a1.i1"}, homeDir: homeDir}, t, nil)
assert.True(t, strings.HasPrefix(out, "Error: File "+keyFile+" already exists\nHint: Use -f to overwrite it\n"))
assert.True(t, strings.Contains(out, "This is your public key"))
}
diff --git a/client/go/cmd/cert.go b/client/go/cmd/cert.go
index e1e11b6f73e..078c0704f9d 100644
--- a/client/go/cmd/cert.go
+++ b/client/go/cmd/cert.go
@@ -28,20 +28,34 @@ var certCmd = &cobra.Command{
DisableAutoGenTag: true,
Args: cobra.MaximumNArgs(1),
Run: func(cmd *cobra.Command, args []string) {
- app := getApplication()
+ app, err := vespa.ApplicationFromString(getApplication())
+ if err != nil {
+ fatalErr(err)
+ return
+ }
pkg, err := vespa.ApplicationPackageFrom(applicationSource(args))
if err != nil {
fatalErr(err)
return
}
- configDir := configDir(app)
- if configDir == "" {
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err)
return
}
securityDir := filepath.Join(pkg.Path, "security")
pkgCertificateFile := filepath.Join(securityDir, "clients.pem")
- privateKeyFile := filepath.Join(configDir, "data-plane-private-key.pem")
- certificateFile := filepath.Join(configDir, "data-plane-public-cert.pem")
+ privateKeyFile, err := cfg.PrivateKeyPath(app)
+ if err != nil {
+ fatalErr(err)
+ return
+ }
+ certificateFile, err := cfg.CertificatePath(app)
+ if err != nil {
+ fatalErr(err)
+ return
+ }
+
if !overwriteCertificate {
for _, file := range []string{pkgCertificateFile, privateKeyFile, certificateFile} {
if util.PathExists(file) {
diff --git a/client/go/cmd/cert_test.go b/client/go/cmd/cert_test.go
index e655f76b0f1..174b5fe5e9d 100644
--- a/client/go/cmd/cert_test.go
+++ b/client/go/cmd/cert_test.go
@@ -11,20 +11,23 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/vespa-engine/vespa/client/go/vespa"
)
func TestCert(t *testing.T) {
- tmpDir := t.TempDir()
- mockApplicationPackage(t, tmpDir)
- out := execute(command{args: []string{"cert", "-a", "t1.a1.i1", tmpDir}, configDir: tmpDir}, t, nil)
+ homeDir := t.TempDir()
+ mockApplicationPackage(t, homeDir)
+ out := execute(command{args: []string{"cert", "-a", "t1.a1.i1", homeDir}, homeDir: homeDir}, t, nil)
- pkgCertificate := filepath.Join(tmpDir, "security", "clients.pem")
- certificate := filepath.Join(tmpDir, ".vespa", "t1.a1.i1", "data-plane-public-cert.pem")
- privateKey := filepath.Join(tmpDir, ".vespa", "t1.a1.i1", "data-plane-private-key.pem")
+ app, err := vespa.ApplicationFromString("t1.a1.i1")
+ assert.Nil(t, err)
+ pkgCertificate := filepath.Join(homeDir, "security", "clients.pem")
+ certificate := filepath.Join(homeDir, ".vespa", app.String(), "data-plane-public-cert.pem")
+ privateKey := filepath.Join(homeDir, ".vespa", app.String(), "data-plane-private-key.pem")
assert.Equal(t, fmt.Sprintf("Success: Certificate written to %s\nSuccess: Certificate written to %s\nSuccess: Private key written to %s\n", pkgCertificate, certificate, privateKey), out)
- out = execute(command{args: []string{"cert", "-a", "t1.a1.i1", tmpDir}, configDir: tmpDir}, t, nil)
+ out = execute(command{args: []string{"cert", "-a", "t1.a1.i1", homeDir}, homeDir: homeDir}, t, nil)
assert.True(t, strings.HasPrefix(out, "Error: Certificate or private key"))
}
diff --git a/client/go/cmd/clone.go b/client/go/cmd/clone.go
index ffd77030935..136872ecc8a 100644
--- a/client/go/cmd/clone.go
+++ b/client/go/cmd/clone.go
@@ -30,7 +30,7 @@ func init() {
var cloneCmd = &cobra.Command{
// TODO: "application" and "list" subcommands?
- Use: "clone <sample-application-path> <target-directory>",
+ Use: "clone sample-application-path target-directory",
Short: "Create files and directory structure for a new Vespa application from a sample application",
Long: `Creates an application package file structure.
Example: "$ vespa clone vespa-cloud/album-recommendation my-app",
diff --git a/client/go/cmd/command_tester.go b/client/go/cmd/command_tester.go
index be752f03d53..095a1af7ac3 100644
--- a/client/go/cmd/command_tester.go
+++ b/client/go/cmd/command_tester.go
@@ -11,6 +11,7 @@ import (
"log"
"net/http"
"os"
+ "path/filepath"
"strconv"
"testing"
"time"
@@ -23,9 +24,9 @@ import (
)
type command struct {
- configDir string
- args []string
- moreArgs []string
+ homeDir string
+ args []string
+ moreArgs []string
}
func execute(cmd command, t *testing.T, client *mockHttpClient) string {
@@ -37,11 +38,11 @@ func execute(cmd command, t *testing.T, client *mockHttpClient) string {
color = aurora.NewAurora(false)
// Set config dir. Use a separate one per test if none is specified
- if cmd.configDir == "" {
- cmd.configDir = t.TempDir()
+ if cmd.homeDir == "" {
+ cmd.homeDir = t.TempDir()
viper.Reset()
}
- os.Setenv("VESPA_CLI_HOME", cmd.configDir)
+ os.Setenv("VESPA_CLI_HOME", filepath.Join(cmd.homeDir, ".vespa"))
// Reset flags to their default value - persistent flags in Cobra persists over tests
rootCmd.Flags().VisitAll(func(f *pflag.Flag) {
diff --git a/client/go/cmd/config.go b/client/go/cmd/config.go
index bb1662d0b07..13142c92553 100644
--- a/client/go/cmd/config.go
+++ b/client/go/cmd/config.go
@@ -6,6 +6,7 @@ package cmd
import (
"fmt"
+ "io/ioutil"
"log"
"os"
"path/filepath"
@@ -43,90 +44,149 @@ var configCmd = &cobra.Command{
}
var setConfigCmd = &cobra.Command{
- Use: "set <option> <value>",
+ Use: "set option-name value",
Short: "Set a configuration option.",
Example: "$ vespa config set target cloud",
DisableAutoGenTag: true,
Args: cobra.ExactArgs(2),
Run: func(cmd *cobra.Command, args []string) {
- if err := setOption(args[0], args[1]); err != nil {
- log.Print(err)
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
+ return
+ }
+ if err := cfg.Set(args[0], args[1]); err != nil {
+ fatalErr(err)
} else {
- writeConfig()
+ if err := cfg.Write(); err != nil {
+ fatalErr(err)
+ }
}
},
}
var getConfigCmd = &cobra.Command{
- Use: "get [<option>]",
+ Use: "get option-name",
Short: "Get a configuration option",
Example: "$ vespa config get target",
Args: cobra.MaximumNArgs(1),
DisableAutoGenTag: true,
Run: func(cmd *cobra.Command, args []string) {
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
+ return
+ }
+
if len(args) == 0 { // Print all values
- printOption(targetFlag)
- printOption(applicationFlag)
+ printOption(cfg, targetFlag)
+ printOption(cfg, applicationFlag)
} else {
- printOption(args[0])
+ printOption(cfg, args[0])
}
},
}
-func printOption(option string) {
- value, err := getOption(option)
- if err != nil {
- value = color.Faint("<unset>").String()
- } else {
- value = color.Cyan(value).String()
- }
- log.Printf("%s = %s", option, value)
+type Config struct {
+ Home string
+ createDirs bool
}
-func configDir(application string) string {
+func LoadConfig() (*Config, error) {
home := os.Getenv("VESPA_CLI_HOME")
if home == "" {
var err error
home, err = os.UserHomeDir()
if err != nil {
- fatalErr(err, "Could not determine configuration directory")
- return ""
+ return nil, err
}
+ home = filepath.Join(home, ".vespa")
}
- configDir := filepath.Join(home, ".vespa", application)
- if err := os.MkdirAll(configDir, 0755); err != nil {
- fatalErr(err, "Could not create config directory")
- return ""
+ if err := os.MkdirAll(home, 0700); err != nil {
+ return nil, err
}
- return configDir
+ c := &Config{Home: home, createDirs: true}
+ if err := c.load(); err != nil {
+ return nil, err
+ }
+ return c, nil
}
-func bindFlagToConfig(option string, command *cobra.Command) {
- flagToConfigBindings[option] = command
+func (c *Config) Write() error {
+ if err := os.MkdirAll(c.Home, 0700); err != nil {
+ return err
+ }
+ configFile := filepath.Join(c.Home, configName+"."+configType)
+ if !util.PathExists(configFile) {
+ if _, err := os.Create(configFile); err != nil {
+ return err
+ }
+ }
+ return viper.WriteConfig()
+}
+
+func (c *Config) CertificatePath(app vespa.ApplicationID) (string, error) {
+ return c.applicationFilePath(app, "data-plane-public-cert.pem")
+}
+
+func (c *Config) PrivateKeyPath(app vespa.ApplicationID) (string, error) {
+ return c.applicationFilePath(app, "data-plane-private-key.pem")
+}
+
+func (c *Config) APIKeyPath(tenantName string) string {
+ return filepath.Join(c.Home, tenantName+".api-key.pem")
+}
+
+func (c *Config) ReadAPIKey(tenantName string) ([]byte, error) {
+ return ioutil.ReadFile(c.APIKeyPath(tenantName))
}
-func readConfig() {
- configDir := configDir("")
- if configDir == "" {
- return
+func (c *Config) ReadSessionID(app vespa.ApplicationID) (int64, error) {
+ sessionPath, err := c.applicationFilePath(app, "session_id")
+ if err != nil {
+ return 0, err
+ }
+ b, err := ioutil.ReadFile(sessionPath)
+ if err != nil {
+ return 0, err
}
+ return strconv.ParseInt(strings.TrimSpace(string(b)), 10, 64)
+}
+
+func (c *Config) WriteSessionID(app vespa.ApplicationID, sessionID int64) error {
+ sessionPath, err := c.applicationFilePath(app, "session_id")
+ if err != nil {
+ return err
+ }
+ return ioutil.WriteFile(sessionPath, []byte(fmt.Sprintf("%d\n", sessionID)), 0600)
+}
+
+func (c *Config) applicationFilePath(app vespa.ApplicationID, name string) (string, error) {
+ appDir := filepath.Join(c.Home, app.String())
+ if c.createDirs {
+ if err := os.MkdirAll(appDir, 0700); err != nil {
+ return "", err
+ }
+ }
+ return filepath.Join(appDir, name), nil
+}
+
+func (c *Config) load() error {
viper.SetConfigName(configName)
viper.SetConfigType(configType)
- viper.AddConfigPath(configDir)
+ viper.AddConfigPath(c.Home)
viper.AutomaticEnv()
for option, command := range flagToConfigBindings {
viper.BindPFlag(option, command.PersistentFlags().Lookup(option))
}
err := viper.ReadInConfig()
if _, ok := err.(viper.ConfigFileNotFoundError); ok {
- return // Fine
- }
- if err != nil {
- fatalErr(err, "Could not read configuration")
+ return nil
}
+ return err
}
-func getOption(option string) (string, error) {
+func (c *Config) Get(option string) (string, error) {
value := viper.GetString(option)
if value == "" {
return "", fmt.Errorf("no such option: %q", option)
@@ -134,7 +194,7 @@ func getOption(option string) (string, error) {
return value, nil
}
-func setOption(option, value string) error {
+func (c *Config) Set(option, value string) error {
switch option {
case targetFlag:
switch value {
@@ -162,29 +222,16 @@ func setOption(option, value string) error {
return fmt.Errorf("invalid option or value: %q: %q", option, value)
}
-func writeConfig() {
- configDir := configDir("")
- if configDir == "" {
- return
- }
-
- if !util.PathExists(configDir) {
- if err := os.MkdirAll(configDir, 0700); err != nil {
- fatalErr(err, "Could not create ", color.Cyan(configDir))
- return
- }
- }
-
- configFile := filepath.Join(configDir, configName+"."+configType)
- if !util.PathExists(configFile) {
- if _, err := os.Create(configFile); err != nil {
- fatalErr(err, "Could not create ", color.Cyan(configFile))
- return
- }
+func printOption(cfg *Config, option string) {
+ value, err := cfg.Get(option)
+ if err != nil {
+ value = color.Faint("<unset>").String()
+ } else {
+ value = color.Cyan(value).String()
}
+ log.Printf("%s = %s", option, value)
+}
- if err := viper.WriteConfig(); err != nil {
- fatalErr(err, "Could not write config")
- return
- }
+func bindFlagToConfig(option string, command *cobra.Command) {
+ flagToConfigBindings[option] = command
}
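
The new Config type above replaces the old configDir()/readConfig()/writeConfig() helpers and gives every file the CLI manages a fixed location under VESPA_CLI_HOME (default ~/.vespa), with one subdirectory per application. A standalone sketch of the resulting layout, assuming an application ID of t1.a1.i1 and a config file named config.yaml (the actual configName/configType constants are defined outside this diff):

    // Illustrative only: prints the paths implied by the Config type above.
    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        home := os.Getenv("VESPA_CLI_HOME")
        if home == "" {
            userHome, err := os.UserHomeDir()
            if err != nil {
                panic(err)
            }
            home = filepath.Join(userHome, ".vespa")
        }
        app := "t1.a1.i1" // tenant.application.instance, as printed by ApplicationID.String()
        fmt.Println(filepath.Join(home, "config.yaml"))                     // main config file (name assumed)
        fmt.Println(filepath.Join(home, "t1.api-key.pem"))                  // APIKeyPath("t1")
        fmt.Println(filepath.Join(home, app, "data-plane-public-cert.pem")) // CertificatePath(app)
        fmt.Println(filepath.Join(home, app, "data-plane-private-key.pem")) // PrivateKeyPath(app)
        fmt.Println(filepath.Join(home, app, "session_id"))                 // ReadSessionID/WriteSessionID
    }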
diff --git a/client/go/cmd/config_test.go b/client/go/cmd/config_test.go
index dee63bcb58f..07d165d58e0 100644
--- a/client/go/cmd/config_test.go
+++ b/client/go/cmd/config_test.go
@@ -7,24 +7,24 @@ import (
)
func TestConfig(t *testing.T) {
- configDir := t.TempDir()
- assert.Equal(t, "invalid option or value: \"foo\": \"bar\"\n", execute(command{configDir: configDir, args: []string{"config", "set", "foo", "bar"}}, t, nil))
- assert.Equal(t, "foo = <unset>\n", execute(command{configDir: configDir, args: []string{"config", "get", "foo"}}, t, nil))
- assert.Equal(t, "target = local\n", execute(command{configDir: configDir, args: []string{"config", "get", "target"}}, t, nil))
- assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "target", "cloud"}}, t, nil))
- assert.Equal(t, "target = cloud\n", execute(command{configDir: configDir, args: []string{"config", "get", "target"}}, t, nil))
- assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "target", "http://127.0.0.1:8080"}}, t, nil))
- assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "target", "https://127.0.0.1"}}, t, nil))
- assert.Equal(t, "target = https://127.0.0.1\n", execute(command{configDir: configDir, args: []string{"config", "get", "target"}}, t, nil))
+ homeDir := t.TempDir()
+ assert.Equal(t, "invalid option or value: \"foo\": \"bar\"\n", execute(command{homeDir: homeDir, args: []string{"config", "set", "foo", "bar"}}, t, nil))
+ assert.Equal(t, "foo = <unset>\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "foo"}}, t, nil))
+ assert.Equal(t, "target = local\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "target"}}, t, nil))
+ assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "cloud"}}, t, nil))
+ assert.Equal(t, "target = cloud\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "target"}}, t, nil))
+ assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "http://127.0.0.1:8080"}}, t, nil))
+ assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "target", "https://127.0.0.1"}}, t, nil))
+ assert.Equal(t, "target = https://127.0.0.1\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "target"}}, t, nil))
- assert.Equal(t, "invalid application: \"foo\"\n", execute(command{configDir: configDir, args: []string{"config", "set", "application", "foo"}}, t, nil))
- assert.Equal(t, "application = <unset>\n", execute(command{configDir: configDir, args: []string{"config", "get", "application"}}, t, nil))
- assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "application", "t1.a1.i1"}}, t, nil))
- assert.Equal(t, "application = t1.a1.i1\n", execute(command{configDir: configDir, args: []string{"config", "get", "application"}}, t, nil))
+ assert.Equal(t, "invalid application: \"foo\"\n", execute(command{homeDir: homeDir, args: []string{"config", "set", "application", "foo"}}, t, nil))
+ assert.Equal(t, "application = <unset>\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "application"}}, t, nil))
+ assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "application", "t1.a1.i1"}}, t, nil))
+ assert.Equal(t, "application = t1.a1.i1\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "application"}}, t, nil))
- assert.Equal(t, "target = https://127.0.0.1\napplication = t1.a1.i1\n", execute(command{configDir: configDir, args: []string{"config", "get"}}, t, nil))
+ assert.Equal(t, "target = https://127.0.0.1\napplication = t1.a1.i1\n", execute(command{homeDir: homeDir, args: []string{"config", "get"}}, t, nil))
- assert.Equal(t, "", execute(command{configDir: configDir, args: []string{"config", "set", "wait", "60"}}, t, nil))
- assert.Equal(t, "wait option must be an integer >= 0, got \"foo\"\n", execute(command{configDir: configDir, args: []string{"config", "set", "wait", "foo"}}, t, nil))
- assert.Equal(t, "wait = 60\n", execute(command{configDir: configDir, args: []string{"config", "get", "wait"}}, t, nil))
+ assert.Equal(t, "", execute(command{homeDir: homeDir, args: []string{"config", "set", "wait", "60"}}, t, nil))
+ assert.Equal(t, "wait option must be an integer >= 0, got \"foo\"\n", execute(command{homeDir: homeDir, args: []string{"config", "set", "wait", "foo"}}, t, nil))
+ assert.Equal(t, "wait = 60\n", execute(command{homeDir: homeDir, args: []string{"config", "get", "wait"}}, t, nil))
}
diff --git a/client/go/cmd/curl.go b/client/go/cmd/curl.go
new file mode 100644
index 00000000000..4d949b51e8f
--- /dev/null
+++ b/client/go/cmd/curl.go
@@ -0,0 +1,143 @@
+package cmd
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "strings"
+
+ "github.com/kballard/go-shellquote"
+ "github.com/spf13/cobra"
+ "github.com/vespa-engine/vespa/client/go/vespa"
+)
+
+var curlDryRun bool
+var curlPath string
+
+func init() {
+ rootCmd.AddCommand(curlCmd)
+ curlCmd.Flags().StringVarP(&curlPath, "path", "p", "", "The path to curl. If this is unset, curl from PATH is used")
+ curlCmd.Flags().BoolVarP(&curlDryRun, "dry-run", "n", false, "Print the curl command that would be executed")
+}
+
+var curlCmd = &cobra.Command{
+ Use: "curl [curl-options] path",
+ Short: "Query Vespa using curl",
+ Long: `Query Vespa using curl.
+
+Execute curl with the appropriate URL, certificate and private key for your application.`,
+ Example: `$ vespa curl /search/?yql=query
+$ vespa curl -- -v --data-urlencode "yql=select * from sources * where title contains 'foo';" /search/
+$ vespa curl -t local -- -v /search/?yql=query
+`,
+ DisableAutoGenTag: true,
+ Args: cobra.MinimumNArgs(1),
+ Run: func(cmd *cobra.Command, args []string) {
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
+ return
+ }
+ app, err := vespa.ApplicationFromString(getApplication())
+ if err != nil {
+ fatalErr(err)
+ return
+ }
+ privateKeyFile, err := cfg.PrivateKeyPath(app)
+ if err != nil {
+ fatalErr(err)
+ return
+ }
+ certificateFile, err := cfg.CertificatePath(app)
+ if err != nil {
+ fatalErr(err)
+ return
+ }
+ service := getService("query", 0)
+ c := &curl{privateKeyPath: privateKeyFile, certificatePath: certificateFile}
+ if curlDryRun {
+ cmd, err := c.command(service.BaseURL, args...)
+ if err != nil {
+ fatalErr(err, "Failed to create curl command")
+ return
+ }
+ log.Print(shellquote.Join(cmd.Args...))
+ } else {
+ if err := c.run(service.BaseURL, args...); err != nil {
+ fatalErr(err, "Failed to run curl")
+ return
+ }
+ }
+ },
+}
+
+type curl struct {
+ path string
+ certificatePath string
+ privateKeyPath string
+}
+
+func (c *curl) run(baseURL string, args ...string) error {
+ cmd, err := c.command(baseURL, args...)
+ if err != nil {
+ return err
+ }
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+ return cmd.Wait()
+}
+
+func (c *curl) command(baseURL string, args ...string) (*exec.Cmd, error) {
+ if len(args) == 0 {
+ return nil, fmt.Errorf("need at least one argument")
+ }
+
+ if c.path == "" {
+ resolvedPath, err := resolveCurlPath()
+ if err != nil {
+ return nil, err
+ }
+ c.path = resolvedPath
+ }
+
+ path := args[len(args)-1]
+ args = args[:len(args)-1]
+ if !hasOption("--key", args) && c.privateKeyPath != "" {
+ args = append(args, "--key", c.privateKeyPath)
+ }
+ if !hasOption("--cert", args) && c.certificatePath != "" {
+ args = append(args, "--cert", c.certificatePath)
+ }
+
+ baseURL = strings.TrimSuffix(baseURL, "/")
+ path = strings.TrimPrefix(path, "/")
+ args = append(args, baseURL+"/"+path)
+
+ return exec.Command(c.path, args...), nil
+}
+
+func hasOption(option string, args []string) bool {
+ for _, arg := range args {
+ if arg == option {
+ return true
+ }
+ }
+ return false
+}
+
+func resolveCurlPath() (string, error) {
+ var curlPath string
+ var err error
+ curlPath, err = exec.LookPath("curl")
+ if err != nil {
+ curlPath, err = exec.LookPath("curl.exe")
+ if err != nil {
+ return "", err
+ }
+ }
+ return curlPath, nil
+}
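
The core of curl.go is how command() assembles the final invocation: the last argument is treated as the request path, --key and --cert are appended only when the caller has not supplied them, and the path is joined onto the service base URL. The following self-contained sketch mirrors that argument handling for illustration; it is not the CLI code itself.

    package main

    import (
        "fmt"
        "strings"
    )

    // buildArgs mirrors the joining rules in curl.go: caller flags first, then
    // default TLS flags when absent, then the base URL joined with the path.
    func buildArgs(baseURL, keyPath, certPath string, args []string) []string {
        path := args[len(args)-1]
        out := append([]string{}, args[:len(args)-1]...)
        if !contains(out, "--key") && keyPath != "" {
            out = append(out, "--key", keyPath)
        }
        if !contains(out, "--cert") && certPath != "" {
            out = append(out, "--cert", certPath)
        }
        return append(out, strings.TrimSuffix(baseURL, "/")+"/"+strings.TrimPrefix(path, "/"))
    }

    func contains(args []string, option string) bool {
        for _, a := range args {
            if a == option {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(buildArgs("https://127.0.0.1:8080/", "key.pem", "cert.pem", []string{"-v", "/search/?yql=query"}))
        // Output: [-v --key key.pem --cert cert.pem https://127.0.0.1:8080/search/?yql=query]
    }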
diff --git a/client/go/cmd/curl_test.go b/client/go/cmd/curl_test.go
new file mode 100644
index 00000000000..c3163e731ce
--- /dev/null
+++ b/client/go/cmd/curl_test.go
@@ -0,0 +1,53 @@
+package cmd
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCurl(t *testing.T) {
+ homeDir := t.TempDir()
+ httpClient := &mockHttpClient{}
+ convergeServices(httpClient)
+ out := execute(command{homeDir: homeDir, args: []string{"curl", "-n", "-p", "/usr/bin/curl", "-a", "t1.a1.i1", "--", "-v", "--data-urlencode", "arg=with space", "/search"}}, t, httpClient)
+
+ expected := fmt.Sprintf("/usr/bin/curl -v --data-urlencode 'arg=with space' --key %s --cert %s https://127.0.0.1:8080/search\n",
+ filepath.Join(homeDir, ".vespa", "t1.a1.i1", "data-plane-private-key.pem"),
+ filepath.Join(homeDir, ".vespa", "t1.a1.i1", "data-plane-public-cert.pem"))
+ assert.Equal(t, expected, out)
+}
+
+func TestCurlCommand(t *testing.T) {
+ c := &curl{path: "/usr/bin/curl", privateKeyPath: "/tmp/priv-key", certificatePath: "/tmp/cert-key"}
+ assertCurl(t, c, "/usr/bin/curl -v --key /tmp/priv-key --cert /tmp/cert-key https://example.com/", "-v", "/")
+
+ c = &curl{path: "/usr/bin/curl", privateKeyPath: "/tmp/priv-key", certificatePath: "/tmp/cert-key"}
+ assertCurl(t, c, "/usr/bin/curl -v --cert my-cert --key my-key https://example.com/", "-v", "--cert", "my-cert", "--key", "my-key", "/")
+
+ c = &curl{path: "/usr/bin/curl2"}
+ assertCurl(t, c, "/usr/bin/curl2 -v https://example.com/foo", "-v", "/foo")
+
+ c = &curl{path: "/usr/bin/curl"}
+ assertCurl(t, c, "/usr/bin/curl -v https://example.com/foo/bar", "-v", "/foo/bar")
+
+ c = &curl{path: "/usr/bin/curl"}
+ assertCurl(t, c, "/usr/bin/curl -v https://example.com/foo/bar", "-v", "foo/bar")
+
+ c = &curl{path: "/usr/bin/curl"}
+ assertCurlURL(t, c, "/usr/bin/curl -v https://example.com/foo/bar", "https://example.com/", "-v", "foo/bar")
+}
+
+func assertCurl(t *testing.T, c *curl, expectedOutput string, args ...string) {
+ assertCurlURL(t, c, expectedOutput, "https://example.com", args...)
+}
+
+func assertCurlURL(t *testing.T, c *curl, expectedOutput string, url string, args ...string) {
+ cmd, err := c.command("https://example.com", args...)
(Note: the url parameter should be passed through here, i.e. c.command(url, args...); with the literal base URL the parameter is unused.)
+ assert.Nil(t, err)
+
+ assert.Equal(t, expectedOutput, strings.Join(cmd.Args, " "))
+}
diff --git a/client/go/cmd/deploy.go b/client/go/cmd/deploy.go
index 19fa08ebaa4..866759b18c5 100644
--- a/client/go/cmd/deploy.go
+++ b/client/go/cmd/deploy.go
@@ -6,12 +6,7 @@ package cmd
import (
"fmt"
- "io/ioutil"
"log"
- "os"
- "path/filepath"
- "strconv"
- "strings"
"github.com/spf13/cobra"
"github.com/vespa-engine/vespa/client/go/vespa"
@@ -33,7 +28,7 @@ func init() {
}
var deployCmd = &cobra.Command{
- Use: "deploy [<application-directory>]",
+ Use: "deploy [application-directory]",
Short: "Deploy (prepare and activate) an application package",
Long: `Deploy (prepare and activate) an application package.
@@ -51,6 +46,11 @@ If application directory is not specified, it defaults to working directory.`,
fatalErr(nil, err.Error())
return
}
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
+ return
+ }
target := getTarget()
opts := vespa.DeploymentOpts{ApplicationPackage: pkg, Target: target}
if opts.IsCloud() {
@@ -58,7 +58,11 @@ If application directory is not specified, it defaults to working directory.`,
if !opts.ApplicationPackage.HasCertificate() {
fatalErrHint(fmt.Errorf("Missing certificate in application package"), "Applications in Vespa Cloud require a certificate", "Try 'vespa cert'")
}
- opts.APIKey = readAPIKey(deployment.Application.Tenant)
+ opts.APIKey, err = cfg.ReadAPIKey(deployment.Application.Tenant)
+ if err != nil {
+ fatalErrHint(err, "Deployment to cloud requires an API key. Try 'vespa api-key'")
+ return
+ }
opts.Deployment = deployment
}
if sessionOrRunID, err := vespa.Deploy(opts); err == nil {
@@ -83,7 +87,7 @@ If application directory is not specified, it defaults to working directory.`,
}
var prepareCmd = &cobra.Command{
- Use: "prepare <application-directory>",
+ Use: "prepare application-directory",
Short: "Prepare an application package for activation",
Args: cobra.MaximumNArgs(1),
DisableAutoGenTag: true,
@@ -93,8 +97,9 @@ var prepareCmd = &cobra.Command{
fatalErr(err, "Could not find application package")
return
}
- configDir := configDir("default")
- if configDir == "" {
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
return
}
target := getTarget()
@@ -103,7 +108,10 @@ var prepareCmd = &cobra.Command{
Target: target,
})
if err == nil {
- writeSessionID(configDir, sessionID)
+ if err := cfg.WriteSessionID(vespa.DefaultApplication, sessionID); err != nil {
+ fatalErr(err, "Could not write session ID")
+ return
+ }
printSuccess("Prepared ", color.Cyan(pkg.Path), " with session ", sessionID)
} else {
fatalErr(nil, err.Error())
@@ -122,8 +130,16 @@ var activateCmd = &cobra.Command{
fatalErr(err, "Could not find application package")
return
}
- configDir := configDir("default")
- sessionID := readSessionID(configDir)
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
+ return
+ }
+ sessionID, err := cfg.ReadSessionID(vespa.DefaultApplication)
+ if err != nil {
+ fatalErr(err, "Could not read session ID")
+ return
+ }
target := getTarget()
err = vespa.Activate(sessionID, vespa.DeploymentOpts{
ApplicationPackage: pkg,
@@ -144,26 +160,3 @@ func waitForQueryService(sessionOrRunID int64) {
waitForService("query", sessionOrRunID)
}
}
-
-func writeSessionID(appConfigDir string, sessionID int64) {
- if err := os.MkdirAll(appConfigDir, 0755); err != nil {
- fatalErr(err, "Could not create directory for session ID")
- }
- if err := ioutil.WriteFile(sessionIDFile(appConfigDir), []byte(fmt.Sprintf("%d\n", sessionID)), 0600); err != nil {
- fatalErr(err, "Could not write session ID")
- }
-}
-
-func readSessionID(appConfigDir string) int64 {
- b, err := ioutil.ReadFile(sessionIDFile(appConfigDir))
- if err != nil {
- fatalErr(err, "Could not read session ID")
- }
- id, err := strconv.ParseInt(strings.TrimSpace(string(b)), 10, 64)
- if err != nil {
- fatalErr(err, "Invalid session ID")
- }
- return id
-}
-
-func sessionIDFile(appConfigDir string) string { return filepath.Join(appConfigDir, "session_id") }
diff --git a/client/go/cmd/deploy_test.go b/client/go/cmd/deploy_test.go
index ff85cd3d835..f24ba0829f9 100644
--- a/client/go/cmd/deploy_test.go
+++ b/client/go/cmd/deploy_test.go
@@ -10,6 +10,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/vespa-engine/vespa/client/go/vespa"
)
func TestPrepareZip(t *testing.T) {
@@ -124,12 +125,14 @@ func assertPrepare(applicationPackage string, arguments []string, t *testing.T)
func assertActivate(applicationPackage string, arguments []string, t *testing.T) {
client := &mockHttpClient{}
- configDir := t.TempDir()
- appConfigDir := filepath.Join(configDir, ".vespa", "default")
- writeSessionID(appConfigDir, 42)
+ homeDir := t.TempDir()
+ cfg := Config{Home: filepath.Join(homeDir, ".vespa"), createDirs: true}
+ if err := cfg.WriteSessionID(vespa.DefaultApplication, 42); err != nil {
+ t.Fatal(err)
+ }
assert.Equal(t,
"Success: Activated "+applicationPackage+" with session 42\n",
- execute(command{args: arguments, configDir: configDir}, t, client))
+ execute(command{args: arguments, homeDir: homeDir}, t, client))
url := "http://127.0.0.1:19071/application/v2/tenant/default/session/42/active"
assert.Equal(t, url, client.lastRequest.URL.String())
assert.Equal(t, "PUT", client.lastRequest.Method)
diff --git a/client/go/cmd/document.go b/client/go/cmd/document.go
index 450f061f140..d2552729aeb 100644
--- a/client/go/cmd/document.go
+++ b/client/go/cmd/document.go
@@ -22,7 +22,7 @@ func init() {
}
var documentCmd = &cobra.Command{
- Use: "document <json-file>",
+ Use: "document json-file",
Short: "Issue a document operation to Vespa",
Long: `Issue a document operation to Vespa.
@@ -43,7 +43,7 @@ should be used instead of this.`,
}
var documentPutCmd = &cobra.Command{
- Use: "put [<id>] <json-file>",
+ Use: "put [id] json-file",
Short: "Writes a document to Vespa",
Long: `Writes the document in the given file to Vespa.
If the document already exists, all its values will be replaced by this document.
@@ -62,7 +62,7 @@ $ vespa document put id:mynamespace:music::a-head-full-of-dreams src/test/resour
}
var documentUpdateCmd = &cobra.Command{
- Use: "update [<id>] <json-file>",
+ Use: "update [id] json-file",
Short: "Modifies some fields of an existing document",
Long: `Updates the values of the fields given in a json file as specified in the file.
If the document id is specified both as an argument and in the file the argument takes precedence.`,
@@ -80,7 +80,7 @@ $ vespa document update id:mynamespace:music::a-head-full-of-dreams src/test/res
}
var documentRemoveCmd = &cobra.Command{
- Use: "remove <id or json.file>",
+ Use: "remove id | json-file",
Short: "Removes a document from Vespa",
Long: `Removes the document specified either as a document id or given in the json file.
If the document id is specified both as an argument and in the file the argument takes precedence.`,
@@ -98,7 +98,7 @@ $ vespa document remove id:mynamespace:music::a-head-full-of-dreams`,
}
var documentGetCmd = &cobra.Command{
- Use: "get <id>",
+ Use: "get id",
Short: "Gets a document",
Args: cobra.ExactArgs(1),
DisableAutoGenTag: true,
diff --git a/client/go/cmd/helpers.go b/client/go/cmd/helpers.go
index b672419cae6..14699abf40e 100644
--- a/client/go/cmd/helpers.go
+++ b/client/go/cmd/helpers.go
@@ -10,7 +10,6 @@ import (
"io/ioutil"
"log"
"os"
- "path/filepath"
"strings"
"time"
@@ -51,16 +50,6 @@ func printSuccess(msg ...interface{}) {
log.Print(color.Green("Success: "), fmt.Sprint(msg...))
}
-func readAPIKey(tenant string) []byte {
- configDir := configDir("")
- apiKeyPath := filepath.Join(configDir, tenant+".api-key.pem")
- key, err := ioutil.ReadFile(apiKeyPath)
- if err != nil {
- fatalErrHint(err, "Deployment to cloud requires an API key. Try 'vespa api-key'")
- }
- return key
-}
-
func deploymentFromArgs() vespa.Deployment {
zone, err := vespa.ZoneFromString(zoneArg)
if err != nil {
@@ -81,7 +70,12 @@ func applicationSource(args []string) string {
}
func getApplication() string {
- app, err := getOption(applicationFlag)
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
+ return ""
+ }
+ app, err := cfg.Get(applicationFlag)
if err != nil {
fatalErr(err, "A valid application must be specified")
}
@@ -89,7 +83,12 @@ func getApplication() string {
}
func getTargetType() string {
- target, err := getOption(targetFlag)
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
+ return ""
+ }
+ target, err := cfg.Get(targetFlag)
if err != nil {
fatalErr(err, "A valid target must be specified")
}
@@ -122,10 +121,25 @@ func getTarget() vespa.Target {
return vespa.LocalTarget()
case "cloud":
deployment := deploymentFromArgs()
- apiKey := readAPIKey(deployment.Application.Tenant)
- configDir := configDir(deployment.Application.String())
- privateKeyFile := filepath.Join(configDir, "data-plane-private-key.pem")
- certificateFile := filepath.Join(configDir, "data-plane-public-cert.pem")
+ cfg, err := LoadConfig()
+ if err != nil {
+ fatalErr(err, "Could not load config")
+ return nil
+ }
+ apiKey, err := ioutil.ReadFile(cfg.APIKeyPath(deployment.Application.Tenant))
+ if err != nil {
+ fatalErrHint(err, "Deployment to cloud requires an API key. Try 'vespa api-key'")
+ }
+ privateKeyFile, err := cfg.PrivateKeyPath(deployment.Application)
+ if err != nil {
+ fatalErr(err)
+ return nil
+ }
+ certificateFile, err := cfg.CertificatePath(deployment.Application)
+ if err != nil {
+ fatalErr(err)
+ return nil
+ }
kp, err := tls.LoadX509KeyPair(certificateFile, privateKeyFile)
if err != nil {
fatalErr(err, "Could not read key pair")
diff --git a/client/go/cmd/man.go b/client/go/cmd/man.go
index 0bd80f3d985..ff7f6fb1b6a 100644
--- a/client/go/cmd/man.go
+++ b/client/go/cmd/man.go
@@ -10,7 +10,7 @@ func init() {
}
var manCmd = &cobra.Command{
- Use: "man <directory>",
+ Use: "man directory",
Short: "Generate man pages and write them to given directory",
Args: cobra.ExactArgs(1),
Hidden: true, // Not intended to be called by users
diff --git a/client/go/cmd/query.go b/client/go/cmd/query.go
index ea80c037721..f05914eb9a7 100644
--- a/client/go/cmd/query.go
+++ b/client/go/cmd/query.go
@@ -20,7 +20,7 @@ func init() {
}
var queryCmd = &cobra.Command{
- Use: "query <query-parameters>",
+ Use: "query query-parameters",
Short: "Issue a query to Vespa",
Example: `$ vespa query "yql=select * from sources * where title contains 'foo';" hits=5`,
Long: `Issue a query to Vespa.
diff --git a/client/go/cmd/query_test.go b/client/go/cmd/query_test.go
index d87c9a66f77..bd9ae91f24d 100644
--- a/client/go/cmd/query_test.go
+++ b/client/go/cmd/query_test.go
@@ -54,17 +54,6 @@ func assertQuery(t *testing.T, expectedQuery string, query ...string) {
assert.Equal(t, queryURL+"/search/"+expectedQuery, client.lastRequest.URL.String())
}
-func assertQueryNonJsonResult(t *testing.T, expectedQuery string, query ...string) {
- client := &mockHttpClient{}
- queryURL := queryServiceURL(client)
- client.NextResponse(200, "query result")
- assert.Equal(t,
- "query result\n",
- executeCommand(t, client, []string{"query"}, query),
- "query output")
- assert.Equal(t, queryURL+"/search/"+expectedQuery, client.lastRequest.URL.String())
-}
-
func assertQueryError(t *testing.T, status int, errorMessage string) {
client := &mockHttpClient{}
convergeServices(client)
diff --git a/client/go/cmd/root.go b/client/go/cmd/root.go
index fde7d6edb5a..d218d3639b1 100644
--- a/client/go/cmd/root.go
+++ b/client/go/cmd/root.go
@@ -18,7 +18,7 @@ var (
// TODO: add timeout flag
// TODO: add flag to show http request made
rootCmd = &cobra.Command{
- Use: "vespa <command>",
+ Use: "vespa command-name",
Short: "The command-line tool for Vespa.ai",
Long: `The command-line tool for Vespa.ai.
@@ -49,7 +49,6 @@ func configureLogger() {
func init() {
configureLogger()
- cobra.OnInitialize(readConfig)
rootCmd.PersistentFlags().StringVarP(&targetArg, targetFlag, "t", "local", "The name or URL of the recipient of this command")
rootCmd.PersistentFlags().StringVarP(&applicationArg, applicationFlag, "a", "", "The application to manage")
rootCmd.PersistentFlags().IntVarP(&waitSecsArg, waitFlag, "w", 0, "Number of seconds to wait for a service to become ready")
diff --git a/client/go/cmd/version.go b/client/go/cmd/version.go
index 4a5b6ec71b3..05820f4e34b 100644
--- a/client/go/cmd/version.go
+++ b/client/go/cmd/version.go
@@ -2,6 +2,7 @@ package cmd
import (
"log"
+ "runtime"
"github.com/spf13/cobra"
"github.com/vespa-engine/vespa/client/go/build"
@@ -17,6 +18,6 @@ var versionCmd = &cobra.Command{
DisableAutoGenTag: true,
Args: cobra.ExactArgs(0),
Run: func(cmd *cobra.Command, args []string) {
- log.Print("vespa version ", build.Version)
+ log.Printf("vespa version %s compiled with %v on %v/%v", build.Version, runtime.Version(), runtime.GOOS, runtime.GOARCH)
},
}
diff --git a/client/go/cmd/version_test.go b/client/go/cmd/version_test.go
index 02303a08e21..fc977c47938 100644
--- a/client/go/cmd/version_test.go
+++ b/client/go/cmd/version_test.go
@@ -7,5 +7,5 @@ import (
)
func TestVersion(t *testing.T) {
- assert.Equal(t, "vespa version 0.0.0-devel\n", execute(command{args: []string{"version"}}, t, nil))
+ assert.Contains(t, execute(command{args: []string{"version"}}, t, nil), "vespa version 0.0.0-devel compiled with")
}
diff --git a/client/go/go.mod b/client/go/go.mod
index 893add7218b..509eb273c6c 100644
--- a/client/go/go.mod
+++ b/client/go/go.mod
@@ -3,6 +3,7 @@ module github.com/vespa-engine/vespa/client/go
go 1.15
require (
+ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/logrusorgru/aurora v2.0.3+incompatible
github.com/mattn/go-colorable v0.0.9
github.com/mattn/go-isatty v0.0.3
diff --git a/client/go/go.sum b/client/go/go.sum
index 826f137d5e2..97328690ee5 100644
--- a/client/go/go.sum
+++ b/client/go/go.sum
@@ -170,6 +170,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
diff --git a/client/go/vespa/deploy.go b/client/go/vespa/deploy.go
index 081e9fc17d2..22ab5380c23 100644
--- a/client/go/vespa/deploy.go
+++ b/client/go/vespa/deploy.go
@@ -23,6 +23,8 @@ import (
"github.com/vespa-engine/vespa/client/go/util"
)
+var DefaultApplication = ApplicationID{Tenant: "default", Application: "application", Instance: "default"}
+
type ApplicationID struct {
Tenant string
Application string
diff --git a/client/go/vespa/target.go b/client/go/vespa/target.go
index 8f6bb636812..ada2b2151b2 100644
--- a/client/go/vespa/target.go
+++ b/client/go/vespa/target.go
@@ -30,7 +30,6 @@ const (
type Service struct {
BaseURL string
Name string
- description string
certificate tls.Certificate
}
diff --git a/client/src/main/java/ai/vespa/client/dsl/Annotation.java b/client/src/main/java/ai/vespa/client/dsl/Annotation.java
index 906f2abcca0..1949bc7d3f9 100644
--- a/client/src/main/java/ai/vespa/client/dsl/Annotation.java
+++ b/client/src/main/java/ai/vespa/client/dsl/Annotation.java
@@ -20,6 +20,10 @@ public class Annotation {
return this;
}
+ public boolean contains(String key) {
+ return annotations.containsKey(key);
+ }
+
@Override
public String toString() {
return annotations == null || annotations.isEmpty()
diff --git a/client/src/main/java/ai/vespa/client/dsl/GeoLocation.java b/client/src/main/java/ai/vespa/client/dsl/GeoLocation.java
new file mode 100644
index 00000000000..c0d8fabc42f
--- /dev/null
+++ b/client/src/main/java/ai/vespa/client/dsl/GeoLocation.java
@@ -0,0 +1,44 @@
+package ai.vespa.client.dsl;
+
+import org.apache.commons.text.StringEscapeUtils;
+
+public class GeoLocation extends QueryChain {
+
+ private String fieldName;
+ private Double longitude;
+ private Double latitude;
+ private String radius;
+
+ public GeoLocation(String fieldName, Double longitude, Double latitude, String radius) {
+ this.fieldName = fieldName;
+ this.longitude = longitude;
+ this.latitude = latitude;
+ this.radius = radius;
+ this.nonEmpty = true;
+ }
+
+ @Override
+ boolean hasPositiveSearchField(String fieldName) {
+ return this.fieldName.equals(fieldName);
+ }
+
+ @Override
+ boolean hasPositiveSearchField(String fieldName, Object value) {
+ return false;
+ }
+
+ @Override
+ boolean hasNegativeSearchField(String fieldName) {
+ return false;
+ }
+
+ @Override
+ boolean hasNegativeSearchField(String fieldName, Object value) {
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return Text.format("geoLocation(%s, %f, %f, \"%s\")", fieldName, longitude, latitude, StringEscapeUtils.escapeJava(radius));
+ }
+}
diff --git a/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java b/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java
new file mode 100644
index 00000000000..6c95d2b6fd7
--- /dev/null
+++ b/client/src/main/java/ai/vespa/client/dsl/NearestNeighbor.java
@@ -0,0 +1,52 @@
+package ai.vespa.client.dsl;
+
+import java.util.stream.Collectors;
+
+public class NearestNeighbor extends QueryChain {
+
+ private Annotation annotation;
+ private String docVectorName;
+ private String queryVectorName;
+
+
+ public NearestNeighbor(String docVectorName, String queryVectorName) {
+ this.docVectorName = docVectorName;
+ this.queryVectorName = queryVectorName;
+ this.nonEmpty = true;
+ }
+
+ NearestNeighbor annotate(Annotation annotation) {
+ this.annotation = annotation;
+ return this;
+ }
+
+ @Override
+ boolean hasPositiveSearchField(String fieldName) {
+ return this.docVectorName.equals(fieldName);
+ }
+
+ @Override
+ boolean hasPositiveSearchField(String fieldName, Object value) {
+ return this.docVectorName.equals(fieldName) && queryVectorName.equals(value);
+ }
+
+ @Override
+ boolean hasNegativeSearchField(String fieldName) {
+ return false;
+ }
+
+ @Override
+ boolean hasNegativeSearchField(String fieldName, Object value) {
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ boolean hasAnnotation = A.hasAnnotation(annotation);
+ if (!hasAnnotation || !annotation.contains("targetHits")) {
+ throw new IllegalArgumentException("must specify target hits in nearest neighbor query");
+ }
+ String s = Text.format("nearestNeighbor(%s, %s)", docVectorName, queryVectorName);
+ return Text.format("([%s]%s)", annotation, s);
+ }
+}
diff --git a/client/src/main/java/ai/vespa/client/dsl/Q.java b/client/src/main/java/ai/vespa/client/dsl/Q.java
index 7637f76f095..f15ffed1ea9 100644
--- a/client/src/main/java/ai/vespa/client/dsl/Q.java
+++ b/client/src/main/java/ai/vespa/client/dsl/Q.java
@@ -169,4 +169,30 @@ public final class Q {
public static WeakAnd weakand(String field, Query query) {
return new WeakAnd(field, query);
}
+
+ /**
+ * GeoLocation geo location
+ * https://docs.vespa.ai/en/reference/query-language-reference.html#geoLocation
+ *
+ * @param field the field
+ * @param longitude longitude
+ * @param latitude latitude
+ * @param radius a string specifying the radius and its unit
+ * @return the geo-location query
+ */
+ public static GeoLocation geoLocation(String field, Double longitude, Double latitude, String radius) {
+ return new GeoLocation(field, longitude, latitude, radius);
+ }
+
+ /**
+ * NearestNeighbor nearest neighbor
+ * https://docs.vespa.ai/en/reference/query-language-reference.html#nearestneighbor
+ *
+ * @param docVectorName the vector name defined in the vespa schema
+ * @param queryVectorName the vector name in this query
+ * @return the nearest neighbor query
+ */
+ public static NearestNeighbor nearestNeighbor(String docVectorName, String queryVectorName) {
+ return new NearestNeighbor(docVectorName, queryVectorName);
+ }
}
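A minimal Java sketch of the new Q.geoLocation builder, using only calls that appear in this patch and its tests; the schema, field names and values ("place", "location", "title", "tower") are made up for illustration. Q.nearestNeighbor is built the same way, but its toString() throws unless a targetHits annotation has been attached (the Groovy test below exercises that from within the ai.vespa.client.dsl package, since annotate() is package-private in this patch).

    import ai.vespa.client.dsl.Q;

    public class GeoLocationExample {
        public static void main(String[] args) {
            // Matches documents whose "location" field lies within 200 km of the given point.
            String yql = Q.select("*")
                          .from("place")
                          .where("title").contains("tower")
                          .and(Q.geoLocation("location", 121.597366, 25.105497, "200km"))
                          .semicolon()
                          .build();
            System.out.println(yql);
            // yql=select * from place where title contains "tower" and geoLocation(location, 121.597366, 25.105497, "200km");
        }
    }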
diff --git a/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy b/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy
index d1560937fef..1bada4e8f59 100644
--- a/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy
+++ b/client/src/test/groovy/ai/vespa/client/dsl/QTest.groovy
@@ -244,6 +244,46 @@ class QTest extends Specification {
q == """yql=select * from sd1 where weakAnd(f1, f1 contains "v1", f2 contains "v2") and ([{"scoreThreshold":0.13}]weakAnd(f3, f1 contains "v1", f2 contains "v2"));"""
}
+ def "geo location"() {
+ given:
+ def q = Q.select("*")
+ .from("sd1")
+ .where("a").contains("b").and(Q.geoLocation("taiwan", 25.105497, 121.597366, "200km"))
+ .semicolon()
+ .build()
+
+ expect:
+ q == """yql=select * from sd1 where a contains "b" and geoLocation(taiwan, 25.105497, 121.597366, "200km");"""
+ }
+
+ def "nearest neighbor query"() {
+ when:
+ def q = Q.select("*")
+ .from("sd1")
+ .where("a").contains("b")
+ .and(Q.nearestNeighbor("vec1", "vec2")
+ .annotate(A.a("targetHits", 10, "approximate", false))
+ )
+ .semicolon()
+ .build()
+
+ then:
+ q == """yql=select * from sd1 where a contains "b" and ([{"approximate":false,"targetHits":10}]nearestNeighbor(vec1, vec2));"""
+ }
+
+ def "invalid nearest neighbor should throws an exception (targetHits annotation is required)"() {
+ when:
+ def q = Q.select("*")
+ .from("sd1")
+ .where("a").contains("b").and(Q.nearestNeighbor("vec1", "vec2"))
+ .semicolon()
+ .build()
+
+ then:
+ thrown(IllegalArgumentException)
+ }
+
+
def "rank with only query"() {
given:
def q = Q.select("*")
diff --git a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
index 249ca71117a..13769be9ec1 100644
--- a/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
+++ b/config-model/src/main/java/com/yahoo/config/model/deploy/DeployState.java
@@ -145,7 +145,7 @@ public class DeployState implements ConfigDefinitionStore {
this.zone = zone;
this.queryProfiles = queryProfiles; // TODO: Remove this by seeing how pagetemplates are propagated
this.semanticRules = semanticRules; // TODO: Remove this by seeing how pagetemplates are propagated
- this.importedModels = importMlModels(applicationPackage, modelImporters, deployLogger);
+ this.importedModels = importMlModels(applicationPackage, modelImporters, deployLogger, executor);
this.validationOverrides = applicationPackage.getValidationOverrides().map(ValidationOverrides::fromXml)
.orElse(ValidationOverrides.empty);
@@ -211,9 +211,10 @@ public class DeployState implements ConfigDefinitionStore {
private static ImportedMlModels importMlModels(ApplicationPackage applicationPackage,
Collection<MlModelImporter> modelImporters,
- DeployLogger deployLogger) {
+ DeployLogger deployLogger,
+ ExecutorService executor) {
File importFrom = applicationPackage.getFileReference(ApplicationPackage.MODELS_DIR);
- ImportedMlModels importedModels = new ImportedMlModels(importFrom, modelImporters);
+ ImportedMlModels importedModels = new ImportedMlModels(importFrom, executor, modelImporters);
for (var entry : importedModels.getSkippedModels().entrySet()) {
deployLogger.logApplicationPackage(Level.WARNING, "Skipping import of model " + entry.getKey() + " as an exception " +
"occurred during import. Error: " + entry.getValue());
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
index 3f1cf130aff..c6c2fea5900 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.model;
import ai.vespa.rankingexpression.importer.configmodelview.ImportedMlModel;
-import ai.vespa.rankingexpression.importer.configmodelview.ImportedMlModels;
import com.yahoo.collections.Pair;
import com.yahoo.component.Version;
import com.yahoo.config.ConfigInstance;
@@ -78,6 +77,8 @@ import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
@@ -184,8 +185,7 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
VespaModelBuilder builder = new VespaDomBuilder();
root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this);
- createGlobalRankProfiles(deployState.getDeployLogger(), deployState.getImportedModels(),
- deployState.rankProfileRegistry(), deployState.getQueryProfiles());
+ createGlobalRankProfiles(deployState);
rankProfileList = new RankProfileList(null, // null search -> global
rankingConstants,
largeRankExpressions,
@@ -291,18 +291,24 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
* Creates a rank profile not attached to any search definition, for each imported model in the application package,
* and adds it to the given rank profile registry.
*/
- private void createGlobalRankProfiles(DeployLogger deployLogger, ImportedMlModels importedModels,
- RankProfileRegistry rankProfileRegistry,
- QueryProfiles queryProfiles) {
- if ( ! importedModels.all().isEmpty()) { // models/ directory is available
- for (ImportedMlModel model : importedModels.all()) {
+ private void createGlobalRankProfiles(DeployState deployState) {
+ var importedModels = deployState.getImportedModels().all();
+ DeployLogger deployLogger = deployState.getDeployLogger();
+ RankProfileRegistry rankProfileRegistry = deployState.rankProfileRegistry();
+ QueryProfiles queryProfiles = deployState.getQueryProfiles();
+ List<Future<ConvertedModel>> futureModels = new ArrayList<>();
+ if ( ! importedModels.isEmpty()) { // models/ directory is available
+ for (ImportedMlModel model : importedModels) {
// Due to automatic naming not guaranteeing unique names, there must be a 1-1 between OnnxModels and global RankProfiles.
OnnxModels onnxModels = onnxModelInfoFromSource(model);
RankProfile profile = new RankProfile(model.name(), this, deployLogger, rankProfileRegistry, onnxModels);
rankProfileRegistry.add(profile);
- ConvertedModel convertedModel = ConvertedModel.fromSource(new ModelName(model.name()),
- model.name(), profile, queryProfiles.getRegistry(), model);
- convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false));
+ futureModels.add(deployState.getExecutor().submit(() -> {
+ ConvertedModel convertedModel = ConvertedModel.fromSource(new ModelName(model.name()),
+ model.name(), profile, queryProfiles.getRegistry(), model);
+ convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false));
+ return convertedModel;
+ }));
}
}
else { // generated and stored model information may be available instead
@@ -314,8 +320,18 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
OnnxModels onnxModels = onnxModelInfoFromStore(modelName);
RankProfile profile = new RankProfile(modelName, this, deployLogger, rankProfileRegistry, onnxModels);
rankProfileRegistry.add(profile);
- ConvertedModel convertedModel = ConvertedModel.fromStore(new ModelName(modelName), modelName, profile);
- convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false));
+ futureModels.add(deployState.getExecutor().submit(() -> {
+ ConvertedModel convertedModel = ConvertedModel.fromStore(new ModelName(modelName), modelName, profile);
+ convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false));
+ return convertedModel;
+ }));
+ }
+ }
+ for (var futureConvertedModel : futureModels) {
+ try {
+ futureConvertedModel.get();
+ } catch (ExecutionException | InterruptedException e) {
+ throw new RuntimeException(e);
}
}
new Processing().processRankProfiles(deployLogger, rankProfileRegistry, queryProfiles, true, false);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java b/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
index cd596038137..b915453b593 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/Container.java
@@ -310,11 +310,12 @@ public abstract class Container extends AbstractService implements
@Override
public void getConfig(QrConfig.Builder builder) {
builder.rpc(new Rpc.Builder()
- .enabled(rpcServerEnabled())
- .port(getRpcPort())
- .slobrokId(serviceSlobrokId()))
+ .enabled(rpcServerEnabled())
+ .port(getRpcPort())
+ .slobrokId(serviceSlobrokId()))
.filedistributor(filedistributorConfig())
- .discriminator((clusterName != null ? clusterName + "." : "" ) + name);
+ .discriminator((clusterName != null ? clusterName + "." : "" ) + name)
+ .nodeIndex(index);
}
/** Returns the jvm args set explicitly for this node */
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankProfileSearchFixture.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankProfileSearchFixture.java
index 010b33597f3..9c363ea0628 100644
--- a/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankProfileSearchFixture.java
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/RankProfileSearchFixture.java
@@ -24,6 +24,8 @@ import ai.vespa.rankingexpression.importer.xgboost.XGBoostImporter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
import static org.junit.Assert.assertEquals;
@@ -43,6 +45,7 @@ class RankProfileSearchFixture {
private final QueryProfileRegistry queryProfileRegistry;
private final Search search;
private final Map<String, RankProfile> compiledRankProfiles = new HashMap<>();
+ private final ExecutorService executor = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
public RankProfileRegistry getRankProfileRegistry() {
return rankProfileRegistry;
@@ -105,7 +108,7 @@ class RankProfileSearchFixture {
public RankProfile compileRankProfile(String rankProfile, Path applicationDir) {
RankProfile compiled = rankProfileRegistry.get(search, rankProfile)
.compile(queryProfileRegistry,
- new ImportedMlModels(applicationDir.toFile(), importers));
+ new ImportedMlModels(applicationDir.toFile(), executor, importers));
compiledRankProfiles.put(rankProfile, compiled);
return compiled;
}
diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java
index b1939ac23c6..6a81c2279d1 100644
--- a/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java
+++ b/config/src/main/java/com/yahoo/config/subscription/impl/ConfigSubscription.java
@@ -19,6 +19,8 @@ import java.io.File;
import java.util.concurrent.atomic.AtomicReference;
import java.util.logging.Logger;
+import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5;
+
/**
* Represents one active subscription to one config
*
@@ -103,6 +105,7 @@ public abstract class ConfigSubscription<T extends ConfigInstance> {
this.configClass = key.getConfigClass();
this.subscriber = subscriber;
this.config.set(new ConfigState<>());
+ getConfigState().getChecksums().removeChecksumsOfType(MD5); // TODO: Temporary until md5 is no longer used
}
/**
diff --git a/config/src/main/java/com/yahoo/vespa/config/GetConfigRequest.java b/config/src/main/java/com/yahoo/vespa/config/GetConfigRequest.java
index 4e90ce532e4..35b503416ce 100644
--- a/config/src/main/java/com/yahoo/vespa/config/GetConfigRequest.java
+++ b/config/src/main/java/com/yahoo/vespa/config/GetConfigRequest.java
@@ -44,4 +44,11 @@ public interface GetConfigRequest {
*/
String getRequestDefMd5();
+ /**
+ * Returns the payload checksums from the config request.
+ *
+ * @return the payload checksums from the request.
+ */
+ PayloadChecksums configPayloadChecksums();
+
}
diff --git a/config/src/main/java/com/yahoo/vespa/config/PayloadChecksum.java b/config/src/main/java/com/yahoo/vespa/config/PayloadChecksum.java
index bb3ac3f76f1..177fb57116c 100644
--- a/config/src/main/java/com/yahoo/vespa/config/PayloadChecksum.java
+++ b/config/src/main/java/com/yahoo/vespa/config/PayloadChecksum.java
@@ -1,10 +1,17 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config;
+import com.yahoo.text.AbstractUtf8Array;
+import com.yahoo.vespa.config.protocol.Payload;
+import com.yahoo.vespa.config.util.ConfigUtils;
+
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
+import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5;
+import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64;
+
/**
* Checksums of config definition payload or config payload,
* md5 and xxhash64 are the supported types at the moment.
@@ -27,6 +34,30 @@ public class PayloadChecksum {
return new PayloadChecksum("", type);
}
+ public static PayloadChecksum fromPayload(Payload payload, Type type) {
+ switch (type) {
+ case MD5: return fromMd5Data(payload.getData());
+ case XXHASH64: return fromXxhash64Data(payload.getData());
+ default: throw new IllegalArgumentException("Unknown type " + type);
+ }
+ }
+
+ private static PayloadChecksum fromMd5Data(AbstractUtf8Array data) {
+ return new PayloadChecksum(ConfigUtils.getMd5(data), MD5);
+ }
+
+ private static PayloadChecksum fromXxhash64Data(AbstractUtf8Array data) {
+ return new PayloadChecksum(ConfigUtils.getXxhash64(data), XXHASH64);
+ }
+
+ public boolean isEmpty() {
+ switch (type) {
+ case MD5: return this.equals(empty(MD5));
+ case XXHASH64: return this.equals(empty(XXHASH64));
+ default: throw new IllegalArgumentException("Unknown type " + type);
+ }
+ }
+
public String asString() { return checksum; }
public Type type() { return type; }
diff --git a/config/src/main/java/com/yahoo/vespa/config/PayloadChecksums.java b/config/src/main/java/com/yahoo/vespa/config/PayloadChecksums.java
index 1558771bd58..d30e5b055bc 100644
--- a/config/src/main/java/com/yahoo/vespa/config/PayloadChecksums.java
+++ b/config/src/main/java/com/yahoo/vespa/config/PayloadChecksums.java
@@ -52,6 +52,8 @@ public class PayloadChecksums {
return this;
}
+ public void removeChecksumsOfType(PayloadChecksum.Type type) { checksums.remove(type); }
+
public PayloadChecksum getForType(PayloadChecksum.Type type) {
return checksums.get(type);
}
@@ -60,6 +62,8 @@ public class PayloadChecksums {
return checksums.values().stream().allMatch(PayloadChecksum::valid);
}
+ public boolean isEmpty() { return this.equals(empty()); }
+
@Override
public String toString() {
return checksums.values().stream()
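A short sketch of how the new checksum helpers compose, using only calls visible in this patch (the config content is made up; PayloadChecksums.fromPayload is assumed to compute both checksum types from the payload, as the factory test further below uses it):

    import com.yahoo.vespa.config.ConfigPayload;
    import com.yahoo.vespa.config.PayloadChecksum;
    import com.yahoo.vespa.config.PayloadChecksums;
    import com.yahoo.vespa.config.protocol.Payload;

    import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5;
    import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64;

    public class ChecksumExample {
        public static void main(String[] args) {
            Payload payload = Payload.from(ConfigPayload.fromString("{ \"field1\": 11 }"));

            // One checksum of a single type, computed from the payload
            PayloadChecksum xxhash64 = PayloadChecksum.fromPayload(payload, XXHASH64);

            // All checksums of the payload; a subscription can then drop the md5 entry
            // while md5 is being phased out (as ConfigSubscription now does)
            PayloadChecksums checksums = PayloadChecksums.fromPayload(payload);
            checksums.removeChecksumsOfType(MD5);

            System.out.println(checksums.getForType(XXHASH64).asString().equals(xxhash64.asString())); // true
            System.out.println(checksums.isEmpty()); // false: the xxhash64 checksum is still present
        }
    }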
diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequestV3.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequestV3.java
index a6271b159ef..bd69c77921d 100644
--- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequestV3.java
+++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTClientConfigRequestV3.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.config.protocol;
import com.yahoo.config.ConfigInstance;
import com.yahoo.config.subscription.impl.ConfigSubscription;
import com.yahoo.config.subscription.impl.JRTConfigSubscription;
-import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.jrt.Request;
import com.yahoo.jrt.StringValue;
import com.yahoo.slime.JsonFormat;
@@ -13,14 +12,20 @@ import com.yahoo.text.Utf8;
import com.yahoo.text.Utf8Array;
import com.yahoo.vespa.config.ConfigKey;
import com.yahoo.vespa.config.JRTMethods;
+import com.yahoo.vespa.config.PayloadChecksum;
+import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.RawConfig;
import com.yahoo.vespa.config.util.ConfigUtils;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.Optional;
+import java.util.logging.Level;
import java.util.logging.Logger;
+import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5;
+import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64;
+
/**
* Represents version 3 config request for config clients. Provides methods for inspecting request and response
* values.
@@ -252,8 +257,20 @@ public class JRTClientConfigRequestV3 implements JRTClientConfigRequest {
@Override
public boolean hasUpdatedConfig() {
- String respMd5 = getNewConfigMd5();
- return !respMd5.equals("") && !getRequestConfigMd5().equals(respMd5);
+ PayloadChecksums requestConfigChecksums = getRequestConfigChecksums();
+ log.log(Level.FINE, () -> "request checksums for " + getConfigKey() + ": " + requestConfigChecksums);
+
+ PayloadChecksums newChecksums = getNewChecksums();
+ log.log(Level.FINE, () -> "new checksums for " + getConfigKey() + ": " + newChecksums);
+ if (newChecksums.isEmpty()) return false;
+
+ PayloadChecksum respMd5 = newChecksums.getForType(MD5);
+ boolean updated = respMd5 != null && ! requestConfigChecksums.getForType(MD5).equals(respMd5);
+
+ if (updated) return true;
+
+ PayloadChecksum respXxhash64 = newChecksums.getForType(XXHASH64);
+ return respXxhash64 != null && ! requestConfigChecksums.getForType(XXHASH64).equals(respXxhash64);
}
@Override
diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequest.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequest.java
index 41106e138b7..938da855014 100644
--- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequest.java
+++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequest.java
@@ -85,4 +85,12 @@ public interface JRTServerConfigRequest extends JRTConfigRequest, GetConfigReque
*/
Payload payloadFromResponse(ConfigResponse response);
+
+ /**
+ * Returns the payload checksums from the config request.
+ *
+ * @return the payload checksums from the request.
+ */
+ PayloadChecksums configPayloadChecksums();
+
}
diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java
index fbb52e81679..13d0ca1119a 100644
--- a/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java
+++ b/config/src/main/java/com/yahoo/vespa/config/protocol/JRTServerConfigRequestV3.java
@@ -273,7 +273,9 @@ public class JRTServerConfigRequestV3 implements JRTServerConfigRequest {
}
@Override
- public Optional<VespaVersion> getVespaVersion() {
- return requestData.getVespaVersion();
- }
+ public Optional<VespaVersion> getVespaVersion() { return requestData.getVespaVersion(); }
+
+ @Override
+ public PayloadChecksums configPayloadChecksums() { return requestData.getRequestConfigChecksums(); }
+
}
diff --git a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeRequestData.java b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeRequestData.java
index 679298bac73..2b69ae4ebeb 100644
--- a/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeRequestData.java
+++ b/config/src/main/java/com/yahoo/vespa/config/protocol/SlimeRequestData.java
@@ -1,14 +1,14 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.protocol;
-import com.yahoo.vespa.config.PayloadChecksum;
-import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.jrt.Request;
import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.slime.SlimeUtils;
import com.yahoo.vespa.config.ConfigKey;
+import com.yahoo.vespa.config.PayloadChecksum;
+import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.util.ConfigUtils;
import java.util.Optional;
@@ -119,8 +119,10 @@ class SlimeRequestData {
request.setString(REQUEST_CLIENT_CONFIGID, key.getConfigId());
request.setString(REQUEST_CLIENT_HOSTNAME, hostname);
defSchema.serialize(request.setArray(REQUEST_DEF_CONTENT));
- request.setString(REQUEST_CONFIG_MD5, payloadChecksums.getForType(MD5).asString());
- request.setString(REQUEST_CONFIG_XXHASH64, payloadChecksums.getForType(XXHASH64).asString());
+ if (payloadChecksums.getForType(XXHASH64) != null)
+ request.setString(REQUEST_CONFIG_XXHASH64, payloadChecksums.getForType(XXHASH64).asString());
+ if (payloadChecksums.getForType(MD5) != null)
+ request.setString(REQUEST_CONFIG_MD5, payloadChecksums.getForType(MD5).asString());
request.setLong(REQUEST_CURRENT_GENERATION, generation);
request.setLong(REQUEST_TIMEOUT, timeout);
request.setString(REQUEST_COMPRESSION_TYPE, compressionType.name());
diff --git a/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java b/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java
index 0a4fc3da6cb..b1ed3a089ae 100644
--- a/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java
+++ b/config/src/test/java/com/yahoo/vespa/config/protocol/JRTConfigRequestV3Test.java
@@ -8,7 +8,6 @@ import com.yahoo.config.subscription.impl.GenericConfigSubscriber;
import com.yahoo.config.subscription.impl.JRTConfigRequester;
import com.yahoo.config.subscription.impl.JRTConfigSubscription;
import com.yahoo.config.subscription.impl.MockConnection;
-import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.foo.SimpletypesConfig;
import com.yahoo.jrt.Request;
import com.yahoo.slime.Inspector;
@@ -18,6 +17,7 @@ import com.yahoo.test.ManualClock;
import com.yahoo.vespa.config.ConfigKey;
import com.yahoo.vespa.config.ConfigPayload;
import com.yahoo.vespa.config.ErrorCode;
+import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.RawConfig;
import com.yahoo.vespa.config.TimingValues;
import com.yahoo.vespa.config.util.ConfigUtils;
@@ -237,7 +237,7 @@ public class JRTConfigRequestV3Test {
assertTrue(serverReq.validateParameters());
assertValidationFail(createReq("35#$#!$@#", defNamespace, hostname, configId, payloadChecksums, currentGeneration, timeout, trace));
assertValidationFail(createReq(defName, "abcd.o#$*(!&$", hostname, configId, payloadChecksums, currentGeneration, timeout, trace));
- assertValidationFail(createReq(defName, defNamespace, hostname, configId, PayloadChecksums.from("opnq", "1234"), currentGeneration, timeout, trace));
+ assertValidationFail(createReq(defName, defNamespace, hostname, configId, PayloadChecksums.from("1234", "opnq"), currentGeneration, timeout, trace));
assertValidationFail(createReq(defName, defNamespace, hostname, configId, payloadChecksums, -34, timeout, trace));
assertValidationFail(createReq(defName, defNamespace, hostname, configId, payloadChecksums, currentGeneration, -23, trace));
assertValidationFail(createReq(defName, defNamespace, "", configId, payloadChecksums, currentGeneration, timeout, trace));
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java
index 00d010e75c8..7f39d678fdf 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java
@@ -44,7 +44,10 @@ public class SuperModelController {
public ConfigResponse resolveConfig(GetConfigRequest request) {
ConfigKey<?> configKey = request.getConfigKey();
validateConfigDefinition(request.getConfigKey(), request.getDefContent());
- return responseFactory.createResponse(model.getConfig(configKey).toUtf8Array(true), generation, false);
+ return responseFactory.createResponse(model.getConfig(configKey).toUtf8Array(true),
+ generation,
+ false,
+ request.configPayloadChecksums());
}
private void validateConfigDefinition(ConfigKey<?> configKey, DefContent defContent) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java b/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java
index df1427bdf6d..0b409d38196 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/application/Application.java
@@ -130,12 +130,11 @@ public class Application implements ModelResult {
metricUpdater.incrementFailedRequests();
throw new UnknownConfigDefinitionException("Unable to find config definition for '" + configKey.getNamespace() + "." + configKey.getName());
}
- log.log(Level.FINE, () -> TenantRepository.logPre(getId()) + ("Resolving " + configKey + " with config definition " + def));
+ log.log(Level.FINE, () -> TenantRepository.logPre(getId()) + "Resolving " + configKey + " with config definition " + def);
var payload = createPayload(configKey, def);
- var response = responseFactory.createResponse(payload.getFirst(), applicationGeneration, payload.getSecond());
- return response;
+ return responseFactory.createResponse(payload.getFirst(), applicationGeneration, payload.getSecond(), req.configPayloadChecksums());
}
private Pair<AbstractUtf8Array, Boolean> createPayload(ConfigKey<?> configKey, ConfigDefinition def) {
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpConfigRequest.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpConfigRequest.java
index c01008fafa0..8abb701606c 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpConfigRequest.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpConfigRequest.java
@@ -11,6 +11,7 @@ import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.jdisc.application.BindingMatch;
import com.yahoo.vespa.config.ConfigKey;
import com.yahoo.vespa.config.GetConfigRequest;
+import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.protocol.DefContent;
import com.yahoo.vespa.config.protocol.VespaVersion;
import com.yahoo.vespa.config.server.RequestHandler;
@@ -195,4 +196,7 @@ public class HttpConfigRequest implements GetConfigRequest, TenantRequest {
@Override
public String getRequestDefMd5() { return ConfigUtils.getDefMd5(getDefContent().asList()); }
+ @Override
+ public PayloadChecksums configPayloadChecksums() { return PayloadChecksums.empty(); }
+
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java
index 7afeebdd3cf..8c1cdeb753a 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactory.java
@@ -1,10 +1,16 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.rpc;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.vespa.config.ConfigPayload;
+import com.yahoo.vespa.config.PayloadChecksum;
+import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.protocol.ConfigResponse;
+import com.yahoo.vespa.config.util.ConfigUtils;
+
+import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5;
+import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64;
/**
* Represents a component that creates config responses from a payload. Different implementations
@@ -28,12 +34,41 @@ public interface ConfigResponseFactory {
/**
* Creates a {@link ConfigResponse} for a given payload and generation.
*
- * @param rawPayload the {@link ConfigPayload} to put in the response
- * @param generation the payload generation
- * @param applyOnRestart true if this config change should only be applied on restart,
- * false if it should be applied immediately
+ * @param rawPayload the {@link ConfigPayload} to put in the response
+ * @param generation the payload generation
+ * @param applyOnRestart true if this config change should only be applied on restart,
+ * false if it should be applied immediately
+ * @param requestsPayloadChecksums payload checksums from the request
* @return a {@link ConfigResponse} that can be sent to the client
*/
- ConfigResponse createResponse(AbstractUtf8Array rawPayload, long generation, boolean applyOnRestart);
+ ConfigResponse createResponse(AbstractUtf8Array rawPayload,
+ long generation,
+ boolean applyOnRestart,
+ PayloadChecksums requestsPayloadChecksums);
+
+ /** Generates payload checksums based on which checksum types exist in the request */
+ default PayloadChecksums generatePayloadChecksums(AbstractUtf8Array rawPayload, PayloadChecksums requestsPayloadChecksums) {
+ PayloadChecksum requestChecksumMd5 = requestsPayloadChecksums.getForType(MD5);
+ PayloadChecksum requestChecksumXxhash64 = requestsPayloadChecksums.getForType(XXHASH64);
+
+ PayloadChecksum md5 = PayloadChecksum.empty(MD5);
+ PayloadChecksum xxhash64 = PayloadChecksum.empty(XXHASH64);
+ // The response contains the same checksum types as the request, except when both are empty;
+ // in that case both checksum types are included in the response
+ if (requestChecksumMd5.isEmpty() && requestChecksumXxhash64.isEmpty()
+ || ( ! requestChecksumMd5.isEmpty() && ! requestChecksumXxhash64.isEmpty())) {
+ md5 = new PayloadChecksum(ConfigUtils.getMd5(rawPayload), MD5);
+ xxhash64 = new PayloadChecksum(ConfigUtils.getXxhash64(rawPayload), XXHASH64);
+ } else if ( ! requestChecksumMd5.isEmpty()) {
+ md5 = new PayloadChecksum(ConfigUtils.getMd5(rawPayload), MD5);
+ } else if (requestChecksumMd5.isEmpty() && !requestChecksumXxhash64.isEmpty()) {
+ xxhash64 = new PayloadChecksum(ConfigUtils.getXxhash64(rawPayload), XXHASH64);
+ } else {
+ md5 = new PayloadChecksum(ConfigUtils.getMd5(rawPayload), MD5);
+ xxhash64 = new PayloadChecksum(ConfigUtils.getXxhash64(rawPayload), XXHASH64);
+ }
+
+ return PayloadChecksums.from(md5, xxhash64);
+ }
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java
index 3698a50217a..6a1ecfac7bb 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/LZ4ConfigResponseFactory.java
@@ -1,15 +1,14 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.rpc;
-import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.text.AbstractUtf8Array;
import com.yahoo.text.Utf8Array;
import com.yahoo.vespa.config.LZ4PayloadCompressor;
+import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.protocol.CompressionInfo;
import com.yahoo.vespa.config.protocol.CompressionType;
import com.yahoo.vespa.config.protocol.ConfigResponse;
import com.yahoo.vespa.config.protocol.SlimeConfigResponse;
-import com.yahoo.vespa.config.util.ConfigUtils;
/**
* Compressor that compresses config payloads to lz4.
@@ -23,10 +22,11 @@ public class LZ4ConfigResponseFactory implements ConfigResponseFactory {
@Override
public ConfigResponse createResponse(AbstractUtf8Array rawPayload,
long generation,
- boolean applyOnRestart) {
+ boolean applyOnRestart,
+ PayloadChecksums requestsPayloadChecksums) {
CompressionInfo info = CompressionInfo.create(CompressionType.LZ4, rawPayload.getByteLength());
Utf8Array compressed = new Utf8Array(compressor.compress(rawPayload.wrap()));
- PayloadChecksums payloadChecksums = PayloadChecksums.from(ConfigUtils.getMd5(rawPayload), ConfigUtils.getXxhash64(rawPayload));
+ PayloadChecksums payloadChecksums = generatePayloadChecksums(rawPayload, requestsPayloadChecksums);
return new SlimeConfigResponse(compressed, generation, applyOnRestart, payloadChecksums, info);
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java
index 97db683e348..ce973e538b7 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/UncompressedConfigResponseFactory.java
@@ -1,13 +1,12 @@
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.rpc;
-import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.text.AbstractUtf8Array;
+import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.protocol.CompressionInfo;
import com.yahoo.vespa.config.protocol.CompressionType;
import com.yahoo.vespa.config.protocol.ConfigResponse;
import com.yahoo.vespa.config.protocol.SlimeConfigResponse;
-import com.yahoo.vespa.config.util.ConfigUtils;
/**
* Simply returns an uncompressed payload.
@@ -17,11 +16,13 @@ import com.yahoo.vespa.config.util.ConfigUtils;
public class UncompressedConfigResponseFactory implements ConfigResponseFactory {
@Override
- public ConfigResponse createResponse(AbstractUtf8Array rawPayload, long generation, boolean applyOnRestart) {
- String configMd5 = ConfigUtils.getMd5(rawPayload);
- String xxHash64 = ConfigUtils.getXxhash64(rawPayload);
+ public ConfigResponse createResponse(AbstractUtf8Array rawPayload,
+ long generation,
+ boolean applyOnRestart,
+ PayloadChecksums requestsPayloadChecksums) {
CompressionInfo info = CompressionInfo.create(CompressionType.UNCOMPRESSED, rawPayload.getByteLength());
- return new SlimeConfigResponse(rawPayload, generation, applyOnRestart, PayloadChecksums.from(configMd5, xxHash64), info);
+ PayloadChecksums payloadChecksums = generatePayloadChecksums(rawPayload, requestsPayloadChecksums);
+ return new SlimeConfigResponse(rawPayload, generation, applyOnRestart, payloadChecksums, info);
}
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
index 41af0296534..28d50a5396e 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/ApplicationRepositoryTest.java
@@ -26,6 +26,7 @@ import com.yahoo.text.Utf8;
import com.yahoo.vespa.config.ConfigKey;
import com.yahoo.vespa.config.ConfigPayload;
import com.yahoo.vespa.config.GetConfigRequest;
+import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.protocol.ConfigResponse;
import com.yahoo.vespa.config.protocol.DefContent;
import com.yahoo.vespa.config.protocol.VespaVersion;
@@ -810,6 +811,9 @@ public class ApplicationRepositoryTest {
@Override
public String getRequestDefMd5() { return ""; }
+ @Override
+ public PayloadChecksums configPayloadChecksums() { return PayloadChecksums.empty(); }
+
}, Optional.empty());
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java
index 747a0ad3241..b164c3e5cd5 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/rpc/ConfigResponseFactoryTest.java
@@ -1,11 +1,16 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.rpc;
import com.yahoo.vespa.config.ConfigPayload;
+import com.yahoo.vespa.config.PayloadChecksum;
+import com.yahoo.vespa.config.PayloadChecksums;
import com.yahoo.vespa.config.protocol.CompressionType;
import com.yahoo.vespa.config.protocol.ConfigResponse;
+import com.yahoo.vespa.config.protocol.Payload;
import org.junit.Test;
+import static com.yahoo.vespa.config.PayloadChecksum.Type.MD5;
+import static com.yahoo.vespa.config.PayloadChecksum.Type.XXHASH64;
import static org.junit.Assert.assertEquals;
/**
@@ -13,22 +18,63 @@ import static org.junit.Assert.assertEquals;
*/
public class ConfigResponseFactoryTest {
+ private static final ConfigPayload payload = ConfigPayload.fromString("{ \"field1\": 11, \"field2\": 11 }");
+
+ private static final PayloadChecksums payloadChecksums = PayloadChecksums.fromPayload(Payload.from(payload));
+ private static final PayloadChecksums payloadChecksumsEmpty = PayloadChecksums.empty();
+ private static final PayloadChecksums payloadChecksumsOnlyMd5 =
+ PayloadChecksums.from(PayloadChecksum.fromPayload(Payload.from(payload), MD5));
+ private static final PayloadChecksums payloadChecksumsOnlyXxhash64 =
+ PayloadChecksums.from(PayloadChecksum.fromPayload(Payload.from(payload), XXHASH64));
+
@Test
public void testUncompressedFactory() {
UncompressedConfigResponseFactory responseFactory = new UncompressedConfigResponseFactory();
- ConfigResponse response = responseFactory.createResponse(ConfigPayload.empty().toUtf8Array(true), 3, false);
+ ConfigResponse response = responseFactory.createResponse(payload.toUtf8Array(true), 3, false, payloadChecksums);
assertEquals(CompressionType.UNCOMPRESSED, response.getCompressionInfo().getCompressionType());
assertEquals(3L,response.getGeneration());
- assertEquals(2, response.getPayload().getByteLength());
+ assertEquals(25, response.getPayload().getByteLength());
+ assertEquals(payloadChecksums, response.getPayloadChecksums());
}
@Test
public void testLZ4CompressedFactory() {
+ // Both checksums in request
+ {
+ ConfigResponse response = createResponse(payloadChecksums);
+ assertEquals(payloadChecksums, response.getPayloadChecksums());
+ }
+
+ // No checksums in the request (empty checksums); both checksum types should be in the response
+ {
+ ConfigResponse response = createResponse(payloadChecksumsEmpty);
+ assertEquals(payloadChecksums.getForType(MD5), response.getPayloadChecksums().getForType(MD5));
+ assertEquals(payloadChecksums.getForType(XXHASH64), response.getPayloadChecksums().getForType(XXHASH64));
+ }
+
+ // Only md5 checksums in request
+ {
+ ConfigResponse response = createResponse(payloadChecksumsOnlyMd5);
+ assertEquals(payloadChecksumsOnlyMd5.getForType(MD5), response.getPayloadChecksums().getForType(MD5));
+ assertEquals(payloadChecksumsOnlyMd5.getForType(XXHASH64), response.getPayloadChecksums().getForType(XXHASH64));
+ }
+
+ // Only xxhash64 checksums in request
+ {
+ ConfigResponse response = createResponse(payloadChecksumsOnlyXxhash64);
+ assertEquals(payloadChecksumsOnlyXxhash64.getForType(MD5), response.getPayloadChecksums().getForType(MD5));
+ assertEquals(payloadChecksumsOnlyXxhash64.getForType(XXHASH64), response.getPayloadChecksums().getForType(XXHASH64));
+ }
+ }
+
+ private ConfigResponse createResponse(PayloadChecksums payloadChecksums) {
LZ4ConfigResponseFactory responseFactory = new LZ4ConfigResponseFactory();
- ConfigResponse response = responseFactory.createResponse(ConfigPayload.empty().toUtf8Array(true), 3, false);
+ ConfigResponse response = responseFactory.createResponse(payload.toUtf8Array(true), 3, false, payloadChecksums);
assertEquals(CompressionType.LZ4, response.getCompressionInfo().getCompressionType());
assertEquals(3L, response.getGeneration());
- assertEquals(3, response.getPayload().getByteLength());
+ assertEquals(23, response.getPayload().getByteLength());
+
+ return response;
}
}
diff --git a/container-core/src/main/java/com/yahoo/restapi/ByteArrayResponse.java b/container-core/src/main/java/com/yahoo/restapi/ByteArrayResponse.java
new file mode 100644
index 00000000000..1299a2c6eb4
--- /dev/null
+++ b/container-core/src/main/java/com/yahoo/restapi/ByteArrayResponse.java
@@ -0,0 +1,26 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.restapi;
+
+import com.yahoo.container.jdisc.HttpResponse;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * @author freva
+ */
+public class ByteArrayResponse extends HttpResponse {
+
+ private final byte[] data;
+
+ public ByteArrayResponse(byte[] data) {
+ super(200);
+ this.data = data;
+ }
+
+ @Override
+ public void render(OutputStream stream) throws IOException {
+ stream.write(data);
+ }
+
+}
diff --git a/container-core/src/main/java/com/yahoo/restapi/StringResponse.java b/container-core/src/main/java/com/yahoo/restapi/StringResponse.java
index 55ea22880de..003b58de827 100644
--- a/container-core/src/main/java/com/yahoo/restapi/StringResponse.java
+++ b/container-core/src/main/java/com/yahoo/restapi/StringResponse.java
@@ -1,27 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.restapi;
-import com.yahoo.container.jdisc.HttpResponse;
-
-import java.io.IOException;
-import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
/**
* @author bratseth
*/
-public class StringResponse extends HttpResponse {
-
- private final String message;
-
+public class StringResponse extends ByteArrayResponse {
public StringResponse(String message) {
- super(200);
- this.message = message;
+ super(message.getBytes(StandardCharsets.UTF_8));
}
-
- @Override
- public void render(OutputStream stream) throws IOException {
- stream.write(message.getBytes(StandardCharsets.UTF_8));
- }
-
}
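Because StringResponse now delegates to ByteArrayResponse, other fixed-body responses can be written the same way. A small illustrative sketch (the class name is hypothetical, and it assumes HttpResponse's overridable getContentType() hook):

    import com.yahoo.restapi.ByteArrayResponse;

    /** Hypothetical subclass serving a pre-serialized JSON body. */
    public class JsonBytesResponse extends ByteArrayResponse {

        public JsonBytesResponse(byte[] json) {
            super(json);
        }

        @Override
        public String getContentType() {
            return "application/json";
        }
    }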
diff --git a/container-core/src/main/resources/configdefinitions/container.qr.def b/container-core/src/main/resources/configdefinitions/container.qr.def
index fe44b04e9d5..9d9b84eb428 100644
--- a/container-core/src/main/resources/configdefinitions/container.qr.def
+++ b/container-core/src/main/resources/configdefinitions/container.qr.def
@@ -23,5 +23,10 @@ rpc.slobrokId string default="" restart
## this string will be unique for every QRS in a Vespa application.
discriminator string default="qrserver.0" restart
+## Index of this container inside the cluster. Guaranteed to be non-negative
+## and unique for every container in a cluster, but not necessarily contiguous
+## or starting from zero.
+nodeIndex int default=0
+
## Force restart of container on deploy, and defer any changes until restart
restartOnDeploy bool default=false restart
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/SystemInfoProvider.java b/container-disc/src/main/java/com/yahoo/container/jdisc/SystemInfoProvider.java
index b25517ec1f7..de9e20c3c6d 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/SystemInfoProvider.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/SystemInfoProvider.java
@@ -1,12 +1,16 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.container.jdisc;
+import ai.vespa.cloud.Cluster;
import ai.vespa.cloud.Environment;
+import ai.vespa.cloud.Node;
import ai.vespa.cloud.SystemInfo;
import ai.vespa.cloud.Zone;
import com.google.inject.Inject;
+import com.yahoo.cloud.config.ClusterInfoConfig;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.component.AbstractComponent;
+import com.yahoo.container.QrConfig;
import com.yahoo.container.di.componentgraph.Provider;
/**
@@ -20,8 +24,10 @@ public class SystemInfoProvider extends AbstractComponent implements Provider<Sy
private final SystemInfo instance;
@Inject
- public SystemInfoProvider(ConfigserverConfig config) {
- this.instance = new SystemInfo(new Zone(Environment.valueOf(config.environment()), config.region()));
+ public SystemInfoProvider(ConfigserverConfig csConfig, QrConfig qrConfig, ClusterInfoConfig ciConfig) {
+ this.instance = new SystemInfo(new Zone(Environment.valueOf(csConfig.environment()), csConfig.region()),
+ new Cluster(ciConfig.nodeCount()),
+ new Node(qrConfig.nodeIndex()));
}
@Override
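A sketch of how a container component could consume the extended SystemInfo; the accessor names (cluster().size(), node().index()) are assumptions inferred from the constructor arguments above, not confirmed by this patch:

    import ai.vespa.cloud.SystemInfo;
    import com.google.inject.Inject;
    import com.yahoo.component.AbstractComponent;

    public class ClusterAwareComponent extends AbstractComponent {

        private final int nodeIndex;
        private final int clusterSize;

        @Inject
        public ClusterAwareComponent(SystemInfo systemInfo) {
            // Assumed accessors; the patch only shows the SystemInfo constructor arguments.
            this.nodeIndex = systemInfo.node().index();
            this.clusterSize = systemInfo.cluster().size();
        }

        /** Example use: partition work deterministically across the cluster by node index. */
        public boolean ownsShard(int shard) {
            return shard % clusterSize == nodeIndex;
        }
    }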
diff --git a/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java b/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java
index 49bdba2c90f..793d394801f 100644
--- a/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java
+++ b/container-search/src/main/java/com/yahoo/prelude/query/parser/AllParser.java
@@ -112,6 +112,7 @@ public class AllParser extends SimpleParser {
protected Item negativeItem() {
int position = tokens.getPosition();
Item item = null;
+ boolean isComposited = false;
try {
if ( ! tokens.skip(MINUS)) return null;
if (tokens.currentIsNoIgnore(SPACE)) return null;
@@ -121,6 +122,7 @@ public class AllParser extends SimpleParser {
item = compositeItem();
if (item != null) {
+ isComposited = true;
if (item instanceof OrItem) { // Turn into And
AndItem and = new AndItem();
@@ -137,9 +139,11 @@ public class AllParser extends SimpleParser {
// Heuristic overdrive engaged!
// Interpret -N as a positive item matching a negative number (by backtracking out of this)
// but not if there is an explicit index (such as -a:b)
+ // but interpret -(N) as a negative item matching a positive number
// but interpret --N as a negative item matching a negative number
if (item instanceof IntItem &&
((IntItem)item).getIndexName().isEmpty() &&
+ ! isComposited &&
! ((IntItem)item).getNumber().startsWith(("-")))
item = null;
diff --git a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
index cef8ae1751c..8ca711297d3 100644
--- a/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
+++ b/container-search/src/test/java/com/yahoo/prelude/query/parser/test/ParseTestCase.java
@@ -1970,6 +1970,13 @@ public class ParseTestCase {
}
@Test
+ public void testNegativeTermPositiveNumberInParentheses() {
+ tester.assertParsed("+a -12", "a -(12)", Query.Type.ALL);
+ tester.assertParsed("+a -(AND 12 15)", "a -(12 15)", Query.Type.ALL);
+ tester.assertParsed("+a -12 -15", "a -(12) -(15)", Query.Type.ALL);
+ }
+
+ @Test
public void testSingleNegativeNumberLikeTerm() {
tester.assertParsed("-12", "-12", Query.Type.ALL);
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java
index dd9f8c38802..71f1821ff9a 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationStore.java
@@ -1,10 +1,8 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.api.integration.deployment;
-import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import java.time.Instant;
@@ -21,13 +19,19 @@ import java.util.Optional;
public interface ApplicationStore {
/** Returns the tenant application package of the given version. */
- byte[] get(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion);
+ byte[] get(DeploymentId deploymentId, ApplicationVersion applicationVersion);
+
+ /** Returns the application package diff, compared to the previous build, for the given tenant, application, and build number */
+ Optional<byte[]> getDiff(TenantName tenantName, ApplicationName applicationName, long buildNumber);
+
+ /** Removes diffs for packages before the given build number */
+ void pruneDiffs(TenantName tenantName, ApplicationName applicationName, long beforeBuildNumber);
/** Find application package by given build number */
Optional<byte[]> find(TenantName tenant, ApplicationName application, long buildNumber);
- /** Stores the given tenant application package of the given version. */
- void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage);
+ /** Stores the given tenant application package of the given version, along with the diff since the previous version. */
+ void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage, byte[] diff);
/** Removes applications older than the given version, for the given application, and returns whether something was removed. */
boolean prune(TenantName tenant, ApplicationName application, ApplicationVersion olderThanVersion);
@@ -47,11 +51,14 @@ public interface ApplicationStore {
/** Removes all tester packages for the given tester. */
void removeAllTesters(TenantName tenant, ApplicationName application);
- /** Stores the given application package as the development package for the given application and zone. */
- void putDev(ApplicationId application, ZoneId zone, byte[] applicationPackage);
+ /** Returns the application package diff, compared to the previous build, for the given deployment and build number */
+ Optional<byte[]> getDevDiff(DeploymentId deploymentId, long buildNumber);
+
+ /** Removes diffs for dev packages before the given build number */
+ void pruneDevDiffs(DeploymentId deploymentId, long beforeBuildNumber);
- /** Returns the development package for the given application and zone. */
- byte[] getDev(ApplicationId application, ZoneId zone);
+ /** Stores the given application package as the development package for the given deployment and version, along with the diff since the previous version. */
+ void putDev(DeploymentId deploymentId, ApplicationVersion version, byte[] applicationPackage, byte[] diff);
/** Stores the given application meta data with the current time as part of the path. */
void putMeta(TenantName tenant, ApplicationName application, Instant now, byte[] metaZip);
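
A short sketch of how a caller is expected to use the reworked store API above (identifiers such as tenant, application, version, packageZip and diffBytes are illustrative):

// Production builds: store the package together with its diff against the previous build,
// then fetch the diff back by build number.
applicationStore.put(tenant, application, version, packageZip, diffBytes);
Optional<byte[]> diff = applicationStore.getDiff(tenant, application, version.buildNumber().getAsLong());

// Dev deployments use the DeploymentId-based variants instead.
applicationStore.putDev(deploymentId, version, packageZip, diffBytes);
Optional<byte[]> devDiff = applicationStore.getDevDiff(deploymentId, buildNumber);
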
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java
index 30fd8fad1bd..f83809e84c2 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/deployment/ApplicationVersion.java
@@ -23,7 +23,7 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
*/
public static final ApplicationVersion unknown = new ApplicationVersion(Optional.empty(), OptionalLong.empty(),
Optional.empty(), Optional.empty(), Optional.empty(),
- Optional.empty(), Optional.empty());
+ Optional.empty(), Optional.empty(), true);
// This never changes and is only used to create a valid semantic version number, as required by application bundles
private static final String majorVersion = "1.0";
@@ -35,11 +35,12 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
private final Optional<Instant> buildTime;
private final Optional<String> sourceUrl;
private final Optional<String> commit;
+ private final boolean deployedDirectly;
/** Public for serialisation only. */
public ApplicationVersion(Optional<SourceRevision> source, OptionalLong buildNumber, Optional<String> authorEmail,
- Optional<Version> compileVersion, Optional<Instant> buildTime, Optional<String> sourceUrl,
- Optional<String> commit) {
+ Optional<Version> compileVersion, Optional<Instant> buildTime, Optional<String> sourceUrl,
+ Optional<String> commit, boolean deployedDirectly) {
if (buildNumber.isEmpty() && ( source.isPresent() || authorEmail.isPresent() || compileVersion.isPresent()
|| buildTime.isPresent() || sourceUrl.isPresent() || commit.isPresent()))
throw new IllegalArgumentException("Build number must be present if any other attribute is");
@@ -63,45 +64,37 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
this.buildTime = buildTime;
this.sourceUrl = Objects.requireNonNull(sourceUrl, "sourceUrl cannot be null");
this.commit = Objects.requireNonNull(commit, "commit cannot be null");
+ this.deployedDirectly = deployedDirectly;
}
/** Create an application package version from a completed build, without an author email */
public static ApplicationVersion from(SourceRevision source, long buildNumber) {
return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.empty(),
- Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty());
- }
-
- /** Creates an version from a completed build and an author email. */
- public static ApplicationVersion from(SourceRevision source, long buildNumber, String authorEmail) {
- return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.of(authorEmail),
- Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty());
+ Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), false);
}
/** Creates a version from a completed build, an author email, and build metadata. */
public static ApplicationVersion from(SourceRevision source, long buildNumber, String authorEmail,
Version compileVersion, Instant buildTime) {
return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.of(authorEmail),
- Optional.of(compileVersion), Optional.of(buildTime), Optional.empty(), Optional.empty());
+ Optional.of(compileVersion), Optional.of(buildTime), Optional.empty(), Optional.empty(), false);
}
/** Creates a version from a completed build, an author email, and build metadata. */
public static ApplicationVersion from(Optional<SourceRevision> source, long buildNumber, Optional<String> authorEmail,
Optional<Version> compileVersion, Optional<Instant> buildTime,
- Optional<String> sourceUrl, Optional<String> commit) {
- return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, compileVersion, buildTime, sourceUrl, commit);
+ Optional<String> sourceUrl, Optional<String> commit, boolean deployedDirectly) {
+ return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, compileVersion, buildTime, sourceUrl, commit, deployedDirectly);
}
/** Returns a unique identifier for this version, or "unknown" if the version is not known */
public String id() {
- if (isUnknown()) {
- return "unknown";
- }
- return String.format("%s.%d-%s",
- majorVersion,
- buildNumber.getAsLong(),
- source.map(SourceRevision::commit).map(ApplicationVersion::abbreviateCommit)
- .or(this::commit)
- .orElse("unknown"));
+ if (isUnknown()) return "unknown";
+
+ return source.map(SourceRevision::commit).map(ApplicationVersion::abbreviateCommit)
+ .or(this::commit)
+ .map(commit -> String.format("%s.%d-%s", majorVersion, buildNumber.getAsLong(), commit))
+ .orElseGet(() -> majorVersion + "." + buildNumber.getAsLong());
}
/**
@@ -142,18 +135,24 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
return this.equals(unknown);
}
+ /** Returns whether the application package for this version was deployed directly to a zone */
+ public boolean isDeployedDirectly() {
+ return deployedDirectly;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
if ( ! (o instanceof ApplicationVersion)) return false;
ApplicationVersion that = (ApplicationVersion) o;
return Objects.equals(buildNumber, that.buildNumber)
- && Objects.equals(commit(), that.commit());
+ && Objects.equals(commit(), that.commit())
+ && deployedDirectly == that.deployedDirectly;
}
@Override
public int hashCode() {
- return Objects.hash(buildNumber, commit());
+ return Objects.hash(buildNumber, commit(), deployedDirectly);
}
@Override
@@ -175,6 +174,9 @@ public class ApplicationVersion implements Comparable<ApplicationVersion> {
if (buildNumber().isEmpty() || o.buildNumber().isEmpty())
return Boolean.compare(buildNumber().isPresent(), o.buildNumber.isPresent()); // Unknown version sorts first
+ if (deployedDirectly || o.deployedDirectly)
+ return Boolean.compare(deployedDirectly, o.deployedDirectly); // Directly deployed versions sort first
+
return Long.compare(buildNumber().getAsLong(), o.buildNumber().getAsLong());
}
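
The deployedDirectly flag is only settable through the constructor and the full factory method; a minimal sketch of creating a directly deployed version, mirroring what JobController does for dev deployments further down (buildNumber is illustrative):

ApplicationVersion devVersion = ApplicationVersion.from(Optional.empty(), buildNumber, Optional.empty(),
                                                        Optional.empty(), Optional.empty(), Optional.empty(),
                                                        Optional.empty(), true);
// Such versions sort before CI-built versions and are rejected as Change targets.
assert devVersion.isDeployedDirectly();
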
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
index 135429be8f9..1306f4846c2 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/role/PathGroup.java
@@ -103,6 +103,7 @@ enum PathGroup {
applicationInfo(Matcher.tenant,
Matcher.application,
"/application/v4/tenant/{tenant}/application/{application}/package",
+ "/application/v4/tenant/{tenant}/application/{application}/diff/{number}",
"/application/v4/tenant/{tenant}/application/{application}/compile-version",
"/application/v4/tenant/{tenant}/application/{application}/deployment",
"/application/v4/tenant/{tenant}/application/{application}/deploying/{*}",
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index 6557247e21a..fe5fc90df60 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -27,7 +27,6 @@ import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeploymentData
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.identifiers.InstanceId;
import com.yahoo.vespa.hosted.controller.api.identifiers.RevisionId;
-import com.yahoo.vespa.hosted.controller.api.integration.aws.TenantRoles;
import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingController;
import com.yahoo.vespa.hosted.controller.api.integration.billing.Quota;
import com.yahoo.vespa.hosted.controller.api.integration.certificates.EndpointCertificateMetadata;
@@ -45,8 +44,8 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.RestartFilter;
import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretStore;
import com.yahoo.vespa.hosted.controller.application.ActivateResult;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackageValidator;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageValidator;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.DeploymentQuotaCalculator;
@@ -332,11 +331,6 @@ public class ApplicationController {
});
}
- /** Fetches the requested application package from the artifact store(s). */
- public ApplicationPackage getApplicationPackage(ApplicationId id, ApplicationVersion version) {
- return new ApplicationPackage(applicationStore.get(id.tenant(), id.application(), version));
- }
-
/** Returns given application with a new instance */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
if (instance.instance().isTester())
@@ -372,7 +366,7 @@ public class ApplicationController {
Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
- ApplicationPackage applicationPackage = getApplicationPackage(job.application(), zone, revision);
+ ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(new DeploymentId(job.application(), zone), revision));
try (Lock lock = lock(applicationId)) {
LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
@@ -828,11 +822,6 @@ public class ApplicationController {
return DeploymentQuotaCalculator.calculateQuotaUsage(application);
}
- private ApplicationPackage getApplicationPackage(ApplicationId application, ZoneId zone, ApplicationVersion revision) {
- return new ApplicationPackage(revision.isUnknown() ? applicationStore.getDev(application, zone)
- : applicationStore.get(application.tenant(), application.application(), revision));
- }
-
/*
* Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
*/
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java
index 33eafecf60a..ff266e18bb6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java
@@ -36,7 +36,7 @@ public final class Change {
private Change(Optional<Version> platform, Optional<ApplicationVersion> application, boolean pinned) {
this.platform = requireNonNull(platform, "platform cannot be null");
this.application = requireNonNull(application, "application cannot be null");
- if (application.isPresent() && application.get().isUnknown()) {
+ if (application.isPresent() && (application.get().isUnknown() || application.get().isDeployedDirectly())) {
throw new IllegalArgumentException("Application version to deploy must be a known version");
}
this.pinned = pinned;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackage.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java
index c29bc3f3f5e..3fcf9fc41f2 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackage.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackage.java
@@ -1,5 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.application;
+package com.yahoo.vespa.hosted.controller.application.pkg;
import com.google.common.hash.Hashing;
import com.yahoo.component.Version;
@@ -251,10 +251,11 @@ public class ApplicationPackage {
private Map<Path, Optional<byte[]>> read(Collection<String> names) {
var entries = new ZipStreamReader(new ByteArrayInputStream(zip),
name -> names.contains(withoutLegacyDir(name)),
- maxSize)
+ maxSize,
+ true)
.entries().stream()
.collect(toMap(entry -> Paths.get(withoutLegacyDir(entry.zipEntry().getName())).normalize(),
- entry -> Optional.of(entry.content())));
+ ZipStreamReader.ZipEntryWithContent::content));
names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty()));
return entries;
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiff.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiff.java
new file mode 100644
index 00000000000..97810b9de80
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiff.java
@@ -0,0 +1,112 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.application.pkg;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.UncheckedIOException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.yahoo.vespa.hosted.controller.application.pkg.ZipStreamReader.ZipEntryWithContent;
+
+/**
+ * @author freva
+ */
+public class ApplicationPackageDiff {
+
+ public static byte[] diffAgainstEmpty(ApplicationPackage right) {
+ byte[] emptyZip = new byte[]{80, 75, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ return diff(new ApplicationPackage(emptyZip), right);
+ }
+
+ public static byte[] diff(ApplicationPackage left, ApplicationPackage right) {
+ return diff(left, right, 10 << 20, 1 << 20, 10 << 20);
+ }
+
+ static byte[] diff(ApplicationPackage left, ApplicationPackage right, int maxFileSizeToDiff, int maxDiffSizePerFile, int maxTotalDiffSize) {
+ if (Arrays.equals(left.zippedContent(), right.zippedContent())) return "No diff\n".getBytes(StandardCharsets.UTF_8);
+
+ Map<String, ZipEntryWithContent> leftContents = readContents(left, maxFileSizeToDiff);
+ Map<String, ZipEntryWithContent> rightContents = readContents(right, maxFileSizeToDiff);
+
+ StringBuilder sb = new StringBuilder();
+ List<String> files = Stream.of(leftContents, rightContents)
+ .flatMap(contents -> contents.keySet().stream())
+ .sorted()
+ .distinct()
+ .collect(Collectors.toList());
+ for (String file : files) {
+ if (sb.length() > maxTotalDiffSize)
+ sb.append("--- ").append(file).append('\n').append("Diff skipped: Total diff size >").append(maxTotalDiffSize).append("B)\n\n");
+ else
+ diff(Optional.ofNullable(leftContents.get(file)), Optional.ofNullable(rightContents.get(file)), maxDiffSizePerFile)
+ .ifPresent(diff -> sb.append("--- ").append(file).append('\n').append(diff).append('\n'));
+ }
+
+ return (sb.length() == 0 ? "No diff\n" : sb.toString()).getBytes(StandardCharsets.UTF_8);
+ }
+
+ private static Optional<String> diff(Optional<ZipEntryWithContent> left, Optional<ZipEntryWithContent> right, int maxDiffSizePerFile) {
+ Optional<byte[]> leftContent = left.flatMap(ZipEntryWithContent::content);
+ Optional<byte[]> rightContent = right.flatMap(ZipEntryWithContent::content);
+ if (leftContent.isPresent() && rightContent.isPresent() && Arrays.equals(leftContent.get(), rightContent.get()))
+ return Optional.empty();
+
+ if (Stream.of(left, right).flatMap(Optional::stream).anyMatch(entry -> entry.content().isEmpty()))
+ return Optional.of(String.format("Diff skipped: File too large (%s -> %s)\n",
+ left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
+
+ if (Stream.of(leftContent, rightContent).flatMap(Optional::stream).anyMatch(c -> isBinary(c)))
+ return Optional.of(String.format("Diff skipped: File is binary (%s -> %s)\n",
+ left.map(e -> e.size() + "B").orElse("new file"), right.map(e -> e.size() + "B").orElse("file deleted")));
+
+ return LinesComparator.diff(
+ leftContent.map(c -> lines(c)).orElseGet(List::of),
+ rightContent.map(c -> lines(c)).orElseGet(List::of))
+ .map(diff -> diff.length() > maxDiffSizePerFile ? "Diff skipped: Diff too large (" + diff.length() + "B)\n" : diff);
+ }
+
+ private static Map<String, ZipEntryWithContent> readContents(ApplicationPackage app, int maxFileSizeToDiff) {
+ return new ZipStreamReader(new ByteArrayInputStream(app.zippedContent()), entry -> true, maxFileSizeToDiff, false).entries().stream()
+ .collect(Collectors.toMap(entry -> entry.zipEntry().getName(), e -> e));
+ }
+
+ private static List<String> lines(byte[] data) {
+ List<String> lines = new ArrayList<>(Math.min(16, data.length / 100));
+ try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(data), StandardCharsets.UTF_8))) {
+ String line;
+ while ((line = bufferedReader.readLine()) != null) {
+ lines.add(line);
+ }
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ return lines;
+ }
+
+ private static boolean isBinary(byte[] data) {
+ if (data.length == 0) return false;
+
+ int lengthToCheck = Math.min(data.length, 10000);
+ int ascii = 0;
+
+ for (int i = 0; i < lengthToCheck; i++) {
+ byte b = data[i];
+ if (b < 0x9) return true;
+
+ // TAB, newline/line feed, carriage return
+ if (b == 0x9 || b == 0xA || b == 0xD) ascii++;
+ else if (b >= 0x20 && b <= 0x7E) ascii++;
+ }
+
+ return (double) ascii / lengthToCheck < 0.95;
+ }
+}
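
A minimal usage sketch of the new helper, given two zipped application packages before and after as byte arrays:

byte[] diff = ApplicationPackageDiff.diff(new ApplicationPackage(before), new ApplicationPackage(after));
// Identical packages yield the literal text "No diff\n"; oversized and binary files are skipped with a note.
System.out.print(new String(diff, StandardCharsets.UTF_8));
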
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageValidator.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
index bb2d8b3c553..e9edbbc767c 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageValidator.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageValidator.java
@@ -1,5 +1,5 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.application;
+package com.yahoo.vespa.hosted.controller.application.pkg;
import com.yahoo.config.application.api.DeploymentInstanceSpec;
import com.yahoo.config.application.api.DeploymentSpec;
@@ -14,6 +14,7 @@ import com.yahoo.config.provision.zone.ZoneApi;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentSteps;
import java.time.Instant;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparator.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparator.java
new file mode 100644
index 00000000000..8b4791c6b1b
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparator.java
@@ -0,0 +1,246 @@
+/*
+ * Line-based variant of Apache commons-text StringsComparator
+ * https://github.com/apache/commons-text/blob/3b1a0a5a47ee9fa2b36f99ca28e2e1d367a10a11/src/main/java/org/apache/commons/text/diff/StringsComparator.java
+ */
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.yahoo.vespa.hosted.controller.application.pkg;
+
+import com.yahoo.collections.Pair;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+/**
+ * <p>
+ * It is guaranteed that the comparisons will always be done as
+ * {@code o1.equals(o2)} where {@code o1} belongs to the first
+ * sequence and {@code o2} belongs to the second sequence. This can
+ * be important if subclassing is used for some elements in the first
+ * sequence and the {@code equals} method is specialized.
+ * </p>
+ * <p>
+ * Comparison can be seen from two points of view: either as giving the smallest
+ * modification allowing to transform the first sequence into the second one, or
+ * as giving the longest sequence which is a subsequence of both initial
+ * sequences. The {@code equals} method is used to compare objects, so any
+ * object can be put into sequences. Modifications include deleting, inserting
+ * or keeping one object, starting from the beginning of the first sequence.
+ * </p>
+ * <p>
+ * This class implements the comparison algorithm, which is the very efficient
+ * algorithm from Eugene W. Myers
+ * <a href="http://www.cis.upenn.edu/~bcpierce/courses/dd/papers/diff.ps">
+ * An O(ND) Difference Algorithm and Its Variations</a>.
+ */
+public class LinesComparator {
+
+ private final List<String> left;
+ private final List<String> right;
+ private final int[] vDown;
+ private final int[] vUp;
+
+ private LinesComparator(List<String> left, List<String> right) {
+ this.left = left;
+ this.right = right;
+
+ int size = left.size() + right.size() + 2;
+ vDown = new int[size];
+ vUp = new int[size];
+ }
+
+ private void buildScript(int start1, int end1, int start2, int end2, List<Pair<LineOperation, String>> result) {
+ Snake middle = getMiddleSnake(start1, end1, start2, end2);
+
+ if (middle == null
+ || middle.start == end1 && middle.diag == end1 - end2
+ || middle.end == start1 && middle.diag == start1 - start2) {
+
+ int i = start1;
+ int j = start2;
+ while (i < end1 || j < end2) {
+ if (i < end1 && j < end2 && left.get(i).equals(right.get(j))) {
+ result.add(new Pair<>(LineOperation.keep, left.get(i)));
+ ++i;
+ ++j;
+ } else {
+ if (end1 - start1 > end2 - start2) {
+ result.add(new Pair<>(LineOperation.delete, left.get(i)));
+ ++i;
+ } else {
+ result.add(new Pair<>(LineOperation.insert, right.get(j)));
+ ++j;
+ }
+ }
+ }
+
+ } else {
+ buildScript(start1, middle.start, start2, middle.start - middle.diag, result);
+ for (int i = middle.start; i < middle.end; ++i) {
+ result.add(new Pair<>(LineOperation.keep, left.get(i)));
+ }
+ buildScript(middle.end, end1, middle.end - middle.diag, end2, result);
+ }
+ }
+
+ private Snake buildSnake(final int start, final int diag, final int end1, final int end2) {
+ int end = start;
+ while (end - diag < end2 && end < end1 && left.get(end).equals(right.get(end - diag))) {
+ ++end;
+ }
+ return new Snake(start, end, diag);
+ }
+
+ private Snake getMiddleSnake(final int start1, final int end1, final int start2, final int end2) {
+ final int m = end1 - start1;
+ final int n = end2 - start2;
+ if (m == 0 || n == 0) {
+ return null;
+ }
+
+ final int delta = m - n;
+ final int sum = n + m;
+ final int offset = (sum % 2 == 0 ? sum : sum + 1) / 2;
+ vDown[1 + offset] = start1;
+ vUp[1 + offset] = end1 + 1;
+
+ for (int d = 0; d <= offset; ++d) {
+ // Down
+ for (int k = -d; k <= d; k += 2) {
+ // First step
+
+ final int i = k + offset;
+ if (k == -d || k != d && vDown[i - 1] < vDown[i + 1]) {
+ vDown[i] = vDown[i + 1];
+ } else {
+ vDown[i] = vDown[i - 1] + 1;
+ }
+
+ int x = vDown[i];
+ int y = x - start1 + start2 - k;
+
+ while (x < end1 && y < end2 && left.get(x).equals(right.get(y))) {
+ vDown[i] = ++x;
+ ++y;
+ }
+ // Second step
+ if (delta % 2 != 0 && delta - d <= k && k <= delta + d) {
+ if (vUp[i - delta] <= vDown[i]) { // NOPMD
+ return buildSnake(vUp[i - delta], k + start1 - start2, end1, end2);
+ }
+ }
+ }
+
+ // Up
+ for (int k = delta - d; k <= delta + d; k += 2) {
+ // First step
+ final int i = k + offset - delta;
+ if (k == delta - d || k != delta + d && vUp[i + 1] <= vUp[i - 1]) {
+ vUp[i] = vUp[i + 1] - 1;
+ } else {
+ vUp[i] = vUp[i - 1];
+ }
+
+ int x = vUp[i] - 1;
+ int y = x - start1 + start2 - k;
+ while (x >= start1 && y >= start2 && left.get(x).equals(right.get(y))) {
+ vUp[i] = x--;
+ y--;
+ }
+ // Second step
+ if (delta % 2 == 0 && -d <= k && k <= d) {
+ if (vUp[i] <= vDown[i + delta]) { // NOPMD
+ return buildSnake(vUp[i], k + start1 - start2, end1, end2);
+ }
+ }
+ }
+ }
+
+ // this should not happen
+ throw new RuntimeException("Internal Error");
+ }
+
+ private static class Snake {
+ private final int start;
+ private final int end;
+ private final int diag;
+
+ private Snake(int start, int end, int diag) {
+ this.start = start;
+ this.end = end;
+ this.diag = diag;
+ }
+ }
+
+ private enum LineOperation {
+ keep(" "), delete("- "), insert("+ ");
+ private final String prefix;
+ LineOperation(String prefix) {
+ this.prefix = prefix;
+ }
+ }
+
+ /** @return line-based diff in unified format, or empty if the contents are identical. */
+ public static Optional<String> diff(List<String> left, List<String> right) {
+ List<Pair<LineOperation, String>> changes = new ArrayList<>(Math.max(left.size(), right.size()));
+ new LinesComparator(left, right).buildScript(0, left.size(), 0, right.size(), changes);
+
+ // After we have a list of keep, delete, insert for each line from left and right input, generate a unified
+ // diff by printing all delete and insert operations with contextLines of keep lines before and after.
+ // Make sure the change windows are non-overlapping by continuously growing the window
+ int contextLines = 3;
+ List<int[]> changeWindows = new ArrayList<>();
+ int[] last = null;
+ for (int i = 0, leftIndex = 0, rightIndex = 0; i < changes.size(); i++) {
+ if (changes.get(i).getFirst() == LineOperation.keep) {
+ leftIndex++;
+ rightIndex++;
+ continue;
+ }
+
+ // We found a new change and it is too far away from the previous change to be combined into the same window
+ if (last == null || i - last[1] > contextLines) {
+ last = new int[]{Math.max(i - contextLines, 0), Math.min(i + contextLines + 1, changes.size()), Math.max(leftIndex - contextLines, 0), Math.max(rightIndex - contextLines, 0)};
+ changeWindows.add(last);
+ } else // otherwise, extend the previous change window
+ last[1] = Math.min(i + contextLines + 1, changes.size());
+
+ if (changes.get(i).getFirst() == LineOperation.delete) leftIndex++;
+ else rightIndex++;
+ }
+ if (changeWindows.isEmpty()) return Optional.empty();
+
+ StringBuilder sb = new StringBuilder();
+ for (int[] changeWindow: changeWindows) {
+ int start = changeWindow[0], end = changeWindow[1], leftIndex = changeWindow[2], rightIndex = changeWindow[3];
+ Map<LineOperation, Long> counts = IntStream.range(start, end)
+ .mapToObj(i -> changes.get(i).getFirst())
+ .collect(Collectors.groupingBy(i -> i, Collectors.counting()));
+ sb.append("@@ -").append(leftIndex + 1).append(',').append(end - start - counts.getOrDefault(LineOperation.insert, 0L))
+ .append(" +").append(rightIndex + 1).append(',').append(end - start - counts.getOrDefault(LineOperation.delete, 0L)).append(" @@\n");
+ for (int i = start; i < end; i++)
+ sb.append(changes.get(i).getFirst().prefix).append(changes.get(i).getSecond()).append('\n');
+ }
+ return Optional.of(sb.toString());
+ }
+}
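
A small usage sketch of the comparator (inputs are illustrative):

Optional<String> diff = LinesComparator.diff(List.of("a", "b", "c"), List.of("a", "x", "c"));
// Present because the inputs differ: a single "@@ -1,3 +1,3 @@" hunk containing "- b", "+ x" and the unchanged lines as context.
diff.ifPresent(System.out::print);
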
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReader.java
index 4f01df21430..7ddd0af7a7a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReader.java
@@ -1,5 +1,5 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.application;
+package com.yahoo.vespa.hosted.controller.application.pkg;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
@@ -10,6 +10,7 @@ import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.Optional;
import java.util.function.Predicate;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
@@ -23,16 +24,15 @@ public class ZipStreamReader {
private final List<ZipEntryWithContent> entries = new ArrayList<>();
private final int maxEntrySizeInBytes;
- public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes) {
+ public ZipStreamReader(InputStream input, Predicate<String> entryNameMatcher, int maxEntrySizeInBytes, boolean throwIfEntryExceedsMaxSize) {
this.maxEntrySizeInBytes = maxEntrySizeInBytes;
try (ZipInputStream zipInput = new ZipInputStream(input)) {
ZipEntry zipEntry;
while (null != (zipEntry = zipInput.getNextEntry())) {
if (!entryNameMatcher.test(requireName(zipEntry.getName()))) continue;
- entries.add(new ZipEntryWithContent(zipEntry, readContent(zipInput)));
+ entries.add(readContent(zipEntry, zipInput, throwIfEntryExceedsMaxSize));
}
-
} catch (IOException e) {
throw new UncheckedIOException("IO error reading zip content", e);
}
@@ -59,7 +59,7 @@ public class ZipStreamReader {
}
}
- private byte[] readContent(ZipInputStream zipInput) {
+ private ZipEntryWithContent readContent(ZipEntry zipEntry, ZipInputStream zipInput, boolean throwIfEntryExceedsMaxSize) {
try (ByteArrayOutputStream bis = new ByteArrayOutputStream()) {
byte[] buffer = new byte[2048];
int read;
@@ -67,12 +67,15 @@ public class ZipStreamReader {
while ( -1 != (read = zipInput.read(buffer))) {
size += read;
if (size > maxEntrySizeInBytes) {
- throw new IllegalArgumentException("Entry in zip content exceeded size limit of " +
- maxEntrySizeInBytes + " bytes");
- }
- bis.write(buffer, 0, read);
+ if (throwIfEntryExceedsMaxSize) throw new IllegalArgumentException(
+ "Entry in zip content exceeded size limit of " + maxEntrySizeInBytes + " bytes");
+ } else bis.write(buffer, 0, read);
}
- return bis.toByteArray();
+
+ boolean hasContent = size <= maxEntrySizeInBytes;
+ return new ZipEntryWithContent(zipEntry,
+ Optional.of(bis).filter(__ -> hasContent).map(ByteArrayOutputStream::toByteArray),
+ size);
} catch (IOException e) {
throw new UncheckedIOException("Failed reading from zipped content", e);
}
@@ -96,16 +99,19 @@ public class ZipStreamReader {
public static class ZipEntryWithContent {
private final ZipEntry zipEntry;
- private final byte[] content;
+ private final Optional<byte[]> content;
+ private final long size;
- public ZipEntryWithContent(ZipEntry zipEntry, byte[] content) {
+ public ZipEntryWithContent(ZipEntry zipEntry, Optional<byte[]> content, long size) {
this.zipEntry = zipEntry;
this.content = content;
+ this.size = size;
}
public ZipEntry zipEntry() { return zipEntry; }
- public byte[] content() { return content; }
-
+ public byte[] contentOrThrow() { return content.orElseThrow(); }
+ public Optional<byte[]> content() { return content; }
+ public long size() { return size; }
}
}
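
With the new constructor flag, oversized entries can either abort the read as before or be returned as size-only records without content; a hedged usage sketch (zip is an illustrative byte array):

// List every entry up to 1 MiB; larger entries are kept, but without content, instead of throwing.
ZipStreamReader reader = new ZipStreamReader(new ByteArrayInputStream(zip), name -> true, 1 << 20, false);
for (ZipStreamReader.ZipEntryWithContent entry : reader.entries())
    System.out.println(entry.zipEntry().getName() + ": " + entry.size() + "B, content kept: " + entry.content().isPresent());
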
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java
index 0f5c2123325..52c192cfeab 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/archive/CuratorArchiveBucketDb.java
@@ -1,6 +1,7 @@
// Copyright 2021 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.archive;
+import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.text.Text;
@@ -45,7 +46,8 @@ public class CuratorArchiveBucketDb {
public CuratorArchiveBucketDb(Controller controller) {
this.archiveService = controller.serviceRegistry().archiveService();
this.curatorDb = controller.curator();
- this.enabled = controller.zoneRegistry().system().isPublic();
+ SystemName system = controller.zoneRegistry().system();
+ this.enabled = system.isPublic() || system.isCd();
}
public Optional<URI> archiveUriFor(ZoneId zoneId, TenantName tenant) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
index 986e89d03f5..eda6051ed07 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunner.java
@@ -41,7 +41,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId;
import com.yahoo.vespa.hosted.controller.api.integration.organization.DeploymentFailureMails;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Mail;
import com.yahoo.vespa.hosted.controller.application.ActivateResult;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.Endpoint;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
index b622fc0bd75..da5282d8e93 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/JobController.java
@@ -12,7 +12,6 @@ import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.LogEntry;
-import com.yahoo.vespa.hosted.controller.api.integration.configserver.NotFoundException;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
@@ -22,13 +21,13 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TestReport;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId;
import com.yahoo.vespa.hosted.controller.application.ApplicationList;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageDiff;
import com.yahoo.vespa.hosted.controller.persistence.BufferedLogStore;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
-import java.net.URI;
import java.security.cert.X509Certificate;
import java.time.Duration;
import java.time.Instant;
@@ -48,7 +47,6 @@ import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.UnaryOperator;
import java.util.logging.Level;
-import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.google.common.collect.ImmutableList.copyOf;
@@ -369,7 +367,8 @@ public class JobController {
List<Lock> locks = new ArrayList<>();
try {
// Ensure no step is still running before we finish the run — report depends transitively on all the other steps.
- for (Step step : report.allPrerequisites(run(id).get().steps().keySet()))
+ Run unlockedRun = run(id).get();
+ for (Step step : report.allPrerequisites(unlockedRun.steps().keySet()))
locks.add(curator.lock(id.application(), id.type(), step));
locked(id, run -> { // Store the modified run after it has been written to history, in case the latter fails.
@@ -400,6 +399,20 @@ public class JobController {
metric.jobFinished(run.id().job(), finishedRun.status());
return finishedRun;
});
+
+ DeploymentId deploymentId = new DeploymentId(unlockedRun.id().application(), unlockedRun.id().job().type().zone(controller.system()));
+ (unlockedRun.versions().targetApplication().isDeployedDirectly() ?
+ Stream.of(unlockedRun.id().type()) :
+ JobType.allIn(controller.system()).stream().filter(jobType -> !jobType.environment().isManuallyDeployed()))
+ .flatMap(jobType -> controller.jobController().runs(unlockedRun.id().application(), jobType).values().stream())
+ .mapToLong(run -> run.versions().targetApplication().buildNumber().orElse(Integer.MAX_VALUE))
+ .min()
+ .ifPresent(oldestBuild -> {
+ if (unlockedRun.versions().targetApplication().isDeployedDirectly())
+ controller.applications().applicationStore().pruneDevDiffs(deploymentId, oldestBuild);
+ else
+ controller.applications().applicationStore().pruneDiffs(deploymentId.applicationId().tenant(), deploymentId.applicationId().application(), oldestBuild);
+ });
}
finally {
for (Lock lock : locks)
@@ -425,12 +438,19 @@ public class JobController {
applicationPackage.compileVersion(),
applicationPackage.buildTime(),
sourceUrl,
- revision.map(SourceRevision::commit)));
+ revision.map(SourceRevision::commit),
+ false));
+ byte[] diff = application.get().latestVersion()
+ .map(v -> v.buildNumber().getAsLong())
+ .flatMap(prevBuild -> controller.applications().applicationStore().find(id.tenant(), id.application(), prevBuild))
+ .map(prevApplication -> ApplicationPackageDiff.diff(new ApplicationPackage(prevApplication), applicationPackage))
+ .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
controller.applications().applicationStore().put(id.tenant(),
id.application(),
version.get(),
- applicationPackage.zippedContent());
+ applicationPackage.zippedContent(),
+ diff);
controller.applications().applicationStore().putTester(id.tenant(),
id.application(),
version.get(),
@@ -480,16 +500,26 @@ public class JobController {
controller.applications().store(application);
});
- last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
+ DeploymentId deploymentId = new DeploymentId(id, type.zone(controller.system()));
+ Optional<Run> lastRun = last(id, type);
+ lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id()));
+
+ long build = 1 + lastRun.map(run -> run.versions().targetApplication().buildNumber().orElse(0)).orElse(0L);
+ ApplicationVersion version = ApplicationVersion.from(Optional.empty(), build, Optional.empty(), Optional.empty(),
+ Optional.empty(), Optional.empty(), Optional.empty(), true);
+
+ byte[] diff = lastRun.map(run -> run.versions().targetApplication())
+ .map(prevVersion -> ApplicationPackageDiff.diff(new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)), applicationPackage))
+ .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage));
controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
- controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent());
+ controller.applications().applicationStore().putDev(deploymentId, version, applicationPackage.zippedContent(), diff);
start(id,
type,
new Versions(platform.orElse(applicationPackage.deploymentSpec().majorVersion()
.flatMap(controller.applications()::lastCompatibleVersion)
.orElseGet(controller::readSystemVersion)),
- ApplicationVersion.unknown,
+ version,
Optional.empty(),
Optional.empty()),
false,
@@ -558,7 +588,7 @@ public class JobController {
application.get().productionDeployments().values().stream()
.flatMap(List::stream)
.map(Deployment::applicationVersion)
- .filter(version -> ! version.isUnknown())
+ .filter(version -> ! version.isUnknown() && ! version.isDeployedDirectly())
.min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
.ifPresent(oldestDeployed -> {
controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed);
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java
index a69af024b96..0039ce2320e 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgrader.java
@@ -1,13 +1,14 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.controller.maintenance;
+import com.yahoo.component.Version;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.Instance;
-import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.deployment.Run;
import com.yahoo.vespa.hosted.controller.deployment.Versions;
import com.yahoo.yolean.Exceptions;
@@ -32,7 +33,8 @@ public class DeploymentUpgrader extends ControllerMaintainer {
protected double maintain() {
AtomicInteger attempts = new AtomicInteger();
AtomicInteger failures = new AtomicInteger();
- Versions target = new Versions(controller().readSystemVersion(), ApplicationVersion.unknown, Optional.empty(), Optional.empty());
+ Version systemVersion = controller().readSystemVersion();
+
for (Application application : controller().applications().readable())
for (Instance instance : application.instances().values())
for (Deployment deployment : instance.deployments().values())
@@ -40,8 +42,11 @@ public class DeploymentUpgrader extends ControllerMaintainer {
attempts.incrementAndGet();
JobId job = new JobId(instance.id(), JobType.from(controller().system(), deployment.zone()).get());
if ( ! deployment.zone().environment().isManuallyDeployed()) continue;
+
+ Run last = controller().jobController().last(job).get();
+ Versions target = new Versions(systemVersion, last.versions().targetApplication(), Optional.empty(), Optional.empty());
if ( ! deployment.version().isBefore(target.targetPlatform())) continue;
- if ( controller().clock().instant().isBefore(controller().jobController().last(job).get().start().plus(Duration.ofDays(1)))) continue;
+ if ( controller().clock().instant().isBefore(last.start().plus(Duration.ofDays(1)))) continue;
if ( ! isLikelyNightFor(job)) continue;
log.log(Level.FINE, "Upgrading deployment of " + instance.id() + " in " + deployment.zone());
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
index 26fb4be04af..a5db6a152dd 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
@@ -107,6 +107,7 @@ public class ApplicationSerializer {
private static final String branchField = "branchField";
private static final String commitField = "commitField";
private static final String authorEmailField = "authorEmailField";
+ private static final String deployedDirectlyField = "deployedDirectly";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
@@ -228,6 +229,7 @@ public class ApplicationSerializer {
applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
+ object.setBool(deployedDirectlyField, applicationVersion.isDeployedDirectly());
}
private void toSlime(SourceRevision sourceRevision, Cursor object) {
@@ -422,7 +424,11 @@ public class ApplicationSerializer {
Optional<String> sourceUrl = SlimeUtils.optionalString(object.field(sourceUrlField));
Optional<String> commit = SlimeUtils.optionalString(object.field(commitField));
- return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit);
+ // TODO (freva): Simplify once this has rolled out everywhere
+ Inspector deployedDirectlyInspector = object.field(deployedDirectlyField);
+ boolean deployedDirectly = deployedDirectlyInspector.valid() && deployedDirectlyInspector.asBool();
+
+ return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit, deployedDirectly);
}
private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
index 8ffa4823ead..b4a580a1562 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializer.java
@@ -89,6 +89,7 @@ class RunSerializer {
private static final String branchField = "branch";
private static final String commitField = "commit";
private static final String authorEmailField = "authorEmail";
+ private static final String deployedDirectlyField = "deployedDirectly";
private static final String compileVersionField = "compileVersion";
private static final String buildTimeField = "buildTime";
private static final String sourceUrlField = "sourceUrl";
@@ -175,8 +176,12 @@ class RunSerializer {
Optional<String> sourceUrl = SlimeUtils.optionalString(versionObject.field(sourceUrlField));
Optional<String> commit = SlimeUtils.optionalString(versionObject.field(commitField));
+ // TODO (freva): Simplify once this has rolled out everywhere
+ Inspector deployedDirectlyInspector = versionObject.field(deployedDirectlyField);
+ boolean deployedDirectly = deployedDirectlyInspector.valid() && deployedDirectlyInspector.asBool();
+
return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail,
- compileVersion, buildTime, sourceUrl, commit);
+ compileVersion, buildTime, sourceUrl, commit, deployedDirectly);
}
// Don't change this — introduce a separate array instead.
@@ -259,6 +264,7 @@ class RunSerializer {
applicationVersion.buildTime().ifPresent(time -> versionsObject.setLong(buildTimeField, time.toEpochMilli()));
applicationVersion.sourceUrl().ifPresent(url -> versionsObject.setString(sourceUrlField, url));
applicationVersion.commit().ifPresent(commit -> versionsObject.setString(commitField, commit));
+ versionsObject.setBool(deployedDirectlyField, applicationVersion.isDeployedDirectly());
}
// Don't change this - introduce a separate array with new values if needed.
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 65a9e2ae282..22bd3c9d062 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -25,6 +25,7 @@ import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
import com.yahoo.io.IOUtils;
+import com.yahoo.restapi.ByteArrayResponse;
import com.yahoo.restapi.ErrorResponse;
import com.yahoo.restapi.MessageResponse;
import com.yahoo.restapi.Path;
@@ -76,7 +77,7 @@ import com.yahoo.vespa.hosted.controller.api.role.Role;
import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition;
import com.yahoo.vespa.hosted.controller.api.role.SecurityContext;
import com.yahoo.vespa.hosted.controller.application.ActivateResult;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
@@ -245,6 +246,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/compile-version")) return compileVersion(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deployment")) return JobControllerApiHandlerHelper.overviewResponse(controller, TenantAndApplicationId.from(path.get("tenant"), path.get("application")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/package")) return applicationPackage(path.get("tenant"), path.get("application"), request);
+ if (path.matches("/application/v4/tenant/{tenant}/application/{application}/diff/{number}")) return applicationPackageDiff(path.get("tenant"), path.get("application"), path.get("number"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying/pin")) return deploying(path.get("tenant"), path.get("application"), "default", request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/metering")) return metering(path.get("tenant"), path.get("application"), request);
@@ -255,6 +257,7 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job")) return JobControllerApiHandlerHelper.jobTypeResponse(controller, appIdFromPath(path), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}")) return JobControllerApiHandlerHelper.runResponse(controller.jobController().runs(appIdFromPath(path), jobTypeFromPath(path)).descendingMap(), Optional.ofNullable(request.getProperty("limit")), request.getUri());
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/package")) return devApplicationPackage(appIdFromPath(path), jobTypeFromPath(path));
+ if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/diff/{number}")) return devApplicationPackageDiff(runIdFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/test-config")) return testConfig(appIdFromPath(path), jobTypeFromPath(path));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/job/{jobtype}/run/{number}")) return JobControllerApiHandlerHelper.runDetailsResponse(controller.jobController(), runIdFromPath(path), request.getProperty("after"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/instance/{instance}/environment/{environment}/region/{region}")) return deployment(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), request);
@@ -592,13 +595,20 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
throw new IllegalArgumentException("Only manually deployed zones have dev packages");
ZoneId zone = type.zone(controller.system());
- byte[] applicationPackage = controller.applications().applicationStore().getDev(id, zone);
+ ApplicationVersion version = controller.jobController().last(id, type).get().versions().targetApplication();
+ byte[] applicationPackage = controller.applications().applicationStore().get(new DeploymentId(id, zone), version);
return new ZipResponse(id.toFullString() + "." + zone.value() + ".zip", applicationPackage);
}
+ private HttpResponse devApplicationPackageDiff(RunId runId) {
+ DeploymentId deploymentId = new DeploymentId(runId.application(), runId.job().type().zone(controller.system()));
+ return controller.applications().applicationStore().getDevDiff(deploymentId, runId.number())
+ .map(ByteArrayResponse::new)
+ .orElseThrow(() -> new NotExistsException("No application package diff found for " + runId));
+ }
+
private HttpResponse applicationPackage(String tenantName, String applicationName, HttpRequest request) {
var tenantAndApplication = TenantAndApplicationId.from(tenantName, applicationName);
- var applicationId = ApplicationId.from(tenantName, applicationName, InstanceName.defaultName().value());
long buildNumber;
var requestedBuild = Optional.ofNullable(request.getProperty("build")).map(build -> {
@@ -628,6 +638,13 @@ public class ApplicationApiHandler extends AuditLoggingRequestHandler {
return new ZipResponse(filename, applicationPackage.get());
}
+ private HttpResponse applicationPackageDiff(String tenant, String application, String number) {
+ TenantAndApplicationId tenantAndApplication = TenantAndApplicationId.from(tenant, application);
+ return controller.applications().applicationStore().getDiff(tenantAndApplication.tenant(), tenantAndApplication.application(), Long.parseLong(number))
+ .map(ByteArrayResponse::new)
+ .orElseThrow(() -> new NotExistsException("No application package diff found for '" + tenantAndApplication + "' with build number " + number));
+ }
+
private HttpResponse application(String tenantName, String applicationName, HttpRequest request) {
Slime slime = new Slime();
toSlime(slime.setObject(), getApplication(tenantName, applicationName), request);
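(Editor's sketch, not part of the patch.) The two new routes return the stored plain-text application package diff for a given build or dev run. A minimal client call against the tenant-level route; the host, tenant, application and build number are placeholders, and authentication is omitted:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class FetchPackageDiff {
        public static void main(String[] args) throws Exception {
            // Placeholder host and identifiers; the path shape comes from the routes added above.
            URI uri = URI.create("https://controller.example.com/application/v4/tenant/my-tenant/application/my-app/diff/42");
            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(HttpRequest.newBuilder(uri).GET().build(), HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body()); // plain-text diff, or a 404 if no diff is stored for that build
        }
    }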
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
index 260fc9628e9..3fd221abe10 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelper.java
@@ -20,7 +20,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.ConvergenceSummary;
@@ -40,12 +40,10 @@ import java.time.Instant;
import java.time.format.TextStyle;
import java.util.Arrays;
import java.util.Collection;
-import java.util.Comparator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Optional;
-import java.util.stream.Collectors;
import java.util.stream.Stream;
import static com.yahoo.config.application.api.DeploymentSpec.UpgradePolicy.canary;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java
index 6f5b1f30592..c116aa43c0d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiHandler.java
@@ -3,22 +3,36 @@ package com.yahoo.vespa.hosted.controller.restapi.horizon;
import com.google.inject.Inject;
import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
import com.yahoo.restapi.ErrorResponse;
import com.yahoo.restapi.Path;
+import com.yahoo.vespa.flags.BooleanFlag;
+import com.yahoo.vespa.flags.FetchVector;
+import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.BillingController;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonClient;
import com.yahoo.vespa.hosted.controller.api.integration.horizon.HorizonResponse;
+import com.yahoo.vespa.hosted.controller.api.role.Role;
+import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition;
import com.yahoo.vespa.hosted.controller.api.role.SecurityContext;
+import com.yahoo.vespa.hosted.controller.api.role.TenantRole;
import com.yahoo.yolean.Exceptions;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.EnumSet;
+import java.util.HashSet;
import java.util.Optional;
+import java.util.Set;
import java.util.logging.Level;
+import java.util.stream.Collectors;
/**
* Proxies metrics requests from Horizon UI
@@ -27,22 +41,36 @@ import java.util.logging.Level;
*/
public class HorizonApiHandler extends LoggingRequestHandler {
+ private final BillingController billingController;
private final SystemName systemName;
private final HorizonClient client;
+ private final BooleanFlag enabledHorizonDashboard;
+
+ private static final EnumSet<RoleDefinition> operatorRoleDefinitions =
+ EnumSet.of(RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter);
@Inject
- public HorizonApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller) {
+ public HorizonApiHandler(LoggingRequestHandler.Context parentCtx, Controller controller, FlagSource flagSource) {
super(parentCtx);
+ this.billingController = controller.serviceRegistry().billingController();
this.systemName = controller.system();
this.client = controller.serviceRegistry().horizonClient();
+ this.enabledHorizonDashboard = Flags.ENABLED_HORIZON_DASHBOARD.bindTo(flagSource);
}
@Override
public HttpResponse handle(HttpRequest request) {
+ var roles = getRoles(request);
+ var operator = roles.stream().map(Role::definition).anyMatch(operatorRoleDefinitions::contains);
+ var authorizedTenants = getAuthorizedTenants(roles);
+
+ if (!operator && authorizedTenants.isEmpty())
+ return ErrorResponse.forbidden("No tenant with enabled metrics view");
+
try {
switch (request.getMethod()) {
case GET: return get(request);
- case POST: return post(request);
+ case POST: return post(request, authorizedTenants, operator);
case PUT: return put(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
@@ -65,10 +93,10 @@ public class HorizonApiHandler extends LoggingRequestHandler {
return ErrorResponse.notFoundError("Nothing at " + path);
}
- private HttpResponse post(HttpRequest request) {
+ private HttpResponse post(HttpRequest request, Set<TenantName> authorizedTenants, boolean operator) {
Path path = new Path(request.getUri());
- if (path.matches("/horizon/v1/tsdb/api/query/graph")) return tsdbQuery(request, true);
- if (path.matches("/horizon/v1/meta/search/timeseries")) return tsdbQuery(request, false);
+ if (path.matches("/horizon/v1/tsdb/api/query/graph")) return tsdbQuery(request, authorizedTenants, operator, true);
+ if (path.matches("/horizon/v1/meta/search/timeseries")) return tsdbQuery(request, authorizedTenants, operator, false);
return ErrorResponse.notFoundError("Nothing at " + path);
}
@@ -78,10 +106,9 @@ public class HorizonApiHandler extends LoggingRequestHandler {
return ErrorResponse.notFoundError("Nothing at " + path);
}
- private HttpResponse tsdbQuery(HttpRequest request, boolean isMetricQuery) {
- SecurityContext securityContext = getAttribute(request, SecurityContext.ATTRIBUTE_NAME, SecurityContext.class);
+ private HttpResponse tsdbQuery(HttpRequest request, Set<TenantName> authorizedTenants, boolean operator, boolean isMetricQuery) {
try {
- byte[] data = TsdbQueryRewriter.rewrite(request.getData().readAllBytes(), securityContext.roles(), systemName);
+ byte[] data = TsdbQueryRewriter.rewrite(request.getData().readAllBytes(), authorizedTenants, operator, systemName);
return new JsonInputStreamResponse(isMetricQuery ? client.getMetrics(data) : client.getMetaData(data));
} catch (TsdbQueryRewriter.UnauthorizedException e) {
return ErrorResponse.forbidden("Access denied");
@@ -90,11 +117,22 @@ public class HorizonApiHandler extends LoggingRequestHandler {
}
}
- private static <T> T getAttribute(HttpRequest request, String attributeName, Class<T> clazz) {
- return Optional.ofNullable(request.getJDiscRequest().context().get(attributeName))
- .filter(clazz::isInstance)
- .map(clazz::cast)
- .orElseThrow(() -> new IllegalArgumentException("Attribute '" + attributeName + "' was not set on request"));
+ private static Set<Role> getRoles(HttpRequest request) {
+ return Optional.ofNullable(request.getJDiscRequest().context().get(SecurityContext.ATTRIBUTE_NAME))
+ .filter(SecurityContext.class::isInstance)
+ .map(SecurityContext.class::cast)
+ .map(SecurityContext::roles)
+ .orElseThrow(() -> new IllegalArgumentException("Attribute '" + SecurityContext.ATTRIBUTE_NAME + "' was not set on request"));
+ }
+
+ private Set<TenantName> getAuthorizedTenants(Set<Role> roles) {
+ var horizonEnabled = roles.stream()
+ .filter(TenantRole.class::isInstance)
+ .map(role -> ((TenantRole) role).tenant())
+ .filter(tenant -> enabledHorizonDashboard.with(FetchVector.Dimension.TENANT_ID, tenant.value()).value())
+ .collect(Collectors.toList());
+
+ return new HashSet<>(billingController.tenantsWithPlan(horizonEnabled, PlanId.from("pay-as-you-go")));
}
private static class JsonInputStreamResponse extends HttpResponse {
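(Editor's sketch, not part of the patch.) The access check added to HorizonApiHandler amounts to: operators always pass, and every other caller needs at least one tenant role whose tenant both has the ENABLED_HORIZON_DASHBOARD flag set and is on the pay-as-you-go plan. Below is a self-contained simplification of that decision; the two predicates stand in for the BooleanFlag lookup and BillingController.tenantsWithPlan used by the real code.

    import java.util.Set;
    import java.util.function.Predicate;
    import java.util.stream.Collectors;

    final class HorizonAccessSketch {
        /** Tenants the request may query metrics for; empty means the request is rejected unless the caller is an operator. */
        static Set<String> authorizedTenants(Set<String> tenantRoleTenants,
                                             Predicate<String> horizonFlagEnabled,  // stands in for Flags.ENABLED_HORIZON_DASHBOARD
                                             Predicate<String> onPayAsYouGoPlan) {  // stands in for BillingController.tenantsWithPlan
            return tenantRoleTenants.stream()
                                    .filter(horizonFlagEnabled)
                                    .filter(onPayAsYouGoPlan)
                                    .collect(Collectors.toSet());
        }

        static boolean allowed(boolean operator, Set<String> authorizedTenants) {
            return operator || !authorizedTenants.isEmpty();
        }
    }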
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java
index e034be46063..3e20584dbac 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriter.java
@@ -7,12 +7,8 @@ import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.vespa.hosted.controller.api.role.Role;
-import com.yahoo.vespa.hosted.controller.api.role.RoleDefinition;
-import com.yahoo.vespa.hosted.controller.api.role.TenantRole;
import java.io.IOException;
-import java.util.EnumSet;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
@@ -23,20 +19,8 @@ import java.util.stream.Collectors;
public class TsdbQueryRewriter {
private static final ObjectMapper mapper = new ObjectMapper();
- private static final EnumSet<RoleDefinition> operatorRoleDefinitions =
- EnumSet.of(RoleDefinition.hostedOperator, RoleDefinition.hostedSupporter);
-
- public static byte[] rewrite(byte[] data, Set<Role> roles, SystemName systemName) throws IOException {
- boolean operator = roles.stream().map(Role::definition).anyMatch(operatorRoleDefinitions::contains);
-
- // Anyone with any tenant relation can view metrics for apps within those tenants
- Set<TenantName> authorizedTenants = roles.stream()
- .filter(TenantRole.class::isInstance)
- .map(role -> ((TenantRole) role).tenant())
- .collect(Collectors.toUnmodifiableSet());
- if (!operator && authorizedTenants.isEmpty())
- throw new UnauthorizedException();
+ public static byte[] rewrite(byte[] data, Set<TenantName> authorizedTenants, boolean operator, SystemName systemName) throws IOException {
JsonNode root = mapper.readTree(data);
requireLegalType(root);
getField(root, "executionGraph", ArrayNode.class)
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
index 7b0a2c9d6d6..0ecc8ac81df 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/user/UserApiHandler.java
@@ -22,6 +22,7 @@ import com.yahoo.text.Text;
import com.yahoo.vespa.flags.BooleanFlag;
import com.yahoo.vespa.flags.FetchVector;
import com.yahoo.vespa.flags.FlagSource;
+import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.flags.IntFlag;
import com.yahoo.vespa.flags.PermanentFlags;
import com.yahoo.vespa.hosted.controller.Controller;
@@ -71,6 +72,7 @@ public class UserApiHandler extends LoggingRequestHandler {
private final Controller controller;
private final BooleanFlag enable_public_signup_flow;
private final IntFlag maxTrialTenants;
+ private final BooleanFlag enabledHorizonDashboard;
@Inject
public UserApiHandler(Context parentCtx, UserManagement users, Controller controller, FlagSource flagSource) {
@@ -79,6 +81,7 @@ public class UserApiHandler extends LoggingRequestHandler {
this.controller = controller;
this.enable_public_signup_flow = PermanentFlags.ENABLE_PUBLIC_SIGNUP_FLOW.bindTo(flagSource);
this.maxTrialTenants = PermanentFlags.MAX_TRIAL_TENANTS.bindTo(flagSource);
+ this.enabledHorizonDashboard = Flags.ENABLED_HORIZON_DASHBOARD.bindTo(flagSource);
}
@Override
@@ -184,6 +187,10 @@ public class UserApiHandler extends LoggingRequestHandler {
Cursor tenantRolesObject = tenantObject.setArray("roles");
tenantRolesByTenantName.getOrDefault(tenant, List.of())
.forEach(role -> tenantRolesObject.addString(role.definition().name()));
+ if (controller.system().isPublic()) {
+ tenantObject.setBool(enabledHorizonDashboard.id().toString(),
+ enabledHorizonDashboard.with(FetchVector.Dimension.TENANT_ID, tenant.value()).value());
+ }
});
if (!operatorRoles.isEmpty()) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
index 86b9370150c..433a976358e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
@@ -26,7 +26,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordData;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
import com.yahoo.vespa.hosted.controller.api.integration.dns.WeightedAliasTarget;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.application.Endpoint;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiffTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiffTest.java
new file mode 100644
index 00000000000..b2aba721a6f
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageDiffTest.java
@@ -0,0 +1,128 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.application.pkg;
+
+import org.junit.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.Map;
+import java.util.zip.Deflater;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipOutputStream;
+
+import static com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageDiff.diff;
+
+import static com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackageDiff.diffAgainstEmpty;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author freva
+ */
+public class ApplicationPackageDiffTest {
+ private static final ApplicationPackage app1 = applicationPackage(Map.of("file1", "contents of the\nfirst file", "dir/myfile", "Second file", "dir/binary", "øøøø"));
+ private static final ApplicationPackage app2 = applicationPackage(Map.of("file1", "updated contents\nof the\nfirst file\nafter some changes", "dir/myfile2", "Second file", "dir/binary", "øøøø"));
+
+ @Test
+ public void no_diff() {
+ assertEquals("No diff\n", new String(diff(app1, app1)));
+ }
+
+ @Test
+ public void diff_against_empty() {
+ assertEquals("--- dir/binary\n" +
+ "Diff skipped: File is binary (new file -> 8B)\n" +
+ "\n" +
+ "--- dir/myfile\n" +
+ "@@ -1,0 +1,1 @@\n" +
+ "+ Second file\n" +
+ "\n" +
+ "--- file1\n" +
+ "@@ -1,0 +1,2 @@\n" +
+ "+ contents of the\n" +
+ "+ first file\n" +
+ "\n", new String(diffAgainstEmpty(app1)));
+ }
+
+ @Test
+ public void full_diff() {
+ // Even though dir/binary is a binary file, we can see they are identical, so it should not print "Diff skipped"
+ assertEquals("--- dir/myfile\n" +
+ "@@ -1,1 +1,0 @@\n" +
+ "- Second file\n" +
+ "\n" +
+ "--- dir/myfile2\n" +
+ "@@ -1,0 +1,1 @@\n" +
+ "+ Second file\n" +
+ "\n" +
+ "--- file1\n" +
+ "@@ -1,2 +1,4 @@\n" +
+ "+ updated contents\n" +
+ "+ of the\n" +
+ "- contents of the\n" +
+ " first file\n" +
+ "+ after some changes\n" +
+ "\n", new String(diff(app1, app2)));
+ }
+
+ @Test
+ public void skips_diff_for_too_large_files() {
+ assertEquals("--- dir/myfile\n" +
+ "@@ -1,1 +1,0 @@\n" +
+ "- Second file\n" +
+ "\n" +
+ "--- dir/myfile2\n" +
+ "@@ -1,0 +1,1 @@\n" +
+ "+ Second file\n" +
+ "\n" +
+ "--- file1\n" +
+ "Diff skipped: File too large (26B -> 53B)\n" +
+ "\n", new String(diff(app1, app2, 12, 1000, 1000)));
+ }
+
+ @Test
+ public void skips_diff_if_file_diff_is_too_large() {
+ assertEquals("--- dir/myfile\n" +
+ "@@ -1,1 +1,0 @@\n" +
+ "- Second file\n" +
+ "\n" +
+ "--- dir/myfile2\n" +
+ "@@ -1,0 +1,1 @@\n" +
+ "+ Second file\n" +
+ "\n" +
+ "--- file1\n" +
+ "Diff skipped: Diff too large (96B)\n" +
+ "\n", new String(diff(app1, app2, 1000, 50, 1000)));
+ }
+
+ @Test
+ public void skips_diff_if_total_diff_is_too_large() {
+ assertEquals("--- dir/myfile\n" +
+ "@@ -1,1 +1,0 @@\n" +
+ "- Second file\n" +
+ "\n" +
+ "--- dir/myfile2\n" +
+ "Diff skipped: Total diff size >20B)\n" +
+ "\n" +
+ "--- file1\n" +
+ "Diff skipped: Total diff size >20B)\n" +
+ "\n", new String(diff(app1, app2, 1000, 1000, 20)));
+ }
+
+ private static ApplicationPackage applicationPackage(Map<String, String> files) {
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ try (ZipOutputStream out = new ZipOutputStream(baos)) {
+ out.setLevel(Deflater.NO_COMPRESSION); // This is for testing purposes so we skip compression for performance
+ for (Map.Entry<String, String> file : files.entrySet()) {
+ ZipEntry entry = new ZipEntry(file.getKey());
+ out.putNextEntry(entry);
+ out.write(file.getValue().getBytes(UTF_8));
+ out.closeEntry();
+ }
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ return new ApplicationPackage(baos.toByteArray());
+ }
+}
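(Editor's note, not part of the patch.) Read together, the three size-limit tests suggest the trailing diff(...) arguments are, in order, the largest file that will be diffed at all, the largest diff accepted for a single file, and a cap on the accumulated diff size. The names below are inferred from the expected messages, not taken from ApplicationPackageDiff itself; only the call shape matches the tests above.

    // Inside this test class; parameter names are assumptions.
    int maxFileSizeToDiff  = 12;    // larger input files are skipped ("File too large")
    int maxDiffSizePerFile = 1000;  // a single file's diff above this is skipped ("Diff too large")
    int maxTotalDiffSize   = 1000;  // once the accumulated diff exceeds this, remaining files are skipped ("Total diff size >...")
    byte[] result = diff(app1, app2, maxFileSizeToDiff, maxDiffSizePerFile, maxTotalDiffSize);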
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageTest.java
index 1849be9b6bd..75e00e3434c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ApplicationPackageTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ApplicationPackageTest.java
@@ -1,5 +1,5 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.application;
+package com.yahoo.vespa.hosted.controller.application.pkg;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationId;
@@ -109,10 +109,10 @@ public class ApplicationPackageTest {
}
private static Map<String, String> unzip(byte[] zip) {
- return new ZipStreamReader(new ByteArrayInputStream(zip), __ -> true, 1 << 10)
+ return new ZipStreamReader(new ByteArrayInputStream(zip), __ -> true, 1 << 10, true)
.entries().stream()
.collect(Collectors.toMap(entry -> entry.zipEntry().getName(),
- entry -> new String(entry.content(), UTF_8)));
+ entry -> new String(entry.contentOrThrow(), UTF_8)));
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparatorTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparatorTest.java
new file mode 100644
index 00000000000..92137094f62
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/LinesComparatorTest.java
@@ -0,0 +1,112 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.application.pkg;
+
+import org.junit.Test;
+
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+import static org.junit.Assert.assertEquals;
+
+public class LinesComparatorTest {
+ private static final String text1 = "This part of the\n" +
+ "document has stayed the\n" +
+ "same from version to\n" +
+ "version. It shouldn't\n" +
+ "be shown if it doesn't\n" +
+ "change. Otherwise, that\n" +
+ "would not be helping to\n" +
+ "compress the size of the\n" +
+ "changes.\n" +
+ "\n" +
+ "This paragraph contains\n" +
+ "text that is outdated.\n" +
+ "It will be deleted in the\n" +
+ "near future.\n" +
+ "\n" +
+ "It is important to spell\n" +
+ "check this dokument. On\n" +
+ "the other hand, a\n" +
+ "misspelled word isn't\n" +
+ "the end of the world.\n" +
+ "Nothing in the rest of\n" +
+ "this paragraph needs to\n" +
+ "be changed. Things can\n" +
+ "be added after it.";
+ private static final String text2 = "This is an important\n" +
+ "notice! It should\n" +
+ "therefore be located at\n" +
+ "the beginning of this\n" +
+ "document!\n" +
+ "\n" +
+ "This part of the\n" +
+ "document has stayed the\n" +
+ "same from version to\n" +
+ "version. It shouldn't\n" +
+ "be shown if it doesn't\n" +
+ "change. Otherwise, that\n" +
+ "would not be helping to\n" +
+ "compress the size of the\n" +
+ "changes.\n" +
+ "\n" +
+ "It is important to spell\n" +
+ "check this document. On\n" +
+ "the other hand, a\n" +
+ "misspelled word isn't\n" +
+ "the end of the world.\n" +
+ "Nothing in the rest of\n" +
+ "this paragraph needs to\n" +
+ "be changed. Things can\n" +
+ "be added after it.\n" +
+ "\n" +
+ "This paragraph contains\n" +
+ "important new additions\n" +
+ "to this document.";
+
+ @Test
+ public void diff_test() {
+ assertDiff(null, "", "");
+ assertDiff(null, text1, text1);
+ assertDiff(text1.lines().map(line -> "- " + line).collect(Collectors.joining("\n", "@@ -1,24 +1,0 @@\n", "\n")), text1, "");
+ assertDiff(text1.lines().map(line -> "+ " + line).collect(Collectors.joining("\n", "@@ -1,0 +1,24 @@\n", "\n")), "", text1);
+ assertDiff("@@ -1,3 +1,9 @@\n" +
+ "+ This is an important\n" +
+ "+ notice! It should\n" +
+ "+ therefore be located at\n" +
+ "+ the beginning of this\n" +
+ "+ document!\n" +
+ "+ \n" +
+ " This part of the\n" +
+ " document has stayed the\n" +
+ " same from version to\n" +
+ "@@ -7,14 +13,9 @@\n" +
+ " would not be helping to\n" +
+ " compress the size of the\n" +
+ " changes.\n" +
+ "- \n" +
+ "- This paragraph contains\n" +
+ "- text that is outdated.\n" +
+ "- It will be deleted in the\n" +
+ "- near future.\n" +
+ " \n" +
+ " It is important to spell\n" +
+ "+ check this document. On\n" +
+ "- check this dokument. On\n" +
+ " the other hand, a\n" +
+ " misspelled word isn't\n" +
+ " the end of the world.\n" +
+ "@@ -22,3 +23,7 @@\n" +
+ " this paragraph needs to\n" +
+ " be changed. Things can\n" +
+ " be added after it.\n" +
+ "+ \n" +
+ "+ This paragraph contains\n" +
+ "+ important new additions\n" +
+ "+ to this document.\n", text1, text2);
+ }
+
+ private static void assertDiff(String expected, String left, String right) {
+ assertEquals(Optional.ofNullable(expected),
+ LinesComparator.diff(left.lines().collect(Collectors.toList()), right.lines().collect(Collectors.toList())));
+ }
+}
\ No newline at end of file
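(Editor's sketch, not part of the patch.) LinesComparator.diff is exercised above but not shown. A minimal LCS-based line diff conveys the idea, though it emits a flat +/- listing rather than the grouped @@ hunks, and not necessarily the exact +/- ordering, asserted in the test:

    import java.util.ArrayList;
    import java.util.List;

    /** Minimal LCS-based line diff; an illustration only, not the LinesComparator used in the patch. */
    final class SimpleLineDiff {
        /** Returns lines prefixed "  " (unchanged), "- " (removed) or "+ " (added). */
        static List<String> diff(List<String> left, List<String> right) {
            int n = left.size(), m = right.size();
            int[][] lcs = new int[n + 1][m + 1]; // lcs[i][j] = LCS length of left[i..] and right[j..]
            for (int i = n - 1; i >= 0; i--)
                for (int j = m - 1; j >= 0; j--)
                    lcs[i][j] = left.get(i).equals(right.get(j))
                            ? lcs[i + 1][j + 1] + 1
                            : Math.max(lcs[i + 1][j], lcs[i][j + 1]);

            List<String> out = new ArrayList<>();
            int i = 0, j = 0;
            while (i < n && j < m) {
                if (left.get(i).equals(right.get(j))) { out.add("  " + left.get(i)); i++; j++; }
                else if (lcs[i + 1][j] >= lcs[i][j + 1]) { out.add("- " + left.get(i)); i++; }
                else { out.add("+ " + right.get(j)); j++; }
            }
            while (i < n) out.add("- " + left.get(i++));
            while (j < m) out.add("+ " + right.get(j++));
            return out;
        }
    }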
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReaderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReaderTest.java
index abd234f0fa4..afbd232f01c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ZipStreamReaderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/pkg/ZipStreamReaderTest.java
@@ -1,5 +1,5 @@
// Copyright 2019 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.application;
+package com.yahoo.vespa.hosted.controller.application.pkg;
import com.yahoo.security.KeyAlgorithm;
import com.yahoo.security.KeyUtils;
@@ -38,15 +38,15 @@ public class ZipStreamReaderTest {
public void test_size_limit() {
Map<String, String> entries = Map.of("foo.xml", "foobar");
try {
- new ZipStreamReader(new ByteArrayInputStream(zip(entries)), "foo.xml"::equals, 1);
+ new ZipStreamReader(new ByteArrayInputStream(zip(entries)), "foo.xml"::equals, 1, true);
fail("Expected exception");
} catch (IllegalArgumentException ignored) {}
entries = Map.of("foo.xml", "foobar",
"foo.jar", "0".repeat(100) // File not extracted and thus not subject to size limit
);
- ZipStreamReader reader = new ZipStreamReader(new ByteArrayInputStream(zip(entries)), "foo.xml"::equals,10);
- byte[] extracted = reader.entries().get(0).content();
+ ZipStreamReader reader = new ZipStreamReader(new ByteArrayInputStream(zip(entries)), "foo.xml"::equals, 10, true);
+ byte[] extracted = reader.entries().get(0).contentOrThrow();
assertEquals("foobar", new String(extracted, StandardCharsets.UTF_8));
}
@@ -65,7 +65,7 @@ public class ZipStreamReaderTest {
);
tests.forEach((name, expectException) -> {
try {
- new ZipStreamReader(new ByteArrayInputStream(zip(Map.of(name, "foo"))), name::equals, 1024);
+ new ZipStreamReader(new ByteArrayInputStream(zip(Map.of(name, "foo"))), name::equals, 1024, true);
assertFalse("Expected exception for '" + name + "'", expectException);
} catch (IllegalArgumentException ignored) {
assertTrue("Unexpected exception for '" + name + "'", expectException);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
index b234ab4960b..73b1489b088 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
@@ -10,7 +10,7 @@ import com.yahoo.security.SignatureAlgorithm;
import com.yahoo.security.X509CertificateBuilder;
import com.yahoo.security.X509CertificateUtils;
import com.yahoo.text.Text;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import javax.security.auth.x500.X500Principal;
import java.io.ByteArrayOutputStream;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
index 420d0be04ac..c225dcbe49d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentContext.java
@@ -28,7 +28,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
@@ -492,8 +492,8 @@ public class DeploymentContext {
Run run = jobs.last(job)
.filter(r -> r.id().type() == job.type())
.orElseThrow(() -> new AssertionError(job.type() + " is not among the active: " + jobs.active()));
- assertFalse(run.id() + " should not have failed yet", run.hasFailed());
- assertFalse(run.id() + " should not have ended yet", run.hasEnded());
+ assertFalse(run.id() + " should not have failed yet: " + run, run.hasFailed());
+ assertFalse(run.id() + " should not have ended yet: " + run, run.hasEnded());
return run;
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index d4c9425fc03..ad32266e290 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -8,7 +8,7 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import org.junit.Assert;
@@ -61,7 +61,7 @@ import static org.junit.Assert.assertTrue;
*/
public class DeploymentTriggerTest {
- private DeploymentTester tester = new DeploymentTester();
+ private final DeploymentTester tester = new DeploymentTester();
@Test
public void testTriggerFailing() {
@@ -1110,15 +1110,16 @@ public class DeploymentTriggerTest {
// System and staging tests both require unknown versions, and are broken.
tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsCentral1, "user", false);
app.runJob(productionCdUsCentral1)
- .abortJob(systemTest)
- .abortJob(stagingTest)
+ .jobAborted(systemTest)
+ .jobAborted(stagingTest)
.runJob(systemTest)
.runJob(stagingTest)
.runJob(productionCdAwsUsEast1a);
app.runJob(productionCdUsCentral1, cdPackage);
app.submit(cdPackage);
- app.runJob(systemTest);
+ app.jobAborted(systemTest)
+ .runJob(systemTest);
// Staging test requires unknown initial version, and is broken.
tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsCentral1, "user", false);
app.runJob(productionCdUsCentral1)
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
index d685c6a2354..780d2d226f3 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/InternalStepRunnerTest.java
@@ -25,7 +25,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMailer;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.config.ControllerConfig;
import com.yahoo.vespa.hosted.controller.integration.ZoneApiMock;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationStoreMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationStoreMock.java
index 59e2b6c04d8..521ff160a05 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationStoreMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ApplicationStoreMock.java
@@ -5,11 +5,11 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationStore;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterId;
+import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import java.time.Instant;
import java.util.Map;
@@ -30,7 +30,9 @@ public class ApplicationStoreMock implements ApplicationStore {
private static final byte[] tombstone = new byte[0];
private final Map<ApplicationId, Map<ApplicationVersion, byte[]>> store = new ConcurrentHashMap<>();
- private final Map<ApplicationId, Map<ZoneId, byte[]>> devStore = new ConcurrentHashMap<>();
+ private final Map<DeploymentId, byte[]> devStore = new ConcurrentHashMap<>();
+ private final Map<ApplicationId, Map<Long, byte[]>> diffs = new ConcurrentHashMap<>();
+ private final Map<DeploymentId, Map<Long, byte[]>> devDiffs = new ConcurrentHashMap<>();
private final Map<ApplicationId, NavigableMap<Instant, byte[]>> meta = new ConcurrentHashMap<>();
private final Map<DeploymentId, NavigableMap<Instant, byte[]>> metaManual = new ConcurrentHashMap<>();
@@ -43,15 +45,30 @@ public class ApplicationStoreMock implements ApplicationStore {
}
@Override
- public byte[] get(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion) {
- byte[] bytes = store.get(appId(tenant, application)).get(applicationVersion);
+ public byte[] get(DeploymentId deploymentId, ApplicationVersion applicationVersion) {
+ if (applicationVersion.isDeployedDirectly())
+ return requireNonNull(devStore.get(deploymentId));
+
+ TenantAndApplicationId tenantAndApplicationId = TenantAndApplicationId.from(deploymentId.applicationId());
+ byte[] bytes = store.get(appId(tenantAndApplicationId.tenant(), tenantAndApplicationId.application())).get(applicationVersion);
if (bytes == null)
- throw new IllegalArgumentException("No application package found for " + tenant + "." + application +
+ throw new IllegalArgumentException("No application package found for " + tenantAndApplicationId +
" with version " + applicationVersion.id());
return bytes;
}
@Override
+ public Optional<byte[]> getDiff(TenantName tenantName, ApplicationName applicationName, long buildNumber) {
+ return Optional.ofNullable(diffs.get(appId(tenantName, applicationName))).map(map -> map.get(buildNumber));
+ }
+
+ @Override
+ public void pruneDiffs(TenantName tenantName, ApplicationName applicationName, long beforeBuildNumber) {
+ Optional.ofNullable(diffs.get(appId(tenantName, applicationName)))
+ .ifPresent(map -> map.keySet().removeIf(buildNumber -> buildNumber < beforeBuildNumber));
+ }
+
+ @Override
public Optional<byte[]> find(TenantName tenant, ApplicationName application, long buildNumber) {
return store.getOrDefault(appId(tenant, application), Map.of()).entrySet().stream()
.filter(kv -> kv.getKey().buildNumber().orElse(Long.MIN_VALUE) == buildNumber)
@@ -60,9 +77,10 @@ public class ApplicationStoreMock implements ApplicationStore {
}
@Override
- public void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage) {
- store.putIfAbsent(appId(tenant, application), new ConcurrentHashMap<>());
- store.get(appId(tenant, application)).put(applicationVersion, applicationPackage);
+ public void put(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] applicationPackage, byte[] diff) {
+ store.computeIfAbsent(appId(tenant, application), __ -> new ConcurrentHashMap<>()).put(applicationVersion, applicationPackage);
+ applicationVersion.buildNumber().ifPresent(buildNumber ->
+ diffs.computeIfAbsent(appId(tenant, application), __ -> new ConcurrentHashMap<>()).put(buildNumber, diff));
}
@Override
@@ -83,8 +101,8 @@ public class ApplicationStoreMock implements ApplicationStore {
@Override
public void putTester(TenantName tenant, ApplicationName application, ApplicationVersion applicationVersion, byte[] testerPackage) {
- store.putIfAbsent(testerId(tenant, application), new ConcurrentHashMap<>());
- store.get(testerId(tenant, application)).put(applicationVersion, testerPackage);
+ store.computeIfAbsent(testerId(tenant, application), key -> new ConcurrentHashMap<>())
+ .put(applicationVersion, testerPackage);
}
@Override
@@ -99,14 +117,21 @@ public class ApplicationStoreMock implements ApplicationStore {
}
@Override
- public void putDev(ApplicationId application, ZoneId zone, byte[] applicationPackage) {
- devStore.putIfAbsent(application, new ConcurrentHashMap<>());
- devStore.get(application).put(zone, applicationPackage);
+ public Optional<byte[]> getDevDiff(DeploymentId deploymentId, long buildNumber) {
+ return Optional.ofNullable(devDiffs.get(deploymentId)).map(map -> map.get(buildNumber));
+ }
+
+ @Override
+ public void pruneDevDiffs(DeploymentId deploymentId, long beforeBuildNumber) {
+ Optional.ofNullable(devDiffs.get(deploymentId))
+ .ifPresent(map -> map.keySet().removeIf(buildNumber -> buildNumber < beforeBuildNumber));
}
@Override
- public byte[] getDev(ApplicationId application, ZoneId zone) {
- return requireNonNull(devStore.get(application).get(zone));
+ public void putDev(DeploymentId deploymentId, ApplicationVersion applicationVersion, byte[] applicationPackage, byte[] diff) {
+ devStore.put(deploymentId, applicationPackage);
+ applicationVersion.buildNumber().ifPresent(buildNumber ->
+ devDiffs.computeIfAbsent(deploymentId, __ -> new ConcurrentHashMap<>()).put(buildNumber, diff));
}
@Override
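(Editor's sketch, not part of the patch.) In the mock above, every put stores the package together with its diff keyed by build number, and pruneDiffs / pruneDevDiffs drop diffs for builds older than the given one. The retention rule in isolation, on a plain map rather than the ApplicationStore API:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class DiffRetentionSketch {
        private final Map<Long, byte[]> diffsByBuild = new ConcurrentHashMap<>();

        void put(long buildNumber, byte[] diff) { diffsByBuild.put(buildNumber, diff); }

        /** Mirrors pruneDiffs(...): keep only diffs for builds at or after beforeBuildNumber. */
        void prune(long beforeBuildNumber) { diffsByBuild.keySet().removeIf(build -> build < beforeBuildNumber); }
    }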
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
index e18542108c0..cbdf5dcb075 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/ConfigServerMock.java
@@ -40,7 +40,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.TestReport;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TesterCloud;
import com.yahoo.vespa.hosted.controller.api.integration.noderepository.RestartFilter;
import com.yahoo.vespa.hosted.controller.api.integration.secrets.TenantSecretStore;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.serviceview.bindings.ApplicationView;
import com.yahoo.vespa.serviceview.bindings.ClusterView;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java
index d7934f08fee..bd50078bc87 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ArchiveUriUpdaterTest.java
@@ -9,7 +9,7 @@ import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.archive.ArchiveBucket;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeRepository;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
index 31f8aaf9e2d..b27e30e9ed6 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
@@ -6,7 +6,7 @@ import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java
index af2c0c6b08d..7ee1349f6d5 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentIssueReporterTest.java
@@ -7,7 +7,7 @@ import com.yahoo.vespa.hosted.controller.LockedTenant;
import com.yahoo.vespa.hosted.controller.api.integration.organization.Contact;
import com.yahoo.vespa.hosted.controller.api.integration.organization.IssueId;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.LoggingDeploymentIssues;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
index 8083b847c0b..0cb7b192b8b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentMetricsMaintainerTest.java
@@ -9,7 +9,7 @@ import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgraderTest.java
index 7a8f775e8b1..ec33c8a7048 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentUpgraderTest.java
@@ -5,21 +5,13 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.zone.ZoneId;
-import com.yahoo.vespa.hosted.controller.Instance;
-import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
-import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
-import com.yahoo.vespa.hosted.controller.deployment.Run;
-import com.yahoo.vespa.hosted.controller.deployment.RunStatus;
import org.junit.Test;
import java.time.Duration;
import java.time.Instant;
-import java.time.temporal.ChronoUnit;
-import java.time.temporal.TemporalUnit;
-import java.util.Optional;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.devUsEast1;
import static com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType.productionUsWest1;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
index 023c5671b60..7970e20f6c7 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/JobRunnerTest.java
@@ -7,7 +7,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationV
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.TenantAndApplicationId;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.deployment.JobController;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
index a9046a8e060..2fb5aee354b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/MetricsReporterTest.java
@@ -15,7 +15,7 @@ import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeFilter;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java
index 00d39788e38..9e8842243c0 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/OutstandingChangeDeployerTest.java
@@ -4,7 +4,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.SourceRevision;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
index 5ef64b460b9..a255a6c37d8 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ResourceMeterMaintainerTest.java
@@ -12,7 +12,7 @@ import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.integration.MetricsMock;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java
index df93efab893..3fd9afe5445 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/RetriggerMaintainerTest.java
@@ -5,7 +5,7 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.deployment.RetriggerEntry;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java
index f4e688379e5..3c99034761c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TenantRoleMaintainerTest.java
@@ -6,7 +6,7 @@ import com.yahoo.config.provision.TenantName;
import com.yahoo.vespa.hosted.controller.Instance;
import com.yahoo.vespa.hosted.controller.api.integration.aws.MockRoleService;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import org.hamcrest.Matchers;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
index 29d77c38b1a..f2f71b9a5b8 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/TrafficShareUpdaterTest.java
@@ -7,7 +7,7 @@ import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.ClusterMetrics;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.integration.NodeRepositoryMock;
import org.junit.Test;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index 91b5dc232e5..1dd4ef24c9e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -6,7 +6,7 @@ import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.RunId;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
index 37a173ffc37..2ae45c75cae 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
@@ -88,7 +88,8 @@ public class ApplicationSerializerTest {
Optional.of(Version.fromString("1.2.3")),
Optional.of(Instant.ofEpochMilli(666)),
Optional.empty(),
- Optional.of("best commit"));
+ Optional.of("best commit"),
+ true);
assertEquals("https://github/org/repo/tree/commit1", applicationVersion1.sourceUrl().get());
ApplicationVersion applicationVersion2 = ApplicationVersion
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java
index 03a050db74e..7623a02f6af 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/RunSerializerTest.java
@@ -82,13 +82,16 @@ public class RunSerializerTest {
assertEquals(running, run.status());
assertEquals(3, run.lastTestLogEntry());
assertEquals(new Version(1, 2, 3), run.versions().targetPlatform());
- ApplicationVersion applicationVersion = ApplicationVersion.from(new SourceRevision("git@github.com:user/repo.git",
- "master",
- "f00bad"),
+ ApplicationVersion applicationVersion = ApplicationVersion.from(Optional.of(new SourceRevision("git@github.com:user/repo.git",
+ "master",
+ "f00bad")),
123,
- "a@b",
- Version.fromString("6.3.1"),
- Instant.ofEpochMilli(100));
+ Optional.of("a@b"),
+ Optional.of(Version.fromString("6.3.1")),
+ Optional.of(Instant.ofEpochMilli(100)),
+ Optional.empty(),
+ Optional.empty(),
+ true);
assertEquals(applicationVersion, run.versions().targetApplication());
assertEquals(applicationVersion.authorEmail(), run.versions().targetApplication().authorEmail());
assertEquals(applicationVersion.buildTime(), run.versions().targetApplication().buildTime());
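A note on the widened factory exercised above: ApplicationVersion.from now takes the source revision, author email, compile version and build time as Optionals, two further optional fields, and a trailing boolean that appears to line up with the new "deployedDirectly" entry added to run-status.json below. A compact sketch of the call shape (a fragment, assuming the imports already present in RunSerializerTest):

    // Sketch: the new ApplicationVersion.from signature, as called in the test above.
    ApplicationVersion version = ApplicationVersion.from(
            Optional.of(new SourceRevision("git@github.com:user/repo.git", "master", "f00bad")),
            123,                                      // build number
            Optional.of("a@b"),                       // author email
            Optional.of(Version.fromString("6.3.1")), // compile version
            Optional.of(Instant.ofEpochMilli(100)),   // build time
            Optional.empty(),                         // optional field left unset in the test
            Optional.empty(),                         // optional field left unset in the test
            true);                                    // matches "deployedDirectly": true below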
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json
index 0f40dd27664..7b9131a38dd 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/testdata/run-status.json
@@ -40,6 +40,7 @@
"branch": "master",
"commit": "f00bad",
"build": 123,
+ "deployedDirectly": true,
"authorEmail": "a@b",
"compileVersion": "6.3.1",
"buildTime": 100,
@@ -48,7 +49,8 @@
"repository": "git@github.com:user/repo.git",
"branch": "master",
"commit": "badb17",
- "build": 122
+ "build": 122,
+ "deployedDirectly": false
}
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 9f366aa25e0..bf2cd039afd 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -52,7 +52,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.resource.MeteringData;
import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceAllocation;
import com.yahoo.vespa.hosted.controller.api.integration.resource.ResourceSnapshot;
import com.yahoo.vespa.hosted.controller.api.integration.stubs.MockMeteringClient;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Change;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentMetrics;
@@ -300,6 +300,13 @@ public class ApplicationApiTest extends ControllerContainerTest {
.data(createApplicationDeployData(applicationPackageInstance1, false)),
new File("deployment-job-accepted-2.json"));
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/job/dev-us-east-1/diff/1", GET).userIdentity(HOSTED_VESPA_OPERATOR),
+ (response) -> assertTrue(response.getBodyAsString(),
+ response.getBodyAsString().contains("--- search-definitions/test.sd\n" +
+ "@@ -1,0 +1,1 @@\n" +
+ "+ search test { }\n")),
+ 200);
+
// DELETE a dev deployment is allowed under user instance for tenant admins
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/instance/myuser/environment/dev/region/us-east-1", DELETE)
.userIdentity(USER_ID),
@@ -757,6 +764,12 @@ public class ApplicationApiTest extends ControllerContainerTest {
.data(createApplicationSubmissionData(packageWithService, 123)),
"{\"message\":\"Application package version: 1.0.2-commit1, source revision of repository 'repository1', branch 'master' with commit 'commit1', by a@b, built against 6.1 at 1970-01-01T00:00:01Z\"}");
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/diff/2", GET).userIdentity(HOSTED_VESPA_OPERATOR),
+ (response) -> assertTrue(response.getBodyAsString(),
+ response.getBodyAsString().contains("+ <deployment version='1.0' athenz-domain='domain1' athenz-service='service'>\n" +
+ "- <deployment version='1.0' >\n")),
+ 200);
+
// GET last submitted application package
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/package", GET).userIdentity(HOSTED_VESPA_OPERATOR),
(response) -> {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
index 72295497c03..de5ae466039 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/JobControllerApiHandlerHelperTest.java
@@ -9,7 +9,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServ
import com.yahoo.vespa.hosted.controller.api.integration.deployment.ApplicationVersion;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.TestReport;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import org.junit.Test;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json
index 2813bd0ab7d..abe3d4100d9 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment-overview.json
@@ -502,7 +502,9 @@
"status": "success",
"versions": {
"targetPlatform": "6.1.0",
- "targetApplication": {}
+ "targetApplication": {
+ "build": 1
+ }
},
"steps": [
{
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-aws-us-east-2a-runs.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-aws-us-east-2a-runs.json
index acde58f2a28..dce73ad56cd 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-aws-us-east-2a-runs.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-aws-us-east-2a-runs.json
@@ -8,7 +8,9 @@
"status": "success",
"versions": {
"targetPlatform": "7.1.0",
- "targetApplication": {}
+ "targetApplication": {
+ "build": 1
+ }
},
"steps": [
{
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-overview.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-overview.json
index e3beb371acd..92a823bdfc2 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-overview.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-overview.json
@@ -5,7 +5,9 @@
"runs": [
{
"versions": {
- "targetApplication": {},
+ "targetApplication": {
+ "build": 1
+ },
"targetPlatform": "6.1.0"
},
"start": 0,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json
index 72411d155c7..3ef993c6589 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/dev-us-east-1-log-first-part.json
@@ -6,7 +6,7 @@
{
"at": 0,
"type": "info",
- "message": "Deploying platform version 6.1 and application version unknown ..."
+ "message": "Deploying platform version 6.1 and application version 1.0.1 ..."
},
{
"at": 0,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json
index 9a742a9b176..7ebc2d24fe9 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/jobs.json
@@ -11,7 +11,9 @@
"status": "success",
"versions": {
"targetPlatform": "6.1.0",
- "targetApplication": {}
+ "targetApplication": {
+ "build": 1
+ }
},
"steps": [
{
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-user-instance.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-user-instance.json
index 2601937faee..f8aba54356b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-user-instance.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/overview-user-instance.json
@@ -5,7 +5,9 @@
"runs": [
{
"versions": {
- "targetApplication": {},
+ "targetApplication": {
+ "build": 1
+ },
"targetPlatform": "7.1.0"
},
"start": 14503000,
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java
index 63474ebb7c9..438da66e6e8 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/BadgeApiTest.java
@@ -3,7 +3,7 @@ package com.yahoo.vespa.hosted.controller.restapi.deployment;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java
index cd24ec170c5..460afb102d9 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/deployment/DeploymentApiTest.java
@@ -7,7 +7,7 @@ import com.yahoo.config.provision.HostName;
import com.yahoo.config.provision.zone.ZoneId;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiTest.java
new file mode 100644
index 00000000000..8e51f8210c7
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/HorizonApiTest.java
@@ -0,0 +1,67 @@
+package com.yahoo.vespa.hosted.controller.restapi.horizon;
+
+import com.yahoo.config.provision.SystemName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.vespa.flags.Flags;
+import com.yahoo.vespa.flags.InMemoryFlagSource;
+import com.yahoo.vespa.hosted.controller.api.integration.billing.PlanId;
+import com.yahoo.vespa.hosted.controller.api.role.Role;
+import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
+import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerCloudTest;
+import org.junit.Test;
+
+import java.util.Set;
+
+/**
+ * @author olaa
+ */
+public class HorizonApiTest extends ControllerContainerCloudTest {
+
+ @Test
+ public void only_operators_and_flag_enabled_tenants_allowed() {
+ ContainerTester tester = new ContainerTester(container, "");
+ TenantName tenantName = TenantName.defaultName();
+
+ tester.assertResponse(request("/horizon/v1/config/dashboard/topFolders")
+ .roles(Set.of(Role.hostedOperator())),
+ "", 200);
+
+ tester.assertResponse(request("/horizon/v1/config/dashboard/topFolders")
+ .roles(Set.of(Role.reader(tenantName))),
+ "{\"error-code\":\"FORBIDDEN\",\"message\":\"No tenant with enabled metrics view\"}", 403);
+
+ ((InMemoryFlagSource) tester.controller().flagSource())
+ .withBooleanFlag(Flags.ENABLED_HORIZON_DASHBOARD.id(), true);
+
+ tester.controller().serviceRegistry().billingController().setPlan(tenantName, PlanId.from("pay-as-you-go"), true);
+
+ tester.assertResponse(request("/horizon/v1/config/dashboard/topFolders")
+ .roles(Set.of(Role.reader(tenantName))),
+ "", 200);
+ }
+
+ @Override
+ protected SystemName system() {
+ return SystemName.PublicCd;
+ }
+
+ @Override
+ protected String variablePartXml() {
+ return " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControlRequests'/>\n" +
+ " <component id='com.yahoo.vespa.hosted.controller.security.CloudAccessControl'/>\n" +
+
+ " <handler id=\"com.yahoo.vespa.hosted.controller.restapi.horizon.HorizonApiHandler\" bundle=\"controller-server\">\n" +
+ " <binding>http://*/horizon/v1/*</binding>\n" +
+ " </handler>\n" +
+
+ " <http>\n" +
+ " <server id='default' port='8080' />\n" +
+ " <filtering>\n" +
+ " <request-chain id='default'>\n" +
+ " <filter id='com.yahoo.vespa.hosted.controller.restapi.filter.ControllerAuthorizationFilter'/>\n" +
+ " <binding>http://*/*</binding>\n" +
+ " </request-chain>\n" +
+ " </filtering>\n" +
+ " </http>\n";
+ }
+}
\ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java
index ab9d50f8eae..d31d9c28c6c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/horizon/TsdbQueryRewriterTest.java
@@ -22,33 +22,28 @@ public class TsdbQueryRewriterTest {
@Test
public void rewrites_query() throws IOException {
- assertRewrite("filters-complex.json", "filters-complex.expected.json", Role.reader(TenantName.from("tenant2")));
+ assertRewrite("filters-complex.json", "filters-complex.expected.json", Set.of(TenantName.from("tenant2")), false);
assertRewrite("filter-in-execution-graph.json",
"filter-in-execution-graph.expected.json",
- Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
+ Set.of(TenantName.from("tenant2"), TenantName.from("tenant3")), false);
assertRewrite("filter-in-execution-graph.json",
"filter-in-execution-graph.expected.operator.json",
- Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")), Role.hostedOperator());
+ Set.of(TenantName.from("tenant2"), TenantName.from("tenant3")), true);
assertRewrite("no-filters.json",
"no-filters.expected.json",
- Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
+ Set.of(TenantName.from("tenant2"), TenantName.from("tenant3")), false);
assertRewrite("filters-meta-query.json",
"filters-meta-query.expected.json",
- Role.reader(TenantName.from("tenant2")), Role.athenzTenantAdmin(TenantName.from("tenant3")));
+ Set.of(TenantName.from("tenant2"), TenantName.from("tenant3")), false);
}
- @Test(expected = TsdbQueryRewriter.UnauthorizedException.class)
- public void throws_if_no_roles() throws IOException {
- assertRewrite("filters-complex.json", "filters-complex.expected.json");
- }
-
- private static void assertRewrite(String initialFilename, String expectedFilename, Role... roles) throws IOException {
+ private static void assertRewrite(String initialFilename, String expectedFilename, Set<TenantName> tenants, boolean operator) throws IOException {
byte[] data = Files.readAllBytes(Paths.get("src/test/resources/horizon", initialFilename));
- data = TsdbQueryRewriter.rewrite(data, Set.of(roles), SystemName.Public);
+ data = TsdbQueryRewriter.rewrite(data, tenants, operator, SystemName.Public);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
new JsonFormat(false).encode(baos, SlimeUtils.jsonToSlime(data));
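The rewriter now takes an explicit tenant set and an operator flag instead of a set of roles. A minimal sketch of the new call, following the signature used by assertRewrite above (a fragment; imports as in the test):

    // Sketch: rewriting a TSDB query for two tenants, non-operator scope.
    byte[] data = Files.readAllBytes(Paths.get("src/test/resources/horizon", "no-filters.json"));
    Set<TenantName> tenants = Set.of(TenantName.from("tenant2"), TenantName.from("tenant3"));
    boolean operator = false;  // true produces the *.expected.operator.json variant above
    byte[] rewritten = TsdbQueryRewriter.rewrite(data, tenants, operator, SystemName.Public);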
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
index ae3dc68d9e3..e883993cb53 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/user/responses/user-with-applications-cloud.json
@@ -14,17 +14,20 @@
"roles": [
"developer",
"reader"
- ]
+ ],
+ "enabled-horizon-dashboard":false
},
"tenant1": {
"roles": [
"administrator"
- ]
+ ],
+ "enabled-horizon-dashboard":false
},
"tenant2": {
"roles": [
"developer"
- ]
+ ],
+ "enabled-horizon-dashboard":false
}
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
index aa9775f1d43..1b86a0930d4 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/rotation/RotationRepositoryTest.java
@@ -5,7 +5,7 @@ import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.zone.RoutingMethod;
import com.yahoo.vespa.hosted.controller.ControllerTester;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.AssignedRotation;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentContext;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
index 79b564eee52..3f805ba2916 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/routing/RoutingPoliciesTest.java
@@ -24,7 +24,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
import com.yahoo.vespa.hosted.controller.api.integration.dns.Record;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordData;
import com.yahoo.vespa.hosted.controller.api.integration.dns.RecordName;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.Endpoint;
import com.yahoo.vespa.hosted.controller.application.EndpointId;
import com.yahoo.vespa.hosted.controller.application.EndpointList;
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
index e3fef9f9066..a1108d5f03c 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
@@ -12,7 +12,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.configserver.Node;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeFilter;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobId;
import com.yahoo.vespa.hosted.controller.api.integration.deployment.JobType;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.pkg.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.SystemApplication;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
diff --git a/eval/src/vespa/eval/eval/llvm/addr_to_symbol.cpp b/eval/src/vespa/eval/eval/llvm/addr_to_symbol.cpp
index 566ab931837..99aeb390544 100644
--- a/eval/src/vespa/eval/eval/llvm/addr_to_symbol.cpp
+++ b/eval/src/vespa/eval/eval/llvm/addr_to_symbol.cpp
@@ -8,6 +8,7 @@
using vespalib::demangle;
using llvm::object::ObjectFile;
+using SymbolType = llvm::object::SymbolRef::Type;
namespace vespalib::eval {
@@ -15,6 +16,11 @@ namespace {
void my_local_test_symbol() {}
+bool symbol_is_data_or_function(SymbolType type)
+{
+ return ((type == SymbolType::ST_Data) || (type == SymbolType::ST_Function));
+}
+
} // <unnamed>
vespalib::string addr_to_symbol(const void *addr) {
@@ -42,7 +48,10 @@ vespalib::string addr_to_symbol(const void *addr) {
for (const auto &symbol: symbols) {
auto sym_name = symbol.getName();
auto sym_addr = symbol.getAddress();
- if (sym_name && sym_addr && (*sym_addr == offset)) {
+ auto sym_type = symbol.getType();
+ if (sym_name && sym_addr && sym_type &&
+ symbol_is_data_or_function(*sym_type) &&
+ (*sym_addr == offset)) {
return demangle(sym_name->str().c_str());
}
}
diff --git a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
index 0555fa6eb73..439b6c08205 100644
--- a/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
+++ b/flags/src/main/java/com/yahoo/vespa/flags/Flags.java
@@ -13,9 +13,7 @@ import java.util.Optional;
import java.util.TreeMap;
import static com.yahoo.vespa.flags.FetchVector.Dimension.APPLICATION_ID;
-import static com.yahoo.vespa.flags.FetchVector.Dimension.CLUSTER_TYPE;
import static com.yahoo.vespa.flags.FetchVector.Dimension.HOSTNAME;
-import static com.yahoo.vespa.flags.FetchVector.Dimension.NODE_TYPE;
import static com.yahoo.vespa.flags.FetchVector.Dimension.TENANT_ID;
import static com.yahoo.vespa.flags.FetchVector.Dimension.VESPA_VERSION;
import static com.yahoo.vespa.flags.FetchVector.Dimension.ZONE_ID;
@@ -251,13 +249,6 @@ public class Flags {
"Takes effect on next deployment through controller",
APPLICATION_ID);
- public static final UnboundBooleanFlag USE_REAL_RESOURCES = defineFeatureFlag(
- "use-real-resources", false,
- List.of("freva"), "2021-09-08", "2021-10-01",
- "Whether host-admin should use real resources (rather than advertised resources) when creating linux container and reporting metrics",
- "Takes effect on next host-admin tick",
- CLUSTER_TYPE, NODE_TYPE);
-
public static final UnboundListFlag<String> DEFER_APPLICATION_ENCRYPTION = defineListFlag(
"defer-application-encryption", List.of(), String.class,
List.of("mpolden", "hakonhall"), "2021-06-23", "2021-10-01",
@@ -298,6 +289,14 @@ public class Flags {
"Takes effect immediately",
ZONE_ID, APPLICATION_ID);
+ public static final UnboundBooleanFlag ENABLED_HORIZON_DASHBOARD = defineFeatureFlag(
+ "enabled-horizon-dashboard", false,
+ List.of("olaa"), "2021-09-13", "2021-12-31",
+ "Enable Horizon dashboard",
+ "Takes effect immediately",
+ TENANT_ID
+ );
+
/** WARNING: public for testing: All flags should be defined in {@link Flags}. */
public static UnboundBooleanFlag defineFeatureFlag(String flagId, boolean defaultValue, List<String> owners,
String createdAt, String expiresAt, String description,
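ENABLED_HORIZON_DASHBOARD is dimensioned on TENANT_ID and is the flag HorizonApiTest flips through an InMemoryFlagSource. A sketch of how a caller could evaluate it per tenant, assuming the same bindTo/with/value pattern this change removes from RealNodeRepository ('flagSource' and 'tenantName' are assumed to exist):

    // Sketch: per-tenant evaluation of the new flag.
    BooleanFlag horizonDashboard = Flags.ENABLED_HORIZON_DASHBOARD.bindTo(flagSource);
    boolean enabled = horizonDashboard
            .with(FetchVector.Dimension.TENANT_ID, tenantName.value())
            .value();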
diff --git a/hosted-zone-api/abi-spec.json b/hosted-zone-api/abi-spec.json
index e5d1db476c2..e3b68d09135 100644
--- a/hosted-zone-api/abi-spec.json
+++ b/hosted-zone-api/abi-spec.json
@@ -1,4 +1,16 @@
{
+ "ai.vespa.cloud.Cluster": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(int)",
+ "public int size()"
+ ],
+ "fields": []
+ },
"ai.vespa.cloud.Environment": {
"superClass": "java.lang.Enum",
"interfaces": [],
@@ -19,6 +31,20 @@
"public static final enum ai.vespa.cloud.Environment prod"
]
},
+ "ai.vespa.cloud.Node": {
+ "superClass": "java.lang.Object",
+ "interfaces": [],
+ "attributes": [
+ "public"
+ ],
+ "methods": [
+ "public void <init>(int)",
+ "public int index()",
+ "public boolean equals(java.lang.Object)",
+ "public int hashCode()"
+ ],
+ "fields": []
+ },
"ai.vespa.cloud.SystemInfo": {
"superClass": "java.lang.Object",
"interfaces": [],
@@ -26,8 +52,10 @@
"public"
],
"methods": [
- "public void <init>(ai.vespa.cloud.Zone)",
- "public ai.vespa.cloud.Zone zone()"
+ "public void <init>(ai.vespa.cloud.Zone, ai.vespa.cloud.Cluster, ai.vespa.cloud.Node)",
+ "public ai.vespa.cloud.Zone zone()",
+ "public ai.vespa.cloud.Cluster cluster()",
+ "public ai.vespa.cloud.Node node()"
],
"fields": []
},
diff --git a/hosted-zone-api/src/main/java/ai/vespa/cloud/Cluster.java b/hosted-zone-api/src/main/java/ai/vespa/cloud/Cluster.java
new file mode 100644
index 00000000000..6e064b09d7a
--- /dev/null
+++ b/hosted-zone-api/src/main/java/ai/vespa/cloud/Cluster.java
@@ -0,0 +1,19 @@
+package ai.vespa.cloud;
+
+/**
+ * The properties of a cluster of nodes.
+ *
+ * @author gjoranv
+ */
+public class Cluster {
+
+ private final int size;
+
+ public Cluster(int size) {
+ this.size = size;
+ }
+
+ /** Returns the number of nodes in this cluster. */
+ public int size() { return size; }
+
+}
diff --git a/hosted-zone-api/src/main/java/ai/vespa/cloud/Node.java b/hosted-zone-api/src/main/java/ai/vespa/cloud/Node.java
new file mode 100644
index 00000000000..19ef2757b6c
--- /dev/null
+++ b/hosted-zone-api/src/main/java/ai/vespa/cloud/Node.java
@@ -0,0 +1,34 @@
+package ai.vespa.cloud;
+
+import java.util.Objects;
+
+/**
+ * A node that is part of a cluster, e.g. a cluster of Jdisc containers.
+ *
+ * @author gjoranv
+ */
+public class Node {
+
+ private final int index;
+
+ public Node(int index) {
+ this.index = index;
+ }
+
+ /** Returns the unique index for this node in the cluster.
+ * Indices are non-negative, but not necessarily contiguous or starting from zero. */
+ public int index() { return index; }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Node node = (Node) o;
+ return index == node.index;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(index);
+ }
+}
diff --git a/hosted-zone-api/src/main/java/ai/vespa/cloud/SystemInfo.java b/hosted-zone-api/src/main/java/ai/vespa/cloud/SystemInfo.java
index 0ac93861275..c9500df4d7f 100644
--- a/hosted-zone-api/src/main/java/ai/vespa/cloud/SystemInfo.java
+++ b/hosted-zone-api/src/main/java/ai/vespa/cloud/SystemInfo.java
@@ -1,6 +1,8 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.cloud;
+import java.util.Objects;
+
/**
* Provides information about the system in which this container is running.
* This is available and can be injected when running in a cloud environment.
@@ -10,12 +12,25 @@ package ai.vespa.cloud;
public class SystemInfo {
private final Zone zone;
+ private final Cluster cluster;
+ private final Node node;
- public SystemInfo(Zone zone) {
+ public SystemInfo(Zone zone, Cluster cluster, Node node) {
+ Objects.requireNonNull(zone, "Zone cannot be null!");
+ Objects.requireNonNull(cluster, "Cluster cannot be null!");
+ Objects.requireNonNull(node, "Node cannot be null!");
this.zone = zone;
+ this.cluster = cluster;
+ this.node = node;
}
/** Returns the zone this is running in */
public Zone zone() { return zone; }
+ /** Returns the cluster this is part of */
+ public Cluster cluster() { return cluster; }
+
+ /** Returns the node this is running on */
+ public Node node() { return node; }
+
}
diff --git a/hosted-zone-api/src/main/java/ai/vespa/cloud/Zone.java b/hosted-zone-api/src/main/java/ai/vespa/cloud/Zone.java
index 48293aa7908..a6b69d12608 100644
--- a/hosted-zone-api/src/main/java/ai/vespa/cloud/Zone.java
+++ b/hosted-zone-api/src/main/java/ai/vespa/cloud/Zone.java
@@ -16,6 +16,8 @@ public class Zone {
private final String region;
public Zone(Environment environment, String region) {
+ Objects.requireNonNull(environment, "Environment cannot be null!");
+ Objects.requireNonNull(region, "Region cannot be null!");
this.environment = environment;
this.region = region;
}
diff --git a/hosted-zone-api/src/test/java/ai/vespa/cloud/SystemInfoTest.java b/hosted-zone-api/src/test/java/ai/vespa/cloud/SystemInfoTest.java
index 6bc8b395e00..6bdb38eb735 100644
--- a/hosted-zone-api/src/test/java/ai/vespa/cloud/SystemInfoTest.java
+++ b/hosted-zone-api/src/test/java/ai/vespa/cloud/SystemInfoTest.java
@@ -14,8 +14,13 @@ public class SystemInfoTest {
@Test
public void testSystemInfo() {
Zone zone = new Zone(Environment.dev, "us-west-1");
- SystemInfo info = new SystemInfo(zone);
+ Cluster cluster = new Cluster(1);
+ Node node = new Node(0);
+
+ SystemInfo info = new SystemInfo(zone, cluster, node);
assertEquals(zone, info.zone());
+ assertEquals(cluster, info.cluster());
+ assertEquals(node, info.node());
}
@Test
@@ -46,4 +51,18 @@ public class SystemInfoTest {
}
}
+ @Test
+ public void testCluster() {
+ int size = 1;
+ Cluster cluster = new Cluster(size);
+ assertEquals(size, cluster.size());
+ }
+
+ @Test
+ public void testNode() {
+ int index = 0;
+ Node node = new Node(index);
+ assertEquals(index, node.index());
+ }
+
}
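SystemInfo now also exposes the Cluster and Node the container runs in, and all three constructor arguments are null-checked. A minimal usage sketch of the extended API (values are illustrative):

    // Sketch: reading the new cluster/node information from an injected or constructed SystemInfo.
    SystemInfo info = new SystemInfo(new Zone(Environment.dev, "us-west-1"),
                                     new Cluster(3),  // a cluster of 3 nodes
                                     new Node(0));    // this node's index within the cluster
    int clusterSize = info.cluster().size();  // 3
    int nodeIndex = info.node().index();      // 0; non-negative, not necessarily contiguous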
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModel.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModel.java
index cf92cbc1e89..0152669ef78 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModel.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModel.java
@@ -13,7 +13,6 @@ import com.yahoo.tensor.TensorType;
import java.io.File;
import java.io.IOException;
-import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModels.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModels.java
deleted file mode 100644
index fc576df0f09..00000000000
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/ImportedModels.java
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package ai.vespa.rankingexpression.importer;
-
-import com.yahoo.path.Path;
-
-import java.io.File;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Optional;
-
-// TODO: Remove this class after November 2018
-public class ImportedModels {
-
- /** All imported models, indexed by their names */
- private final Map<String, ImportedModel> importedModels;
-
- /** Create a null imported models */
- public ImportedModels() {
- importedModels = Collections.emptyMap();
- }
-
- public ImportedModels(File modelsDirectory, Collection<ModelImporter> importers) {
- Map<String, ImportedModel> models = new HashMap<>();
-
- // Find all subdirectories recursively which contains a model we can read
- importRecursively(modelsDirectory, models, importers);
- importedModels = Collections.unmodifiableMap(models);
- }
-
- /**
- * Returns the model at the given location in the application package.
- *
- * @param modelPath the path to this model (file or directory, depending on model type)
- * under the application package, both from the root or relative to the
- * models directory works
- * @return the model at this path or null if none
- */
- public ImportedModel get(File modelPath) {
- return importedModels.get(toName(modelPath));
- }
-
- /** Returns an immutable collection of all the imported models */
- public Collection<ImportedModel> all() {
- return importedModels.values();
- }
-
- private static void importRecursively(File dir,
- Map<String, ImportedModel> models,
- Collection<ModelImporter> importers) {
- if ( ! dir.isDirectory()) return;
-
- Arrays.stream(dir.listFiles()).sorted().forEach(child -> {
- Optional<ModelImporter> importer = findImporterOf(child, importers);
- if (importer.isPresent()) {
- String name = toName(child);
- ImportedModel existing = models.get(name);
- if (existing != null)
- throw new IllegalArgumentException("The models in " + child + " and " + existing.source() +
- " both resolve to the model name '" + name + "'");
- models.put(name, importer.get().importModel(name, child));
- }
- else {
- importRecursively(child, models, importers);
- }
- });
- }
-
- private static Optional<ModelImporter> findImporterOf(File path, Collection<ModelImporter> importers) {
- return importers.stream().filter(item -> item.canImport(path.toString())).findFirst();
- }
-
- private static String toName(File modelFile) {
- Path modelPath = Path.fromString(modelFile.toString());
- if (modelFile.isFile())
- modelPath = stripFileEnding(modelPath);
- String localPath = concatenateAfterModelsDirectory(modelPath);
- return localPath.replace('.', '_');
- }
-
- private static Path stripFileEnding(Path path) {
- int dotIndex = path.last().lastIndexOf(".");
- if (dotIndex <= 0) return path;
- return path.withLast(path.last().substring(0, dotIndex));
- }
-
- private static String concatenateAfterModelsDirectory(Path path) {
- boolean afterModels = false;
- StringBuilder result = new StringBuilder();
- for (String element : path.elements()) {
- if (afterModels) result.append(element).append("_");
- if (element.equals("models")) afterModels = true;
- }
- return result.substring(0, result.length()-1);
- }
-
-}
diff --git a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java
index 4039de85e31..294a4782001 100644
--- a/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java
+++ b/model-integration/src/main/java/ai/vespa/rankingexpression/importer/configmodelview/ImportedMlModels.java
@@ -1,6 +1,7 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package ai.vespa.rankingexpression.importer.configmodelview;
+import com.yahoo.concurrent.InThreadExecutorService;
import com.yahoo.path.Path;
import java.io.File;
@@ -10,6 +11,10 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
/**
* All models imported from the models/ directory in the application package.
@@ -24,18 +29,35 @@ public class ImportedMlModels {
private final Map<String, ImportedMlModel> importedModels;
/** Models that were not imported due to some error */
- private final Map<String, String> skippedModels = new HashMap<>();
+ private final Map<String, String> skippedModels = new ConcurrentHashMap<>();
/** Create a null imported models */
public ImportedMlModels() {
importedModels = Collections.emptyMap();
}
+ /** Will disappear shortly */
+ @Deprecated
public ImportedMlModels(File modelsDirectory, Collection<MlModelImporter> importers) {
- Map<String, ImportedMlModel> models = new HashMap<>();
+ this(modelsDirectory, new InThreadExecutorService(), importers);
+ }
+
+ public ImportedMlModels(File modelsDirectory, ExecutorService executor, Collection<MlModelImporter> importers) {
+ Map<String, Future<ImportedMlModel>> futureModels = new HashMap<>();
// Find all subdirectories recursively which contains a model we can read
- importRecursively(modelsDirectory, models, importers, skippedModels);
+ importRecursively(modelsDirectory, executor, futureModels, importers, skippedModels);
+ Map<String, ImportedMlModel> models = new HashMap<>();
+ futureModels.forEach((name, future) -> {
+ try {
+ ImportedMlModel model = future.get();
+ if (model != null) {
+ models.put(name, model);
+ }
+ } catch (InterruptedException | ExecutionException e) {
+ skippedModels.put(name, e.getMessage());
+ }
+ });
importedModels = Collections.unmodifiableMap(models);
}
@@ -61,7 +83,8 @@ public class ImportedMlModels {
}
private static void importRecursively(File dir,
- Map<String, ImportedMlModel> models,
+ ExecutorService executor,
+ Map<String, Future<ImportedMlModel>> models,
Collection<MlModelImporter> importers,
Map<String, String> skippedModels) {
if ( ! dir.isDirectory()) return;
@@ -70,19 +93,26 @@ public class ImportedMlModels {
Optional<MlModelImporter> importer = findImporterOf(child, importers);
if (importer.isPresent()) {
String name = toName(child);
- ImportedMlModel existing = models.get(name);
- if (existing != null)
- throw new IllegalArgumentException("The models in " + child + " and " + existing.source() +
- " both resolve to the model name '" + name + "'");
- try {
- ImportedMlModel importedModel = importer.get().importModel(name, child);
- models.put(name, importedModel);
- } catch (RuntimeException e) {
- skippedModels.put(name, e.getMessage());
+ Future<ImportedMlModel> existing = models.get(name);
+ if (existing != null) {
+ try {
+ throw new IllegalArgumentException("The models in " + child + " and " + existing.get().source() +
+ " both resolve to the model name '" + name + "'");
+ } catch (InterruptedException | ExecutionException e) {}
}
+
+ Future<ImportedMlModel> future = executor.submit(() -> {
+ try {
+ return importer.get().importModel(name, child);
+ } catch (RuntimeException e) {
+ skippedModels.put(name, e.getMessage());
+ }
+ return null;
+ });
+ models.put(name, future);
}
else {
- importRecursively(child, models, importers, skippedModels);
+ importRecursively(child, executor, models, importers, skippedModels);
}
});
}
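Model import is now fanned out to an ExecutorService, with one task per detected model and failures recorded in skippedModels; the old single-threaded constructor delegates to an InThreadExecutorService and is deprecated. A sketch of the new constructor (a fragment; 'importers' is an existing Collection<MlModelImporter>, and the thread count is arbitrary):

    // Sketch: importing the models/ directory concurrently.
    ExecutorService executor = Executors.newFixedThreadPool(4);
    try {
        ImportedMlModels models = new ImportedMlModels(new File("models"), executor, importers);
        // Lookups on 'models' behave as before, but the imports themselves ran in parallel.
    } finally {
        executor.shutdown();
    }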
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/RealConfigServerClients.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/RealConfigServerClients.java
index 061a06f4687..7d52b9d72b0 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/RealConfigServerClients.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/RealConfigServerClients.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.hosted.node.admin.configserver;
import com.yahoo.vespa.flags.FlagRepository;
-import com.yahoo.vespa.flags.FlagSource;
import com.yahoo.vespa.hosted.node.admin.configserver.flags.RealFlagRepository;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.RealNodeRepository;
@@ -27,9 +26,9 @@ public class RealConfigServerClients implements ConfigServerClients {
/**
* @param configServerApi the backend API to use - will be closed at {@link #stop()}.
*/
- public RealConfigServerClients(ConfigServerApi configServerApi, FlagSource flagSource) {
+ public RealConfigServerClients(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
- nodeRepository = new RealNodeRepository(configServerApi, flagSource);
+ nodeRepository = new RealNodeRepository(configServerApi);
orchestrator = new OrchestratorImpl(configServerApi);
state = new StateImpl(configServerApi);
flagRepository = new RealFlagRepository(configServerApi);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/Event.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/Event.java
new file mode 100644
index 00000000000..ca374533940
--- /dev/null
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/Event.java
@@ -0,0 +1,54 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.configserver.noderepository;
+
+import java.time.Instant;
+import java.util.Objects;
+
+/**
+ * @author freva
+ */
+public class Event {
+ private final String agent;
+ private final String type;
+ private final Instant at;
+
+ public Event(String agent, String type, Instant at) {
+ this.agent = Objects.requireNonNull(agent);
+ this.type = Objects.requireNonNull(type);
+ this.at = Objects.requireNonNull(at);
+ }
+
+ public String agent() {
+ return agent;
+ }
+
+ public String type() {
+ return type;
+ }
+
+ public Instant at() {
+ return at;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Event event1 = (Event) o;
+ return agent.equals(event1.agent) && type.equals(event1.type) && at.equals(event1.at);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(agent, type, at);
+ }
+
+ @Override
+ public String toString() {
+ return "Event{" +
+ "agent='" + agent + '\'' +
+ ", type='" + type + '\'' +
+ ", at=" + at +
+ '}';
+ }
+}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
index 30bc1ef5ea3..e85d51ef992 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/NodeSpec.java
@@ -12,6 +12,7 @@ import com.yahoo.vespa.hosted.node.admin.task.util.file.DiskSize;
import java.net.URI;
import java.time.Instant;
import java.util.EnumSet;
+import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
@@ -60,6 +61,7 @@ public class NodeSpec {
private final Set<String> additionalIpAddresses;
private final NodeReports reports;
+ private final List<Event> events;
private final Optional<String> parentHostname;
private final Optional<URI> archiveUri;
@@ -93,6 +95,7 @@ public class NodeSpec {
Set<String> ipAddresses,
Set<String> additionalIpAddresses,
NodeReports reports,
+ List<Event> events,
Optional<String> parentHostname,
Optional<URI> archiveUri,
Optional<ApplicationId> exclusiveTo) {
@@ -128,9 +131,10 @@ public class NodeSpec {
this.currentFirmwareCheck = Objects.requireNonNull(currentFirmwareCheck);
this.resources = Objects.requireNonNull(resources);
this.realResources = Objects.requireNonNull(realResources);
- this.ipAddresses = Objects.requireNonNull(ipAddresses);
- this.additionalIpAddresses = Objects.requireNonNull(additionalIpAddresses);
+ this.ipAddresses = Set.copyOf(ipAddresses);
+ this.additionalIpAddresses = Set.copyOf(additionalIpAddresses);
this.reports = Objects.requireNonNull(reports);
+ this.events = List.copyOf(events);
this.parentHostname = Objects.requireNonNull(parentHostname);
this.archiveUri = Objects.requireNonNull(archiveUri);
this.exclusiveTo = Objects.requireNonNull(exclusiveTo);
@@ -263,6 +267,10 @@ public class NodeSpec {
public NodeReports reports() { return reports; }
+ public List<Event> events() {
+ return events;
+ }
+
public Optional<String> parentHostname() {
return parentHostname;
}
@@ -308,6 +316,7 @@ public class NodeSpec {
Objects.equals(ipAddresses, that.ipAddresses) &&
Objects.equals(additionalIpAddresses, that.additionalIpAddresses) &&
Objects.equals(reports, that.reports) &&
+ Objects.equals(events, that.events) &&
Objects.equals(parentHostname, that.parentHostname) &&
Objects.equals(archiveUri, that.archiveUri) &&
Objects.equals(exclusiveTo, that.exclusiveTo);
@@ -342,6 +351,7 @@ public class NodeSpec {
ipAddresses,
additionalIpAddresses,
reports,
+ events,
parentHostname,
archiveUri,
exclusiveTo);
@@ -376,6 +386,7 @@ public class NodeSpec {
+ " ipAddresses=" + ipAddresses
+ " additionalIpAddresses=" + additionalIpAddresses
+ " reports=" + reports
+ + " events=" + events
+ " parentHostname=" + parentHostname
+ " archiveUri=" + archiveUri
+ " exclusiveTo=" + exclusiveTo
@@ -409,6 +420,7 @@ public class NodeSpec {
private Set<String> ipAddresses = Set.of();
private Set<String> additionalIpAddresses = Set.of();
private NodeReports reports = new NodeReports();
+ private List<Event> events = List.of();
private Optional<String> parentHostname = Optional.empty();
private Optional<URI> archiveUri = Optional.empty();
private Optional<ApplicationId> exclusiveTo = Optional.empty();
@@ -428,6 +440,7 @@ public class NodeSpec {
currentRebootGeneration(node.currentRebootGeneration);
orchestratorStatus(node.orchestratorStatus);
reports(new NodeReports(node.reports));
+ events(node.events);
node.wantedDockerImage.ifPresent(this::wantedDockerImage);
node.currentDockerImage.ifPresent(this::currentDockerImage);
node.wantedVespaVersion.ifPresent(this::wantedVespaVersion);
@@ -600,6 +613,11 @@ public class NodeSpec {
return this;
}
+ public Builder events(List<Event> events) {
+ this.events = events;
+ return this;
+ }
+
public Builder parentHostname(String parentHostname) {
this.parentHostname = Optional.of(parentHostname);
return this;
@@ -714,6 +732,10 @@ public class NodeSpec {
return reports;
}
+ public List<Event> events() {
+ return events;
+ }
+
public Optional<String> parentHostname() {
return parentHostname;
}
@@ -730,7 +752,7 @@ public class NodeSpec {
wantedRebootGeneration, currentRebootGeneration,
wantedFirmwareCheck, currentFirmwareCheck, modelName,
resources, realResources, ipAddresses, additionalIpAddresses,
- reports, parentHostname, archiveUri, exclusiveTo);
+ reports, events, parentHostname, archiveUri, exclusiveTo);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java
index 8934100a463..abc779d8a9a 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepository.java
@@ -9,10 +9,6 @@ import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.host.FlavorOverrides;
-import com.yahoo.vespa.flags.BooleanFlag;
-import com.yahoo.vespa.flags.FetchVector;
-import com.yahoo.vespa.flags.FlagSource;
-import com.yahoo.vespa.flags.Flags;
import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi;
import com.yahoo.vespa.hosted.node.admin.configserver.HttpException;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.bindings.GetAclResponse;
@@ -41,11 +37,9 @@ public class RealNodeRepository implements NodeRepository {
private static final Logger logger = Logger.getLogger(RealNodeRepository.class.getName());
private final ConfigServerApi configServerApi;
- private final BooleanFlag useRealResourcesFlag;
- public RealNodeRepository(ConfigServerApi configServerApi, FlagSource flagSource) {
+ public RealNodeRepository(ConfigServerApi configServerApi) {
this.configServerApi = configServerApi;
- this.useRealResourcesFlag = Flags.USE_REAL_RESOURCES.bindTo(flagSource);
}
@Override
@@ -65,7 +59,7 @@ public class RealNodeRepository implements NodeRepository {
final GetNodesResponse nodesForHost = configServerApi.get(path, GetNodesResponse.class);
return nodesForHost.nodes.stream()
- .map(this::createNodeSpec)
+ .map(RealNodeRepository::createNodeSpec)
.collect(Collectors.toList());
}
@@ -75,7 +69,7 @@ public class RealNodeRepository implements NodeRepository {
NodeRepositoryNode nodeResponse = configServerApi.get("/nodes/v2/node/" + hostName,
NodeRepositoryNode.class);
- return Optional.ofNullable(nodeResponse).map(this::createNodeSpec);
+ return Optional.ofNullable(nodeResponse).map(RealNodeRepository::createNodeSpec);
} catch (HttpException.NotFoundException | HttpException.ForbiddenException e) {
// Return empty on 403 in addition to 404 as it likely means we're trying to access a node that
// has been deleted. When a node is deleted, the parent-child relationship no longer exists and
@@ -147,7 +141,7 @@ public class RealNodeRepository implements NodeRepository {
throw new NodeRepositoryException("Failed to set node state: " + response.message + " " + response.errorCode);
}
- private NodeSpec createNodeSpec(NodeRepositoryNode node) {
+ private static NodeSpec createNodeSpec(NodeRepositoryNode node) {
Objects.requireNonNull(node.type, "Unknown node type");
NodeType nodeType = NodeType.valueOf(node.type);
@@ -157,9 +151,10 @@ public class RealNodeRepository implements NodeRepository {
Optional<NodeMembership> membership = Optional.ofNullable(node.membership)
.map(m -> new NodeMembership(m.clusterType, m.clusterId, m.group, m.index, m.retired));
NodeReports reports = NodeReports.fromMap(Optional.ofNullable(node.reports).orElseGet(Map::of));
- boolean useRealResources = useRealResourcesFlag.with(FetchVector.Dimension.CLUSTER_TYPE, membership.map(m -> m.type().value()))
- .with(FetchVector.Dimension.NODE_TYPE, nodeType.name())
- .value();
+ List<Event> events = node.history.stream()
+ .map(event -> new Event(event.agent, event.event, Optional.ofNullable(event.at).map(Instant::ofEpochMilli).orElse(Instant.EPOCH)))
+ .collect(Collectors.toUnmodifiableList());
+
return new NodeSpec(
node.hostname,
Optional.ofNullable(node.openStackId),
@@ -183,10 +178,11 @@ public class RealNodeRepository implements NodeRepository {
Optional.ofNullable(node.currentFirmwareCheck).map(Instant::ofEpochMilli),
Optional.ofNullable(node.modelName),
nodeResources(node.resources),
- nodeResources(useRealResources ? node.realResources : node.resources),
+ nodeResources(node.realResources),
node.ipAddresses,
node.additionalIpAddresses,
reports,
+ events,
Optional.ofNullable(node.parentHostname),
Optional.ofNullable(node.archiveUri).map(URI::create),
Optional.ofNullable(node.exclusiveTo).map(ApplicationId::fromSerializedForm));
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java
index 86caab9bf51..4282c67b4cd 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/bindings/NodeRepositoryNode.java
@@ -6,6 +6,7 @@ import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.JsonNode;
+import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -82,6 +83,8 @@ public class NodeRepositoryNode {
public String archiveUri;
@JsonProperty("exclusiveTo")
public String exclusiveTo;
+ @JsonProperty("history")
+ public List<Event> history;
@JsonProperty("reports")
public Map<String, JsonNode> reports = null;
@@ -123,6 +126,7 @@ public class NodeRepositoryNode {
", archiveUri=" + archiveUri +
", reports=" + reports +
", exclusiveTo=" + exclusiveTo +
+ ", history=" + history +
'}';
}
@@ -198,4 +202,23 @@ public class NodeRepositoryNode {
}
}
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ @JsonInclude(JsonInclude.Include.NON_NULL)
+ public static class Event {
+ @JsonProperty
+ public String event;
+ @JsonProperty
+ public String agent;
+ @JsonProperty
+ public Long at;
+
+ @Override
+ public String toString() {
+ return "Event{" +
+ "agent=" + agent +
+ ", event=" + event +
+ ", at=" + at +
+ '}';
+ }
+ }
}
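
For reference, a small sketch of how Jackson could bind a history array to the new Event class above; the JSON payload and the EventBindingSketch wrapper class are invented for illustration:

    import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
    import com.fasterxml.jackson.annotation.JsonProperty;
    import com.fasterxml.jackson.core.type.TypeReference;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import java.util.List;

    // Sketch of binding a "history" array to an Event class shaped like the one above.
    public class EventBindingSketch {
        @JsonIgnoreProperties(ignoreUnknown = true)
        public static class Event {
            @JsonProperty public String event;
            @JsonProperty public String agent;
            @JsonProperty public Long at;
        }

        public static void main(String[] args) throws Exception {
            String json = "[{\"event\":\"reserved\",\"agent\":\"application\",\"at\":1630000000000}]";
            List<Event> history = new ObjectMapper().readValue(json, new TypeReference<List<Event>>() {});
            System.out.println(history.get(0).event + " by " + history.get(0).agent + " at " + history.get(0).at);
        }
    }
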
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
index 8c8f3d88a71..36668158dd6 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
@@ -79,7 +79,7 @@ public class NodeAdminImpl implements NodeAdmin {
@Override
public void refreshContainersToRun(Set<NodeAgentContext> nodeAgentContexts) {
Map<String, NodeAgentContext> nodeAgentContextsByHostname = nodeAgentContexts.stream()
- .collect(Collectors.toMap(nac -> nac.hostname().value(), Function.identity()));
+ .collect(Collectors.toMap(NodeAdminImpl::nodeAgentId, Function.identity()));
// Stop and remove NodeAgents that should no longer be running
diff(nodeAgentWithSchedulerByHostname.keySet(), nodeAgentContextsByHostname.keySet())
@@ -222,4 +222,14 @@ public class NodeAdminImpl implements NodeAdmin {
NodeAgent nodeAgent = nodeAgentFactory.create(contextManager, context);
return new NodeAgentWithScheduler(nodeAgent, contextManager);
}
+
+ private static String nodeAgentId(NodeAgentContext nac) {
+ // NodeAgentImpl has some internal state that should not be reused when the same hostname is re-allocated
+ // to a different application/cluster; solve this by including the reservation timestamp in the key.
+ return nac.hostname().value() + "-" + nac.node().events().stream()
+ .filter(event -> "reserved".equals(event.type()))
+ .findFirst()
+ .map(event -> Long.toString(event.at().toEpochMilli()))
+ .orElse("");
+ }
}
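
A self-contained sketch of the keying scheme introduced above, assuming simplified stand-in types (Event, NodeAgentKeySketch) rather than the real NodeAgentContext and NodeSpec event types:

    import java.time.Instant;
    import java.util.List;

    // Sketch of the key above: hostname plus the epoch millis of the first "reserved"
    // event, so a re-reserved host gets a fresh NodeAgent.
    public class NodeAgentKeySketch {
        record Event(String type, Instant at) {}

        static String nodeAgentId(String hostname, List<Event> events) {
            return hostname + "-" + events.stream()
                    .filter(event -> "reserved".equals(event.type()))
                    .findFirst()
                    .map(event -> Long.toString(event.at().toEpochMilli()))
                    .orElse("");
        }

        public static void main(String[] args) {
            List<Event> events = List.of(new Event("reserved", Instant.ofEpochMilli(1_630_000_000_000L)));
            System.out.println(nodeAgentId("host1.yahoo.com", events));  // host1.yahoo.com-1630000000000
        }
    }
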
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
index c24b2261f42..37ecc6c4e56 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
@@ -102,6 +102,8 @@ public class NodeAdminStateUpdater {
* with respect to: freeze, Orchestrator, and services running.
*/
public void converge(State wantedState) {
+ NodeSpec node = nodeRepository.getNode(hostHostname);
+ boolean hostIsActiveInNR = node.state() == NodeState.active;
if (wantedState == RESUMED) {
adjustNodeAgentsToRunFromNodeRepository();
} else if (currentState == TRANSITIONING && nodeAdmin.subsystemFreezeDuration().compareTo(FREEZE_CONVERGENCE_TIMEOUT) > 0) {
@@ -110,21 +112,18 @@ public class NodeAdminStateUpdater {
adjustNodeAgentsToRunFromNodeRepository();
nodeAdmin.setFrozen(false);
- NodeState currentNodeState = nodeRepository.getNode(hostHostname).state();
- if (currentNodeState == NodeState.active) orchestrator.resume(hostHostname);
+ if (hostIsActiveInNR) orchestrator.resume(hostHostname);
throw new ConvergenceException("Timed out trying to freeze all nodes: will force an unfrozen tick");
}
- if (currentState == wantedState) return;
+ boolean wantFrozen = wantedState != RESUMED;
+ if (currentState == wantedState && wantFrozen == node.orchestratorStatus().isSuspended()) return;
currentState = TRANSITIONING;
- boolean wantFrozen = wantedState != RESUMED;
- if (!nodeAdmin.setFrozen(wantFrozen)) {
+ if (!nodeAdmin.setFrozen(wantFrozen))
throw new ConvergenceException("NodeAdmin is not yet " + (wantFrozen ? "frozen" : "unfrozen"));
- }
- boolean hostIsActiveInNR = nodeRepository.getNode(hostHostname).state() == NodeState.active;
switch (wantedState) {
case RESUMED:
if (hostIsActiveInNR) orchestrator.resume(hostHostname);
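
A minimal sketch of the convergence shortcut added above, with plain booleans standing in for the real State and OrchestratorStatus types:

    // Even when the wanted state is unchanged, node-admin re-converges if the host's
    // suspension status in the Orchestrator disagrees with what that state implies.
    public class ConvergenceCheckSketch {
        static boolean alreadyConverged(boolean stateUnchanged, boolean wantFrozen, boolean hostSuspended) {
            return stateUnchanged && wantFrozen == hostSuspended;
        }

        public static void main(String[] args) {
            // Host was suspended externally while node-admin wants it RESUMED: must re-converge.
            System.out.println(alreadyConverged(true, false, true));  // false
            // Wanted state and orchestrator status agree: nothing to do.
            System.out.println(alreadyConverged(true, true, true));   // true
        }
    }
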
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java
index af88890f4a2..fe06812c608 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/configserver/noderepository/RealNodeRepositoryTest.java
@@ -7,7 +7,6 @@ import com.yahoo.config.provision.DockerImage;
import com.yahoo.config.provision.NodeResources;
import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.host.FlavorOverrides;
-import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApi;
import com.yahoo.vespa.hosted.node.admin.configserver.ConfigServerApiImpl;
import com.yahoo.vespa.hosted.provision.restapi.NodesV2ApiHandler;
@@ -80,7 +79,7 @@ public class RealNodeRepositoryTest {
private void waitForJdiscContainerToServe(ConfigServerApi configServerApi) throws InterruptedException {
Instant start = Instant.now();
- nodeRepositoryApi = new RealNodeRepository(configServerApi, new InMemoryFlagSource());
+ nodeRepositoryApi = new RealNodeRepository(configServerApi);
while (Instant.now().minusSeconds(120).isBefore(start)) {
try {
nodeRepositoryApi.getNodes("foobar");
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/ContainerTester.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/ContainerTester.java
index af30d3cbe56..1f3ab416db8 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/ContainerTester.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/ContainerTester.java
@@ -78,7 +78,7 @@ public class ContainerTester implements AutoCloseable {
for (int i = 1; i < 4; i++) ipAddresses.addAddress("host" + i + ".test.yahoo.com", "f000::" + i);
NodeSpec hostSpec = NodeSpec.Builder.testSpec(HOST_HOSTNAME.value()).type(NodeType.host).build();
- nodeRepository.updateNodeRepositoryNode(hostSpec);
+ nodeRepository.updateNodeSpec(hostSpec);
Clock clock = Clock.systemUTC();
Metrics metrics = new Metrics();
@@ -122,7 +122,7 @@ public class ContainerTester implements AutoCloseable {
", but that image does not exist in the container engine");
}
}
- nodeRepository.updateNodeRepositoryNode(new NodeSpec.Builder(nodeSpec)
+ nodeRepository.updateNodeSpec(new NodeSpec.Builder(nodeSpec)
.parentHostname(HOST_HOSTNAME.value())
.build());
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/NodeRepoMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/NodeRepoMock.java
index 5722de4cf90..0c986929de1 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/NodeRepoMock.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integration/NodeRepoMock.java
@@ -3,15 +3,17 @@ package com.yahoo.vespa.hosted.node.admin.integration;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.Acl;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.AddNode;
+import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NoSuchNodeException;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeAttributes;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState;
-import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Function;
import java.util.stream.Collectors;
/**
@@ -20,55 +22,60 @@ import java.util.stream.Collectors;
* @author dybis
*/
public class NodeRepoMock implements NodeRepository {
- private static final Object monitor = new Object();
- private final Map<String, NodeSpec> nodeRepositoryNodesByHostname = new HashMap<>();
+ private final Map<String, NodeSpec> nodeSpecByHostname = new ConcurrentHashMap<>();
+ private volatile Map<String, Acl> aclByHostname = Map.of();
@Override
public void addNodes(List<AddNode> nodes) { }
@Override
public List<NodeSpec> getNodes(String baseHostName) {
- synchronized (monitor) {
- return nodeRepositoryNodesByHostname.values().stream()
- .filter(node -> baseHostName.equals(node.parentHostname().orElse(null)))
- .collect(Collectors.toList());
- }
+ return nodeSpecByHostname.values().stream()
+ .filter(node -> baseHostName.equals(node.parentHostname().orElse(null)))
+ .collect(Collectors.toList());
}
@Override
public Optional<NodeSpec> getOptionalNode(String hostName) {
- synchronized (monitor) {
- return Optional.ofNullable(nodeRepositoryNodesByHostname.get(hostName));
- }
+ return Optional.ofNullable(nodeSpecByHostname.get(hostName));
}
@Override
public Map<String, Acl> getAcls(String hostname) {
- return Map.of();
+ return aclByHostname;
}
@Override
public void updateNodeAttributes(String hostName, NodeAttributes nodeAttributes) {
- synchronized (monitor) {
- updateNodeRepositoryNode(new NodeSpec.Builder(getNode(hostName))
- .updateFromNodeAttributes(nodeAttributes)
- .build());
- }
+ updateNodeSpec(new NodeSpec.Builder(getNode(hostName))
+ .updateFromNodeAttributes(nodeAttributes)
+ .build());
}
@Override
public void setNodeState(String hostName, NodeState nodeState) {
- synchronized (monitor) {
- updateNodeRepositoryNode(new NodeSpec.Builder(getNode(hostName))
- .state(nodeState)
- .build());
- }
+ updateNodeSpec(new NodeSpec.Builder(getNode(hostName))
+ .state(nodeState)
+ .build());
}
- public void updateNodeRepositoryNode(NodeSpec nodeSpec) {
- synchronized (monitor) {
- nodeRepositoryNodesByHostname.put(nodeSpec.hostname(), nodeSpec);
- }
+ public void updateNodeSpec(NodeSpec nodeSpec) {
+ nodeSpecByHostname.put(nodeSpec.hostname(), nodeSpec);
+ }
+
+ public void updateNodeSpec(String hostname, Function<NodeSpec.Builder, NodeSpec.Builder> mapper) {
+ nodeSpecByHostname.compute(hostname, (__, nodeSpec) -> {
+ if (nodeSpec == null) throw new NoSuchNodeException(hostname);
+ return mapper.apply(new NodeSpec.Builder(nodeSpec)).build();
+ });
+ }
+
+ public void resetNodeSpecs() {
+ nodeSpecByHostname.clear();
+ }
+
+ public void setAcl(Map<String, Acl> aclByHostname) {
+ this.aclByHostname = Map.copyOf(aclByHostname);
}
}
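
A small sketch of the lock-free update pattern the rewritten mock relies on, with a hypothetical Spec record and IllegalArgumentException standing in for NodeSpec and NoSuchNodeException:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    // Sketch of the compute-based update used by updateNodeSpec(hostname, mapper):
    // the entry is rewritten atomically and a missing host fails fast.
    public class ComputeUpdateSketch {
        record Spec(String hostname, String state) {}

        private final Map<String, Spec> specsByHostname = new ConcurrentHashMap<>();

        void update(String hostname, Function<Spec, Spec> mapper) {
            specsByHostname.compute(hostname, (unused, spec) -> {
                if (spec == null) throw new IllegalArgumentException("No such node: " + hostname);
                return mapper.apply(spec);
            });
        }

        public static void main(String[] args) {
            ComputeUpdateSketch repo = new ComputeUpdateSketch();
            repo.specsByHostname.put("host1.yahoo.com", new Spec("host1.yahoo.com", "active"));
            repo.update("host1.yahoo.com", spec -> new Spec(spec.hostname(), "dirty"));
            System.out.println(repo.specsByHostname.get("host1.yahoo.com"));  // Spec[hostname=host1.yahoo.com, state=dirty]
        }
    }
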
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java
index 53e283abb42..19d7e294367 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/servicedump/VespaServiceDumperImplTest.java
@@ -65,7 +65,7 @@ class VespaServiceDumperImplTest {
.report(ServiceDumpReport.REPORT_ID, request.toJsonNode())
.archiveUri(URI.create("s3://uri-1/tenant1/"))
.build();
- nodeRepository.updateNodeRepositoryNode(initialSpec);
+ nodeRepository.updateNodeSpec(initialSpec);
// Create dumper and invoke tested method
VespaServiceDumper reporter = new VespaServiceDumperImpl(operations, syncClient, nodeRepository, clock);
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
index 8ee3a95744b..e6fa4118542 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
@@ -2,13 +2,15 @@
package com.yahoo.vespa.hosted.node.admin.nodeadmin;
import com.yahoo.config.provision.HostName;
+import com.yahoo.config.provision.NodeType;
import com.yahoo.test.ManualClock;
import com.yahoo.vespa.flags.InMemoryFlagSource;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.Acl;
-import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeSpec;
import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.NodeState;
+import com.yahoo.vespa.hosted.node.admin.configserver.noderepository.OrchestratorStatus;
import com.yahoo.vespa.hosted.node.admin.configserver.orchestrator.Orchestrator;
+import com.yahoo.vespa.hosted.node.admin.integration.NodeRepoMock;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentContextFactory;
import org.junit.Test;
@@ -16,7 +18,6 @@ import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
-import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
@@ -45,7 +46,7 @@ import static org.mockito.Mockito.when;
*/
public class NodeAdminStateUpdaterTest {
private final NodeAgentContextFactory nodeAgentContextFactory = mock(NodeAgentContextFactory.class);
- private final NodeRepository nodeRepository = mock(NodeRepository.class);
+ private final NodeRepoMock nodeRepository = spy(new NodeRepoMock());
private final Orchestrator orchestrator = mock(Orchestrator.class);
private final NodeAdmin nodeAdmin = mock(NodeAdmin.class);
private final HostName hostHostname = HostName.from("basehost1.test.yahoo.com");
@@ -78,10 +79,17 @@ public class NodeAdminStateUpdaterTest {
verify(orchestrator, times(1)).resume(hostHostname.value());
verify(nodeAdmin, times(2)).setFrozen(eq(false));
+ // Host is externally suspended in the orchestrator and should be resumed by node-admin
+ setHostOrchestratorStatus(hostHostname, OrchestratorStatus.ALLOWED_TO_BE_DOWN);
+ updater.converge(RESUMED);
+ verify(orchestrator, times(2)).resume(hostHostname.value());
+ verify(nodeAdmin, times(3)).setFrozen(eq(false));
+ setHostOrchestratorStatus(hostHostname, OrchestratorStatus.NO_REMARKS);
+
// Let's try to suspend node admin only
when(nodeAdmin.setFrozen(eq(true))).thenReturn(false);
assertConvergeError(SUSPENDED_NODE_ADMIN, "NodeAdmin is not yet frozen");
- verify(nodeAdmin, times(2)).setFrozen(eq(false));
+ verify(nodeAdmin, times(3)).setFrozen(eq(false));
}
{
@@ -92,10 +100,24 @@ public class NodeAdminStateUpdaterTest {
doThrow(new RuntimeException(exceptionMessage)).doNothing()
.when(orchestrator).suspend(eq(hostHostname.value()));
assertConvergeError(SUSPENDED_NODE_ADMIN, exceptionMessage);
- verify(nodeAdmin, times(2)).setFrozen(eq(false));
+ verify(nodeAdmin, times(3)).setFrozen(eq(false));
updater.converge(SUSPENDED_NODE_ADMIN);
- verify(nodeAdmin, times(2)).setFrozen(eq(false));
+ verify(nodeAdmin, times(3)).setFrozen(eq(false));
+ verify(orchestrator, times(2)).suspend(hostHostname.value());
+ setHostOrchestratorStatus(hostHostname, OrchestratorStatus.ALLOWED_TO_BE_DOWN);
+
+ // Already suspended, no changes
+ updater.converge(SUSPENDED_NODE_ADMIN);
+ verify(nodeAdmin, times(3)).setFrozen(eq(false));
+ verify(orchestrator, times(2)).suspend(hostHostname.value());
+
+ // Host is externally resumed
+ setHostOrchestratorStatus(hostHostname, OrchestratorStatus.NO_REMARKS);
+ updater.converge(SUSPENDED_NODE_ADMIN);
+ verify(nodeAdmin, times(3)).setFrozen(eq(false));
+ verify(orchestrator, times(3)).suspend(hostHostname.value());
+ setHostOrchestratorStatus(hostHostname, OrchestratorStatus.ALLOWED_TO_BE_DOWN);
}
{
@@ -106,7 +128,7 @@ public class NodeAdminStateUpdaterTest {
assertConvergeError(SUSPENDED, exceptionMessage);
verify(orchestrator, times(1)).suspend(eq(hostHostname.value()), eq(suspendHostnames));
// Make sure we don't roll back if we fail to stop services - we will try to stop again next tick
- verify(nodeAdmin, times(2)).setFrozen(eq(false));
+ verify(nodeAdmin, times(3)).setFrozen(eq(false));
// Finally we are successful in transitioning to frozen
updater.converge(SUSPENDED);
@@ -238,20 +260,22 @@ public class NodeAdminStateUpdaterTest {
}
private void mockNodeRepo(NodeState hostState, int numberOfNodes) {
- List<NodeSpec> containersToRun = IntStream.range(1, numberOfNodes + 1)
- .mapToObj(i -> NodeSpec.Builder.testSpec("host" + i + ".yahoo.com").build())
- .collect(Collectors.toList());
+ nodeRepository.resetNodeSpecs();
+
+ IntStream.rangeClosed(1, numberOfNodes)
+ .mapToObj(i -> NodeSpec.Builder.testSpec("host" + i + ".yahoo.com").parentHostname(hostHostname.value()).build())
+ .forEach(nodeRepository::updateNodeSpec);
- when(nodeRepository.getNodes(eq(hostHostname.value()))).thenReturn(containersToRun);
- when(nodeRepository.getNode(eq(hostHostname.value()))).thenReturn(
- NodeSpec.Builder.testSpec(hostHostname.value(), hostState).build());
+ nodeRepository.updateNodeSpec(NodeSpec.Builder.testSpec(hostHostname.value(), hostState).type(NodeType.host).build());
}
private void mockAcl(Acl acl, int... nodeIds) {
- Map<String, Acl> aclByHostname = Arrays.stream(nodeIds)
+ nodeRepository.setAcl(Arrays.stream(nodeIds)
.mapToObj(i -> "host" + i + ".yahoo.com")
- .collect(Collectors.toMap(Function.identity(), h -> acl));
+ .collect(Collectors.toMap(Function.identity(), h -> acl)));
+ }
- when(nodeRepository.getAcls(eq(hostHostname.value()))).thenReturn(aclByHostname);
+ private void setHostOrchestratorStatus(HostName hostname, OrchestratorStatus orchestratorStatus) {
+ nodeRepository.updateNodeSpec(hostname.value(), node -> node.orchestratorStatus(orchestratorStatus));
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
index 4d67c83a179..968220f8d5e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Nodes.java
@@ -236,13 +236,15 @@ public class Nodes {
* transaction commits.
*/
public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
+ if ( ! zone.environment().isProduction() || zone.system().isCd())
+ return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());
+
var stateless = NodeList.copyOf(nodes).stateless();
var stateful = NodeList.copyOf(nodes).stateful();
List<Node> written = new ArrayList<>();
written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
return written;
-
}
/**
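
A compact sketch of the new deactivation decision, using hypothetical enum stand-ins for the real Zone, Environment and SystemName types:

    // Outside production, or in the CD system, deactivated nodes are deallocated
    // directly instead of being written to inactive.
    public class DeactivateDecisionSketch {
        enum Environment { prod, perf, test; boolean isProduction() { return this == prod; } }
        enum SystemName { main, cd; boolean isCd() { return this == cd; } }

        static boolean deallocateDirectly(Environment environment, SystemName system) {
            return !environment.isProduction() || system.isCd();
        }

        public static void main(String[] args) {
            System.out.println(deallocateDirectly(Environment.prod, SystemName.main));  // false -> stateful nodes go inactive
            System.out.println(deallocateDirectly(Environment.prod, SystemName.cd));    // true  -> deallocated (dirty)
            System.out.println(deallocateDirectly(Environment.perf, SystemName.main));  // true  -> deallocated (dirty)
        }
    }
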
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
index f6db9a45a61..edc83c2c74e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RebalancerTest.java
@@ -89,12 +89,13 @@ public class RebalancerTest {
tester.nodeRepository().nodes().deactivate(List.of(cpuSkewedNode),
new ApplicationTransaction(new ProvisionLock(cpuApp, () -> {}), tx));
tx.commit();
+ assertEquals(1, tester.getNodes(Node.State.dirty).size());
// ... if activation fails when trying, we clean up the state
tester.deployer().setFailActivate(true);
tester.maintain();
assertTrue("Want to retire is reset", tester.getNodes(Node.State.active).stream().noneMatch(node -> node.status().wantToRetire()));
- assertEquals("Reserved node was moved to dirty", 1, tester.getNodes(Node.State.dirty).size());
+ assertEquals("Reserved node was moved to dirty", 2, tester.getNodes(Node.State.dirty).size());
String reservedHostname = tester.getNodes(Node.State.dirty).first().get().hostname();
tester.nodeRepository().nodes().setReady(reservedHostname, Agent.system, "Cleanup");
tester.nodeRepository().nodes().removeRecursively(reservedHostname);
@@ -163,12 +164,12 @@ public class RebalancerTest {
static class RebalancerTester {
- static ApplicationId cpuApp = makeApplicationId("t1", "a1");
- static ApplicationId memoryApp = makeApplicationId("t2", "a2");
- private static NodeResources cpuResources = new NodeResources(8, 4, 10, 0.1);
- private static NodeResources memResources = new NodeResources(4, 9, 10, 0.1);
- private TestMetric metric = new TestMetric();
- private ProvisioningTester tester = new ProvisioningTester.Builder()
+ static final ApplicationId cpuApp = makeApplicationId("t1", "a1");
+ static final ApplicationId memoryApp = makeApplicationId("t2", "a2");
+ private static final NodeResources cpuResources = new NodeResources(8, 4, 10, 0.1);
+ private static final NodeResources memResources = new NodeResources(4, 9, 10, 0.1);
+ private final TestMetric metric = new TestMetric();
+ private final ProvisioningTester tester = new ProvisioningTester.Builder()
.zone(new Zone(Environment.perf, RegionName.from("us-east")))
.flavorsConfig(flavorsConfig())
.build();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index cd89cea60e3..277884a71c6 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -39,6 +39,7 @@ import java.util.List;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
+import java.util.function.BiConsumer;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
@@ -967,6 +968,24 @@ public class ProvisioningTest {
newNodes.stream().map(n -> n.membership().get().cluster().type()).collect(Collectors.toSet()));
}
+ @Test
+ public void transitions_directly_to_dirty_in_cd() {
+ ApplicationId application = ProvisioningTester.applicationId();
+ ClusterSpec cluster = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("music")).vespaVersion("1.2.3").build();
+ Capacity capacity = Capacity.from(new ClusterResources(2, 1, defaultResources));
+
+ BiConsumer<Zone, Node.State> stateAsserter = (zone, state) -> {
+ ProvisioningTester tester = new ProvisioningTester.Builder().zone(zone).build();
+ tester.makeReadyHosts(2, defaultResources).activateTenantHosts();
+ tester.activate(application, tester.prepare(application, cluster, capacity));
+ tester.deactivate(application);
+ assertEquals(2, tester.getNodes(application, state).size());
+ };
+
+ stateAsserter.accept(new Zone(Environment.prod, RegionName.from("us-east")), Node.State.inactive);
+ stateAsserter.accept(new Zone(SystemName.cd, Environment.prod, RegionName.from("us-east")), Node.State.dirty);
+ }
+
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size,
int content1Size, NodeResources flavor, ProvisioningTester tester) {
return prepare(application, tester, container0Size, container1Size, content0Size, content1Size, flavor, "6.42");
diff --git a/searchcore/CMakeLists.txt b/searchcore/CMakeLists.txt
index 2d5eb8dbc4f..c76f35bd9ff 100644
--- a/searchcore/CMakeLists.txt
+++ b/searchcore/CMakeLists.txt
@@ -20,6 +20,7 @@ vespa_define_module(
fileacquirer
LIBS
+ src/vespa/searchcore/bmcluster
src/vespa/searchcore/config
src/vespa/searchcore/grouping
src/vespa/searchcore/proton/attribute
diff --git a/searchcore/src/apps/vespa-feed-bm/CMakeLists.txt b/searchcore/src/apps/vespa-feed-bm/CMakeLists.txt
index fe83c89d83a..daefef5d413 100644
--- a/searchcore/src/apps/vespa-feed-bm/CMakeLists.txt
+++ b/searchcore/src/apps/vespa-feed-bm/CMakeLists.txt
@@ -2,39 +2,7 @@
vespa_add_executable(searchcore_vespa_feed_bm_app
SOURCES
vespa_feed_bm.cpp
- bm_cluster_controller.cpp
- bm_message_bus.cpp
- bm_storage_chain_builder.cpp
- bm_storage_link.cpp
- bucket_info_queue.cpp
- document_api_message_bus_bm_feed_handler.cpp
- pending_tracker.cpp
- pending_tracker_hash.cpp
- spi_bm_feed_handler.cpp
- storage_api_chain_bm_feed_handler.cpp
- storage_api_message_bus_bm_feed_handler.cpp
- storage_api_rpc_bm_feed_handler.cpp
- storage_reply_error_checker.cpp
OUTPUT_NAME vespa-feed-bm
DEPENDS
- searchcore_server
- searchcore_initializer
- searchcore_reprocessing
- searchcore_index
- searchcore_persistenceengine
- searchcore_docsummary
- searchcore_feedoperation
- searchcore_matching
- searchcore_attribute
- searchcore_documentmetastore
- searchcore_bucketdb
- searchcore_flushengine
- searchcore_pcommon
- searchcore_grouping
- searchcore_proton_metrics
- searchcore_fconfig
- storageserver_storageapp
- messagebus_messagebus-test
- messagebus
- searchlib_searchlib_uca
+ searchcore_bmcluster
)
diff --git a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
index cd1920d237f..940dcf35449 100644
--- a/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
+++ b/searchcore/src/apps/vespa-feed-bm/vespa_feed_bm.cpp
@@ -1,80 +1,24 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "bm_cluster_controller.h"
-#include "bm_message_bus.h"
-#include "bm_storage_chain_builder.h"
-#include "bm_storage_link_context.h"
-#include "pending_tracker.h"
-#include "spi_bm_feed_handler.h"
-#include "storage_api_chain_bm_feed_handler.h"
-#include "storage_api_message_bus_bm_feed_handler.h"
-#include "storage_api_rpc_bm_feed_handler.h"
-#include "document_api_message_bus_bm_feed_handler.h"
-#include <tests/proton/common/dummydbowner.h>
-#include <vespa/config-attributes.h>
-#include <vespa/config-bucketspaces.h>
-#include <vespa/config-imported-fields.h>
-#include <vespa/config-indexschema.h>
-#include <vespa/config-persistence.h>
-#include <vespa/config-rank-profiles.h>
-#include <vespa/config-slobroks.h>
-#include <vespa/config-stor-distribution.h>
-#include <vespa/config-stor-filestor.h>
-#include <vespa/config-summary.h>
-#include <vespa/config-summarymap.h>
-#include <vespa/config-upgrading.h>
-#include <vespa/config/common/configcontext.h>
-#include <vespa/document/datatype/documenttype.h>
-#include <vespa/document/fieldset/fieldsetrepo.h>
-#include <vespa/document/fieldvalue/intfieldvalue.h>
#include <vespa/document/repo/configbuilder.h>
#include <vespa/document/repo/document_type_repo_factory.h>
#include <vespa/document/repo/documenttyperepo.h>
-#include <vespa/document/test/make_bucket_space.h>
-#include <vespa/document/update/assignvalueupdate.h>
-#include <vespa/document/update/documentupdate.h>
#include <vespa/fastos/app.h>
-#include <vespa/messagebus/config-messagebus.h>
-#include <vespa/messagebus/testlib/slobrok.h>
-#include <vespa/metrics/config-metricsmanager.h>
-#include <vespa/searchcommon/common/schemaconfigurer.h>
-#include <vespa/searchcore/proton/common/alloc_config.h>
-#include <vespa/searchcore/proton/common/hw_info.h>
-#include <vespa/searchcore/proton/matching/querylimiter.h>
-#include <vespa/searchcore/proton/metrics/metricswireservice.h>
-#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h>
-#include <vespa/searchcore/proton/persistenceengine/persistenceengine.h>
-#include <vespa/searchcore/proton/server/bootstrapconfig.h>
-#include <vespa/searchcore/proton/server/document_db_maintenance_config.h>
-#include <vespa/searchcore/proton/server/documentdb.h>
-#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
-#include <vespa/searchcore/proton/server/fileconfigmanager.h>
-#include <vespa/searchcore/proton/server/memoryconfigstore.h>
-#include <vespa/searchcore/proton/server/persistencehandlerproxy.h>
-#include <vespa/searchcore/proton/server/threading_service_config.h>
-#include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h>
+#include <vespa/searchcore/bmcluster/bm_cluster.h>
+#include <vespa/searchcore/bmcluster/bm_cluster_controller.h>
+#include <vespa/searchcore/bmcluster/bm_cluster_params.h>
+#include <vespa/searchcore/bmcluster/bm_feed.h>
+#include <vespa/searchcore/bmcluster/bm_node.h>
+#include <vespa/searchcore/bmcluster/bm_range.h>
+#include <vespa/searchcore/bmcluster/bucket_selector.h>
+#include <vespa/searchcore/bmcluster/spi_bm_feed_handler.h>
#include <vespa/searchlib/index/dummyfileheadercontext.h>
-#include <vespa/searchlib/transactionlog/translogserver.h>
-#include <vespa/searchsummary/config/config-juniperrc.h>
-#include <vespa/slobrok/sbmirror.h>
-#include <vespa/storage/bucketdb/config-stor-bucket-init.h>
-#include <vespa/storage/common/i_storage_chain_builder.h>
-#include <vespa/storage/config/config-stor-bouncer.h>
-#include <vespa/storage/config/config-stor-communicationmanager.h>
-#include <vespa/storage/config/config-stor-distributormanager.h>
-#include <vespa/storage/config/config-stor-opslogger.h>
-#include <vespa/storage/config/config-stor-prioritymapping.h>
-#include <vespa/storage/config/config-stor-server.h>
-#include <vespa/storage/config/config-stor-status.h>
-#include <vespa/storage/config/config-stor-visitordispatcher.h>
-#include <vespa/storage/storageserver/rpc/shared_rpc_resources.h>
-#include <vespa/storage/visiting/config-stor-visitor.h>
-#include <vespa/storageserver/app/distributorprocess.h>
-#include <vespa/storageserver/app/servicelayerprocess.h>
#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/util/size_literals.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
#include <getopt.h>
#include <iostream>
#include <thread>
@@ -82,82 +26,31 @@
#include <vespa/log/log.h>
LOG_SETUP("vespa-feed-bm");
-using namespace cloud::config::filedistribution;
-using namespace config;
using namespace proton;
using namespace std::chrono_literals;
-using namespace vespa::config::search::core;
-using namespace vespa::config::search::summary;
-using namespace vespa::config::search;
-using vespa::config::content::PersistenceConfigBuilder;
-using vespa::config::content::StorDistributionConfigBuilder;
-using vespa::config::content::StorFilestorConfigBuilder;
-using vespa::config::content::UpgradingConfigBuilder;
-using vespa::config::content::core::BucketspacesConfig;
-using vespa::config::content::core::BucketspacesConfigBuilder;
-using vespa::config::content::core::StorBouncerConfigBuilder;
-using vespa::config::content::core::StorBucketInitConfigBuilder;
-using vespa::config::content::core::StorCommunicationmanagerConfigBuilder;
-using vespa::config::content::core::StorDistributormanagerConfigBuilder;
-using vespa::config::content::core::StorOpsloggerConfigBuilder;
-using vespa::config::content::core::StorPrioritymappingConfigBuilder;
-using vespa::config::content::core::StorServerConfigBuilder;
-using vespa::config::content::core::StorStatusConfigBuilder;
-using vespa::config::content::core::StorVisitorConfigBuilder;
-using vespa::config::content::core::StorVisitordispatcherConfigBuilder;
-using cloud::config::SlobroksConfigBuilder;
-using messagebus::MessagebusConfigBuilder;
-using metrics::MetricsmanagerConfigBuilder;
-using config::ConfigContext;
-using config::ConfigSet;
-using config::ConfigUri;
-using config::IConfigContext;
-using document::AssignValueUpdate;
-using document::BucketId;
-using document::BucketSpace;
-using document::Document;
-using document::DocumentId;
-using document::DocumentType;
using document::DocumentTypeRepo;
using document::DocumentTypeRepoFactory;
-using document::DocumentUpdate;
using document::DocumenttypesConfig;
using document::DocumenttypesConfigBuilder;
-using document::Field;
-using document::FieldSetRepo;
-using document::FieldUpdate;
-using document::IntFieldValue;
-using document::test::makeBucketSpace;
-using feedbm::BmClusterController;
-using feedbm::BmMessageBus;
-using feedbm::BmStorageChainBuilder;
-using feedbm::BmStorageLinkContext;
-using feedbm::IBmFeedHandler;
-using feedbm::DocumentApiMessageBusBmFeedHandler;
-using feedbm::SpiBmFeedHandler;
-using feedbm::StorageApiChainBmFeedHandler;
-using feedbm::StorageApiMessageBusBmFeedHandler;
-using feedbm::StorageApiRpcBmFeedHandler;
-using search::TuneFileDocumentDB;
+using search::bmcluster::BmClusterController;
+using search::bmcluster::IBmFeedHandler;
+using search::bmcluster::BmClusterParams;
+using search::bmcluster::BmCluster;
+using search::bmcluster::BmFeed;
+using search::bmcluster::BmNode;
+using search::bmcluster::BmRange;
+using search::bmcluster::BucketSelector;
using search::index::DummyFileHeaderContext;
-using search::index::Schema;
-using search::index::SchemaBuilder;
-using search::transactionlog::TransLogServer;
-using storage::rpc::SharedRpcResources;
-using storage::rpc::StorageApiRpcService;
using storage::spi::PersistenceProvider;
-using vespalib::compression::CompressionConfig;
using vespalib::makeLambdaTask;
-using proton::ThreadingServiceConfig;
-
-using DocumentDBMap = std::map<DocTypeName, std::shared_ptr<DocumentDB>>;
namespace {
vespalib::string base_dir = "testdb";
+constexpr int base_port = 9017;
-std::shared_ptr<DocumenttypesConfig> make_document_type() {
+std::shared_ptr<DocumenttypesConfig> make_document_types() {
using Struct = document::config_builder::Struct;
using DataType = document::DataType;
document::config_builder::DocumenttypesConfigBuilderHelper builder;
@@ -165,130 +58,14 @@ std::shared_ptr<DocumenttypesConfig> make_document_type() {
return std::make_shared<DocumenttypesConfig>(builder.config());
}
-std::shared_ptr<AttributesConfig> make_attributes_config() {
- AttributesConfigBuilder builder;
- AttributesConfig::Attribute attribute;
- attribute.name = "int";
- attribute.datatype = AttributesConfig::Attribute::Datatype::INT32;
- builder.attribute.emplace_back(attribute);
- return std::make_shared<AttributesConfig>(builder);
-}
-
-std::shared_ptr<DocumentDBConfig> make_document_db_config(std::shared_ptr<DocumenttypesConfig> document_types, std::shared_ptr<const DocumentTypeRepo> repo, const DocTypeName& doc_type_name)
-{
- auto indexschema = std::make_shared<IndexschemaConfig>();
- auto attributes = make_attributes_config();
- auto summary = std::make_shared<SummaryConfig>();
- std::shared_ptr<Schema> schema(new Schema());
- SchemaBuilder::build(*indexschema, *schema);
- SchemaBuilder::build(*attributes, *schema);
- SchemaBuilder::build(*summary, *schema);
- return std::make_shared<DocumentDBConfig>(
- 1,
- std::make_shared<RankProfilesConfig>(),
- std::make_shared<matching::RankingConstants>(),
- std::make_shared<matching::RankingExpressions>(),
- std::make_shared<matching::OnnxModels>(),
- indexschema,
- attributes,
- summary,
- std::make_shared<SummarymapConfig>(),
- std::make_shared<JuniperrcConfig>(),
- document_types,
- repo,
- std::make_shared<ImportedFieldsConfig>(),
- std::make_shared<TuneFileDocumentDB>(),
- schema,
- std::make_shared<DocumentDBMaintenanceConfig>(),
- search::LogDocumentStore::Config(),
- std::make_shared<const ThreadingServiceConfig>(ThreadingServiceConfig::make(1)),
- std::make_shared<const AllocConfig>(),
- "client",
- doc_type_name.getName());
-}
-
-void
-make_slobroks_config(SlobroksConfigBuilder& slobroks, int slobrok_port)
-{
- SlobroksConfigBuilder::Slobrok slobrok;
- slobrok.connectionspec = vespalib::make_string("tcp/localhost:%d", slobrok_port);
- slobroks.slobrok.push_back(std::move(slobrok));
-}
-
-void
-make_bucketspaces_config(BucketspacesConfigBuilder &bucketspaces)
-{
- BucketspacesConfigBuilder::Documenttype bucket_space_map;
- bucket_space_map.name = "test";
- bucket_space_map.bucketspace = "default";
- bucketspaces.documenttype.emplace_back(std::move(bucket_space_map));
-}
-
-class MyPersistenceEngineOwner : public IPersistenceEngineOwner
-{
- void setClusterState(BucketSpace, const storage::spi::ClusterState &) override { }
-};
-
-struct MyResourceWriteFilter : public IResourceWriteFilter
-{
- bool acceptWriteOperation() const override { return true; }
- State getAcceptState() const override { return IResourceWriteFilter::State(); }
-};
-
-class BucketSelector
-{
- uint32_t _thread_id;
- uint32_t _threads;
- uint32_t _num_buckets;
-public:
- BucketSelector(uint32_t thread_id_in, uint32_t threads_in, uint32_t num_buckets_in)
- : _thread_id(thread_id_in),
- _threads(threads_in),
- _num_buckets((num_buckets_in / _threads) * _threads)
- {
- }
- uint64_t operator()(uint32_t i) const {
- return (static_cast<uint64_t>(i) * _threads + _thread_id) % _num_buckets;
- }
-};
-
-class BMRange
-{
- uint32_t _start;
- uint32_t _end;
-public:
- BMRange(uint32_t start_in, uint32_t end_in)
- : _start(start_in),
- _end(end_in)
- {
- }
- uint32_t get_start() const { return _start; }
- uint32_t get_end() const { return _end; }
-};
-
-class BMParams {
+class BMParams : public BmClusterParams {
uint32_t _documents;
uint32_t _client_threads;
uint32_t _get_passes;
- vespalib::string _indexing_sequencer;
uint32_t _put_passes;
uint32_t _update_passes;
uint32_t _remove_passes;
- uint32_t _rpc_network_threads;
- uint32_t _rpc_events_before_wakeup;
- uint32_t _rpc_targets_per_node;
- uint32_t _response_threads;
uint32_t _max_pending;
- bool _enable_distributor;
- bool _enable_service_layer;
- bool _skip_get_spi_bucket_info;
- bool _use_document_api;
- bool _use_message_bus;
- bool _use_storage_chain;
- bool _use_async_message_handling_on_schedule;
- uint32_t _bucket_db_stripe_bits;
- uint32_t _distributor_stripes;
- bool _skip_communicationmanager_thread;
uint32_t get_start(uint32_t thread_id) const {
return (_documents / _client_threads) * thread_id + std::min(thread_id, _documents % _client_threads);
}
@@ -297,82 +74,38 @@ public:
: _documents(160000),
_client_threads(1),
_get_passes(0),
- _indexing_sequencer(),
_put_passes(2),
_update_passes(1),
_remove_passes(2),
- _rpc_network_threads(1), // Same default as previous in stor-communicationmanager.def
- _rpc_events_before_wakeup(1), // Same default as in stor-communicationmanager.def
- _rpc_targets_per_node(1), // Same default as in stor-communicationmanager.def
- _response_threads(2), // Same default as in stor-filestor.def
- _max_pending(1000),
- _enable_distributor(false),
- _enable_service_layer(false),
- _skip_get_spi_bucket_info(false),
- _use_document_api(false),
- _use_message_bus(false),
- _use_storage_chain(false),
- _use_async_message_handling_on_schedule(false),
- _bucket_db_stripe_bits(0),
- _distributor_stripes(0),
- _skip_communicationmanager_thread(false) // Same default as in stor-communicationmanager.def
+ _max_pending(1000)
{
}
- BMRange get_range(uint32_t thread_id) const {
- return BMRange(get_start(thread_id), get_start(thread_id + 1));
+ BmRange get_range(uint32_t thread_id) const {
+ return BmRange(get_start(thread_id), get_start(thread_id + 1));
}
uint32_t get_documents() const { return _documents; }
uint32_t get_max_pending() const { return _max_pending; }
uint32_t get_client_threads() const { return _client_threads; }
uint32_t get_get_passes() const { return _get_passes; }
- const vespalib::string & get_indexing_sequencer() const { return _indexing_sequencer; }
uint32_t get_put_passes() const { return _put_passes; }
uint32_t get_update_passes() const { return _update_passes; }
uint32_t get_remove_passes() const { return _remove_passes; }
- uint32_t get_rpc_network_threads() const { return _rpc_network_threads; }
- uint32_t get_rpc_events_before_wakup() const { return _rpc_events_before_wakeup; }
- uint32_t get_rpc_targets_per_node() const { return _rpc_targets_per_node; }
- uint32_t get_response_threads() const { return _response_threads; }
- bool get_enable_distributor() const { return _enable_distributor; }
- bool get_skip_get_spi_bucket_info() const { return _skip_get_spi_bucket_info; }
- bool get_use_document_api() const { return _use_document_api; }
- bool get_use_message_bus() const { return _use_message_bus; }
- bool get_use_storage_chain() const { return _use_storage_chain; }
- bool get_use_async_message_handling_on_schedule() const { return _use_async_message_handling_on_schedule; }
- uint32_t get_bucket_db_stripe_bits() const { return _bucket_db_stripe_bits; }
- uint32_t get_distributor_stripes() const { return _distributor_stripes; }
- bool get_skip_communicationmanager_thread() const { return _skip_communicationmanager_thread; }
void set_documents(uint32_t documents_in) { _documents = documents_in; }
void set_max_pending(uint32_t max_pending_in) { _max_pending = max_pending_in; }
void set_client_threads(uint32_t threads_in) { _client_threads = threads_in; }
void set_get_passes(uint32_t get_passes_in) { _get_passes = get_passes_in; }
- void set_indexing_sequencer(vespalib::stringref sequencer) { _indexing_sequencer = sequencer; }
void set_put_passes(uint32_t put_passes_in) { _put_passes = put_passes_in; }
void set_update_passes(uint32_t update_passes_in) { _update_passes = update_passes_in; }
void set_remove_passes(uint32_t remove_passes_in) { _remove_passes = remove_passes_in; }
- void set_rpc_network_threads(uint32_t threads_in) { _rpc_network_threads = threads_in; }
- void set_rpc_events_before_wakeup(uint32_t value) { _rpc_events_before_wakeup = value; }
- void set_rpc_targets_per_node(uint32_t targets_in) { _rpc_targets_per_node = targets_in; }
- void set_response_threads(uint32_t threads_in) { _response_threads = threads_in; }
- void set_enable_distributor(bool value) { _enable_distributor = value; }
- void set_enable_service_layer(bool value) { _enable_service_layer = value; }
- void set_skip_get_spi_bucket_info(bool value) { _skip_get_spi_bucket_info = value; }
- void set_use_document_api(bool value) { _use_document_api = value; }
- void set_use_message_bus(bool value) { _use_message_bus = value; }
- void set_use_storage_chain(bool value) { _use_storage_chain = value; }
- void set_use_async_message_handling_on_schedule(bool value) { _use_async_message_handling_on_schedule = value; }
- void set_bucket_db_stripe_bits(uint32_t value) { _bucket_db_stripe_bits = value; }
- void set_distributor_stripes(uint32_t value) { _distributor_stripes = value; }
- void set_skip_communicationmanager_thread(bool value) { _skip_communicationmanager_thread = value; }
bool check() const;
- bool needs_service_layer() const { return _enable_service_layer || _enable_distributor || _use_storage_chain || _use_message_bus || _use_document_api; }
- bool needs_distributor() const { return _enable_distributor || _use_document_api; }
- bool needs_message_bus() const { return _use_message_bus || _use_document_api; }
};
bool
BMParams::check() const
{
+ if (!BmClusterParams::check()) {
+ return false;
+ }
if (_client_threads < 1) {
std::cerr << "Too few client threads: " << _client_threads << std::endl;
return false;
@@ -389,637 +122,37 @@ BMParams::check() const
std::cerr << "Put passes too low: " << _put_passes << std::endl;
return false;
}
- if (_rpc_network_threads < 1) {
- std::cerr << "Too few rpc network threads: " << _rpc_network_threads << std::endl;
- return false;
- }
- if (_rpc_targets_per_node < 1) {
- std::cerr << "Too few rpc targets per node: " << _rpc_targets_per_node << std::endl;
- return false;
- }
- if (_response_threads < 1) {
- std::cerr << "Too few response threads: " << _response_threads << std::endl;
- return false;
- }
return true;
}
-class MyServiceLayerProcess : public storage::ServiceLayerProcess {
- PersistenceProvider& _provider;
-
-public:
- MyServiceLayerProcess(const config::ConfigUri & configUri,
- PersistenceProvider &provider,
- std::unique_ptr<storage::IStorageChainBuilder> chain_builder);
- ~MyServiceLayerProcess() override { shutdown(); }
-
- void shutdown() override;
- void setupProvider() override;
- PersistenceProvider& getProvider() override;
-};
-
-MyServiceLayerProcess::MyServiceLayerProcess(const config::ConfigUri & configUri,
- PersistenceProvider &provider,
- std::unique_ptr<storage::IStorageChainBuilder> chain_builder)
- : ServiceLayerProcess(configUri),
- _provider(provider)
-{
- if (chain_builder) {
- set_storage_chain_builder(std::move(chain_builder));
- }
-}
-
-void
-MyServiceLayerProcess::shutdown()
-{
- ServiceLayerProcess::shutdown();
-}
-
-void
-MyServiceLayerProcess::setupProvider()
-{
-}
-
-PersistenceProvider&
-MyServiceLayerProcess::getProvider()
-{
- return _provider;
-}
-
-struct MyStorageConfig
-{
- vespalib::string config_id;
- DocumenttypesConfigBuilder documenttypes;
- StorDistributionConfigBuilder stor_distribution;
- StorBouncerConfigBuilder stor_bouncer;
- StorCommunicationmanagerConfigBuilder stor_communicationmanager;
- StorOpsloggerConfigBuilder stor_opslogger;
- StorPrioritymappingConfigBuilder stor_prioritymapping;
- UpgradingConfigBuilder upgrading;
- StorServerConfigBuilder stor_server;
- StorStatusConfigBuilder stor_status;
- BucketspacesConfigBuilder bucketspaces;
- MetricsmanagerConfigBuilder metricsmanager;
- SlobroksConfigBuilder slobroks;
- MessagebusConfigBuilder messagebus;
-
- MyStorageConfig(bool distributor, const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in,
- int slobrok_port, int mbus_port, int rpc_port, int status_port, const BMParams& params)
- : config_id(config_id_in),
- documenttypes(documenttypes_in),
- stor_distribution(),
- stor_bouncer(),
- stor_communicationmanager(),
- stor_opslogger(),
- stor_prioritymapping(),
- upgrading(),
- stor_server(),
- stor_status(),
- bucketspaces(),
- metricsmanager(),
- slobroks(),
- messagebus()
- {
- {
- auto &dc = stor_distribution;
- {
- StorDistributionConfigBuilder::Group group;
- {
- StorDistributionConfigBuilder::Group::Nodes node;
- node.index = 0;
- group.nodes.push_back(std::move(node));
- }
- group.index = "invalid";
- group.name = "invalid";
- group.capacity = 1.0;
- group.partitions = "";
- dc.group.push_back(std::move(group));
- }
- dc.redundancy = 1;
- dc.readyCopies = 1;
- }
- stor_server.isDistributor = distributor;
- stor_server.contentNodeBucketDbStripeBits = params.get_bucket_db_stripe_bits();
- if (distributor) {
- stor_server.rootFolder = "distributor";
- } else {
- stor_server.rootFolder = "storage";
- }
- make_slobroks_config(slobroks, slobrok_port);
- stor_communicationmanager.rpc.numNetworkThreads = params.get_rpc_network_threads();
- stor_communicationmanager.rpc.eventsBeforeWakeup = params.get_rpc_events_before_wakup();
- stor_communicationmanager.rpc.numTargetsPerNode = params.get_rpc_targets_per_node();
- stor_communicationmanager.mbusport = mbus_port;
- stor_communicationmanager.rpcport = rpc_port;
- stor_communicationmanager.skipThread = params.get_skip_communicationmanager_thread();
-
- stor_status.httpport = status_port;
- make_bucketspaces_config(bucketspaces);
- }
-
- ~MyStorageConfig();
-
- void add_builders(ConfigSet &set) {
- set.addBuilder(config_id, &documenttypes);
- set.addBuilder(config_id, &stor_distribution);
- set.addBuilder(config_id, &stor_bouncer);
- set.addBuilder(config_id, &stor_communicationmanager);
- set.addBuilder(config_id, &stor_opslogger);
- set.addBuilder(config_id, &stor_prioritymapping);
- set.addBuilder(config_id, &upgrading);
- set.addBuilder(config_id, &stor_server);
- set.addBuilder(config_id, &stor_status);
- set.addBuilder(config_id, &bucketspaces);
- set.addBuilder(config_id, &metricsmanager);
- set.addBuilder(config_id, &slobroks);
- set.addBuilder(config_id, &messagebus);
- }
-};
-
-MyStorageConfig::~MyStorageConfig() = default;
-
-struct MyServiceLayerConfig : public MyStorageConfig
-{
- PersistenceConfigBuilder persistence;
- StorFilestorConfigBuilder stor_filestor;
- StorBucketInitConfigBuilder stor_bucket_init;
- StorVisitorConfigBuilder stor_visitor;
-
- MyServiceLayerConfig(const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in,
- int slobrok_port, int mbus_port, int rpc_port, int status_port, const BMParams& params)
- : MyStorageConfig(false, config_id_in, documenttypes_in, slobrok_port, mbus_port, rpc_port, status_port, params),
- persistence(),
- stor_filestor(),
- stor_bucket_init(),
- stor_visitor()
- {
- stor_filestor.numResponseThreads = params.get_response_threads();
- stor_filestor.numNetworkThreads = params.get_rpc_network_threads();
- stor_filestor.useAsyncMessageHandlingOnSchedule = params.get_use_async_message_handling_on_schedule();
- }
-
- ~MyServiceLayerConfig();
-
- void add_builders(ConfigSet &set) {
- MyStorageConfig::add_builders(set);
- set.addBuilder(config_id, &persistence);
- set.addBuilder(config_id, &stor_filestor);
- set.addBuilder(config_id, &stor_bucket_init);
- set.addBuilder(config_id, &stor_visitor);
- }
-};
-
-MyServiceLayerConfig::~MyServiceLayerConfig() = default;
-
-struct MyDistributorConfig : public MyStorageConfig
-{
- StorDistributormanagerConfigBuilder stor_distributormanager;
- StorVisitordispatcherConfigBuilder stor_visitordispatcher;
-
- MyDistributorConfig(const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in,
- int slobrok_port, int mbus_port, int rpc_port, int status_port, const BMParams& params)
- : MyStorageConfig(true, config_id_in, documenttypes_in, slobrok_port, mbus_port, rpc_port, status_port, params),
- stor_distributormanager(),
- stor_visitordispatcher()
- {
- stor_distributormanager.numDistributorStripes = params.get_distributor_stripes();
- }
-
- ~MyDistributorConfig();
-
- void add_builders(ConfigSet &set) {
- MyStorageConfig::add_builders(set);
- set.addBuilder(config_id, &stor_distributormanager);
- set.addBuilder(config_id, &stor_visitordispatcher);
- }
-};
-
-MyDistributorConfig::~MyDistributorConfig() = default;
-
-struct MyRpcClientConfig {
- vespalib::string config_id;
- SlobroksConfigBuilder slobroks;
-
- MyRpcClientConfig(const vespalib::string &config_id_in, int slobrok_port)
- : config_id(config_id_in),
- slobroks()
- {
- make_slobroks_config(slobroks, slobrok_port);
- }
- ~MyRpcClientConfig();
-
- void add_builders(ConfigSet &set) {
- set.addBuilder(config_id, &slobroks);
- }
-};
-
-MyRpcClientConfig::~MyRpcClientConfig() = default;
-
-struct MyMessageBusConfig {
- vespalib::string config_id;
- SlobroksConfigBuilder slobroks;
- MessagebusConfigBuilder messagebus;
-
- MyMessageBusConfig(const vespalib::string &config_id_in, int slobrok_port)
- : config_id(config_id_in),
- slobroks(),
- messagebus()
- {
- make_slobroks_config(slobroks, slobrok_port);
- }
- ~MyMessageBusConfig();
-
- void add_builders(ConfigSet &set) {
- set.addBuilder(config_id, &slobroks);
- set.addBuilder(config_id, &messagebus);
- }
-};
-
-MyMessageBusConfig::~MyMessageBusConfig() = default;
-
}
struct PersistenceProviderFixture {
- std::shared_ptr<DocumenttypesConfig> _document_types;
+ std::shared_ptr<const DocumenttypesConfig> _document_types;
std::shared_ptr<const DocumentTypeRepo> _repo;
- DocTypeName _doc_type_name;
- const DocumentType* _document_type;
- const Field& _field;
- std::shared_ptr<DocumentDBConfig> _document_db_config;
- vespalib::string _base_dir;
- DummyFileHeaderContext _file_header_context;
- int _tls_listen_port;
- int _slobrok_port;
- int _rpc_client_port;
- int _service_layer_mbus_port;
- int _service_layer_rpc_port;
- int _service_layer_status_port;
- int _distributor_mbus_port;
- int _distributor_rpc_port;
- int _distributor_status_port;
- TransLogServer _tls;
- vespalib::string _tls_spec;
- matching::QueryLimiter _query_limiter;
- vespalib::Clock _clock;
- DummyWireService _metrics_wire_service;
- MemoryConfigStores _config_stores;
- vespalib::ThreadStackExecutor _summary_executor;
- DummyDBOwner _document_db_owner;
- BucketSpace _bucket_space;
- std::shared_ptr<DocumentDB> _document_db;
- MyPersistenceEngineOwner _persistence_owner;
- MyResourceWriteFilter _write_filter;
- test::DiskMemUsageNotifier _disk_mem_usage_notifier;
- std::shared_ptr<PersistenceEngine> _persistence_engine;
- std::unique_ptr<const FieldSetRepo> _field_set_repo;
- uint32_t _bucket_bits;
- MyServiceLayerConfig _service_layer_config;
- MyDistributorConfig _distributor_config;
- MyRpcClientConfig _rpc_client_config;
- MyMessageBusConfig _message_bus_config;
- ConfigSet _config_set;
- std::shared_ptr<IConfigContext> _config_context;
- std::unique_ptr<IBmFeedHandler> _feed_handler;
- std::unique_ptr<mbus::Slobrok> _slobrok;
- std::shared_ptr<BmStorageLinkContext> _service_layer_chain_context;
- std::unique_ptr<MyServiceLayerProcess> _service_layer;
- std::unique_ptr<SharedRpcResources> _rpc_client_shared_rpc_resources;
- std::shared_ptr<BmStorageLinkContext> _distributor_chain_context;
- std::unique_ptr<storage::DistributorProcess> _distributor;
- std::unique_ptr<BmMessageBus> _message_bus;
+ std::unique_ptr<BmCluster> _bm_cluster;
+ BmFeed _feed;
+ IBmFeedHandler* _feed_handler;
explicit PersistenceProviderFixture(const BMParams& params);
~PersistenceProviderFixture();
- void create_document_db(const BMParams & params);
- uint32_t num_buckets() const { return (1u << _bucket_bits); }
- BucketId make_bucket_id(uint32_t n) const { return BucketId(_bucket_bits, n & (num_buckets() - 1)); }
- document::Bucket make_bucket(uint32_t n) const { return document::Bucket(_bucket_space, make_bucket_id(n)); }
- DocumentId make_document_id(uint32_t n, uint32_t i) const;
- std::unique_ptr<Document> make_document(uint32_t n, uint32_t i) const;
- std::unique_ptr<DocumentUpdate> make_document_update(uint32_t n, uint32_t i) const;
- void create_buckets();
- void wait_slobrok(const vespalib::string &name);
- void start_service_layer(const BMParams& params);
- void start_distributor(const BMParams& params);
- void start_message_bus();
- void create_feed_handler(const BMParams& params);
- void shutdown_feed_handler();
- void shutdown_message_bus();
- void shutdown_distributor();
- void shutdown_service_layer();
};
PersistenceProviderFixture::PersistenceProviderFixture(const BMParams& params)
- : _document_types(make_document_type()),
- _repo(DocumentTypeRepoFactory::make(*_document_types)),
- _doc_type_name("test"),
- _document_type(_repo->getDocumentType(_doc_type_name.getName())),
- _field(_document_type->getField("int")),
- _document_db_config(make_document_db_config(_document_types, _repo, _doc_type_name)),
- _base_dir(base_dir),
- _file_header_context(),
- _tls_listen_port(9017),
- _slobrok_port(9018),
- _rpc_client_port(9019),
- _service_layer_mbus_port(9020),
- _service_layer_rpc_port(9021),
- _service_layer_status_port(9022),
- _distributor_mbus_port(9023),
- _distributor_rpc_port(9024),
- _distributor_status_port(9025),
- _tls("tls", _tls_listen_port, _base_dir, _file_header_context),
- _tls_spec(vespalib::make_string("tcp/localhost:%d", _tls_listen_port)),
- _query_limiter(),
- _clock(),
- _metrics_wire_service(),
- _config_stores(),
- _summary_executor(8, 128_Ki),
- _document_db_owner(),
- _bucket_space(makeBucketSpace(_doc_type_name.getName())),
- _document_db(),
- _persistence_owner(),
- _write_filter(),
- _disk_mem_usage_notifier(),
- _persistence_engine(),
- _field_set_repo(std::make_unique<const FieldSetRepo>(*_repo)),
- _bucket_bits(16),
- _service_layer_config("bm-servicelayer", *_document_types, _slobrok_port, _service_layer_mbus_port, _service_layer_rpc_port, _service_layer_status_port, params),
- _distributor_config("bm-distributor", *_document_types, _slobrok_port, _distributor_mbus_port, _distributor_rpc_port, _distributor_status_port, params),
- _rpc_client_config("bm-rpc-client", _slobrok_port),
- _message_bus_config("bm-message-bus", _slobrok_port),
- _config_set(),
- _config_context(std::make_shared<ConfigContext>(_config_set)),
- _feed_handler(),
- _slobrok(),
- _service_layer_chain_context(),
- _service_layer(),
- _rpc_client_shared_rpc_resources(),
- _distributor_chain_context(),
- _distributor(),
- _message_bus()
+ : _document_types(make_document_types()),
+ _repo(document::DocumentTypeRepoFactory::make(*_document_types)),
+ _bm_cluster(std::make_unique<BmCluster>(base_dir, base_port, params, _document_types, _repo)),
+ _feed(_repo),
+ _feed_handler(nullptr)
{
- create_document_db(params);
- _persistence_engine = std::make_unique<PersistenceEngine>(_persistence_owner, _write_filter, _disk_mem_usage_notifier, -1, false);
- auto proxy = std::make_shared<PersistenceHandlerProxy>(_document_db);
- _persistence_engine->putHandler(_persistence_engine->getWLock(), _bucket_space, _doc_type_name, proxy);
- _service_layer_config.add_builders(_config_set);
- _distributor_config.add_builders(_config_set);
- _rpc_client_config.add_builders(_config_set);
- _message_bus_config.add_builders(_config_set);
- _feed_handler = std::make_unique<SpiBmFeedHandler>(*_persistence_engine, *_field_set_repo, params.get_skip_get_spi_bucket_info());
+ _bm_cluster->make_nodes();
}
-PersistenceProviderFixture::~PersistenceProviderFixture()
-{
- if (_persistence_engine) {
- _persistence_engine->destroyIterators();
- _persistence_engine->removeHandler(_persistence_engine->getWLock(), _bucket_space, _doc_type_name);
- }
- if (_document_db) {
- _document_db->close();
- }
-}
-
-void
-PersistenceProviderFixture::create_document_db(const BMParams & params)
-{
- vespalib::mkdir(_base_dir, false);
- vespalib::mkdir(_base_dir + "/" + _doc_type_name.getName(), false);
- vespalib::string input_cfg = _base_dir + "/" + _doc_type_name.getName() + "/baseconfig";
- {
- FileConfigManager fileCfg(input_cfg, "", _doc_type_name.getName());
- fileCfg.saveConfig(*_document_db_config, 1);
- }
- config::DirSpec spec(input_cfg + "/config-1");
- auto tuneFileDocDB = std::make_shared<TuneFileDocumentDB>();
- DocumentDBConfigHelper mgr(spec, _doc_type_name.getName());
- auto protonCfg = std::make_shared<ProtonConfigBuilder>();
- if ( ! params.get_indexing_sequencer().empty()) {
- vespalib::string sequencer = params.get_indexing_sequencer();
- std::transform(sequencer.begin(), sequencer.end(), sequencer.begin(), [](unsigned char c){ return std::toupper(c); });
- protonCfg->indexing.optimize = ProtonConfig::Indexing::getOptimize(sequencer);
- }
- auto bootstrap_config = std::make_shared<BootstrapConfig>(1,
- _document_types,
- _repo,
- std::move(protonCfg),
- std::make_shared<FiledistributorrpcConfig>(),
- std::make_shared<BucketspacesConfig>(),
- tuneFileDocDB, HwInfo());
- mgr.forwardConfig(bootstrap_config);
- mgr.nextGeneration(0ms);
- _document_db = DocumentDB::create(_base_dir, mgr.getConfig(), _tls_spec, _query_limiter, _clock, _doc_type_name,
- _bucket_space, *bootstrap_config->getProtonConfigSP(), _document_db_owner,
- _summary_executor, _summary_executor, *_persistence_engine, _tls,
- _metrics_wire_service, _file_header_context,
- _config_stores.getConfigStore(_doc_type_name.toString()),
- std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), HwInfo());
- _document_db->start();
- _document_db->waitForOnlineState();
-}
-
-DocumentId
-PersistenceProviderFixture::make_document_id(uint32_t n, uint32_t i) const
-{
- DocumentId id(vespalib::make_string("id::test:n=%u:%u", n & (num_buckets() - 1), i));
- return id;
-}
-
-std::unique_ptr<Document>
-PersistenceProviderFixture::make_document(uint32_t n, uint32_t i) const
-{
- auto id = make_document_id(n, i);
- auto document = std::make_unique<Document>(*_document_type, id);
- document->setRepo(*_repo);
- document->setFieldValue(_field, std::make_unique<IntFieldValue>(i));
- return document;
-}
-
-std::unique_ptr<DocumentUpdate>
-PersistenceProviderFixture::make_document_update(uint32_t n, uint32_t i) const
-{
- auto id = make_document_id(n, i);
- auto document_update = std::make_unique<DocumentUpdate>(*_repo, *_document_type, id);
- document_update->addUpdate(FieldUpdate(_field).addUpdate(AssignValueUpdate(IntFieldValue(15))));
- return document_update;
-}
-
-void
-PersistenceProviderFixture::create_buckets()
-{
- SpiBmFeedHandler feed_handler(*_persistence_engine, *_field_set_repo, false);
- for (unsigned int i = 0; i < num_buckets(); ++i) {
- feed_handler.create_bucket(make_bucket(i));
- }
-}
-
-void
-PersistenceProviderFixture::wait_slobrok(const vespalib::string &name)
-{
- auto &mirror = _rpc_client_shared_rpc_resources->slobrok_mirror();
- LOG(info, "Waiting for %s in slobrok", name.c_str());
- for (;;) {
- auto specs = mirror.lookup(name);
- if (!specs.empty()) {
- LOG(info, "Found %s in slobrok", name.c_str());
- return;
- }
- std::this_thread::sleep_for(100ms);
- }
-}
-
-void
-PersistenceProviderFixture::start_service_layer(const BMParams& params)
-{
- LOG(info, "start slobrok");
- _slobrok = std::make_unique<mbus::Slobrok>(_slobrok_port);
- LOG(info, "start service layer");
- config::ConfigUri config_uri("bm-servicelayer", _config_context);
- std::unique_ptr<BmStorageChainBuilder> chain_builder;
- if (params.get_use_storage_chain() && !params.needs_distributor()) {
- chain_builder = std::make_unique<BmStorageChainBuilder>();
- _service_layer_chain_context = chain_builder->get_context();
- }
- _service_layer = std::make_unique<MyServiceLayerProcess>(config_uri,
- *_persistence_engine,
- std::move(chain_builder));
- _service_layer->setupConfig(100ms);
- _service_layer->createNode();
- _service_layer->getNode().waitUntilInitialized();
- LOG(info, "start rpc client shared resources");
- config::ConfigUri client_config_uri("bm-rpc-client", _config_context);
- _rpc_client_shared_rpc_resources = std::make_unique<SharedRpcResources>
- (client_config_uri, _rpc_client_port, 100, params.get_rpc_events_before_wakup());
- _rpc_client_shared_rpc_resources->start_server_and_register_slobrok("bm-rpc-client");
- wait_slobrok("storage/cluster.storage/storage/0/default");
- wait_slobrok("storage/cluster.storage/storage/0");
- BmClusterController fake_controller(*_rpc_client_shared_rpc_resources);
- fake_controller.set_cluster_up(false);
-}
-
-void
-PersistenceProviderFixture::start_distributor(const BMParams& params)
-{
- config::ConfigUri config_uri("bm-distributor", _config_context);
- std::unique_ptr<BmStorageChainBuilder> chain_builder;
- if (params.get_use_storage_chain() && !params.get_use_document_api()) {
- chain_builder = std::make_unique<BmStorageChainBuilder>();
- _distributor_chain_context = chain_builder->get_context();
- }
- _distributor = std::make_unique<storage::DistributorProcess>(config_uri);
- if (chain_builder) {
- _distributor->set_storage_chain_builder(std::move(chain_builder));
- }
- _distributor->setupConfig(100ms);
- _distributor->createNode();
- wait_slobrok("storage/cluster.storage/distributor/0/default");
- wait_slobrok("storage/cluster.storage/distributor/0");
- BmClusterController fake_controller(*_rpc_client_shared_rpc_resources);
- fake_controller.set_cluster_up(true);
- // Wait for bucket ownership transfer safe time
- std::this_thread::sleep_for(2s);
-}
-
-void
-PersistenceProviderFixture::start_message_bus()
-{
- config::ConfigUri config_uri("bm-message-bus", _config_context);
- LOG(info, "Starting message bus");
- _message_bus = std::make_unique<BmMessageBus>(config_uri, _repo);
- LOG(info, "Started message bus");
-}
-
-void
-PersistenceProviderFixture::create_feed_handler(const BMParams& params)
-{
- StorageApiRpcService::Params rpc_params;
- // This is the same compression config as the default in stor-communicationmanager.def.
- rpc_params.compression_config = CompressionConfig(CompressionConfig::Type::LZ4, 3, 90, 1024);
- rpc_params.num_rpc_targets_per_node = params.get_rpc_targets_per_node();
- if (params.get_use_document_api()) {
- _feed_handler = std::make_unique<DocumentApiMessageBusBmFeedHandler>(*_message_bus);
- } else if (params.get_enable_distributor()) {
- if (params.get_use_storage_chain()) {
- assert(_distributor_chain_context);
- _feed_handler = std::make_unique<StorageApiChainBmFeedHandler>(_distributor_chain_context, true);
- } else if (params.get_use_message_bus()) {
- _feed_handler = std::make_unique<StorageApiMessageBusBmFeedHandler>(*_message_bus, true);
- } else {
- _feed_handler = std::make_unique<StorageApiRpcBmFeedHandler>(*_rpc_client_shared_rpc_resources, _repo, rpc_params, true);
- }
- } else if (params.needs_service_layer()) {
- if (params.get_use_storage_chain()) {
- assert(_service_layer_chain_context);
- _feed_handler = std::make_unique<StorageApiChainBmFeedHandler>(_service_layer_chain_context, false);
- } else if (params.get_use_message_bus()) {
- _feed_handler = std::make_unique<StorageApiMessageBusBmFeedHandler>(*_message_bus, false);
- } else {
- _feed_handler = std::make_unique<StorageApiRpcBmFeedHandler>(*_rpc_client_shared_rpc_resources, _repo, rpc_params, false);
- }
- }
-}
-
-void
-PersistenceProviderFixture::shutdown_feed_handler()
-{
- _feed_handler.reset();
-}
-
-void
-PersistenceProviderFixture::shutdown_message_bus()
-{
- if (_message_bus) {
- LOG(info, "stop message bus");
- _message_bus.reset();
- }
-}
-
-void
-PersistenceProviderFixture::shutdown_distributor()
-{
- if (_distributor) {
- LOG(info, "stop distributor");
- _distributor->getNode().requestShutdown("controlled shutdown");
- _distributor->shutdown();
- }
-}
-
-void
-PersistenceProviderFixture::shutdown_service_layer()
-{
- if (_rpc_client_shared_rpc_resources) {
- LOG(info, "stop rpc client shared resources");
- _rpc_client_shared_rpc_resources->shutdown();
- _rpc_client_shared_rpc_resources.reset();
- }
- if (_service_layer) {
- LOG(info, "stop service layer");
- _service_layer->getNode().requestShutdown("controlled shutdown");
- _service_layer->shutdown();
- }
- if (_slobrok) {
- LOG(info, "stop slobrok");
- _slobrok.reset();
- }
-}
-
-vespalib::nbostream
-make_put_feed(PersistenceProviderFixture &f, BMRange range, BucketSelector bucket_selector)
-{
- vespalib::nbostream serialized_feed;
- LOG(debug, "make_put_feed([%u..%u))", range.get_start(), range.get_end());
- for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
- auto n = bucket_selector(i);
- serialized_feed << f.make_bucket_id(n);
- auto document = f.make_document(n, i);
- document->serialize(serialized_feed);
- }
- return serialized_feed;
-}
+PersistenceProviderFixture::~PersistenceProviderFixture() = default;
std::vector<vespalib::nbostream>
-make_feed(vespalib::ThreadStackExecutor &executor, const BMParams &bm_params, std::function<vespalib::nbostream(BMRange,BucketSelector)> func, uint32_t num_buckets, const vespalib::string &label)
+make_feed(vespalib::ThreadStackExecutor &executor, const BMParams &bm_params, std::function<vespalib::nbostream(BmRange,BucketSelector)> func, uint32_t num_buckets, const vespalib::string &label)
{
LOG(info, "make_feed %s %u small documents", label.c_str(), bm_params.get_documents());
std::vector<vespalib::nbostream> serialized_feed_v;
@@ -1038,27 +171,6 @@ make_feed(vespalib::ThreadStackExecutor &executor, const BMParams &bm_params, st
return serialized_feed_v;
}
-void
-put_async_task(PersistenceProviderFixture &f, uint32_t max_pending, BMRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias)
-{
- LOG(debug, "put_async_task([%u..%u))", range.get_start(), range.get_end());
- feedbm::PendingTracker pending_tracker(max_pending);
- f._feed_handler->attach_bucket_info_queue(pending_tracker);
- auto &repo = *f._repo;
- vespalib::nbostream is(serialized_feed.data(), serialized_feed.size());
- BucketId bucket_id;
- auto bucket_space = f._bucket_space;
- bool use_timestamp = !f._feed_handler->manages_timestamp();
- for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
- is >> bucket_id;
- document::Bucket bucket(bucket_space, bucket_id);
- auto document = std::make_unique<Document>(repo, is);
- f._feed_handler->put(bucket, std::move(document), (use_timestamp ? (time_bias + i) : 0), pending_tracker);
- }
- assert(is.empty());
- pending_tracker.drain();
-}
-
class AvgSampler {
private:
double _total;
@@ -1077,73 +189,42 @@ void
run_put_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecutor& executor, int pass, int64_t& time_bias,
const std::vector<vespalib::nbostream>& serialized_feed_v, const BMParams& bm_params, AvgSampler& sampler)
{
- uint32_t old_errors = f._feed_handler->get_error_count();
+ auto& feed = f._feed;
+ auto& feed_handler = *f._feed_handler;
+ uint32_t old_errors = feed_handler.get_error_count();
auto start_time = std::chrono::steady_clock::now();
for (uint32_t i = 0; i < bm_params.get_client_threads(); ++i) {
auto range = bm_params.get_range(i);
- executor.execute(makeLambdaTask([&f, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]()
- { put_async_task(f, max_pending, range, serialized_feed, time_bias); }));
+ executor.execute(makeLambdaTask([&feed, &feed_handler, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]()
+ { feed.put_async_task(feed_handler, max_pending, range, serialized_feed, time_bias); }));
}
executor.sync();
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> elapsed = end_time - start_time;
- uint32_t new_errors = f._feed_handler->get_error_count() - old_errors;
+ uint32_t new_errors = feed_handler.get_error_count() - old_errors;
double throughput = bm_params.get_documents() / elapsed.count();
sampler.sample(throughput);
LOG(info, "putAsync: pass=%u, errors=%u, puts/s: %8.2f", pass, new_errors, throughput);
time_bias += bm_params.get_documents();
}
-vespalib::nbostream
-make_update_feed(PersistenceProviderFixture &f, BMRange range, BucketSelector bucket_selector)
-{
- vespalib::nbostream serialized_feed;
- LOG(debug, "make_update_feed([%u..%u))", range.get_start(), range.get_end());
- for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
- auto n = bucket_selector(i);
- serialized_feed << f.make_bucket_id(n);
- auto document_update = f.make_document_update(n, i);
- document_update->serializeHEAD(serialized_feed);
- }
- return serialized_feed;
-}
-
-void
-update_async_task(PersistenceProviderFixture &f, uint32_t max_pending, BMRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias)
-{
- LOG(debug, "update_async_task([%u..%u))", range.get_start(), range.get_end());
- feedbm::PendingTracker pending_tracker(max_pending);
- f._feed_handler->attach_bucket_info_queue(pending_tracker);
- auto &repo = *f._repo;
- vespalib::nbostream is(serialized_feed.data(), serialized_feed.size());
- BucketId bucket_id;
- auto bucket_space = f._bucket_space;
- bool use_timestamp = !f._feed_handler->manages_timestamp();
- for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
- is >> bucket_id;
- document::Bucket bucket(bucket_space, bucket_id);
- auto document_update = DocumentUpdate::createHEAD(repo, is);
- f._feed_handler->update(bucket, std::move(document_update), (use_timestamp ? (time_bias + i) : 0), pending_tracker);
- }
- assert(is.empty());
- pending_tracker.drain();
-}
-
void
run_update_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecutor& executor, int pass, int64_t& time_bias,
const std::vector<vespalib::nbostream>& serialized_feed_v, const BMParams& bm_params, AvgSampler& sampler)
{
- uint32_t old_errors = f._feed_handler->get_error_count();
+ auto& feed = f._feed;
+ auto& feed_handler = *f._feed_handler;
+ uint32_t old_errors = feed_handler.get_error_count();
auto start_time = std::chrono::steady_clock::now();
for (uint32_t i = 0; i < bm_params.get_client_threads(); ++i) {
auto range = bm_params.get_range(i);
- executor.execute(makeLambdaTask([&f, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]()
- { update_async_task(f, max_pending, range, serialized_feed, time_bias); }));
+ executor.execute(makeLambdaTask([&feed, &feed_handler, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]()
+ { feed.update_async_task(feed_handler, max_pending, range, serialized_feed, time_bias); }));
}
executor.sync();
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> elapsed = end_time - start_time;
- uint32_t new_errors = f._feed_handler->get_error_count() - old_errors;
+ uint32_t new_errors = feed_handler.get_error_count() - old_errors;
double throughput = bm_params.get_documents() / elapsed.count();
sampler.sample(throughput);
LOG(info, "updateAsync: pass=%u, errors=%u, updates/s: %8.2f", pass, new_errors, throughput);
@@ -1151,94 +232,44 @@ run_update_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecu
}
void
-get_async_task(PersistenceProviderFixture &f, uint32_t max_pending, BMRange range, const vespalib::nbostream &serialized_feed)
-{
- LOG(debug, "get_async_task([%u..%u))", range.get_start(), range.get_end());
- feedbm::PendingTracker pending_tracker(max_pending);
- vespalib::nbostream is(serialized_feed.data(), serialized_feed.size());
- BucketId bucket_id;
- vespalib::string all_fields(document::AllFields::NAME);
- auto bucket_space = f._bucket_space;
- for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
- is >> bucket_id;
- document::Bucket bucket(bucket_space, bucket_id);
- DocumentId document_id(is);
- f._feed_handler->get(bucket, all_fields, document_id, pending_tracker);
- }
- assert(is.empty());
- pending_tracker.drain();
-}
-
-void
run_get_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecutor& executor, int pass,
const std::vector<vespalib::nbostream>& serialized_feed_v, const BMParams& bm_params, AvgSampler& sampler)
{
- uint32_t old_errors = f._feed_handler->get_error_count();
+ auto& feed = f._feed;
+ auto& feed_handler = *f._feed_handler;
+ uint32_t old_errors = feed_handler.get_error_count();
auto start_time = std::chrono::steady_clock::now();
for (uint32_t i = 0; i < bm_params.get_client_threads(); ++i) {
auto range = bm_params.get_range(i);
- executor.execute(makeLambdaTask([&f, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range]()
- { get_async_task(f, max_pending, range, serialized_feed); }));
+ executor.execute(makeLambdaTask([&feed, &feed_handler, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range]()
+ { feed.get_async_task(feed_handler, max_pending, range, serialized_feed); }));
}
executor.sync();
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> elapsed = end_time - start_time;
- uint32_t new_errors = f._feed_handler->get_error_count() - old_errors;
+ uint32_t new_errors = feed_handler.get_error_count() - old_errors;
double throughput = bm_params.get_documents() / elapsed.count();
sampler.sample(throughput);
LOG(info, "getAsync: pass=%u, errors=%u, gets/s: %8.2f", pass, new_errors, throughput);
}
-vespalib::nbostream
-make_remove_feed(PersistenceProviderFixture &f, BMRange range, BucketSelector bucket_selector)
-{
- vespalib::nbostream serialized_feed;
- LOG(debug, "make_update_feed([%u..%u))", range.get_start(), range.get_end());
- for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
- auto n = bucket_selector(i);
- serialized_feed << f.make_bucket_id(n);
- auto document_id = f.make_document_id(n, i);
- vespalib::string raw_id = document_id.toString();
- serialized_feed.write(raw_id.c_str(), raw_id.size() + 1);
- }
- return serialized_feed;
-}
-
-void
-remove_async_task(PersistenceProviderFixture &f, uint32_t max_pending, BMRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias)
-{
- LOG(debug, "remove_async_task([%u..%u))", range.get_start(), range.get_end());
- feedbm::PendingTracker pending_tracker(max_pending);
- f._feed_handler->attach_bucket_info_queue(pending_tracker);
- vespalib::nbostream is(serialized_feed.data(), serialized_feed.size());
- BucketId bucket_id;
- auto bucket_space = f._bucket_space;
- bool use_timestamp = !f._feed_handler->manages_timestamp();
- for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
- is >> bucket_id;
- document::Bucket bucket(bucket_space, bucket_id);
- DocumentId document_id(is);
- f._feed_handler->remove(bucket, document_id, (use_timestamp ? (time_bias + i) : 0), pending_tracker);
- }
- assert(is.empty());
- pending_tracker.drain();
-}
-
void
run_remove_async_tasks(PersistenceProviderFixture& f, vespalib::ThreadStackExecutor& executor, int pass, int64_t& time_bias,
const std::vector<vespalib::nbostream>& serialized_feed_v, const BMParams& bm_params, AvgSampler& sampler)
{
- uint32_t old_errors = f._feed_handler->get_error_count();
+ auto& feed = f._feed;
+ auto& feed_handler = *f._feed_handler;
+ uint32_t old_errors = feed_handler.get_error_count();
auto start_time = std::chrono::steady_clock::now();
for (uint32_t i = 0; i < bm_params.get_client_threads(); ++i) {
auto range = bm_params.get_range(i);
- executor.execute(makeLambdaTask([&f, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]()
- { remove_async_task(f, max_pending, range, serialized_feed, time_bias); }));
+ executor.execute(makeLambdaTask([&feed, &feed_handler, max_pending = bm_params.get_max_pending(), &serialized_feed = serialized_feed_v[i], range, time_bias]()
+ { feed.remove_async_task(feed_handler, max_pending, range, serialized_feed, time_bias); }));
}
executor.sync();
auto end_time = std::chrono::steady_clock::now();
std::chrono::duration<double> elapsed = end_time - start_time;
- uint32_t new_errors = f._feed_handler->get_error_count() - old_errors;
+ uint32_t new_errors = feed_handler.get_error_count() - old_errors;
double throughput = bm_params.get_documents() / elapsed.count();
sampler.sample(throughput);
LOG(info, "removeAsync: pass=%u, errors=%u, removes/s: %8.2f", pass, new_errors, throughput);
@@ -1310,27 +341,14 @@ void benchmark_async_spi(const BMParams &bm_params)
{
vespalib::rmdir(base_dir, true);
PersistenceProviderFixture f(bm_params);
- auto &provider = *f._persistence_engine;
- LOG(info, "start initialize");
- provider.initialize();
- LOG(info, "create %u buckets", f.num_buckets());
- if (!bm_params.needs_distributor()) {
- f.create_buckets();
- }
- if (bm_params.needs_service_layer()) {
- f.start_service_layer(bm_params);
- }
- if (bm_params.needs_distributor()) {
- f.start_distributor(bm_params);
- }
- if (bm_params.needs_message_bus()) {
- f.start_message_bus();
- }
- f.create_feed_handler(bm_params);
+ auto& cluster = *f._bm_cluster;
+ cluster.start(f._feed);
+ f._feed_handler = cluster.get_feed_handler();
vespalib::ThreadStackExecutor executor(bm_params.get_client_threads(), 128_Ki);
- auto put_feed = make_feed(executor, bm_params, [&f](BMRange range, BucketSelector bucket_selector) { return make_put_feed(f, range, bucket_selector); }, f.num_buckets(), "put");
- auto update_feed = make_feed(executor, bm_params, [&f](BMRange range, BucketSelector bucket_selector) { return make_update_feed(f, range, bucket_selector); }, f.num_buckets(), "update");
- auto remove_feed = make_feed(executor, bm_params, [&f](BMRange range, BucketSelector bucket_selector) { return make_remove_feed(f, range, bucket_selector); }, f.num_buckets(), "remove");
+ auto& feed = f._feed;
+ auto put_feed = make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_put_feed(range, bucket_selector); }, f._feed.num_buckets(), "put");
+ auto update_feed = make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_update_feed(range, bucket_selector); }, f._feed.num_buckets(), "update");
+ auto remove_feed = make_feed(executor, bm_params, [&feed](BmRange range, BucketSelector bucket_selector) { return feed.make_remove_feed(range, bucket_selector); }, f._feed.num_buckets(), "remove");
int64_t time_bias = 1;
LOG(info, "Feed handler is '%s'", f._feed_handler->get_name().c_str());
benchmark_async_put(f, executor, time_bias, put_feed, bm_params);
@@ -1339,10 +357,8 @@ void benchmark_async_spi(const BMParams &bm_params)
benchmark_async_remove(f, executor, time_bias, remove_feed, bm_params);
LOG(info, "--------------------------------");
- f.shutdown_feed_handler();
- f.shutdown_message_bus();
- f.shutdown_distributor();
- f.shutdown_service_layer();
+ f._feed_handler = nullptr;
+ cluster.stop();
}
class App : public FastOS_Application
@@ -1412,6 +428,7 @@ App::get_options()
{ "get-passes", 1, nullptr, 0 },
{ "indexing-sequencer", 1, nullptr, 0 },
{ "max-pending", 1, nullptr, 0 },
+ { "nodes", 1, nullptr, 0 },
{ "put-passes", 1, nullptr, 0 },
{ "remove-passes", 1, nullptr, 0 },
{ "response-threads", 1, nullptr, 0 },
@@ -1436,6 +453,7 @@ App::get_options()
LONGOPT_GET_PASSES,
LONGOPT_INDEXING_SEQUENCER,
LONGOPT_MAX_PENDING,
+ LONGOPT_NODES,
LONGOPT_PUT_PASSES,
LONGOPT_REMOVE_PASSES,
LONGOPT_RESPONSE_THREADS,
@@ -1483,6 +501,9 @@ App::get_options()
case LONGOPT_MAX_PENDING:
_bm_params.set_max_pending(atoi(opt_argument));
break;
+ case LONGOPT_NODES:
+ _bm_params.set_num_nodes(atoi(opt_argument));
+ break;
case LONGOPT_PUT_PASSES:
_bm_params.set_put_passes(atoi(opt_argument));
break;
diff --git a/searchcore/src/vespa/searchcore/bmcluster/CMakeLists.txt b/searchcore/src/vespa/searchcore/bmcluster/CMakeLists.txt
new file mode 100644
index 00000000000..8d6dbd0f938
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/CMakeLists.txt
@@ -0,0 +1,42 @@
+# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_library(searchcore_bmcluster STATIC
+ SOURCES
+ bm_cluster.cpp
+ bm_cluster_controller.cpp
+ bm_cluster_params.cpp
+ bm_feed.cpp
+ bm_message_bus.cpp
+ bm_node.cpp
+ bm_storage_chain_builder.cpp
+ bm_storage_link.cpp
+ bucket_info_queue.cpp
+ document_api_message_bus_bm_feed_handler.cpp
+ pending_tracker.cpp
+ pending_tracker_hash.cpp
+ spi_bm_feed_handler.cpp
+ storage_api_chain_bm_feed_handler.cpp
+ storage_api_message_bus_bm_feed_handler.cpp
+ storage_api_rpc_bm_feed_handler.cpp
+ storage_reply_error_checker.cpp
+ DEPENDS
+ searchcore_server
+ searchcore_initializer
+ searchcore_reprocessing
+ searchcore_index
+ searchcore_persistenceengine
+ searchcore_docsummary
+ searchcore_feedoperation
+ searchcore_matching
+ searchcore_attribute
+ searchcore_documentmetastore
+ searchcore_bucketdb
+ searchcore_flushengine
+ searchcore_pcommon
+ searchcore_grouping
+ searchcore_proton_metrics
+ searchcore_fconfig
+ storageserver_storageapp
+ messagebus_messagebus-test
+ messagebus
+ searchlib_searchlib_uca
+)
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp
new file mode 100644
index 00000000000..58011d9c67a
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.cpp
@@ -0,0 +1,384 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "bm_cluster.h"
+#include "bm_cluster_controller.h"
+#include "bm_feed.h"
+#include "bm_message_bus.h"
+#include "bm_node.h"
+#include "spi_bm_feed_handler.h"
+#include <vespa/config/common/configcontext.h>
+#include <vespa/storage/storageserver/rpc/shared_rpc_resources.h>
+#include <vespa/messagebus/config-messagebus.h>
+#include <vespa/messagebus/testlib/slobrok.h>
+#include <vespa/slobrok/sbmirror.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/stllike/asciistream.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <thread>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".bmcluster.bm_cluster");
+
+using cloud::config::SlobroksConfigBuilder;
+using config::ConfigSet;
+using messagebus::MessagebusConfigBuilder;
+using storage::rpc::SharedRpcResources;
+
+namespace search::bmcluster {
+
+namespace {
+
+vespalib::string message_bus_config_id("bm-message-bus");
+vespalib::string rpc_client_config_id("bm-rpc-client");
+
+enum class PortBias
+{
+ SLOBROK_PORT = 0,
+ RPC_CLIENT_PORT = 1,
+ NUM_PORTS = 2
+};
+
+int port_number(int base_port, PortBias bias)
+{
+ return base_port + static_cast<int>(bias);
+}
+
+void
+make_slobroks_config(SlobroksConfigBuilder& slobroks, int slobrok_port)
+{
+ SlobroksConfigBuilder::Slobrok slobrok;
+ slobrok.connectionspec = vespalib::make_string("tcp/localhost:%d", slobrok_port);
+ slobroks.slobrok.push_back(std::move(slobrok));
+}
+
+}
+
+struct BmCluster::MessageBusConfigSet {
+ vespalib::string config_id;
+ SlobroksConfigBuilder slobroks;
+ MessagebusConfigBuilder messagebus;
+
+ MessageBusConfigSet(const vespalib::string &config_id_in, int slobrok_port)
+ : config_id(config_id_in),
+ slobroks(),
+ messagebus()
+ {
+ make_slobroks_config(slobroks, slobrok_port);
+ }
+ ~MessageBusConfigSet();
+
+ void add_builders(ConfigSet &set) {
+ set.addBuilder(config_id, &slobroks);
+ set.addBuilder(config_id, &messagebus);
+ }
+};
+
+BmCluster::MessageBusConfigSet::~MessageBusConfigSet() = default;
+
+struct BmCluster::RpcClientConfigSet {
+ vespalib::string config_id;
+ SlobroksConfigBuilder slobroks;
+
+ RpcClientConfigSet(const vespalib::string &config_id_in, int slobrok_port)
+ : config_id(config_id_in),
+ slobroks()
+ {
+ make_slobroks_config(slobroks, slobrok_port);
+ }
+ ~RpcClientConfigSet();
+
+ void add_builders(ConfigSet &set) {
+ set.addBuilder(config_id, &slobroks);
+ }
+};
+
+BmCluster::RpcClientConfigSet::~RpcClientConfigSet() = default;
+
+BmCluster::BmCluster(const vespalib::string& base_dir, int base_port, const BmClusterParams& params, std::shared_ptr<DocumenttypesConfig> document_types, std::shared_ptr<const document::DocumentTypeRepo> repo)
+ : _params(params),
+ _slobrok_port(port_number(base_port, PortBias::SLOBROK_PORT)),
+ _rpc_client_port(port_number(base_port, PortBias::RPC_CLIENT_PORT)),
+ _message_bus_config(std::make_unique<MessageBusConfigSet>(message_bus_config_id, _slobrok_port)),
+ _rpc_client_config(std::make_unique<RpcClientConfigSet>(rpc_client_config_id, _slobrok_port)),
+ _config_set(std::make_unique<config::ConfigSet>()),
+ _config_context(std::make_shared<config::ConfigContext>(*_config_set)),
+ _slobrok(),
+ _message_bus(),
+ _rpc_client(),
+ _base_dir(base_dir),
+ _base_port(base_port),
+ _document_types(std::move(document_types)),
+ _repo(std::move(repo)),
+ _nodes(params.get_num_nodes())
+{
+ _message_bus_config->add_builders(*_config_set);
+ _rpc_client_config->add_builders(*_config_set);
+ vespalib::mkdir(_base_dir, false);
+}
+
+BmCluster::~BmCluster()
+{
+ _nodes.clear();
+ stop_message_bus();
+ stop_rpc_client();
+ stop_slobrok();
+}
+
+
+void
+BmCluster::start_slobrok()
+{
+ if (!_slobrok) {
+ LOG(info, "start slobrok");
+ _slobrok = std::make_unique<mbus::Slobrok>(_slobrok_port);
+ }
+}
+
+void
+BmCluster::stop_slobrok()
+{
+ if (_slobrok) {
+ LOG(info, "stop slobrok");
+ _slobrok.reset();
+ }
+}
+
+void
+BmCluster::wait_slobrok(const vespalib::string &name)
+{
+ auto &mirror = _rpc_client->slobrok_mirror();
+ LOG(info, "Waiting for %s in slobrok", name.c_str());
+ for (;;) {
+ auto specs = mirror.lookup(name);
+ if (!specs.empty()) {
+ LOG(info, "Found %s in slobrok", name.c_str());
+ return;
+ }
+ std::this_thread::sleep_for(100ms);
+ }
+}
+
+void
+BmCluster::start_message_bus()
+{
+ if (!_message_bus) {
+ LOG(info, "Starting message bus");
+ config::ConfigUri config_uri(message_bus_config_id, _config_context);
+ _message_bus = std::make_unique<BmMessageBus>(config_uri, _repo);
+ LOG(info, "Started message bus");
+ }
+}
+
+void
+BmCluster::stop_message_bus()
+{
+ if (_message_bus) {
+ LOG(info, "stop message bus");
+ _message_bus.reset();
+ }
+}
+
+void
+BmCluster::start_rpc_client()
+{
+ if (!_rpc_client) {
+ LOG(info, "start rpc client");
+ config::ConfigUri client_config_uri(rpc_client_config_id, _config_context);
+ _rpc_client = std::make_unique<SharedRpcResources>
+ (client_config_uri, _rpc_client_port, 100, _params.get_rpc_events_before_wakeup());
+ _rpc_client->start_server_and_register_slobrok(rpc_client_config_id);
+ }
+}
+
+void
+BmCluster::stop_rpc_client()
+{
+ if (_rpc_client) {
+ LOG(info, "stop rpc client");
+ _rpc_client->shutdown();
+ _rpc_client.reset();
+ }
+}
+
+void
+BmCluster::make_node(unsigned int node_idx)
+{
+ assert(node_idx < _nodes.size());
+ assert(!_nodes[node_idx]);
+ vespalib::asciistream s;
+ s << _base_dir << "/n" << node_idx;
+ vespalib::string node_base_dir(s.str());
+ int node_base_port = port_number(_base_port, PortBias::NUM_PORTS) + BmNode::num_ports() * node_idx;
+ _nodes[node_idx] = BmNode::create(node_base_dir, node_base_port, node_idx, *this, _params, _document_types, _slobrok_port);
+}
+
+void
+BmCluster::make_nodes()
+{
+ for (unsigned int node_idx = 0; node_idx < _nodes.size(); ++node_idx) {
+ make_node(node_idx);
+ }
+}
+
+BmNode&
+BmCluster::get_node(unsigned int node_idx)
+{
+ assert(node_idx < _nodes.size());
+ assert(_nodes[node_idx]);
+ return *_nodes[node_idx];
+}
+
+void
+BmCluster::initialize_providers()
+{
+ LOG(info, "start initialize");
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->initialize_persistence_provider();
+ }
+ }
+}
+
+void
+BmCluster::create_buckets(BmFeed& feed)
+{
+ LOG(info, "create %u buckets", feed.num_buckets());
+ auto& node = get_node(0);
+ for (unsigned int i = 0; i < feed.num_buckets(); ++i) {
+ node.create_bucket(feed.make_bucket(i));
+ }
+}
+
+void
+BmCluster::start_service_layers()
+{
+ start_slobrok();
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->start_service_layer(_params);
+ }
+ }
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->wait_service_layer();
+ }
+ }
+ start_rpc_client();
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->wait_service_layer_slobrok();
+ }
+ }
+ BmClusterController fake_controller(get_rpc_client(), _params.get_num_nodes());
+ unsigned int node_idx = 0;
+ for (const auto &node : _nodes) {
+ if (node) {
+ fake_controller.set_cluster_up(node_idx, false);
+ }
+ ++node_idx;
+ }
+}
+
+void
+BmCluster::start_distributors()
+{
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->start_distributor(_params);
+ }
+ }
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->wait_distributor_slobrok();
+ }
+ }
+ BmClusterController fake_controller(get_rpc_client(), _params.get_num_nodes());
+ unsigned int node_idx = 0;
+ for (const auto &node : _nodes) {
+ if (node) {
+ fake_controller.set_cluster_up(node_idx, true);
+ }
+ ++node_idx;
+ }
+ // Wait for bucket ownership transfer safe time
+ std::this_thread::sleep_for(2s);
+}
+
+void
+BmCluster::create_feed_handlers()
+{
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->create_feed_handler(_params);
+ }
+ }
+}
+
+void
+BmCluster::shutdown_feed_handlers()
+{
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->shutdown_feed_handler();
+ }
+ }
+}
+
+void
+BmCluster::shutdown_distributors()
+{
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->shutdown_distributor();
+ }
+ }
+}
+
+void
+BmCluster::shutdown_service_layers()
+{
+ stop_rpc_client();
+ for (const auto &node : _nodes) {
+ if (node) {
+ node->shutdown_service_layer();
+ }
+ }
+ stop_slobrok();
+}
+
+void
+BmCluster::start(BmFeed& feed)
+{
+ initialize_providers();
+ if (!_params.needs_distributor()) {
+ create_buckets(feed);
+ }
+ if (_params.needs_service_layer()) {
+ start_service_layers();
+ }
+ if (_params.needs_distributor()) {
+ start_distributors();
+ }
+ if (_params.needs_message_bus()) {
+ start_message_bus();
+ }
+ create_feed_handlers();
+}
+
+void
+BmCluster::stop()
+{
+ shutdown_feed_handlers();
+ stop_message_bus();
+ shutdown_distributors();
+ shutdown_service_layers();
+}
+
+IBmFeedHandler*
+BmCluster::get_feed_handler()
+{
+ return get_node(0).get_feed_handler();
+}
+
+}
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h
new file mode 100644
index 00000000000..0e2e138ab08
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster.h
@@ -0,0 +1,79 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "bm_cluster_params.h"
+#include <memory>
+#include <vector>
+
+namespace config {
+
+class IConfigContext;
+class ConfigSet;
+
+}
+
+namespace document { class DocumentTypeRepo; }
+namespace document::internal { class InternalDocumenttypesType; }
+namespace mbus { class Slobrok; }
+namespace storage::rpc { class SharedRpcResources; }
+
+namespace search::bmcluster {
+
+class BmFeed;
+class BmMessageBus;
+class BmNode;
+class IBmFeedHandler;
+
+/*
+ * Class representing a benchmark cluster with one or more benchmark nodes.
+ */
+class BmCluster {
+ struct MessageBusConfigSet;
+ struct RpcClientConfigSet;
+ using DocumenttypesConfig = const document::internal::InternalDocumenttypesType;
+ BmClusterParams _params;
+ int _slobrok_port;
+ int _rpc_client_port;
+ std::unique_ptr<MessageBusConfigSet> _message_bus_config;
+ std::unique_ptr<RpcClientConfigSet> _rpc_client_config;
+ std::unique_ptr<config::ConfigSet> _config_set;
+ std::shared_ptr<config::IConfigContext> _config_context;
+ std::unique_ptr<mbus::Slobrok> _slobrok;
+ std::unique_ptr<BmMessageBus> _message_bus;
+ std::unique_ptr<storage::rpc::SharedRpcResources> _rpc_client;
+ vespalib::string _base_dir;
+ int _base_port;
+ std::shared_ptr<DocumenttypesConfig> _document_types;
+ std::shared_ptr<const document::DocumentTypeRepo> _repo;
+ std::vector<std::unique_ptr<BmNode>> _nodes;
+
+public:
+ BmCluster(const vespalib::string& base_dir, int base_port, const BmClusterParams& params, std::shared_ptr<DocumenttypesConfig> document_types, std::shared_ptr<const document::DocumentTypeRepo> repo);
+ ~BmCluster();
+ void start_slobrok();
+ void stop_slobrok();
+ void wait_slobrok(const vespalib::string &name);
+ void start_message_bus();
+ void stop_message_bus();
+ void start_rpc_client();
+ void stop_rpc_client();
+ void start_service_layers();
+ void start_distributors();
+ void create_feed_handlers();
+ void shutdown_feed_handlers();
+ void shutdown_distributors();
+ void shutdown_service_layers();
+ void create_buckets(BmFeed &feed);
+ void initialize_providers();
+ void start(BmFeed &feed);
+ void stop();
+ storage::rpc::SharedRpcResources &get_rpc_client() { return *_rpc_client; }
+ BmMessageBus& get_message_bus() { return *_message_bus; }
+ void make_node(unsigned int node_idx);
+ void make_nodes();
+ BmNode& get_node(unsigned int node_idx);
+ IBmFeedHandler* get_feed_handler();
+};
+
+}
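
For orientation, here is a minimal sketch of how the new BmCluster API is meant to be driven, following the refactored benchmark_async_spi() earlier in this patch. The make_document_types() helper and the base directory/port literals are stand-ins borrowed from the benchmark app, and the install include paths are assumptions.

    // Sketch only: mirrors the BmCluster lifecycle used by benchmark_async_spi().
    // make_document_types(), the base dir and the base port are placeholders
    // taken from the vespa-feed-bm app, not exported by the library itself.
    #include <vespa/searchcore/bmcluster/bm_cluster.h>
    #include <vespa/searchcore/bmcluster/bm_cluster_params.h>
    #include <vespa/searchcore/bmcluster/bm_feed.h>
    #include <vespa/document/repo/document_type_repo_factory.h>

    using search::bmcluster::BmCluster;
    using search::bmcluster::BmClusterParams;
    using search::bmcluster::BmFeed;

    void run_cluster_sketch()
    {
        auto document_types = make_document_types();     // app-level helper (assumed)
        auto repo = document::DocumentTypeRepoFactory::make(*document_types);
        BmClusterParams params;
        params.set_num_nodes(2);                         // exercised via the new --nodes option
        params.set_enable_service_layer(true);
        BmCluster cluster("testdb", 9017, params, document_types, repo); // placeholder dir/port
        cluster.make_nodes();                            // one BmNode per node index
        BmFeed feed(repo);
        cluster.start(feed);                             // providers, buckets, service layers, distributors, feed handlers
        auto* feed_handler = cluster.get_feed_handler(); // feed handler of node 0
        // ... build feeds with BmFeed and replay them through *feed_handler ...
        cluster.stop();
    }
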
diff --git a/searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.cpp
index a1b40c56e11..bdf4b5fed58 100644
--- a/searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.cpp
@@ -8,19 +8,22 @@
#include <vespa/vdslib/state/cluster_state_bundle.h>
#include <vespa/fnet/frt/target.h>
#include <vespa/slobrok/sbmirror.h>
+#include <vespa/vespalib/stllike/asciistream.h>
using storage::api::StorageMessageAddress;
using storage::rpc::SharedRpcResources;
using storage::lib::NodeType;
-namespace feedbm {
+namespace search::bmcluster {
namespace {
FRT_RPCRequest *
-make_set_cluster_state_request()
+make_set_cluster_state_request(unsigned int num_nodes)
{
- storage::lib::ClusterStateBundle bundle(storage::lib::ClusterState("version:2 distributor:1 storage:1"));
+ vespalib::asciistream s;
+ s << "version:2 distributor:" << num_nodes << " storage:" << num_nodes;
+ storage::lib::ClusterStateBundle bundle(storage::lib::ClusterState(s.str()));
storage::rpc::SlimeClusterStateBundleCodec codec;
auto encoded_bundle = codec.encode(bundle);
auto *req = new FRT_RPCRequest();
@@ -34,17 +37,18 @@ make_set_cluster_state_request()
}
-BmClusterController::BmClusterController(SharedRpcResources& shared_rpc_resources_in)
- : _shared_rpc_resources(shared_rpc_resources_in)
+BmClusterController::BmClusterController(SharedRpcResources& shared_rpc_resources_in, unsigned int num_nodes)
+ : _shared_rpc_resources(shared_rpc_resources_in),
+ _num_nodes(num_nodes)
{
}
void
-BmClusterController::set_cluster_up(bool distributor)
+BmClusterController::set_cluster_up(unsigned int node_idx, bool distributor)
{
static vespalib::string _storage("storage");
- StorageMessageAddress storage_address(&_storage, distributor ? NodeType::DISTRIBUTOR : NodeType::STORAGE, 0);
- auto req = make_set_cluster_state_request();
+ StorageMessageAddress storage_address(&_storage, distributor ? NodeType::DISTRIBUTOR : NodeType::STORAGE, node_idx);
+ auto req = make_set_cluster_state_request(_num_nodes);
auto target_resolver = std::make_unique<storage::rpc::CachingRpcTargetResolver>(_shared_rpc_resources.slobrok_mirror(),
_shared_rpc_resources.target_factory(), 1);
uint64_t fake_bucket_id = 0;
diff --git a/searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.h b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.h
index 699036be5c9..7b4313453f1 100644
--- a/searchcore/src/apps/vespa-feed-bm/bm_cluster_controller.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_controller.h
@@ -5,7 +5,7 @@
namespace storage::api { class StorageMessageAddress; }
namespace storage::rpc { class SharedRpcResources; }
-namespace feedbm {
+namespace search::bmcluster {
/*
* Fake cluster controller that sets cluster state to be up.
@@ -13,9 +13,10 @@ namespace feedbm {
class BmClusterController
{
storage::rpc::SharedRpcResources& _shared_rpc_resources;
+ unsigned int _num_nodes;
public:
- BmClusterController(storage::rpc::SharedRpcResources& shared_rpc_resources_in);
- void set_cluster_up(bool distributor);
+ BmClusterController(storage::rpc::SharedRpcResources& shared_rpc_resources_in, unsigned int num_nodes);
+ void set_cluster_up(unsigned int node_idx, bool distributor);
};
}
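
To isolate the API change above: the controller now takes the node count, and cluster state is set per node index instead of via a single set_cluster_up(bool) call. A minimal sketch, assuming the header is installed under the new bmcluster path:

    // Per-node cluster-state fan-out, mirroring BmCluster::start_distributors().
    // The bm_cluster_controller.h install path is an assumption; rpc_client is
    // the SharedRpcResources instance owned by BmCluster.
    #include <vespa/searchcore/bmcluster/bm_cluster_controller.h>
    #include <vespa/storage/storageserver/rpc/shared_rpc_resources.h>

    void mark_distributors_up(storage::rpc::SharedRpcResources& rpc_client, unsigned int num_nodes)
    {
        search::bmcluster::BmClusterController fake_controller(rpc_client, num_nodes);
        for (unsigned int node_idx = 0; node_idx < num_nodes; ++node_idx) {
            fake_controller.set_cluster_up(node_idx, true); // true selects the distributor node type
        }
    }
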
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp
new file mode 100644
index 00000000000..7766ab6c5b3
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.cpp
@@ -0,0 +1,48 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "bm_cluster_params.h"
+#include <iostream>
+
+namespace search::bmcluster {
+
+BmClusterParams::BmClusterParams()
+ : _bucket_db_stripe_bits(0),
+ _distributor_stripes(0),
+ _enable_distributor(false),
+ _enable_service_layer(false),
+ _indexing_sequencer(),
+ _num_nodes(1),
+ _response_threads(2), // Same default as in stor-filestor.def
+ _rpc_events_before_wakeup(1), // Same default as in stor-communicationmanager.def
+ _rpc_network_threads(1), // Same as the previous default in stor-communicationmanager.def
+ _rpc_targets_per_node(1), // Same default as in stor-communicationmanager.def
+ _skip_communicationmanager_thread(false), // Same default as in stor-communicationmanager.def
+ _skip_get_spi_bucket_info(false),
+ _use_async_message_handling_on_schedule(false),
+ _use_document_api(false),
+ _use_message_bus(false),
+ _use_storage_chain(false)
+{
+}
+
+BmClusterParams::~BmClusterParams() = default;
+
+bool
+BmClusterParams::check() const
+{
+ if (_response_threads < 1) {
+ std::cerr << "Too few response threads: " << _response_threads << std::endl;
+ return false;
+ }
+ if (_rpc_network_threads < 1) {
+ std::cerr << "Too few rpc network threads: " << _rpc_network_threads << std::endl;
+ return false;
+ }
+ if (_rpc_targets_per_node < 1) {
+ std::cerr << "Too few rpc targets per node: " << _rpc_targets_per_node << std::endl;
+ return false;
+ }
+ return true;
+}
+
+}
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h
new file mode 100644
index 00000000000..5bc6b97487c
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_cluster_params.h
@@ -0,0 +1,71 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <cstdint>
+#include <vespa/vespalib/stllike/string.h>
+
+namespace search::bmcluster {
+
+/*
+ * Parameters for setting up a benchmark cluster.
+ */
+class BmClusterParams
+{
+ uint32_t _bucket_db_stripe_bits;
+ uint32_t _distributor_stripes;
+ bool _enable_distributor;
+ bool _enable_service_layer;
+ vespalib::string _indexing_sequencer;
+ uint32_t _num_nodes;
+ uint32_t _response_threads;
+ uint32_t _rpc_events_before_wakeup;
+ uint32_t _rpc_network_threads;
+ uint32_t _rpc_targets_per_node;
+ bool _skip_communicationmanager_thread;
+ bool _skip_get_spi_bucket_info;
+ bool _use_async_message_handling_on_schedule;
+ bool _use_document_api;
+ bool _use_message_bus;
+ bool _use_storage_chain;
+public:
+ BmClusterParams();
+ ~BmClusterParams();
+ uint32_t get_bucket_db_stripe_bits() const { return _bucket_db_stripe_bits; }
+ uint32_t get_distributor_stripes() const { return _distributor_stripes; }
+ bool get_enable_distributor() const { return _enable_distributor; }
+ const vespalib::string & get_indexing_sequencer() const { return _indexing_sequencer; }
+ uint32_t get_num_nodes() const { return _num_nodes; }
+ uint32_t get_response_threads() const { return _response_threads; }
+ uint32_t get_rpc_events_before_wakeup() const { return _rpc_events_before_wakeup; }
+ uint32_t get_rpc_network_threads() const { return _rpc_network_threads; }
+ uint32_t get_rpc_targets_per_node() const { return _rpc_targets_per_node; }
+ bool get_skip_communicationmanager_thread() const { return _skip_communicationmanager_thread; }
+ bool get_skip_get_spi_bucket_info() const { return _skip_get_spi_bucket_info; }
+ bool get_use_async_message_handling_on_schedule() const { return _use_async_message_handling_on_schedule; }
+ bool get_use_document_api() const { return _use_document_api; }
+ bool get_use_message_bus() const { return _use_message_bus; }
+ bool get_use_storage_chain() const { return _use_storage_chain; }
+ bool needs_distributor() const { return _enable_distributor || _use_document_api; }
+ bool needs_message_bus() const { return _use_message_bus || _use_document_api; }
+ bool needs_service_layer() const { return _enable_service_layer || _enable_distributor || _use_storage_chain || _use_message_bus || _use_document_api; }
+ void set_bucket_db_stripe_bits(uint32_t value) { _bucket_db_stripe_bits = value; }
+ void set_distributor_stripes(uint32_t value) { _distributor_stripes = value; }
+ void set_enable_distributor(bool value) { _enable_distributor = value; }
+ void set_enable_service_layer(bool value) { _enable_service_layer = value; }
+ void set_indexing_sequencer(vespalib::stringref sequencer) { _indexing_sequencer = sequencer; }
+ void set_num_nodes(uint32_t value) { _num_nodes = value; }
+ void set_response_threads(uint32_t threads_in) { _response_threads = threads_in; }
+ void set_rpc_events_before_wakeup(uint32_t value) { _rpc_events_before_wakeup = value; }
+ void set_rpc_network_threads(uint32_t threads_in) { _rpc_network_threads = threads_in; }
+ void set_rpc_targets_per_node(uint32_t targets_in) { _rpc_targets_per_node = targets_in; }
+ void set_skip_communicationmanager_thread(bool value) { _skip_communicationmanager_thread = value; }
+ void set_skip_get_spi_bucket_info(bool value) { _skip_get_spi_bucket_info = value; }
+ void set_use_async_message_handling_on_schedule(bool value) { _use_async_message_handling_on_schedule = value; }
+ void set_use_document_api(bool value) { _use_document_api = value; }
+ void set_use_message_bus(bool value) { _use_message_bus = value; }
+ void set_use_storage_chain(bool value) { _use_storage_chain = value; }
+ bool check() const;
+};
+
+}
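
As a small illustration of how the derived needs_*() predicates combine the individual flags (values are arbitrary; the include path is assumed):

    #include <vespa/searchcore/bmcluster/bm_cluster_params.h>
    #include <cassert>

    void configure_params_sketch()
    {
        search::bmcluster::BmClusterParams params;
        params.set_num_nodes(4);              // wired to the new --nodes option in the app
        params.set_use_document_api(true);
        assert(params.needs_distributor());   // _enable_distributor || _use_document_api
        assert(params.needs_message_bus());   // _use_message_bus || _use_document_api
        assert(params.needs_service_layer()); // implied by document API / distributor use
        assert(params.check());               // default thread/target counts pass the sanity checks
    }
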
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_feed.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_feed.cpp
new file mode 100644
index 00000000000..e082f2b96a1
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_feed.cpp
@@ -0,0 +1,195 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "bm_feed.h"
+#include "bm_range.h"
+#include "bucket_selector.h"
+#include "pending_tracker.h"
+#include "i_bm_feed_handler.h"
+#include <vespa/document/base/documentid.h>
+#include <vespa/document/bucket/bucketid.h>
+#include <vespa/document/datatype/documenttype.h>
+#include <vespa/document/fieldset/fieldsets.h>
+#include <vespa/document/fieldvalue/document.h>
+#include <vespa/document/fieldvalue/intfieldvalue.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/document/test/make_bucket_space.h>
+#include <vespa/document/update/assignvalueupdate.h>
+#include <vespa/document/update/documentupdate.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <cassert>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".bmcluster.bm_feed");
+
+using document::AssignValueUpdate;
+using document::Document;
+using document::DocumentId;
+using document::DocumentType;
+using document::DocumentTypeRepo;
+using document::DocumentUpdate;
+using document::IntFieldValue;
+using document::FieldUpdate;
+
+namespace search::bmcluster {
+
+BmFeed::BmFeed(std::shared_ptr<const DocumentTypeRepo> repo)
+ : _repo(std::move(repo)),
+ _document_type(_repo->getDocumentType("test")),
+ _field(_document_type->getField("int")),
+ _bucket_bits(16),
+ _bucket_space(document::test::makeBucketSpace("test"))
+{
+}
+
+BmFeed::~BmFeed() = default;
+
+DocumentId
+BmFeed::make_document_id(uint32_t n, uint32_t i) const
+{
+ DocumentId id(vespalib::make_string("id::test:n=%u:%u", n & (num_buckets() - 1), i));
+ return id;
+}
+
+std::unique_ptr<Document>
+BmFeed::make_document(uint32_t n, uint32_t i) const
+{
+ auto id = make_document_id(n, i);
+ auto document = std::make_unique<Document>(*_document_type, id);
+ document->setRepo(*_repo);
+ document->setFieldValue(_field, std::make_unique<IntFieldValue>(i));
+ return document;
+}
+
+std::unique_ptr<DocumentUpdate>
+BmFeed::make_document_update(uint32_t n, uint32_t i) const
+{
+ auto id = make_document_id(n, i);
+ auto document_update = std::make_unique<DocumentUpdate>(*_repo, *_document_type, id);
+ document_update->addUpdate(FieldUpdate(_field).addUpdate(AssignValueUpdate(IntFieldValue(15))));
+ return document_update;
+}
+
+vespalib::nbostream
+BmFeed::make_put_feed(BmRange range, BucketSelector bucket_selector)
+{
+ vespalib::nbostream serialized_feed;
+ LOG(debug, "make_put_feed([%u..%u))", range.get_start(), range.get_end());
+ for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
+ auto n = bucket_selector(i);
+ serialized_feed << make_bucket_id(n);
+ auto document = make_document(n, i);
+ document->serialize(serialized_feed);
+ }
+ return serialized_feed;
+}
+
+vespalib::nbostream
+BmFeed::make_update_feed(BmRange range, BucketSelector bucket_selector)
+{
+ vespalib::nbostream serialized_feed;
+ LOG(debug, "make_update_feed([%u..%u))", range.get_start(), range.get_end());
+ for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
+ auto n = bucket_selector(i);
+ serialized_feed << make_bucket_id(n);
+ auto document_update = make_document_update(n, i);
+ document_update->serializeHEAD(serialized_feed);
+ }
+ return serialized_feed;
+}
+
+vespalib::nbostream
+BmFeed::make_remove_feed(BmRange range, BucketSelector bucket_selector)
+{
+ vespalib::nbostream serialized_feed;
+ LOG(debug, "make_remove_feed([%u..%u))", range.get_start(), range.get_end());
+ for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
+ auto n = bucket_selector(i);
+ serialized_feed << make_bucket_id(n);
+ auto document_id = make_document_id(n, i);
+ vespalib::string raw_id = document_id.toString();
+ serialized_feed.write(raw_id.c_str(), raw_id.size() + 1);
+ }
+ return serialized_feed;
+}
+
+
+void
+BmFeed::put_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias)
+{
+ LOG(debug, "put_async_task([%u..%u))", range.get_start(), range.get_end());
+ PendingTracker pending_tracker(max_pending);
+ feed_handler.attach_bucket_info_queue(pending_tracker);
+ auto &repo = *_repo;
+ vespalib::nbostream is(serialized_feed.data(), serialized_feed.size());
+ document::BucketId bucket_id;
+ bool use_timestamp = !feed_handler.manages_timestamp();
+ for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
+ is >> bucket_id;
+ document::Bucket bucket(_bucket_space, bucket_id);
+ auto document = std::make_unique<Document>(repo, is);
+ feed_handler.put(bucket, std::move(document), (use_timestamp ? (time_bias + i) : 0), pending_tracker);
+ }
+ assert(is.empty());
+ pending_tracker.drain();
+}
+
+void
+BmFeed::update_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias)
+{
+ LOG(debug, "update_async_task([%u..%u))", range.get_start(), range.get_end());
+ PendingTracker pending_tracker(max_pending);
+ feed_handler.attach_bucket_info_queue(pending_tracker);
+ auto &repo = *_repo;
+ vespalib::nbostream is(serialized_feed.data(), serialized_feed.size());
+ document::BucketId bucket_id;
+ bool use_timestamp = !feed_handler.manages_timestamp();
+ for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
+ is >> bucket_id;
+ document::Bucket bucket(_bucket_space, bucket_id);
+ auto document_update = DocumentUpdate::createHEAD(repo, is);
+ feed_handler.update(bucket, std::move(document_update), (use_timestamp ? (time_bias + i) : 0), pending_tracker);
+ }
+ assert(is.empty());
+ pending_tracker.drain();
+}
+
+void
+BmFeed::get_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed)
+{
+ LOG(debug, "get_async_task([%u..%u))", range.get_start(), range.get_end());
+ PendingTracker pending_tracker(max_pending);
+ vespalib::nbostream is(serialized_feed.data(), serialized_feed.size());
+ document::BucketId bucket_id;
+ vespalib::string all_fields(document::AllFields::NAME);
+ for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
+ is >> bucket_id;
+ document::Bucket bucket(_bucket_space, bucket_id);
+ DocumentId document_id(is);
+ feed_handler.get(bucket, all_fields, document_id, pending_tracker);
+ }
+ assert(is.empty());
+ pending_tracker.drain();
+}
+
+void
+BmFeed::remove_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias)
+{
+ LOG(debug, "remove_async_task([%u..%u))", range.get_start(), range.get_end());
+ PendingTracker pending_tracker(max_pending);
+ feed_handler.attach_bucket_info_queue(pending_tracker);
+ vespalib::nbostream is(serialized_feed.data(), serialized_feed.size());
+ document::BucketId bucket_id;
+ bool use_timestamp = !feed_handler.manages_timestamp();
+ for (unsigned int i = range.get_start(); i < range.get_end(); ++i) {
+ is >> bucket_id;
+ document::Bucket bucket(_bucket_space, bucket_id);
+ DocumentId document_id(is);
+ feed_handler.remove(bucket, document_id, (use_timestamp ? (time_bias + i) : 0), pending_tracker);
+ }
+ assert(is.empty());
+ pending_tracker.drain();
+}
+
+}
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_feed.h b/searchcore/src/vespa/searchcore/bmcluster/bm_feed.h
new file mode 100644
index 00000000000..a6afe7b10d9
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_feed.h
@@ -0,0 +1,57 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/document/base/documentid.h>
+#include <vespa/document/bucket/bucketspace.h>
+#include <vespa/document/bucket/bucketid.h>
+#include <vespa/document/bucket/bucket.h>
+
+namespace document {
+
+class Document;
+class DocumentType;
+class DocumentTypeRepo;
+class DocumentUpdate;
+class Field;
+
+}
+
+namespace vespalib { class nbostream; }
+
+namespace search::bmcluster {
+
+class BmRange;
+class BucketSelector;
+class IBmFeedHandler;
+
+/*
+ * Class for generating a synthetic feed of documents.
+ */
+class BmFeed {
+ std::shared_ptr<const document::DocumentTypeRepo> _repo;
+ const document::DocumentType* _document_type;
+ const document::Field& _field;
+ uint32_t _bucket_bits;
+ document::BucketSpace _bucket_space;
+public:
+
+ BmFeed(std::shared_ptr<const document::DocumentTypeRepo> repo);
+ ~BmFeed();
+ uint32_t num_buckets() const { return (1u << _bucket_bits); }
+ document::BucketSpace get_bucket_space() const noexcept { return _bucket_space; }
+ document::BucketId make_bucket_id(uint32_t n) const { return document::BucketId(_bucket_bits, n & (num_buckets() - 1)); }
+ document::Bucket make_bucket(uint32_t n) const { return document::Bucket(_bucket_space, make_bucket_id(n)); }
+ document::DocumentId make_document_id(uint32_t n, uint32_t i) const;
+ std::unique_ptr<document::Document> make_document(uint32_t n, uint32_t i) const;
+ std::unique_ptr<document::DocumentUpdate> make_document_update(uint32_t n, uint32_t i) const;
+ vespalib::nbostream make_put_feed(BmRange range, BucketSelector bucket_selector);
+ vespalib::nbostream make_update_feed(BmRange range, BucketSelector bucket_selector);
+ vespalib::nbostream make_remove_feed(BmRange range, BucketSelector bucket_selector);
+ void put_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias);
+ void update_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias);
+ void get_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed);
+ void remove_async_task(IBmFeedHandler& feed_handler, uint32_t max_pending, BmRange range, const vespalib::nbostream &serialized_feed, int64_t time_bias);
+};
+
+}
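
Taken together, the header above and the *_async_task methods earlier in bm_feed.cpp suggest a simple produce-then-replay usage pattern. The sketch below is illustrative only: the include paths, the 1000-document range, the max_pending value and the zero time_bias are assumptions, not part of this change.

#include <vespa/searchcore/bmcluster/bm_feed.h>          // assumed install paths
#include <vespa/searchcore/bmcluster/bm_range.h>
#include <vespa/searchcore/bmcluster/bucket_selector.h>
#include <vespa/searchcore/bmcluster/i_bm_feed_handler.h>

using search::bmcluster::BmFeed;
using search::bmcluster::BmRange;
using search::bmcluster::BucketSelector;
using search::bmcluster::IBmFeedHandler;

// Generate a put feed for one worker thread and replay it against a feed handler.
void feed_one_thread(BmFeed& feed, IBmFeedHandler& feed_handler)
{
    BmRange range(0, 1000);                                // documents 0..999 (illustrative)
    BucketSelector selector(0, 1, feed.num_buckets());     // single thread owning all buckets
    auto serialized = feed.make_put_feed(range, selector);
    // Replay with at most 16 operations in flight; time_bias 0 is a placeholder value.
    feed.put_async_task(feed_handler, 16, range, serialized, 0);
}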
diff --git a/searchcore/src/apps/vespa-feed-bm/bm_message_bus.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.cpp
index b608593dada..d947ca5e109 100644
--- a/searchcore/src/apps/vespa-feed-bm/bm_message_bus.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.cpp
@@ -24,7 +24,7 @@ using mbus::SourceSession;
using storage::mbusprot::StorageProtocol;
using storage::mbusprot::StorageReply;
-namespace feedbm {
+namespace search::bmcluster {
namespace {
@@ -111,7 +111,9 @@ BmMessageBus::ReplyHandler::handleReply(std::unique_ptr<Reply> reply)
}
if (failed) {
++_errors;
- LOG(error, "Unexpected %s", reply_as_string(*reply).c_str());
+ if (_errors <= 10) {
+ LOG(error, "Unexpected %s", reply_as_string(*reply).c_str());
+ }
}
tracker->release();
} else {
diff --git a/searchcore/src/apps/vespa-feed-bm/bm_message_bus.h b/searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.h
index a9cff1fb826..7829a4e4946 100644
--- a/searchcore/src/apps/vespa-feed-bm/bm_message_bus.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_message_bus.h
@@ -16,12 +16,12 @@ class SourceSession;
}
-namespace feedbm {
+namespace search::bmcluster {
class PendingTracker;
/*
- * Message bus for feed benchmark program.
+ * Message bus for benchmark cluster.
*/
class BmMessageBus
{
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp
new file mode 100644
index 00000000000..84bd921620d
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node.cpp
@@ -0,0 +1,738 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "bm_node.h"
+#include "bm_cluster.h"
+#include "bm_cluster_params.h"
+#include "bm_message_bus.h"
+#include "bm_storage_chain_builder.h"
+#include "bm_storage_link_context.h"
+#include "storage_api_chain_bm_feed_handler.h"
+#include "storage_api_message_bus_bm_feed_handler.h"
+#include "storage_api_rpc_bm_feed_handler.h"
+#include "document_api_message_bus_bm_feed_handler.h"
+#include "i_bm_feed_handler.h"
+#include "spi_bm_feed_handler.h"
+#include <vespa/config-attributes.h>
+#include <vespa/config-bucketspaces.h>
+#include <vespa/config-imported-fields.h>
+#include <vespa/config-indexschema.h>
+#include <vespa/config-persistence.h>
+#include <vespa/config-rank-profiles.h>
+#include <vespa/config-slobroks.h>
+#include <vespa/config-stor-distribution.h>
+#include <vespa/config-stor-filestor.h>
+#include <vespa/config-summary.h>
+#include <vespa/config-summarymap.h>
+#include <vespa/config-upgrading.h>
+#include <vespa/config/common/configcontext.h>
+#include <vespa/document/bucket/bucketspace.h>
+#include <vespa/document/fieldset/fieldsetrepo.h>
+#include <vespa/document/repo/configbuilder.h>
+#include <vespa/document/repo/document_type_repo_factory.h>
+#include <vespa/document/repo/documenttyperepo.h>
+#include <vespa/document/test/make_bucket_space.h>
+#include <vespa/messagebus/config-messagebus.h>
+#include <vespa/messagebus/testlib/slobrok.h>
+#include <vespa/metrics/config-metricsmanager.h>
+#include <vespa/searchcommon/common/schemaconfigurer.h>
+#include <vespa/searchcore/proton/common/alloc_config.h>
+#include <vespa/searchcore/proton/matching/querylimiter.h>
+#include <vespa/searchcore/proton/metrics/metricswireservice.h>
+#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h>
+#include <vespa/searchcore/proton/persistenceengine/i_resource_write_filter.h>
+#include <vespa/searchcore/proton/persistenceengine/persistenceengine.h>
+#include <vespa/searchcore/proton/server/bootstrapconfig.h>
+#include <vespa/searchcore/proton/server/documentdb.h>
+#include <vespa/searchcore/proton/server/document_db_maintenance_config.h>
+#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
+#include <vespa/searchcore/proton/server/fileconfigmanager.h>
+#include <vespa/searchcore/proton/server/memoryconfigstore.h>
+#include <vespa/searchcore/proton/server/persistencehandlerproxy.h>
+#include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h>
+#include <vespa/searchlib/index/dummyfileheadercontext.h>
+#include <vespa/searchlib/transactionlog/translogserver.h>
+#include <vespa/searchsummary/config/config-juniperrc.h>
+#include <vespa/storage/bucketdb/config-stor-bucket-init.h>
+#include <vespa/storage/common/i_storage_chain_builder.h>
+#include <vespa/storage/config/config-stor-bouncer.h>
+#include <vespa/storage/config/config-stor-communicationmanager.h>
+#include <vespa/storage/config/config-stor-distributormanager.h>
+#include <vespa/storage/config/config-stor-opslogger.h>
+#include <vespa/storage/config/config-stor-prioritymapping.h>
+#include <vespa/storage/config/config-stor-server.h>
+#include <vespa/storage/config/config-stor-status.h>
+#include <vespa/storage/config/config-stor-visitordispatcher.h>
+#include <vespa/storage/storageserver/rpc/shared_rpc_resources.h>
+#include <vespa/storage/visiting/config-stor-visitor.h>
+#include <vespa/storageserver/app/distributorprocess.h>
+#include <vespa/storageserver/app/servicelayerprocess.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <vespa/vespalib/stllike/asciistream.h>
+#include <vespa/vespalib/util/size_literals.h>
+#include <tests/proton/common/dummydbowner.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".bmcluster.bm_node");
+
+using cloud::config::SlobroksConfigBuilder;
+using cloud::config::filedistribution::FiledistributorrpcConfig;
+using config::ConfigSet;
+using document::BucketSpace;
+using document::DocumenttypesConfig;
+using document::DocumenttypesConfigBuilder;
+using document::DocumentType;
+using document::DocumentTypeRepo;
+using document::Field;
+using messagebus::MessagebusConfigBuilder;
+using metrics::MetricsmanagerConfigBuilder;
+using proton::BootstrapConfig;
+using proton::DocTypeName;
+using proton::DocumentDB;
+using proton::DocumentDBConfig;
+using proton::HwInfo;
+using search::index::Schema;
+using search::index::SchemaBuilder;
+using search::transactionlog::TransLogServer;
+using storage::rpc::SharedRpcResources;
+using storage::rpc::StorageApiRpcService;
+using storage::spi::PersistenceProvider;
+using vespa::config::content::PersistenceConfigBuilder;
+using vespa::config::content::StorDistributionConfigBuilder;
+using vespa::config::content::StorFilestorConfigBuilder;
+using vespa::config::content::UpgradingConfigBuilder;
+using vespa::config::content::core::BucketspacesConfig;
+using vespa::config::content::core::BucketspacesConfigBuilder;
+using vespa::config::content::core::StorBouncerConfigBuilder;
+using vespa::config::content::core::StorBucketInitConfigBuilder;
+using vespa::config::content::core::StorCommunicationmanagerConfigBuilder;
+using vespa::config::content::core::StorDistributormanagerConfigBuilder;
+using vespa::config::content::core::StorOpsloggerConfigBuilder;
+using vespa::config::content::core::StorPrioritymappingConfigBuilder;
+using vespa::config::content::core::StorServerConfigBuilder;
+using vespa::config::content::core::StorStatusConfigBuilder;
+using vespa::config::content::core::StorVisitorConfigBuilder;
+using vespa::config::content::core::StorVisitordispatcherConfigBuilder;
+using vespa::config::search::AttributesConfig;
+using vespa::config::search::AttributesConfigBuilder;
+using vespa::config::search::ImportedFieldsConfig;
+using vespa::config::search::IndexschemaConfig;
+using vespa::config::search::RankProfilesConfig;
+using vespa::config::search::SummaryConfig;
+using vespa::config::search::SummarymapConfig;
+using vespa::config::search::core::ProtonConfig;
+using vespa::config::search::core::ProtonConfigBuilder;
+using vespa::config::search::summary::JuniperrcConfig;
+using vespalib::compression::CompressionConfig;
+
+namespace search::bmcluster {
+
+namespace {
+
+enum PortBias
+{
+ TLS_LISTEN_PORT,
+ SERVICE_LAYER_MBUS_PORT,
+ SERVICE_LAYER_RPC_PORT,
+ SERVICE_LAYER_STATUS_PORT,
+ DISTRIBUTOR_MBUS_PORT,
+ DISTRIBUTOR_RPC_PORT,
+ DISTRIBUTOR_STATUS_PORT,
+ NUM_PORTS,
+
+};
+
+int port_number(int base_port, PortBias bias)
+{
+ return base_port + static_cast<int>(bias);
+}
+
+storage::spi::Context context(storage::spi::Priority(0), 0);
+
+}
+
+std::shared_ptr<AttributesConfig> make_attributes_config() {
+ AttributesConfigBuilder builder;
+ AttributesConfig::Attribute attribute;
+ attribute.name = "int";
+ attribute.datatype = AttributesConfig::Attribute::Datatype::INT32;
+ builder.attribute.emplace_back(attribute);
+ return std::make_shared<AttributesConfig>(builder);
+}
+
+std::shared_ptr<DocumentDBConfig> make_document_db_config(std::shared_ptr<DocumenttypesConfig> document_types, std::shared_ptr<const DocumentTypeRepo> repo, const DocTypeName& doc_type_name)
+{
+ auto indexschema = std::make_shared<IndexschemaConfig>();
+ auto attributes = make_attributes_config();
+ auto summary = std::make_shared<SummaryConfig>();
+ std::shared_ptr<Schema> schema(new Schema());
+ SchemaBuilder::build(*indexschema, *schema);
+ SchemaBuilder::build(*attributes, *schema);
+ SchemaBuilder::build(*summary, *schema);
+ return std::make_shared<DocumentDBConfig>(
+ 1,
+ std::make_shared<RankProfilesConfig>(),
+ std::make_shared<proton::matching::RankingConstants>(),
+ std::make_shared<proton::matching::RankingExpressions>(),
+ std::make_shared<proton::matching::OnnxModels>(),
+ indexschema,
+ attributes,
+ summary,
+ std::make_shared<SummarymapConfig>(),
+ std::make_shared<JuniperrcConfig>(),
+ document_types,
+ repo,
+ std::make_shared<ImportedFieldsConfig>(),
+ std::make_shared<TuneFileDocumentDB>(),
+ schema,
+ std::make_shared<proton::DocumentDBMaintenanceConfig>(),
+ search::LogDocumentStore::Config(),
+ std::make_shared<const proton::ThreadingServiceConfig>(proton::ThreadingServiceConfig::make(1)),
+ std::make_shared<const proton::AllocConfig>(),
+ "client",
+ doc_type_name.getName());
+}
+
+void
+make_slobroks_config(SlobroksConfigBuilder& slobroks, int slobrok_port)
+{
+ SlobroksConfigBuilder::Slobrok slobrok;
+ slobrok.connectionspec = vespalib::make_string("tcp/localhost:%d", slobrok_port);
+ slobroks.slobrok.push_back(std::move(slobrok));
+}
+
+void
+make_bucketspaces_config(BucketspacesConfigBuilder& bucketspaces)
+{
+ BucketspacesConfigBuilder::Documenttype bucket_space_map;
+ bucket_space_map.name = "test";
+ bucket_space_map.bucketspace = "default";
+ bucketspaces.documenttype.emplace_back(std::move(bucket_space_map));
+}
+
+class MyPersistenceEngineOwner : public proton::IPersistenceEngineOwner
+{
+ void setClusterState(BucketSpace, const storage::spi::ClusterState&) override { }
+};
+
+struct MyResourceWriteFilter : public proton::IResourceWriteFilter
+{
+ bool acceptWriteOperation() const override { return true; }
+ State getAcceptState() const override { return IResourceWriteFilter::State(); }
+};
+
+class MyServiceLayerProcess : public storage::ServiceLayerProcess {
+ PersistenceProvider& _provider;
+
+public:
+ MyServiceLayerProcess(const config::ConfigUri& configUri,
+ PersistenceProvider& provider,
+ std::unique_ptr<storage::IStorageChainBuilder> chain_builder);
+ ~MyServiceLayerProcess() override { shutdown(); }
+
+ void shutdown() override;
+ void setupProvider() override;
+ PersistenceProvider& getProvider() override;
+};
+
+MyServiceLayerProcess::MyServiceLayerProcess(const config::ConfigUri& configUri,
+ PersistenceProvider& provider,
+ std::unique_ptr<storage::IStorageChainBuilder> chain_builder)
+ : ServiceLayerProcess(configUri),
+ _provider(provider)
+{
+ if (chain_builder) {
+ set_storage_chain_builder(std::move(chain_builder));
+ }
+}
+
+void
+MyServiceLayerProcess::shutdown()
+{
+ ServiceLayerProcess::shutdown();
+}
+
+void
+MyServiceLayerProcess::setupProvider()
+{
+}
+
+PersistenceProvider&
+MyServiceLayerProcess::getProvider()
+{
+ return _provider;
+}
+
+struct StorageConfigSet
+{
+ vespalib::string config_id;
+ DocumenttypesConfigBuilder documenttypes;
+ StorDistributionConfigBuilder stor_distribution;
+ StorBouncerConfigBuilder stor_bouncer;
+ StorCommunicationmanagerConfigBuilder stor_communicationmanager;
+ StorOpsloggerConfigBuilder stor_opslogger;
+ StorPrioritymappingConfigBuilder stor_prioritymapping;
+ UpgradingConfigBuilder upgrading;
+ StorServerConfigBuilder stor_server;
+ StorStatusConfigBuilder stor_status;
+ BucketspacesConfigBuilder bucketspaces;
+ MetricsmanagerConfigBuilder metricsmanager;
+ SlobroksConfigBuilder slobroks;
+ MessagebusConfigBuilder messagebus;
+
+ StorageConfigSet(const vespalib::string &base_dir, unsigned int node_idx, bool distributor, const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in,
+ int slobrok_port, int mbus_port, int rpc_port, int status_port, const BmClusterParams& params)
+ : config_id(config_id_in),
+ documenttypes(documenttypes_in),
+ stor_distribution(),
+ stor_bouncer(),
+ stor_communicationmanager(),
+ stor_opslogger(),
+ stor_prioritymapping(),
+ upgrading(),
+ stor_server(),
+ stor_status(),
+ bucketspaces(),
+ metricsmanager(),
+ slobroks(),
+ messagebus()
+ {
+ {
+ auto& dc = stor_distribution;
+ {
+ StorDistributionConfigBuilder::Group group;
+ {
+ for (unsigned int i = 0; i < params.get_num_nodes(); ++i) {
+ StorDistributionConfigBuilder::Group::Nodes node;
+ node.index = i;
+ group.nodes.push_back(std::move(node));
+ }
+ }
+ group.index = "invalid";
+ group.name = "invalid";
+ group.capacity = 1.0;
+ group.partitions = "";
+ dc.group.push_back(std::move(group));
+ }
+ dc.redundancy = 1;
+ dc.readyCopies = 1;
+ }
+ stor_server.nodeIndex = node_idx;
+ stor_server.isDistributor = distributor;
+ stor_server.contentNodeBucketDbStripeBits = params.get_bucket_db_stripe_bits();
+ if (distributor) {
+ stor_server.rootFolder = base_dir + "/distributor";
+ } else {
+ stor_server.rootFolder = base_dir + "/storage";
+ }
+ make_slobroks_config(slobroks, slobrok_port);
+ stor_communicationmanager.rpc.numNetworkThreads = params.get_rpc_network_threads();
+ stor_communicationmanager.rpc.eventsBeforeWakeup = params.get_rpc_events_before_wakeup();
+ stor_communicationmanager.rpc.numTargetsPerNode = params.get_rpc_targets_per_node();
+ stor_communicationmanager.mbusport = mbus_port;
+ stor_communicationmanager.rpcport = rpc_port;
+ stor_communicationmanager.skipThread = params.get_skip_communicationmanager_thread();
+
+ stor_status.httpport = status_port;
+ make_bucketspaces_config(bucketspaces);
+ }
+
+ ~StorageConfigSet();
+
+ void add_builders(ConfigSet& set) {
+ set.addBuilder(config_id, &documenttypes);
+ set.addBuilder(config_id, &stor_distribution);
+ set.addBuilder(config_id, &stor_bouncer);
+ set.addBuilder(config_id, &stor_communicationmanager);
+ set.addBuilder(config_id, &stor_opslogger);
+ set.addBuilder(config_id, &stor_prioritymapping);
+ set.addBuilder(config_id, &upgrading);
+ set.addBuilder(config_id, &stor_server);
+ set.addBuilder(config_id, &stor_status);
+ set.addBuilder(config_id, &bucketspaces);
+ set.addBuilder(config_id, &metricsmanager);
+ set.addBuilder(config_id, &slobroks);
+ set.addBuilder(config_id, &messagebus);
+ }
+};
+
+StorageConfigSet::~StorageConfigSet() = default;
+
+struct ServiceLayerConfigSet : public StorageConfigSet
+{
+ PersistenceConfigBuilder persistence;
+ StorFilestorConfigBuilder stor_filestor;
+ StorBucketInitConfigBuilder stor_bucket_init;
+ StorVisitorConfigBuilder stor_visitor;
+
+ ServiceLayerConfigSet(const vespalib::string& base_dir, unsigned int node_idx, const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in,
+ int slobrok_port, int mbus_port, int rpc_port, int status_port, const BmClusterParams& params)
+ : StorageConfigSet(base_dir, node_idx, false, config_id_in, documenttypes_in, slobrok_port, mbus_port, rpc_port, status_port, params),
+ persistence(),
+ stor_filestor(),
+ stor_bucket_init(),
+ stor_visitor()
+ {
+ stor_filestor.numResponseThreads = params.get_response_threads();
+ stor_filestor.numNetworkThreads = params.get_rpc_network_threads();
+ stor_filestor.useAsyncMessageHandlingOnSchedule = params.get_use_async_message_handling_on_schedule();
+ }
+
+ ~ServiceLayerConfigSet();
+
+ void add_builders(ConfigSet& set) {
+ StorageConfigSet::add_builders(set);
+ set.addBuilder(config_id, &persistence);
+ set.addBuilder(config_id, &stor_filestor);
+ set.addBuilder(config_id, &stor_bucket_init);
+ set.addBuilder(config_id, &stor_visitor);
+ }
+};
+
+ServiceLayerConfigSet::~ServiceLayerConfigSet() = default;
+
+struct DistributorConfigSet : public StorageConfigSet
+{
+ StorDistributormanagerConfigBuilder stor_distributormanager;
+ StorVisitordispatcherConfigBuilder stor_visitordispatcher;
+
+ DistributorConfigSet(const vespalib::string& base_dir, unsigned int node_idx, const vespalib::string& config_id_in, const DocumenttypesConfig& documenttypes_in,
+ int slobrok_port, int mbus_port, int rpc_port, int status_port, const BmClusterParams& params)
+ : StorageConfigSet(base_dir, node_idx, true, config_id_in, documenttypes_in, slobrok_port, mbus_port, rpc_port, status_port, params),
+ stor_distributormanager(),
+ stor_visitordispatcher()
+ {
+ stor_distributormanager.numDistributorStripes = params.get_distributor_stripes();
+ }
+
+ ~DistributorConfigSet();
+
+ void add_builders(ConfigSet& set) {
+ StorageConfigSet::add_builders(set);
+ set.addBuilder(config_id, &stor_distributormanager);
+ set.addBuilder(config_id, &stor_visitordispatcher);
+ }
+};
+
+DistributorConfigSet::~DistributorConfigSet() = default;
+
+BmNode::BmNode() = default;
+
+BmNode::~BmNode() = default;
+
+class MyBmNode : public BmNode
+{
+ BmCluster& _cluster;
+ std::shared_ptr<DocumenttypesConfig> _document_types;
+ std::shared_ptr<const DocumentTypeRepo> _repo;
+ proton::DocTypeName _doc_type_name;
+ std::shared_ptr<DocumentDBConfig> _document_db_config;
+ vespalib::string _base_dir;
+ search::index::DummyFileHeaderContext _file_header_context;
+ unsigned int _node_idx;
+ int _tls_listen_port;
+ int _slobrok_port;
+ int _service_layer_mbus_port;
+ int _service_layer_rpc_port;
+ int _service_layer_status_port;
+ int _distributor_mbus_port;
+ int _distributor_rpc_port;
+ int _distributor_status_port;
+ TransLogServer _tls;
+ vespalib::string _tls_spec;
+ proton::matching::QueryLimiter _query_limiter;
+ vespalib::Clock _clock;
+ proton::DummyWireService _metrics_wire_service;
+ proton::MemoryConfigStores _config_stores;
+ vespalib::ThreadStackExecutor _summary_executor;
+ proton::DummyDBOwner _document_db_owner;
+ BucketSpace _bucket_space;
+ std::shared_ptr<DocumentDB> _document_db;
+ MyPersistenceEngineOwner _persistence_owner;
+ MyResourceWriteFilter _write_filter;
+ proton::test::DiskMemUsageNotifier _disk_mem_usage_notifier;
+ std::shared_ptr<proton::PersistenceEngine> _persistence_engine;
+ std::unique_ptr<const document::FieldSetRepo> _field_set_repo;
+ ServiceLayerConfigSet _service_layer_config;
+ DistributorConfigSet _distributor_config;
+ ConfigSet _config_set;
+ std::shared_ptr<config::IConfigContext> _config_context;
+ std::unique_ptr<IBmFeedHandler> _feed_handler;
+ std::unique_ptr<mbus::Slobrok> _slobrok;
+ std::shared_ptr<BmStorageLinkContext> _service_layer_chain_context;
+ std::unique_ptr<MyServiceLayerProcess> _service_layer;
+ std::shared_ptr<BmStorageLinkContext> _distributor_chain_context;
+ std::unique_ptr<storage::DistributorProcess> _distributor;
+
+ void create_document_db(const BmClusterParams& params);
+public:
+ MyBmNode(const vespalib::string &base_dir, int base_port, unsigned int node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<document::DocumenttypesConfig> document_types, int slobrok_port);
+ ~MyBmNode() override;
+ void initialize_persistence_provider() override;
+ void create_bucket(const document::Bucket& bucket) override;
+ void start_service_layer(const BmClusterParams& params) override;
+ void wait_service_layer() override;
+ void start_distributor(const BmClusterParams& params) override;
+ void create_feed_handler(const BmClusterParams& params) override;
+ void shutdown_feed_handler() override;
+ void shutdown_distributor() override;
+ void shutdown_service_layer() override;
+ void wait_service_layer_slobrok() override;
+ void wait_distributor_slobrok() override;
+ IBmFeedHandler* get_feed_handler() override;
+ PersistenceProvider* get_persistence_provider() override;
+};
+
+MyBmNode::MyBmNode(const vespalib::string& base_dir, int base_port, unsigned int node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<document::DocumenttypesConfig> document_types, int slobrok_port)
+ : BmNode(),
+ _cluster(cluster),
+ _document_types(std::move(document_types)),
+ _repo(document::DocumentTypeRepoFactory::make(*_document_types)),
+ _doc_type_name("test"),
+ _document_db_config(make_document_db_config(_document_types, _repo, _doc_type_name)),
+ _base_dir(base_dir),
+ _file_header_context(),
+ _node_idx(node_idx),
+ _tls_listen_port(port_number(base_port, PortBias::TLS_LISTEN_PORT)),
+ _slobrok_port(slobrok_port),
+ _service_layer_mbus_port(port_number(base_port, PortBias::SERVICE_LAYER_MBUS_PORT)),
+ _service_layer_rpc_port(port_number(base_port, PortBias::SERVICE_LAYER_RPC_PORT)),
+ _service_layer_status_port(port_number(base_port, PortBias::SERVICE_LAYER_STATUS_PORT)),
+ _distributor_mbus_port(port_number(base_port, PortBias::DISTRIBUTOR_MBUS_PORT)),
+ _distributor_rpc_port(port_number(base_port, PortBias::DISTRIBUTOR_RPC_PORT)),
+ _distributor_status_port(port_number(base_port, PortBias::DISTRIBUTOR_STATUS_PORT)),
+ _tls("tls", _tls_listen_port, _base_dir, _file_header_context),
+ _tls_spec(vespalib::make_string("tcp/localhost:%d", _tls_listen_port)),
+ _query_limiter(),
+ _clock(),
+ _metrics_wire_service(),
+ _config_stores(),
+ _summary_executor(8, 128_Ki),
+ _document_db_owner(),
+ _bucket_space(document::test::makeBucketSpace(_doc_type_name.getName())),
+ _document_db(),
+ _persistence_owner(),
+ _write_filter(),
+ _disk_mem_usage_notifier(),
+ _persistence_engine(),
+ _field_set_repo(std::make_unique<const document::FieldSetRepo>(*_repo)),
+ _service_layer_config(_base_dir, _node_idx, "bm-servicelayer", *_document_types, _slobrok_port, _service_layer_mbus_port, _service_layer_rpc_port, _service_layer_status_port, params),
+ _distributor_config(_base_dir, _node_idx, "bm-distributor", *_document_types, _slobrok_port, _distributor_mbus_port, _distributor_rpc_port, _distributor_status_port, params),
+ _config_set(),
+ _config_context(std::make_shared<config::ConfigContext>(_config_set)),
+ _feed_handler(),
+ _slobrok(),
+ _service_layer_chain_context(),
+ _service_layer(),
+ _distributor_chain_context(),
+ _distributor()
+{
+ create_document_db(params);
+ _persistence_engine = std::make_unique<proton::PersistenceEngine>(_persistence_owner, _write_filter, _disk_mem_usage_notifier, -1, false);
+ auto proxy = std::make_shared<proton::PersistenceHandlerProxy>(_document_db);
+ _persistence_engine->putHandler(_persistence_engine->getWLock(), _bucket_space, _doc_type_name, proxy);
+ _service_layer_config.add_builders(_config_set);
+ _distributor_config.add_builders(_config_set);
+ _feed_handler = std::make_unique<SpiBmFeedHandler>(*_persistence_engine, *_field_set_repo, params.get_skip_get_spi_bucket_info());
+}
+
+MyBmNode::~MyBmNode()
+{
+ if (_persistence_engine) {
+ _persistence_engine->destroyIterators();
+ _persistence_engine->removeHandler(_persistence_engine->getWLock(), _bucket_space, _doc_type_name);
+ }
+ if (_document_db) {
+ _document_db->close();
+ }
+}
+
+void
+MyBmNode::create_document_db(const BmClusterParams& params)
+{
+ vespalib::mkdir(_base_dir, false);
+ vespalib::mkdir(_base_dir + "/" + _doc_type_name.getName(), false);
+ vespalib::string input_cfg = _base_dir + "/" + _doc_type_name.getName() + "/baseconfig";
+ {
+ proton::FileConfigManager fileCfg(input_cfg, "", _doc_type_name.getName());
+ fileCfg.saveConfig(*_document_db_config, 1);
+ }
+ config::DirSpec spec(input_cfg + "/config-1");
+ auto tuneFileDocDB = std::make_shared<TuneFileDocumentDB>();
+ proton::DocumentDBConfigHelper mgr(spec, _doc_type_name.getName());
+ auto protonCfg = std::make_shared<ProtonConfigBuilder>();
+ if ( ! params.get_indexing_sequencer().empty()) {
+ vespalib::string sequencer = params.get_indexing_sequencer();
+ std::transform(sequencer.begin(), sequencer.end(), sequencer.begin(), [](unsigned char c){ return std::toupper(c); });
+ protonCfg->indexing.optimize = ProtonConfig::Indexing::getOptimize(sequencer);
+ }
+ auto bootstrap_config = std::make_shared<BootstrapConfig>(1,
+ _document_types,
+ _repo,
+ std::move(protonCfg),
+ std::make_shared<FiledistributorrpcConfig>(),
+ std::make_shared<BucketspacesConfig>(),
+ tuneFileDocDB, HwInfo());
+ mgr.forwardConfig(bootstrap_config);
+ mgr.nextGeneration(0ms);
+ _document_db = DocumentDB::create(_base_dir, mgr.getConfig(), _tls_spec, _query_limiter, _clock, _doc_type_name,
+ _bucket_space, *bootstrap_config->getProtonConfigSP(), _document_db_owner,
+ _summary_executor, _summary_executor, *_persistence_engine, _tls,
+ _metrics_wire_service, _file_header_context,
+ _config_stores.getConfigStore(_doc_type_name.toString()),
+ std::make_shared<vespalib::ThreadStackExecutor>(16, 128_Ki), HwInfo());
+ _document_db->start();
+ _document_db->waitForOnlineState();
+}
+
+void
+MyBmNode::initialize_persistence_provider()
+{
+ get_persistence_provider()->initialize();
+}
+
+void
+MyBmNode::create_bucket(const document::Bucket& bucket)
+{
+ get_persistence_provider()->createBucket(storage::spi::Bucket(bucket), context);
+}
+
+void
+MyBmNode::start_service_layer(const BmClusterParams& params)
+{
+ config::ConfigUri config_uri("bm-servicelayer", _config_context);
+ std::unique_ptr<BmStorageChainBuilder> chain_builder;
+ if (params.get_use_storage_chain() && !params.needs_distributor()) {
+ chain_builder = std::make_unique<BmStorageChainBuilder>();
+ _service_layer_chain_context = chain_builder->get_context();
+ }
+ _service_layer = std::make_unique<MyServiceLayerProcess>(config_uri,
+ *_persistence_engine,
+ std::move(chain_builder));
+ _service_layer->setupConfig(100ms);
+ _service_layer->createNode();
+}
+
+void
+MyBmNode::wait_service_layer()
+{
+ _service_layer->getNode().waitUntilInitialized();
+}
+
+void
+MyBmNode::start_distributor(const BmClusterParams& params)
+{
+ config::ConfigUri config_uri("bm-distributor", _config_context);
+ std::unique_ptr<BmStorageChainBuilder> chain_builder;
+ if (params.get_use_storage_chain() && !params.get_use_document_api()) {
+ chain_builder = std::make_unique<BmStorageChainBuilder>();
+ _distributor_chain_context = chain_builder->get_context();
+ }
+ _distributor = std::make_unique<storage::DistributorProcess>(config_uri);
+ if (chain_builder) {
+ _distributor->set_storage_chain_builder(std::move(chain_builder));
+ }
+ _distributor->setupConfig(100ms);
+ _distributor->createNode();
+}
+
+void
+MyBmNode::create_feed_handler(const BmClusterParams& params)
+{
+ StorageApiRpcService::Params rpc_params;
+ // This is the same compression config as the default in stor-communicationmanager.def.
+ rpc_params.compression_config = CompressionConfig(CompressionConfig::Type::LZ4, 3, 90, 1024);
+ rpc_params.num_rpc_targets_per_node = params.get_rpc_targets_per_node();
+ if (params.get_use_document_api()) {
+ _feed_handler = std::make_unique<DocumentApiMessageBusBmFeedHandler>(_cluster.get_message_bus());
+ } else if (params.get_enable_distributor()) {
+ if (params.get_use_storage_chain()) {
+ assert(_distributor_chain_context);
+ _feed_handler = std::make_unique<StorageApiChainBmFeedHandler>(_distributor_chain_context, true);
+ } else if (params.get_use_message_bus()) {
+ _feed_handler = std::make_unique<StorageApiMessageBusBmFeedHandler>(_cluster.get_message_bus(), true);
+ } else {
+ _feed_handler = std::make_unique<StorageApiRpcBmFeedHandler>(_cluster.get_rpc_client(), _repo, rpc_params, true);
+ }
+ } else if (params.needs_service_layer()) {
+ if (params.get_use_storage_chain()) {
+ assert(_service_layer_chain_context);
+ _feed_handler = std::make_unique<StorageApiChainBmFeedHandler>(_service_layer_chain_context, false);
+ } else if (params.get_use_message_bus()) {
+ _feed_handler = std::make_unique<StorageApiMessageBusBmFeedHandler>(_cluster.get_message_bus(), false);
+ } else {
+ _feed_handler = std::make_unique<StorageApiRpcBmFeedHandler>(_cluster.get_rpc_client(), _repo, rpc_params, false);
+ }
+ }
+}
+
+void
+MyBmNode::shutdown_feed_handler()
+{
+ _feed_handler.reset();
+}
+
+void
+MyBmNode::shutdown_distributor()
+{
+ if (_distributor) {
+ LOG(info, "stop distributor");
+ _distributor->getNode().requestShutdown("controlled shutdown");
+ _distributor->shutdown();
+ }
+}
+
+void
+MyBmNode::shutdown_service_layer()
+{
+ if (_service_layer) {
+ LOG(info, "stop service layer");
+ _service_layer->getNode().requestShutdown("controlled shutdown");
+ _service_layer->shutdown();
+ }
+}
+
+IBmFeedHandler*
+MyBmNode::get_feed_handler()
+{
+ return _feed_handler.get();
+}
+
+PersistenceProvider*
+MyBmNode::get_persistence_provider()
+{
+ return _persistence_engine.get();
+}
+
+void
+MyBmNode::wait_service_layer_slobrok()
+{
+ vespalib::asciistream s;
+ s << "storage/cluster.storage/storage/" << _node_idx;
+ _cluster.wait_slobrok(s.str());
+ s << "/default";
+ _cluster.wait_slobrok(s.str());
+}
+
+void
+MyBmNode::wait_distributor_slobrok()
+{
+ vespalib::asciistream s;
+ s << "storage/cluster.storage/distributor/" << _node_idx;
+ _cluster.wait_slobrok(s.str());
+ s << "/default";
+ _cluster.wait_slobrok(s.str());
+}
+
+unsigned int
+BmNode::num_ports()
+{
+ return static_cast<unsigned int>(PortBias::NUM_PORTS);
+}
+
+std::unique_ptr<BmNode>
+BmNode::create(const vespalib::string& base_dir, int base_port, unsigned int node_idx, BmCluster &cluster, const BmClusterParams& params, std::shared_ptr<document::DocumenttypesConfig> document_types, int slobrok_port)
+{
+ return std::make_unique<MyBmNode>(base_dir, base_port, node_idx, cluster, params, std::move(document_types), slobrok_port);
+}
+
+}
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_node.h b/searchcore/src/vespa/searchcore/bmcluster/bm_node.h
new file mode 100644
index 00000000000..3647981f58b
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_node.h
@@ -0,0 +1,53 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <memory>
+#include <vespa/searchcore/proton/common/doctypename.h>
+
+namespace document {
+
+class Bucket;
+class DocumentTypeRepo;
+class DocumentType;
+class Field;
+
+};
+
+namespace document::internal { class InternalDocumenttypesType; }
+
+namespace storage::spi { struct PersistenceProvider; }
+
+namespace search::bmcluster {
+
+class BmCluster;
+class BmClusterParams;
+class IBmFeedHandler;
+
+/*
+ * Class representing a single benchmark node in a benchmark cluster.
+ */
+class BmNode {
+protected:
+
+ BmNode();
+public:
+ virtual ~BmNode();
+ virtual void initialize_persistence_provider() = 0;
+ virtual void create_bucket(const document::Bucket& bucket) = 0;
+ virtual void start_service_layer(const BmClusterParams& params) = 0;
+ virtual void wait_service_layer() = 0;
+ virtual void start_distributor(const BmClusterParams& params) = 0;
+ virtual void create_feed_handler(const BmClusterParams& params) = 0;
+ virtual void shutdown_feed_handler() = 0;
+ virtual void shutdown_distributor() = 0;
+ virtual void shutdown_service_layer() = 0;
+ virtual void wait_service_layer_slobrok() = 0;
+ virtual void wait_distributor_slobrok() = 0;
+ virtual IBmFeedHandler* get_feed_handler() = 0;
+ virtual storage::spi::PersistenceProvider *get_persistence_provider() = 0;
+ static unsigned int num_ports();
+ static std::unique_ptr<BmNode> create(const vespalib::string &base_dir, int base_port, unsigned int node_idx, BmCluster& cluster, const BmClusterParams& params, std::shared_ptr<const document::internal::InternalDocumenttypesType> document_types, int slobrok_port);
+};
+
+}
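
The pure virtual interface above implies a start-up and tear-down order, but the driver that calls it is not part of this diff. A hedged sketch of the assumed sequence, mirroring what MyBmNode in bm_node.cpp supports, could look like this:

// Illustrative only: the real orchestration lives in BmCluster / the benchmark
// driver, and may skip steps depending on BmClusterParams.
void run_node(search::bmcluster::BmNode& node,
              const search::bmcluster::BmClusterParams& params)
{
    node.initialize_persistence_provider();
    node.start_service_layer(params);
    node.wait_service_layer();
    node.wait_service_layer_slobrok();
    node.start_distributor(params);
    node.wait_distributor_slobrok();
    node.create_feed_handler(params);
    // ... run the feed benchmark via node.get_feed_handler() ...
    node.shutdown_feed_handler();
    node.shutdown_distributor();
    node.shutdown_service_layer();
}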
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bm_range.h b/searchcore/src/vespa/searchcore/bmcluster/bm_range.h
new file mode 100644
index 00000000000..fda2b1c52b6
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_range.h
@@ -0,0 +1,24 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace search::bmcluster {
+
+/*
+ * Range of document "keys" (indexes) used when generating documents.
+ */
+class BmRange
+{
+ uint32_t _start;
+ uint32_t _end;
+public:
+ BmRange(uint32_t start_in, uint32_t end_in)
+ : _start(start_in),
+ _end(end_in)
+ {
+ }
+ uint32_t get_start() const { return _start; }
+ uint32_t get_end() const { return _end; }
+};
+
+}
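
A range is typically one thread's slice of the total document set; how the slicing is done is not shown in this diff, so the helper below is only a plausible sketch.

// Split num_docs documents into per-thread ranges; the last thread takes any remainder.
search::bmcluster::BmRange
make_thread_range(uint32_t num_docs, uint32_t thread_id, uint32_t threads)
{
    uint32_t per_thread = num_docs / threads;
    uint32_t start = thread_id * per_thread;
    uint32_t end = (thread_id + 1 == threads) ? num_docs : (start + per_thread);
    return search::bmcluster::BmRange(start, end);
}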
diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.cpp
index bbe0de70ce2..16883e1cc48 100644
--- a/searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.cpp
@@ -7,7 +7,7 @@
#include <vespa/log/log.h>
LOG_SETUP(".bm_storage_chain_builder");
-namespace feedbm {
+namespace search::bmcluster {
BmStorageChainBuilder::BmStorageChainBuilder()
: storage::StorageChainBuilder(),
diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.h b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.h
index bba933da9e0..c61cb200c36 100644
--- a/searchcore/src/apps/vespa-feed-bm/bm_storage_chain_builder.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_chain_builder.h
@@ -4,7 +4,7 @@
#include <vespa/storage/common/storage_chain_builder.h>
-namespace feedbm {
+namespace search::bmcluster {
struct BmStorageLinkContext;
diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_link.cpp b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.cpp
index 2aeda91c30c..c251c25b15d 100644
--- a/searchcore/src/apps/vespa-feed-bm/bm_storage_link.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.cpp
@@ -3,7 +3,7 @@
#include "bm_storage_link.h"
#include "pending_tracker.h"
-namespace feedbm {
+namespace search::bmcluster {
BmStorageLink::BmStorageLink()
diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_link.h b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.h
index 95528d7b2d9..8c98479a38b 100644
--- a/searchcore/src/apps/vespa-feed-bm/bm_storage_link.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link.h
@@ -6,7 +6,7 @@
#include "pending_tracker_hash.h"
#include <vespa/storage/common/storagelink.h>
-namespace feedbm {
+namespace search::bmcluster {
class PendingTracker;
diff --git a/searchcore/src/apps/vespa-feed-bm/bm_storage_link_context.h b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link_context.h
index f2df20f1f66..f7cc1841770 100644
--- a/searchcore/src/apps/vespa-feed-bm/bm_storage_link_context.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bm_storage_link_context.h
@@ -1,6 +1,6 @@
// Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-namespace feedbm {
+namespace search::bmcluster {
class BmStorageLink;
diff --git a/searchcore/src/apps/vespa-feed-bm/bucket_info_queue.cpp b/searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.cpp
index fc43402d68e..6670707ed39 100644
--- a/searchcore/src/apps/vespa-feed-bm/bucket_info_queue.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.cpp
@@ -3,7 +3,7 @@
#include "bucket_info_queue.h"
#include <vespa/persistence/spi/persistenceprovider.h>
-namespace feedbm {
+namespace search::bmcluster {
BucketInfoQueue::BucketInfoQueue(storage::spi::PersistenceProvider& provider, std::atomic<uint32_t>& errors)
: _mutex(),
diff --git a/searchcore/src/apps/vespa-feed-bm/bucket_info_queue.h b/searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.h
index 07a55127234..1a48f9fa478 100644
--- a/searchcore/src/apps/vespa-feed-bm/bucket_info_queue.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/bucket_info_queue.h
@@ -9,7 +9,7 @@
namespace storage::spi { struct PersistenceProvider; }
-namespace feedbm {
+namespace search::bmcluster {
/*
* Class containing a queue of buckets where mutating feed operations
diff --git a/searchcore/src/vespa/searchcore/bmcluster/bucket_selector.h b/searchcore/src/vespa/searchcore/bmcluster/bucket_selector.h
new file mode 100644
index 00000000000..9549bf71401
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/bmcluster/bucket_selector.h
@@ -0,0 +1,28 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace search::bmcluster {
+
+/*
+ * Maps a document index to a bucket so that documents are spread evenly across
+ * buckets while each bucket used belongs to a single thread.
+ */
+class BucketSelector
+{
+ uint32_t _thread_id;
+ uint32_t _threads;
+ uint32_t _num_buckets;
+public:
+ BucketSelector(uint32_t thread_id_in, uint32_t threads_in, uint32_t num_buckets_in)
+ : _thread_id(thread_id_in),
+ _threads(threads_in),
+ _num_buckets((num_buckets_in / _threads) * _threads)
+ {
+ }
+ uint64_t operator()(uint32_t i) const {
+ return (static_cast<uint64_t>(i) * _threads + _thread_id) % _num_buckets;
+ }
+};
+
+}
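
A small worked example of the mapping (numbers are illustrative): with 4 threads and 8 usable buckets, thread 1 computes (i * 4 + 1) % 8, so it only ever selects buckets 1 and 5 and alternates evenly between them as i increases.

#include <cassert>

void bucket_selector_example()
{
    search::bmcluster::BucketSelector selector(1, 4, 8); // thread 1 of 4, 8 buckets
    assert(selector(0) == 1); // (0 * 4 + 1) % 8
    assert(selector(1) == 5); // (1 * 4 + 1) % 8
    assert(selector(2) == 1); // (2 * 4 + 1) % 8
    assert(selector(3) == 5); // (3 * 4 + 1) % 8
}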
diff --git a/searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.cpp
index 38c8490de69..c6f2626f27c 100644
--- a/searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.cpp
@@ -17,7 +17,7 @@ using document::DocumentUpdate;
using storage::api::StorageMessageAddress;
using storage::lib::NodeType;
-namespace feedbm {
+namespace search::bmcluster {
namespace {
vespalib::string _Storage("storage");
diff --git a/searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.h
index c71bb113c5b..5358e0a948b 100644
--- a/searchcore/src/apps/vespa-feed-bm/document_api_message_bus_bm_feed_handler.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/document_api_message_bus_bm_feed_handler.h
@@ -9,7 +9,7 @@ namespace document { class DocumentTypeRepo; }
namespace documentapi { class DocumentMessage; };
namespace storage::api { class StorageMessageAddress; }
-namespace feedbm {
+namespace search::bmcluster {
class BmMessageBus;
diff --git a/searchcore/src/apps/vespa-feed-bm/i_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/i_bm_feed_handler.h
index 26cbf27b455..fc3953c49a5 100644
--- a/searchcore/src/apps/vespa-feed-bm/i_bm_feed_handler.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/i_bm_feed_handler.h
@@ -12,7 +12,7 @@ class DocumentUpdate;
class DocumentId;
}
-namespace feedbm {
+namespace search::bmcluster {
class BucketInfoQueue;
class PendingTracker;
diff --git a/searchcore/src/apps/vespa-feed-bm/pending_tracker.cpp b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker.cpp
index 94bed4cb3bd..247bf8bece3 100644
--- a/searchcore/src/apps/vespa-feed-bm/pending_tracker.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker.cpp
@@ -7,7 +7,7 @@
using namespace std::chrono_literals;
-namespace feedbm {
+namespace search::bmcluster {
PendingTracker::PendingTracker(uint32_t limit)
: _pending(0u),
diff --git a/searchcore/src/apps/vespa-feed-bm/pending_tracker.h b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker.h
index 4ca84ab7442..a8fa2f77396 100644
--- a/searchcore/src/apps/vespa-feed-bm/pending_tracker.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker.h
@@ -7,7 +7,7 @@
namespace storage::spi { struct PersistenceProvider; }
-namespace feedbm {
+namespace search::bmcluster {
class BucketInfoQueue;
diff --git a/searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.cpp b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.cpp
index 6863d35703e..515f7f6b2de 100644
--- a/searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.cpp
@@ -5,7 +5,7 @@
#include <vespa/vespalib/stllike/hash_map.hpp>
#include <cassert>
-namespace feedbm {
+namespace search::bmcluster {
PendingTrackerHash::PendingTrackerHash()
: _mutex(),
diff --git a/searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.h b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.h
index 89be93fd4ed..de9b6f63aa4 100644
--- a/searchcore/src/apps/vespa-feed-bm/pending_tracker_hash.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/pending_tracker_hash.h
@@ -5,7 +5,7 @@
#include <vespa/vespalib/stllike/hash_map.h>
#include <mutex>
-namespace feedbm {
+namespace search::bmcluster {
class PendingTracker;
diff --git a/searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.cpp
index 11149eecb3f..e905b493cf4 100644
--- a/searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.cpp
@@ -15,7 +15,7 @@ using storage::spi::Bucket;
using storage::spi::PersistenceProvider;
using storage::spi::Timestamp;
-namespace feedbm {
+namespace search::bmcluster {
namespace {
@@ -124,12 +124,6 @@ SpiBmFeedHandler::get(const document::Bucket& bucket, vespalib::stringref field_
}
void
-SpiBmFeedHandler::create_bucket(const document::Bucket& bucket)
-{
- _provider.createBucket(Bucket(bucket), context);
-}
-
-void
SpiBmFeedHandler::attach_bucket_info_queue(PendingTracker& tracker)
{
if (!_skip_get_spi_bucket_info) {
diff --git a/searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.h
index a78aa06628b..bbc9e3b8e74 100644
--- a/searchcore/src/apps/vespa-feed-bm/spi_bm_feed_handler.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/spi_bm_feed_handler.h
@@ -8,7 +8,7 @@
namespace document { class FieldSetRepo; }
namespace storage::spi { struct PersistenceProvider; }
-namespace feedbm {
+namespace search::bmcluster {
/*
* Benchmark feed handler for feed directly to persistence provider
@@ -27,7 +27,6 @@ public:
void update(const document::Bucket& bucket, std::unique_ptr<document::DocumentUpdate> document_update, uint64_t timestamp, PendingTracker& tracker) override;
void remove(const document::Bucket& bucket, const document::DocumentId& document_id, uint64_t timestamp, PendingTracker& tracker) override;
void get(const document::Bucket& bucket, vespalib::stringref field_set_string, const document::DocumentId& document_id, PendingTracker& tracker) override;
- void create_bucket(const document::Bucket& bucket);
void attach_bucket_info_queue(PendingTracker &tracker) override;
uint32_t get_error_count() const override;
const vespalib::string &get_name() const override;
diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.cpp
index 82cf2df065f..34669b8cbdc 100644
--- a/searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.cpp
@@ -18,7 +18,7 @@ using document::Document;
using document::DocumentId;
using document::DocumentUpdate;
-namespace feedbm {
+namespace search::bmcluster {
namespace {
diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.h
index 0c4b715122e..1c196d746eb 100644
--- a/searchcore/src/apps/vespa-feed-bm/storage_api_chain_bm_feed_handler.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_chain_bm_feed_handler.h
@@ -6,7 +6,7 @@
namespace storage::api { class StorageCommand; }
-namespace feedbm {
+namespace search::bmcluster {
struct BmStorageLinkContext;
diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.cpp
index f63a8e33cc0..04561b5d93e 100644
--- a/searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.cpp
@@ -15,7 +15,7 @@ using document::DocumentUpdate;
using storage::api::StorageMessageAddress;
using storage::lib::NodeType;
-namespace feedbm {
+namespace search::bmcluster {
namespace {
vespalib::string _Storage("storage");
diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.h
index 2aafd0c6830..0027f260b8f 100644
--- a/searchcore/src/apps/vespa-feed-bm/storage_api_message_bus_bm_feed_handler.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_message_bus_bm_feed_handler.h
@@ -12,7 +12,7 @@ class StorageCommand;
class StorageMessageAddress;
}
-namespace feedbm {
+namespace search::bmcluster {
class BmMessageBus;
diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.cpp b/searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.cpp
index 04d49bba0a3..3e0426cb308 100644
--- a/searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.cpp
@@ -22,7 +22,7 @@ using storage::rpc::SharedRpcResources;
using storage::rpc::StorageApiRpcService;
using storage::lib::NodeType;
-namespace feedbm {
+namespace search::bmcluster {
namespace {
vespalib::string _Storage("storage");
diff --git a/searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.h b/searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.h
index 5057d8889a5..360f702e590 100644
--- a/searchcore/src/apps/vespa-feed-bm/storage_api_rpc_bm_feed_handler.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/storage_api_rpc_bm_feed_handler.h
@@ -16,7 +16,7 @@ class MessageCodecProvider;
class SharedRpcResources;
}
-namespace feedbm {
+namespace search::bmcluster {
/*
* Benchmark feed handler for feed to service layer or distributor
diff --git a/searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.cpp b/searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.cpp
index 260b0c8a7af..ec1ebec2954 100644
--- a/searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.cpp
+++ b/searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.cpp
@@ -6,7 +6,7 @@
#include <vespa/log/log.h>
LOG_SETUP(".storage_reply_error_checker");
-namespace feedbm {
+namespace search::bmcluster {
StorageReplyErrorChecker::StorageReplyErrorChecker()
: _errors(0u)
diff --git a/searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.h b/searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.h
index 4743367b426..2fcb6aad14a 100644
--- a/searchcore/src/apps/vespa-feed-bm/storage_reply_error_checker.h
+++ b/searchcore/src/vespa/searchcore/bmcluster/storage_reply_error_checker.h
@@ -6,7 +6,7 @@
namespace storage::api { class StorageMessage; }
-namespace feedbm {
+namespace search::bmcluster {
class StorageReplyErrorChecker {
protected:
diff --git a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
index c17bdf06854..397347b7651 100644
--- a/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
+++ b/searchlib/src/tests/attribute/tensorattribute/tensorattribute_test.cpp
@@ -17,6 +17,7 @@
#include <vespa/searchlib/tensor/hnsw_index.h>
#include <vespa/searchlib/tensor/nearest_neighbor_index.h>
#include <vespa/searchlib/tensor/nearest_neighbor_index_factory.h>
+#include <vespa/searchlib/tensor/nearest_neighbor_index_loader.h>
#include <vespa/searchlib/tensor/nearest_neighbor_index_saver.h>
#include <vespa/searchlib/tensor/serialized_fast_value_attribute.h>
#include <vespa/searchlib/tensor/tensor_attribute.h>
@@ -51,6 +52,7 @@ using search::tensor::HnswIndex;
using search::tensor::HnswNode;
using search::tensor::NearestNeighborIndex;
using search::tensor::NearestNeighborIndexFactory;
+using search::tensor::NearestNeighborIndexLoader;
using search::tensor::NearestNeighborIndexSaver;
using search::tensor::PrepareResult;
using search::tensor::TensorAttribute;
@@ -89,6 +91,24 @@ public:
}
};
+class MockIndexLoader : public NearestNeighborIndexLoader {
+private:
+ int& _index_value;
+ std::unique_ptr<search::fileutil::LoadedBuffer> _buf;
+
+public:
+ MockIndexLoader(int& index_value,
+ std::unique_ptr<search::fileutil::LoadedBuffer> buf)
+ : _index_value(index_value),
+ _buf(std::move(buf))
+ {}
+ bool load_next() override {
+ ASSERT_EQUAL(sizeof(int), _buf->size());
+ _index_value = (reinterpret_cast<const int*>(_buf->buffer()))[0];
+ return false;
+ }
+};
+
class MockPrepareResult : public PrepareResult {
public:
uint32_t docid;
@@ -220,10 +240,8 @@ public:
}
return std::unique_ptr<NearestNeighborIndexSaver>();
}
- bool load(const search::fileutil::LoadedBuffer& buf) override {
- ASSERT_EQUAL(sizeof(int), buf.size());
- _index_value = (reinterpret_cast<const int*>(buf.buffer()))[0];
- return true;
+ std::unique_ptr<NearestNeighborIndexLoader> make_loader(std::unique_ptr<search::fileutil::LoadedBuffer> buf) override {
+ return std::make_unique<MockIndexLoader>(_index_value, std::move(buf));
}
std::vector<Neighbor> find_top_k(uint32_t k, vespalib::eval::TypedCells vector, uint32_t explore_k,
double distance_threshold) const override
diff --git a/searchlib/src/tests/tensor/hnsw_saver/hnsw_save_load_test.cpp b/searchlib/src/tests/tensor/hnsw_saver/hnsw_save_load_test.cpp
index 2db6437664e..74b82649c98 100644
--- a/searchlib/src/tests/tensor/hnsw_saver/hnsw_save_load_test.cpp
+++ b/searchlib/src/tests/tensor/hnsw_saver/hnsw_save_load_test.cpp
@@ -103,9 +103,8 @@ public:
return vector_writer.output;
}
void load_copy(std::vector<char> data) {
- HnswIndexLoader loader(copy);
- LoadedBuffer buffer(&data[0], data.size());
- loader.load(buffer);
+ HnswIndexLoader loader(copy, std::make_unique<LoadedBuffer>(&data[0], data.size()));
+ while (loader.load_next()) {}
}
void expect_copy_as_populated() const {
diff --git a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp
index 7c05699b8e1..fd86fbf1c73 100644
--- a/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/dense_tensor_attribute.cpp
@@ -3,6 +3,7 @@
#include "dense_tensor_attribute.h"
#include "dense_tensor_attribute_saver.h"
#include "nearest_neighbor_index.h"
+#include "nearest_neighbor_index_loader.h"
#include "nearest_neighbor_index_saver.h"
#include "tensor_attribute.hpp"
#include <vespa/eval/eval/value.h>
@@ -10,10 +11,11 @@
#include <vespa/searchlib/attribute/load_utils.h>
#include <vespa/searchlib/attribute/readerbase.h>
#include <vespa/vespalib/data/slime/inserter.h>
+#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/util/memory_allocator.h>
#include <vespa/vespalib/util/mmap_file_allocator_factory.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
-#include <vespa/vespalib/util/lambdatask.h>
#include <thread>
#include <vespa/log/log.h>
@@ -29,6 +31,7 @@ namespace search::tensor {
namespace {
constexpr uint32_t DENSE_TENSOR_ATTRIBUTE_VERSION = 1;
+constexpr uint32_t LOAD_COMMIT_INTERVAL = 256;
const vespalib::string tensorTypeTag("tensortype");
class BlobSequenceReader : public ReaderBase
@@ -266,7 +269,7 @@ private:
_attr.setCommittedDocIdLimit(std::max(_attr.getCommittedDocIdLimit(), lid + 1));
_attr._index->complete_add_document(lid, std::move(prepared));
--_pending;
- if ((lid % 256) == 0) {
+ if ((lid % LOAD_COMMIT_INTERVAL) == 0) {
_attr.commit();
};
}
@@ -319,7 +322,7 @@ public:
// This ensures that get_vector() (via getTensor()) is able to find the newly added tensor.
_attr.setCommittedDocIdLimit(lid + 1);
_attr._index->add_document(lid);
- if ((lid % 256) == 0) {
+ if ((lid % LOAD_COMMIT_INTERVAL) == 0) {
_attr.commit();
}
}
@@ -375,7 +378,17 @@ DenseTensorAttribute::onLoad(vespalib::Executor *executor)
setCommittedDocIdLimit(numDocs);
if (_index && use_index_file) {
auto buffer = LoadUtils::loadFile(*this, DenseTensorAttributeSaver::index_file_suffix());
- if (!_index->load(*buffer)) {
+ try {
+ auto index_loader = _index->make_loader(std::move(buffer));
+ size_t cnt = 0;
+ while (index_loader->load_next()) {
+ if ((++cnt % LOAD_COMMIT_INTERVAL) == 0) {
+ commit();
+ }
+ }
+ } catch (const vespalib::IoException& ex) {
+ LOG(error, "IoException while loading nearest neighbor index for tensor attribute '%s': %s",
+ getName().c_str(), ex.what());
return false;
}
}
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
index 49aa64212ae..8da8c4ba01f 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.cpp
@@ -1,16 +1,17 @@
// Copyright 2020 Oath Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include "bitvector_visited_tracker.h"
#include "distance_function.h"
+#include "hash_set_visited_tracker.h"
#include "hnsw_index.h"
#include "hnsw_index_loader.h"
#include "hnsw_index_saver.h"
#include "random_level_generator.h"
-#include "bitvector_visited_tracker.h"
-#include "hash_set_visited_tracker.h"
#include "reusable_set_visited_tracker.h"
#include <vespa/searchcommon/common/compaction_strategy.h>
#include <vespa/searchlib/attribute/address_space_components.h>
#include <vespa/searchlib/attribute/address_space_usage.h>
+#include <vespa/searchlib/util/fileutil.h>
#include <vespa/searchlib/util/state_explorer_utils.h>
#include <vespa/vespalib/data/slime/cursor.h>
#include <vespa/vespalib/data/slime/inserter.h>
@@ -694,12 +695,11 @@ HnswIndex::make_saver() const
return std::make_unique<HnswIndexSaver>(_graph);
}
-bool
-HnswIndex::load(const fileutil::LoadedBuffer& buf)
+std::unique_ptr<NearestNeighborIndexLoader>
+HnswIndex::make_loader(std::unique_ptr<fileutil::LoadedBuffer> buf)
{
assert(get_entry_docid() == 0); // cannot load after index has data
- HnswIndexLoader loader(_graph);
- return loader.load(buf);
+ return std::make_unique<HnswIndexLoader>(_graph, std::move(buf));
}
struct NeighborsByDocId {
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h
index 4503459a88a..4cb7afd1a24 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index.h
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index.h
@@ -183,7 +183,7 @@ public:
void shrink_lid_space(uint32_t doc_id_limit) override;
std::unique_ptr<NearestNeighborIndexSaver> make_saver() const override;
- bool load(const fileutil::LoadedBuffer& buf) override;
+ std::unique_ptr<NearestNeighborIndexLoader> make_loader(std::unique_ptr<fileutil::LoadedBuffer> buf) override;
std::vector<Neighbor> find_top_k(uint32_t k, TypedCells vector, uint32_t explore_k,
double distance_threshold) const override;
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.cpp b/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.cpp
index c0aec9ff91a..53b702a4d79 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.cpp
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.cpp
@@ -6,45 +6,64 @@
namespace search::tensor {
+void
+HnswIndexLoader::init()
+{
+ size_t num_readable = _buf->size(sizeof(uint32_t));
+ _ptr = static_cast<const uint32_t *>(_buf->buffer());
+ _end = _ptr + num_readable;
+ _entry_docid = next_int();
+ _entry_level = next_int();
+ _num_nodes = next_int();
+}
+
HnswIndexLoader::~HnswIndexLoader() {}
-HnswIndexLoader::HnswIndexLoader(HnswGraph &graph)
- : _graph(graph), _ptr(nullptr), _end(nullptr), _failed(false)
+
+HnswIndexLoader::HnswIndexLoader(HnswGraph& graph, std::unique_ptr<fileutil::LoadedBuffer> buf)
+ : _graph(graph),
+ _buf(std::move(buf)),
+ _ptr(nullptr),
+ _end(nullptr),
+ _entry_docid(0),
+ _entry_level(0),
+ _num_nodes(0),
+ _docid(0),
+ _link_array(),
+ _complete(false)
{
+ init();
}
bool
-HnswIndexLoader::load(const fileutil::LoadedBuffer& buf)
+HnswIndexLoader::load_next()
{
- size_t num_readable = buf.size(sizeof(uint32_t));
- _ptr = static_cast<const uint32_t *>(buf.buffer());
- _end = _ptr + num_readable;
- uint32_t entry_docid = next_int();
- int32_t entry_level = next_int();
- uint32_t num_nodes = next_int();
- std::vector<uint32_t> link_array;
- for (uint32_t docid = 0; docid < num_nodes; ++docid) {
+ assert(!_complete);
+ if (_docid < _num_nodes) {
uint32_t num_levels = next_int();
if (num_levels > 0) {
- _graph.make_node_for_document(docid, num_levels);
+ _graph.make_node_for_document(_docid, num_levels);
for (uint32_t level = 0; level < num_levels; ++level) {
uint32_t num_links = next_int();
- link_array.clear();
+ _link_array.clear();
while (num_links-- > 0) {
- link_array.push_back(next_int());
+ _link_array.push_back(next_int());
}
- _graph.set_link_array(docid, level, link_array);
+ _graph.set_link_array(_docid, level, _link_array);
}
}
}
- if (_failed) return false;
- _graph.node_refs.ensure_size(std::max(num_nodes, 1u));
- _graph.node_refs_size.store(std::max(num_nodes, 1u), std::memory_order_release);
- _graph.trim_node_refs_size();
- auto entry_node_ref = _graph.get_node_ref(entry_docid);
- _graph.set_entry_node({entry_docid, entry_node_ref, entry_level});
- return true;
+ if (++_docid < _num_nodes) {
+ return true;
+ } else {
+ _graph.node_refs.ensure_size(std::max(_num_nodes, 1u));
+ _graph.node_refs_size.store(std::max(_num_nodes, 1u), std::memory_order_release);
+ _graph.trim_node_refs_size();
+ auto entry_node_ref = _graph.get_node_ref(_entry_docid);
+ _graph.set_entry_node({_entry_docid, entry_node_ref, _entry_level});
+ _complete = true;
+ return false;
+ }
}
-
}
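For reference, the serialized graph that init() and load_next() parse is a flat stream of uint32_t values. A schematic derived from the parsing code above (not a separate format specification):

    // Layout consumed by HnswIndexLoader, as implied by init()/load_next():
    //   uint32_t entry_docid;
    //   uint32_t entry_level;          // read as uint32_t, stored in an int32_t
    //   uint32_t num_nodes;
    //   for docid in [0, num_nodes):
    //       uint32_t num_levels;
    //       for each level in [0, num_levels):
    //           uint32_t num_links;
    //           uint32_t links[num_links];
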
diff --git a/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.h b/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.h
index 9f5ae66011f..0b6658e42ec 100644
--- a/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.h
+++ b/searchlib/src/vespa/searchlib/tensor/hnsw_index_loader.h
@@ -2,7 +2,11 @@
#pragma once
+#include "nearest_neighbor_index_loader.h"
+#include <vespa/vespalib/util/exceptions.h>
#include <cstdint>
+#include <memory>
+#include <vector>
namespace search::fileutil { class LoadedBuffer; }
@@ -13,23 +17,34 @@ struct HnswGraph;
/**
* Implements loading of HNSW graph structure from binary format.
**/
-class HnswIndexLoader {
-public:
- HnswIndexLoader(HnswGraph &graph);
- ~HnswIndexLoader();
- bool load(const fileutil::LoadedBuffer& buf);
+class HnswIndexLoader : public NearestNeighborIndexLoader {
private:
- HnswGraph &_graph;
- const uint32_t *_ptr;
- const uint32_t *_end;
- bool _failed;
+ HnswGraph& _graph;
+ std::unique_ptr<fileutil::LoadedBuffer> _buf;
+ const uint32_t* _ptr;
+ const uint32_t* _end;
+ uint32_t _entry_docid;
+ int32_t _entry_level;
+ uint32_t _num_nodes;
+ uint32_t _docid;
+ std::vector<uint32_t> _link_array;
+ bool _complete;
+
+ void init();
uint32_t next_int() {
if (__builtin_expect((_ptr == _end), false)) {
- _failed = true;
- return 0;
+ throw vespalib::IoException
+ (vespalib::IoException::createMessage("Already at the end of buffer when trying to get next int",
+ vespalib::IoException::CORRUPT_DATA),
+ vespalib::IoException::CORRUPT_DATA, "");
}
return *_ptr++;
}
+
+public:
+ HnswIndexLoader(HnswGraph& graph, std::unique_ptr<fileutil::LoadedBuffer> buf);
+ virtual ~HnswIndexLoader();
+ bool load_next() override;
};
}
diff --git a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h
index b8f30a53ddf..f75cdae8a92 100644
--- a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h
+++ b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index.h
@@ -22,6 +22,7 @@ class CompactionStrategy;
namespace search::tensor {
+class NearestNeighborIndexLoader;
class NearestNeighborIndexSaver;
/**
@@ -77,7 +78,13 @@ public:
* and the caller ensures that an attribute read guard is held during the lifetime of the saver.
*/
virtual std::unique_ptr<NearestNeighborIndexSaver> make_saver() const = 0;
- virtual bool load(const fileutil::LoadedBuffer& buf) = 0;
+
+ /**
+ * Creates a loader that is used to load the index from the given buffer.
+ *
+ * This might throw vespalib::IoException.
+ */
+ virtual std::unique_ptr<NearestNeighborIndexLoader> make_loader(std::unique_ptr<fileutil::LoadedBuffer> buf) = 0;
virtual std::vector<Neighbor> find_top_k(uint32_t k,
vespalib::eval::TypedCells vector,
diff --git a/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index_loader.h b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index_loader.h
new file mode 100644
index 00000000000..703f8f863d1
--- /dev/null
+++ b/searchlib/src/vespa/searchlib/tensor/nearest_neighbor_index_loader.h
@@ -0,0 +1,23 @@
+// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace search::tensor {
+
+/**
+ * Interface that is used to load a nearest neighbor index from binary form.
+ */
+class NearestNeighborIndexLoader {
+public:
+ virtual ~NearestNeighborIndexLoader() {}
+
+ /**
+ * Loads the next part of the index (e.g. the node corresponding to a given document)
+ * and returns whether there is more data to load.
+ *
+ * This might throw vespalib::IoException.
+ */
+ virtual bool load_next() = 0;
+};
+
+}
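Since load_next() reports corrupt or truncated data by throwing vespalib::IoException (rather than the old bool return from load()), callers are expected to wrap the drive loop. A sketch of that pattern; the drive_loader() function is hypothetical:

    // Sketch: converting the exception-based contract back into a bool result.
    // Requires <vespa/vespalib/util/exceptions.h> and nearest_neighbor_index_loader.h.
    bool drive_loader(search::tensor::NearestNeighborIndexLoader& loader) {
        try {
            while (loader.load_next()) {
                // each call loads the next part (e.g. one node) of the index
            }
            return true;
        } catch (const vespalib::IoException&) {
            return false;  // truncated or corrupt index data
        }
    }
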
diff --git a/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.cpp b/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.cpp
index 55d0e744743..16e47371cbb 100644
--- a/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.cpp
+++ b/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.cpp
@@ -102,6 +102,14 @@ ServiceMapHistory & LocalRpcMonitorMap::history() {
return _history;
}
+bool LocalRpcMonitorMap::wouldConflict(const ServiceMapping &mapping) const {
+ auto iter = _map.find(mapping.name);
+ if (iter == _map.end()) {
+ return false; // no mapping, no conflict
+ }
+ return (iter->second.spec != mapping.spec);
+}
+
void LocalRpcMonitorMap::addLocal(const ServiceMapping &mapping,
std::unique_ptr<AddLocalCompletionHandler> inflight)
{
diff --git a/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.h b/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.h
index 920d54a405f..e3d081eacc9 100644
--- a/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.h
+++ b/slobrok/src/vespa/slobrok/server/local_rpc_monitor_map.h
@@ -129,6 +129,8 @@ public:
MapSource &dispatcher() { return _dispatcher; }
ServiceMapHistory & history();
+ bool wouldConflict(const ServiceMapping &mapping) const;
+
/** for use by register API, will call doneHandler() on inflight script */
void addLocal(const ServiceMapping &mapping,
std::unique_ptr<AddLocalCompletionHandler> inflight);
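To make the wouldConflict() contract explicit: only an existing mapping for the same name with a different spec counts as a conflict, so re-registering an identical name/spec pair is allowed. A small illustration with hypothetical map contents:

    // Assume the monitor map currently holds {"my/service", "tcp/host1:1234"}.
    map.wouldConflict({"my/service", "tcp/host1:1234"});    // false: identical re-registration
    map.wouldConflict({"my/service", "tcp/host2:9999"});    // true:  same name, different spec
    map.wouldConflict({"other/service", "tcp/host1:1234"}); // false: name not present
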
diff --git a/slobrok/src/vespa/slobrok/server/rpchooks.cpp b/slobrok/src/vespa/slobrok/server/rpchooks.cpp
index d40fd93afd6..ab1d5246ddc 100644
--- a/slobrok/src/vespa/slobrok/server/rpchooks.cpp
+++ b/slobrok/src/vespa/slobrok/server/rpchooks.cpp
@@ -238,6 +238,9 @@ RPCHooks::rpc_listNamesServed(FRT_RPCRequest *req)
void
RPCHooks::rpc_registerRpcServer(FRT_RPCRequest *req)
{
+ if (useNewLogic()) {
+ return new_registerRpcServer(req);
+ }
FRT_Values &args = *req->GetParams();
const char *dName = args[0]._string._str;
const char *dSpec = args[1]._string._str;
@@ -270,9 +273,34 @@ RPCHooks::rpc_registerRpcServer(FRT_RPCRequest *req)
completer.doRequest();
}
+void RPCHooks::new_registerRpcServer(FRT_RPCRequest *req) {
+ FRT_Values &args = *req->GetParams();
+ const char *dName = args[0]._string._str;
+ const char *dSpec = args[1]._string._str;
+ LOG(debug, "RPC: invoked registerRpcServer(%s,%s)", dName, dSpec);
+ _cnts.registerReqs++;
+ ServiceMapping mapping{dName, dSpec};
+    // can we already tell that this registration will fail?
+ if (_env.consensusMap().wouldConflict(mapping)) {
+ req->SetError(FRTE_RPC_METHOD_FAILED, "conflict detected");
+ LOG(info, "cannot register %s at %s: conflict", dName, dSpec);
+ return;
+ }
+ auto script = ScriptCommand::makeRegCompleter(_env, dName, dSpec, req);
+ req->Detach();
+ _env.localMonitorMap().addLocal(mapping, std::make_unique<ScriptCommandWrapper>(std::move(script)));
+ // TODO: remove this
+ script = ScriptCommand::makeRegRpcSrvCmd(_env, dName, dSpec, nullptr);
+ script.doRequest();
+ return;
+}
+
void
RPCHooks::rpc_unregisterRpcServer(FRT_RPCRequest *req)
{
+ if (useNewLogic()) {
+ return new_unregisterRpcServer(req);
+ }
FRT_Values &args = *req->GetParams();
const char *dName = args[0]._string._str;
const char *dSpec = args[1]._string._str;
@@ -288,6 +316,17 @@ RPCHooks::rpc_unregisterRpcServer(FRT_RPCRequest *req)
return;
}
+void RPCHooks::new_unregisterRpcServer(FRT_RPCRequest *req) {
+ FRT_Values &args = *req->GetParams();
+ const char *dName = args[0]._string._str;
+ const char *dSpec = args[1]._string._str;
+ ServiceMapping mapping{dName, dSpec};
+ _env.localMonitorMap().removeLocal(mapping);
+ _env.exchangeManager().forwardRemove(dName, dSpec);
+ LOG(debug, "unregisterRpcServer(%s,%s)", dName, dSpec);
+ _cnts.otherReqs++;
+ return;
+}
void
RPCHooks::rpc_addPeer(FRT_RPCRequest *req)
@@ -332,6 +371,9 @@ RPCHooks::rpc_removePeer(FRT_RPCRequest *req)
void
RPCHooks::rpc_wantAdd(FRT_RPCRequest *req)
{
+ if (useNewLogic()) {
+ return new_wantAdd(req);
+ }
FRT_Values &args = *req->GetParams();
const char *remsb = args[0]._string._str;
const char *dName = args[1]._string._str;
@@ -351,10 +393,38 @@ RPCHooks::rpc_wantAdd(FRT_RPCRequest *req)
return;
}
+void RPCHooks::new_wantAdd(FRT_RPCRequest *req) {
+ FRT_Values &args = *req->GetParams();
+ const char *remsb = args[0]._string._str;
+ const char *dName = args[1]._string._str;
+ const char *dSpec = args[2]._string._str;
+ FRT_Values &retval = *req->GetReturn();
+ ServiceMapping mapping{dName, dSpec};
+ bool conflict = (
+ _env.consensusMap().wouldConflict(mapping)
+ ||
+ _env.localMonitorMap().wouldConflict(mapping)
+ );
+ if (conflict) {
+ retval.AddInt32(13);
+ retval.AddString("conflict detected");
+ req->SetError(FRTE_RPC_METHOD_FAILED, "conflict detected");
+ } else {
+ retval.AddInt32(0);
+ retval.AddString("ok");
+ }
+ LOG(debug, "%s->wantAdd(%s,%s) %s",
+ remsb, dName, dSpec, conflict ? "conflict" : "OK");
+ _cnts.wantAddReqs++;
+ return;
+}
void
RPCHooks::rpc_doRemove(FRT_RPCRequest *req)
{
+ if (useNewLogic()) {
+ return new_doRemove(req);
+ }
FRT_Values &args = *req->GetParams();
const char *rname = args[0]._string._str;
const char *dname = args[1]._string._str;
@@ -374,9 +444,27 @@ RPCHooks::rpc_doRemove(FRT_RPCRequest *req)
return;
}
+void RPCHooks::new_doRemove(FRT_RPCRequest *req) {
+ FRT_Values &args = *req->GetParams();
+ const char *rname = args[0]._string._str;
+ const char *dname = args[1]._string._str;
+ const char *dspec = args[2]._string._str;
+ FRT_Values &retval = *req->GetReturn();
+ ServiceMapping mapping{dname, dspec};
+ _env.localMonitorMap().removeLocal(mapping);
+ retval.AddInt32(0);
+ retval.AddString("ok");
+ LOG(debug, "%s->doRemove(%s,%s)", rname, dname, dspec);
+ _cnts.doRemoveReqs++;
+ return;
+}
+
void
RPCHooks::rpc_doAdd(FRT_RPCRequest *req)
{
+ if (useNewLogic()) {
+ return new_doAdd(req);
+ }
FRT_Values &args = *req->GetParams();
const char *rname = args[0]._string._str;
const char *dname = args[1]._string._str;
@@ -396,6 +484,28 @@ RPCHooks::rpc_doAdd(FRT_RPCRequest *req)
return;
}
+void RPCHooks::new_doAdd(FRT_RPCRequest *req) {
+ FRT_Values &args = *req->GetParams();
+ const char *remsb = args[0]._string._str;
+ const char *dName = args[1]._string._str;
+ const char *dSpec = args[2]._string._str;
+ FRT_Values &retval = *req->GetReturn();
+ ServiceMapping mapping{dName, dSpec};
+ bool ok = true;
+ if (_env.consensusMap().wouldConflict(mapping)) {
+ retval.AddInt32(13);
+ retval.AddString("conflict detected");
+ req->SetError(FRTE_RPC_METHOD_FAILED, "conflict detected");
+ ok = false;
+ } else {
+ retval.AddInt32(0);
+ retval.AddString("ok");
+ }
+ LOG(debug, "%s->doAdd(%s,%s) %s",
+ remsb, dName, dSpec, ok ? "OK" : "failed");
+ _cnts.doAddReqs++;
+ return;
+}
void
RPCHooks::rpc_lookupRpcServer(FRT_RPCRequest *req)
@@ -465,6 +575,7 @@ RPCHooks::rpc_lookupManaged(FRT_RPCRequest *req)
FRT_Values &args = *req->GetParams();
const char *name = args[0]._string._str;
LOG(debug, "RPC: lookupManaged(%s)", name);
+ // TODO: use local history here
const auto & visible = _env.globalHistory();
auto diff = visible.makeDiffFrom(0);
for (const auto & entry : diff.updated) {
diff --git a/slobrok/src/vespa/slobrok/server/rpchooks.h b/slobrok/src/vespa/slobrok/server/rpchooks.h
index 19ae64df5f1..e8f6c65ea47 100644
--- a/slobrok/src/vespa/slobrok/server/rpchooks.h
+++ b/slobrok/src/vespa/slobrok/server/rpchooks.h
@@ -62,6 +62,12 @@ private:
void rpc_lookupRpcServer(FRT_RPCRequest *req);
+ void new_registerRpcServer(FRT_RPCRequest *req);
+ void new_unregisterRpcServer(FRT_RPCRequest *req);
+ void new_wantAdd(FRT_RPCRequest *req);
+ void new_doRemove(FRT_RPCRequest *req);
+ void new_doAdd(FRT_RPCRequest *req);
+
void rpc_registerRpcServer(FRT_RPCRequest *req);
void rpc_unregisterRpcServer(FRT_RPCRequest *req);
diff --git a/slobrok/src/vespa/slobrok/server/sbenv.cpp b/slobrok/src/vespa/slobrok/server/sbenv.cpp
index 9fb5511948c..ebb9935877f 100644
--- a/slobrok/src/vespa/slobrok/server/sbenv.cpp
+++ b/slobrok/src/vespa/slobrok/server/sbenv.cpp
@@ -121,11 +121,19 @@ SBEnv::SBEnv(const ConfigShim &shim, bool useNewConsensusLogic)
_exchanger(*this, _rpcsrvmap),
_rpcsrvmap()
{
+ if (useNewLogic()) {
+ srandom(time(nullptr) ^ getpid());
+ // note: feedback loop between these two:
+ _localMonitorSubscription = MapSubscription::subscribe(_consensusMap, _localRpcMonitorMap);
+ _consensusSubscription = MapSubscription::subscribe(_localRpcMonitorMap.dispatcher(), _consensusMap);
+ _globalHistorySubscription = MapSubscription::subscribe(_consensusMap, _globalVisibleHistory);
+ _rpcHooks.initRPC(getSupervisor());
+ return;
+ }
srandom(time(nullptr) ^ getpid());
// note: feedback loop between these two:
_localMonitorSubscription = MapSubscription::subscribe(_consensusMap, _localRpcMonitorMap);
_consensusSubscription = MapSubscription::subscribe(_localRpcMonitorMap.dispatcher(), _consensusMap);
- // TODO: use consensus as source here:
_globalHistorySubscription = MapSubscription::subscribe(_rpcsrvmap.proxy(), _globalVisibleHistory);
_rpcHooks.initRPC(getSupervisor());
}
diff --git a/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp
index 6cca6df9f80..b871bf5841e 100644
--- a/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp
+++ b/storage/src/tests/distributor/legacy_bucket_db_updater_test.cpp
@@ -1212,6 +1212,7 @@ TEST_F(LegacyBucketDBUpdaterTest, notify_change_with_pending_state_queues_bucket
}
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, merge_reply) {
enableDistributorClusterState("distributor:1 storage:3");
@@ -1254,6 +1255,7 @@ TEST_F(LegacyBucketDBUpdaterTest, merge_reply) {
dumpBucket(document::BucketId(16, 1234)));
};
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down) {
enableDistributorClusterState("distributor:1 storage:3");
std::vector<api::MergeBucketCommand::Node> nodes;
@@ -1296,6 +1298,7 @@ TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down) {
dumpBucket(document::BucketId(16, 1234)));
};
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
enableDistributorClusterState("distributor:1 storage:3");
std::vector<api::MergeBucketCommand::Node> nodes;
@@ -1338,7 +1341,7 @@ TEST_F(LegacyBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
dumpBucket(document::BucketId(16, 1234)));
};
-
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, flush) {
enableDistributorClusterState("distributor:1 storage:3");
_sender.clear();
@@ -1417,6 +1420,7 @@ LegacyBucketDBUpdaterTest::getSentNodesDistributionChanged(
return ost.str();
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_send_messages) {
EXPECT_EQ(getNodeList({0, 1, 2}),
getSentNodes("cluster:d",
@@ -1514,6 +1518,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_send_messages) {
"distributor:3 storage:3 .1.s:m"));
};
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_receive) {
DistributorMessageSenderStub sender;
@@ -1552,6 +1557,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_receive) {
EXPECT_EQ(3, (int)pendingTransition.results().size());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down) {
std::string config(getDistConfig6Nodes4Groups());
config += "distributor_auto_ownership_transfer_on_whole_group_down true\n";
@@ -1571,6 +1577,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down) {
"distributor:6 .2.s:d storage:6"));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) {
std::string config(getDistConfig6Nodes4Groups());
config += "distributor_auto_ownership_transfer_on_whole_group_down false\n";
@@ -1582,6 +1589,8 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_h
"distributor:6 .2.s:d .3.s:d storage:6"));
}
+namespace {
+
void
parseInputData(const std::string& data,
uint64_t timestamp,
@@ -1656,6 +1665,8 @@ struct BucketDumper : public BucketDatabase::EntryProcessor
}
};
+}
+
std::string
LegacyBucketDBUpdaterTest::mergeBucketLists(
const lib::ClusterState& oldState,
@@ -1724,6 +1735,7 @@ LegacyBucketDBUpdaterTest::mergeBucketLists(const std::string& existingData,
includeBucketInfo);
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge) {
// Simple initializing case - ask all nodes for info
EXPECT_EQ(
@@ -1763,6 +1775,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge) {
mergeBucketLists("", "0:5/0/0/0|1:5/2/3/4", true));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
// Node went from initializing to up and non-invalid bucket changed.
EXPECT_EQ(
@@ -1775,6 +1788,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
true));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) {
document::BucketId bucket(16, 3);
lib::ClusterState stateBefore("distributor:1 storage:1");
@@ -1804,6 +1818,7 @@ TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_cur
EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
document::BucketId bucket(16, 3);
lib::ClusterState stateBefore("distributor:1 storage:1");
@@ -1831,6 +1846,7 @@ TEST_F(LegacyBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pen
EXPECT_EQ(std::string("NONEXISTING"), dumpBucket(bucket));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
/*
* If we get a distribution config change, it's important that cluster states that
* arrive after this--but _before_ the pending cluster state has finished--must trigger
@@ -1880,6 +1896,7 @@ TEST_F(LegacyBucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_dis
EXPECT_EQ(size_t(0), _sender.commands().size());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) {
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:6 storage:6"), messageCount(6), 20));
_sender.clear();
@@ -1929,6 +1946,7 @@ std::unique_ptr<BucketDatabase::EntryProcessor> func_processor(Func&& f) {
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) {
setDistribution(getDistConfig3Nodes1Group());
@@ -1948,6 +1966,7 @@ TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_does_not_elide_buc
}));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) {
getClock().setAbsoluteTimeInSeconds(101234);
lib::ClusterState stateBefore("distributor:1 storage:1");
@@ -1963,6 +1982,7 @@ TEST_F(LegacyBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_ti
EXPECT_EQ(uint32_t(101234), e->getLastGarbageCollectionTime());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) {
{
lib::ClusterState stateBefore("distributor:1 storage:1 .0.s:i");
@@ -2051,6 +2071,7 @@ LegacyBucketDBUpdaterTest::getSentNodesWithPreemption(
using nodeVec = std::vector<uint16_t>;
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
/*
* If we don't carry over the set of nodes that we need to fetch from,
* a naive comparison between the active state and the new state will
@@ -2067,6 +2088,7 @@ TEST_F(LegacyBucketDBUpdaterTest, preempted_distributor_change_carries_node_set_
"version:3 distributor:6 storage:6"));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) {
EXPECT_EQ(
expandNodeVec({2, 3}),
@@ -2077,6 +2099,7 @@ TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over
"version:3 distributor:6 storage:6"));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
EXPECT_EQ(
expandNodeVec({2}),
@@ -2087,6 +2110,7 @@ TEST_F(LegacyBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched
"version:3 distributor:6 storage:6"));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
EXPECT_EQ(
nodeVec{},
@@ -2097,6 +2121,7 @@ TEST_F(LegacyBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_stat
"version:3 distributor:6 storage:6 .2.s:d")); // 2 down again.
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
// Even though 100 nodes are preempted, not all of these should be part
// of the request afterwards when only 6 are part of the state.
@@ -2109,6 +2134,7 @@ TEST_F(LegacyBucketDBUpdaterTest, doNotSendToPreemptedNodeNotPartOfNewState) {
"version:3 distributor:6 storage:6"));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) {
lib::ClusterState stateBefore(
"version:1 distributor:6 storage:6 .1.t:1234");
@@ -2123,6 +2149,7 @@ TEST_F(LegacyBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_sta
EXPECT_EQ(size_t(0), _sender.commands().size());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest (despite being disabled)
// XXX test currently disabled since distribution config currently isn't used
// at all in order to deduce the set of nodes to send to. This might not matter
// in practice since it is assumed that the cluster state matching the new
@@ -2144,6 +2171,7 @@ TEST_F(LegacyBucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to
EXPECT_EQ((nodeVec{0, 1, 2}), getSendSet());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
/**
* Test scenario where a cluster is downsized by removing a subset of the nodes
* from the distribution configuration. The system must be able to deal with
@@ -2188,6 +2216,7 @@ TEST_F(LegacyBucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing
EXPECT_EQ(expandNodeVec({0, 1}), getSendSet());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) {
auto fixture = createPendingStateFixtureForStateChange(
"distributor:2 storage:2", "distributor:1 storage:2");
@@ -2198,6 +2227,7 @@ TEST_F(LegacyBucketDBUpdaterTest, changed_distributor_set_implies_ownership_tran
EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) {
auto fixture = createPendingStateFixtureForStateChange(
"distributor:2 storage:2", "distributor:2 storage:1");
@@ -2208,18 +2238,21 @@ TEST_F(LegacyBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership
EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) {
auto fixture = createPendingStateFixtureForDistributionChange(
"distributor:2 storage:2");
EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_single_state_change) {
ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2)));
EXPECT_EQ(uint64_t(5000), lastTransitionTimeInMillis());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) {
ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:2", 5, messageCount(2)));
ASSERT_NO_FATAL_FAILURE(completeStateTransitionInSeconds("distributor:2 storage:3", 3, messageCount(1)));
@@ -2227,6 +2260,7 @@ TEST_F(LegacyBucketDBUpdaterTest, transition_time_reset_across_non_preempting_st
EXPECT_EQ(uint64_t(3000), lastTransitionTimeInMillis());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) {
lib::ClusterState state("distributor:2 storage:2");
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(state, messageCount(2), 1));
@@ -2239,6 +2273,7 @@ TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_for_distribution_confi
EXPECT_EQ(uint64_t(4000), lastTransitionTimeInMillis());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) {
_sender.clear();
lib::ClusterState state("distributor:2 storage:2");
@@ -2252,6 +2287,7 @@ TEST_F(LegacyBucketDBUpdaterTest, transition_time_tracked_across_preempted_trans
EXPECT_EQ(uint64_t(8000), lastTransitionTimeInMillis());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
/*
* Brief reminder on test DSL for checking bucket merge operations:
*
@@ -2275,31 +2311,37 @@ TEST_F(LegacyBucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_do
"0:5/1/2/3|1:5/7/8/9", true));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) {
EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"),
mergeBucketLists("", "0:5/1/2/3|1:5/7/8/9", true));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) {
EXPECT_EQ(std::string("5:0/1/2/3/t|"),
mergeBucketLists("", "0:5/1/2/3", true));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) {
EXPECT_EQ(std::string("5:0/1/2/3/t|"),
mergeBucketLists("0:5/1/2/3", "0:5/1/2/3", true));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) {
EXPECT_EQ(std::string("5:1/7/8/9/u,0/1/2/3/u|"),
mergeBucketLists("0:5/1/2/3|1:5/7/8/9", "0:5/1/2/3|1:5/7/8/9", true));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) {
EXPECT_EQ(std::string("5:1/2/3/4/u,0/1/2/3/t|"),
mergeBucketLists("0:5/1/2/3", "0:5/1/2/3|1:5/2/3/4", true));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) {
// This differs from batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted
// in that _all_ content nodes are considered outdated when distributor changes take place,
@@ -2315,6 +2357,7 @@ TEST_F(LegacyBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_
"0:5/1/2/3|1:5/7/8/9", true));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475
TEST_F(LegacyBucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) {
std::string distConfig(getDistConfig6Nodes2Groups());
@@ -2384,6 +2427,7 @@ void for_each_bucket(const DistributorBucketSpaceRepo& repo, Func&& f) {
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) {
getBucketDBUpdater().set_stale_reads_enabled(true);
@@ -2425,6 +2469,7 @@ TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_own
});
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) {
constexpr uint32_t n_buckets = 10;
// No ownership change, just node down. Test redundancy is 2, so removing 2 nodes will
@@ -2436,6 +2481,7 @@ TEST_F(LegacyBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_r
EXPECT_EQ(size_t(0), read_only_global_db().size());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) {
getBucketDBUpdater().set_stale_reads_enabled(false);
@@ -2481,6 +2527,7 @@ void LegacyBucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transiti
_sender.clear();
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
@@ -2501,6 +2548,7 @@ TEST_F(LegacyBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state
EXPECT_EQ(uint64_t(n_buckets), mutable_global_db().size());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
@@ -2513,6 +2561,7 @@ TEST_F(LegacyBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_act
EXPECT_EQ(uint64_t(0), read_only_global_db().size());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
@@ -2527,6 +2576,7 @@ TEST_F(LegacyBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_ma
EXPECT_EQ(uint64_t(n_buckets), read_only_global_db().size());
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
@@ -2541,6 +2591,7 @@ TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_with_mismatchin
ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5));
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) {
getBucketDBUpdater().set_stale_reads_enabled(true);
constexpr uint32_t n_buckets = 10;
@@ -2557,6 +2608,7 @@ TEST_F(LegacyBucketDBUpdaterTest, activate_cluster_state_request_without_pending
EXPECT_EQ(size_t(0), _sender.replies().size());
}
+// TODO STRIPE disabled benchmark tests are NOT migrated to new test suite
TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_bulk_loading_into_empty_db) {
// Need to trigger an initial edge to complete first bucket scan
ASSERT_NO_FATAL_FAILURE(setAndEnableClusterState(lib::ClusterState("distributor:2 storage:1"),
@@ -2675,6 +2727,7 @@ TEST_F(LegacyBucketDBUpdaterTest, DISABLED_benchmark_all_buckets_removed_during_
fprintf(stderr, "Took %g seconds to scan and remove %u buckets\n", timer.min_time(), n_buckets);
}
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) {
auto initial_baseline = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d");
auto initial_default = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m");
@@ -2700,7 +2753,7 @@ TEST_F(LegacyBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_
EXPECT_TRUE(state == nullptr);
}
-struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest {
+struct LegacyBucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest {
lib::ClusterState empty_state;
std::shared_ptr<lib::ClusterState> initial_baseline;
std::shared_ptr<lib::ClusterState> initial_default;
@@ -2708,7 +2761,7 @@ struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest {
Bucket default_bucket;
Bucket global_bucket;
- BucketDBUpdaterSnapshotTest()
+ LegacyBucketDBUpdaterSnapshotTest()
: LegacyBucketDBUpdaterTest(),
empty_state(),
initial_baseline(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d")),
@@ -2719,7 +2772,7 @@ struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest {
global_bucket(FixedBucketSpaces::global_space(), BucketId(16, 1234))
{
}
- ~BucketDBUpdaterSnapshotTest() override;
+ ~LegacyBucketDBUpdaterSnapshotTest() override;
void SetUp() override {
LegacyBucketDBUpdaterTest::SetUp();
@@ -2746,19 +2799,22 @@ struct BucketDBUpdaterSnapshotTest : LegacyBucketDBUpdaterTest {
}
};
-BucketDBUpdaterSnapshotTest::~BucketDBUpdaterSnapshotTest() = default;
+LegacyBucketDBUpdaterSnapshotTest::~LegacyBucketDBUpdaterSnapshotTest() = default;
-TEST_F(BucketDBUpdaterSnapshotTest, default_space_snapshot_prior_to_activated_state_is_non_routable) {
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
+TEST_F(LegacyBucketDBUpdaterSnapshotTest, default_space_snapshot_prior_to_activated_state_is_non_routable) {
auto rs = getBucketDBUpdater().read_snapshot_for_bucket(default_bucket);
EXPECT_FALSE(rs.is_routable());
}
-TEST_F(BucketDBUpdaterSnapshotTest, global_space_snapshot_prior_to_activated_state_is_non_routable) {
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
+TEST_F(LegacyBucketDBUpdaterSnapshotTest, global_space_snapshot_prior_to_activated_state_is_non_routable) {
auto rs = getBucketDBUpdater().read_snapshot_for_bucket(global_bucket);
EXPECT_FALSE(rs.is_routable());
}
-TEST_F(BucketDBUpdaterSnapshotTest, read_snapshot_returns_appropriate_cluster_states) {
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
+TEST_F(LegacyBucketDBUpdaterSnapshotTest, read_snapshot_returns_appropriate_cluster_states) {
set_cluster_state_bundle(initial_bundle);
// State currently pending, empty initial state is active
@@ -2788,7 +2844,8 @@ TEST_F(BucketDBUpdaterSnapshotTest, read_snapshot_returns_appropriate_cluster_st
EXPECT_FALSE(global_rs.context().has_pending_state_transition());
}
-TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_no_pending_state_transition_returns_mutable_db_guard) {
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
+TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_with_no_pending_state_transition_returns_mutable_db_guard) {
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
@@ -2800,7 +2857,8 @@ TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_no_pending_state_transition_re
n_buckets);
}
-TEST_F(BucketDBUpdaterSnapshotTest, snapshot_returns_unroutable_for_non_owned_bucket_in_current_state) {
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
+TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_returns_unroutable_for_non_owned_bucket_in_current_state) {
ASSERT_NO_FATAL_FAILURE(
trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
"version:2 distributor:2 .0.s:d storage:4", 0, 0));
@@ -2810,7 +2868,8 @@ TEST_F(BucketDBUpdaterSnapshotTest, snapshot_returns_unroutable_for_non_owned_bu
EXPECT_FALSE(def_rs.is_routable());
}
-TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_pending_state_returns_read_only_guard_for_bucket_only_owned_in_current_state) {
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
+TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_with_pending_state_returns_read_only_guard_for_bucket_only_owned_in_current_state) {
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
@@ -2821,7 +2880,8 @@ TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_pending_state_returns_read_onl
n_buckets);
}
-TEST_F(BucketDBUpdaterSnapshotTest, snapshot_is_unroutable_if_stale_reads_disabled_and_bucket_not_owned_in_pending_state) {
+// TODO STRIPE migrated to TopLevelBucketDBUpdaterTest
+TEST_F(LegacyBucketDBUpdaterSnapshotTest, snapshot_is_unroutable_if_stale_reads_disabled_and_bucket_not_owned_in_pending_state) {
getBucketDBUpdater().set_stale_reads_enabled(false);
constexpr uint32_t n_buckets = 10;
ASSERT_NO_FATAL_FAILURE(
diff --git a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
index 70e5afaed43..01f7d5a4f0a 100644
--- a/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
+++ b/storage/src/tests/distributor/top_level_bucket_db_updater_test.cpp
@@ -11,13 +11,13 @@
#include <vespa/document/test/make_document_bucket.h>
#include <vespa/document/test/make_bucket_space.h>
#include <vespa/document/bucket/fixed_bucket_spaces.h>
+#include <vespa/metrics/updatehook.h>
#include <vespa/storage/distributor/simpleclusterinformation.h>
#include <vespa/storage/distributor/top_level_distributor.h>
#include <vespa/storage/distributor/distributor_stripe.h>
#include <vespa/storage/distributor/distributor_bucket_space.h>
#include <vespa/vespalib/gtest/gtest.h>
#include <vespa/vespalib/text/stringtokenizer.h>
-#include <vespa/vespalib/util/benchmark_timer.h>
#include <sstream>
#include <iomanip>
@@ -38,6 +38,8 @@ class TopLevelBucketDBUpdaterTest : public Test,
public TopLevelDistributorTestUtil
{
public:
+ using OutdatedNodesMap = dbtransition::OutdatedNodesMap;
+
TopLevelBucketDBUpdaterTest();
~TopLevelBucketDBUpdaterTest() override;
@@ -117,7 +119,19 @@ public:
invalid_bucket_count));
}
- std::string verifyBucket(document::BucketId id, const lib::ClusterState& state) {
+ void send_fake_reply_for_single_bucket_request(
+ const api::RequestBucketInfoCommand& rbi)
+ {
+ ASSERT_EQ(size_t(1), rbi.getBuckets().size());
+ const document::BucketId& bucket(rbi.getBuckets()[0]);
+
+ auto reply = std::make_shared<api::RequestBucketInfoReply>(rbi);
+ reply->getBucketInfo().push_back(
+ api::RequestBucketInfoReply::Entry(bucket, api::BucketInfo(20, 10, 12, 50, 60, true, true)));
+ stripe_of_bucket(bucket).bucket_db_updater().onRequestBucketInfoReply(reply);
+ }
+
+ std::string verify_bucket(document::BucketId id, const lib::ClusterState& state) {
BucketDatabase::Entry entry = get_bucket(id);
if (!entry.valid()) {
return vespalib::make_string("%s doesn't exist in DB", id.toString().c_str());
@@ -182,6 +196,12 @@ public:
sort_sent_messages_by_index(_sender, size_before_state);
}
+ void set_cluster_state_bundle(const lib::ClusterStateBundle& state) {
+ const size_t size_before_state = _sender.commands().size();
+ bucket_db_updater().onSetSystemState(std::make_shared<api::SetSystemStateCommand>(state));
+ sort_sent_messages_by_index(_sender, size_before_state);
+ }
+
void set_cluster_state(const vespalib::string& state_str) {
set_cluster_state(lib::ClusterState(state_str));
}
@@ -191,6 +211,14 @@ public:
std::make_shared<api::ActivateClusterStateVersionCommand>(version));
}
+ void assert_has_activate_cluster_state_reply_with_actual_version(uint32_t version) {
+ ASSERT_EQ(size_t(1), _sender.replies().size());
+ auto* response = dynamic_cast<api::ActivateClusterStateVersionReply*>(_sender.replies().back().get());
+ ASSERT_TRUE(response != nullptr);
+ ASSERT_EQ(version, response->actualVersion());
+ _sender.clear();
+ }
+
void complete_bucket_info_gathering(const lib::ClusterState& state,
size_t expected_msgs,
uint32_t bucket_count = 1,
@@ -290,6 +318,176 @@ public:
ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(state, expected_msgs, n_buckets));
}
+ void complete_state_transition_in_seconds(const std::string& stateStr,
+ uint32_t seconds,
+ uint32_t expectedMsgs)
+ {
+ _sender.clear();
+ lib::ClusterState state(stateStr);
+ set_cluster_state(state);
+ fake_clock().addSecondsToTime(seconds);
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(state, expectedMsgs));
+ }
+
+ uint64_t last_transition_time_in_millis() {
+ {
+ // Force stripe metrics to be aggregated into total.
+ std::mutex l;
+ distributor_metric_update_hook().updateMetrics(metrics::MetricLockGuard(l));
+ }
+ return uint64_t(total_distributor_metrics().stateTransitionTime.getLast());
+ }
+
+ ClusterInformation::CSP create_cluster_info(const std::string& clusterStateString) {
+ lib::ClusterState baseline_cluster_state(clusterStateString);
+ lib::ClusterStateBundle cluster_state_bundle(baseline_cluster_state);
+ auto cluster_info = std::make_shared<SimpleClusterInformation>(
+ _distributor->node_identity().node_index(),
+ cluster_state_bundle,
+ "ui");
+ enable_distributor_cluster_state(clusterStateString);
+ return cluster_info;
+ }
+
+ struct PendingClusterStateFixture {
+ DistributorMessageSenderStub sender;
+ framework::defaultimplementation::FakeClock clock;
+ std::unique_ptr<PendingClusterState> state;
+
+ PendingClusterStateFixture(
+ TopLevelBucketDBUpdaterTest& owner,
+ const std::string& old_cluster_state,
+ const std::string& new_cluster_state)
+ {
+ auto cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterState(new_cluster_state));
+ auto cluster_info = owner.create_cluster_info(old_cluster_state);
+ OutdatedNodesMap outdated_nodes_map;
+ state = PendingClusterState::createForClusterStateChange(
+ clock, cluster_info, sender,
+ owner.top_level_bucket_space_repo(),
+ cmd, outdated_nodes_map, api::Timestamp(1));
+ }
+
+ PendingClusterStateFixture(
+ TopLevelBucketDBUpdaterTest& owner,
+ const std::string& old_cluster_state)
+ {
+ auto cluster_info = owner.create_cluster_info(old_cluster_state);
+ state = PendingClusterState::createForDistributionChange(
+ clock, cluster_info, sender, owner.top_level_bucket_space_repo(), api::Timestamp(1));
+ }
+ };
+
+ std::unique_ptr<PendingClusterStateFixture> create_pending_state_fixture_for_state_change(
+ const std::string& oldClusterState,
+ const std::string& newClusterState)
+ {
+ return std::make_unique<PendingClusterStateFixture>(*this, oldClusterState, newClusterState);
+ }
+
+ std::unique_ptr<PendingClusterStateFixture> create_pending_state_fixture_for_distribution_change(
+ const std::string& oldClusterState)
+ {
+ return std::make_unique<PendingClusterStateFixture>(*this, oldClusterState);
+ }
+
+ std::string get_sent_nodes(const std::string& old_cluster_state,
+ const std::string& new_cluster_state);
+
+ std::string get_sent_nodes_distribution_changed(const std::string& old_cluster_state);
+
+ std::string get_node_list(const std::vector<uint16_t>& nodes, size_t count);
+ std::string get_node_list(const std::vector<uint16_t>& nodes);
+
+ std::string merge_bucket_lists(const lib::ClusterState& old_state,
+ const std::string& existing_data,
+ const lib::ClusterState& new_state,
+ const std::string& new_data,
+ bool include_bucket_info = false);
+
+ std::string merge_bucket_lists(const std::string& existingData,
+ const std::string& newData,
+ bool includeBucketInfo = false);
+
+ std::vector<uint16_t> get_send_set() const;
+
+ std::vector<uint16_t> get_sent_nodes_with_preemption(
+ const std::string& old_cluster_state,
+ uint32_t expected_old_state_messages,
+ const std::string& preempted_cluster_state,
+ const std::string& new_cluster_state);
+
+ std::vector<uint16_t> expand_node_vec(const std::vector<uint16_t>& nodes);
+
+ void trigger_completed_but_not_yet_activated_transition(
+ vespalib::stringref initial_state_str,
+ uint32_t initial_buckets,
+ uint32_t initial_expected_msgs,
+ vespalib::stringref pending_state_str,
+ uint32_t pending_buckets,
+ uint32_t pending_expected_msgs);
+
+ const DistributorBucketSpaceRepo& mutable_repo(DistributorStripe& s) const noexcept {
+ return s.getBucketSpaceRepo();
+ }
+ // Note: not calling this "immutable_repo" since it may actually be modified by the pending
+ // cluster state component (just not by operations), so it would not have the expected semantics.
+ const DistributorBucketSpaceRepo& read_only_repo(DistributorStripe& s) const noexcept {
+ return s.getReadOnlyBucketSpaceRepo();
+ }
+
+ const BucketDatabase& mutable_default_db(DistributorStripe& s) const noexcept {
+ return mutable_repo(s).get(FixedBucketSpaces::default_space()).getBucketDatabase();
+ }
+ const BucketDatabase& mutable_global_db(DistributorStripe& s) const noexcept {
+ return mutable_repo(s).get(FixedBucketSpaces::global_space()).getBucketDatabase();
+ }
+ const BucketDatabase& read_only_default_db(DistributorStripe& s) const noexcept {
+ return read_only_repo(s).get(FixedBucketSpaces::default_space()).getBucketDatabase();
+ }
+ const BucketDatabase& read_only_global_db(DistributorStripe& s) const noexcept {
+ return read_only_repo(s).get(FixedBucketSpaces::global_space()).getBucketDatabase();
+ }
+
+ void set_stale_reads_enabled(bool enabled) {
+ for (auto* s : distributor_stripes()) {
+ s->bucket_db_updater().set_stale_reads_enabled(enabled);
+ }
+ bucket_db_updater().set_stale_reads_enabled(enabled);
+ }
+
+ size_t mutable_default_dbs_size() const {
+ size_t total = 0;
+ for (auto* s : distributor_stripes()) {
+ total += mutable_default_db(*s).size();
+ }
+ return total;
+ }
+
+ size_t mutable_global_dbs_size() const {
+ size_t total = 0;
+ for (auto* s : distributor_stripes()) {
+ total += mutable_global_db(*s).size();
+ }
+ return total;
+ }
+
+ size_t read_only_default_dbs_size() const {
+ size_t total = 0;
+ for (auto* s : distributor_stripes()) {
+ total += read_only_default_db(*s).size();
+ }
+ return total;
+ }
+
+ size_t read_only_global_dbs_size() const {
+ size_t total = 0;
+ for (auto* s : distributor_stripes()) {
+ total += read_only_global_db(*s).size();
+ }
+ return total;
+ }
+
};
TopLevelBucketDBUpdaterTest::TopLevelBucketDBUpdaterTest()
@@ -347,6 +545,21 @@ std::string dist_config_6_nodes_across_4_groups() {
"group[3].nodes[1].index 5\n");
}
+std::string dist_config_3_nodes_in_1_group() {
+ return ("redundancy 2\n"
+ "group[2]\n"
+ "group[0].name \"invalid\"\n"
+ "group[0].index \"invalid\"\n"
+ "group[0].partitions 1|*\n"
+ "group[0].nodes[0]\n"
+ "group[1].name rack0\n"
+ "group[1].index 0\n"
+ "group[1].nodes[3]\n"
+ "group[1].nodes[0].index 0\n"
+ "group[1].nodes[1].index 1\n"
+ "group[1].nodes[2].index 2\n");
+}
+
std::string
make_string_list(std::string s, uint32_t count)
{
@@ -368,6 +581,54 @@ make_request_bucket_info_strings(uint32_t count)
}
+
+std::string
+TopLevelBucketDBUpdaterTest::get_node_list(const std::vector<uint16_t>& nodes, size_t count)
+{
+ std::ostringstream ost;
+ bool first = true;
+ for (const auto node : nodes) {
+ for (uint32_t i = 0; i < count; ++i) {
+ if (!first) {
+ ost << ",";
+ }
+ ost << node;
+ first = false;
+ }
+ }
+ return ost.str();
+}
+
+std::string
+TopLevelBucketDBUpdaterTest::get_node_list(const std::vector<uint16_t>& nodes)
+{
+ return get_node_list(nodes, _bucket_spaces.size());
+}
+
+void
+TopLevelBucketDBUpdaterTest::trigger_completed_but_not_yet_activated_transition(
+ vespalib::stringref initial_state_str,
+ uint32_t initial_buckets,
+ uint32_t initial_expected_msgs,
+ vespalib::stringref pending_state_str,
+ uint32_t pending_buckets,
+ uint32_t pending_expected_msgs)
+{
+ lib::ClusterState initial_state(initial_state_str);
+ set_cluster_state(initial_state);
+ ASSERT_EQ(message_count(initial_expected_msgs), _sender.commands().size());
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(
+ initial_state, message_count(initial_expected_msgs), initial_buckets));
+ _sender.clear();
+
+ lib::ClusterState pending_state(pending_state_str); // Ownership change
+ set_cluster_state_bundle(lib::ClusterStateBundle(pending_state, {}, true));
+ ASSERT_EQ(message_count(pending_expected_msgs), _sender.commands().size());
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(
+ pending_state, message_count(pending_expected_msgs), pending_buckets));
+ _sender.clear();
+}
+
TEST_F(TopLevelBucketDBUpdaterTest, normal_usage) {
set_cluster_state(lib::ClusterState("distributor:2 .0.s:i .1.s:i storage:3"));
@@ -525,13 +786,13 @@ TEST_F(TopLevelBucketDBUpdaterTest, failed_request_bucket_info) {
}
for (int i=0; i<10; i++) {
- EXPECT_EQ(std::string(""),
- verifyBucket(document::BucketId(16, i),
- lib::ClusterState("distributor:1 storage:1")));
+ EXPECT_EQ("",
+ verify_bucket(document::BucketId(16, i),
+ lib::ClusterState("distributor:1 storage:1")));
}
// Set system state should now be passed on
- EXPECT_EQ(std::string("Set system state"), _sender_down.getCommands());
+ EXPECT_EQ("Set system state", _sender_down.getCommands());
}
TEST_F(TopLevelBucketDBUpdaterTest, down_while_init) {
@@ -588,9 +849,9 @@ TEST_F(TopLevelBucketDBUpdaterTest, node_down_copies_get_in_sync) {
set_cluster_state("distributor:1 storage:3 .1.s:d");
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false,ready=false), "
- "node(idx=2,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false,ready=false)"),
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false,ready=false), "
+ "node(idx=2,crc=0x3,docs=3/3,bytes=3/3,trusted=true,active=false,ready=false)",
dump_bucket(bid));
}
@@ -651,11 +912,11 @@ TEST_F(TopLevelBucketDBUpdaterTest, bit_change) {
}
}
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)",
dump_bucket(bucketlist[0]));
- EXPECT_EQ(std::string("BucketId(0x4000000000000002) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
+ EXPECT_EQ("BucketId(0x4000000000000002) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)",
dump_bucket(bucketlist[1]));
{
@@ -688,17 +949,17 @@ TEST_F(TopLevelBucketDBUpdaterTest, bit_change) {
}
}
- EXPECT_EQ(std::string("BucketId(0x4000000000000000) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
+ EXPECT_EQ("BucketId(0x4000000000000000) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)",
dump_bucket(document::BucketId(16, 0)));
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)",
dump_bucket(document::BucketId(16, 1)));
- EXPECT_EQ(std::string("BucketId(0x4000000000000002) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
+ EXPECT_EQ("BucketId(0x4000000000000002) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)",
dump_bucket(document::BucketId(16, 2)));
- EXPECT_EQ(std::string("BucketId(0x4000000000000004) : "
- "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)"),
+ EXPECT_EQ("BucketId(0x4000000000000004) : "
+ "node(idx=0,crc=0xa,docs=1/1,bytes=1/1,trusted=true,active=false,ready=false)",
dump_bucket(document::BucketId(16, 4)));
_sender.clear();
@@ -822,9 +1083,9 @@ TEST_F(TopLevelBucketDBUpdaterTest, notify_bucket_change) {
}
// No database update until request bucket info replies have been received.
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x4d2,docs=1234/1234,bytes=1234/1234,"
- "trusted=false,active=false,ready=false)"),
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x4d2,docs=1234/1234,bytes=1234/1234,"
+ "trusted=false,active=false,ready=false)",
dump_bucket(document::BucketId(16, 1)));
EXPECT_EQ(std::string("NONEXISTING"), dump_bucket(document::BucketId(16, 2)));
@@ -845,11 +1106,11 @@ TEST_F(TopLevelBucketDBUpdaterTest, notify_bucket_change) {
stripe_of_bucket(bucket_id).bucket_db_updater().onRequestBucketInfoReply(reply);
}
- EXPECT_EQ(std::string("BucketId(0x4000000000000001) : "
- "node(idx=0,crc=0x11d7,docs=200/400,bytes=2000/4000,trusted=true,active=true,ready=true)"),
+ EXPECT_EQ("BucketId(0x4000000000000001) : "
+ "node(idx=0,crc=0x11d7,docs=200/400,bytes=2000/4000,trusted=true,active=true,ready=true)",
dump_bucket(document::BucketId(16, 1)));
- EXPECT_EQ(std::string("BucketId(0x4000000000000002) : "
- "node(idx=0,crc=0x2327,docs=300/500,bytes=3000/5000,trusted=true,active=false,ready=false)"),
+ EXPECT_EQ("BucketId(0x4000000000000002) : "
+ "node(idx=0,crc=0x2327,docs=300/500,bytes=3000/5000,trusted=true,active=false,ready=false)",
dump_bucket(document::BucketId(16, 2)));
}
@@ -947,4 +1208,1458 @@ TEST_F(TopLevelBucketDBUpdaterTest, notify_change_with_pending_state_queues_buck
}
}
+TEST_F(TopLevelBucketDBUpdaterTest, merge_reply) {
+ enable_distributor_cluster_state("distributor:1 storage:3");
+
+ document::BucketId bucket_id(16, 1234);
+ add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234");
+
+ std::vector<api::MergeBucketCommand::Node> nodes;
+ nodes.push_back(api::MergeBucketCommand::Node(0));
+ nodes.push_back(api::MergeBucketCommand::Node(1));
+ nodes.push_back(api::MergeBucketCommand::Node(2));
+
+ api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0);
+ auto reply = std::make_shared<api::MergeBucketReply>(cmd);
+
+ _sender.clear();
+ stripe_of_bucket(bucket_id).bucket_db_updater().onMergeBucketReply(reply);
+
+ ASSERT_EQ(size_t(3), _sender.commands().size());
+
+ for (uint32_t i = 0; i < 3; i++) {
+ auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i));
+
+ ASSERT_TRUE(req.get() != nullptr);
+ ASSERT_EQ(size_t(1), req->getBuckets().size());
+ EXPECT_EQ(bucket_id, req->getBuckets()[0]);
+
+ auto reqreply = std::make_shared<api::RequestBucketInfoReply>(*req);
+ reqreply->getBucketInfo().push_back(
+ api::RequestBucketInfoReply::Entry(bucket_id,
+ api::BucketInfo(10 * (i + 1), 100 * (i +1), 1000 * (i+1))));
+
+ stripe_of_bucket(bucket_id).bucket_db_updater().onRequestBucketInfoReply(reqreply);
+ }
+
+ EXPECT_EQ("BucketId(0x40000000000004d2) : "
+ "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), "
+ "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false), "
+ "node(idx=2,crc=0x1e,docs=300/300,bytes=3000/3000,trusted=false,active=false,ready=false)",
+ dump_bucket(bucket_id));
+};
+
+TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_node_down) {
+ enable_distributor_cluster_state("distributor:1 storage:3");
+ std::vector<api::MergeBucketCommand::Node> nodes;
+
+ document::BucketId bucket_id(16, 1234);
+ add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234");
+
+ for (uint32_t i = 0; i < 3; ++i) {
+ nodes.push_back(api::MergeBucketCommand::Node(i));
+ }
+
+ api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0);
+ auto reply = std::make_shared<api::MergeBucketReply>(cmd);
+
+ set_cluster_state(lib::ClusterState("distributor:1 storage:2"));
+
+ _sender.clear();
+ stripe_of_bucket(bucket_id).bucket_db_updater().onMergeBucketReply(reply);
+
+ ASSERT_EQ(size_t(2), _sender.commands().size());
+
+ for (uint32_t i = 0; i < 2; i++) {
+ auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i));
+
+ ASSERT_TRUE(req.get() != nullptr);
+ ASSERT_EQ(size_t(1), req->getBuckets().size());
+ EXPECT_EQ(bucket_id, req->getBuckets()[0]);
+
+ auto reqreply = std::make_shared<api::RequestBucketInfoReply>(*req);
+ reqreply->getBucketInfo().push_back(
+ api::RequestBucketInfoReply::Entry(
+ bucket_id,
+ api::BucketInfo(10 * (i + 1), 100 * (i +1), 1000 * (i+1))));
+ stripe_of_bucket(bucket_id).bucket_db_updater().onRequestBucketInfoReply(reqreply);
+ }
+
+ EXPECT_EQ("BucketId(0x40000000000004d2) : "
+ "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), "
+ "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false)",
+ dump_bucket(bucket_id));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, merge_reply_node_down_after_request_sent) {
+ enable_distributor_cluster_state("distributor:1 storage:3");
+ std::vector<api::MergeBucketCommand::Node> nodes;
+
+ document::BucketId bucket_id(16, 1234);
+ add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234");
+
+ for (uint32_t i = 0; i < 3; ++i) {
+ nodes.push_back(api::MergeBucketCommand::Node(i));
+ }
+
+ api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0);
+ auto reply = std::make_shared<api::MergeBucketReply>(cmd);
+
+ _sender.clear();
+ stripe_of_bucket(bucket_id).bucket_db_updater().onMergeBucketReply(reply);
+
+ ASSERT_EQ(size_t(3), _sender.commands().size());
+
+ set_cluster_state(lib::ClusterState("distributor:1 storage:2"));
+
+ for (uint32_t i = 0; i < 3; i++) {
+ auto req = std::dynamic_pointer_cast<api::RequestBucketInfoCommand>(_sender.command(i));
+
+ ASSERT_TRUE(req.get() != nullptr);
+ ASSERT_EQ(size_t(1), req->getBuckets().size());
+ EXPECT_EQ(bucket_id, req->getBuckets()[0]);
+
+ auto reqreply = std::make_shared<api::RequestBucketInfoReply>(*req);
+ reqreply->getBucketInfo().push_back(
+ api::RequestBucketInfoReply::Entry(
+ bucket_id,
+ api::BucketInfo(10 * (i + 1), 100 * (i +1), 1000 * (i+1))));
+ stripe_of_bucket(bucket_id).bucket_db_updater().onRequestBucketInfoReply(reqreply);
+ }
+
+ EXPECT_EQ("BucketId(0x40000000000004d2) : "
+ "node(idx=0,crc=0xa,docs=100/100,bytes=1000/1000,trusted=false,active=false,ready=false), "
+ "node(idx=1,crc=0x14,docs=200/200,bytes=2000/2000,trusted=false,active=false,ready=false)",
+ dump_bucket(bucket_id));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, flush) {
+ enable_distributor_cluster_state("distributor:1 storage:3");
+ _sender.clear();
+
+ document::BucketId bucket_id(16, 1234);
+ add_nodes_to_stripe_bucket_db(bucket_id, "0=1234,1=1234,2=1234");
+
+ std::vector<api::MergeBucketCommand::Node> nodes;
+ for (uint32_t i = 0; i < 3; ++i) {
+ nodes.push_back(api::MergeBucketCommand::Node(i));
+ }
+
+ api::MergeBucketCommand cmd(makeDocumentBucket(bucket_id), nodes, 0);
+ auto reply = std::make_shared<api::MergeBucketReply>(cmd);
+
+ _sender.clear();
+ stripe_of_bucket(bucket_id).bucket_db_updater().onMergeBucketReply(reply);
+
+ ASSERT_EQ(size_t(3), _sender.commands().size());
+ ASSERT_EQ(size_t(0), _sender_down.replies().size());
+
+ stripe_of_bucket(bucket_id).bucket_db_updater().flush();
+ // Flushing should drop all merge bucket replies
+ EXPECT_EQ(size_t(0), _sender_down.commands().size());
+}
+
+std::string
+TopLevelBucketDBUpdaterTest::get_sent_nodes(const std::string& old_cluster_state,
+ const std::string& new_cluster_state)
+{
+ auto fixture = create_pending_state_fixture_for_state_change(old_cluster_state, new_cluster_state);
+ sort_sent_messages_by_index(fixture->sender);
+
+ std::ostringstream ost;
+ for (uint32_t i = 0; i < fixture->sender.commands().size(); i++) {
+ auto& req = dynamic_cast<RequestBucketInfoCommand&>(*fixture->sender.command(i));
+
+ if (i > 0) {
+ ost << ",";
+ }
+
+ ost << req.getAddress()->getIndex();
+ }
+
+ return ost.str();
+}
+
+std::string
+TopLevelBucketDBUpdaterTest::get_sent_nodes_distribution_changed(const std::string& old_cluster_state)
+{
+ DistributorMessageSenderStub sender;
+
+ framework::defaultimplementation::FakeClock clock;
+ auto cluster_info = create_cluster_info(old_cluster_state);
+ std::unique_ptr<PendingClusterState> state(
+ PendingClusterState::createForDistributionChange(
+ clock, cluster_info, sender, top_level_bucket_space_repo(), api::Timestamp(1)));
+
+ sort_sent_messages_by_index(sender);
+
+ std::ostringstream ost;
+ for (uint32_t i = 0; i < sender.commands().size(); i++) {
+ auto& req = dynamic_cast<RequestBucketInfoCommand&>(*sender.command(i));
+
+ if (i > 0) {
+ ost << ",";
+ }
+
+ ost << req.getAddress()->getIndex();
+ }
+
+ return ost.str();
+}
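+
+// Note for readers: both helpers above summarize which nodes bucket info requests were
+// sent to as a comma-separated list of node indices (one entry per sent request, so a
+// node will typically appear once per configured bucket space). The expectations below
+// compare this against get_node_list(), which presumably renders the same format for a
+// given node set.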
+
+TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_send_messages) {
+ EXPECT_EQ(get_node_list({0, 1, 2}),
+ get_sent_nodes("cluster:d",
+ "distributor:1 storage:3"));
+
+ EXPECT_EQ(get_node_list({0, 1}),
+ get_sent_nodes("cluster:d",
+ "distributor:1 storage:3 .2.s:m"));
+
+ EXPECT_EQ(get_node_list({2}),
+ get_sent_nodes("distributor:1 storage:2",
+ "distributor:1 storage:3"));
+
+ EXPECT_EQ(get_node_list({2, 3, 4, 5}),
+ get_sent_nodes("distributor:1 storage:2",
+ "distributor:1 storage:6"));
+
+ EXPECT_EQ(get_node_list({0, 1, 2}),
+ get_sent_nodes("distributor:4 storage:3",
+ "distributor:3 storage:3"));
+
+ EXPECT_EQ(get_node_list({0, 1, 2, 3}),
+ get_sent_nodes("distributor:4 storage:3",
+ "distributor:4 .2.s:d storage:4"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:4 storage:3",
+ "distributor:4 .0.s:d storage:4"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:3 storage:3",
+ "distributor:4 storage:3"));
+
+ EXPECT_EQ(get_node_list({2}),
+ get_sent_nodes("distributor:3 storage:3 .2.s:i",
+ "distributor:3 storage:3"));
+
+ EXPECT_EQ(get_node_list({1}),
+ get_sent_nodes("distributor:3 storage:3 .1.s:d",
+ "distributor:3 storage:3"));
+
+ EXPECT_EQ(get_node_list({1, 2, 4}),
+ get_sent_nodes("distributor:3 storage:4 .1.s:d .2.s:i",
+ "distributor:3 storage:5"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:1 storage:3",
+ "cluster:d"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:1 storage:3",
+ "distributor:1 storage:3"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:1 storage:3",
+ "cluster:d distributor:1 storage:6"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:3 storage:3",
+ "distributor:3 .2.s:m storage:3"));
+
+ EXPECT_EQ(get_node_list({0, 1, 2}),
+ get_sent_nodes("distributor:3 .2.s:m storage:3",
+ "distributor:3 .2.s:d storage:3"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:3 .2.s:m storage:3",
+ "distributor:3 storage:3"));
+
+ EXPECT_EQ(get_node_list({0, 1, 2}),
+ get_sent_nodes_distribution_changed("distributor:3 storage:3"));
+
+ EXPECT_EQ(get_node_list({0, 1}),
+ get_sent_nodes("distributor:10 storage:2",
+ "distributor:10 .1.s:d storage:2"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:2 storage:2",
+ "distributor:3 .2.s:i storage:2"));
+
+ EXPECT_EQ(get_node_list({0, 1, 2}),
+ get_sent_nodes("distributor:3 storage:3",
+ "distributor:3 .2.s:s storage:3"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:3 .2.s:s storage:3",
+ "distributor:3 .2.s:d storage:3"));
+
+ EXPECT_EQ(get_node_list({1}),
+ get_sent_nodes("distributor:3 storage:3 .1.s:m",
+ "distributor:3 storage:3"));
+
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:3 storage:3",
+ "distributor:3 storage:3 .1.s:m"));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_receive) {
+ DistributorMessageSenderStub sender;
+
+ auto cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterState("distributor:1 storage:3"));
+
+ framework::defaultimplementation::FakeClock clock;
+ auto cluster_info = create_cluster_info("cluster:d");
+ OutdatedNodesMap outdated_nodes_map;
+ std::unique_ptr<PendingClusterState> state(
+ PendingClusterState::createForClusterStateChange(
+ clock, cluster_info, sender, top_level_bucket_space_repo(),
+ cmd, outdated_nodes_map, api::Timestamp(1)));
+
+ ASSERT_EQ(message_count(3), sender.commands().size());
+
+ sort_sent_messages_by_index(sender);
+
+ std::ostringstream ost;
+ for (uint32_t i = 0; i < sender.commands().size(); i++) {
+ auto* req = dynamic_cast<RequestBucketInfoCommand*>(sender.command(i).get());
+ ASSERT_TRUE(req != nullptr);
+
+ auto rep = std::make_shared<RequestBucketInfoReply>(*req);
+
+ rep->getBucketInfo().push_back(
+ RequestBucketInfoReply::Entry(
+ document::BucketId(16, i),
+ api::BucketInfo(i, i, i, i, i)));
+
+ ASSERT_TRUE(state->onRequestBucketInfoReply(rep));
+ ASSERT_EQ((i == (sender.commands().size() - 1)), state->done());
+ }
+
+ auto& pending_transition = state->getPendingBucketSpaceDbTransition(makeBucketSpace());
+ EXPECT_EQ(3u, pending_transition.results().size());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_with_group_down) {
+ std::string config = dist_config_6_nodes_across_4_groups();
+ config += "distributor_auto_ownership_transfer_on_whole_group_down true\n";
+ set_distribution(config);
+
+ // Group config has nodes {0, 1}, {2, 3}, {4, 5}
+ // We're node index 0.
+
+ // Entire group 1 goes down. Must refetch from all nodes.
+ EXPECT_EQ(get_node_list({0, 1, 2, 3, 4, 5}),
+ get_sent_nodes("distributor:6 storage:6",
+ "distributor:6 .2.s:d .3.s:d storage:6"));
+
+    // But don't fetch if the whole group isn't down.
+ EXPECT_EQ("",
+ get_sent_nodes("distributor:6 storage:6",
+ "distributor:6 .2.s:d storage:6"));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_with_group_down_and_no_handover) {
+ std::string config = dist_config_6_nodes_across_4_groups();
+ config += "distributor_auto_ownership_transfer_on_whole_group_down false\n";
+ set_distribution(config);
+
+ // Group is down, but config says to not do anything about it.
+ EXPECT_EQ(get_node_list({0, 1, 2, 3, 4, 5}, _bucket_spaces.size() - 1),
+ get_sent_nodes("distributor:6 storage:6",
+ "distributor:6 .2.s:d .3.s:d storage:6"));
+}
+
+
+namespace {
+
+void
+parse_input_data(const std::string& data,
+ uint64_t timestamp,
+ PendingClusterState& state,
+ bool include_bucket_info)
+{
+ vespalib::StringTokenizer tokenizer(data, "|");
+ for (uint32_t i = 0; i < tokenizer.size(); i++) {
+ vespalib::StringTokenizer tok2(tokenizer[i], ":");
+
+ uint16_t node = atoi(tok2[0].data());
+
+ state.setNodeReplied(node);
+ auto& pending_transition = state.getPendingBucketSpaceDbTransition(makeBucketSpace());
+
+ vespalib::StringTokenizer tok3(tok2[1], ",");
+ for (uint32_t j = 0; j < tok3.size(); j++) {
+ if (include_bucket_info) {
+ vespalib::StringTokenizer tok4(tok3[j], "/");
+
+ pending_transition.addNodeInfo(
+ document::BucketId(16, atoi(tok4[0].data())),
+ BucketCopy(
+ timestamp,
+ node,
+ api::BucketInfo(
+ atoi(tok4[1].data()),
+ atoi(tok4[2].data()),
+ atoi(tok4[3].data()),
+ atoi(tok4[2].data()),
+ atoi(tok4[3].data()))));
+ } else {
+ pending_transition.addNodeInfo(
+ document::BucketId(16, atoi(tok3[j].data())),
+ BucketCopy(timestamp,
+ node,
+ api::BucketInfo(3, 3, 3, 3, 3)));
+ }
+ }
+ }
+}
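+
+// Illustrative reading of the input format (matching the DSL used by merge_bucket_lists()
+// further down): with include_bucket_info == true, "0:2/1/2/3|1:5/7/8/9" means node 0
+// reports bucket (16, 2) with checksum 1, 2 docs and 3 bytes, while node 1 reports bucket
+// (16, 5) with checksum 7, 8 docs and 9 bytes. Without bucket info, "0:2,5|1:5" simply
+// lists the raw bucket ids each node reports (each given dummy info 3/3/3).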
+
+struct BucketDumper : public BucketDatabase::EntryProcessor
+{
+ std::ostringstream ost;
+ bool _include_bucket_info;
+
+ explicit BucketDumper(bool include_bucket_info)
+ : _include_bucket_info(include_bucket_info)
+ {
+ }
+
+ bool process(const BucketDatabase::ConstEntryRef& e) override {
+ document::BucketId bucket_id(e.getBucketId());
+
+ ost << uint32_t(bucket_id.getRawId()) << ":";
+ for (uint32_t i = 0; i < e->getNodeCount(); ++i) {
+ if (i > 0) {
+ ost << ",";
+ }
+ const BucketCopy& copy(e->getNodeRef(i));
+ ost << copy.getNode();
+ if (_include_bucket_info) {
+ ost << "/" << copy.getChecksum()
+ << "/" << copy.getDocumentCount()
+ << "/" << copy.getTotalDocumentSize()
+ << "/" << (copy.trusted() ? "t" : "u");
+ }
+ }
+ ost << "|";
+ return true;
+ }
+};
+
+}
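+
+// For reference, BucketDumper above renders each DB entry as
+// "<raw bucket id>:<node>[,<node>...]|", where each node is optionally followed by
+// "/<checksum>/<doc count>/<total doc size>/<t|u>" when bucket info is included; this is
+// the right-hand side format asserted by the merge tests below.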
+
+std::string
+TopLevelBucketDBUpdaterTest::merge_bucket_lists(
+ const lib::ClusterState& old_state,
+ const std::string& existing_data,
+ const lib::ClusterState& new_state,
+ const std::string& new_data,
+ bool include_bucket_info)
+{
+ framework::defaultimplementation::FakeClock clock;
+ framework::MilliSecTimer timer(clock);
+
+ DistributorMessageSenderStub sender;
+ OutdatedNodesMap outdated_nodes_map;
+
+ {
+ auto cmd = std::make_shared<api::SetSystemStateCommand>(old_state);
+ api::Timestamp before_time(1);
+ auto cluster_info = create_cluster_info("cluster:d");
+
+ auto state = PendingClusterState::createForClusterStateChange(
+ clock, cluster_info, sender, top_level_bucket_space_repo(),
+ cmd, outdated_nodes_map, before_time);
+
+ parse_input_data(existing_data, before_time, *state, include_bucket_info);
+ auto guard = acquire_stripe_guard();
+ state->merge_into_bucket_databases(*guard);
+ }
+
+ BucketDumper dumper_tmp(true);
+ for (auto* s : distributor_stripes()) {
+ auto& db = s->getBucketSpaceRepo().get(document::FixedBucketSpaces::default_space()).getBucketDatabase();
+ db.forEach(dumper_tmp);
+ }
+
+ {
+ auto cmd = std::make_shared<api::SetSystemStateCommand>(lib::ClusterState(new_state));
+ api::Timestamp after_time(2);
+ auto cluster_info = create_cluster_info(old_state.toString());
+
+ auto state = PendingClusterState::createForClusterStateChange(
+ clock, cluster_info, sender, top_level_bucket_space_repo(),
+ cmd, outdated_nodes_map, after_time);
+
+ parse_input_data(new_data, after_time, *state, include_bucket_info);
+ auto guard = acquire_stripe_guard();
+ state->merge_into_bucket_databases(*guard);
+ }
+
+ BucketDumper dumper(include_bucket_info);
+ for (auto* s : distributor_stripes()) {
+ auto& db = s->getBucketSpaceRepo().get(document::FixedBucketSpaces::default_space()).getBucketDatabase();
+ db.forEach(dumper);
+ db.clear();
+ }
+ return dumper.ost.str();
+}
+
+std::string
+TopLevelBucketDBUpdaterTest::merge_bucket_lists(const std::string& existing_data,
+ const std::string& new_data,
+ bool include_bucket_info)
+{
+ return merge_bucket_lists(
+ lib::ClusterState("distributor:1 storage:3"),
+ existing_data,
+ lib::ClusterState("distributor:1 storage:3"),
+ new_data,
+ include_bucket_info);
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_merge) {
+    // Result is of the form: [bucket w/o count bits]:[node indexes]|..
+    // Input is of the form: [node]:[bucket w/o count bits]|...
+
+ // Simple initializing case - ask all nodes for info
+ EXPECT_EQ("4:0,1|2:0,1|6:1,2|1:0,2|5:2,0|3:2,1|",
+ merge_bucket_lists(
+ "",
+ "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6"));
+
+ // New node came up
+ EXPECT_EQ("4:0,1|2:0,1|6:1,2,3|1:0,2,3|5:2,0,3|3:2,1,3|",
+ merge_bucket_lists(
+ "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
+ "3:1,3,5,6"));
+
+    // Node came up with some buckets removed and some added.
+    // The buckets that disappeared from the node's report should not be
+    // removed from the DB, since the node didn't lose a disk.
+ EXPECT_EQ("8:0|4:0,1|2:0,1|6:1,0,2|1:0,2|5:2,0|3:2,1|",
+ merge_bucket_lists(
+ "0:1,2,4,5|1:2,3,4,6|2:1,3,5,6",
+ "0:1,2,6,8"));
+
+ // Bucket info format is "bucketid/checksum/count/size"
+ // Node went from initializing to up and invalid bucket went to empty.
+ EXPECT_EQ("2:0/0/0/0/t|",
+ merge_bucket_lists(
+ "0:2/0/0/1",
+ "0:2/0/0/0",
+ true));
+
+ EXPECT_EQ("5:1/2/3/4/u,0/0/0/0/u|",
+ merge_bucket_lists("", "0:5/0/0/0|1:5/2/3/4", true));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_merge_replica_changed) {
+ // Node went from initializing to up and non-invalid bucket changed.
+ EXPECT_EQ("2:0/2/3/4/t|3:0/2/4/6/t|",
+ merge_bucket_lists(
+ lib::ClusterState("distributor:1 storage:1 .0.s:i"),
+ "0:2/1/2/3,3/2/4/6",
+ lib::ClusterState("distributor:1 storage:1"),
+ "0:2/2/3/4,3/2/4/6",
+ true));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_current_state) {
+ document::BucketId bucket(16, 3);
+ lib::ClusterState state_before("distributor:1 storage:1");
+ {
+ uint32_t expected_msgs = _bucket_spaces.size(), dummy_buckets_to_return = 1;
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return));
+ }
+ _sender.clear();
+
+ stripe_of_bucket(bucket).bucket_db_updater().recheckBucketInfo(0, makeDocumentBucket(bucket));
+
+ ASSERT_EQ(size_t(1), _sender.commands().size());
+ auto rbi = std::dynamic_pointer_cast<RequestBucketInfoCommand>(_sender.command(0));
+
+ lib::ClusterState state_after("distributor:3 storage:3");
+
+ {
+ uint32_t expected_msgs = message_count(2), dummy_buckets_to_return = 1;
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_after, expected_msgs, dummy_buckets_to_return));
+ }
+ EXPECT_FALSE(distributor_bucket_space(bucket).get_bucket_ownership_flags(bucket).owned_in_current_state());
+
+ ASSERT_NO_FATAL_FAILURE(send_fake_reply_for_single_bucket_request(*rbi));
+
+ EXPECT_EQ("NONEXISTING", dump_bucket(bucket));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, no_db_resurrection_for_bucket_not_owned_in_pending_state) {
+ document::BucketId bucket(16, 3);
+ lib::ClusterState state_before("distributor:1 storage:1");
+ {
+ uint32_t expected_msgs = _bucket_spaces.size(), dummy_buckets_to_return = 1;
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return));
+ }
+ _sender.clear();
+
+ stripe_of_bucket(bucket).bucket_db_updater().recheckBucketInfo(0, makeDocumentBucket(bucket));
+
+ ASSERT_EQ(size_t(1), _sender.commands().size());
+ auto rbi = std::dynamic_pointer_cast<RequestBucketInfoCommand>(_sender.command(0));
+
+ lib::ClusterState state_after("distributor:3 storage:3");
+ // Set, but _don't_ enable cluster state. We want it to be pending.
+ set_cluster_state(state_after);
+ EXPECT_TRUE(distributor_bucket_space(bucket).get_bucket_ownership_flags(bucket).owned_in_current_state());
+ EXPECT_FALSE(distributor_bucket_space(bucket).get_bucket_ownership_flags(bucket).owned_in_pending_state());
+
+ ASSERT_NO_FATAL_FAILURE(send_fake_reply_for_single_bucket_request(*rbi));
+
+ EXPECT_EQ("NONEXISTING", dump_bucket(bucket));
+}
+
+/*
+ * If we get a distribution config change, it's important that cluster states that
+ * arrive after this, but _before_ the pending cluster state has finished, trigger
+ * a full bucket info fetch no matter what the cluster state change was! Otherwise,
+ * we would very likely end up without a complete view of the buckets in the cluster.
+ */
+TEST_F(TopLevelBucketDBUpdaterTest, cluster_state_always_sends_full_fetch_when_distribution_change_pending) {
+ lib::ClusterState state_before("distributor:6 storage:6");
+ {
+ uint32_t expected_msgs = message_count(6), dummy_buckets_to_return = 1;
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return));
+ }
+ _sender.clear();
+ std::string distConfig(dist_config_6_nodes_across_2_groups());
+ set_distribution(distConfig);
+
+ sort_sent_messages_by_index(_sender);
+ ASSERT_EQ(message_count(6), _sender.commands().size());
+ // Suddenly, a wild cluster state change appears! Even though this state
+ // does not in itself imply any bucket changes, it will still overwrite the
+ // pending cluster state and thus its state of pending bucket info requests.
+ set_cluster_state("distributor:6 .2.t:12345 storage:6");
+
+ ASSERT_EQ(message_count(12), _sender.commands().size());
+
+    // Send replies for the first message_count(6) messages (outdated requests).
+ int num_buckets = 10;
+ for (uint32_t i = 0; i < message_count(6); ++i) {
+ ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:6 storage:6"),
+ *_sender.command(i), num_buckets));
+ }
+ // No change from these.
+ ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(1, "distributor:6 storage:6"));
+
+ // Send for current pending.
+ for (uint32_t i = 0; i < message_count(6); ++i) {
+ ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:6 .2.t:12345 storage:6"),
+ *_sender.command(i + message_count(6)),
+ num_buckets));
+ }
+ ASSERT_NO_FATAL_FAILURE(assert_correct_buckets(num_buckets, "distributor:6 storage:6"));
+ _sender.clear();
+
+ // No more pending global fetch; this should be a no-op state.
+ set_cluster_state("distributor:6 .3.t:12345 storage:6");
+ EXPECT_EQ(size_t(0), _sender.commands().size());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, changed_distribution_config_triggers_recovery_mode) {
+ uint32_t num_buckets = 20;
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(lib::ClusterState("distributor:6 storage:6"),
+ message_count(6), num_buckets));
+ _sender.clear();
+ EXPECT_TRUE(all_distributor_stripes_are_in_recovery_mode());
+ complete_recovery_mode_on_all_stripes();
+ EXPECT_FALSE(all_distributor_stripes_are_in_recovery_mode());
+
+ set_distribution(dist_config_6_nodes_across_4_groups());
+ sort_sent_messages_by_index(_sender);
+ // No replies received yet, still no recovery mode.
+ EXPECT_FALSE(all_distributor_stripes_are_in_recovery_mode());
+
+ ASSERT_EQ(message_count(6), _sender.commands().size());
+ num_buckets = 10;
+ for (uint32_t i = 0; i < message_count(6); ++i) {
+ ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(lib::ClusterState("distributor:6 storage:6"),
+ *_sender.command(i), num_buckets));
+ }
+
+ // Pending cluster state (i.e. distribution) has been enabled, which should
+ // cause recovery mode to be entered.
+ EXPECT_TRUE(all_distributor_stripes_are_in_recovery_mode());
+ complete_recovery_mode_on_all_stripes();
+ EXPECT_FALSE(all_distributor_stripes_are_in_recovery_mode());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, changed_distribution_config_does_not_elide_bucket_db_pruning) {
+ set_distribution(dist_config_3_nodes_in_1_group());
+
+ constexpr uint32_t n_buckets = 100;
+ ASSERT_NO_FATAL_FAILURE(
+ set_and_enable_cluster_state(lib::ClusterState("distributor:6 storage:6"), message_count(6), n_buckets));
+ _sender.clear();
+
+ // Config implies a different node set than the current cluster state, so it's crucial that
+ // DB pruning is _not_ elided. Yes, this is inherently racing with cluster state changes and
+ // should be changed to be atomic and controlled by the cluster controller instead of config.
+ // But this is where we currently are.
+ set_distribution(dist_config_6_nodes_across_2_groups());
+ for (auto* s : distributor_stripes()) {
+ const auto& db = s->getBucketSpaceRepo().get(document::FixedBucketSpaces::default_space()).getBucketDatabase();
+ db.acquire_read_guard()->for_each([&]([[maybe_unused]] uint64_t key, const auto& e) {
+ auto id = e.getBucketId();
+ EXPECT_TRUE(distributor_bucket_space(id).get_bucket_ownership_flags(id).owned_in_pending_state());
+ });
+ }
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, newly_added_buckets_have_current_time_as_gc_timestamp) {
+ fake_clock().setAbsoluteTimeInSeconds(101234);
+ lib::ClusterState state_before("distributor:1 storage:1");
+ {
+ uint32_t expected_msgs = _bucket_spaces.size(), dummy_buckets_to_return = 1;
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return));
+ }
+    // set_and_enable_cluster_state adds n buckets with id (16, i)
+ document::BucketId bucket(16, 0);
+ BucketDatabase::Entry e = get_bucket(bucket);
+ ASSERT_TRUE(e.valid());
+ EXPECT_EQ(uint32_t(101234), e->getLastGarbageCollectionTime());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, newer_mutations_not_overwritten_by_earlier_bucket_fetch) {
+ {
+ lib::ClusterState state_before("distributor:1 storage:1 .0.s:i");
+ uint32_t expected_msgs = _bucket_spaces.size(), dummy_buckets_to_return = 0;
+        // This step is required to make the distributor ready to accept
+        // the explicit database insertion towards node 0 below.
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return));
+ }
+ _sender.clear();
+ fake_clock().setAbsoluteTimeInSeconds(1000);
+ lib::ClusterState state("distributor:1 storage:1");
+ set_cluster_state(state);
+ ASSERT_EQ(_bucket_spaces.size(), _sender.commands().size());
+
+ // Before replying with the bucket info, simulate the arrival of a mutation
+ // reply that alters the state of the bucket with information that will be
+    // more recent than what is returned by the bucket info. This information
+ // must not be lost when the bucket info is later merged into the database.
+ document::BucketId bucket(16, 1);
+ constexpr uint64_t insertion_timestamp = 1001ULL * 1000000;
+ api::BucketInfo wanted_info(5, 6, 7);
+ stripe_of_bucket(bucket).bucket_db_updater().operation_context().update_bucket_database(
+ makeDocumentBucket(bucket),
+ BucketCopy(insertion_timestamp, 0, wanted_info),
+ DatabaseUpdate::CREATE_IF_NONEXISTING);
+
+ fake_clock().setAbsoluteTimeInSeconds(1002);
+ constexpr uint32_t buckets_returned = 10; // Buckets (16, 0) ... (16, 9)
+ // Return bucket information which on the timeline might originate from
+    // anywhere between [1000, 1002]. Our assumption is that any mutation
+    // taking place after t=1000 must have its reply received and processed
+    // by this distributor and timestamped strictly higher than t=1000 (modulo
+    // clock skew, of course, but that is outside the scope of this test). A mutation
+    // happening before t=1000 but receiving a reply at t>1000 does not affect
+    // correctness, as its reply should contain the same bucket info as that
+    // contained in the full bucket reply, and the DB update is thus idempotent.
+ for (uint32_t i = 0; i < _bucket_spaces.size(); ++i) {
+ ASSERT_NO_FATAL_FAILURE(fake_bucket_reply(state, *_sender.command(i), buckets_returned));
+ }
+
+ BucketDatabase::Entry e = get_bucket(bucket);
+ ASSERT_EQ(uint32_t(1), e->getNodeCount());
+ EXPECT_EQ(wanted_info, e->getNodeRef(0).getBucketInfo());
+}
+
+std::vector<uint16_t>
+TopLevelBucketDBUpdaterTest::get_send_set() const
+{
+ std::vector<uint16_t> nodes;
+ std::transform(_sender.commands().begin(),
+ _sender.commands().end(),
+ std::back_inserter(nodes),
+ [](auto& cmd)
+ {
+ auto& req(dynamic_cast<const api::RequestBucketInfoCommand&>(*cmd));
+ return req.getAddress()->getIndex();
+ });
+ return nodes;
+}
+
+std::vector<uint16_t>
+TopLevelBucketDBUpdaterTest::get_sent_nodes_with_preemption(
+ const std::string& old_cluster_state,
+ uint32_t expected_old_state_messages,
+ const std::string& preempted_cluster_state,
+ const std::string& new_cluster_state)
+{
+ uint32_t dummy_buckets_to_return = 10;
+ // FIXME cannot chain assertion checks in non-void function
+ set_and_enable_cluster_state(lib::ClusterState(old_cluster_state),
+ expected_old_state_messages,
+ dummy_buckets_to_return);
+
+ _sender.clear();
+
+ set_cluster_state(preempted_cluster_state);
+ _sender.clear();
+ // Do not allow the pending state to become the active state; trigger a
+ // new transition without ACKing the info requests first. This will
+ // overwrite the pending state entirely.
+ set_cluster_state(lib::ClusterState(new_cluster_state));
+ return get_send_set();
+}
+
+std::vector<uint16_t>
+TopLevelBucketDBUpdaterTest::expand_node_vec(const std::vector<uint16_t>& nodes)
+{
+ std::vector<uint16_t> res;
+ size_t count = _bucket_spaces.size();
+ for (const auto &node : nodes) {
+ for (uint32_t i = 0; i < count; ++i) {
+ res.push_back(node);
+ }
+ }
+ return res;
+}
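+
+// Illustrative: with, say, two configured bucket spaces, expand_node_vec({0, 1}) yields
+// {0, 0, 1, 1}, i.e. each node index repeated once per bucket space, presumably mirroring
+// one expected info request per (node, bucket space) pair in the preemption tests below.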
+
+/*
+ * If we don't carry over the set of nodes that we need to fetch from,
+ * a naive comparison between the active state and the new state will
+ * make it appear to the distributor that nothing has changed, as any
+ * database modifications caused by intermediate states will not be
+ * accounted for (basically the ABA problem in a distributed setting).
+ */
+TEST_F(TopLevelBucketDBUpdaterTest, preempted_distributor_change_carries_node_set_over_to_next_state_fetch) {
+ EXPECT_EQ(expand_node_vec({0, 1, 2, 3, 4, 5}),
+ get_sent_nodes_with_preemption("version:1 distributor:6 storage:6",
+ message_count(6),
+ "version:2 distributor:6 .5.s:d storage:6",
+ "version:3 distributor:6 storage:6"));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, preempted_storage_change_carries_node_set_over_to_next_state_fetch) {
+ EXPECT_EQ(expand_node_vec({2, 3}),
+ get_sent_nodes_with_preemption(
+ "version:1 distributor:6 storage:6 .2.s:d",
+ message_count(5),
+ "version:2 distributor:6 storage:6 .2.s:d .3.s:d",
+ "version:3 distributor:6 storage:6"));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, preempted_storage_node_down_must_be_re_fetched) {
+ EXPECT_EQ(expand_node_vec({2}),
+ get_sent_nodes_with_preemption(
+ "version:1 distributor:6 storage:6",
+ message_count(6),
+ "version:2 distributor:6 storage:6 .2.s:d",
+ "version:3 distributor:6 storage:6"));
+}
+
+using NodeVec = std::vector<uint16_t>;
+
+TEST_F(TopLevelBucketDBUpdaterTest, do_not_send_to_preempted_node_now_in_down_state) {
+ EXPECT_EQ(NodeVec{},
+ get_sent_nodes_with_preemption(
+ "version:1 distributor:6 storage:6 .2.s:d",
+ message_count(5),
+ "version:2 distributor:6 storage:6", // Sends to 2.
+ "version:3 distributor:6 storage:6 .2.s:d")); // 2 down again.
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, do_not_send_to_preempted_node_not_part_of_new_state) {
+ // Even though 100 nodes are preempted, not all of these should be part
+ // of the request afterwards when only 6 are part of the state.
+ EXPECT_EQ(expand_node_vec({0, 1, 2, 3, 4, 5}),
+ get_sent_nodes_with_preemption(
+ "version:1 distributor:6 storage:100",
+ message_count(100),
+ "version:2 distributor:5 .4.s:d storage:100",
+ "version:3 distributor:6 storage:6"));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, outdated_node_set_cleared_after_successful_state_completion) {
+ lib::ClusterState state_before("version:1 distributor:6 storage:6 .1.t:1234");
+ uint32_t expected_msgs = message_count(6), dummy_buckets_to_return = 10;
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state_before, expected_msgs, dummy_buckets_to_return));
+ _sender.clear();
+ // New cluster state that should not by itself trigger any new fetches,
+    // unless the outdated node set is somehow not cleared after an enabled
+ // (completed) cluster state has been set.
+ set_cluster_state("version:3 distributor:6 storage:6");
+ EXPECT_EQ(size_t(0), _sender.commands().size());
+}
+
+// XXX test currently disabled since distribution config isn't used at all to
+// deduce the set of nodes to send to. This might not matter in practice, since
+// it is assumed that the cluster state matching the new distribution config
+// will follow very shortly after the config has been applied to the node. The
+// new cluster state will then send out requests to the correct node set.
+TEST_F(TopLevelBucketDBUpdaterTest, DISABLED_cluster_config_downsize_only_sends_to_available_nodes) {
+ uint32_t expected_msgs = 6, dummy_buckets_to_return = 20;
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(lib::ClusterState("distributor:6 storage:6"),
+ expected_msgs, dummy_buckets_to_return));
+ _sender.clear();
+
+ // Intentionally trigger a racing config change which arrives before the
+ // new cluster state representing it.
+ set_distribution(dist_config_3_nodes_in_1_group());
+ sort_sent_messages_by_index(_sender);
+
+ EXPECT_EQ((NodeVec{0, 1, 2}), get_send_set());
+}
+
+/**
+ * Test scenario where a cluster is downsized by removing a subset of the nodes
+ * from the distribution configuration. The system must be able to handle the
+ * set of nodes differing between two cluster states across a config change.
+ *
+ * See VESPA-790 for details.
+ */
+TEST_F(TopLevelBucketDBUpdaterTest, node_missing_from_config_is_treated_as_needing_ownership_transfer) {
+ uint32_t expected_msgs = message_count(3), dummy_buckets_to_return = 1;
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(lib::ClusterState("distributor:3 storage:3"),
+ expected_msgs, dummy_buckets_to_return));
+ _sender.clear();
+
+ // Cluster goes from {0, 1, 2} -> {0, 1}. This leaves us with a config
+ // that does not contain node 2 while the _active_ cluster state still
+ // contains this node.
+ const char* downsize_cfg =
+ "redundancy 2\n"
+ "distributor_auto_ownership_transfer_on_whole_group_down true\n"
+ "group[2]\n"
+ "group[0].name \"invalid\"\n"
+ "group[0].index \"invalid\"\n"
+ "group[0].partitions 1|*\n"
+ "group[0].nodes[0]\n"
+ "group[1].name rack0\n"
+ "group[1].index 0\n"
+ "group[1].nodes[2]\n"
+ "group[1].nodes[0].index 0\n"
+ "group[1].nodes[1].index 1\n";
+
+ set_distribution(downsize_cfg);
+ sort_sent_messages_by_index(_sender);
+ _sender.clear();
+
+ // Attempt to apply state with {0, 1} set. This will compare the new state
+ // with the previous state, which still has node 2.
+ expected_msgs = message_count(2);
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(lib::ClusterState("distributor:2 storage:2"),
+ expected_msgs, dummy_buckets_to_return));
+
+ EXPECT_EQ(expand_node_vec({0, 1}), get_send_set());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, changed_distributor_set_implies_ownership_transfer) {
+ auto fixture = create_pending_state_fixture_for_state_change(
+ "distributor:2 storage:2", "distributor:1 storage:2");
+ EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
+
+ fixture = create_pending_state_fixture_for_state_change(
+ "distributor:2 storage:2", "distributor:2 .1.s:d storage:2");
+ EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, unchanged_distributor_set_implies_no_ownership_transfer) {
+ auto fixture = create_pending_state_fixture_for_state_change(
+ "distributor:2 storage:2", "distributor:2 storage:1");
+ EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer());
+
+ fixture = create_pending_state_fixture_for_state_change(
+ "distributor:2 storage:2", "distributor:2 storage:2 .1.s:d");
+ EXPECT_FALSE(fixture->state->hasBucketOwnershipTransfer());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, changed_distribution_config_implies_ownership_transfer) {
+ auto fixture = create_pending_state_fixture_for_distribution_change("distributor:2 storage:2");
+ EXPECT_TRUE(fixture->state->hasBucketOwnershipTransfer());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, transition_time_tracked_for_single_state_change) {
+ ASSERT_NO_FATAL_FAILURE(complete_state_transition_in_seconds("distributor:2 storage:2", 5, message_count(2)));
+
+ EXPECT_EQ(uint64_t(5000), last_transition_time_in_millis());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, transition_time_reset_across_non_preempting_state_changes) {
+ ASSERT_NO_FATAL_FAILURE(complete_state_transition_in_seconds("distributor:2 storage:2", 5, message_count(2)));
+ ASSERT_NO_FATAL_FAILURE(complete_state_transition_in_seconds("distributor:2 storage:3", 3, message_count(1)));
+
+ EXPECT_EQ(uint64_t(3000), last_transition_time_in_millis());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, transition_time_tracked_for_distribution_config_change) {
+ lib::ClusterState state("distributor:2 storage:2");
+ ASSERT_NO_FATAL_FAILURE(set_and_enable_cluster_state(state, message_count(2), 1));
+
+ _sender.clear();
+ set_distribution(dist_config_3_nodes_in_1_group());
+ fake_clock().addSecondsToTime(4);
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(state, message_count(2)));
+ EXPECT_EQ(uint64_t(4000), last_transition_time_in_millis());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, transition_time_tracked_across_preempted_transitions) {
+ _sender.clear();
+ set_cluster_state("version:1 distributor:2 storage:2");
+ fake_clock().addSecondsToTime(5);
+ // Pre-empted with new state here, which will push out the old pending
+ // state and replace it with a new one. We should still count the time
+    // spent processing the old state.
+ ASSERT_NO_FATAL_FAILURE(complete_state_transition_in_seconds("version:2 distributor:2 storage:3", 3, message_count(3)));
+
+ EXPECT_EQ(uint64_t(8000), last_transition_time_in_millis());
+}
+
+/*
+ * Brief reminder on test DSL for checking bucket merge operations:
+ *
+ * merge_bucket_lists() takes as input strings of the format
+ * <node>:<raw bucket id>/<checksum>/<num docs>/<doc size>|<node>:
+ * and returns a string describing the bucket DB post-merge with the format
+ * <raw bucket id>:<node>/<checksum>/<num docs>/<doc size>/<t|u>,<node>/....|<raw bucket id>:....
+ *
+ * Yes, the order of node<->bucket id is reversed between the two, perhaps to make sure you're awake.
+ */
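+// A small worked example, taken from the tests below: merge_bucket_lists("", "0:5/1/2/3", true)
+// feeds in node 0 reporting bucket 5 with checksum 1, 2 docs and size 3, and the resulting
+// DB dump is "5:0/1/2/3/t|", the single resulting replica being implicitly marked trusted
+// (see batch_add_with_single_resulting_replica_implicitly_marks_as_trusted).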
+
+TEST_F(TopLevelBucketDBUpdaterTest, batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted) {
+ // Replacing bucket information for content node 0 should not mark existing
+ // untrusted replica as trusted as a side effect.
+ EXPECT_EQ("5:1/7/8/9/u,0/1/2/3/u|",
+ merge_bucket_lists(
+ lib::ClusterState("distributor:1 storage:3 .0.s:i"),
+ "0:5/0/0/0|1:5/7/8/9",
+ lib::ClusterState("distributor:1 storage:3 .0.s:u"),
+ "0:5/1/2/3|1:5/7/8/9", true));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, batch_add_of_new_diverging_replicas_does_not_mark_any_as_trusted) {
+ EXPECT_EQ("5:1/7/8/9/u,0/1/2/3/u|",
+ merge_bucket_lists("", "0:5/1/2/3|1:5/7/8/9", true));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, batch_add_with_single_resulting_replica_implicitly_marks_as_trusted) {
+ EXPECT_EQ("5:0/1/2/3/t|",
+ merge_bucket_lists("", "0:5/1/2/3", true));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, identity_update_of_single_replica_does_not_clear_trusted) {
+ EXPECT_EQ("5:0/1/2/3/t|",
+ merge_bucket_lists("0:5/1/2/3", "0:5/1/2/3", true));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, identity_update_of_diverging_untrusted_replicas_does_not_mark_any_as_trusted) {
+ EXPECT_EQ("5:1/7/8/9/u,0/1/2/3/u|",
+ merge_bucket_lists("0:5/1/2/3|1:5/7/8/9", "0:5/1/2/3|1:5/7/8/9", true));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, adding_diverging_replica_to_existing_trusted_does_not_remove_trusted) {
+ EXPECT_EQ("5:1/2/3/4/u,0/1/2/3/t|",
+ merge_bucket_lists("0:5/1/2/3", "0:5/1/2/3|1:5/2/3/4", true));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, batch_update_from_distributor_change_does_not_mark_diverging_replicas_as_trusted) {
+ // This differs from batch_update_of_existing_diverging_replicas_does_not_mark_any_as_trusted
+ // in that _all_ content nodes are considered outdated when distributor changes take place,
+ // and therefore a slightly different code path is taken. In particular, bucket info for
+ // outdated nodes gets removed before possibly being re-added (if present in the bucket info
+ // response).
+ EXPECT_EQ("5:1/7/8/9/u,0/1/2/3/u|",
+ merge_bucket_lists(
+ lib::ClusterState("distributor:2 storage:3"),
+ "0:5/1/2/3|1:5/7/8/9",
+ lib::ClusterState("distributor:1 storage:3"),
+ "0:5/1/2/3|1:5/7/8/9", true));
+}
+
+// TODO remove on Vespa 8 - this is a workaround for https://github.com/vespa-engine/vespa/issues/8475
+TEST_F(TopLevelBucketDBUpdaterTest, global_distribution_hash_falls_back_to_legacy_format_upon_request_rejection) {
+ set_distribution(dist_config_6_nodes_across_2_groups());
+
+ const vespalib::string current_hash = "(0d*|*(0;0;1;2)(1;3;4;5))";
+ const vespalib::string legacy_hash = "(0d3|3|*(0;0;1;2)(1;3;4;5))";
+
+ set_cluster_state("distributor:6 storage:6");
+ ASSERT_EQ(message_count(6), _sender.commands().size());
+
+ api::RequestBucketInfoCommand* global_req = nullptr;
+ for (auto& cmd : _sender.commands()) {
+ auto& req_cmd = dynamic_cast<api::RequestBucketInfoCommand&>(*cmd);
+ if (req_cmd.getBucketSpace() == document::FixedBucketSpaces::global_space()) {
+ global_req = &req_cmd;
+ break;
+ }
+ }
+ ASSERT_TRUE(global_req != nullptr);
+ ASSERT_EQ(current_hash, global_req->getDistributionHash());
+
+ auto reply = std::make_shared<api::RequestBucketInfoReply>(*global_req);
+ reply->setResult(api::ReturnCode::REJECTED);
+ bucket_db_updater().onRequestBucketInfoReply(reply);
+
+ fake_clock().addSecondsToTime(10);
+ bucket_db_updater().resend_delayed_messages();
+
+ // Should now be a resent request with legacy distribution hash
+ ASSERT_EQ(message_count(6) + 1, _sender.commands().size());
+ auto& legacy_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands().back());
+ ASSERT_EQ(legacy_hash, legacy_req.getDistributionHash());
+
+ // Now if we reject it _again_ we should cycle back to the current hash
+ // in case it wasn't a hash-based rejection after all. And the circle of life continues.
+ reply = std::make_shared<api::RequestBucketInfoReply>(legacy_req);
+ reply->setResult(api::ReturnCode::REJECTED);
+ bucket_db_updater().onRequestBucketInfoReply(reply);
+
+ fake_clock().addSecondsToTime(10);
+ bucket_db_updater().resend_delayed_messages();
+
+ ASSERT_EQ(message_count(6) + 2, _sender.commands().size());
+ auto& new_current_req = dynamic_cast<api::RequestBucketInfoCommand&>(*_sender.commands().back());
+ ASSERT_EQ(current_hash, new_current_req.getDistributionHash());
+}
+
+namespace {
+
+template <typename Func>
+void for_each_bucket(const BucketDatabase& db, const document::BucketSpace& space, Func&& f) {
+ BucketId last(0);
+ auto e = db.getNext(last);
+ while (e.valid()) {
+ f(space, e);
+ e = db.getNext(e.getBucketId());
+ }
+}
+
+template <typename Func>
+void for_each_bucket(const DistributorBucketSpaceRepo& repo, Func&& f) {
+ for (const auto& space : repo) {
+ for_each_bucket(space.second->getBucketDatabase(), space.first, f);
+ }
+}
+
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, non_owned_buckets_moved_to_read_only_db_on_ownership_change) {
+ set_stale_reads_enabled(true);
+
+ lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
+ set_cluster_state_bundle(lib::ClusterStateBundle(initial_state, {}, false)); // Skip activation step for simplicity
+
+ ASSERT_EQ(message_count(4), _sender.commands().size());
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(initial_state, message_count(4), n_buckets));
+ _sender.clear();
+
+ EXPECT_EQ(n_buckets, mutable_default_dbs_size());
+ EXPECT_EQ(n_buckets, mutable_global_dbs_size());
+ EXPECT_EQ(0u, read_only_default_dbs_size());
+ EXPECT_EQ(0u, read_only_global_dbs_size());
+
+ lib::ClusterState pending_state("distributor:2 storage:4");
+
+ std::unordered_set<Bucket, Bucket::hash> buckets_not_owned_in_pending_state;
+ for (auto* s : distributor_stripes()) {
+ for_each_bucket(mutable_repo(*s), [&](const auto& space, const auto& entry) {
+ if (!distributor_bucket_space(entry.getBucketId()).owns_bucket_in_state(pending_state, entry.getBucketId())) {
+ buckets_not_owned_in_pending_state.insert(Bucket(space, entry.getBucketId()));
+ }
+ });
+ }
+ EXPECT_FALSE(buckets_not_owned_in_pending_state.empty());
+
+ set_cluster_state_bundle(lib::ClusterStateBundle(pending_state, {}, true)); // Now requires activation
+
+ const auto buckets_not_owned_per_space = (buckets_not_owned_in_pending_state.size() / 2); // 2 spaces
+ const auto expected_mutable_buckets = n_buckets - buckets_not_owned_per_space;
+ EXPECT_EQ(expected_mutable_buckets, mutable_default_dbs_size());
+ EXPECT_EQ(expected_mutable_buckets, mutable_global_dbs_size());
+ EXPECT_EQ(buckets_not_owned_per_space, read_only_default_dbs_size());
+ EXPECT_EQ(buckets_not_owned_per_space, read_only_global_dbs_size());
+
+ for (auto* s : distributor_stripes()) {
+ for_each_bucket(read_only_repo(*s), [&](const auto& space, const auto& entry) {
+ EXPECT_TRUE(buckets_not_owned_in_pending_state.find(Bucket(space, entry.getBucketId()))
+ != buckets_not_owned_in_pending_state.end());
+ });
+ }
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, buckets_no_longer_available_are_not_moved_to_read_only_database) {
+ constexpr uint32_t n_buckets = 10;
+ // No ownership change, just node down. Test redundancy is 2, so removing 2 nodes will
+ // cause some buckets to be entirely unavailable.
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
+ "version:2 distributor:1 storage:4 .0.s:d .1.s:m", n_buckets, 0);
+
+ EXPECT_EQ(0u, read_only_default_dbs_size());
+ EXPECT_EQ(0u, read_only_global_dbs_size());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, non_owned_buckets_purged_when_read_only_support_is_config_disabled) {
+ set_stale_reads_enabled(false);
+
+ lib::ClusterState initial_state("distributor:1 storage:4"); // All buckets owned by us by definition
+ set_cluster_state_bundle(lib::ClusterStateBundle(initial_state, {}, false)); // Skip activation step for simplicity
+
+ ASSERT_EQ(message_count(4), _sender.commands().size());
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(initial_state, message_count(4), n_buckets));
+ _sender.clear();
+
+ // Nothing in read-only DB after first bulk load of buckets.
+ EXPECT_EQ(0u, read_only_default_dbs_size());
+ EXPECT_EQ(0u, read_only_global_dbs_size());
+
+ set_cluster_state("distributor:2 storage:4");
+ // No buckets should be moved into read only db after ownership changes.
+ EXPECT_EQ(0u, read_only_default_dbs_size());
+ EXPECT_EQ(0u, read_only_global_dbs_size());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, deferred_activated_state_does_not_enable_state_until_activation_received) {
+ set_stale_reads_enabled(true);
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
+ "version:2 distributor:1 storage:4", n_buckets, 4));
+
+ // Version should not be switched over yet
+ EXPECT_EQ(1u, current_cluster_state_bundle().getVersion());
+
+ EXPECT_EQ(0u, mutable_default_dbs_size());
+ EXPECT_EQ(0u, mutable_global_dbs_size());
+
+ EXPECT_FALSE(activate_cluster_state_version(2));
+
+ EXPECT_EQ(2u, current_cluster_state_bundle().getVersion());
+ EXPECT_EQ(n_buckets, mutable_default_dbs_size());
+ EXPECT_EQ(n_buckets, mutable_global_dbs_size());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, read_only_db_cleared_once_pending_state_is_activated) {
+ set_stale_reads_enabled(true);
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
+ "version:2 distributor:2 storage:4", n_buckets, 0));
+ EXPECT_FALSE(activate_cluster_state_version(2));
+
+ EXPECT_EQ(0u, read_only_default_dbs_size());
+ EXPECT_EQ(0u, read_only_global_dbs_size());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, read_only_db_is_populated_even_when_self_is_marked_down) {
+ set_stale_reads_enabled(true);
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
+ "version:2 distributor:1 .0.s:d storage:4", n_buckets, 0));
+
+    // State not yet activated, so the read-only DBs hold all the buckets we used to have.
+ EXPECT_EQ(0u, mutable_default_dbs_size());
+ EXPECT_EQ(0u, mutable_global_dbs_size());
+ EXPECT_EQ(n_buckets, read_only_default_dbs_size());
+ EXPECT_EQ(n_buckets, read_only_global_dbs_size());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, activate_cluster_state_request_with_mismatching_version_returns_actual_version) {
+ set_stale_reads_enabled(true);
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(
+ trigger_completed_but_not_yet_activated_transition("version:4 distributor:1 storage:4", n_buckets, 4,
+ "version:5 distributor:2 storage:4", n_buckets, 0));
+
+ EXPECT_TRUE(activate_cluster_state_version(4)); // Too old version
+ ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5));
+
+ EXPECT_TRUE(activate_cluster_state_version(6)); // More recent version than what has been observed
+ ASSERT_NO_FATAL_FAILURE(assert_has_activate_cluster_state_reply_with_actual_version(5));
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, activate_cluster_state_request_without_pending_transition_passes_message_through) {
+ set_stale_reads_enabled(true);
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
+ "version:2 distributor:1 storage:4", n_buckets, 4));
+ // Activate version 2; no pending cluster state after this.
+ EXPECT_FALSE(activate_cluster_state_version(2));
+
+ // No pending cluster state for version 3, just passed through to be implicitly bounced by state manager.
+ // Note: state manager is not modelled in this test, so we just check that the message handler returns
+ // false (meaning "didn't take message ownership") and there's no auto-generated reply.
+ EXPECT_FALSE(activate_cluster_state_version(3));
+ EXPECT_EQ(size_t(0), _sender.replies().size());
+}
+
+TEST_F(TopLevelBucketDBUpdaterTest, pending_cluster_state_getter_is_non_null_only_when_state_is_pending) {
+ auto initial_baseline = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d");
+ auto initial_default = std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m");
+
+ lib::ClusterStateBundle initial_bundle(*initial_baseline, {{FixedBucketSpaces::default_space(), initial_default},
+ {FixedBucketSpaces::global_space(), initial_baseline}});
+ set_cluster_state_bundle(initial_bundle);
+
+ for (auto* s : distributor_stripes()) {
+ auto* state = s->bucket_db_updater().pendingClusterStateOrNull(FixedBucketSpaces::default_space());
+ ASSERT_TRUE(state != nullptr);
+ EXPECT_EQ(*initial_default, *state);
+
+ state = s->bucket_db_updater().pendingClusterStateOrNull(FixedBucketSpaces::global_space());
+ ASSERT_TRUE(state != nullptr);
+ EXPECT_EQ(*initial_baseline, *state);
+ }
+
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(*initial_baseline, message_count(1), 0));
+
+ for (auto* s : distributor_stripes()) {
+ auto* state = s->bucket_db_updater().pendingClusterStateOrNull(FixedBucketSpaces::default_space());
+ EXPECT_TRUE(state == nullptr);
+
+ state = s->bucket_db_updater().pendingClusterStateOrNull(FixedBucketSpaces::global_space());
+ EXPECT_TRUE(state == nullptr);
+ }
+}
+
+struct BucketDBUpdaterSnapshotTest : TopLevelBucketDBUpdaterTest {
+ lib::ClusterState empty_state;
+ std::shared_ptr<lib::ClusterState> initial_baseline;
+ std::shared_ptr<lib::ClusterState> initial_default;
+ lib::ClusterStateBundle initial_bundle;
+ Bucket default_bucket;
+ Bucket global_bucket;
+
+ BucketDBUpdaterSnapshotTest()
+ : TopLevelBucketDBUpdaterTest(),
+ empty_state(),
+ initial_baseline(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:d")),
+ initial_default(std::make_shared<lib::ClusterState>("distributor:1 storage:2 .0.s:m")),
+ initial_bundle(*initial_baseline, {{FixedBucketSpaces::default_space(), initial_default},
+ {FixedBucketSpaces::global_space(), initial_baseline}}),
+ default_bucket(FixedBucketSpaces::default_space(), BucketId(16, 1234)),
+ global_bucket(FixedBucketSpaces::global_space(), BucketId(16, 1234))
+ {
+ }
+ ~BucketDBUpdaterSnapshotTest() override;
+
+ void SetUp() override {
+ TopLevelBucketDBUpdaterTest::SetUp();
+ set_stale_reads_enabled(true);
+    }
+
+    // Assumes that the distributor owns all buckets, so it may choose an arbitrary bucket in the bucket space
+ uint32_t buckets_in_snapshot_matching_current_db(bool check_mutable_repo, BucketSpace bucket_space) {
+ uint32_t found_buckets = 0;
+ for (auto* s : distributor_stripes()) {
+ auto rs = s->bucket_db_updater().read_snapshot_for_bucket(Bucket(bucket_space, BucketId(16, 1234)));
+ if (!rs.is_routable()) {
+ return 0;
+ }
+ auto guard = rs.steal_read_guard();
+ auto& repo = check_mutable_repo ? mutable_repo(*s) : read_only_repo(*s);
+ for_each_bucket(repo, [&](const auto& space, const auto& entry) {
+ if (space == bucket_space) {
+ auto entries = guard->find_parents_and_self(entry.getBucketId());
+ if (entries.size() == 1) {
+ ++found_buckets;
+ }
+ }
+ });
+ }
+ return found_buckets;
+ }
+};
+
+BucketDBUpdaterSnapshotTest::~BucketDBUpdaterSnapshotTest() = default;
+
+TEST_F(BucketDBUpdaterSnapshotTest, default_space_snapshot_prior_to_activated_state_is_non_routable) {
+ auto rs = stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket);
+ EXPECT_FALSE(rs.is_routable());
+}
+
+TEST_F(BucketDBUpdaterSnapshotTest, global_space_snapshot_prior_to_activated_state_is_non_routable) {
+ auto rs = stripe_of_bucket(global_bucket).bucket_db_updater().read_snapshot_for_bucket(global_bucket);
+ EXPECT_FALSE(rs.is_routable());
+}
+
+TEST_F(BucketDBUpdaterSnapshotTest, read_snapshot_returns_appropriate_cluster_states) {
+ set_cluster_state_bundle(initial_bundle);
+ // State currently pending, empty initial state is active
+
+ auto def_rs = stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket);
+ EXPECT_EQ(def_rs.context().active_cluster_state()->toString(), empty_state.toString());
+ EXPECT_EQ(def_rs.context().default_active_cluster_state()->toString(), empty_state.toString());
+ ASSERT_TRUE(def_rs.context().has_pending_state_transition());
+ EXPECT_EQ(def_rs.context().pending_cluster_state()->toString(), initial_default->toString());
+
+ auto global_rs = stripe_of_bucket(global_bucket).bucket_db_updater().read_snapshot_for_bucket(global_bucket);
+ EXPECT_EQ(global_rs.context().active_cluster_state()->toString(), empty_state.toString());
+ EXPECT_EQ(global_rs.context().default_active_cluster_state()->toString(), empty_state.toString());
+ ASSERT_TRUE(global_rs.context().has_pending_state_transition());
+ EXPECT_EQ(global_rs.context().pending_cluster_state()->toString(), initial_baseline->toString());
+
+ ASSERT_NO_FATAL_FAILURE(complete_bucket_info_gathering(*initial_baseline, message_count(1), 0));
+ // State now activated, no pending
+
+ def_rs = stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket);
+ EXPECT_EQ(def_rs.context().active_cluster_state()->toString(), initial_default->toString());
+ EXPECT_EQ(def_rs.context().default_active_cluster_state()->toString(), initial_default->toString());
+ EXPECT_FALSE(def_rs.context().has_pending_state_transition());
+
+ global_rs = stripe_of_bucket(global_bucket).bucket_db_updater().read_snapshot_for_bucket(global_bucket);
+ EXPECT_EQ(global_rs.context().active_cluster_state()->toString(), initial_baseline->toString());
+ EXPECT_EQ(global_rs.context().default_active_cluster_state()->toString(), initial_default->toString());
+ EXPECT_FALSE(global_rs.context().has_pending_state_transition());
+}
+
+TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_no_pending_state_transition_returns_mutable_db_guard) {
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
+ "version:2 distributor:1 storage:4", n_buckets, 4));
+ EXPECT_FALSE(activate_cluster_state_version(2));
+ EXPECT_EQ(buckets_in_snapshot_matching_current_db(true, FixedBucketSpaces::default_space()), n_buckets);
+ EXPECT_EQ(buckets_in_snapshot_matching_current_db(true, FixedBucketSpaces::global_space()), n_buckets);
+}
+
+TEST_F(BucketDBUpdaterSnapshotTest, snapshot_returns_unroutable_for_non_owned_bucket_in_current_state) {
+ ASSERT_NO_FATAL_FAILURE(
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:2 storage:4", 0, 4,
+ "version:2 distributor:2 .0.s:d storage:4", 0, 0));
+ EXPECT_FALSE(activate_cluster_state_version(2));
+ // We're down in state 2 and therefore do not own any buckets
+ auto def_rs = stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket);
+ EXPECT_FALSE(def_rs.is_routable());
+}
+
+TEST_F(BucketDBUpdaterSnapshotTest, snapshot_with_pending_state_returns_read_only_guard_for_bucket_only_owned_in_current_state) {
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
+ "version:2 distributor:2 .0.s:d storage:4", 0, 0));
+ EXPECT_EQ(buckets_in_snapshot_matching_current_db(false, FixedBucketSpaces::default_space()), n_buckets);
+ EXPECT_EQ(buckets_in_snapshot_matching_current_db(false, FixedBucketSpaces::global_space()), n_buckets);
+}
+
+TEST_F(BucketDBUpdaterSnapshotTest, snapshot_is_unroutable_if_stale_reads_disabled_and_bucket_not_owned_in_pending_state) {
+ set_stale_reads_enabled(false);
+ constexpr uint32_t n_buckets = 10;
+ ASSERT_NO_FATAL_FAILURE(
+ trigger_completed_but_not_yet_activated_transition("version:1 distributor:1 storage:4", n_buckets, 4,
+ "version:2 distributor:2 .0.s:d storage:4", 0, 0));
+ auto def_rs = stripe_of_bucket(default_bucket).bucket_db_updater().read_snapshot_for_bucket(default_bucket);
+ EXPECT_FALSE(def_rs.is_routable());
+}
+
}
diff --git a/storage/src/tests/distributor/top_level_distributor_test.cpp b/storage/src/tests/distributor/top_level_distributor_test.cpp
index d8df36e53b2..8fae1c6d738 100644
--- a/storage/src/tests/distributor/top_level_distributor_test.cpp
+++ b/storage/src/tests/distributor/top_level_distributor_test.cpp
@@ -70,18 +70,6 @@ struct TopLevelDistributorTest : Test, TopLevelDistributorTestUtil {
return posted_msgs.str();
}
- void tick_distributor_and_stripes_n_times(uint32_t n) {
- for (uint32_t i = 0; i < n; ++i) {
- tick(false);
- }
- }
-
- void tick_top_level_distributor_n_times(uint32_t n) {
- for (uint32_t i = 0; i < n; ++i) {
- tick(true);
- }
- }
-
StatusReporterDelegate& distributor_status_delegate() {
return _distributor->_distributorStatusDelegate;
}
@@ -98,10 +86,6 @@ struct TopLevelDistributorTest : Test, TopLevelDistributorTestUtil {
return _distributor->_status_to_do;
}
- TopLevelDistributor::MetricUpdateHook distributor_metric_update_hook() {
- return _distributor->_metricUpdateHook;
- }
-
BucketSpacesStatsProvider::PerNodeBucketSpacesStats distributor_bucket_spaces_stats() {
return _distributor->getBucketSpacesStats();
}
diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.cpp b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
index c4173f5e8ff..b6e9beb38ae 100644
--- a/storage/src/tests/distributor/top_level_distributor_test_util.cpp
+++ b/storage/src/tests/distributor/top_level_distributor_test_util.cpp
@@ -265,6 +265,25 @@ TopLevelDistributorTestUtil::get_bucket(const document::BucketId& bId) const
return stripe_bucket_database(stripe_index_of_bucket(bId)).get(bId);
}
+DistributorBucketSpaceRepo&
+TopLevelDistributorTestUtil::top_level_bucket_space_repo() noexcept
+{
+ return _distributor->_component.bucket_space_repo();
+}
+
+const DistributorBucketSpaceRepo&
+TopLevelDistributorTestUtil::top_level_bucket_space_repo() const noexcept
+{
+ return _distributor->_component.bucket_space_repo();
+}
+
+std::unique_ptr<StripeAccessGuard>
+TopLevelDistributorTestUtil::acquire_stripe_guard()
+{
+ // Note: this won't actually interact with any threads, as the pool is running in single-threaded test mode.
+ return _distributor->_stripe_accessor->rendezvous_and_hold_all();
+}
+
TopLevelBucketDBUpdater&
TopLevelDistributorTestUtil::bucket_db_updater() {
return *_distributor->_bucket_db_updater;
@@ -356,6 +375,11 @@ TopLevelDistributorTestUtil::reconfigure(const DistributorConfig& cfg)
tick(); // Config is propagated upon next top-level tick
}
+framework::MetricUpdateHook&
+TopLevelDistributorTestUtil::distributor_metric_update_hook() {
+ return _distributor->_metricUpdateHook;
+}
+
BucketDatabase&
TopLevelDistributorTestUtil::stripe_bucket_database(uint16_t stripe_idx) {
assert(stripe_idx < _distributor->_stripes.size());
@@ -430,4 +454,41 @@ TopLevelDistributorTestUtil::trigger_distribution_change(std::shared_ptr<lib::Di
_distributor->enableNextDistribution();
}
+const lib::ClusterStateBundle&
+TopLevelDistributorTestUtil::current_cluster_state_bundle() const
+{
+ // We assume that all stripes have the same cluster state internally, so just use the first.
+ assert(_distributor->_stripes[0]);
+ const auto& bundle = _distributor->_stripes[0]->getClusterStateBundle();
+ // ... but sanity-check just to make sure...
+ for (size_t i = 1; i < _num_distributor_stripes; ++i) {
+ assert(_distributor->_stripes[i]->getClusterStateBundle() == bundle);
+ }
+ return bundle;
+}
+
+void
+TopLevelDistributorTestUtil::tick_distributor_and_stripes_n_times(uint32_t n)
+{
+ for (uint32_t i = 0; i < n; ++i) {
+ tick(false);
+ }
+}
+
+void
+TopLevelDistributorTestUtil::tick_top_level_distributor_n_times(uint32_t n)
+{
+ for (uint32_t i = 0; i < n; ++i) {
+ tick(true);
+ }
+}
+
+void
+TopLevelDistributorTestUtil::complete_recovery_mode_on_all_stripes()
+{
+ for (auto* s : distributor_stripes()) {
+ s->scanAllBuckets();
+ }
+}
+
}
diff --git a/storage/src/tests/distributor/top_level_distributor_test_util.h b/storage/src/tests/distributor/top_level_distributor_test_util.h
index 9048160b652..8832f8ada6e 100644
--- a/storage/src/tests/distributor/top_level_distributor_test_util.h
+++ b/storage/src/tests/distributor/top_level_distributor_test_util.h
@@ -19,12 +19,14 @@ namespace distributor {
class TopLevelDistributor;
class DistributorBucketSpace;
+class DistributorBucketSpaceRepo;
class DistributorMetricSet;
class DistributorNodeContext;
class DistributorStripe;
class DistributorStripeComponent;
class DistributorStripeOperationContext;
class DistributorStripePool;
+class StripeAccessGuard;
class IdealStateMetricSet;
class Operation;
class TopLevelBucketDBUpdater;
@@ -58,6 +60,12 @@ public:
// As the above, but always inserts into default bucket space
void add_nodes_to_stripe_bucket_db(const document::BucketId& id, const std::string& nodeStr);
+ // TODO STRIPE replace with BucketSpaceStateMap once legacy is gone
+ DistributorBucketSpaceRepo& top_level_bucket_space_repo() noexcept;
+ const DistributorBucketSpaceRepo& top_level_bucket_space_repo() const noexcept;
+
+ std::unique_ptr<StripeAccessGuard> acquire_stripe_guard();
+
TopLevelBucketDBUpdater& bucket_db_updater();
const IdealStateMetricSet& total_ideal_state_metrics() const;
const DistributorMetricSet& total_distributor_metrics() const;
@@ -77,12 +85,19 @@ public:
return _node->getClock();
}
+ framework::MetricUpdateHook& distributor_metric_update_hook();
+
BucketDatabase& stripe_bucket_database(uint16_t stripe_idx); // Implicit default space only
BucketDatabase& stripe_bucket_database(uint16_t stripe_idx, document::BucketSpace space);
const BucketDatabase& stripe_bucket_database(uint16_t stripe_idx) const; // Implicit default space only
const BucketDatabase& stripe_bucket_database(uint16_t stripe_idx, document::BucketSpace space) const;
[[nodiscard]] bool all_distributor_stripes_are_in_recovery_mode() const;
+ void tick_distributor_and_stripes_n_times(uint32_t n);
+ void tick_top_level_distributor_n_times(uint32_t n);
+
+ void complete_recovery_mode_on_all_stripes();
+
void setup_distributor(int redundancy,
int node_count,
const std::string& systemState,
@@ -122,6 +137,8 @@ public:
void trigger_distribution_change(std::shared_ptr<lib::Distribution> distr);
+ const lib::ClusterStateBundle& current_cluster_state_bundle() const;
+
static std::vector<document::BucketSpace> bucket_spaces();
protected:
diff --git a/storage/src/vespa/storage/distributor/distributor_stripe.h b/storage/src/vespa/storage/distributor/distributor_stripe.h
index 5426d311558..b1b20cf445a 100644
--- a/storage/src/vespa/storage/distributor/distributor_stripe.h
+++ b/storage/src/vespa/storage/distributor/distributor_stripe.h
@@ -198,6 +198,7 @@ private:
friend class TopLevelDistributor;
friend class DistributorStripeTestUtil;
friend class DistributorTestUtil;
+ friend class TopLevelDistributorTestUtil;
friend class LegacyBucketDBUpdaterTest;
friend class MetricUpdateHook;
friend class MultiThreadedStripeAccessGuard;
diff --git a/storage/src/vespa/storage/distributor/top_level_distributor.cpp b/storage/src/vespa/storage/distributor/top_level_distributor.cpp
index 68a52e27d84..bbef20b2a23 100644
--- a/storage/src/vespa/storage/distributor/top_level_distributor.cpp
+++ b/storage/src/vespa/storage/distributor/top_level_distributor.cpp
@@ -58,6 +58,7 @@ TopLevelDistributor::TopLevelDistributor(DistributorComponentRegister& compReg,
ChainedMessageSender* messageSender)
: StorageLink("distributor"),
framework::StatusReporter("distributor", "Distributor"),
+ _node_identity(node_identity),
_comp_reg(compReg),
_use_legacy_mode(num_distributor_stripes == 0),
_metrics(std::make_shared<DistributorMetricSet>()),
diff --git a/storage/src/vespa/storage/distributor/top_level_distributor.h b/storage/src/vespa/storage/distributor/top_level_distributor.h
index 57ff5268323..5de4b9c1aaa 100644
--- a/storage/src/vespa/storage/distributor/top_level_distributor.h
+++ b/storage/src/vespa/storage/distributor/top_level_distributor.h
@@ -20,6 +20,7 @@
#include <vespa/storage/common/distributorcomponent.h>
#include <vespa/storage/common/doneinitializehandler.h>
#include <vespa/storage/common/messagesender.h>
+#include <vespa/storage/common/node_identity.h>
#include <vespa/storage/distributor/bucketdb/bucketdbmetricupdater.h>
#include <vespa/storage/distributor/maintenance/maintenancescheduler.h>
#include <vespa/storageapi/message/state.h>
@@ -33,7 +34,6 @@
namespace storage {
struct DoneInitializeHandler;
class HostInfo;
- class NodeIdentity;
}
namespace storage::distributor {
@@ -84,6 +84,8 @@ public:
DistributorMetricSet& getMetrics();
+ const NodeIdentity& node_identity() const noexcept { return _node_identity; }
+
// Implements DistributorInterface and DistributorMessageSender.
DistributorMetricSet& metrics() override { return getMetrics(); }
const DistributorConfiguration& config() const override;
@@ -205,6 +207,7 @@ private:
using MessageQueue = std::vector<std::shared_ptr<api::StorageMessage>>;
+ const NodeIdentity _node_identity;
DistributorComponentRegister& _comp_reg;
const bool _use_legacy_mode;
std::shared_ptr<DistributorMetricSet> _metrics;
diff --git a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
index 51887141646..54f2b2fd9e3 100644
--- a/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
+++ b/vespa-athenz/src/main/java/com/yahoo/vespa/athenz/client/zms/DefaultZmsClient.java
@@ -1,6 +1,7 @@
// Copyright 2018 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.athenz.client.zms;
+import com.yahoo.io.IOUtils;
import com.yahoo.vespa.athenz.api.AthenzDomain;
import com.yahoo.vespa.athenz.api.AthenzGroup;
import com.yahoo.vespa.athenz.api.AthenzIdentity;
@@ -28,10 +29,13 @@ import com.yahoo.vespa.athenz.utils.AthenzIdentities;
import org.apache.http.Header;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.methods.RequestBuilder;
+import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import javax.net.ssl.SSLContext;
+import java.io.IOException;
import java.net.URI;
+import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
@@ -190,7 +194,11 @@ public class DefaultZmsClient extends ClientBase implements ZmsClient {
public void createPolicy(AthenzDomain athenzDomain, String athenzPolicy) {
URI uri = zmsUrl.resolve(String.format("domain/%s/policy/%s",
athenzDomain.getName(), athenzPolicy));
- execute(RequestBuilder.put(uri).build(), response -> readEntity(response, Void.class));
+ StringEntity entity = toJsonStringEntity(Map.of("name", athenzPolicy, "assertions", List.of()));
+ HttpUriRequest request = RequestBuilder.put(uri)
+ .setEntity(entity)
+ .build();
+ execute(request, response -> readEntity(response, Void.class));
}
@Override