summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--CMakeLists.txt45
-rw-r--r--CONTRIBUTING.md11
-rw-r--r--application-preprocessor/CMakeLists.txt4
-rw-r--r--application/src/main/java/com/yahoo/application/container/SynchronousRequestResponseHandler.java1
-rw-r--r--application/src/main/java/com/yahoo/application/container/handler/Request.java10
-rwxr-xr-xbootstrap.sh2
-rw-r--r--build_settings.cmake8
-rw-r--r--bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateSourcesMojo.java15
-rw-r--r--chain/CMakeLists.txt (renamed from dist/CMakeLists.txt)2
-rw-r--r--clustercontroller-apps/CMakeLists.txt (renamed from persistence/src/main/resources/configdefinitions/persistence-rpc.def)4
-rw-r--r--clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/StateRestApiV2Handler.java2
-rw-r--r--clustercontroller-apputil/CMakeLists.txt2
-rw-r--r--clustercontroller-core/CMakeLists.txt2
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java2
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java6
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java8
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java10
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java23
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java29
-rw-r--r--clustercontroller-utils/CMakeLists.txt2
-rw-r--r--component/CMakeLists.txt2
-rw-r--r--config-bundle/CMakeLists.txt2
-rw-r--r--config-model-api/CMakeLists.txt2
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java158
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/TimeWindow.java141
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java186
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/FileDistribution.java1
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/SuperModel.java97
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelListener.java4
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelProvider.java11
-rw-r--r--config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java92
-rw-r--r--config-model-api/src/test/java/com/yahoo/config/application/api/TimeWindowTest.java143
-rw-r--r--config-model-fat/CMakeLists.txt5
-rw-r--r--config-model/CMakeLists.txt10
-rw-r--r--config-model/pom.xml2
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/MacroShadower.java5
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java1
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/processing/ReservedMacroNames.java50
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java20
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/LogForwarder.java64
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java12
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java17
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java18
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/ModelElement.java14
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/cluster/EngineFactoryBuilder.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonEngine.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonProvider.java15
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/engines/RPCEngine.java57
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/NodeFlavorTuning.java13
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java45
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java16
-rw-r--r--config-model/src/main/resources/schema/admin.rnc13
-rw-r--r--config-model/src/main/resources/schema/content.rnc3
-rw-r--r--config-model/src/main/resources/schema/deployment.rnc14
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java6
-rw-r--r--config-model/src/test/java/com/yahoo/searchdefinition/processing/ReservedMacroNamesTestCase.java71
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/AdminTestCase.java28
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java54
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java32
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java2
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java13
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/container/configserver/ConfigserverClusterTest.java4
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java3
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/IndexedTest.java1
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/search/NodeFlavorTuningTest.java12
-rw-r--r--config-model/src/test/schema-test-files/deployment.xml2
-rw-r--r--config-model/src/test/schema-test-files/services.xml3
-rw-r--r--config-provisioning/CMakeLists.txt3
-rw-r--r--config-proxy/CMakeLists.txt6
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java3
-rw-r--r--config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java26
-rw-r--r--config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java3
-rw-r--r--config/src/vespa/config/frt/frtconfigresponsev3.cpp12
-rw-r--r--configd/src/apps/sentinel/sentinel.cpp56
-rw-r--r--configd/src/apps/sentinel/service.cpp11
-rw-r--r--configdefinitions/CMakeLists.txt2
-rw-r--r--configdefinitions/src/vespa/CMakeLists.txt66
-rw-r--r--configdefinitions/src/vespa/configserver.def2
-rw-r--r--configdefinitions/src/vespa/logforwarder.def6
-rw-r--r--configgen/pom.xml5
-rw-r--r--configgen/src/main/scala/com/yahoo/config/codegen/ConfigGenerator.scala16
-rw-r--r--configserver/CMakeLists.txt12
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/RequestHandler.java19
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java8
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java88
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java7
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java8
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandler.java5
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/model/LbServicesProducer.java27
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/model/RoutingProducer.java20
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModelConfigProvider.java (renamed from configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModel.java)28
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java3
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java2
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSessionRepo.java6
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java2
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRequestHandler.java1
-rwxr-xr-xconfigserver/src/main/sh/start-configserver2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java35
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java2
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java53
-rwxr-xr-xconfigserver/src/test/java/com/yahoo/vespa/config/server/model/RoutingProducerTest.java31
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java1
-rw-r--r--container-accesslogging/CMakeLists.txt2
-rw-r--r--container-core/CMakeLists.txt16
-rw-r--r--container-dependencies-enforcer/OWNERS1
-rw-r--r--container-dependencies-enforcer/README.md6
-rw-r--r--container-dependencies-enforcer/pom.xml210
-rw-r--r--container-dev/pom.xml10
-rw-r--r--container-di/CMakeLists.txt5
-rw-r--r--container-di/src/main/scala/com/yahoo/container/di/CloudSubscriberFactory.scala6
-rw-r--r--container-disc/CMakeLists.txt12
-rw-r--r--container-disc/pom.xml1
-rw-r--r--container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java7
-rw-r--r--container-disc/src/main/java/com/yahoo/container/jdisc/SslKeyStoreFactoryProvider.java2
-rwxr-xr-xcontainer-disc/src/main/sh/vespa-start-container-daemon.sh2
-rw-r--r--container-jersey2/CMakeLists.txt2
-rw-r--r--container-messagebus/CMakeLists.txt3
-rw-r--r--container-search-and-docproc/CMakeLists.txt4
-rw-r--r--container-search/CMakeLists.txt28
-rw-r--r--container/pom.xml27
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/InstanceInformation.java3
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/CostJsonModel.java73
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/CostResource.java41
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/package-info.java5
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java3
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java34
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerClient.java9
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeList.java36
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/ApplicationCost.java105
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/Backend.java21
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/ClusterCost.java182
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/Cost.java53
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/CostJsonModelAdapter.java93
-rw-r--r--controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/package-info.java5
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java24
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java37
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java18
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationList.java32
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java15
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterCost.java86
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterInfo.java40
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterUtilization.java63
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java45
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentCost.java54
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java51
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobStatus.java20
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentOrder.java43
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java213
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystem.java31
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BlockedChangeDeployer.java28
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainer.java84
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterUtilizationMaintainer.java58
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java16
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployer.java58
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobControl.java4
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java36
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java101
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java53
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/RootHandler.java1
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java120
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java55
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/UpgraderResponse.java35
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiHandler.java113
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java3
-rw-r--r--controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java4
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ConfigServerClientMock.java40
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java156
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java125
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ClusterUtilizationTest.java29
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/cost/CostMock.java44
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/cost/MockInsightBackend.java41
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java30
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java67
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java67
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystemTest.java28
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MockMetricsService.java13
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainerTest.java38
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterUtilizationMaintainerTest.java38
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployerTest.java115
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java250
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/application-without-project-id.json19
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/canary-with-stale-data.json3
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java101
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java15
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java227
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-deployment-cancelled-no-op.json1
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-deployment-cancelled.json1
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-without-change-multiple-deployments.json204
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json144
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deploy-result.json2
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json51
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java31
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json29
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/root-response.json5
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiTest.java54
-rw-r--r--controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java36
-rw-r--r--defaults/CMakeLists.txt2
-rwxr-xr-xdist.sh2
-rwxr-xr-xdist/post_install.sh76
-rw-r--r--dist/vespa.spec23
-rw-r--r--docker-api/CMakeLists.txt2
-rw-r--r--docker-api/pom.xml2
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java29
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java2
-rw-r--r--docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImplTest.java29
-rw-r--r--docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/RunSystemTests.java2
-rwxr-xr-xdocker/build-vespa.sh2
-rwxr-xr-xdocker/build/build-vespa-internal.sh6
-rw-r--r--docproc/CMakeLists.txt4
-rw-r--r--docprocs/CMakeLists.txt2
-rw-r--r--document/CMakeLists.txt2
-rw-r--r--document/src/vespa/document/annotation/spantree.h2
-rw-r--r--document/src/vespa/document/bucket/bucketspace.h5
-rw-r--r--document/src/vespa/document/config/CMakeLists.txt6
-rw-r--r--document/src/vespa/document/fieldvalue/structfieldvalue.cpp3
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/VisitorDataHandler.java5
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java2
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/SearchColumnPolicy.java2
-rw-r--r--documentapi/src/vespa/documentapi/messagebus/documentprotocol.h7
-rw-r--r--documentapi/src/vespa/documentapi/messagebus/policies/CMakeLists.txt2
-rw-r--r--dummy-persistence/.gitignore2
-rw-r--r--dummy-persistence/OWNERS1
-rw-r--r--dummy-persistence/pom.xml59
-rw-r--r--dummy-persistence/src/main/java/com/yahoo/persistence/dummy/BucketContents.java126
-rw-r--r--dummy-persistence/src/main/java/com/yahoo/persistence/dummy/DummyPersistenceProvider.java223
-rw-r--r--dummy-persistence/src/main/java/com/yahoo/persistence/dummy/DummyPersistenceProviderHandler.java15
-rw-r--r--dummy-persistence/src/main/java/com/yahoo/persistence/dummy/IteratorContext.java52
-rw-r--r--dummy-persistence/src/main/java/com/yahoo/persistence/dummy/package-info.java7
-rw-r--r--dummy-persistence/src/test/config/.gitignore0
-rw-r--r--dummy-persistence/src/test/java/com/yahoo/persistence/dummy/DummyPersistenceTest.java33
-rw-r--r--eval/CMakeLists.txt1
-rw-r--r--eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp9
-rw-r--r--eval/src/apps/tensor_conformance/.gitignore1
-rw-r--r--eval/src/apps/tensor_conformance/CMakeLists.txt (renamed from persistence/src/vespa/persistence/proxy/CMakeLists.txt)8
-rw-r--r--eval/src/apps/tensor_conformance/generate.cpp18
-rw-r--r--eval/src/apps/tensor_conformance/generate.h22
-rw-r--r--eval/src/apps/tensor_conformance/tensor_conformance.cpp354
-rw-r--r--eval/src/apps/tensor_conformance/test_spec.json5
-rw-r--r--eval/src/tests/tensor/tensor_conformance/.gitignore2
-rw-r--r--eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp30
-rw-r--r--eval/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp4
-rw-r--r--eval/src/vespa/eval/eval/test/tensor_conformance.cpp2
-rw-r--r--eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp7
-rw-r--r--fileacquirer/CMakeLists.txt2
-rw-r--r--filedistribution/src/vespa/filedistribution/distributor/CMakeLists.txt2
-rw-r--r--filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp4
-rw-r--r--filedistribution/src/vespa/filedistribution/manager/filedistributionmanager.cpp2
-rw-r--r--filedistribution/src/vespa/filedistribution/model/CMakeLists.txt2
-rw-r--r--filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.cpp2
-rw-r--r--filedistribution/src/vespa/filedistribution/model/filedbmodel.h2
-rw-r--r--filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp1
-rw-r--r--filedistribution/src/vespa/filedistribution/model/zkfacade.cpp1
-rw-r--r--filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.cpp3
-rw-r--r--functions.cmake20
-rw-r--r--install_java.cmake161
-rw-r--r--jaxrs_client_utils/src/main/java/com/yahoo/vespa/jaxrs/client/RetryingJaxRsStrategy.java8
-rw-r--r--jdisc_core/CMakeLists.txt4
-rw-r--r--jdisc_http_service/CMakeLists.txt8
-rw-r--r--jdisc_http_service/pom.xml10
-rw-r--r--jdisc_http_service/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java5
-rw-r--r--jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java79
-rw-r--r--jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/SslKeyStoreFactory.java2
-rw-r--r--jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemKeyStore.java316
-rw-r--r--jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemKeyStoreProvider.java20
-rw-r--r--jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemSslKeyStore.java53
-rw-r--r--jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def10
-rw-r--r--jdisc_http_service/src/test/java/com/yahoo/jdisc/http/guiceModules/ConnectorFactoryRegistryModule.java19
-rw-r--r--jdisc_http_service/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java95
-rw-r--r--jdisc_http_service/src/test/resources/pem/test.crt88
-rw-r--r--jdisc_http_service/src/test/resources/pem/test.key27
-rw-r--r--jdisc_jetty/CMakeLists.txt3
-rw-r--r--jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java5
-rw-r--r--logd/src/apps/logd/main.cpp25
-rw-r--r--logd/src/logd/CMakeLists.txt3
-rw-r--r--logd/src/logd/sigterm.cpp27
-rw-r--r--logd/src/logd/sigterm.h6
-rw-r--r--logd/src/logd/watch.cpp7
-rw-r--r--logforwarder/CMakeLists.txt10
-rw-r--r--logforwarder/OWNERS1
-rw-r--r--logforwarder/README5
-rw-r--r--logforwarder/src/apps/vespa-logforwarder-start/.gitignore1
-rw-r--r--logforwarder/src/apps/vespa-logforwarder-start/CMakeLists.txt12
-rw-r--r--logforwarder/src/apps/vespa-logforwarder-start/cf-handler.cpp91
-rw-r--r--logforwarder/src/apps/vespa-logforwarder-start/cf-handler.h20
-rw-r--r--logforwarder/src/apps/vespa-logforwarder-start/main.cpp38
-rw-r--r--logserver/CMakeLists.txt4
-rw-r--r--memfilepersistence/src/tests/spi/basicoperationhandlertest.cpp38
-rw-r--r--memfilepersistence/src/tests/spi/iteratorhandlertest.cpp33
-rw-r--r--memfilepersistence/src/tests/spi/joinoperationhandlertest.cpp32
-rw-r--r--memfilepersistence/src/tests/spi/memfileautorepairtest.cpp9
-rw-r--r--memfilepersistence/src/tests/spi/memfiletestutils.cpp25
-rw-r--r--memfilepersistence/src/tests/spi/splitoperationhandlertest.cpp20
-rw-r--r--memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.cpp7
-rw-r--r--memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.h4
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java10
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCSend.java8
-rw-r--r--messagebus/src/tests/protocolrepository/protocolrepository.cpp1
-rw-r--r--messagebus/src/tests/throttling/throttling.cpp1
-rw-r--r--messagebus/src/vespa/messagebus/CMakeLists.txt2
-rw-r--r--messagebus/src/vespa/messagebus/emptyreply.cpp5
-rw-r--r--messagebus/src/vespa/messagebus/emptyreply.h27
-rw-r--r--messagebus/src/vespa/messagebus/iprotocol.h6
-rw-r--r--messagebus/src/vespa/messagebus/network/rpcnetwork.cpp10
-rw-r--r--messagebus/src/vespa/messagebus/network/rpcnetwork.h38
-rw-r--r--messagebus/src/vespa/messagebus/network/rpcsend.cpp30
-rw-r--r--messagebus/src/vespa/messagebus/network/rpcsend.h4
-rw-r--r--messagebus/src/vespa/messagebus/routable.h1
-rw-r--r--messagebus/src/vespa/messagebus/testlib/simpleprotocol.cpp2
-rw-r--r--messagebus/src/vespa/messagebus/testlib/simpleprotocol.h1
-rw-r--r--metrics/src/tests/metricmanagertest.cpp2
-rw-r--r--metrics/src/vespa/metrics/CMakeLists.txt2
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java2
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java16
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java5
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java29
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java64
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java15
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImpl.java5
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java18
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java13
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/NodeAdminProvider.java (renamed from node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java)23
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java5
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutor.java14
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/HttpException.java35
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java57
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java3
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java86
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java92
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java2
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java9
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutorTest.java20
-rw-r--r--node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandler.java31
-rw-r--r--node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/verification/commons/noderepo/NodeRepoInfoRetriever.java2
-rw-r--r--node-maintainer/src/test/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandlerTest.java46
-rw-r--r--node-repository/CMakeLists.txt5
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java2
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodes.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/ErrorResponse.java5
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java2
-rw-r--r--orchestrator/CMakeLists.txt2
-rw-r--r--orchestrator/pom.xml6
-rw-r--r--orchestrator/src/main/java/com/yahoo/vespa/orchestrator/ServiceMonitorInstanceLookupService.java16
-rw-r--r--orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionResourceTest.java1
-rw-r--r--persistence/CMakeLists.txt2
-rw-r--r--persistence/pom.xml105
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/rpc/BucketProviderMethod.java22
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderHandler.java401
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderMethod.java39
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/rpc/RPCHandler.java39
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/rpc/TimestampedProviderMethod.java20
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/rpc/package-info.java7
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/AbstractPersistenceProvider.java82
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/Bucket.java30
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/BucketInfo.java155
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/ClusterState.java32
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/ClusterStateImpl.java50
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/DocEntry.java56
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/PartitionState.java27
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/PersistenceProvider.java382
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/Selection.java70
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/conformance/ConformanceTest.java1605
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/conformance/TestDocMan.java37
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/conformance/package-info.java7
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/package-info.java7
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/BucketIdListResult.java36
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/BucketInfoResult.java37
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/CreateIteratorResult.java33
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/GetResult.java60
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/IterateResult.java43
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/PartitionStateListResult.java37
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/RemoveResult.java47
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/Result.java83
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/UpdateResult.java39
-rw-r--r--persistence/src/main/java/com/yahoo/persistence/spi/result/package-info.java7
-rw-r--r--persistence/src/tests/proxy/.gitignore10
-rw-r--r--persistence/src/tests/proxy/CMakeLists.txt31
-rw-r--r--persistence/src/tests/proxy/dummy_provider_factory.h35
-rw-r--r--persistence/src/tests/proxy/external_providerproxy_conformancetest.cpp42
-rw-r--r--persistence/src/tests/proxy/mockprovider.h163
-rw-r--r--persistence/src/tests/proxy/providerproxy_conformancetest.cpp61
-rw-r--r--persistence/src/tests/proxy/providerproxy_test.cpp404
-rw-r--r--persistence/src/tests/proxy/providerstub_test.cpp543
-rw-r--r--persistence/src/tests/proxy/proxy_factory_wrapper.h56
-rwxr-xr-xpersistence/src/tests/proxy/proxy_test.sh6
-rw-r--r--persistence/src/tests/proxy/proxyfactory.h37
-rw-r--r--persistence/src/tests/spi/clusterstatetest.cpp2
-rw-r--r--persistence/src/vespa/persistence/CMakeLists.txt1
-rw-r--r--persistence/src/vespa/persistence/conformancetest/conformancetest.cpp202
-rw-r--r--persistence/src/vespa/persistence/conformancetest/conformancetest.h6
-rw-r--r--persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp4
-rw-r--r--persistence/src/vespa/persistence/dummyimpl/dummypersistence.h4
-rw-r--r--persistence/src/vespa/persistence/proxy/.gitignore2
-rw-r--r--persistence/src/vespa/persistence/proxy/buildid.cpp8
-rw-r--r--persistence/src/vespa/persistence/proxy/buildid.h12
-rw-r--r--persistence/src/vespa/persistence/proxy/providerproxy.cpp493
-rw-r--r--persistence/src/vespa/persistence/proxy/providerproxy.h76
-rw-r--r--persistence/src/vespa/persistence/proxy/providerstub.cpp928
-rw-r--r--persistence/src/vespa/persistence/proxy/providerstub.h97
-rw-r--r--persistence/src/vespa/persistence/spi/CMakeLists.txt1
-rw-r--r--persistence/src/vespa/persistence/spi/abstractpersistenceprovider.cpp4
-rw-r--r--persistence/src/vespa/persistence/spi/abstractpersistenceprovider.h2
-rw-r--r--persistence/src/vespa/persistence/spi/bucket.h14
-rw-r--r--persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp8
-rw-r--r--persistence/src/vespa/persistence/spi/metricpersistenceprovider.h4
-rw-r--r--persistence/src/vespa/persistence/spi/persistenceprovider.h5
-rw-r--r--persistence/src/vespa/persistence/spi/test.cpp38
-rw-r--r--persistence/src/vespa/persistence/spi/test.h16
-rw-r--r--pom.xml12
-rw-r--r--searchcore/CMakeLists.txt2
-rw-r--r--searchcore/src/apps/proton/downpersistence.cpp4
-rw-r--r--searchcore/src/apps/proton/downpersistence.h4
-rw-r--r--searchcore/src/apps/tests/persistenceconformance_test.cpp45
-rw-r--r--searchcore/src/apps/vespa-transactionlog-inspect/vespa-transactionlog-inspect.cpp1
-rw-r--r--searchcore/src/tests/proton/attribute/attributeflush_test.cpp6
-rw-r--r--searchcore/src/tests/proton/common/hw_info_sampler/hw_info_sampler_test.cpp53
-rw-r--r--searchcore/src/tests/proton/docsummary/CMakeLists.txt1
-rw-r--r--searchcore/src/tests/proton/docsummary/docsummary.cpp69
-rw-r--r--searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp2
-rw-r--r--searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp4
-rw-r--r--searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp4
-rw-r--r--searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp4
-rw-r--r--searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp2
-rw-r--r--searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp39
-rw-r--r--searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp6
-rw-r--r--searchcore/src/tests/proton/documentdb/documentdb_test.cpp33
-rw-r--r--searchcore/src/tests/proton/documentdb/feedhandler/CMakeLists.txt1
-rw-r--r--searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp17
-rw-r--r--searchcore/src/tests/proton/documentdb/feedview/CMakeLists.txt1
-rw-r--r--searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp2
-rw-r--r--searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp4
-rw-r--r--searchcore/src/tests/proton/documentdb/threading_service_config/CMakeLists.txt8
-rw-r--r--searchcore/src/tests/proton/documentdb/threading_service_config/threading_service_config_test.cpp66
-rw-r--r--searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp4
-rw-r--r--searchcore/src/tests/proton/feedtoken/feedtoken.cpp18
-rw-r--r--searchcore/src/tests/proton/index/indexmanager_test.cpp2
-rw-r--r--searchcore/src/tests/proton/matching/CMakeLists.txt1
-rw-r--r--searchcore/src/tests/proton/matching/matching_test.cpp2
-rw-r--r--searchcore/src/tests/proton/persistenceengine/persistence_handler_map/CMakeLists.txt9
-rw-r--r--searchcore/src/tests/proton/persistenceengine/persistence_handler_map/persistence_handler_map_test.cpp144
-rw-r--r--searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp112
-rw-r--r--searchcore/src/tests/proton/proton_configurer/proton_configurer_test.cpp7
-rw-r--r--searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp4
-rw-r--r--searchcore/src/tests/proton/server/CMakeLists.txt1
-rw-r--r--searchcore/src/tests/proton/server/disk_mem_usage_filter/disk_mem_usage_filter_test.cpp12
-rw-r--r--searchcore/src/tests/proton/server/documentretriever_test.cpp6
-rw-r--r--searchcore/src/tests/proton/server/feedstates_test.cpp2
-rw-r--r--searchcore/src/tests/proton/server/visibility_handler/visibility_handler_test.cpp4
-rw-r--r--searchcore/src/tests/proton/summaryengine/summaryengine.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/config/CMakeLists.txt8
-rw-r--r--searchcore/src/vespa/searchcore/config/proton.def87
-rw-r--r--searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/CMakeLists.txt2
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/feedtoken.cpp20
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/feedtoken.h46
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/handlermap.hpp9
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/hw_info.h54
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.cpp80
-rw-r--r--searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.h37
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.cpp5
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp98
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.h39
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.cpp21
-rw-r--r--searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.h24
-rw-r--r--searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreflushtarget.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/flushengine/threadedflushtarget.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/initializer/task_runner.cpp12
-rw-r--r--searchcore/src/vespa/searchcore/proton/persistenceengine/CMakeLists.txt1
-rw-r--r--searchcore/src/vespa/searchcore/proton/persistenceengine/persistence_handler_map.cpp111
-rw-r--r--searchcore/src/vespa/searchcore/proton/persistenceengine/persistence_handler_map.h69
-rw-r--r--searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp99
-rw-r--r--searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h54
-rw-r--r--searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_handler.cpp9
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.h41
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp8
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/clusterstatehandler.cpp2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp9
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp55
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.h18
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp61
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.h23
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/document_subdb_initializer.cpp5
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb.cpp136
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdb.h62
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdbconfig.cpp113
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdbconfig.h70
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp100
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.h9
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp18
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.h31
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp19
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.h24
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp301
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/feedhandler.h119
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/i_document_subdb_owner.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/i_proton_configurer_owner.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/ibucketstatecalculator.h4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h14
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp6
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/memoryflush.cpp21
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/memoryflush.h20
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp6
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h6
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp1
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/persistenceproviderproxy.cpp14
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/persistenceproviderproxy.h165
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/proton.cpp120
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/proton.h2
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/proton_configurer.cpp16
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/proton_configurer.h8
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/reconfig_params.cpp7
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/resource_usage_explorer.cpp11
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp29
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp57
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h44
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp101
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h44
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp51
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h14
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/threading_service_config.cpp43
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/threading_service_config.h36
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/tlcproxy.cpp14
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/tlcproxy.h21
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/tlssyncer.cpp4
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/tlswriter.h8
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.cpp15
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.h8
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/CMakeLists.txt1
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/bucketfactory.cpp (renamed from searchcore/src/vespa/searchcore/proton/common/bucketfactory.cpp)4
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/bucketfactory.h (renamed from searchcore/src/vespa/searchcore/proton/common/bucketfactory.h)0
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/bucketstatecalculator.h7
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.cpp7
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.h5
-rw-r--r--searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h27
-rw-r--r--searchcorespi/src/vespa/searchcorespi/index/i_thread_service.h8
-rw-r--r--searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp4
-rw-r--r--searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h6
-rw-r--r--searchlib/CMakeLists.txt7
-rw-r--r--searchlib/src/apps/docstore/benchmarkdatastore.cpp2
-rw-r--r--searchlib/src/apps/docstore/documentstoreinspect.cpp2
-rw-r--r--searchlib/src/apps/docstore/verifylogdatastore.cpp2
-rw-r--r--searchlib/src/apps/tests/biglogtest.cpp2
-rw-r--r--searchlib/src/apps/tests/btreestress_test.cpp15
-rw-r--r--searchlib/src/apps/tests/memoryindexstress_test.cpp5
-rwxr-xr-xsearchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java14
-rwxr-xr-xsearchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java21
-rw-r--r--searchlib/src/tests/diskindex/bitvector/bitvector_test.cpp3
-rw-r--r--searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp250
-rw-r--r--searchlib/src/tests/docstore/document_store/document_store_test.cpp35
-rw-r--r--searchlib/src/tests/docstore/document_store_visitor/document_store_visitor_test.cpp96
-rw-r--r--searchlib/src/tests/docstore/file_chunk/file_chunk_test.cpp13
-rw-r--r--searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp117
-rw-r--r--searchlib/src/tests/features/prod_features_attributematch.cpp7
-rw-r--r--searchlib/src/tests/fef/parameter/parameter_test.cpp6
-rw-r--r--searchlib/src/tests/memoryindex/documentinverter/documentinverter_test.cpp1
-rw-r--r--searchlib/src/tests/memoryindex/fieldinverter/fieldinverter_test.cpp1
-rw-r--r--searchlib/src/tests/memoryindex/memoryindex/memoryindex_test.cpp8
-rw-r--r--searchlib/src/tests/memoryindex/urlfieldinverter/urlfieldinverter_test.cpp1
-rw-r--r--searchlib/src/tests/queryeval/sparse_vector_benchmark/sparse_vector_benchmark_test.cpp2
-rw-r--r--searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp47
-rw-r--r--searchlib/src/tests/transactionlog/translogclient_test.cpp62
-rw-r--r--searchlib/src/tests/transactionlogstress/translogstress.cpp137
-rw-r--r--searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/compression.cpp30
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/compression.h7
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp39
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/countcompression.h4
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp255
-rw-r--r--searchlib/src/vespa/searchlib/bitcompression/pagedict4.h43
-rw-r--r--searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/common/isequencedtaskexecutor.h11
-rw-r--r--searchlib/src/vespa/searchlib/config/CMakeLists.txt2
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt1
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/bitvectorfile.cpp49
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h30
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.cpp31
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.h23
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.cpp44
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.h21
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/checkpointfile.cpp183
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/checkpointfile.h47
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/docidmapper.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/extposocc.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp95
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fieldreader.h30
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp59
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fieldwriter.h21
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/fusion.cpp8
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp10
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/pagedict4file.cpp159
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/pagedict4file.h34
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/wordnummapper.cpp1
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zcposting.cpp92
-rw-r--r--searchlib/src/vespa/searchlib/diskindex/zcposting.h8
-rw-r--r--searchlib/src/vespa/searchlib/docstore/documentstore.cpp35
-rw-r--r--searchlib/src/vespa/searchlib/docstore/documentstore.h2
-rw-r--r--searchlib/src/vespa/searchlib/docstore/filechunk.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.cpp59
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.h79
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdocumentstore.cpp17
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdocumentstore.h26
-rw-r--r--searchlib/src/vespa/searchlib/docstore/visitcache.cpp13
-rw-r--r--searchlib/src/vespa/searchlib/docstore/visitcache.h8
-rw-r--r--searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h23
-rw-r--r--searchlib/src/vespa/searchlib/features/agefeature.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/features/agefeature.h4
-rw-r--r--searchlib/src/vespa/searchlib/features/attributefeature.cpp9
-rw-r--r--searchlib/src/vespa/searchlib/features/attributefeature.h7
-rw-r--r--searchlib/src/vespa/searchlib/features/attributematchfeature.cpp4
-rw-r--r--searchlib/src/vespa/searchlib/features/attributematchfeature.h2
-rw-r--r--searchlib/src/vespa/searchlib/features/debug_attribute_wait.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/features/debug_attribute_wait.h4
-rw-r--r--searchlib/src/vespa/searchlib/features/dotproductfeature.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/features/freshnessfeature.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/features/freshnessfeature.h4
-rw-r--r--searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp22
-rw-r--r--searchlib/src/vespa/searchlib/features/nativeattributematchfeature.cpp6
-rw-r--r--searchlib/src/vespa/searchlib/features/nativeattributematchfeature.h4
-rw-r--r--searchlib/src/vespa/searchlib/fef/parameterdescriptions.h75
-rw-r--r--searchlib/src/vespa/searchlib/fef/parametervalidator.cpp14
-rw-r--r--searchlib/src/vespa/searchlib/fef/parametervalidator.h2
-rw-r--r--searchlib/src/vespa/searchlib/index/docidandfeatures.cpp99
-rw-r--r--searchlib/src/vespa/searchlib/index/docidandfeatures.h22
-rw-r--r--searchlib/src/vespa/searchlib/index/postinglistcountfile.h28
-rw-r--r--searchlib/src/vespa/searchlib/index/postinglistcounts.cpp68
-rw-r--r--searchlib/src/vespa/searchlib/index/postinglistcounts.h20
-rw-r--r--searchlib/src/vespa/searchlib/index/postinglistfile.h28
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/iterator_pack.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/iterator_pack.h2
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp22
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h10
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.cpp7
-rw-r--r--searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.h3
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp32
-rw-r--r--searchlib/src/vespa/searchlib/test/fakedata/fakeword.h23
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/domain.cpp80
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/domain.h43
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp13
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/domainpart.h22
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/session.cpp38
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/session.h18
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp57
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/translogclient.h27
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp77
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/translogserver.h45
-rw-r--r--searchlib/src/vespa/searchlib/transactionlog/translogserverapp.cpp2
-rw-r--r--searchlib/src/vespa/searchlib/util/comprbuffer.cpp25
-rw-r--r--searchlib/src/vespa/searchlib/util/comprbuffer.h13
-rw-r--r--searchlib/src/vespa/searchlib/util/comprfile.cpp77
-rw-r--r--searchlib/src/vespa/searchlib/util/comprfile.h54
-rw-r--r--searchlib/src/vespa/searchlib/util/filealign.cpp26
-rw-r--r--searchlib/src/vespa/searchlib/util/filealign.h15
-rw-r--r--searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp2
-rw-r--r--searchsummary/src/vespa/searchsummary/config/CMakeLists.txt2
-rw-r--r--service-monitor/pom.xml18
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ConfigServerApplication.java52
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ModelGenerator.java139
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ServiceModel.java34
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ServiceMonitorImpl.java65
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SlobrokMonitor2.java77
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SlobrokMonitorManager.java114
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SuperModelListenerImpl.java89
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/LatencyMeasurement.java36
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/ServiceModelCache.java62
-rw-r--r--service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/ServiceMonitorMetrics.java38
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ConfigServerApplicationTest.java61
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ExampleModel.java164
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ExampleModelTest.java93
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ModelGeneratorTest.java138
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SlobrokMonitor2Test.java36
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SlobrokMonitorManagerTest.java92
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SuperModelListenerImplTest.java52
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/LatencyMeasurementTest.java33
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/ServiceModelCacheTest.java60
-rw-r--r--service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/ServiceMonitorMetricsTest.java29
-rw-r--r--simplemetrics/CMakeLists.txt4
-rw-r--r--staging_vespalib/src/vespa/vespalib/stllike/cache.h2
-rw-r--r--staging_vespalib/src/vespa/vespalib/stllike/cache.hpp7
-rw-r--r--staging_vespalib/src/vespa/vespalib/util/varholder.h51
-rw-r--r--standalone-container/CMakeLists.txt2
-rw-r--r--statistics/CMakeLists.txt2
-rw-r--r--storage/src/tests/common/hostreporter/util.cpp6
-rw-r--r--storage/src/tests/distributor/distributortest.cpp60
-rw-r--r--storage/src/tests/persistence/common/filestortestfixture.cpp5
-rw-r--r--storage/src/tests/persistence/common/persistenceproviderwrapper.cpp6
-rw-r--r--storage/src/tests/persistence/common/persistenceproviderwrapper.h2
-rw-r--r--storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp5
-rw-r--r--storage/src/tests/persistence/filestorage/filestormanagertest.cpp20
-rw-r--r--storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp11
-rw-r--r--storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp7
-rw-r--r--storage/src/tests/persistence/legacyoperationhandlertest.cpp4
-rw-r--r--storage/src/tests/persistence/persistencetestutils.cpp22
-rw-r--r--storage/src/tests/persistence/persistencethread_splittest.cpp6
-rw-r--r--storage/src/tests/persistence/provider_error_wrapper_test.cpp5
-rw-r--r--storage/src/tests/persistence/splitbitdetectortest.cpp28
-rw-r--r--storage/src/tests/persistence/testandsettest.cpp4
-rw-r--r--storage/src/tests/storageserver/statereportertest.cpp2
-rw-r--r--storage/src/vespa/storage/bucketdb/CMakeLists.txt4
-rw-r--r--storage/src/vespa/storage/bucketdb/mapbucketdatabase.cpp18
-rw-r--r--storage/src/vespa/storage/config/CMakeLists.txt24
-rw-r--r--storage/src/vespa/storage/distributor/distributor.cpp64
-rw-r--r--storage/src/vespa/storage/distributor/distributor.h16
-rw-r--r--storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp4
-rw-r--r--storage/src/vespa/storage/persistence/filestorage/modifiedbucketchecker.cpp2
-rw-r--r--storage/src/vespa/storage/persistence/mergehandler.cpp10
-rw-r--r--storage/src/vespa/storage/persistence/persistencethread.cpp42
-rw-r--r--storage/src/vespa/storage/persistence/persistenceutil.cpp2
-rw-r--r--storage/src/vespa/storage/persistence/processallhandler.cpp5
-rw-r--r--storage/src/vespa/storage/persistence/provider_error_wrapper.cpp8
-rw-r--r--storage/src/vespa/storage/persistence/provider_error_wrapper.h4
-rw-r--r--storage/src/vespa/storage/storageserver/servicelayernode.cpp45
-rw-r--r--storage/src/vespa/storage/storageserver/servicelayernode.h9
-rw-r--r--storage/src/vespa/storage/storageserver/storagenode.cpp232
-rw-r--r--storage/src/vespa/storage/storageserver/storagenode.h37
-rw-r--r--storage/src/vespa/storage/visiting/CMakeLists.txt2
-rw-r--r--storageapi/src/vespa/storageapi/mbusprot/storageprotocol.h4
-rw-r--r--storageserver/src/apps/storaged/CMakeLists.txt2
-rw-r--r--storageserver/src/apps/storaged/storage.cpp3
-rw-r--r--storageserver/src/vespa/storageserver/app/CMakeLists.txt1
-rw-r--r--storageserver/src/vespa/storageserver/app/rpcservicelayerprocess.cpp44
-rw-r--r--storageserver/src/vespa/storageserver/app/rpcservicelayerprocess.h28
-rwxr-xr-xtravis/travis-build-cpp.sh1
-rwxr-xr-xtravis/travis-build-java.sh3
-rw-r--r--vagrant/README.md44
-rw-r--r--vagrant/Vagrantfile10
-rw-r--r--vdstestlib/src/vespa/vdstestlib/cppunit/macros.h44
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java2
-rw-r--r--vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java14
-rw-r--r--vespa-http-client/CMakeLists.txt2
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClient.java15
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClientFactory.java1
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Result.java3
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Session.java13
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SessionFactory.java4
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SimpleLoggerResultCallback.java2
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Cluster.java5
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/ConnectionParams.java5
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Endpoint.java3
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/FeedParams.java4
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/SessionParams.java5
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java1
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/MultiClusterSessionOutputStream.java3
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java3
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ByteBufferInputStream.java3
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java3
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DocumentQueue.java6
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DryRunGatewayConnection.java1
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointIOException.java3
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointResultQueue.java8
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayConnection.java2
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayThrottler.java2
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java6
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/ConcurrentDocumentOperationBlocker.java2
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/DocumentSendInfo.java2
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/EndPointResultFactory.java4
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/IncompleteResultsThrottler.java2
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessor.java25
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationStats.java2
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/CommandLineArguments.java3
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/FormatInputStream.java2
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/Runner.java4
-rw-r--r--vespa_jersey2/CMakeLists.txt2
-rw-r--r--vespabase/CMakeLists.txt4
-rw-r--r--vespabase/conf/.gitignore1
-rw-r--r--vespabase/conf/default-env.txt.in2
-rwxr-xr-xvespabase/src/rhel-prestart.sh1
-rw-r--r--vespaclient-container-plugin/CMakeLists.txt2
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandler.java18
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandlerImpl.java26
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java60
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/OperationHandlerImplTest.java58
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/MockedOperationHandler.java8
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java17
-rw-r--r--vespaclient-core/CMakeLists.txt3
-rw-r--r--vespaclient-java/CMakeLists.txt12
-rw-r--r--vespaclient-java/src/main/java/com/yahoo/vespavisit/VdsVisit.java2
-rw-r--r--vespajlib/CMakeLists.txt2
-rw-r--r--vespajlib/pom.xml2
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLock.java7
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLocking.java48
-rw-r--r--vespajlib/src/main/java/com/yahoo/concurrent/classlock/LockInterruptException.java8
-rw-r--r--vespajlib/src/main/java/com/yahoo/io/IOUtils.java8
-rw-r--r--vespalib/src/tests/data/input_reader/input_reader_test.cpp43
-rw-r--r--vespalib/src/tests/executor/executor_test.cpp33
-rw-r--r--vespalib/src/tests/objects/nbostream/nbostream_test.cpp16
-rw-r--r--vespalib/src/tests/slime/slime_binary_format_test.cpp3
-rw-r--r--vespalib/src/tests/slime/slime_json_format_test.cpp27
-rw-r--r--vespalib/src/tests/slime/slime_test.cpp47
-rw-r--r--vespalib/src/vespa/vespalib/data/input_reader.h29
-rw-r--r--vespalib/src/vespa/vespalib/data/slime/json_format.cpp7
-rw-r--r--vespalib/src/vespa/vespalib/data/slime/slime.h6
-rw-r--r--vespalib/src/vespa/vespalib/objects/nbostream.cpp12
-rw-r--r--vespalib/src/vespa/vespalib/objects/nbostream.h10
-rw-r--r--vespalib/src/vespa/vespalib/objects/nbostream.hpp41
-rw-r--r--vespalib/src/vespa/vespalib/util/CMakeLists.txt1
-rw-r--r--vespalib/src/vespa/vespalib/util/lambdatask.h (renamed from searchlib/src/vespa/searchlib/common/lambdatask.h)6
-rw-r--r--vespalib/src/vespa/vespalib/util/sig_catch.cpp22
-rw-r--r--vespalib/src/vespa/vespalib/util/sig_catch.h25
-rw-r--r--vespalib/src/vespa/vespalib/util/signalhandler.cpp1
-rw-r--r--vespalib/src/vespa/vespalib/util/signalhandler.h1
-rwxr-xr-xvespamalloc/src/tests/thread/thread_test.sh6
-rw-r--r--vsm/src/tests/docsum/docsum.cpp2
-rw-r--r--vsm/src/vespa/vsm/config/CMakeLists.txt6
-rw-r--r--zkfacade/CMakeLists.txt2
-rw-r--r--zkfacade/src/main/java/com/yahoo/vespa/curator/Lock.java2
818 files changed, 12092 insertions, 15738 deletions
diff --git a/.gitignore b/.gitignore
index 6e7db58ea54..6697c90b625 100644
--- a/.gitignore
+++ b/.gitignore
@@ -42,3 +42,4 @@ Testing
.preprocessed/
.DS_Store
install_manifest.txt
+*.cbp
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 413d232c00d..db6bbde423a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -16,7 +16,6 @@ find_package(JNI REQUIRED)
include(functions.cmake)
include(build_settings.cmake)
-include(install_java.cmake)
# Enable CTest unit testing
enable_testing()
@@ -24,12 +23,36 @@ enable_testing()
# Include vespa config definitions in every target
include_directories(BEFORE ${CMAKE_BINARY_DIR}/configdefinitions/src)
+add_subdirectory(application-preprocessor)
+add_subdirectory(chain)
+add_subdirectory(component)
+add_subdirectory(config-bundle)
+add_subdirectory(config-model)
+add_subdirectory(config-model-api)
+add_subdirectory(config-provisioning)
+add_subdirectory(config-proxy)
add_subdirectory(config)
+add_subdirectory(config-model-fat)
add_subdirectory(configd)
add_subdirectory(configdefinitions)
add_subdirectory(configserver)
add_subdirectory(configutil)
+add_subdirectory(container-accesslogging)
+add_subdirectory(container-core)
+add_subdirectory(container-di)
+add_subdirectory(container-disc)
+add_subdirectory(container-jersey2)
+add_subdirectory(container-messagebus)
+add_subdirectory(container-search)
+add_subdirectory(container-search-and-docproc)
+add_subdirectory(clustercontroller-apps)
+add_subdirectory(clustercontroller-apputil)
+add_subdirectory(clustercontroller-utils)
+add_subdirectory(clustercontroller-core)
add_subdirectory(defaults)
+add_subdirectory(docker-api)
+add_subdirectory(docproc)
+add_subdirectory(docprocs)
add_subdirectory(document)
add_subdirectory(documentapi)
add_subdirectory(eval)
@@ -41,14 +64,21 @@ add_subdirectory(filedistribution)
add_subdirectory(fnet)
add_subdirectory(frtstream)
add_subdirectory(fsa)
+add_subdirectory(jdisc_core)
+add_subdirectory(jdisc_http_service)
+add_subdirectory(jdisc_jetty)
add_subdirectory(jrt_test)
add_subdirectory(juniper)
add_subdirectory(logd)
+add_subdirectory(logserver)
+add_subdirectory(logforwarder)
add_subdirectory(lowercasing_test)
add_subdirectory(memfilepersistence)
add_subdirectory(messagebus)
add_subdirectory(messagebus_test)
add_subdirectory(metrics)
+add_subdirectory(node-repository)
+add_subdirectory(orchestrator)
add_subdirectory(persistence)
add_subdirectory(persistencetypes)
add_subdirectory(searchcommon)
@@ -56,25 +86,32 @@ add_subdirectory(searchcore)
add_subdirectory(searchcorespi)
add_subdirectory(searchlib)
add_subdirectory(searchsummary)
+add_subdirectory(simplemetrics)
add_subdirectory(slobrok)
add_subdirectory(staging_vespalib)
+add_subdirectory(standalone-container)
add_subdirectory(storage)
add_subdirectory(storageapi)
add_subdirectory(storageframework)
add_subdirectory(storageserver)
+add_subdirectory(statistics)
add_subdirectory(streamingvisitors)
add_subdirectory(vbench)
add_subdirectory(vdslib)
add_subdirectory(vdstestlib)
+add_subdirectory(vespa-http-client)
+add_subdirectory(vespa_jersey2)
add_subdirectory(vespabase)
add_subdirectory(vespaclient)
+add_subdirectory(vespaclient-core)
+add_subdirectory(vespaclient-container-plugin)
+add_subdirectory(vespaclient-java)
+add_subdirectory(vespajlib)
add_subdirectory(vespalib)
add_subdirectory(vespalog)
add_subdirectory(vespamalloc)
add_subdirectory(vsm)
-# Note: Change when cmake gets proper post-install support.
-# Post installation steps are run from dist subdirectory which needs to be the last add_subdirectory(...) call in this file.
-add_subdirectory(dist)
+add_subdirectory(zkfacade)
# Create module targets with name ${MODULE}+module depending on every target defined within that module
__create_module_targets(TARGETS "module")
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index a2690f6d127..f09a13ff8bf 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -12,9 +12,11 @@ In addition to the [public Travis build](https://travis-ci.org/vespa-engine/vesp
we have a large acceptance and performance test suite which
is also run continuously. We plan to add this to the open source code base later.
-All pull requests are reviewed by a member of the
-[Vespa committers](https://github.com/orgs/vespa-engine/teams/vespa/members) team, regardless
-of who made it. If you want to become a committer, making some quality contributions is the way to start.
+All pull requests are reviewed by a member of the Vespa Committers team.
+You can find a suitable reviewer in the OWNERS file upward in the source tree from
+where you are making the change (the OWNERS have a special responsibility for
+ensuring the long-term integrity of a portion of the code).
+If you want to become a committer/OWNER making some quality contributions is the way to start.
## Versioning
Vespa uses semantic versioning - see
@@ -30,6 +32,9 @@ It is fine to submit issues also for feature requests and ideas, whether or not
There is also a [ToDo list](TODO.md) for larger things which nobody are working on yet.
+## Community
+If you have questions, want to share your experience or help others, please join our community on [Stack Overflow](http://stackoverflow.com/questions/tagged/vespa).
+
### Getting started
See [README](README.md) for how to build and test Vespa.
diff --git a/application-preprocessor/CMakeLists.txt b/application-preprocessor/CMakeLists.txt
new file mode 100644
index 00000000000..e40fd4a6736
--- /dev/null
+++ b/application-preprocessor/CMakeLists.txt
@@ -0,0 +1,4 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(application-preprocessor)
+vespa_install_script(src/main/sh/vespa-preprocess-application bin)
+
diff --git a/application/src/main/java/com/yahoo/application/container/SynchronousRequestResponseHandler.java b/application/src/main/java/com/yahoo/application/container/SynchronousRequestResponseHandler.java
index c5ca67d4428..7475c08009c 100644
--- a/application/src/main/java/com/yahoo/application/container/SynchronousRequestResponseHandler.java
+++ b/application/src/main/java/com/yahoo/application/container/SynchronousRequestResponseHandler.java
@@ -97,6 +97,7 @@ final class SynchronousRequestResponseHandler {
for (Map.Entry<String, List<String>> entry : request.getHeaders().entrySet()) {
discRequest.headers().add(entry.getKey(), entry.getValue());
}
+ discRequest.context().putAll(request.getAttributes());
return discRequest;
}
diff --git a/application/src/main/java/com/yahoo/application/container/handler/Request.java b/application/src/main/java/com/yahoo/application/container/handler/Request.java
index fef0f275be1..1a6dbe59f04 100644
--- a/application/src/main/java/com/yahoo/application/container/handler/Request.java
+++ b/application/src/main/java/com/yahoo/application/container/handler/Request.java
@@ -5,6 +5,8 @@ import com.google.common.annotations.Beta;
import net.jcip.annotations.Immutable;
import java.nio.charset.StandardCharsets;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
/**
* A request for use with {@link com.yahoo.application.container.JDisc#handleRequest(Request)}.
@@ -20,6 +22,7 @@ public class Request {
private final String uri;
private final byte[] body;
private final Method method;
+ private final Map<String, Object> attributes = new ConcurrentHashMap<>();
/**
* Creates a Request with an empty body.
@@ -98,6 +101,13 @@ public class Request {
return uri;
}
+ /**
+ * @return a mutable attribute map for this request.
+ */
+ public Map<String, Object> getAttributes() {
+ return attributes;
+ }
+
@Override
public String toString() {
String bodyStr = (body == null || body.length == 0) ? "[empty]" : "[omitted]";
diff --git a/bootstrap.sh b/bootstrap.sh
index 126359b3951..075da74b7c7 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -45,7 +45,7 @@ $top/dist/getversion.pl -M $top > $top/dist/vtag.map
# The 'full' mode also builds modules needed by C++ tests.
# must install parent pom first:
-echo "Downloading all dependencies. This make take a few of minutes with an empty Maven cache."
+echo "Downloading all dependencies. This may take a few minutes with an empty Maven cache."
mvn_install -N
# and build plugins first:
diff --git a/build_settings.cmake b/build_settings.cmake
index 578484fc0a7..425a2eddda7 100644
--- a/build_settings.cmake
+++ b/build_settings.cmake
@@ -23,14 +23,8 @@ set(C_WARN_OPTS "-Winline -Wuninitialized -Werror -Wall -W -Wchar-subscripts -Wc
# Note: this is not a union of C_WARN_OPTS, since CMAKE_CXX_FLAGS already includes CMAKE_C_FLAGS, which in turn includes C_WARN_OPTS transitively
set(CXX_SPECIFIC_WARN_OPTS "-Wsuggest-override -Wnon-virtual-dtor")
-# Select C++ ABI
-if(DEFINED VESPA_CXX_ABI_FLAGS)
-else()
- set (VESPA_CXX_ABI_FLAGS "-D_GLIBCXX_USE_CXX11_ABI=0")
-endif()
-
# C and C++ compiler flags
-set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O3 ${C_WARN_OPTS} -fPIC ${VESPA_CXX_ABI_FLAGS} -DBOOST_DISABLE_ASSERTS -march=westmere -mtune=intel ${EXTRA_C_FLAGS}")
+set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O3 -fno-omit-frame-pointer ${C_WARN_OPTS} -fPIC ${VESPA_CXX_ABI_FLAGS} -DBOOST_DISABLE_ASSERTS ${VESPA_CPU_ARCH_FLAGS} -mtune=intel ${EXTRA_C_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CMAKE_C_FLAGS} ${CXX_SPECIFIC_WARN_OPTS} -std=c++1z -fvisibility-inlines-hidden -fdiagnostics-color=auto ${EXTRA_CXX_FLAGS}")
# Linker flags
diff --git a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateSourcesMojo.java b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateSourcesMojo.java
index 06486c00810..11f5696c589 100644
--- a/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateSourcesMojo.java
+++ b/bundle-plugin/src/main/java/com/yahoo/container/plugin/mojo/GenerateSourcesMojo.java
@@ -76,11 +76,15 @@ public class GenerateSourcesMojo extends AbstractMojo {
return configGenVersion;
}
- Dependency containerDev = getVespaDependency("container-dev"); // TODO: change to "container"
+ Dependency container = getVespaDependency("container");
+ if (container != null)
+ return container.getVersion();
+
+ Dependency containerDev = getVespaDependency("container-dev");
if (containerDev != null)
return containerDev.getVersion();
- Dependency docproc = getVespaDependency("docproc");
+ Dependency docproc = getVespaDependency("docproc");
if (docproc != null)
return docproc.getVersion();
@@ -89,9 +93,10 @@ public class GenerateSourcesMojo extends AbstractMojo {
return parent.getVersion();
String defaultConfigGenVersion = loadDefaultConfigGenVersion();
- getLog().warn(
- String.format("Did not find container-dev, guessing that version '%s' of config_gen should be used.",
- defaultConfigGenVersion));
+ getLog().warn(String.format(
+ "Did not find either container or container-dev artifact in project dependencies, "
+ + "using default version '%s' of the config class plugin.",
+ defaultConfigGenVersion));
return defaultConfigGenVersion;
}
diff --git a/dist/CMakeLists.txt b/chain/CMakeLists.txt
index d06074074df..3b5b5fd2c99 100644
--- a/dist/CMakeLists.txt
+++ b/chain/CMakeLists.txt
@@ -1,2 +1,2 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-install(CODE "execute_process(COMMAND ${CMAKE_CURRENT_LIST_DIR}/post_install.sh ${CMAKE_INSTALL_PREFIX})")
+install_config_definition(src/main/resources/configdefinitions/chains.def container.core.chains.def)
diff --git a/persistence/src/main/resources/configdefinitions/persistence-rpc.def b/clustercontroller-apps/CMakeLists.txt
index 9cae5812760..f59ffbfa7bf 100644
--- a/persistence/src/main/resources/configdefinitions/persistence-rpc.def
+++ b/clustercontroller-apps/CMakeLists.txt
@@ -1,4 +1,2 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-namespace=persistence
-
-port int default=3456 restart
+install_fat_java_artifact(clustercontroller-apps)
diff --git a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/StateRestApiV2Handler.java b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/StateRestApiV2Handler.java
index 431fc797df6..eea085bd103 100644
--- a/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/StateRestApiV2Handler.java
+++ b/clustercontroller-apps/src/main/java/com/yahoo/vespa/clustercontroller/apps/clustercontroller/StateRestApiV2Handler.java
@@ -3,8 +3,6 @@ package com.yahoo.vespa.clustercontroller.apps.clustercontroller;
import com.google.inject.Inject;
import com.yahoo.cloud.config.ClusterInfoConfig;
-import com.yahoo.cloud.config.ModelConfig;
-import com.yahoo.container.jdisc.config.HttpServerConfig;
import com.yahoo.container.logging.AccessLog;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.clustercontroller.apputil.communication.http.JDiscHttpRequestHandler;
diff --git a/clustercontroller-apputil/CMakeLists.txt b/clustercontroller-apputil/CMakeLists.txt
new file mode 100644
index 00000000000..bdfb3ab3ed7
--- /dev/null
+++ b/clustercontroller-apputil/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(clustercontroller-apputil)
diff --git a/clustercontroller-core/CMakeLists.txt b/clustercontroller-core/CMakeLists.txt
new file mode 100644
index 00000000000..6754e893009
--- /dev/null
+++ b/clustercontroller-core/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(clustercontroller-core)
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
index 457fe024535..a5419c64818 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
@@ -550,7 +550,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
if ( ! isRunning()) { return; }
if (masterElectionHandler.isMaster()) {
didWork |= broadcastClusterStateToEligibleNodes();
- didWork |= systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
+ systemStateBroadcaster.checkIfClusterStateIsAckedByAllDistributors(database, databaseContext, this);
}
if ( ! isRunning()) { return; }
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java
index 3cdbfba282c..15650f0f4aa 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java
@@ -258,7 +258,11 @@ abstract public class NodeInfo implements Comparable<NodeInfo> {
/** Returns the wanted state of this node - which can either be set by a user or configured */
public NodeState getWantedState() {
- if (configuredRetired) return new NodeState(node.getType(), State.RETIRED);
+ NodeState retiredState = new NodeState(node.getType(), State.RETIRED);
+ // Don't let configure retired state override explicitly set Down and Maintenance.
+ if (configuredRetired && wantedState.above(retiredState)) {
+ return retiredState;
+ }
return wantedState;
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java
index 8860bef2fae..5c035a0d5a7 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateBroadcaster.java
@@ -109,15 +109,12 @@ public class SystemStateBroadcaster {
* Checks if all distributor nodes have ACKed the most recent cluster state. Iff this
* is the case, triggers handleAllDistributorsInSync() on the provided FleetController
* object and updates the broadcaster's last known in-sync cluster state version.
- *
- * Returns true if distributor nodes were checked, false if cluster is already in sync
- * or no state has been published yet.
*/
- boolean checkIfClusterStateIsAckedByAllDistributors(DatabaseHandler database,
+ void checkIfClusterStateIsAckedByAllDistributors(DatabaseHandler database,
DatabaseHandler.Context dbContext,
FleetController fleetController) throws InterruptedException {
if ((systemState == null) || (lastClusterStateInSync == systemState.getVersion())) {
- return false; // Nothing to do for the current state
+ return; // Nothing to do for the current state
}
boolean anyOutdatedDistributorNodes = dbContext.getCluster().getNodeInfo().stream()
.filter(NodeInfo::isDistributor)
@@ -128,7 +125,6 @@ public class SystemStateBroadcaster {
lastClusterStateInSync = systemState.getVersion();
fleetController.handleAllDistributorsInSync(database, dbContext);
}
- return true;
}
public boolean broadcastNewState(DatabaseHandler database,
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java
index 9ad405d6f90..16e9675d586 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java
@@ -12,8 +12,10 @@ import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChang
import com.yahoo.vespa.clustercontroller.utils.util.NoMetricReporter;
import java.util.Collection;
+import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.TreeMap;
import static org.mockito.Mockito.mock;
@@ -162,6 +164,14 @@ class ClusterFixture {
this.params.transitionTimes(transitionTimeMs);
}
+ ClusterFixture markNodeAsConfigRetired(int nodeIndex) {
+ Set<ConfiguredNode> configuredNodes = new HashSet<>(cluster.getConfiguredNodes().values());
+ configuredNodes.remove(new ConfiguredNode(nodeIndex, false));
+ configuredNodes.add(new ConfiguredNode(nodeIndex, true));
+ cluster.setNodes(configuredNodes);
+ return this;
+ }
+
AnnotatedClusterState annotatedGeneratedClusterState() {
params.currentTimeInMilllis(timer.getCurrentTimeInMillis());
return ClusterStateGenerator.generatedStateFrom(params);
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java
index b1b219b58d4..958fc1ae232 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java
@@ -272,16 +272,29 @@ public class ClusterStateGeneratorTest {
@Test
public void config_retired_mode_is_reflected_in_generated_state() {
- final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
- List<ConfiguredNode> nodes = DistributionBuilder.buildConfiguredNodes(5);
- nodes.set(2, new ConfiguredNode(2, true));
- fixture.cluster.setNodes(nodes);
+ ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .markNodeAsConfigRetired(2)
+ .bringEntireClusterUp();
- final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
assertThat(state.toString(), equalTo("distributor:5 storage:5 .2.s:r"));
}
+ @Test
+ public void config_retired_mode_is_overridden_by_worse_wanted_state() {
+ ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .markNodeAsConfigRetired(2)
+ .markNodeAsConfigRetired(3)
+ .bringEntireClusterUp()
+ .proposeStorageNodeWantedState(2, State.DOWN)
+ .proposeStorageNodeWantedState(3, State.MAINTENANCE);
+
+ AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .2.s:d .3.s:m"));
+ }
+
private void do_test_change_within_node_transition_time_window_generates_maintenance(State reportedState) {
final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
final ClusterStateGenerator.Params params = fixture.generatorParams()
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java
index c0253d8a126..e56d8b02cd4 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java
@@ -6,6 +6,7 @@ import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vdslib.state.State;
import org.junit.Test;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -77,4 +78,32 @@ public class NodeInfoTest {
assertFalse(nodeInfo.recentlyObservedUnstableDuringInit());
}
+ @Test
+ public void down_wanted_state_overrides_config_retired_state() {
+ ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .markNodeAsConfigRetired(1)
+ .proposeStorageNodeWantedState(1, State.DOWN);
+
+ NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ assertEquals(State.DOWN, nodeInfo.getWantedState().getState());
+ }
+
+ @Test
+ public void maintenance_wanted_state_overrides_config_retired_state() {
+ ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .markNodeAsConfigRetired(1)
+ .proposeStorageNodeWantedState(1, State.MAINTENANCE);
+
+ NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ assertEquals(State.MAINTENANCE, nodeInfo.getWantedState().getState());
+ }
+
+ @Test
+ public void retired_state_overrides_default_up_wanted_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3).markNodeAsConfigRetired(1);
+
+ NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ assertEquals(State.RETIRED, nodeInfo.getWantedState().getState());
+ }
+
}
diff --git a/clustercontroller-utils/CMakeLists.txt b/clustercontroller-utils/CMakeLists.txt
new file mode 100644
index 00000000000..250a8e7e693
--- /dev/null
+++ b/clustercontroller-utils/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(clustercontroller-utils)
diff --git a/component/CMakeLists.txt b/component/CMakeLists.txt
new file mode 100644
index 00000000000..87d0a4989ba
--- /dev/null
+++ b/component/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(component)
diff --git a/config-bundle/CMakeLists.txt b/config-bundle/CMakeLists.txt
new file mode 100644
index 00000000000..8d4878920a4
--- /dev/null
+++ b/config-bundle/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(config-bundle)
diff --git a/config-model-api/CMakeLists.txt b/config-model-api/CMakeLists.txt
new file mode 100644
index 00000000000..f69e5242e2e
--- /dev/null
+++ b/config-model-api/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(config-model-api)
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java
index 098f5620723..b86c0a5ca94 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/DeploymentSpec.java
@@ -2,19 +2,16 @@
package com.yahoo.config.application.api;
import com.google.common.collect.ImmutableList;
+import com.yahoo.config.application.api.xml.DeploymentSpecXmlReader;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
-import com.yahoo.io.IOUtils;
-import com.yahoo.text.XML;
-import org.w3c.dom.Element;
import java.io.BufferedReader;
import java.io.FileReader;
-import java.io.IOException;
import java.io.Reader;
import java.time.Duration;
+import java.time.Instant;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashSet;
@@ -39,32 +36,36 @@ import java.util.stream.Collectors;
public class DeploymentSpec {
/** The empty deployment spec, specifying no zones or rotation, and defaults for all settings */
- public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
- UpgradePolicy.defaultPolicy,
- ImmutableList.of(),
+ public static final DeploymentSpec empty = new DeploymentSpec(Optional.empty(),
+ UpgradePolicy.defaultPolicy,
+ Collections.emptyList(),
+ Collections.emptyList(),
"<deployment version='1.0'/>");
private final Optional<String> globalServiceId;
private final UpgradePolicy upgradePolicy;
+ private final List<ChangeBlocker> changeBlockers;
private final List<Step> steps;
private final String xmlForm;
- public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy, List<Step> steps) {
- this(globalServiceId, upgradePolicy, steps, null);
+ public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
+ List<ChangeBlocker> changeBlockers, List<Step> steps) {
+ this(globalServiceId, upgradePolicy, changeBlockers, steps, null);
}
- private DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
- List<Step> steps, String xmlForm) {
+ public DeploymentSpec(Optional<String> globalServiceId, UpgradePolicy upgradePolicy,
+ List<ChangeBlocker> changeBlockers, List<Step> steps, String xmlForm) {
validateTotalDelay(steps);
this.globalServiceId = globalServiceId;
this.upgradePolicy = upgradePolicy;
+ this.changeBlockers = changeBlockers;
this.steps = ImmutableList.copyOf(completeSteps(new ArrayList<>(steps)));
this.xmlForm = xmlForm;
validateZones(this.steps);
}
/** Throw an IllegalArgumentException if the total delay exceeds 24 hours */
- private static void validateTotalDelay(List<Step> steps) {
+ private void validateTotalDelay(List<Step> steps) {
long totalDelaySeconds = steps.stream().filter(step -> step instanceof Delay)
.mapToLong(delay -> ((Delay)delay).duration().getSeconds())
.sum();
@@ -118,7 +119,6 @@ public class DeploymentSpec {
/**
* Removes the first occurrence of a deployment step to the given environment and returns it.
*
- * @param environment
* @return the removed step, or null if it is not present
*/
private static DeclaredZone remove(Environment environment, List<Step> steps) {
@@ -137,6 +137,21 @@ public class DeploymentSpec {
/** Returns the upgrade policy of this, which is defaultPolicy if none is specified */
public UpgradePolicy upgradePolicy() { return upgradePolicy; }
+ /** Returns whether upgrade can occur at the given instant */
+ public boolean canUpgradeAt(Instant instant) {
+ return changeBlockers.stream().filter(block -> block.blocksVersions())
+ .noneMatch(block -> block.window().includes(instant));
+ }
+
+ /** Returns whether an application revision change can occur at the given instant */
+ public boolean canChangeRevisionAt(Instant instant) {
+ return changeBlockers.stream().filter(block -> block.blocksRevisions())
+ .noneMatch(block -> block.window().includes(instant));
+ }
+
+ /** Returns time windows where upgrades are disallowed */
+ public List<ChangeBlocker> changeBlocker() { return changeBlockers; }
+
/** Returns the deployment steps of this in the order they will be performed */
public List<Step> steps() { return steps; }
@@ -163,12 +178,7 @@ public class DeploymentSpec {
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(Reader reader) {
- try {
- return fromXml(IOUtils.readAll(reader));
- }
- catch (IOException e) {
- throw new IllegalArgumentException("Could not read deployment spec", e);
- }
+ return new DeploymentSpecXmlReader().read(reader);
}
/**
@@ -177,96 +187,9 @@ public class DeploymentSpec {
* @throws IllegalArgumentException if the XML is invalid
*/
public static DeploymentSpec fromXml(String xmlForm) {
- List<Step> steps = new ArrayList<>();
- Optional<String> globalServiceId = Optional.empty();
- Element root = XML.getDocument(xmlForm).getDocumentElement();
- for (Element environmentTag : XML.getChildren(root)) {
- if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
-
- Environment environment = Environment.from(environmentTag.getTagName());
-
- if (environment == Environment.prod) {
- for (Element stepTag : XML.getChildren(environmentTag)) {
- if (stepTag.getTagName().equals("delay")) {
- steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
- longAttribute("minutes", stepTag) * 60 +
- longAttribute("seconds", stepTag))));
- } else if (stepTag.getTagName().equals("parallel")) {
- List<DeclaredZone> zones = new ArrayList<>();
- for (Element regionTag : XML.getChildren(stepTag)) {
- zones.add(readDeclaredZone(environment, regionTag));
- }
- steps.add(new ParallelZones(zones));
- } else { // a region: deploy step
- steps.add(readDeclaredZone(environment, stepTag));
- }
- }
- } else {
- steps.add(new DeclaredZone(environment));
- }
-
- if (environment == Environment.prod)
- globalServiceId = readGlobalServiceId(environmentTag);
- else if (readGlobalServiceId(environmentTag).isPresent())
- throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
- }
- return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), steps, xmlForm);
- }
-
- /** Returns the given attribute as an integer, or 0 if it is not present */
- private static long longAttribute(String attributeName, Element tag) {
- String value = tag.getAttribute(attributeName);
- if (value == null || value.isEmpty()) return 0;
- try {
- return Long.parseLong(value);
- }
- catch (NumberFormatException e) {
- throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
- "' but got '" + value + "'");
- }
- }
-
- private static boolean isEnvironmentName(String tagName) {
- return tagName.equals("test") || tagName.equals("staging") || tagName.equals("prod");
- }
-
- private static DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
- return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())),
- readActive(regionTag));
- }
-
- private static Optional<String> readGlobalServiceId(Element environmentTag) {
- String globalServiceId = environmentTag.getAttribute("global-service-id");
- if (globalServiceId == null || globalServiceId.isEmpty()) {
- return Optional.empty();
- }
- else {
- return Optional.of(globalServiceId);
- }
+ return new DeploymentSpecXmlReader().read(xmlForm);
}
- private static UpgradePolicy readUpgradePolicy(Element root) {
- Element upgradeElement = XML.getChild(root, "upgrade");
- if (upgradeElement == null) return UpgradePolicy.defaultPolicy;
-
- String policy = upgradeElement.getAttribute("policy");
- switch (policy) {
- case "canary" : return UpgradePolicy.canary;
- case "default" : return UpgradePolicy.defaultPolicy;
- case "conservative" : return UpgradePolicy.conservative;
- default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
- "Must be one of " + Arrays.toString(UpgradePolicy.values()));
- }
- }
-
- private static boolean readActive(Element regionTag) {
- String activeValue = regionTag.getAttribute("active");
- if ("true".equals(activeValue)) return true;
- if ("false".equals(activeValue)) return false;
- throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
- "to control whether the region should receive production traffic");
- }
-
public static String toMessageString(Throwable t) {
StringBuilder b = new StringBuilder();
String lastMessage = null;
@@ -443,4 +366,23 @@ public class DeploymentSpec {
conservative
}
+ /** A blocking of changes in a given time window */
+ public static class ChangeBlocker {
+
+ private final boolean revision;
+ private final boolean version;
+ private final TimeWindow window;
+
+ public ChangeBlocker(boolean revision, boolean version, TimeWindow window) {
+ this.revision = revision;
+ this.version = version;
+ this.window = window;
+ }
+
+ public boolean blocksRevisions() { return revision; }
+ public boolean blocksVersions() { return version; }
+ public TimeWindow window() { return window; }
+
+ }
+
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/TimeWindow.java b/config-model-api/src/main/java/com/yahoo/config/application/api/TimeWindow.java
new file mode 100644
index 00000000000..40b5a5370e7
--- /dev/null
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/TimeWindow.java
@@ -0,0 +1,141 @@
+package com.yahoo.config.application.api;
+
+import java.time.DateTimeException;
+import java.time.DayOfWeek;
+import java.time.Instant;
+import java.time.LocalDateTime;
+import java.time.ZoneId;
+import java.time.temporal.ChronoField;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.TreeSet;
+import java.util.function.BiFunction;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+/**
+ * This class represents a window of time for selected hours on selected days.
+ *
+ * @author mpolden
+ */
+public class TimeWindow {
+
+ private final List<DayOfWeek> days;
+ private final List<Integer> hours;
+ private final ZoneId zone;
+
+ private TimeWindow(List<DayOfWeek> days, List<Integer> hours, ZoneId zone) {
+ this.days = Collections.unmodifiableList(new ArrayList<>(new TreeSet<>(days)));
+ this.hours = Collections.unmodifiableList(new ArrayList<>(new TreeSet<>(hours)));
+ this.zone = zone;
+ }
+
+ /** Returns days in this time window */
+ public List<DayOfWeek> days() {
+ return days;
+ }
+
+ /** Returns hours in this time window */
+ public List<Integer> hours() {
+ return hours;
+ }
+
+ /** Returns the time zone of this time window */
+ public ZoneId zone() { return zone; }
+
+ /** Returns whether the given instant is in this time window */
+ public boolean includes(Instant instant) {
+ LocalDateTime dt = LocalDateTime.ofInstant(instant, zone);
+ return days.contains(dt.getDayOfWeek()) && hours.contains(dt.getHour());
+ }
+
+ @Override
+ public String toString() {
+ return "time window for hour(s) " +
+ hours.toString() +
+ " on " + days.stream().map(DayOfWeek::name)
+ .map(String::toLowerCase)
+ .collect(Collectors.toList()).toString() +
+ " in " + zone;
+ }
+
+ /** Parse a time window from the given day, hour and time zone specification */
+ public static TimeWindow from(String daySpec, String hourSpec, String zoneSpec) {
+ List<DayOfWeek> days = parse(daySpec, TimeWindow::parseDays);
+ List<Integer> hours = parse(hourSpec, TimeWindow::parseHours);
+ ZoneId zone = zoneFrom(zoneSpec);
+ return new TimeWindow(days, hours, zone);
+ }
+
+ /** Parse a specification, e.g. "1,4-5", using the given value parser */
+ private static <T> List<T> parse(String spec, BiFunction<String, String, List<T>> valueParser) {
+ List<T> values = new ArrayList<>();
+ String[] parts = spec.split(",");
+ for (String part : parts) {
+ if (part.contains("-")) {
+ String[] startAndEnd = part.split("-");
+ if (startAndEnd.length != 2) {
+ throw new IllegalArgumentException("Invalid range '" + part + "'");
+ }
+ values.addAll(valueParser.apply(startAndEnd[0], startAndEnd[1]));
+ } else {
+ values.addAll(valueParser.apply(part, part));
+ }
+ }
+ return Collections.unmodifiableList(values);
+ }
+
+ /** Returns a list of all hours occurring between startInclusive and endInclusive */
+ private static List<Integer> parseHours(String startInclusive, String endInclusive) {
+ int start = hourFrom(startInclusive);
+ int end = hourFrom(endInclusive);
+ if (end < start) {
+ throw new IllegalArgumentException(String.format("Invalid hour range '%s-%s'", startInclusive,
+ endInclusive));
+ }
+ return IntStream.rangeClosed(start, end).boxed()
+ .collect(Collectors.toList());
+ }
+
+ /** Returns a list of all days occurring between startInclusive and endInclusive */
+ private static List<DayOfWeek> parseDays(String startInclusive, String endInclusive) {
+ DayOfWeek start = dayFrom(startInclusive);
+ DayOfWeek end = dayFrom(endInclusive);
+ if (end.getValue() < start.getValue()) {
+ throw new IllegalArgumentException(String.format("Invalid day range '%s-%s'", startInclusive,
+ endInclusive));
+ }
+ return IntStream.rangeClosed(start.getValue(), end.getValue()).boxed()
+ .map(DayOfWeek::of)
+ .collect(Collectors.toList());
+ }
+
+ /** Parse day of week from string */
+ private static DayOfWeek dayFrom(String day) {
+ return Arrays.stream(DayOfWeek.values())
+ .filter(dayOfWeek -> day.length() >= 3 && dayOfWeek.name().toLowerCase().startsWith(day))
+ .findFirst()
+ .orElseThrow(() -> new IllegalArgumentException("Invalid day '" + day + "'"));
+ }
+
+ /** Parse hour from string */
+ private static int hourFrom(String hour) {
+ try {
+ return ChronoField.HOUR_OF_DAY.checkValidIntValue(Integer.parseInt(hour));
+ } catch (DateTimeException|NumberFormatException e) {
+ throw new IllegalArgumentException("Invalid hour '" + hour + "'", e);
+ }
+ }
+
+ /** Parse time zone from string */
+ private static ZoneId zoneFrom(String zone) {
+ try {
+ return ZoneId.of(zone);
+ } catch (DateTimeException e) {
+ throw new IllegalArgumentException("Invalid time zone '" + zone + "'", e);
+ }
+ }
+
+}
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
new file mode 100644
index 00000000000..0b49325756d
--- /dev/null
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/xml/DeploymentSpecXmlReader.java
@@ -0,0 +1,186 @@
+package com.yahoo.config.application.api.xml;
+
+import com.yahoo.config.application.api.DeploymentSpec;
+import com.yahoo.config.application.api.DeploymentSpec.Step;
+import com.yahoo.config.application.api.DeploymentSpec.Delay;
+import com.yahoo.config.application.api.DeploymentSpec.DeclaredZone;
+import com.yahoo.config.application.api.DeploymentSpec.ParallelZones;
+import com.yahoo.config.application.api.DeploymentSpec.ChangeBlocker;
+import com.yahoo.config.application.api.TimeWindow;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.io.IOUtils;
+import com.yahoo.text.XML;
+import org.w3c.dom.Element;
+
+import java.io.IOException;
+import java.io.Reader;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Collectors;
+
+/**
+ * @author bratseth
+ */
+public class DeploymentSpecXmlReader {
+
+ private static final String testTag = "test";
+ private static final String stagingTag = "staging";
+ private static final String blockChangeTag = "block-change";
+ private static final String prodTag = "prod";
+
+ public DeploymentSpec read(Reader reader) {
+ try {
+ return read(IOUtils.readAll(reader));
+ }
+ catch (IOException e) {
+ throw new IllegalArgumentException("Could not read deployment spec", e);
+ }
+ }
+
+ /** Reads a deployment spec from XML */
+ public DeploymentSpec read(String xmlForm) {
+ List<Step> steps = new ArrayList<>();
+ Optional<String> globalServiceId = Optional.empty();
+ Element root = XML.getDocument(xmlForm).getDocumentElement();
+ validateTagOrder(root);
+ for (Element environmentTag : XML.getChildren(root)) {
+ if ( ! isEnvironmentName(environmentTag.getTagName())) continue;
+
+ Environment environment = Environment.from(environmentTag.getTagName());
+
+ if (environment == Environment.prod) {
+ for (Element stepTag : XML.getChildren(environmentTag)) {
+ if (stepTag.getTagName().equals("delay")) {
+ steps.add(new Delay(Duration.ofSeconds(longAttribute("hours", stepTag) * 60 * 60 +
+ longAttribute("minutes", stepTag) * 60 +
+ longAttribute("seconds", stepTag))));
+ } else if (stepTag.getTagName().equals("parallel")) {
+ List<DeclaredZone> zones = new ArrayList<>();
+ for (Element regionTag : XML.getChildren(stepTag)) {
+ zones.add(readDeclaredZone(environment, regionTag));
+ }
+ steps.add(new ParallelZones(zones));
+ } else { // a region: deploy step
+ steps.add(readDeclaredZone(environment, stepTag));
+ }
+ }
+ } else {
+ steps.add(new DeclaredZone(environment));
+ }
+
+ if (environment == Environment.prod)
+ globalServiceId = readGlobalServiceId(environmentTag);
+ else if (readGlobalServiceId(environmentTag).isPresent())
+ throw new IllegalArgumentException("Attribute 'global-service-id' is only valid on 'prod' tag.");
+ }
+ return new DeploymentSpec(globalServiceId, readUpgradePolicy(root), readChangeBlockers(root), steps, xmlForm);
+ }
+
+ /** Imposes some constraints on tag order which are not expressible in the schema */
+ private void validateTagOrder(Element root) {
+ List<String> tags = XML.getChildren(root).stream().map(Element::getTagName).collect(Collectors.toList());
+ for (int i = 0; i < tags.size(); i++) {
+ if (tags.get(i).equals(blockChangeTag)) {
+ String constraint = "<block-change> must be placed after <test> and <staging> and before <prod>";
+ if (containsAfter(i, testTag, tags)) throw new IllegalArgumentException(constraint);
+ if (containsAfter(i, stagingTag, tags)) throw new IllegalArgumentException(constraint);
+ if (containsBefore(i, prodTag, tags)) throw new IllegalArgumentException(constraint);
+ }
+ }
+ }
+
+ private boolean containsAfter(int i, String item, List<String> items) {
+ return items.subList(i+1, items.size()).contains(item);
+ }
+
+ private boolean containsBefore(int i, String item, List<String> items) {
+ return items.subList(0, i).contains(item);
+ }
+
+ /** Returns the given attribute as an integer, or 0 if it is not present */
+ private long longAttribute(String attributeName, Element tag) {
+ String value = tag.getAttribute(attributeName);
+ if (value == null || value.isEmpty()) return 0;
+ try {
+ return Long.parseLong(value);
+ }
+ catch (NumberFormatException e) {
+ throw new IllegalArgumentException("Expected an integer for attribute '" + attributeName +
+ "' but got '" + value + "'");
+ }
+ }
+
+ private boolean isEnvironmentName(String tagName) {
+ return tagName.equals(testTag) || tagName.equals(stagingTag) || tagName.equals(prodTag);
+ }
+
+ private DeclaredZone readDeclaredZone(Environment environment, Element regionTag) {
+ return new DeclaredZone(environment, Optional.of(RegionName.from(XML.getValue(regionTag).trim())),
+ readActive(regionTag));
+ }
+
+ private Optional<String> readGlobalServiceId(Element environmentTag) {
+ String globalServiceId = environmentTag.getAttribute("global-service-id");
+ if (globalServiceId == null || globalServiceId.isEmpty()) {
+ return Optional.empty();
+ }
+ else {
+ return Optional.of(globalServiceId);
+ }
+ }
+
+ private List<DeploymentSpec.ChangeBlocker> readChangeBlockers(Element root) {
+ List<DeploymentSpec.ChangeBlocker> changeBlockers = new ArrayList<>();
+ for (Element tag : XML.getChildren(root)) {
+ // TODO: Remove block-upgrade on Vespa 7
+ if ( ! blockChangeTag.equals(tag.getTagName()) && !"block-upgrade".equals(tag.getTagName())) continue;
+
+ boolean blockVersions = trueOrMissing(tag.getAttribute("version"));
+ boolean blockRevisions = trueOrMissing(tag.getAttribute("revision"))
+ && !tag.getTagName().equals("block-upgrade"); // TODO: Remove condition on Vespa 7
+
+ String daySpec = tag.getAttribute("days");
+ String hourSpec = tag.getAttribute("hours");
+ String zoneSpec = tag.getAttribute("time-zone");
+ if (zoneSpec.isEmpty()) { // Default to UTC time zone
+ zoneSpec = "UTC";
+ }
+ changeBlockers.add(new DeploymentSpec.ChangeBlocker(blockRevisions, blockVersions,
+ TimeWindow.from(daySpec, hourSpec, zoneSpec)));
+ }
+ return Collections.unmodifiableList(changeBlockers);
+ }
+
+ /** Returns true if the given value is "true", or if it is missing */
+ private boolean trueOrMissing(String value) {
+ return value == null || value.isEmpty() || value.equals("true");
+ }
+
+ private DeploymentSpec.UpgradePolicy readUpgradePolicy(Element root) {
+ Element upgradeElement = XML.getChild(root, "upgrade");
+ if (upgradeElement == null) return DeploymentSpec.UpgradePolicy.defaultPolicy;
+
+ String policy = upgradeElement.getAttribute("policy");
+ switch (policy) {
+ case "canary" : return DeploymentSpec.UpgradePolicy.canary;
+ case "default" : return DeploymentSpec.UpgradePolicy.defaultPolicy;
+ case "conservative" : return DeploymentSpec.UpgradePolicy.conservative;
+ default : throw new IllegalArgumentException("Illegal upgrade policy '" + policy + "': " +
+ "Must be one of " + Arrays.toString(DeploymentSpec.UpgradePolicy.values()));
+ }
+ }
+
+ private boolean readActive(Element regionTag) {
+ String activeValue = regionTag.getAttribute("active");
+ if ("true".equals(activeValue)) return true;
+ if ("false".equals(activeValue)) return false;
+ throw new IllegalArgumentException("Region tags must have an 'active' attribute set to 'true' or 'false' " +
+ "to control whether the region should receive production traffic");
+ }
+
+}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/FileDistribution.java b/config-model-api/src/main/java/com/yahoo/config/model/api/FileDistribution.java
index d53b6735064..a5d54fb84b3 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/FileDistribution.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/FileDistribution.java
@@ -18,6 +18,7 @@ public interface FileDistribution {
void sendDeployedFiles(String hostName, Set<FileReference> fileReferences);
void reloadDeployFileDistributor();
+ // TODO: Remove when 6.150 is the oldest version used
void limitSendingOfDeployedFilesTo(Collection<String> hostNames);
void removeDeploymentsThatHaveDifferentApplicationId(Collection<String> targetHostnames);
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModel.java b/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModel.java
new file mode 100644
index 00000000000..8e918392cb0
--- /dev/null
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModel.java
@@ -0,0 +1,97 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.model.api;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.TenantName;
+
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class SuperModel {
+ private final Map<TenantName, Map<ApplicationId, ApplicationInfo>> models;
+
+ public SuperModel() {
+ this.models = Collections.emptyMap();
+ }
+
+ public SuperModel(Map<TenantName, Map<ApplicationId, ApplicationInfo>> models) {
+ this.models = models;
+ }
+
+ /**
+ * Do NOT mutate the returned map.
+ * TODO: Make the returned map immutable (and type to Map&lt;ApplicationId, ApplicationInfo&gt;)
+ */
+ public Map<TenantName, Map<ApplicationId, ApplicationInfo>> getAllModels() {
+ return models;
+ }
+
+ public Set<ApplicationId> getAllApplicationIds() {
+ return models.values().stream().flatMap(entry -> entry.keySet().stream())
+ .collect(Collectors.toSet());
+ }
+
+ public List<ApplicationInfo> getAllApplicationInfos() {
+ return models.values().stream().flatMap(entry -> entry.values().stream()).collect(Collectors.toList());
+ }
+
+ public Optional<ApplicationInfo> getApplicationInfo(ApplicationId applicationId) {
+ Map<ApplicationId, ApplicationInfo> tenantInfo = models.get(applicationId.tenant());
+ if (tenantInfo == null) {
+ return Optional.empty();
+ }
+
+ ApplicationInfo applicationInfo = tenantInfo.get(applicationId);
+ if (applicationInfo == null) {
+ return Optional.empty();
+ }
+
+ return Optional.of(applicationInfo);
+ }
+
+ public SuperModel cloneAndSetApplication(ApplicationInfo application) {
+ TenantName tenant = application.getApplicationId().tenant();
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> newModels = cloneModels(models);
+ if (!newModels.containsKey(tenant)) {
+ // New application has been activated
+ newModels.put(tenant, new LinkedHashMap<>());
+ } else {
+ // Application has been redeployed
+ }
+
+ newModels.get(tenant).put(application.getApplicationId(), application);
+
+ return new SuperModel(newModels);
+ }
+
+ public SuperModel cloneAndRemoveApplication(ApplicationId applicationId) {
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> newModels = cloneModels(models);
+ if (newModels.containsKey(applicationId.tenant())) {
+ newModels.get(applicationId.tenant()).remove(applicationId);
+ if (newModels.get(applicationId.tenant()).isEmpty()) {
+ newModels.remove(applicationId.tenant());
+ }
+ }
+
+ return new SuperModel(newModels);
+ }
+
+ private static Map<TenantName, Map<ApplicationId, ApplicationInfo>> cloneModels(
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> models) {
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> newModels = new LinkedHashMap<>();
+ for (Map.Entry<TenantName, Map<ApplicationId, ApplicationInfo>> entry : models.entrySet()) {
+ Map<ApplicationId, ApplicationInfo> appMap = new LinkedHashMap<>();
+ newModels.put(entry.getKey(), appMap);
+ for (Map.Entry<ApplicationId, ApplicationInfo> appEntry : entry.getValue().entrySet()) {
+ appMap.put(appEntry.getKey(), appEntry.getValue());
+ }
+ }
+
+ return newModels;
+ }
+}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelListener.java b/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelListener.java
index 16580c9e9f6..497c38af908 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelListener.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelListener.java
@@ -11,10 +11,10 @@ public interface SuperModelListener {
* Application has been activated: Either deployed the first time,
* internally redeployed, or externally triggered redeploy.
*/
- void applicationActivated(ApplicationInfo application);
+ void applicationActivated(SuperModel superModel, ApplicationInfo application);
/**
* Application has been removed.
*/
- void applicationRemoved(ApplicationId id);
+ void applicationRemoved(SuperModel superModel, ApplicationId id);
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelProvider.java b/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelProvider.java
index 42437b20b83..4c7f15143ec 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelProvider.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/SuperModelProvider.java
@@ -1,13 +1,18 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.config.model.api;
-import java.util.List;
+import com.yahoo.config.provision.Zone;
public interface SuperModelProvider {
+ SuperModel getSuperModel();
+
/**
- * Returns all applications in the SuperModel. All changes to the SuperModel
+ * Returns the current SuperModel. All changes to the SuperModel
* following that snapshot will be published to the listener. Warning: The listener
* methods may have been invoked before (or concurrently with) this method returning.
*/
- List<ApplicationInfo> snapshot(SuperModelListener listener);
+ SuperModel snapshot(SuperModelListener listener);
+
+ // TODO: Remove - clients of SuperModel should get zone from elsewhere.
+ Zone getZone();
}
diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
index 95f9963d6f4..fbf685b9d86 100644
--- a/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
+++ b/config-model-api/src/test/java/com/yahoo/config/application/api/DeploymentSpecTest.java
@@ -6,6 +6,8 @@ import com.yahoo.config.provision.RegionName;
import org.junit.Test;
import java.io.StringReader;
+import java.time.Instant;
+import java.time.ZoneId;
import java.util.Optional;
import static org.junit.Assert.assertEquals;
@@ -276,4 +278,94 @@ public class DeploymentSpecTest {
}
}
+ @Test
+ public void deploymentSpecWithBlockUpgrade() {
+ StringReader r = new StringReader(
+ "<deployment>\n" +
+ " <block-upgrade days='mon,tue' hours='15-16'/>\n" +
+ " <block-upgrade days='sat' hours='10' time-zone='CET'/>\n" +
+ " <prod>\n" +
+ " <region active='true'>us-west-1</region>\n" +
+ " </prod>\n" +
+ "</deployment>"
+ );
+ DeploymentSpec spec = DeploymentSpec.fromXml(r);
+ assertEquals(2, spec.changeBlocker().size());
+ assertTrue(spec.changeBlocker().get(0).blocksVersions());
+ assertFalse(spec.changeBlocker().get(0).blocksRevisions());
+ assertEquals(ZoneId.of("UTC"), spec.changeBlocker().get(0).window().zone());
+
+ assertTrue(spec.changeBlocker().get(1).blocksVersions());
+ assertFalse(spec.changeBlocker().get(1).blocksRevisions());
+ assertEquals(ZoneId.of("CET"), spec.changeBlocker().get(1).window().zone());
+
+ assertTrue(spec.canUpgradeAt(Instant.parse("2017-09-18T14:15:30.00Z")));
+ assertFalse(spec.canUpgradeAt(Instant.parse("2017-09-18T15:15:30.00Z")));
+ assertFalse(spec.canUpgradeAt(Instant.parse("2017-09-18T16:15:30.00Z")));
+ assertTrue(spec.canUpgradeAt(Instant.parse("2017-09-18T17:15:30.00Z")));
+
+ assertTrue(spec.canUpgradeAt(Instant.parse("2017-09-23T09:15:30.00Z")));
+ assertFalse(spec.canUpgradeAt(Instant.parse("2017-09-23T08:15:30.00Z"))); // 10 in CET
+ assertTrue(spec.canUpgradeAt(Instant.parse("2017-09-23T10:15:30.00Z")));
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void deploymentSpecWithIllegallyOrderedDeploymentSpec1() {
+ StringReader r = new StringReader(
+ "<deployment>\n" +
+ " <block-change days='sat' hours='10' time-zone='CET'/>\n" +
+ " <prod>\n" +
+ " <region active='true'>us-west-1</region>\n" +
+ " </prod>\n" +
+ " <block-change days='mon,tue' hours='15-16'/>\n" +
+ "</deployment>"
+ );
+ DeploymentSpec spec = DeploymentSpec.fromXml(r);
+ }
+
+ @Test(expected = IllegalArgumentException.class)
+ public void deploymentSpecWithIllegallyOrderedDeploymentSpec2() {
+ StringReader r = new StringReader(
+ "<deployment>\n" +
+ " <block-change days='sat' hours='10' time-zone='CET'/>\n" +
+ " <test/>\n" +
+ " <prod>\n" +
+ " <region active='true'>us-west-1</region>\n" +
+ " </prod>\n" +
+ "</deployment>"
+ );
+ DeploymentSpec spec = DeploymentSpec.fromXml(r);
+ }
+
+ @Test
+ public void deploymentSpecWithChangeBlocker() {
+ StringReader r = new StringReader(
+ "<deployment>\n" +
+ " <block-change revision='false' days='mon,tue' hours='15-16'/>\n" +
+ " <block-change days='sat' hours='10' time-zone='CET'/>\n" +
+ " <prod>\n" +
+ " <region active='true'>us-west-1</region>\n" +
+ " </prod>\n" +
+ "</deployment>"
+ );
+ DeploymentSpec spec = DeploymentSpec.fromXml(r);
+ assertEquals(2, spec.changeBlocker().size());
+ assertTrue(spec.changeBlocker().get(0).blocksVersions());
+ assertFalse(spec.changeBlocker().get(0).blocksRevisions());
+ assertEquals(ZoneId.of("UTC"), spec.changeBlocker().get(0).window().zone());
+
+ assertTrue(spec.changeBlocker().get(1).blocksVersions());
+ assertTrue(spec.changeBlocker().get(1).blocksRevisions());
+ assertEquals(ZoneId.of("CET"), spec.changeBlocker().get(1).window().zone());
+
+ assertTrue(spec.canUpgradeAt(Instant.parse("2017-09-18T14:15:30.00Z")));
+ assertFalse(spec.canUpgradeAt(Instant.parse("2017-09-18T15:15:30.00Z")));
+ assertFalse(spec.canUpgradeAt(Instant.parse("2017-09-18T16:15:30.00Z")));
+ assertTrue(spec.canUpgradeAt(Instant.parse("2017-09-18T17:15:30.00Z")));
+
+ assertTrue(spec.canUpgradeAt(Instant.parse("2017-09-23T09:15:30.00Z")));
+ assertFalse(spec.canUpgradeAt(Instant.parse("2017-09-23T08:15:30.00Z"))); // 10 in CET
+ assertTrue(spec.canUpgradeAt(Instant.parse("2017-09-23T10:15:30.00Z")));
+ }
+
}
diff --git a/config-model-api/src/test/java/com/yahoo/config/application/api/TimeWindowTest.java b/config-model-api/src/test/java/com/yahoo/config/application/api/TimeWindowTest.java
new file mode 100644
index 00000000000..86ce0466213
--- /dev/null
+++ b/config-model-api/src/test/java/com/yahoo/config/application/api/TimeWindowTest.java
@@ -0,0 +1,143 @@
+package com.yahoo.config.application.api;
+
+import org.junit.Test;
+
+import java.time.Instant;
+
+import static java.time.DayOfWeek.FRIDAY;
+import static java.time.DayOfWeek.MONDAY;
+import static java.time.DayOfWeek.SATURDAY;
+import static java.time.DayOfWeek.THURSDAY;
+import static java.time.DayOfWeek.TUESDAY;
+import static java.time.DayOfWeek.WEDNESDAY;
+import static java.util.Arrays.asList;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+/**
+ * @author mpolden
+ */
+public class TimeWindowTest {
+
+ @Test
+ public void includesInstant() {
+ {
+ TimeWindow tw = TimeWindow.from("mon", "10,11", "UTC");
+ Instant i0 = Instant.parse("2017-09-17T11:15:30.00Z"); // Wrong day
+ Instant i1 = Instant.parse("2017-09-18T09:15:30.00Z"); // Wrong hour
+ Instant i2 = Instant.parse("2017-09-18T10:15:30.00Z");
+ Instant i3 = Instant.parse("2017-09-18T11:15:30.00Z");
+ Instant i4 = Instant.parse("2017-09-18T12:15:30.00Z"); // Wrong hour
+ Instant i5 = Instant.parse("2017-09-19T11:15:30.00Z"); // Wrong day
+
+ assertFalse("Instant " + i0 + " is not in window", tw.includes(i0));
+ assertFalse("Instant " + i1 + " is not in window", tw.includes(i1));
+ assertTrue("Instant " + i2 + " is in window", tw.includes(i2));
+ assertTrue("Instant " + i3 + " is in window", tw.includes(i3));
+ assertFalse("Instant " + i4 + " is not in window", tw.includes(i4));
+ assertFalse("Instant " + i5 + " is not in window", tw.includes(i5));
+ }
+ {
+ TimeWindow tw = TimeWindow.from("mon", "12,13", "CET");
+ Instant i0 = Instant.parse("2017-09-17T11:15:30.00Z");
+ Instant i1 = Instant.parse("2017-09-18T09:15:30.00Z");
+ Instant i2 = Instant.parse("2017-09-18T10:15:30.00Z"); // Including offset this matches hour 12
+ Instant i3 = Instant.parse("2017-09-18T11:15:30.00Z"); // Including offset this matches hour 13
+ Instant i4 = Instant.parse("2017-09-18T12:15:30.00Z");
+ Instant i5 = Instant.parse("2017-09-19T11:15:30.00Z");
+ assertFalse("Instant " + i0 + " is not in window", tw.includes(i0));
+ assertFalse("Instant " + i1 + " is not in window", tw.includes(i1));
+ assertTrue("Instant " + i2 + " is in window", tw.includes(i2));
+ assertTrue("Instant " + i3 + " is in window", tw.includes(i3));
+ assertFalse("Instant " + i4 + " is not in window", tw.includes(i4));
+ assertFalse("Instant " + i5 + " is not in window", tw.includes(i5));
+ }
+ }
+
+ @Test
+ public void validWindows() {
+ {
+ TimeWindow fz = TimeWindow.from("fri", "8,17-19", "UTC");
+ assertEquals(asList(FRIDAY), fz.days());
+ assertEquals(asList(8, 17, 18, 19), fz.hours());
+ }
+ {
+ TimeWindow fz = TimeWindow.from("sat,", "8,17-19", "UTC");
+ assertEquals(asList(SATURDAY), fz.days());
+ assertEquals(asList(8, 17, 18, 19), fz.hours());
+ }
+ {
+ TimeWindow fz = TimeWindow.from("tue,sat", "0,3,7,10", "UTC");
+ assertEquals(asList(TUESDAY, SATURDAY), fz.days());
+ assertEquals(asList(0, 3, 7, 10), fz.hours());
+ }
+ {
+ TimeWindow fz = TimeWindow.from("mon,wed-thu", "0,17-19", "UTC");
+ assertEquals(asList(MONDAY, WEDNESDAY, THURSDAY), fz.days());
+ assertEquals(asList(0, 17, 18, 19), fz.hours());
+ }
+ {
+ // Full day names are allowed
+ TimeWindow fz = TimeWindow.from("monday,wednesday-thursday", "0,17-19", "UTC");
+ assertEquals(asList(MONDAY, WEDNESDAY, THURSDAY), fz.days());
+ assertEquals(asList(0, 17, 18, 19), fz.hours());
+ }
+ {
+ // Duplicate days and overlapping ranges are allowed
+ TimeWindow fz = TimeWindow.from("mon,wed-thu,mon", "3,1-4", "UTC");
+ assertEquals(asList(MONDAY, WEDNESDAY, THURSDAY), fz.days());
+ assertEquals(asList(1, 2, 3, 4), fz.hours());
+ }
+ }
+
+ @Test
+ public void invalidWindows() {
+ // Invalid time zone
+ assertInvalidZone("foo", "Invalid time zone 'foo'");
+
+ // Malformed day input
+ assertInvalidDays("", "Invalid day ''");
+ assertInvalidDays("foo-", "Invalid range 'foo-'");
+ assertInvalidDays("foo", "Invalid day 'foo'");
+ assertInvalidDays("f", "Invalid day 'f'");
+ // Window crossing week boundary is disallowed
+ assertInvalidDays("fri-tue", "Invalid day range 'fri-tue'");
+
+ // Malformed hour input
+ assertInvalidHours("", "Invalid hour ''");
+ assertInvalidHours("24", "Invalid hour '24'");
+ assertInvalidHours("-1-9", "Invalid range '-1-9'");
+ // Window crossing day boundary is disallowed
+ assertInvalidHours("23-1", "Invalid hour range '23-1'");
+ }
+
+ private static void assertInvalidZone(String zoneSpec, String exceptionMessage) {
+ try {
+ TimeWindow.from("mon", "1", zoneSpec);
+ fail("Expected exception");
+ } catch (IllegalArgumentException e) {
+ assertEquals(exceptionMessage, e.getMessage());
+ }
+ }
+
+ private static void assertInvalidDays(String daySpec, String exceptionMessage) {
+ try {
+ TimeWindow.from(daySpec, "1", "UTC");
+ fail("Expected exception");
+ } catch (IllegalArgumentException e) {
+ assertEquals(exceptionMessage, e.getMessage());
+ }
+ }
+
+ private static void assertInvalidHours(String hourSpec, String exceptionMessage) {
+ try {
+ TimeWindow.from("mon", hourSpec, "UTC");
+ fail("Expected exception");
+ } catch (IllegalArgumentException e) {
+ assertEquals(exceptionMessage, e.getMessage());
+ }
+ }
+
+}
diff --git a/config-model-fat/CMakeLists.txt b/config-model-fat/CMakeLists.txt
new file mode 100644
index 00000000000..1e2364556dc
--- /dev/null
+++ b/config-model-fat/CMakeLists.txt
@@ -0,0 +1,5 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_java_artifact(config-model-fat)
+
+install(FILES src/main/resources/config-models.xml
+ DESTINATION conf/configserver-app)
diff --git a/config-model/CMakeLists.txt b/config-model/CMakeLists.txt
new file mode 100644
index 00000000000..274ab8a763b
--- /dev/null
+++ b/config-model/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(config-model)
+
+vespa_install_script(src/main/perl/vespa-deploy bin)
+vespa_install_script(src/main/perl/vespa-expand-config.pl bin)
+vespa_install_script(src/main/perl/vespa-replicate-log-stream bin)
+vespa_install_script(src/main/sh/vespa-validate-application bin)
+
+install(DIRECTORY src/main/resources/schema DESTINATION share/vespa PATTERN ".gitignore" EXCLUDE)
+install(DIRECTORY src/main/resources/schema DESTINATION share/vespa/schema/version/6.x PATTERN ".gitignore" EXCLUDE)
diff --git a/config-model/pom.xml b/config-model/pom.xml
index c1c08e6e702..4e79da3279d 100644
--- a/config-model/pom.xml
+++ b/config-model/pom.xml
@@ -279,7 +279,7 @@
<artifactId>filedistribution</artifactId>
<version>${project.version}</version>
</dependency>
- <dependency>
+ <dependency>
<groupId>com.yahoo.vespa</groupId>
<artifactId>searchsummary</artifactId>
<version>${project.version}</version>
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/MacroShadower.java b/config-model/src/main/java/com/yahoo/searchdefinition/MacroShadower.java
index 9e847492a81..edf0ce69819 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/MacroShadower.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/MacroShadower.java
@@ -6,7 +6,6 @@ import com.yahoo.searchlib.rankingexpression.rule.*;
import com.yahoo.searchlib.rankingexpression.transform.ExpressionTransformer;
import java.util.Map;
-import java.util.logging.Logger;
/**
* Transforms function nodes to reference nodes if a macro shadows a built-in function.
@@ -23,8 +22,6 @@ import java.util.logging.Logger;
*/
class MacroShadower extends ExpressionTransformer {
- private static final Logger log = Logger.getLogger(MacroShadower.class.getName());
-
private final Map<String, RankProfile.Macro> macros;
public MacroShadower(Map<String, RankProfile.Macro> macros) {
@@ -58,11 +55,9 @@ class MacroShadower extends ExpressionTransformer {
int functionArity = function.getFunction().arity();
int macroArity = macro.getFormalParams() != null ? macro.getFormalParams().size() : 0;
if (functionArity != macroArity) {
- log.warning("Macro \"" + name + "\" has the same name as a built-in function. Due to different number of arguments, the built-in function will be used.");
return transformChildren(function);
}
- log.warning("Macro \"" + name + "\" shadows the built-in function with the same name.");
ReferenceNode node = new ReferenceNode(name, function.children(), null);
return transformChildren(node);
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java
index 536d4b4d67c..26f98026d4f 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/Processing.java
@@ -74,6 +74,7 @@ public class Processing {
RankProfileTypeSettingsProcessor::new,
ReferenceFieldsProcessor::new,
FastAccessValidator::new,
+ ReservedMacroNames::new,
// These two should be last.
IndexingValidation::new,
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/processing/ReservedMacroNames.java b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ReservedMacroNames.java
new file mode 100644
index 00000000000..19063b8e7f9
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/processing/ReservedMacroNames.java
@@ -0,0 +1,51 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.searchdefinition.processing;
+
+import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.searchdefinition.RankProfile;
+import com.yahoo.searchdefinition.RankProfileRegistry;
+import com.yahoo.searchdefinition.Search;
+import com.yahoo.searchlib.rankingexpression.parser.RankingExpressionParserConstants;
+import com.yahoo.vespa.model.container.search.QueryProfiles;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.logging.Level;
+
+/**
+ * Issues a warning if some macro has a reserved name. This is not necessarily
+ * an error, as a macro can shadow a built-in function.
+ *
+ * @author lesters
+ */
+public class ReservedMacroNames extends Processor {
+
+ public ReservedMacroNames(Search search, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) {
+ super(search, deployLogger, rankProfileRegistry, queryProfiles);
+ }
+
+ @Override
+ public void process() {
+ Set<String> reservedNames = getReservedNames();
+ for (RankProfile rp : rankProfileRegistry.allRankProfiles()) {
+ for (String macroName : rp.getMacros().keySet()) {
+ if (reservedNames.contains(macroName)) {
+ deployLogger.log(Level.WARNING, "Macro \"" + macroName + "\" " +
+ "in rank profile \"" + rp.getName() + "\" " +
+ "has a reserved name. This might mean that the macro shadows " +
+ "the built-in function with the same name."
+ );
+ }
+ }
+ }
+ }
+
+ private Set<String> getReservedNames() {
+ Set<String> names = new HashSet<>();
+ for (String token : RankingExpressionParserConstants.tokenImage) {
+ String tokenWithoutQuotes = token.substring(1, token.length()-1);
+ names.add(tokenWithoutQuotes);
+ }
+ return names;
+ }
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java b/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java
index 38e417aca9e..b4b89278c51 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/AbstractService.java
@@ -8,7 +8,15 @@ import com.yahoo.config.model.producer.AbstractConfigProducer;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.filedistribution.PathDoesNotExistException;
-import java.util.*;
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
import static com.yahoo.text.Lowercase.toLowerCase;
@@ -45,7 +53,7 @@ public abstract class AbstractService extends AbstractConfigProducer<AbstractCon
/** The optional PRELOAD libraries for this Service. */
// Please keep non-null, as passed to command line in service startup
- private String preload = Defaults.getDefaults().underVespaHome("lib64/vespa/malloc/libvespamallocd.so");
+ private String preload = Defaults.getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so");
// If larger or equal to 0 it mean that explicit mmaps shall not be included in coredump.
private long mmapNoCoreLimit = -1l;
@@ -66,7 +74,7 @@ public abstract class AbstractService extends AbstractConfigProducer<AbstractCon
* more key/value pairs to the service-list dump.
* Supported key datatypes are String, and values may be String or Integer.
*/
- private HashMap<String, Object> serviceProperties = new LinkedHashMap<>();
+ private Map<String, Object> serviceProperties = new LinkedHashMap<>();
/** The affinity properties of this service. */
private Optional<Affinity> affinity = Optional.empty();
@@ -522,15 +530,13 @@ public abstract class AbstractService extends AbstractConfigProducer<AbstractCon
* The service HTTP port for health status
* @return portnumber
*/
- public int getHealthPort() {return -1;}
+ public int getHealthPort() { return -1;}
/**
* Overridden by subclasses. List of default dimensions to be added to this services metrics
* @return The default dimensions for this service
*/
- public HashMap<String, String> getDefaultMetricDimensions(){
- return new LinkedHashMap<>();
- }
+ public HashMap<String, String> getDefaultMetricDimensions(){ return new LinkedHashMap<>(); }
// For testing
public int getNumPortsAllocated() {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/LogForwarder.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/LogForwarder.java
new file mode 100644
index 00000000000..46f5807b350
--- /dev/null
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/LogForwarder.java
@@ -0,0 +1,64 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.model.admin;
+
+import com.yahoo.cloud.config.LogforwarderConfig;
+import com.yahoo.config.model.producer.AbstractConfigProducer;
+import com.yahoo.vespa.model.AbstractService;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class LogForwarder extends AbstractService implements LogforwarderConfig.Producer {
+
+ public static class Config {
+ public final String deploymentServer;
+ public final String clientName;
+
+ private Config(String ds, String cn) {
+ this.deploymentServer = ds;
+ this.clientName = cn;
+ }
+ public Config withDeploymentServer(String ds) {
+ return new Config(ds, clientName);
+ }
+ public Config withClientName(String cn) {
+ return new Config(deploymentServer, cn);
+ }
+ }
+
+ private final Config config;
+
+ /**
+ * Creates a new LogForwarder instance.
+ */
+ // TODO: Use proper types?
+ public LogForwarder(AbstractConfigProducer parent, int index, Config config) {
+ super(parent, "logforwarder." + index);
+ this.config = config;
+ setProp("clustertype", "hosts");
+ setProp("clustername", "admin");
+ }
+
+ public static Config cfg() {
+ return new Config(null, null);
+ }
+
+ /**
+ * LogForwarder does not need any ports.
+ *
+ * @return The number of ports reserved by the LogForwarder
+ */
+ public int getPortCount() { return 0; }
+
+ /**
+ * @return The command used to start LogForwarder
+ */
+ public String getStartupCommand() { return "exec $ROOT/bin/vespa-logforwarder-start -c " + getConfigId(); }
+
+ @Override
+ public void getConfig(LogforwarderConfig.Builder builder) {
+ builder.deploymentServer(config.deploymentServer);
+ builder.clientName(config.clientName);
+ }
+
+}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
index 4087b9dadbf..ccf7974d381 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/monitoring/VespaMetricSet.java
@@ -30,11 +30,23 @@ public class VespaMetricSet {
metrics.addAll(getQrserverMetrics());
metrics.addAll(getContainerMetrics());
metrics.addAll(getConfigServerMetrics());
+ metrics.addAll(getSentinelMetrics());
metrics.addAll(getOtherMetrics());
return Collections.unmodifiableSet(metrics);
}
+ private static Set<Metric> getSentinelMetrics() {
+ Set<Metric> metrics = new LinkedHashSet<>();
+
+ metrics.add(new Metric("sentinel.restarts.count"));
+
+ metrics.add(new Metric("sentinel.running.count"));
+ metrics.add(new Metric("sentinel.running.last"));
+
+ return metrics;
+ }
+
private static Set<Metric> getOtherMetrics() {
Set<Metric> metrics = new LinkedHashSet<>();
metrics.add(new Metric("slobrok.heartbeats.failed.count", "slobrok.heartbeats.failed"));
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java
index 908481aad63..3049112ac0a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminBuilderBase.java
@@ -111,4 +111,21 @@ public abstract class DomAdminBuilderBase extends VespaDomBuilder.DomConfigProdu
return minutes;
}
+ void addLogForwarders(ModelElement logForwardingElement, Admin admin) {
+ if (logForwardingElement == null) return;
+
+ int i = 0;
+ for (ModelElement e : logForwardingElement.getChildren("splunk")) {
+ LogForwarder.Config cfg = LogForwarder.cfg()
+ .withDeploymentServer(e.getStringAttribute("deployment-server"))
+ .withClientName(e.getStringAttribute("client-name"));
+ for (HostResource host : admin.getHostSystem().getHosts()) {
+ LogForwarder logForwarder = new LogForwarder(admin, i, cfg);
+ logForwarder.setHostResource(host);
+ logForwarder.initService();
+ i++;
+ }
+ }
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java
index 180cf4eadec..dd1d4e36255 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV2Builder.java
@@ -49,6 +49,9 @@ public class DomAdminV2Builder extends DomAdminBuilderBase {
admin.addSlobroks(getSlobroks(admin, XML.getChild(adminE, "slobroks")));
if ( ! admin.multitenant())
admin.setClusterControllers(addConfiguredClusterControllers(admin, adminE));
+
+ ModelElement adminElement = new ModelElement(adminE);
+ addLogForwarders(adminElement.getChild("logforwarding"), admin);
}
private List<Configserver> parseConfigservers(Admin admin, Element adminE) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
index 5e22ba3961f..cb8ec205395 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomAdminV4Builder.java
@@ -51,6 +51,8 @@ public class DomAdminV4Builder extends DomAdminBuilderBase {
assignSlobroks(requestedSlobroks.orElse(NodesSpecification.nonDedicated(3, version)), admin);
assignLogserver(requestedLogservers.orElse(NodesSpecification.nonDedicated(1, version)), admin);
+
+ addLogForwarders(adminElement.getChild("logforwarding"), admin);
}
private void assignSlobroks(NodesSpecification nodesSpecification, Admin admin) {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java
index 4e66368ef73..4ea638ca41a 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilder.java
@@ -74,6 +74,8 @@ public class DomSearchTuningBuilder extends VespaDomBuilder.DomConfigProducerBui
handleSummary(parent, e, t.searchNode);
} else if (equals("initialize", e)) {
handleInitialize(e, t.searchNode);
+ } else if (equals("background", e)) {
+ handleBackground(e, t.searchNode);
}
}
}
@@ -245,10 +247,15 @@ public class DomSearchTuningBuilder extends VespaDomBuilder.DomConfigProducerBui
if (equals("maxfilesize", e)) {
s.logStore.maxFileSize = asLong(e);
} else if (equals("maxdiskbloatfactor", e)) {
- s.logStore.maxDiskBloatFactor = asDouble(e);
+ parent.deployLogger().log(Level.WARNING,
+ "Element 'maxdiskbloatfactor' is deprecated and ignored." +
+ " The min value from flush.memory.xxx.diskbloatfactor is used instead");
} else if (equals("minfilesizefactor", e)) {
s.logStore.minFileSizeFactor = asDouble(e);
} else if (equals("numthreads", e)) {
+ parent.deployLogger().log(Level.WARNING,
+ "Element 'numthreads' is deprecated. Use background.threads instead." +
+ " For now it will take max of the two.");
s.logStore.numThreads = asInt(e);
} else if (equals("chunk", e)) {
s.logStore.chunk = new Tuning.SearchNode.Summary.Store.Component(true);
@@ -266,4 +273,13 @@ public class DomSearchTuningBuilder extends VespaDomBuilder.DomConfigProducerBui
}
}
+ private void handleBackground(Element spec, Tuning.SearchNode sn) {
+ sn.background = new Tuning.SearchNode.Background();
+ for (Element e : XML.getChildren(spec)) {
+ if (equals("threads", e)) {
+ sn.background.threads = asInt(e);
+ }
+ }
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/ModelElement.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/ModelElement.java
index c5bc275a9e1..1fc70e343f3 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/ModelElement.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/ModelElement.java
@@ -11,7 +11,7 @@ import java.util.List;
import java.util.StringTokenizer;
/**
- * A w3c Element wrapper whith a better API.
+ * A w3c Element wrapper with a better API.
*
* Author unknown.
*/
@@ -46,6 +46,18 @@ public class ModelElement {
return null;
}
+ /**
+ * If not found, return empty list
+ */
+ public List<ModelElement> getChildren(String name) {
+ List<Element> e = XML.getChildren(xml, name);
+
+ List<ModelElement> list = new ArrayList<>();
+ e.forEach(element -> list.add(new ModelElement(element)));
+
+ return list;
+ }
+
public ModelElement getChildByPath(String path) {
StringTokenizer tokenizer = new StringTokenizer(path, ".");
ModelElement curElem = this;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
index b54978f52d3..c4cfd6d185b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
@@ -204,7 +204,7 @@ public class ContentSearchCluster extends AbstractConfigProducer implements Prot
return hasIndexedCluster() ? getIndexed().getSearchNodes() : nonIndexed;
}
- public SearchNode addSearchNode(ContentNode node, StorageGroup parentGroup, ModelElement element) {
+ public void addSearchNode(ContentNode node, StorageGroup parentGroup, ModelElement element) {
AbstractConfigProducer parent = hasIndexedCluster() ? getIndexed() : this;
NodeSpec spec = getNextSearchNodeSpec(parentGroup);
@@ -229,7 +229,6 @@ public class ContentSearchCluster extends AbstractConfigProducer implements Prot
} else {
nonIndexed.add(snode);
}
- return snode;
}
/** Translates group ids to continuous 0-base "row" id integers */
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/EngineFactoryBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/EngineFactoryBuilder.java
index 95193b03764..04c5fd4fd72 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/EngineFactoryBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/EngineFactoryBuilder.java
@@ -22,8 +22,6 @@ public class EngineFactoryBuilder {
return new ProtonEngine.Factory(c.getSearch());
} else if (persistence.getChild("dummy") != null) {
return new com.yahoo.vespa.model.content.engines.DummyPersistence.Factory();
- } else if (persistence.getChild("rpc") != null) {
- return new RPCEngine.Factory();
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonEngine.java b/config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonEngine.java
index 9c655e62d32..df8cfc6f9bd 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonEngine.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonEngine.java
@@ -22,8 +22,8 @@ public class ProtonEngine {
@Override
public PersistenceEngine create(StorageNode storageNode, StorageGroup parentGroup, ModelElement storageNodeElement) {
- SearchNode searchNode = search.addSearchNode(storageNode, parentGroup, storageNodeElement);
- return new ProtonProvider(storageNode, searchNode);
+ search.addSearchNode(storageNode, parentGroup, storageNodeElement);
+ return new ProtonProvider(storageNode);
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonProvider.java b/config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonProvider.java
index 9d6bccee7e4..ff3b4891146 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonProvider.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/engines/ProtonProvider.java
@@ -1,14 +1,23 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.content.engines;
+import com.yahoo.vespa.config.content.core.StorServerConfig;
import com.yahoo.vespa.model.content.StorageNode;
import com.yahoo.vespa.model.search.SearchNode;
/**
* @author baldersheim
*/
-public class ProtonProvider extends RPCEngine {
- public ProtonProvider(StorageNode parent, SearchNode searchNode) {
- super(parent, searchNode);
+public class ProtonProvider extends PersistenceEngine {
+
+ public ProtonProvider(StorageNode parent) {
+ super(parent, "provider");
+ }
+
+ @Override
+ public void getConfig(StorServerConfig.Builder builder) {
+ StorServerConfig.Persistence_provider.Builder provider = new StorServerConfig.Persistence_provider.Builder();
+ provider.type(StorServerConfig.Persistence_provider.Type.Enum.RPC);
+ builder.persistence_provider(provider);
}
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/engines/RPCEngine.java b/config-model/src/main/java/com/yahoo/vespa/model/content/engines/RPCEngine.java
deleted file mode 100644
index 8be2c0c4dd6..00000000000
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/engines/RPCEngine.java
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.model.content.engines;
-
-import com.yahoo.vespa.config.content.core.StorServerConfig;
-import com.yahoo.vespa.model.builder.xml.dom.ModelElement;
-import com.yahoo.vespa.model.content.StorageGroup;
-import com.yahoo.vespa.model.content.StorageNode;
-import com.yahoo.vespa.model.content.cluster.ContentCluster;
-import com.yahoo.vespa.model.search.SearchNode;
-
-public class RPCEngine extends PersistenceEngine {
-
- private SearchNode searchNode;
- public RPCEngine(StorageNode parent) {
- super(parent, "provider");
- }
-
- public RPCEngine(StorageNode parent, SearchNode searchNode) {
- super(parent, "provider");
- this.searchNode = searchNode;
- }
-
- @Override
- public void getConfig(StorServerConfig.Builder builder) {
- StorServerConfig.Persistence_provider.Builder provider =
- new StorServerConfig.Persistence_provider.Builder();
- provider.type(StorServerConfig.Persistence_provider.Type.Enum.RPC);
-
- if (searchNode != null) {
- provider.rpc(new StorServerConfig.Persistence_provider.Rpc.Builder().connectspec("tcp/localhost:" + searchNode.getPersistenceProviderRpcPort()));
- }
-
- builder.persistence_provider(provider);
- }
-
- public static class Factory implements PersistenceFactory {
- @Override
- public PersistenceEngine create(StorageNode storageNode, StorageGroup parentGroup, ModelElement storageNodeElement) {
- return new RPCEngine(storageNode);
- }
-
- @Override
- public boolean supportRevert() {
- return false;
- }
-
- @Override
- public boolean enableMultiLevelSplitting() {
- return false;
- }
-
- @Override
- public ContentCluster.DistributionMode getDefaultDistributionMode() {
- return ContentCluster.DistributionMode.LOOSE;
- }
- }
-}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
index e07327b1666..8860f5c2249 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
@@ -102,7 +102,6 @@ public class FileDistributor {
}
}
dbHandler.sendDeployedFiles(fileSourceHost, allFilesToSend());
- dbHandler.limitSendingOfDeployedFilesTo(union(getTargetHostnames(), fileSourceHost));
dbHandler.removeDeploymentsThatHaveDifferentApplicationId(getTargetHostnames());
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/NodeFlavorTuning.java b/config-model/src/main/java/com/yahoo/vespa/model/search/NodeFlavorTuning.java
index 5750db08178..5f552f119c5 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/NodeFlavorTuning.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/NodeFlavorTuning.java
@@ -27,9 +27,10 @@ public class NodeFlavorTuning implements ProtonConfig.Producer {
setHwInfo(builder);
tuneDiskWriteSpeed(builder);
tuneDocumentStoreMaxFileSize(builder.summary.log);
- tuneDocumentStoreNumThreads(builder.summary.log);
+ tuneDocumentStoreNumThreads(builder.background);
tuneFlushStrategyMemoryLimits(builder.flush.memory);
tuneFlushStrategyTlsSize(builder.flush.memory);
+ tuneSummaryReadIo(builder.summary.read);
}
private void setHwInfo(ProtonConfig.Builder builder) {
@@ -56,8 +57,8 @@ public class NodeFlavorTuning implements ProtonConfig.Producer {
builder.maxfilesize(fileSizeBytes);
}
- private void tuneDocumentStoreNumThreads(ProtonConfig.Summary.Log.Builder builder) {
- builder.numthreads(max(8, (int)nodeFlavor.getMinCpuCores()/2));
+ private void tuneDocumentStoreNumThreads(ProtonConfig.Background.Builder builder) {
+ builder.threads(max(8, (int)nodeFlavor.getMinCpuCores()/2));
}
private void tuneFlushStrategyMemoryLimits(ProtonConfig.Flush.Memory.Builder builder) {
@@ -72,4 +73,10 @@ public class NodeFlavorTuning implements ProtonConfig.Producer {
builder.maxtlssize(tlsSizeBytes);
}
+ private void tuneSummaryReadIo(ProtonConfig.Summary.Read.Builder builder) {
+ if (nodeFlavor.hasFastDisk()) {
+ builder.io(ProtonConfig.Summary.Read.Io.DIRECTIO);
+ }
+ }
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java b/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java
index 66abf3e23cf..c5f7272ee80 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/SearchNode.java
@@ -56,6 +56,11 @@ public class SearchNode extends AbstractService implements
private TransactionLogServer tls;
private AbstractService serviceLayerService;
private final Optional<Tuning> tuning;
+ private static final int RPC_PORT = 0;
+ private static final int FS4_PORT = 1;
+ private static final int FUTURE_HEALTH_PORT = 2;
+ private static final int UNUSED_3 = 3;
+ private static final int HEALTH_PORT = 4;
public static class Builder extends VespaDomBuilder.DomConfigProducerBuilder<SearchNode> {
@@ -101,11 +106,11 @@ public class SearchNode extends AbstractService implements
this.nodeSpec = nodeSpec;
this.clusterName = clusterName;
this.flushOnShutdown = flushOnShutdown;
- portsMeta.on(0).tag("rpc").tag("rtc").tag("admin").tag("status");
- portsMeta.on(1).tag("fs4");
- portsMeta.on(2).tag("srmp").tag("hack").tag("test");
- portsMeta.on(3).tag("rpc").tag("engines-provider");
- portsMeta.on(4).tag("http").tag("json").tag("health").tag("state");
+ portsMeta.on(RPC_PORT).tag("rpc").tag("rtc").tag("admin").tag("status");
+ portsMeta.on(FS4_PORT).tag("fs4");
+ portsMeta.on(FUTURE_HEALTH_PORT).tag("unused");
+ portsMeta.on(UNUSED_3).tag("unused");
+ portsMeta.on(HEALTH_PORT).tag("http").tag("json").tag("health").tag("state");
// Properties are set in DomSearchBuilder
monitorService();
this.tuning = tuning;
@@ -139,15 +144,6 @@ public class SearchNode extends AbstractService implements
}
/**
- * Returns the connection spec string that resolves to this search node.
- *
- * @return The connection string.
- */
- public String getConnectSpec() {
- return "tcp/" + getHost().getHostName() + ":" + getRpcPort();
- }
-
- /**
* Returns the number of ports needed by this service.
*
* @return The number of ports.
@@ -163,20 +159,7 @@ public class SearchNode extends AbstractService implements
* @return The port.
*/
public int getRpcPort() {
- return getRelativePort(0);
- }
-
- protected int getSlimeMessagingPort() {
- return getRelativePort(2);
- }
-
- /*
- * Returns the rpc port used for the engines provider interface.
- * @return The port
- */
-
- public int getPersistenceProviderRpcPort() {
- return getRelativePort(3);
+ return getRelativePort(RPC_PORT);
}
@Override
@@ -204,11 +187,11 @@ public class SearchNode extends AbstractService implements
}
public int getDispatchPort() {
- return getRelativePort(1);
+ return getRelativePort(FS4_PORT);
}
public int getHttpPort() {
- return getRelativePort(4);
+ return getRelativePort(HEALTH_PORT);
}
@Override
@@ -258,8 +241,6 @@ public class SearchNode extends AbstractService implements
builder.
ptport(getDispatchPort()).
rpcport(getRpcPort()).
- slime_messaging_port(getSlimeMessagingPort()).
- rtcspec(getConnectSpec()).
httpport(getHttpPort()).
partition(getNodeSpec().partitionId()).
clustername(getClusterName()).
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java b/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java
index 7c40acd91ee..1e3b1783f9b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/search/Tuning.java
@@ -282,14 +282,12 @@ public class Tuning extends AbstractConfigProducer implements PartitionsConfig.P
public static class LogStore {
public Long maxFileSize = null;
- public Double maxDiskBloatFactor = null;
public Integer numThreads = null;
public Component chunk = null;
public Double minFileSizeFactor = null;
public void getConfig(ProtonConfig.Summary.Log.Builder log) {
if (maxFileSize!=null) log.maxfilesize(maxFileSize);
- if (maxDiskBloatFactor!=null) log.maxdiskbloatfactor(maxDiskBloatFactor);
if (minFileSizeFactor!=null) log.minfilesizefactor(minFileSizeFactor);
if (numThreads != null) log.numthreads(numThreads);
if (chunk != null) {
@@ -304,7 +302,6 @@ public class Tuning extends AbstractConfigProducer implements PartitionsConfig.P
public void getConfig(ProtonConfig.Summary.Builder builder) {
if (cache != null) {
cache.getConfig(builder.cache);
-
}
if (logStore != null) {
logStore.getConfig(builder.log);
@@ -337,6 +334,17 @@ public class Tuning extends AbstractConfigProducer implements PartitionsConfig.P
}
}
+ public static class Background implements ProtonConfig.Producer {
+ public Integer threads = null;
+
+ @Override
+ public void getConfig(ProtonConfig.Builder builder) {
+ if (threads != null) {
+ builder.background.threads(threads);
+ }
+ }
+ }
+
public RequestThreads threads = null;
public FlushStrategy strategy = null;
public Resizing resizing = null;
@@ -344,6 +352,7 @@ public class Tuning extends AbstractConfigProducer implements PartitionsConfig.P
public Attribute attribute = null;
public Summary summary = null;
public Initialize initialize = null;
+ public Background background = null;
@Override
public void getConfig(ProtonConfig.Builder builder) {
@@ -354,6 +363,7 @@ public class Tuning extends AbstractConfigProducer implements PartitionsConfig.P
if (attribute != null) attribute.getConfig(builder);
if (summary != null) summary.getConfig(builder);
if (initialize != null) initialize.getConfig(builder);
+ if (background != null) background.getConfig(builder);
}
}
diff --git a/config-model/src/main/resources/schema/admin.rnc b/config-model/src/main/resources/schema/admin.rnc
index 26705784a34..3ab02af3efd 100644
--- a/config-model/src/main/resources/schema/admin.rnc
+++ b/config-model/src/main/resources/schema/admin.rnc
@@ -13,7 +13,8 @@ AdminV2 =
AdminSlobroks? &
(LegacyAdminMonitoring | AdminMonitoring)? &
(LegacyMetricConsumers | Metrics)? &
- ClusterControllers?
+ ClusterControllers? &
+ LogForwarding?
}
AdminV3 =
@@ -32,7 +33,8 @@ AdminV4 =
AdminV4LogServers? &
GenericConfig* &
(LegacyAdminMonitoring | AdminMonitoring)? &
- (LegacyMetricConsumers | Metrics)?
+ (LegacyMetricConsumers | Metrics)? &
+ LogForwarding?
}
AdminV4Slobroks =
@@ -112,3 +114,10 @@ ClusterControllers = element cluster-controllers {
service.attlist
}+
}
+
+LogForwarding = element logforwarding {
+ element splunk {
+ attribute deployment-server { xsd:string } &
+ attribute client-name { xsd:string }
+ }
+}
diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc
index 9031e8128df..e75c15ea524 100644
--- a/config-model/src/main/resources/schema/content.rnc
+++ b/config-model/src/main/resources/schema/content.rnc
@@ -353,6 +353,9 @@ Tuning = element tuning {
}? &
element initialize {
element threads { xsd:nonNegativeInteger }?
+ }? &
+ element background {
+ element threads { xsd:nonNegativeInteger }?
}?
}?
}
diff --git a/config-model/src/main/resources/schema/deployment.rnc b/config-model/src/main/resources/schema/deployment.rnc
index 36897643964..9a5eb46590c 100644
--- a/config-model/src/main/resources/schema/deployment.rnc
+++ b/config-model/src/main/resources/schema/deployment.rnc
@@ -5,8 +5,10 @@
start = element deployment {
attribute version { "1.0" } &
Upgrade? &
+ BlockChange* &
Test? &
Staging? &
+ BlockUpgrade* &
Prod*
}
@@ -14,6 +16,18 @@ Upgrade = element upgrade {
attribute policy { xsd:string }
}
+BlockChange = element block-change {
+ attribute revision { xsd:boolean }? &
+ attribute version { xsd:boolean }? &
+ attribute days { xsd:string } &
+ attribute hours { xsd:string } &
+ attribute time-zone { xsd:string }?
+}
+
+BlockUpgrade = element block-upgrade { # Legacy name - remove on Vespa 7
+ BlockChange
+}
+
Test = element test {
text
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index 98f599769c0..63d5d37598b 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -110,9 +110,9 @@ public class ModelProvisioningTest {
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getJvmArgs(), is(""));
assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getJvmArgs(), is(""));
- assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamallocd.so")));
- assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamallocd.so")));
- assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamallocd.so")));
+ assertThat(model.getContainerClusters().get("mydisc").getContainers().get(0).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
+ assertThat(model.getContainerClusters().get("mydisc").getContainers().get(1).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
+ assertThat(model.getContainerClusters().get("mydisc").getContainers().get(2).getPreLoad(), is(getDefaults().underVespaHome("lib64/vespa/malloc/libvespamalloc.so")));
assertThat(model.getContainerClusters().get("mydisc").getMemoryPercentage(), is(Optional.empty()));
assertThat(model.getContainerClusters().get("mydisc2").getContainers().get(0).getJvmArgs(), is("-verbosegc"));
diff --git a/config-model/src/test/java/com/yahoo/searchdefinition/processing/ReservedMacroNamesTestCase.java b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ReservedMacroNamesTestCase.java
new file mode 100644
index 00000000000..a4b4b03718d
--- /dev/null
+++ b/config-model/src/test/java/com/yahoo/searchdefinition/processing/ReservedMacroNamesTestCase.java
@@ -0,0 +1,71 @@
+package com.yahoo.searchdefinition.processing;
+
+import com.yahoo.config.application.api.DeployLogger;
+import com.yahoo.searchdefinition.RankProfileRegistry;
+import com.yahoo.searchdefinition.Search;
+import com.yahoo.searchdefinition.SearchBuilder;
+import com.yahoo.searchdefinition.parser.ParseException;
+import org.junit.Test;
+
+import java.util.logging.Level;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author lesters
+ */
+public class ReservedMacroNamesTestCase {
+
+ @Test
+ public void requireThatMacrosWithReservedNamesIssueAWarning() throws ParseException {
+ TestDeployLogger deployLogger = new TestDeployLogger();
+ RankProfileRegistry rankProfileRegistry = new RankProfileRegistry();
+ SearchBuilder builder = new SearchBuilder(rankProfileRegistry);
+ builder.importString(
+ "search test {\n" +
+ " document test { \n" +
+ " field a type string { \n" +
+ " indexing: index \n" +
+ " }\n" +
+ " }\n" +
+ " \n" +
+ " rank-profile test_rank_profile {\n" +
+ " macro not_a_reserved_name(x) {\n" +
+ " expression: x + x\n" +
+ " }\n" +
+ " macro sigmoid(x) {\n" +
+ " expression: x * x\n" +
+ " }\n" +
+ " first-phase {\n" +
+ " expression: sigmoid(2) + not_a_reserved_name(1)\n" +
+ " }\n" +
+ " }\n" +
+ " rank-profile test_rank_profile_2 inherits test_rank_profile {\n" +
+ " macro sin(x) {\n" +
+ " expression: x * x\n" +
+ " }\n" +
+ " first-phase {\n" +
+ " expression: sigmoid(2) + sin(1)\n" +
+ " }\n" +
+ " }\n" +
+ "}\n");
+ builder.build(deployLogger);
+
+ assertTrue(deployLogger.log.contains("sigmoid") && deployLogger.log.contains("test_rank_profile"));
+ assertTrue(deployLogger.log.contains("sigmoid") && deployLogger.log.contains("test_rank_profile_2"));
+ assertTrue(deployLogger.log.contains("sin") && deployLogger.log.contains("test_rank_profile_2"));
+ assertFalse(deployLogger.log.contains("not_a_reserved_name") && deployLogger.log.contains("test_rank_profile"));
+ assertFalse(deployLogger.log.contains("not_a_reserved_name") && deployLogger.log.contains("test_rank_profile_2"));
+
+ }
+
+ public static class TestDeployLogger implements DeployLogger {
+ public String log = "";
+ @Override
+ public void log(Level level, String message) {
+ log += message;
+ }
+ }
+
+}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/AdminTestCase.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/AdminTestCase.java
index 2dfef135425..b5375ccbce4 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/AdminTestCase.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/AdminTestCase.java
@@ -8,6 +8,7 @@ import com.yahoo.cloud.config.SentinelConfig;
import com.yahoo.config.model.ApplicationConfigProducerRoot;
import com.yahoo.config.model.deploy.DeployProperties;
import com.yahoo.config.model.deploy.DeployState;
+import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.model.test.TestDriver;
import com.yahoo.config.model.test.TestRoot;
import com.yahoo.config.provision.ApplicationId;
@@ -22,9 +23,12 @@ import com.yahoo.vespa.model.container.ContainerCluster;
import com.yahoo.vespa.model.container.component.Component;
import com.yahoo.vespa.model.container.component.StatisticsComponent;
import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithFilePkg;
+import com.yahoo.vespa.model.test.utils.VespaModelCreatorWithMockPkg;
+import org.junit.Ignore;
import org.junit.Test;
import java.util.Set;
+import java.util.stream.IntStream;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
@@ -246,7 +250,29 @@ public class AdminTestCase {
assertEquals(sc.values(0).operations(0).name(), StatisticsConfig.Values.Operations.Name.REGULAR);
assertEquals(sc.values(0).operations(0).arguments(0).key(), "limits");
assertEquals(sc.values(0).operations(0).arguments(0).value(), "25,50,100,500");
-
}
+ @Test
+ public void testLogForwarding() throws Exception {
+ String hosts = "<hosts>"
+ + " <host name=\"myhost0\">"
+ + " <alias>node0</alias>"
+ + " </host>"
+ + "</hosts>";
+
+ String services = "<services>" +
+ " <admin version='2.0'>" +
+ " <adminserver hostalias='node0' />" +
+ " <logforwarding>" +
+ " <splunk deployment-server='foo:123' client-name='foocli'/>" +
+ " </logforwarding>" +
+ " </admin>" +
+ "</services>";
+
+ VespaModel vespaModel = new VespaModelCreatorWithMockPkg(hosts, services).create();
+
+ Set<String> configIds = vespaModel.getConfigIds();
+ // 1 logforwarder on each host
+ assertTrue(configIds.toString(), configIds.contains("admin/logforwarder.0"));
+ }
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java b/config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java
index 5b5094a9c43..e3f773a64c2 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/admin/DedicatedAdminV4Test.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.admin;
+import com.yahoo.cloud.config.LogforwarderConfig;
import com.yahoo.cloud.config.SentinelConfig;
import com.yahoo.config.model.NullConfigModelRegistry;
import com.yahoo.config.application.api.ApplicationPackage;
@@ -16,13 +17,13 @@ import org.junit.Test;
import org.xml.sax.SAXException;
import java.io.IOException;
+import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
+import java.util.stream.IntStream;
import static com.yahoo.vespa.model.admin.monitoring.DefaultMetricsConsumer.VESPA_CONSUMER_ID;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.*;
/**
* @author lulf
@@ -141,6 +142,53 @@ public class DedicatedAdminV4Test {
"slobrok", "logd", "filedistributorservice", "qrserver");
}
+ @Test
+ public void testLogForwarding() throws IOException, SAXException {
+ String services = "<services>" +
+ " <admin version='4.0'>" +
+ " <slobroks><nodes count='2' dedicated='true'/></slobroks>" +
+ " <logservers><nodes count='1' dedicated='true'/></logservers>" +
+ " <logforwarding>" +
+ " <splunk deployment-server='foo:123' client-name='foocli'/>" +
+ " </logforwarding>" +
+ " </admin>" +
+ "</services>";
+
+ VespaModel model = createModel(hosts, services);
+ assertEquals(3, model.getHosts().size());
+
+ assertHostContainsServices(model, "hosts/myhost0",
+ "filedistributorservice", "logd", "logforwarder", "slobrok");
+ assertHostContainsServices(model, "hosts/myhost1",
+ "filedistributorservice", "logd", "logforwarder", "slobrok");
+ assertHostContainsServices(model, "hosts/myhost2",
+ "filedistributorservice", "logd", "logforwarder", "logserver");
+
+ Set<String> configIds = model.getConfigIds();
+ // 1 logforwarder on each host
+ IntStream.of(0, 1, 2).forEach(i -> assertTrue(configIds.toString(), configIds.contains("admin/logforwarder." + i)));
+
+ // First forwarder
+ {
+ LogforwarderConfig.Builder builder = new LogforwarderConfig.Builder();
+ model.getConfig(builder, "admin/logforwarder.0");
+ LogforwarderConfig config = new LogforwarderConfig(builder);
+
+ assertEquals("foo:123", config.deploymentServer());
+ assertEquals("foocli", config.clientName());
+ }
+
+ // Other host's forwarder
+ {
+ LogforwarderConfig.Builder builder = new LogforwarderConfig.Builder();
+ model.getConfig(builder, "admin/logforwarder.2");
+ LogforwarderConfig config = new LogforwarderConfig(builder);
+
+ assertEquals("foo:123", config.deploymentServer());
+ assertEquals("foocli", config.clientName());
+ }
+ }
+
private Set<String> serviceNames(VespaModel model, String hostname) {
SentinelConfig config = model.getConfig(SentinelConfig.class, hostname);
return config.service().stream().map(SentinelConfig.Service::name).collect(Collectors.toSet());
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java
index cbb725987eb..f9944f563c4 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ConfigValueChangeValidatorTest.java
@@ -65,6 +65,14 @@ public class ConfigValueChangeValidatorTest {
}
@Test
+ public void requireThatDocumentTypesCanBeAddedWithoutNeedForRestart() {
+ List<ConfigChangeAction> changes = getConfigChanges(
+ createVespaModel("", Arrays.asList("foo")),
+ createVespaModel("", Arrays.asList("foo", "bar")));
+ assertEquals(0, changes.size());
+ }
+
+ @Test
public void requireThatValidatorDetectsConfigChangeFromService() {
MockRoot oldRoot = createRootWithChildren(new SimpleConfigProducer("p", 0)
.withChildren(new ServiceWithAnnotation("s1", 1), new ServiceWithAnnotation("s2", 2)));
@@ -162,6 +170,10 @@ public class ConfigValueChangeValidatorTest {
}
private static VespaModel createVespaModel(String configSegment) {
+ return createVespaModel(configSegment, Arrays.asList("music"));
+ }
+
+ private static VespaModel createVespaModel(String configSegment, List<String> docTypes) {
// Note that the configSegment is here located on root.
return new VespaModelCreatorWithMockPkg(
null,
@@ -178,9 +190,7 @@ public class ConfigValueChangeValidatorTest {
" </jdisc>\n" +
" <content id='basicsearch' version='1.0'>\n" +
" <redundancy>1</redundancy>\n" +
- " <documents>\n" +
- " <document type='music' mode='index'/>\n" +
- " </documents>\n" +
+ createDocumentsSegment(docTypes) + "\n" +
" <group>\n" +
" <node hostalias='node1' distribution-key='0'/>\n" +
" </group>\n" +
@@ -191,10 +201,24 @@ public class ConfigValueChangeValidatorTest {
" </engine>\n" +
" </content>\n" +
"</services>",
- Collections.singletonList("search music { document music { } }")
+ createSearchDefinitions(docTypes)
).create();
}
+ private static String createDocumentsSegment(List<String> docTypes) {
+ return "<documents>\n" +
+ docTypes.stream()
+ .map(type -> "<document type='" + type + "' mode='index'/>")
+ .collect(Collectors.joining("\n")) +
+ "</documents>";
+ }
+
+ private static List<String> createSearchDefinitions(List<String> docTypes) {
+ return docTypes.stream()
+ .map(type -> "search " + type + " { document " + type + " { } }")
+ .collect(Collectors.toList());
+ }
+
private static String createQrStartConfigSegment(boolean verboseGc, int heapsize) {
return "<config name='search.config.qr-start'>\n" +
" <jvm>\n" +
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java
index e1818008462..043f961f98e 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/ContentBuilderTest.java
@@ -318,8 +318,6 @@ public class ContentBuilderTest extends DomBuilderTest {
assertTrue(partitionsConfig.dataset(0).engine(0).name_and_port().startsWith("tcp/node0:191"));
IndexedSearchCluster sc = m.getContentClusters().get("clu").getSearch().getIndexed();
assertEquals(2, sc.getSearchNodeCount());
- assertTrue(sc.getSearchNode(0).getPersistenceProviderRpcPort() >= 19100);
- assertTrue(sc.getSearchNode(0).getPersistenceProviderRpcPort() != sc.getSearchNode(1).getPersistenceProviderRpcPort());
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java
index f421cbd84db..210334a1f23 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/builder/xml/dom/DomSearchTuningBuilderTest.java
@@ -184,7 +184,6 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
"</cache>",
"<logstore>",
"<maxfilesize>512</maxfilesize>",
- "<maxdiskbloatfactor>1.4</maxdiskbloatfactor>",
"<minfilesizefactor>0.3</minfilesizefactor>",
"<numthreads>7</numthreads>",
"<chunk>",
@@ -204,7 +203,6 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
t.searchNode.summary.store.cache.compression.type);
assertEquals(3, t.searchNode.summary.store.cache.compression.level.intValue());
assertEquals(512, t.searchNode.summary.store.logStore.maxFileSize.longValue());
- assertEquals(1.4, t.searchNode.summary.store.logStore.maxDiskBloatFactor, DELTA);
assertEquals(0.3, t.searchNode.summary.store.logStore.minFileSizeFactor, DELTA);
assertEquals(7, t.searchNode.summary.store.logStore.numThreads.intValue());
assertEquals(256, t.searchNode.summary.store.logStore.chunk.maxSize.intValue());
@@ -219,7 +217,6 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
assertThat(cfg, containsString("summary.cache.compression.type NONE"));
assertThat(cfg, containsString("summary.cache.compression.level 3"));
assertThat(cfg, containsString("summary.log.maxfilesize 512"));
- assertThat(cfg, containsString("summary.log.maxdiskbloatfactor 1.4"));
assertThat(cfg, containsString("summary.log.minfilesizefactor 0.3"));
assertThat(cfg, containsString("summary.log.chunk.maxbytes 256"));
assertThat(cfg, containsString("summary.log.chunk.compression.type LZ4"));
@@ -236,4 +233,14 @@ public class DomSearchTuningBuilderTest extends DomBuilderTest {
assertThat(cfg, containsString("initialize.threads 7"));
}
+ @Test
+ public void requireThatWeCanParseBackgroundTag() {
+ Tuning t = createTuning(parseXml("<background>",
+ "<threads>7</threads>",
+ "</background>"));
+ assertEquals(7, t.searchNode.background.threads.intValue());
+ String cfg = getProtonCfg(t);
+ assertThat(cfg, containsString("background.threads 7"));
+ }
+
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/container/configserver/ConfigserverClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/container/configserver/ConfigserverClusterTest.java
index a89790384ac..cf490ff73ef 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/container/configserver/ConfigserverClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/container/configserver/ConfigserverClusterTest.java
@@ -38,7 +38,7 @@ public class ConfigserverClusterTest {
root = new MockRoot();
new ConfigServerContainerModelBuilder(new TestOptions().rpcPort(12345).useVespaVersionInRequest(true)
.hostedVespa(true).environment("test").region("bar")
- .numParallelTenantLoaders(4))
+ .numParallelTenantLoaders(99))
.build(new DeployState.Builder().build(), null, root, XML.getDocument(services).getDocumentElement());
root.freezeModelTopology();
}
@@ -73,7 +73,7 @@ public class ConfigserverClusterTest {
assertThat(config.httpport(), is(1337));
assertThat(config.serverId(), is(HostName.getLocalhost()));
assertTrue(config.useVespaVersionInRequest());
- assertThat(config.numParallelTenantLoaders(), is(4));
+ assertThat(config.numParallelTenantLoaders(), is(99));
assertFalse(config.multitenant());
assertTrue(config.hostedVespa());
assertThat(config.environment(), is("test"));
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java
index 5ba0c43fcee..2a3dbe002e6 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java
@@ -506,7 +506,6 @@ public class ClusterTest extends ContentBaseTest {
cluster.getStorageNodes().getConfig(builder);
cluster.getStorageNodes().getChildren().get("0").getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
- assertEquals("tcp/localhost:19106", config.persistence_provider().rpc().connectspec());
}
{
@@ -514,7 +513,6 @@ public class ClusterTest extends ContentBaseTest {
cluster.getStorageNodes().getConfig(builder);
cluster.getStorageNodes().getChildren().get("1").getConfig(builder);
StorServerConfig config = new StorServerConfig(builder);
- assertEquals("tcp/localhost:19118", config.persistence_provider().rpc().connectspec());
}
}
@@ -602,7 +600,6 @@ public class ClusterTest extends ContentBaseTest {
@Test
public void testProviders() {
testProvider("proton", StorServerConfig.Persistence_provider.Type.RPC);
- testProvider("rpc", StorServerConfig.Persistence_provider.Type.RPC);
testProvider("vds", StorServerConfig.Persistence_provider.Type.STORAGE);
testProvider("dummy", StorServerConfig.Persistence_provider.Type.DUMMY);
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedTest.java
index c1ed602f791..14e3bd72dc7 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/IndexedTest.java
@@ -198,7 +198,6 @@ public class IndexedTest extends ContentBaseTest {
StorServerConfig.Builder builder = new StorServerConfig.Builder();
s.getStorageNodes().getConfig(builder);
s.getStorageNodes().getChildren().get("3").getConfig(builder);
- assertTrue(new StorServerConfig(builder).persistence_provider().rpc().connectspec().startsWith("tcp/localhost:191"));
}
@Test
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/search/NodeFlavorTuningTest.java b/config-model/src/test/java/com/yahoo/vespa/model/search/NodeFlavorTuningTest.java
index 0e1ad92c70d..04ae3de3707 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/search/NodeFlavorTuningTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/search/NodeFlavorTuningTest.java
@@ -87,6 +87,12 @@ public class NodeFlavorTuningTest {
assertFlushStrategyTlsSize(100 * GB, 24000);
}
+ @Test
+ public void require_that_summary_read_io_is_set_based_on_disk() {
+ assertSummaryReadIo(ProtonConfig.Summary.Read.Io.DIRECTIO, true);
+ assertSummaryReadIo(ProtonConfig.Summary.Read.Io.MMAP, false);
+ }
+
private static void assertDocumentStoreMaxFileSize(long expFileSizeBytes, int memoryGb) {
assertEquals(expFileSizeBytes, configFromMemorySetting(memoryGb).summary().log().maxfilesize());
}
@@ -97,13 +103,17 @@ public class NodeFlavorTuningTest {
}
private static void assertDocumentStoreNumThreads(int numThreads, double numCores) {
- assertEquals(numThreads, configFromNumCoresSetting(numCores).summary().log().numthreads());
+ assertEquals(numThreads, configFromNumCoresSetting(numCores).background().threads());
}
private static void assertFlushStrategyTlsSize(long expTlsSizeBytes, int diskGb) {
assertEquals(expTlsSizeBytes, configFromDiskSetting(diskGb).flush().memory().maxtlssize());
}
+ private static void assertSummaryReadIo(ProtonConfig.Summary.Read.Io.Enum expValue, boolean fastDisk) {
+ assertEquals(expValue, configFromDiskSetting(fastDisk).summary().read().io());
+ }
+
private static ProtonConfig configFromDiskSetting(boolean fastDisk) {
return getConfig(new FlavorsConfig.Flavor.Builder().
fastDisk(fastDisk));
diff --git a/config-model/src/test/schema-test-files/deployment.xml b/config-model/src/test/schema-test-files/deployment.xml
index 99b1dc1be69..0b47903ae39 100644
--- a/config-model/src/test/schema-test-files/deployment.xml
+++ b/config-model/src/test/schema-test-files/deployment.xml
@@ -3,6 +3,8 @@
<upgrade policy='canary'/>
<test/>
<staging/>
+ <block-change revision='true' version='false' days="mon,tue" hours="14,15"/>
+ <block-change days="mon,tue" hours="14,15" time-zone="CET"/>
<prod global-service-id='qrs'>
<region active='true'>us-west-1</region>
<delay hours='3'/>
diff --git a/config-model/src/test/schema-test-files/services.xml b/config-model/src/test/schema-test-files/services.xml
index 322f4ed8356..4b6eb12208c 100644
--- a/config-model/src/test/schema-test-files/services.xml
+++ b/config-model/src/test/schema-test-files/services.xml
@@ -26,6 +26,9 @@
<metric-set id="my-set2" />
</consumer>
</metrics>
+ <logforwarding>
+ <splunk deployment-server="foo:8989" client-name="foobar"/>
+ </logforwarding>
</admin>
<config name="bar">
diff --git a/config-provisioning/CMakeLists.txt b/config-provisioning/CMakeLists.txt
new file mode 100644
index 00000000000..829ba87fab8
--- /dev/null
+++ b/config-provisioning/CMakeLists.txt
@@ -0,0 +1,3 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(config-provisioning)
+install_config_definition(src/main/resources/configdefinitions/flavors.def config.provisioning.flavors.def)
diff --git a/config-proxy/CMakeLists.txt b/config-proxy/CMakeLists.txt
new file mode 100644
index 00000000000..a87f10573be
--- /dev/null
+++ b/config-proxy/CMakeLists.txt
@@ -0,0 +1,6 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(config-proxy)
+
+vespa_install_script(src/main/sh/vespa-config-ctl.sh vespa-config-ctl bin)
+vespa_install_script(src/main/sh/vespa-config-loadtester.sh vespa-config-loadtester bin)
+vespa_install_script(src/main/sh/vespa-config-verification.sh vespa-config-verification bin)
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java
index 4f298a3a7d7..59dca6e7104 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/DelayedResponseHandler.java
@@ -55,7 +55,8 @@ public class DelayedResponseHandler implements Runnable {
rpcServer.returnOkResponse(request, config);
i++;
} else {
- log.log(LogLevel.WARNING, "No config found for " + request.getConfigKey() + " within timeout, will retry");
+ log.log(LogLevel.WARNING, "Timed out (timeout " + request.getTimeout() + ") getting config " +
+ request.getConfigKey() + ", will retry");
}
}
if (log.isLoggable(LogLevel.SPAM)) {
diff --git a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java
index eff18d02293..7d830484edd 100644
--- a/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java
+++ b/config/src/main/java/com/yahoo/config/subscription/impl/JRTConfigRequester.java
@@ -2,6 +2,8 @@
package com.yahoo.config.subscription.impl;
import java.text.SimpleDateFormat;
+import java.time.Duration;
+import java.time.Instant;
import java.util.TimeZone;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledThreadPoolExecutor;
@@ -39,9 +41,9 @@ public class JRTConfigRequester implements RequestWaiter {
private int fatalFailures = 0; // independent of transientFailures
private int transientFailures = 0; // independent of fatalFailures
private final ScheduledThreadPoolExecutor scheduler = new ScheduledThreadPoolExecutor(1, new JRTSourceThreadFactory());
- private long suspendWarned;
- private long noApplicationWarned;
- private static final long delayBetweenWarnings = 60000; //ms
+ private Instant suspendWarningLogged = Instant.MIN;
+ private Instant noApplicationWarningLogged = Instant.MIN;
+ private static final Duration delayBetweenWarnings = Duration.ofSeconds(60);
private final ConnectionPool connectionPool;
static final float randomFraction = 0.2f;
/* Time to be added to server timeout to create client timeout. This is the time allowed for the server to respond after serverTimeout has elapsed. */
@@ -146,12 +148,11 @@ public class JRTConfigRequester implements RequestWaiter {
break;
case ErrorCode.APPLICATION_NOT_LOADED:
case ErrorCode.UNKNOWN_VESPA_VERSION:
- final long now = System.currentTimeMillis();
- if (noApplicationWarned < (now - delayBetweenWarnings)) {
+ if (noApplicationWarningLogged.isBefore(Instant.now().minus(delayBetweenWarnings))) {
log.log(LogLevel.WARNING, "Request callback failed: " + ErrorCode.getName(jrtReq.errorCode()) +
". Connection spec: " + connection.getAddress() +
", error message: " + jrtReq.errorMessage());
- noApplicationWarned = now;
+ noApplicationWarningLogged = Instant.now();
}
break;
default:
@@ -197,12 +198,11 @@ public class JRTConfigRequester implements RequestWaiter {
JRTConfigSubscription<ConfigInstance> sub,
long delay,
Connection connection) {
- long now = System.currentTimeMillis();
transientFailures++;
- if (suspendWarned < (now - delayBetweenWarnings)) {
+ if (suspendWarningLogged.isBefore(Instant.now().minus(delayBetweenWarnings))) {
log.log(LogLevel.INFO, "Connection to " + connection.getAddress() +
" failed or timed out, clients will keep existing config, will keep trying.");
- suspendWarned = now;
+ suspendWarningLogged = Instant.now();
}
if (sub.getState() != ConfigSubscription.State.OPEN) return;
scheduleNextRequest(jrtReq, sub, delay, calculateErrorTimeout());
@@ -240,7 +240,8 @@ public class JRTConfigRequester implements RequestWaiter {
// Reset counters pertaining to error handling here
fatalFailures = 0;
transientFailures = 0;
- suspendWarned = 0;
+ suspendWarningLogged = Instant.MIN;
+ noApplicationWarningLogged = Instant.MIN;
connection.setSuccess();
sub.setLastCallBackOKTS(System.currentTimeMillis());
if (jrtReq.hasUpdatedGeneration()) {
@@ -293,7 +294,10 @@ public class JRTConfigRequester implements RequestWaiter {
}
public void close() {
- suspendWarned = System.currentTimeMillis(); // Avoid printing warnings after this
+ // Fake that we have logged to avoid printing warnings after this
+ suspendWarningLogged = Instant.now();
+ noApplicationWarningLogged = Instant.now();
+
connectionPool.close();
scheduler.shutdown();
}
diff --git a/config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java b/config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java
index 8714aa0e4f6..4e65ec130ac 100644
--- a/config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java
+++ b/config/src/main/java/com/yahoo/vespa/config/LZ4PayloadCompressor.java
@@ -9,9 +9,9 @@ import net.jpountz.lz4.LZ4Factory;
* Wrapper for LZ4 compression that selects compression level based on properties.
*
* @author lulf
- * @since 5.19
*/
public class LZ4PayloadCompressor {
+
private static final LZ4Factory lz4Factory = LZ4Factory.safeInstance();
private static final String VESPA_CONFIG_PROTOCOL_COMPRESSION_LEVEL = "VESPA_CONFIG_PROTOCOL_COMPRESSION_LEVEL";
private static final int compressionLevel = getCompressionLevel();
@@ -36,4 +36,5 @@ public class LZ4PayloadCompressor {
private LZ4Compressor getCompressor() {
return (compressionLevel < 7) ? lz4Factory.fastCompressor() : lz4Factory.highCompressor();
}
+
}
diff --git a/config/src/vespa/config/frt/frtconfigresponsev3.cpp b/config/src/vespa/config/frt/frtconfigresponsev3.cpp
index 3791d3c55b7..379ebbd1803 100644
--- a/config/src/vespa/config/frt/frtconfigresponsev3.cpp
+++ b/config/src/vespa/config/frt/frtconfigresponsev3.cpp
@@ -58,11 +58,13 @@ FRTConfigResponseV3::readConfigValue() const
Slime * rawData = new Slime();
SlimePtr payloadData(rawData);
DecompressedData data(decompress(((*_returnValues)[1]._data._buf), ((*_returnValues)[1]._data._len), info.compressionType, info.uncompressedSize));
- size_t consumedSize = JsonFormat::decode(data.memRef, *rawData);
- if (consumedSize != data.size) {
- std::string json(make_json(*payloadData, true));
- LOG(error, "Error decoding JSON. Consumed size: %lu, uncompressed size: %u, compression type: %s, assumed uncompressed size(%u), compressed size: %u, slime(%s)", consumedSize, data.size, compressionTypeToString(info.compressionType).c_str(), info.uncompressedSize, ((*_returnValues)[1]._data._len), json.c_str());
- assert(false);
+ if (data.memRef.size > 0) {
+ size_t consumedSize = JsonFormat::decode(data.memRef, *rawData);
+ if (consumedSize == 0) {
+ std::string json(make_json(*payloadData, true));
+ LOG(error, "Error decoding JSON. Consumed size: %lu, uncompressed size: %u, compression type: %s, assumed uncompressed size(%u), compressed size: %u, slime(%s)", consumedSize, data.size, compressionTypeToString(info.compressionType).c_str(), info.uncompressedSize, ((*_returnValues)[1]._data._len), json.c_str());
+ assert(false);
+ }
}
if (LOG_WOULD_LOG(spam)) {
LOG(spam, "read config value md5(%s), payload size: %lu", md5.c_str(), data.memRef.size);
diff --git a/configd/src/apps/sentinel/sentinel.cpp b/configd/src/apps/sentinel/sentinel.cpp
index 45bbbe19cf3..bb05f9e40ad 100644
--- a/configd/src/apps/sentinel/sentinel.cpp
+++ b/configd/src/apps/sentinel/sentinel.cpp
@@ -4,6 +4,7 @@
#include <csignal>
#include <unistd.h>
#include <sys/time.h>
+#include <vespa/vespalib/util/signalhandler.h>
#include <vespa/defaults.h>
#include "config-handler.h"
@@ -14,13 +15,11 @@ using namespace config;
constexpr uint64_t CONFIG_TIMEOUT_MS = 3 * 60 * 1000;
-static int sigPermanent(int sig, void(*handler)(int));
-
-static void gracefulShutdown(int sig);
-static void sigchldHandler(int sig);
-
-sig_atomic_t stop = 0;
-static sig_atomic_t pendingWait = 0;
+static bool stop()
+{
+ return (vespalib::SignalHandler::INT.check() ||
+ vespalib::SignalHandler::TERM.check());
+}
int
main(int argc, char **argv)
@@ -49,10 +48,11 @@ main(int argc, char **argv)
EV_STARTED("config-sentinel");
- sigPermanent(SIGPIPE, SIG_IGN);
- sigPermanent(SIGTERM, gracefulShutdown);
- sigPermanent(SIGINT, gracefulShutdown);
- sigPermanent(SIGCHLD, sigchldHandler);
+ vespalib::SignalHandler::PIPE.ignore();
+ vespalib::SignalHandler::TERM.hook();
+ vespalib::SignalHandler::INT.hook();
+ vespalib::SignalHandler::CHLD.hook();
+
if (setenv("LC_ALL", "C", 1) != 0) {
LOG(error, "Unable to set locale");
exit(EXIT_FAILURE);
@@ -80,15 +80,15 @@ main(int argc, char **argv)
struct timeval lastTv;
gettimeofday(&lastTv, nullptr);
- while (!stop) {
+ while (!stop()) {
try {
- pendingWait = 0;
+ vespalib::SignalHandler::CHLD.clear();
handler.doWork(); // Check for child procs & commands
} catch (InvalidConfigException& ex) {
LOG(warning, "Configuration problem: (ignoring): %s",
ex.what());
}
- if (!pendingWait) {
+ if (!vespalib::SignalHandler::CHLD.check()) {
int maxNum = 0;
fd_set fds;
FD_ZERO(&fds);
@@ -98,7 +98,7 @@ main(int argc, char **argv)
tv.tv_sec = 1;
tv.tv_usec = 0;
- if (!pendingWait) {
+ if (!vespalib::SignalHandler::CHLD.check()) {
select(maxNum, &fds, nullptr, nullptr, &tv);
}
}
@@ -118,29 +118,3 @@ main(int argc, char **argv)
EV_STOPPING("config-sentinel", "normal exit");
return rv;
}
-
-static void
-gracefulShutdown(int sig)
-{
- (void)sig;
- stop = 1;
-}
-
-static void
-sigchldHandler(int sig)
-{
- (void)sig;
- pendingWait = 1;
-}
-
-static int
-sigPermanent(int sig, void(*handler)(int))
-{
- struct sigaction sa;
-
- memset(&sa, 0, sizeof(sa));
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = 0; // no SA_RESTART!
- sa.sa_handler = handler;
- return sigaction(sig, &sa, nullptr);
-}
diff --git a/configd/src/apps/sentinel/service.cpp b/configd/src/apps/sentinel/service.cpp
index fc1f768f989..2f13a05eb4f 100644
--- a/configd/src/apps/sentinel/service.cpp
+++ b/configd/src/apps/sentinel/service.cpp
@@ -3,6 +3,7 @@
#include "service.h"
#include "output-connection.h"
#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/signalhandler.h>
#include <csignal>
#include <unistd.h>
@@ -13,7 +14,11 @@
LOG_SETUP(".service");
#include <vespa/log/llparser.h>
-extern sig_atomic_t stop;
+static bool stop()
+{
+ return (vespalib::SignalHandler::INT.check() ||
+ vespalib::SignalHandler::TERM.check());
+}
using vespalib::make_string;
@@ -212,7 +217,7 @@ Service::start()
static_cast<int>(getpid()));
signal(SIGTERM, SIG_DFL);
signal(SIGINT, SIG_DFL);
- if (stop) {
+ if (stop()) {
kill(getpid(), SIGTERM);
}
if (_restartPenalty > 0) {
@@ -315,7 +320,7 @@ Service::youExited(int status)
} else if (_state == KILLING) {
setState(KILLED);
}
- if (_isAutomatic && _config->autorestart && !stop) {
+ if (_isAutomatic && _config->autorestart && !stop()) {
// ### Implement some rate limiting here maybe?
LOG(debug, "%s: Has autorestart flag, restarting.", name().c_str());
setState(READY);
diff --git a/configdefinitions/CMakeLists.txt b/configdefinitions/CMakeLists.txt
index d8f89e04cc1..ee78759254a 100644
--- a/configdefinitions/CMakeLists.txt
+++ b/configdefinitions/CMakeLists.txt
@@ -8,3 +8,5 @@ vespa_define_module(
LIBS
src/vespa
)
+
+install_fat_java_artifact(configdefinitions)
diff --git a/configdefinitions/src/vespa/CMakeLists.txt b/configdefinitions/src/vespa/CMakeLists.txt
index 016739f4594..9297383c53f 100644
--- a/configdefinitions/src/vespa/CMakeLists.txt
+++ b/configdefinitions/src/vespa/CMakeLists.txt
@@ -5,66 +5,68 @@ vespa_add_library(configdefinitions
DEPENDS
)
vespa_generate_config(configdefinitions application-id.def)
-install(FILES application-id.def RENAME cloud.config.application-id.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(application-id.def cloud.config.application-id.def)
vespa_generate_config(configdefinitions attributes.def)
-install(FILES attributes.def RENAME vespa.config.search.attributes.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(attributes.def vespa.config.search.attributes.def)
vespa_generate_config(configdefinitions cluster-info.def)
-install(FILES cluster-info.def RENAME cloud.config.cluster-info.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(cluster-info.def cloud.config.cluster-info.def)
vespa_generate_config(configdefinitions cluster-list.def)
-install(FILES cluster-list.def RENAME cloud.config.cluster-list.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(cluster-list.def cloud.config.cluster-list.def)
vespa_generate_config(configdefinitions configserver.def)
-install(FILES configserver.def RENAME cloud.config.configserver.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(configserver.def cloud.config.configserver.def)
vespa_generate_config(configdefinitions dispatch.def)
-install(FILES dispatch.def RENAME vespa.config.search.dispatch.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(dispatch.def vespa.config.search.dispatch.def)
vespa_generate_config(configdefinitions fleetcontroller.def)
-install(FILES fleetcontroller.def RENAME vespa.config.content.fleetcontroller.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(fleetcontroller.def vespa.config.content.fleetcontroller.def)
vespa_generate_config(configdefinitions ilscripts.def)
-install(FILES ilscripts.def RENAME vespa.configdefinition.ilscripts.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(ilscripts.def vespa.configdefinition.ilscripts.def)
vespa_generate_config(configdefinitions imported-fields.def)
-install(FILES imported-fields.def RENAME vespa.config.search.imported-fields.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(imported-fields.def vespa.config.search.imported-fields.def)
vespa_generate_config(configdefinitions indexschema.def)
-install(FILES indexschema.def RENAME vespa.config.search.indexschema.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(indexschema.def vespa.config.search.indexschema.def)
vespa_generate_config(configdefinitions lb-services.def)
-install(FILES lb-services.def RENAME cloud.config.lb-services.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(lb-services.def cloud.config.lb-services.def)
vespa_generate_config(configdefinitions load-type.def)
-install(FILES load-type.def RENAME vespa.config.content.load-type.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(load-type.def vespa.config.content.load-type.def)
+vespa_generate_config(configdefinitions logforwarder.def)
+install_config_definition(logforwarder.def cloud.config.logforwarder.def)
vespa_generate_config(configdefinitions messagetyperouteselectorpolicy.def)
-install(FILES messagetyperouteselectorpolicy.def RENAME vespa.config.content.messagetyperouteselectorpolicy.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(messagetyperouteselectorpolicy.def vespa.config.content.messagetyperouteselectorpolicy.def)
vespa_generate_config(configdefinitions model.def)
-install(FILES model.def RENAME cloud.config.model.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(model.def cloud.config.model.def)
vespa_generate_config(configdefinitions orchestrator.def)
-install(FILES orchestrator.def RENAME vespa.orchestrator.config.orchestrator.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(orchestrator.def vespa.orchestrator.config.orchestrator.def)
vespa_generate_config(configdefinitions persistence.def)
-install(FILES persistence.def RENAME vespa.config.content.persistence.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(persistence.def vespa.config.content.persistence.def)
vespa_generate_config(configdefinitions rank-profiles.def)
-install(FILES rank-profiles.def RENAME vespa.config.search.rank-profiles.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(rank-profiles.def vespa.config.search.rank-profiles.def)
vespa_generate_config(configdefinitions routing.def)
-install(FILES routing.def RENAME cloud.config.routing.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(routing.def cloud.config.routing.def)
vespa_generate_config(configdefinitions routing-provider.def)
-install(FILES routing-provider.def RENAME cloud.config.routing-provider.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(routing-provider.def cloud.config.routing-provider.def)
vespa_generate_config(configdefinitions sentinel.def)
-install(FILES sentinel.def RENAME cloud.config.sentinel.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(sentinel.def cloud.config.sentinel.def)
vespa_generate_config(configdefinitions slobroks.def)
-install(FILES slobroks.def RENAME cloud.config.slobroks.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(slobroks.def cloud.config.slobroks.def)
vespa_generate_config(configdefinitions specialtokens.def)
-install(FILES specialtokens.def RENAME vespa.configdefinition.specialtokens.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(specialtokens.def vespa.configdefinition.specialtokens.def)
vespa_generate_config(configdefinitions stor-devices.def)
-install(FILES stor-devices.def RENAME vespa.config.storage.stor-devices.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-devices.def vespa.config.storage.stor-devices.def)
vespa_generate_config(configdefinitions stor-distribution.def)
-install(FILES stor-distribution.def RENAME vespa.config.content.stor-distribution.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-distribution.def vespa.config.content.stor-distribution.def)
vespa_generate_config(configdefinitions stor-filestor.def)
-install(FILES stor-filestor.def RENAME vespa.config.content.stor-filestor.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-filestor.def vespa.config.content.stor-filestor.def)
vespa_generate_config(configdefinitions stor-memfilepersistence.def)
-install(FILES stor-memfilepersistence.def RENAME vespa.config.storage.stor-memfilepersistence.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-memfilepersistence.def vespa.config.storage.stor-memfilepersistence.def)
vespa_generate_config(configdefinitions summary.def)
-install(FILES summary.def RENAME vespa.config.search.summary.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(summary.def vespa.config.search.summary.def)
vespa_generate_config(configdefinitions summarymap.def)
-install(FILES summarymap.def RENAME vespa.config.search.summarymap.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(summarymap.def vespa.config.search.summarymap.def)
vespa_generate_config(configdefinitions upgrading.def)
-install(FILES upgrading.def RENAME vespa.config.content.upgrading.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(upgrading.def vespa.config.content.upgrading.def)
vespa_generate_config(configdefinitions ymon.def)
-install(FILES ymon.def RENAME cloud.config.ymon.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(ymon.def cloud.config.ymon.def)
vespa_generate_config(configdefinitions zookeeper-server.def)
-install(FILES zookeeper-server.def RENAME cloud.config.zookeeper-server.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(zookeeper-server.def cloud.config.zookeeper-server.def)
vespa_generate_config(configdefinitions zookeepers.def)
-install(FILES zookeepers.def RENAME cloud.config.zookeepers.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(zookeepers.def cloud.config.zookeepers.def)
diff --git a/configdefinitions/src/vespa/configserver.def b/configdefinitions/src/vespa/configserver.def
index 19c55941988..28081f8323c 100644
--- a/configdefinitions/src/vespa/configserver.def
+++ b/configdefinitions/src/vespa/configserver.def
@@ -24,7 +24,7 @@ numDelayedResponseThreads int default=1
payloadCompressionType enum { UNCOMPRESSED, LZ4 } default=LZ4
serverId string default="localhost"
hostedVespa bool default=false
-numParallelTenantLoaders int default=1
+numParallelTenantLoaders int default=4
# Zone information
environment string default="prod"
diff --git a/configdefinitions/src/vespa/logforwarder.def b/configdefinitions/src/vespa/logforwarder.def
new file mode 100644
index 00000000000..205e8ad3b8c
--- /dev/null
+++ b/configdefinitions/src/vespa/logforwarder.def
@@ -0,0 +1,6 @@
+namespace=cloud.config
+
+# only splunk type config for now
+
+deploymentServer string default=""
+clientName string default=""
diff --git a/configgen/pom.xml b/configgen/pom.xml
index 0dc53e1e016..6e550ec7321 100644
--- a/configgen/pom.xml
+++ b/configgen/pom.xml
@@ -7,7 +7,6 @@
<artifactId>parent</artifactId>
<version>6-SNAPSHOT</version>
</parent>
- <groupId>com.yahoo.vespa</groupId>
<artifactId>configgen</artifactId>
<packaging>jar</packaging>
<version>6-SNAPSHOT</version>
@@ -27,10 +26,6 @@
<groupId>org.scala-lang.modules</groupId>
<artifactId>scala-xml_${scala.major-version}</artifactId>
</dependency>
- <dependency>
- <groupId>org.scala-lang.modules</groupId>
- <artifactId>scala-parser-combinators_${scala.major-version}</artifactId>
- </dependency>
</dependencies>
<build>
<plugins>
diff --git a/configgen/src/main/scala/com/yahoo/config/codegen/ConfigGenerator.scala b/configgen/src/main/scala/com/yahoo/config/codegen/ConfigGenerator.scala
index 391e1f0a0ef..90e23f24cf3 100644
--- a/configgen/src/main/scala/com/yahoo/config/codegen/ConfigGenerator.scala
+++ b/configgen/src/main/scala/com/yahoo/config/codegen/ConfigGenerator.scala
@@ -7,8 +7,6 @@ import com.yahoo.config.codegen.JavaClassBuilder.Indentation
import com.yahoo.config.codegen.LeafCNode._
import com.yahoo.config.codegen.ReservedWords.{INTERNAL_PREFIX => InternalPrefix}
-import scala.util.parsing.combinator.JavaTokenParsers
-
/**
* @author gjoranv
* @author tonytv
@@ -450,18 +448,4 @@ object ConfigGenerator {
}
}
- /**
- * Deprecated!
- * TODO: Remove when no longer used by the oldest available config-model.
- */
- @deprecated("Use ConfiggenUtil.createClassName() instead", "6.143")
- def createClassName(defName: String): String = {
- val className = defName.split("-").map (_.capitalize).mkString + "Config"
- val parser = new JavaTokenParsers {}
- parser.parseAll(parser.ident, className) match {
- case parser.NoSuccess(msg, _) =>
- throw new CodegenRuntimeException("Illegal config definition file name '" + defName + "': " + msg)
- case success => success.get
- }
- }
}
diff --git a/configserver/CMakeLists.txt b/configserver/CMakeLists.txt
index e54a93b3d28..21cf801a3bc 100644
--- a/configserver/CMakeLists.txt
+++ b/configserver/CMakeLists.txt
@@ -1,9 +1,21 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(configserver)
+
vespa_install_script(src/main/sh/vespa-configserver-remove-state bin)
vespa_install_script(src/main/sh/start-filedistribution libexec/vespa)
vespa_install_script(src/main/sh/ping-configserver libexec/vespa)
vespa_install_script(src/main/sh/start-configserver libexec/vespa)
vespa_install_script(src/main/sh/start-logd libexec/vespa)
vespa_install_script(src/main/sh/stop-configserver libexec/vespa)
+
install(DIRECTORY src/main/resources/logd DESTINATION conf)
install(DIRECTORY src/main/resources/configserver-app DESTINATION conf)
+
+install(CODE "execute_process(COMMAND mkdir -p \$ENV{DESTDIR}/\${CMAKE_INSTALL_PREFIX}/conf/configserver-app/components)")
+install(CODE "execute_process(COMMAND mkdir -p \$ENV{DESTDIR}/\${CMAKE_INSTALL_PREFIX}/conf/configserver-app/config-models)")
+install(CODE "execute_process(COMMAND ln -snf \${CMAKE_INSTALL_PREFIX}/lib/jars/config-model-fat.jar \$ENV{DESTDIR}/\${CMAKE_INSTALL_PREFIX}/conf/configserver-app/components/config-model-fat.jar)")
+install(CODE "execute_process(COMMAND ln -snf \${CMAKE_INSTALL_PREFIX}/lib/jars/configserver-jar-with-dependencies.jar \$ENV{DESTDIR}/\${CMAKE_INSTALL_PREFIX}/conf/configserver-app/components/configserver.jar)")
+install(CODE "execute_process(COMMAND ln -snf \${CMAKE_INSTALL_PREFIX}/lib/jars/orchestrator-jar-with-dependencies.jar \$ENV{DESTDIR}/\${CMAKE_INSTALL_PREFIX}/conf/configserver-app/components/orchestrator.jar)")
+install(CODE "execute_process(COMMAND ln -snf \${CMAKE_INSTALL_PREFIX}/lib/jars/node-repository-jar-with-dependencies.jar \$ENV{DESTDIR}/\${CMAKE_INSTALL_PREFIX}/conf/configserver-app/components/node-repository.jar)")
+install(CODE "execute_process(COMMAND ln -snf \${CMAKE_INSTALL_PREFIX}/lib/jars/zkfacade-jar-with-dependencies.jar \$ENV{DESTDIR}/\${CMAKE_INSTALL_PREFIX}/conf/configserver-app/components/zkfacade.jar)")
+install(CODE "execute_process(COMMAND ln -snf \${CMAKE_INSTALL_PREFIX}/conf/configserver-app/components \$ENV{DESTDIR}/\${CMAKE_INSTALL_PREFIX}/lib/jars/config-models)")
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/RequestHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/RequestHandler.java
index 21d08481ebe..f23c96db6f2 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/RequestHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/RequestHandler.java
@@ -13,57 +13,62 @@ import com.yahoo.vespa.config.protocol.ConfigResponse;
/**
* Instances of this can serve misc config related requests
*
- * @author lulf
- * @since 5.1
+ * @author Ulf Lilleengen
*/
public interface RequestHandler {
/**
* Resolves a config. Mandatory subclass hook for getConfig().
+ *
* @param appId The application id to use
* @param req a config request
* @param vespaVersion vespa version
* @return The resolved config if it exists, else null.
*/
- public ConfigResponse resolveConfig(ApplicationId appId, GetConfigRequest req, Optional<Version> vespaVersion);
+ ConfigResponse resolveConfig(ApplicationId appId, GetConfigRequest req, Optional<Version> vespaVersion);
/**
* Lists all configs (name, configKey) in the config model.
+ *
* @param appId application id to use
* @param vespaVersion optional vespa version
* @param recursive If true descend into all levels
* @return set of keys
*/
- public Set<ConfigKey<?>> listConfigs(ApplicationId appId, Optional<Version> vespaVersion, boolean recursive);
+ Set<ConfigKey<?>> listConfigs(ApplicationId appId, Optional<Version> vespaVersion, boolean recursive);
/**
* Lists all configs (name, configKey) of the given key. The config id of the key is interpreted as a prefix to match.
+ *
* @param appId application id to use
* @param vespaVersion optional vespa version
* @param key def key to match
* @param recursive If true descend into all levels
* @return set of keys
*/
- public Set<ConfigKey<?>> listNamedConfigs(ApplicationId appId, Optional<Version> vespaVersion, ConfigKey<?> key, boolean recursive);
+ Set<ConfigKey<?>> listNamedConfigs(ApplicationId appId, Optional<Version> vespaVersion, ConfigKey<?> key, boolean recursive);
/**
* Lists all available configs produced
+ *
* @param appId application id to use
* @param vespaVersion optional vespa version
* @return set of keys
*/
- public Set<ConfigKey<?>> allConfigsProduced(ApplicationId appId, Optional<Version> vespaVersion);
+ Set<ConfigKey<?>> allConfigsProduced(ApplicationId appId, Optional<Version> vespaVersion);
/**
* List all config ids present
+ *
* @param appId application id to use
* @param vespaVersion optional vespa version
* @return a Set containing all config ids available
*/
- public Set<String> allConfigIds(ApplicationId appId, Optional<Version> vespaVersion);
+ Set<String> allConfigIds(ApplicationId appId, Optional<Version> vespaVersion);
/**
* True if application loaded
+ *
* @param appId The application id to use
* @param vespaVersion optional vespa version
* @return true if app loaded
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java
index 1532c05a56b..8e865f96db3 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelController.java
@@ -13,7 +13,7 @@ import com.yahoo.vespa.config.GetConfigRequest;
import com.yahoo.vespa.config.buildergen.ConfigDefinition;
import com.yahoo.vespa.config.protocol.ConfigResponse;
import com.yahoo.vespa.config.protocol.DefContent;
-import com.yahoo.vespa.config.server.model.SuperModel;
+import com.yahoo.vespa.config.server.model.SuperModelConfigProvider;
import com.yahoo.vespa.config.server.rpc.ConfigResponseFactory;
import java.io.IOException;
@@ -28,12 +28,12 @@ import java.io.StringReader;
*/
public class SuperModelController {
- private final SuperModel model;
+ private final SuperModelConfigProvider model;
private final long generation;
private final ConfigDefinitionRepo configDefinitionRepo;
private final ConfigResponseFactory responseFactory;
- public SuperModelController(SuperModel model, ConfigDefinitionRepo configDefinitionRepo, long generation, ConfigResponseFactory responseFactory) {
+ public SuperModelController(SuperModelConfigProvider model, ConfigDefinitionRepo configDefinitionRepo, long generation, ConfigResponseFactory responseFactory) {
this.model = model;
this.configDefinitionRepo = configDefinitionRepo;
this.generation = generation;
@@ -68,7 +68,7 @@ public class SuperModelController {
}
}
- public SuperModel getSuperModel() {
+ public SuperModelConfigProvider getSuperModel() {
return model;
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java
index 06f7ffa66c4..8eca4f0c455 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelManager.java
@@ -5,6 +5,7 @@ package com.yahoo.vespa.config.server;
import com.google.inject.Inject;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.SuperModel;
import com.yahoo.config.model.api.SuperModelListener;
import com.yahoo.config.model.api.SuperModelProvider;
import com.yahoo.config.provision.ApplicationId;
@@ -12,18 +13,13 @@ import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.GenerationCounter;
-import com.yahoo.vespa.config.server.application.Application;
import com.yahoo.vespa.config.server.application.ApplicationSet;
-import com.yahoo.vespa.config.server.model.SuperModel;
+import com.yahoo.vespa.config.server.model.SuperModelConfigProvider;
import java.time.Instant;
import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedHashMap;
import java.util.List;
-import java.util.Map;
import java.util.Optional;
-import java.util.stream.Collectors;
/**
* Provides a SuperModel - a model of all application instances, and makes it stays
@@ -31,7 +27,7 @@ import java.util.stream.Collectors;
*/
public class SuperModelManager implements SuperModelProvider {
private final Zone zone;
- private SuperModel superModel; // Guarded by 'this' monitor
+ private SuperModelConfigProvider superModelConfigProvider; // Guarded by 'this' monitor
private final List<SuperModelListener> listeners = new ArrayList<>(); // Guarded by 'this' monitor
// Generation of the super model
@@ -46,11 +42,16 @@ public class SuperModelManager implements SuperModelProvider {
this.zone = new Zone(configserverConfig, nodeFlavors);
this.generationCounter = generationCounter;
this.masterGeneration = configserverConfig.masterGeneration();
- makeNewSuperModel(new HashMap<>());
+ makeNewSuperModelConfigProvider(new SuperModel());
}
+ @Override
public synchronized SuperModel getSuperModel() {
- return superModel;
+ return superModelConfigProvider.getSuperModel();
+ }
+
+ public synchronized SuperModelConfigProvider getSuperModelConfigProvider() {
+ return superModelConfigProvider;
}
public synchronized long getGeneration() {
@@ -58,60 +59,41 @@ public class SuperModelManager implements SuperModelProvider {
}
@Override
- public synchronized List<ApplicationInfo> snapshot(SuperModelListener listener) {
+ public synchronized SuperModel snapshot(SuperModelListener listener) {
listeners.add(listener);
- return superModel.applicationModels().values().stream()
- .flatMap(applications -> applications.values().stream())
- .map(Application::toApplicationInfo)
- .collect(Collectors.toList());
+ return superModelConfigProvider.getSuperModel();
}
- public synchronized void configActivated(TenantName tenant, ApplicationSet applicationSet) {
- Map<TenantName, Map<ApplicationId, Application>> newModels = createModelCopy();
- if (!newModels.containsKey(tenant)) {
- // New application has been activated
- newModels.put(tenant, new LinkedHashMap<>());
- } else {
- // Application has been redeployed
- }
+ @Override
+ public Zone getZone() {
+ return zone;
+ }
+ public synchronized void configActivated(TenantName tenant, ApplicationSet applicationSet) {
// TODO: Should supermodel care about multiple versions?
- Application application = applicationSet.getForVersionOrLatest(Optional.empty(), Instant.now());
- newModels.get(tenant).put(applicationSet.getId(), application);
-
- makeNewSuperModel(newModels);
- listeners.stream().forEach(listener -> listener.applicationActivated(application.toApplicationInfo()));
+ ApplicationInfo applicationInfo = applicationSet
+ .getForVersionOrLatest(Optional.empty(), Instant.now())
+ .toApplicationInfo();
+
+ SuperModel newSuperModel = this.superModelConfigProvider
+ .getSuperModel()
+ .cloneAndSetApplication(applicationInfo);
+ makeNewSuperModelConfigProvider(newSuperModel);
+ listeners.stream().forEach(listener ->
+ listener.applicationActivated(newSuperModel, applicationInfo));
}
public synchronized void applicationRemoved(ApplicationId applicationId) {
- Map<TenantName, Map<ApplicationId, Application>> newModels = createModelCopy();
- if (newModels.containsKey(applicationId.tenant())) {
- newModels.get(applicationId.tenant()).remove(applicationId);
- if (newModels.get(applicationId.tenant()).isEmpty()) {
- newModels.remove(applicationId.tenant());
- }
- }
-
- makeNewSuperModel(newModels);
- listeners.stream().forEach(listener -> listener.applicationRemoved(applicationId));
+ SuperModel newSuperModel = this.superModelConfigProvider
+ .getSuperModel()
+ .cloneAndRemoveApplication(applicationId);
+ makeNewSuperModelConfigProvider(newSuperModel);
+ listeners.stream().forEach(listener ->
+ listener.applicationRemoved(newSuperModel, applicationId));
}
- private void makeNewSuperModel(Map<TenantName, Map<ApplicationId, Application>> newModels) {
+ private void makeNewSuperModelConfigProvider(SuperModel newSuperModel) {
generation = masterGeneration + generationCounter.get();
- superModel = new SuperModel(newModels, zone);
- }
-
- private Map<TenantName, Map<ApplicationId, Application>> createModelCopy() {
- Map<TenantName, Map<ApplicationId, Application>> currentModels = superModel.applicationModels();
- Map<TenantName, Map<ApplicationId, Application>> newModels = new LinkedHashMap<>();
- for (Map.Entry<TenantName, Map<ApplicationId, Application>> entry : currentModels.entrySet()) {
- Map<ApplicationId, Application> appMap = new LinkedHashMap<>();
- newModels.put(entry.getKey(), appMap);
- for (Map.Entry<ApplicationId, Application> appEntry : entry.getValue().entrySet()) {
- appMap.put(appEntry.getKey(), appEntry.getValue());
- }
- }
-
- return newModels;
+ superModelConfigProvider = new SuperModelConfigProvider(newSuperModel, zone);
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java
index 95f16a7c1e7..8db7ee9ffc3 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/SuperModelRequestHandler.java
@@ -5,14 +5,11 @@ import com.google.inject.Inject;
import com.yahoo.cloud.config.ConfigserverConfig;
import com.yahoo.config.ConfigInstance;
import com.yahoo.config.model.api.ConfigDefinitionRepo;
-import com.yahoo.config.provision.NodeFlavors;
import com.yahoo.config.provision.Version;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.config.ConfigKey;
-import com.yahoo.vespa.config.GenerationCounter;
import com.yahoo.vespa.config.GetConfigRequest;
import com.yahoo.vespa.config.protocol.ConfigResponse;
-import com.yahoo.vespa.config.server.application.Application;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
import com.yahoo.vespa.config.server.application.ApplicationSet;
@@ -20,8 +17,6 @@ import com.yahoo.vespa.config.server.rpc.ConfigResponseFactory;
import com.yahoo.vespa.config.server.rpc.ConfigResponseFactoryFactory;
import java.io.IOException;
-import java.util.LinkedHashMap;
-import java.util.Map;
import java.util.Optional;
import java.util.Set;
@@ -72,7 +67,7 @@ public class SuperModelRequestHandler implements RequestHandler {
private void updateHandler() {
handler = new SuperModelController(
- superModelManager.getSuperModel(),
+ superModelManager.getSuperModelConfigProvider(),
configDefinitionRepo,
superModelManager.getGeneration(),
responseFactory);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java
index c13d1b3fcfa..cc78c2715e2 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/HttpHandler.java
@@ -13,7 +13,6 @@ import com.yahoo.yolean.Exceptions;
import java.io.PrintWriter;
import java.io.StringWriter;
-import java.time.Duration;
import java.util.concurrent.Executor;
/**
@@ -70,13 +69,6 @@ public class HttpHandler extends LoggingRequestHandler {
}
}
- // Override default, since we need a higher timeout
- // TODO: Make configurable? Should be higher than timeouts used by clients
- @Override
- public Duration getTimeout() {
- return Duration.ofSeconds(910);
- }
-
private String getMessage(Exception e, HttpRequest request) {
if (request.getBooleanProperty("debug")) {
StringWriter sw = new StringWriter();
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandler.java
index bfc5714467e..03a3f3556e4 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/http/v2/SessionPrepareHandler.java
@@ -52,6 +52,11 @@ public class SessionPrepareHandler extends SessionHandler {
}
@Override
+ public Duration getTimeout() {
+ return zookeeperBarrierTimeout.plus(Duration.ofSeconds(10));
+ }
+
+ @Override
protected HttpResponse handlePUT(HttpRequest request) {
Tenant tenant = getExistingTenant(request);
TenantName tenantName = tenant.getName();
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/model/LbServicesProducer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/model/LbServicesProducer.java
index f9665262d76..1806414f510 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/model/LbServicesProducer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/model/LbServicesProducer.java
@@ -1,20 +1,21 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.config.server.model;
+import com.google.common.base.Joiner;
+import com.yahoo.cloud.config.LbServicesConfig;
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.HostInfo;
+import com.yahoo.config.model.api.ServiceInfo;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.Zone;
+
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
-import com.google.common.base.Joiner;
-import com.yahoo.config.model.api.HostInfo;
-import com.yahoo.config.model.api.ServiceInfo;
-import com.yahoo.cloud.config.LbServicesConfig;
-import com.yahoo.config.provision.Zone;
-import com.yahoo.vespa.config.server.application.Application;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.TenantName;
/**
* Produces lb-services cfg
@@ -24,10 +25,10 @@ import com.yahoo.config.provision.TenantName;
*/
public class LbServicesProducer implements LbServicesConfig.Producer {
- private final Map<TenantName, Map<ApplicationId, Application>> models;
+ private final Map<TenantName, Map<ApplicationId, ApplicationInfo>> models;
private final Zone zone;
- public LbServicesProducer(Map<TenantName, Map<ApplicationId, Application>> models, Zone zone) {
+ public LbServicesProducer(Map<TenantName, Map<ApplicationId, ApplicationInfo>> models, Zone zone) {
this.models = models;
this.zone = zone;
}
@@ -41,7 +42,7 @@ public class LbServicesProducer implements LbServicesConfig.Producer {
});
}
- private LbServicesConfig.Tenants.Builder getTenantConfig(Map<ApplicationId, Application> apps) {
+ private LbServicesConfig.Tenants.Builder getTenantConfig(Map<ApplicationId, ApplicationInfo> apps) {
LbServicesConfig.Tenants.Builder tb = new LbServicesConfig.Tenants.Builder();
apps.keySet().stream()
.sorted()
@@ -55,7 +56,7 @@ public class LbServicesProducer implements LbServicesConfig.Producer {
return applicationId.application().value() + ":" + zone.environment().value() + ":" + zone.region().value() + ":" + applicationId.instance().value();
}
- private LbServicesConfig.Tenants.Applications.Builder getAppConfig(Application app) {
+ private LbServicesConfig.Tenants.Applications.Builder getAppConfig(ApplicationInfo app) {
LbServicesConfig.Tenants.Applications.Builder ab = new LbServicesConfig.Tenants.Applications.Builder();
ab.activeRotation(getActiveRotation(app));
app.getModel().getHosts().stream()
@@ -66,7 +67,7 @@ public class LbServicesProducer implements LbServicesConfig.Producer {
return ab;
}
- private boolean getActiveRotation(Application app) {
+ private boolean getActiveRotation(ApplicationInfo app) {
boolean activeRotation = false;
for (HostInfo hostInfo : app.getModel().getHosts()) {
final Optional<ServiceInfo> container = hostInfo.getServices().stream().filter(
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/model/RoutingProducer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/model/RoutingProducer.java
index c8d94235abc..850b3f99d16 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/model/RoutingProducer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/model/RoutingProducer.java
@@ -2,11 +2,11 @@
package com.yahoo.vespa.config.server.model;
import com.yahoo.cloud.config.RoutingConfig;
+import com.yahoo.config.model.api.ApplicationInfo;
import com.yahoo.config.model.api.HostInfo;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ApplicationName;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.vespa.config.server.application.Application;
import com.yahoo.vespa.config.server.tenant.Tenants;
import java.util.Map;
@@ -20,20 +20,22 @@ import java.util.Map;
public class RoutingProducer implements RoutingConfig.Producer {
static final ApplicationName ROUTING_APPLICATION = ApplicationName.from("routing");
- private final Map<TenantName, Map<ApplicationId, Application>> models;
+ private final Map<TenantName, Map<ApplicationId, ApplicationInfo>> models;
- public RoutingProducer(Map<TenantName, Map<ApplicationId, Application>> models) {
+ public RoutingProducer(Map<TenantName, Map<ApplicationId, ApplicationInfo>> models) {
this.models = models;
}
@Override
public void getConfig(RoutingConfig.Builder builder) {
- for (Map<ApplicationId, Application> model : models.values()) {
- model.values().stream().filter(application -> isHostedVespaRoutingApplication(application.getId())).forEach(application -> {
- for (HostInfo host : application.getModel().getHosts()) {
- builder.hosts(host.getHostname());
- }
- });
+ for (Map<ApplicationId, ApplicationInfo> model : models.values()) {
+ model.values().stream()
+ .filter(application -> isHostedVespaRoutingApplication(application.getApplicationId()))
+ .forEach(application -> {
+ for (HostInfo host : application.getModel().getHosts()) {
+ builder.hosts(host.getHostname());
+ }
+ });
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModel.java b/configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModelConfigProvider.java
index eb41373aab8..e087ef64ae3 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModel.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/model/SuperModelConfigProvider.java
@@ -5,15 +5,15 @@ import com.yahoo.cloud.config.LbServicesConfig;
import com.yahoo.cloud.config.RoutingConfig;
import com.yahoo.config.ConfigInstance;
import com.yahoo.config.ConfigurationRuntimeException;
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.SuperModel;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.ConfigKey;
import com.yahoo.vespa.config.ConfigPayload;
-import com.yahoo.vespa.config.server.application.Application;
import java.util.Collections;
-import java.util.LinkedHashMap;
import java.util.Map;
/**
@@ -22,16 +22,20 @@ import java.util.Map;
* @author vegardh
* @since 5.9
*/
-public class SuperModel implements LbServicesConfig.Producer, RoutingConfig.Producer {
+public class SuperModelConfigProvider implements LbServicesConfig.Producer, RoutingConfig.Producer {
- private final Map<TenantName, Map<ApplicationId, Application>> models;
+ private final SuperModel superModel;
private final LbServicesProducer lbProd;
private final RoutingProducer zoneProd;
- public SuperModel(Map<TenantName, Map<ApplicationId, Application>> models, Zone zone) {
- this.models = models;
- this.lbProd = new LbServicesProducer(Collections.unmodifiableMap(models), zone);
- this.zoneProd = new RoutingProducer(Collections.unmodifiableMap(models));
+ public SuperModelConfigProvider(SuperModel superModel, Zone zone) {
+ this.superModel = superModel;
+ this.lbProd = new LbServicesProducer(Collections.unmodifiableMap(superModel.getAllModels()), zone);
+ this.zoneProd = new RoutingProducer(Collections.unmodifiableMap(superModel.getAllModels()));
+ }
+
+ public SuperModel getSuperModel() {
+ return superModel;
}
public ConfigPayload getConfig(ConfigKey<?> configKey) {
@@ -49,7 +53,7 @@ public class SuperModel implements LbServicesConfig.Producer, RoutingConfig.Prod
}
}
- public Map<TenantName, Map<ApplicationId, Application>> applicationModels() { return models; }
+ public Map<TenantName, Map<ApplicationId, ApplicationInfo>> applicationModels() { return superModel.getAllModels(); }
@Override
public void getConfig(LbServicesConfig.Builder builder) {
@@ -65,14 +69,14 @@ public class SuperModel implements LbServicesConfig.Producer, RoutingConfig.Prod
ApplicationId applicationId,
String configId) {
TenantName tenant = applicationId.tenant();
- if (!models.containsKey(tenant)) {
+ if (!superModel.getAllModels().containsKey(tenant)) {
throw new IllegalArgumentException("Tenant " + tenant + " not found");
}
- Map<ApplicationId, Application> applications = models.get(tenant);
+ Map<ApplicationId, ApplicationInfo> applications = superModel.getAllModels().get(tenant);
if (!applications.containsKey(applicationId)) {
throw new IllegalArgumentException("Application id " + applicationId + " not found");
}
- Application application = applications.get(applicationId);
+ ApplicationInfo application = applications.get(applicationId);
ConfigKey<CONFIGTYPE> key = new ConfigKey<>(configClass, configId);
ConfigPayload payload = application.getModel().getConfig(key, null);
return payload.toInstance(configClass, configId);
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
index 6bd21e9cfb6..af4d998c347 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
@@ -7,6 +7,7 @@ import com.yahoo.config.model.api.HostProvisioner;
import com.yahoo.config.model.api.ModelContext;
import com.yahoo.config.model.api.ModelFactory;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ApplicationLockException;
import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.config.provision.AllocatedHosts;
import com.yahoo.config.provision.Rotation;
@@ -87,7 +88,7 @@ public abstract class ModelsBuilder<MODELRESULT extends ModelResult> {
if (allApplicationModels.size() > 0 && allApplicationModels.get(0).getModel().skipOldConfigModels(now))
break;
}
- catch (OutOfCapacityException e) {
+ catch (OutOfCapacityException | ApplicationLockException e) {
// Don't wrap this exception, and don't try to load other model versions as this is (most likely)
// caused by the state of the system, not the model version/application combination
throw e;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java
index 4fe78a30344..3c9917bf17e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/rpc/RpcServer.java
@@ -146,7 +146,7 @@ public class RpcServer implements Runnable, ReloadListener, TenantListener {
}
public void run() {
- log.log(LogLevel.DEBUG, "Ready for requests on " + spec);
+ log.log(LogLevel.INFO, "Rpc server listening on port " + spec.port());
try {
Acceptor acceptor = supervisor.listen(spec);
supervisor.transport().join();
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSessionRepo.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSessionRepo.java
index d7c973617c2..659a44bb339 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSessionRepo.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/RemoteSessionRepo.java
@@ -139,7 +139,7 @@ public class RemoteSessionRepo extends SessionRepo<RemoteSession> implements Nod
return sessions;
}
- synchronized void sessionsChanged() throws NumberFormatException {
+ private synchronized void sessionsChanged() throws NumberFormatException {
List<Long> sessions = getSessionList(directoryCache.getCurrentData());
checkForRemovedSessions(sessions);
checkForAddedSessions(sessions);
@@ -236,6 +236,9 @@ public class RemoteSessionRepo extends SessionRepo<RemoteSession> implements Nod
case CHILD_REMOVED:
sessionsChanged();
break;
+ case CONNECTION_RECONNECTED:
+ sessionsChanged();
+ break;
}
}
@@ -245,7 +248,6 @@ public class RemoteSessionRepo extends SessionRepo<RemoteSession> implements Nod
if (session == null) continue; // session might have been deleted after getting session list
log.log(LogLevel.DEBUG, session.logPre() + "Confirming upload for session " + sessionId);
session.confirmUpload();
-
}
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
index beb62cf3ac9..31be18d9b22 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
@@ -100,6 +100,8 @@ public class SessionPreparer {
preparation.distribute();
preparation.reloadDeployFileDistributor();
}
+ log.log(LogLevel.DEBUG, () -> "time used " + params.getTimeoutBudget().timesUsed() +
+ " : " + params.getApplicationId());
return preparation.result();
} catch (OutOfCapacityException e) {
throw e;
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRequestHandler.java b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRequestHandler.java
index 93c26d532bb..bb3e957e022 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRequestHandler.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/tenant/TenantRequestHandler.java
@@ -30,7 +30,6 @@ import com.yahoo.vespa.config.server.monitoring.Metrics;
* a set of applications belonging to a tenant.
*
* @author Harald Musum
- * @since 5.1
*/
public class TenantRequestHandler implements RequestHandler, ReloadHandler, HostValidator<ApplicationId> {
diff --git a/configserver/src/main/sh/start-configserver b/configserver/src/main/sh/start-configserver
index 43bb6b2ba83..7d3f305a107 100755
--- a/configserver/src/main/sh/start-configserver
+++ b/configserver/src/main/sh/start-configserver
@@ -80,6 +80,7 @@ fixddir () {
chmod 755 $1
}
+fixddir ${VESPA_HOME}/conf/zookeeper
fixfile ${VESPA_HOME}/conf/zookeeper/zookeeper.cfg
fixfile ${VESPA_HOME}/var/zookeeper/myid
fixddir ${VESPA_HOME}/var/zookeeper/version-2
@@ -135,6 +136,7 @@ fixddir $bundlecachedir
vespa-run-as-vespa-user vespa-runserver -s configserver -r 30 -p $pidfile -- \
java \
-Xms128m -Xmx2048m \
+ -XX:+PreserveFramePointer \
-XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=${VESPA_HOME}/var/crash \
-XX:OnOutOfMemoryError='kill -9 %p' \
$jvmargs \
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java
index e4835f47e48..c10d5123ea7 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelControllerTest.java
@@ -2,22 +2,26 @@
package com.yahoo.vespa.config.server;
import com.yahoo.cloud.config.LbServicesConfig;
+import com.yahoo.cloud.config.LbServicesConfig.Tenants.Applications;
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.SuperModel;
import com.yahoo.config.model.application.provider.FilesApplicationPackage;
-import com.yahoo.config.provision.*;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ApplicationName;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.Version;
+import com.yahoo.config.provision.Zone;
import com.yahoo.jrt.Request;
import com.yahoo.vespa.config.ConfigKey;
-import com.yahoo.cloud.config.LbServicesConfig.Tenants.Applications;
import com.yahoo.vespa.config.protocol.CompressionType;
import com.yahoo.vespa.config.protocol.DefContent;
import com.yahoo.vespa.config.protocol.JRTClientConfigRequestV3;
import com.yahoo.vespa.config.protocol.JRTServerConfigRequestV3;
import com.yahoo.vespa.config.protocol.Trace;
-import com.yahoo.vespa.config.server.application.Application;
-import com.yahoo.vespa.config.server.model.SuperModel;
-import com.yahoo.vespa.config.server.monitoring.MetricUpdater;
+import com.yahoo.vespa.config.server.model.SuperModelConfigProvider;
import com.yahoo.vespa.config.server.rpc.UncompressedConfigResponseFactory;
import com.yahoo.vespa.model.VespaModel;
-
import org.junit.Before;
import org.junit.Test;
import org.xml.sax.SAXException;
@@ -43,13 +47,14 @@ public class SuperModelControllerTest {
@Before
public void setupHandler() throws IOException, SAXException {
- Map<TenantName, Map<ApplicationId, Application>> models = new LinkedHashMap<>();
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> models = new LinkedHashMap<>();
models.put(TenantName.from("a"), new LinkedHashMap<>());
File testApp = new File("src/test/resources/deploy/app");
ApplicationId app = ApplicationId.from(TenantName.from("a"),
ApplicationName.from("foo"), InstanceName.defaultName());
- models.get(app.tenant()).put(app, new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp)), new ServerCache(), 4l, Version.fromIntValues(1, 2, 3), MetricUpdater.createTestUpdater(), app));
- handler = new SuperModelController(new SuperModel(models, Zone.defaultZone()), new TestConfigDefinitionRepo(), 2, new UncompressedConfigResponseFactory());
+ models.get(app.tenant()).put(app, new ApplicationInfo(app, 4l, new VespaModel(FilesApplicationPackage.fromFile(testApp))));
+ SuperModel superModel = new SuperModel(models);
+ handler = new SuperModelController(new SuperModelConfigProvider(superModel, Zone.defaultZone()), new TestConfigDefinitionRepo(), 2, new UncompressedConfigResponseFactory());
}
@Test
@@ -77,7 +82,7 @@ public class SuperModelControllerTest {
@Test
public void test_lb_config_multiple_apps() throws IOException, SAXException {
- Map<TenantName, Map<ApplicationId, Application>> models = new LinkedHashMap<>();
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> models = new LinkedHashMap<>();
models.put(TenantName.from("t1"), new LinkedHashMap<>());
models.put(TenantName.from("t2"), new LinkedHashMap<>());
File testApp1 = new File("src/test/resources/deploy/app");
@@ -86,13 +91,13 @@ public class SuperModelControllerTest {
// TODO must fix equals, hashCode on Tenant
Version vespaVersion = Version.fromIntValues(1, 2, 3);
models.get(TenantName.from("t1")).put(applicationId("mysimpleapp"),
- new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp1)), new ServerCache(), 4l, vespaVersion, MetricUpdater.createTestUpdater(), applicationId("mysimpleapp")));
+ new ApplicationInfo(applicationId("mysimpleapp"), 4l, new VespaModel(FilesApplicationPackage.fromFile(testApp1))));
models.get(TenantName.from("t1")).put(applicationId("myadvancedapp"),
- new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp2)), new ServerCache(), 4l, vespaVersion, MetricUpdater.createTestUpdater(), applicationId("myadvancedapp")));
+ new ApplicationInfo(applicationId("myadvancedapp"), 4l, new VespaModel(FilesApplicationPackage.fromFile(testApp2))));
models.get(TenantName.from("t2")).put(applicationId("minetooadvancedapp"),
- new Application(new VespaModel(FilesApplicationPackage.fromFile(testApp3)), new ServerCache(), 4l, vespaVersion, MetricUpdater.createTestUpdater(), applicationId("minetooadvancedapp")));
-
- SuperModelController han = new SuperModelController(new SuperModel(models, Zone.defaultZone()), new TestConfigDefinitionRepo(), 2, new UncompressedConfigResponseFactory());
+ new ApplicationInfo(applicationId("minetooadvancedapp"), 4l, new VespaModel(FilesApplicationPackage.fromFile(testApp3))));
+ SuperModel superModel = new SuperModel(models);
+ SuperModelController han = new SuperModelController(new SuperModelConfigProvider(superModel, Zone.defaultZone()), new TestConfigDefinitionRepo(), 2, new UncompressedConfigResponseFactory());
LbServicesConfig.Builder lb = new LbServicesConfig.Builder();
han.getSuperModel().getConfig(lb);
LbServicesConfig lbc = new LbServicesConfig(lb);
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java
index 4c746eba64e..bc07ac7d79c 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/SuperModelRequestHandlerTest.java
@@ -61,7 +61,7 @@ public class SuperModelRequestHandlerTest {
assertThat(controller.getHandler().getGeneration(), is(gen));
// Test that a new app is used when there already exist an application with the same id
ApplicationId appId = new ApplicationId.Builder().tenant(tenantA).applicationName("foo").build();
- assertThat(((TestApplication) controller.getHandler().getSuperModel().applicationModels().get(tenantA).get(appId)).version, is(2l));
+ assertThat(controller.getHandler().getSuperModel().applicationModels().get(tenantA).get(appId).getGeneration(), is(4l));
gen = counter.increment();
controller.reloadConfig(tenantA, createApp(tenantA, "bar", 2l, 3));
assertThat(controller.getHandler().getGeneration(), is(gen));
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java
index 4d7f33e6ef9..474b93f6972 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/model/LbServicesProducerTest.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.config.server.model;
import com.yahoo.cloud.config.LbServicesConfig;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.NullConfigModelRegistry;
+import com.yahoo.config.model.api.ApplicationInfo;
import com.yahoo.config.model.api.Model;
import com.yahoo.config.model.deploy.DeployProperties;
import com.yahoo.config.model.deploy.DeployState;
@@ -13,18 +14,20 @@ import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Rotation;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.Version;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.ConfigPayload;
-import com.yahoo.vespa.config.server.ServerCache;
-import com.yahoo.vespa.config.server.application.Application;
-import com.yahoo.vespa.config.server.monitoring.MetricUpdater;
import com.yahoo.vespa.model.VespaModel;
import org.junit.Test;
import org.xml.sax.SAXException;
import java.io.IOException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
import static org.hamcrest.Matchers.is;
import static org.junit.Assert.assertFalse;
@@ -43,7 +46,7 @@ public class LbServicesProducerTest {
@Test
public void testDeterministicGetConfig() throws IOException, SAXException {
- Map<TenantName, Map<ApplicationId, Application>> testModel = createTestModel(new DeployState.Builder().rotations(rotations));
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> testModel = createTestModel(new DeployState.Builder().rotations(rotations));
LbServicesConfig last = null;
for (int i = 0; i < 100; i++) {
testModel = randomizeTenant(testModel, i);
@@ -57,7 +60,7 @@ public class LbServicesProducerTest {
@Test
public void testConfigAliases() throws IOException, SAXException {
- Map<TenantName, Map<ApplicationId, Application>> testModel = createTestModel(new DeployState.Builder());
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> testModel = createTestModel(new DeployState.Builder());
LbServicesConfig conf = getLbServicesConfig(Zone.defaultZone(), testModel);
final LbServicesConfig.Tenants.Applications.Hosts.Services services = conf.tenants("foo").applications("foo:prod:default:default").hosts("foo.foo.yahoo.com").services("qrserver");
assertThat(services.servicealiases().size(), is(1));
@@ -85,12 +88,12 @@ public class LbServicesProducerTest {
private LbServicesConfig createModelAndGetLbServicesConfig(RegionName regionName) throws IOException, SAXException {
final Zone zone = new Zone(Environment.prod, regionName);
- Map<TenantName, Map<ApplicationId, Application>> testModel = createTestModel(new DeployState.Builder().
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> testModel = createTestModel(new DeployState.Builder().
properties(new DeployProperties.Builder().zone(zone).build()));
return getLbServicesConfig(new Zone(Environment.prod, regionName), testModel);
}
- private LbServicesConfig getLbServicesConfig(Zone zone, Map<TenantName, Map<ApplicationId, Application>> testModel) {
+ private LbServicesConfig getLbServicesConfig(Zone zone, Map<TenantName, Map<ApplicationId, ApplicationInfo>> testModel) {
LbServicesProducer producer = new LbServicesProducer(testModel, zone);
LbServicesConfig.Builder builder = new LbServicesConfig.Builder();
producer.getConfig(builder);
@@ -99,7 +102,7 @@ public class LbServicesProducerTest {
@Test
public void testConfigAliasesWithRotations() throws IOException, SAXException {
- Map<TenantName, Map<ApplicationId, Application>> testModel = createTestModel(new DeployState.Builder().rotations(rotations));
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> testModel = createTestModel(new DeployState.Builder().rotations(rotations));
RegionName regionName = RegionName.from("us-east-1");
LbServicesConfig conf = getLbServicesConfig(new Zone(Environment.prod, regionName), testModel);
final LbServicesConfig.Tenants.Applications.Hosts.Services services = conf.tenants("foo").applications("foo:prod:" + regionName.value() + ":default").hosts("foo.foo.yahoo.com").services("qrserver");
@@ -113,8 +116,8 @@ public class LbServicesProducerTest {
assertThat(services.endpointaliases(3), is(rotation2));
}
- private Map<TenantName, Map<ApplicationId, Application>> randomizeTenant(Map<TenantName, Map<ApplicationId, Application>> testModel, int seed) {
- Map<TenantName, Map<ApplicationId, Application>> randomizedTenants = new LinkedHashMap<>();
+ private Map<TenantName, Map<ApplicationId, ApplicationInfo>> randomizeTenant(Map<TenantName, Map<ApplicationId, ApplicationInfo>> testModel, int seed) {
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> randomizedTenants = new LinkedHashMap<>();
List<TenantName> keys = new ArrayList<>(testModel.keySet());
Collections.shuffle(keys, new Random(seed));
for (TenantName key : keys) {
@@ -123,8 +126,8 @@ public class LbServicesProducerTest {
return randomizedTenants;
}
- private Map<ApplicationId, Application> randomizeApplications(Map<ApplicationId, Application> applicationIdApplicationMap, int seed) {
- Map<ApplicationId, Application> randomizedApplications = new LinkedHashMap<>();
+ private Map<ApplicationId, ApplicationInfo> randomizeApplications(Map<ApplicationId, ApplicationInfo> applicationIdApplicationMap, int seed) {
+ Map<ApplicationId, ApplicationInfo> randomizedApplications = new LinkedHashMap<>();
List<ApplicationId> keys = new ArrayList<>(applicationIdApplicationMap.keySet());
Collections.shuffle(keys, new Random(seed));
for (ApplicationId key : keys) {
@@ -133,8 +136,8 @@ public class LbServicesProducerTest {
return randomizedApplications;
}
- private Map<TenantName, Map<ApplicationId, Application>> createTestModel(DeployState.Builder deployStateBuilder) throws IOException, SAXException {
- Map<TenantName, Map<ApplicationId, Application>> tMap = new LinkedHashMap<>();
+ private Map<TenantName, Map<ApplicationId, ApplicationInfo>> createTestModel(DeployState.Builder deployStateBuilder) throws IOException, SAXException {
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> tMap = new LinkedHashMap<>();
TenantName foo = TenantName.from("foo");
TenantName bar = TenantName.from("bar");
TenantName baz = TenantName.from("baz");
@@ -144,8 +147,8 @@ public class LbServicesProducerTest {
return tMap;
}
- private Map<ApplicationId, Application> createTestApplications(TenantName tenant, DeployState.Builder deploystateBuilder) throws IOException, SAXException {
- Map<ApplicationId, Application> aMap = new LinkedHashMap<>();
+ private Map<ApplicationId, ApplicationInfo> createTestApplications(TenantName tenant, DeployState.Builder deploystateBuilder) throws IOException, SAXException {
+ Map<ApplicationId, ApplicationInfo> aMap = new LinkedHashMap<>();
ApplicationId fooApp = new ApplicationId.Builder().tenant(tenant).applicationName("foo").build();
ApplicationId barApp = new ApplicationId.Builder().tenant(tenant).applicationName("bar").build();
ApplicationId bazApp = new ApplicationId.Builder().tenant(tenant).applicationName("baz").build();
@@ -155,15 +158,13 @@ public class LbServicesProducerTest {
return aMap;
}
- private Application createApplication(ApplicationId appId, DeployState.Builder deploystateBuilder) throws IOException, SAXException {
- return new Application(createVespaModel(createApplicationPackage(
- appId.tenant() + "." + appId.application() + ".yahoo.com", appId.tenant().value() + "." + appId.application().value() + "2.yahoo.com"),
- deploystateBuilder),
- new ServerCache(),
+ private ApplicationInfo createApplication(ApplicationId appId, DeployState.Builder deploystateBuilder) throws IOException, SAXException {
+ return new ApplicationInfo(
+ appId,
3l,
- Version.fromIntValues(1, 2, 3),
- MetricUpdater.createTestUpdater(),
- appId);
+ createVespaModel(createApplicationPackage(
+ appId.tenant() + "." + appId.application() + ".yahoo.com", appId.tenant().value() + "." + appId.application().value() + "2.yahoo.com"),
+ deploystateBuilder));
}
private ApplicationPackage createApplicationPackage(String host1, String host2) {
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/model/RoutingProducerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/model/RoutingProducerTest.java
index f50dc7828b0..25030332664 100755
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/model/RoutingProducerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/model/RoutingProducerTest.java
@@ -4,15 +4,12 @@ package com.yahoo.vespa.config.server.model;
import com.yahoo.cloud.config.RoutingConfig;
import com.yahoo.config.application.api.ApplicationPackage;
import com.yahoo.config.model.NullConfigModelRegistry;
+import com.yahoo.config.model.api.ApplicationInfo;
import com.yahoo.config.model.api.Model;
import com.yahoo.config.model.deploy.DeployState;
import com.yahoo.config.model.test.MockApplicationPackage;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.Version;
-import com.yahoo.vespa.config.server.ServerCache;
-import com.yahoo.vespa.config.server.application.Application;
-import com.yahoo.vespa.config.server.monitoring.MetricUpdater;
import com.yahoo.vespa.config.server.tenant.Tenants;
import com.yahoo.vespa.model.VespaModel;
import org.junit.Test;
@@ -31,7 +28,7 @@ import static org.junit.Assert.assertThat;
public class RoutingProducerTest {
@Test
public void testNodesFromRoutingAppOnly() throws Exception {
- Map<TenantName, Map<ApplicationId, Application>> testModel = createTestModel(new DeployState.Builder());
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> testModel = createTestModel(new DeployState.Builder());
RoutingProducer producer = new RoutingProducer(testModel);
RoutingConfig.Builder builder = new RoutingConfig.Builder();
producer.getConfig(builder);
@@ -41,8 +38,8 @@ public class RoutingProducerTest {
assertThat(config.hosts(1), is("hosted-vespa.routing2.yahoo.com"));
}
- private Map<TenantName, Map<ApplicationId, Application>> createTestModel(DeployState.Builder deployStateBuilder) throws IOException, SAXException {
- Map<TenantName, Map<ApplicationId, Application>> tMap = new LinkedHashMap<>();
+ private Map<TenantName, Map<ApplicationId, ApplicationInfo>> createTestModel(DeployState.Builder deployStateBuilder) throws IOException, SAXException {
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> tMap = new LinkedHashMap<>();
TenantName foo = TenantName.from("foo");
TenantName bar = TenantName.from("bar");
TenantName routing = TenantName.from(Tenants.HOSTED_VESPA_TENANT.value());
@@ -52,8 +49,8 @@ public class RoutingProducerTest {
return tMap;
}
- private Map<ApplicationId, Application> createTestApplications(TenantName tenant, DeployState.Builder deploystateBuilder) throws IOException, SAXException {
- Map<ApplicationId, Application> aMap = new LinkedHashMap<>();
+ private Map<ApplicationId, ApplicationInfo> createTestApplications(TenantName tenant, DeployState.Builder deploystateBuilder) throws IOException, SAXException {
+ Map<ApplicationId, ApplicationInfo> aMap = new LinkedHashMap<>();
ApplicationId fooApp = new ApplicationId.Builder().tenant(tenant).applicationName("foo").build();
ApplicationId barApp = new ApplicationId.Builder().tenant(tenant).applicationName("bar").build();
ApplicationId routingApp = new ApplicationId.Builder().tenant(tenant).applicationName(RoutingProducer.ROUTING_APPLICATION.value()).build();
@@ -63,15 +60,15 @@ public class RoutingProducerTest {
return aMap;
}
- private Application createApplication(ApplicationId appId, DeployState.Builder deploystateBuilder) throws IOException, SAXException {
- return new Application(createVespaModel(createApplicationPackage(
- appId.tenant() + "." + appId.application() + ".yahoo.com", appId.tenant().value() + "." + appId.application().value() + "2.yahoo.com"),
- deploystateBuilder),
- new ServerCache(),
+ private ApplicationInfo createApplication(ApplicationId appId, DeployState.Builder deploystateBuilder) throws IOException, SAXException {
+ return new ApplicationInfo(
+ appId,
3l,
- Version.fromIntValues(1, 2, 3),
- MetricUpdater.createTestUpdater(),
- appId);
+ createVespaModel(
+ createApplicationPackage(
+ appId.tenant() + "." + appId.application() + ".yahoo.com",
+ appId.tenant().value() + "." + appId.application().value() + "2.yahoo.com"),
+ deploystateBuilder));
}
private ApplicationPackage createApplicationPackage(String host1, String host2) {
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
index 265eaa501a2..5dc529e3381 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/session/SessionPreparerTest.java
@@ -134,7 +134,6 @@ public class SessionPreparerTest extends TestWithCurator {
public void require_that_application_is_prepared() throws Exception {
preparer.prepare(getContext(getApplicationPackage(testApp)), getLogger(), new PrepareParams.Builder().build(), Optional.empty(), tenantPath, Instant.now());
assertThat(fileDistributionFactory.mockFileDistributionProvider.getMockFileDBHandler().sendDeployedFilesCalled, is(2));
- assertThat(fileDistributionFactory.mockFileDistributionProvider.getMockFileDBHandler().limitSendingOfDeployedFilesToCalled, is(2));
// Should be called only once no matter how many model versions are built
assertThat(fileDistributionFactory.mockFileDistributionProvider.getMockFileDBHandler().reloadDeployFileDistributorCalled, is(1));
assertTrue(configCurator.exists(sessionsPath.append(ConfigCurator.USERAPP_ZK_SUBPATH).append("services.xml").getAbsolute()));
diff --git a/container-accesslogging/CMakeLists.txt b/container-accesslogging/CMakeLists.txt
new file mode 100644
index 00000000000..fb2ba9ac031
--- /dev/null
+++ b/container-accesslogging/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_config_definition(src/main/resources/configdefinitions/access-log.def container.core.access-log.def)
diff --git a/container-core/CMakeLists.txt b/container-core/CMakeLists.txt
new file mode 100644
index 00000000000..2d5723865eb
--- /dev/null
+++ b/container-core/CMakeLists.txt
@@ -0,0 +1,16 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_config_definition(src/main/resources/configdefinitions/application-metadata.def container.core.application-metadata.def)
+install_config_definition(src/main/resources/configdefinitions/container-document.def container.core.document.container-document.def)
+install_config_definition(src/main/resources/configdefinitions/container-http.def container.core.container-http.def)
+install_config_definition(src/main/resources/configdefinitions/diagnostics.def container.core.diagnostics.def)
+install_config_definition(src/main/resources/configdefinitions/health-monitor.def container.jdisc.config.health-monitor.def)
+install_config_definition(src/main/resources/configdefinitions/http-filter.def container.core.http.http-filter.def)
+install_config_definition(src/main/resources/configdefinitions/metrics-presentation.def metrics.metrics-presentation.def)
+install_config_definition(src/main/resources/configdefinitions/mockservice.def container.handler.test.mockservice.def)
+install_config_definition(src/main/resources/configdefinitions/qr-logging.def container.core.qr-logging.def)
+install_config_definition(src/main/resources/configdefinitions/qr-searchers.def container.qr-searchers.def)
+install_config_definition(src/main/resources/configdefinitions/qr-templates.def container.core.qr-templates.def)
+install_config_definition(src/main/resources/configdefinitions/qr.def container.qr.def)
+install_config_definition(src/main/resources/configdefinitions/servlet-config.def container.servlet.servlet-config.def)
+install_config_definition(src/main/resources/configdefinitions/threadpool.def container.handler.threadpool.def)
+install_config_definition(src/main/resources/configdefinitions/vip-status.def container.core.vip-status.def)
diff --git a/container-dependencies-enforcer/OWNERS b/container-dependencies-enforcer/OWNERS
new file mode 100644
index 00000000000..3b2ba1ede81
--- /dev/null
+++ b/container-dependencies-enforcer/OWNERS
@@ -0,0 +1 @@
+gjoranv
diff --git a/container-dependencies-enforcer/README.md b/container-dependencies-enforcer/README.md
new file mode 100644
index 00000000000..7a14cea809c
--- /dev/null
+++ b/container-dependencies-enforcer/README.md
@@ -0,0 +1,6 @@
+# Dependencies enforcer for 3rd party container projects.
+
+Enforces that only whitelisted dependencies are visible in
+the provided classpath for 3rd party projects. The whitelist
+must only contain artifacts that are provided runtime from
+the JDisc container.
diff --git a/container-dependencies-enforcer/pom.xml b/container-dependencies-enforcer/pom.xml
index caffe130445..aaa6601333c 100644
--- a/container-dependencies-enforcer/pom.xml
+++ b/container-dependencies-enforcer/pom.xml
@@ -5,13 +5,24 @@
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
- <groupId>com.yahoo.vespa</groupId>
+ <parent>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>parent</artifactId>
+ <version>6-SNAPSHOT</version>
+ </parent>
+
<artifactId>container-dependencies-enforcer</artifactId>
<version>6-SNAPSHOT</version>
<packaging>pom</packaging>
<dependencies>
<dependency>
+ <groupId>junit</groupId>
+ <artifactId>junit</artifactId>
+ <version>4.11</version>
+ <scope>test</scope>
+ </dependency>
+ <dependency>
<groupId>com.yahoo.vespa</groupId>
<artifactId>application</artifactId>
<version>${project.version}</version>
@@ -19,100 +30,115 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
- <artifactId>container-dev</artifactId>
+ <artifactId>container</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
</dependency>
</dependencies>
+ <profiles>
+ <profile>
+ <id>enforce-container-deps</id>
+ <activation>
+ <activeByDefault>false</activeByDefault>
+ <property>
+ <!-- Dependency resolution is broken for old maven used in our CentOS docker containers -->
+ <name>maven.version</name>
+ <value>!3.0.5</value>
+ </property>
+ </activation>
+ <build>
+ <plugins>
+ <plugin>
+ <groupId>org.apache.maven.plugins</groupId>
+ <artifactId>maven-enforcer-plugin</artifactId>
+ <version>3.0.0-M1</version>
+ <executions>
+ <execution>
+ <!-- To allow running 'mvn enforcer:enforce' from the command line -->
+ <id>default-cli</id>
+ <goals>
+ <goal>enforce</goal>
+ </goals>
+ <configuration>
+ <rules>
+ <bannedDependencies>
+ <excludes>
+ <!-- Only allow explicitly listed deps in provided scope -->
+ <exclude>*:*:*:jar:provided:*</exclude>
+ </excludes>
+ <includes>
+ <include>com.yahoo.vespa</include>
+ <include>aopalliance:aopalliance:[1.0]:jar:provided</include>
+ <include>com.fasterxml.jackson.core:jackson-annotations:[${jackson2.version}]:jar:provided</include>
+ <include>com.fasterxml.jackson.core:jackson-core:[${jackson2.version}]:jar:provided</include>
+ <include>com.fasterxml.jackson.core:jackson-databind:[${jackson2.version}]:jar:provided</include>
+ <include>com.fasterxml.jackson.datatype:jackson-datatype-jdk8:[${jackson2.version}]:jar:provided</include>
+
+ <!-- Use version range for jax deps, because jersey and junit affect the versions. -->
+ <include>com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:[2.5.4, ${jackson2.version}]:jar:provided</include>
+ <include>com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:[2.5.4, ${jackson2.version}]:jar:provided</include>
+ <include>com.fasterxml.jackson.module:jackson-module-jaxb-annotations:[2.5.4, ${jackson2.version}]:jar:provided</include>
+
+ <include>com.google.code.findbugs:annotations:[1.3.9]:jar:provided</include>
+ <include>com.google.code.findbugs:jsr305:[1.3.9]:jar:provided</include>
+ <include>com.google.guava:guava:[18.0]:jar:provided</include>
+ <include>com.google.inject.extensions:guice-assistedinject:[3.0]:jar:provided</include>
+ <include>com.google.inject.extensions:guice-multibindings:[3.0]:jar:provided</include>
+ <include>com.google.inject:guice:[3.0]:jar:provided:no_aop</include>
+ <include>commons-codec:commons-codec:[1.4]:jar:provided</include>
+ <include>commons-daemon:commons-daemon:[1.0.3]:jar:provided</include>
+ <include>commons-logging:commons-logging:[1.1.1]:jar:provided</include>
+ <include>javax.annotation:javax.annotation-api:[1.2]:jar:provided</include>
+ <include>javax.inject:javax.inject:[1]:jar:provided</include>
+ <include>javax.servlet:javax.servlet-api:[3.1.0]:jar:provided</include>
+ <include>javax.validation:validation-api:[1.1.0.Final]:jar:provided</include>
+ <include>javax.ws.rs:javax.ws.rs-api:[${javax.ws.rs-api.version}]:jar:provided</include>
+ <include>net.jcip:jcip-annotations:[1.0]:jar:provided</include>
+ <include>net.jpountz.lz4:lz4:[1.3.0]:jar:provided</include>
+ <include>org.apache.felix:org.apache.felix.framework:[4.2.1]:jar:provided</include>
+ <include>org.apache.felix:org.apache.felix.log:[1.0.1]:jar:provided</include>
+ <include>org.apache.felix:org.apache.felix.main:[4.2.1]:jar:provided</include>
+ <include>org.apache.httpcomponents:httpclient:[4.3.6]:jar:provided</include>
+ <include>org.apache.httpcomponents:httpcore:[4.3.3]:jar:provided</include>
+ <include>org.eclipse.jetty:jetty-http:[${jetty.version}]:jar:provided</include>
+ <include>org.eclipse.jetty:jetty-io:[${jetty.version}]:jar:provided</include>
+ <include>org.eclipse.jetty:jetty-util:[${jetty.version}]:jar:provided</include>
+ <include>org.glassfish.hk2.external:aopalliance-repackaged:[2.5.0-b05]:jar:provided</include>
+ <include>org.glassfish.hk2.external:javax.inject:[2.5.0-b05]:jar:provided</include>
+ <include>org.glassfish.hk2:hk2-api:[2.5.0-b05]:jar:provided</include>
+ <include>org.glassfish.hk2:hk2-locator:[2.5.0-b05]:jar:provided</include>
+ <include>org.glassfish.hk2:hk2-utils:[2.5.0-b05]:jar:provided</include>
+ <include>org.glassfish.hk2:osgi-resource-locator:[1.0.1]:jar:provided</include>
+ <include>org.glassfish.jersey.bundles.repackaged:jersey-guava:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.containers:jersey-container-servlet-core:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.containers:jersey-container-servlet:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.core:jersey-client:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.core:jersey-common:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.core:jersey-server:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.ext:jersey-entity-filtering:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.ext:jersey-proxy-client:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.media:jersey-media-jaxb:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.media:jersey-media-json-jackson:[${jersey2.version}]:jar:provided</include>
+ <include>org.glassfish.jersey.media:jersey-media-multipart:[${jersey2.version}]:jar:provided</include>
+ <include>org.javassist:javassist:[3.20.0-GA]:jar:provided</include>
+ <include>org.json:json:[20090211]:jar:provided</include>
+ <include>org.jvnet.mimepull:mimepull:[1.9.6]:jar:provided</include>
+ <include>org.slf4j:jcl-over-slf4j:[1.7.5]:jar:provided</include>
+ <include>org.slf4j:log4j-over-slf4j:[1.7.5]:jar:provided</include>
+ <include>org.slf4j:slf4j-api:[1.7.5]:jar:provided</include>
+ <include>org.slf4j:slf4j-jdk14:[1.7.5]:jar:provided</include>
+ <include>xml-apis:xml-apis:[1.4.01]:jar:provided</include>
+ </includes>
+ </bannedDependencies>
+ </rules>
+ <fail>true</fail>
+ </configuration>
+ </execution>
+ </executions>
+ </plugin>
+ </plugins>
+ </build>
+ </profile>
+ </profiles>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-enforcer-plugin</artifactId>
- <version>3.0.0-M1</version>
- <executions>
- <execution>
- <!-- To allow running 'mvn enforcer:enforce' from the command line -->
- <id>default-cli</id>
- <goals>
- <goal>enforce</goal>
- </goals>
- <configuration>
- <rules>
- <bannedDependencies>
- <excludes>
- <!-- Only allow explicitly listed deps in provided scope -->
- <exclude>*:*:*:jar:provided:*</exclude>
- </excludes>
- <includes>
- <include>com.yahoo.vespa</include>
- <include>aopalliance:aopalliance:1.0:jar:provided</include>
- <include>com.fasterxml.jackson.core:jackson-annotations:2.8.3:jar:provided</include>
- <include>com.fasterxml.jackson.core:jackson-core:2.8.3:jar:provided</include>
- <include>com.fasterxml.jackson.core:jackson-databind:2.8.3:jar:provided</include>
- <include>com.fasterxml.jackson.datatype:jackson-datatype-jdk8:2.8.3:jar:provided</include>
- <include>com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.5.4:jar:provided</include>
- <include>com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.5.4:jar:provided</include>
- <include>com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.5.4:jar:provided</include>
- <include>com.google.code.findbugs:annotations:1.3.9:jar:provided</include>
- <include>com.google.code.findbugs:jsr305:1.3.9:jar:provided</include>
- <include>com.google.guava:guava:18.0:jar:provided</include>
- <include>com.google.inject.extensions:guice-assistedinject:3.0:jar:provided</include>
- <include>com.google.inject.extensions:guice-multibindings:3.0:jar:provided</include>
- <include>com.google.inject:guice:3.0:jar:provided:no_aop</include>
- <include>commons-codec:commons-codec:1.4:jar:provided</include>
- <include>commons-daemon:commons-daemon:1.0.3:jar:provided</include>
- <include>commons-logging:commons-logging:1.1.1:jar:provided</include>
- <include>javax.annotation:javax.annotation-api:1.2:jar:provided</include>
- <include>javax.inject:javax.inject:1:jar:provided</include>
- <include>javax.servlet:javax.servlet-api:3.1.0:jar:provided</include>
- <include>javax.validation:validation-api:1.1.0.Final:jar:provided</include>
- <include>javax.ws.rs:javax.ws.rs-api:2.0.1:jar:provided</include>
- <include>net.jcip:jcip-annotations:1.0:jar:provided</include>
- <include>net.jpountz.lz4:lz4:1.3.0:jar:provided</include>
- <include>org.apache.felix:org.apache.felix.framework:4.2.1:jar:provided</include>
- <include>org.apache.felix:org.apache.felix.log:1.0.1:jar:provided</include>
- <include>org.apache.felix:org.apache.felix.main:4.2.1:jar:provided</include>
- <include>org.apache.httpcomponents:httpclient:4.3.6:jar:provided</include>
- <include>org.apache.httpcomponents:httpcore:4.3.3:jar:provided</include>
- <include>org.eclipse.jetty:jetty-http:9.4.6.v20170531:jar:provided</include>
- <include>org.eclipse.jetty:jetty-io:9.4.6.v20170531:jar:provided</include>
- <include>org.eclipse.jetty:jetty-util:9.4.6.v20170531:jar:provided</include>
- <include>org.glassfish.hk2.external:aopalliance-repackaged:2.5.0-b05:jar:provided</include>
- <include>org.glassfish.hk2.external:javax.inject:2.5.0-b05:jar:provided</include>
- <include>org.glassfish.hk2:hk2-api:2.5.0-b05:jar:provided</include>
- <include>org.glassfish.hk2:hk2-locator:2.5.0-b05:jar:provided</include>
- <include>org.glassfish.hk2:hk2-utils:2.5.0-b05:jar:provided</include>
- <include>org.glassfish.hk2:osgi-resource-locator:1.0.1:jar:provided</include>
- <include>org.glassfish.jersey.bundles.repackaged:jersey-guava:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.containers:jersey-container-servlet-core:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.containers:jersey-container-servlet:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.core:jersey-client:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.core:jersey-common:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.core:jersey-server:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.ext:jersey-entity-filtering:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.ext:jersey-proxy-client:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.media:jersey-media-jaxb:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.media:jersey-media-json-jackson:2.23.2:jar:provided</include>
- <include>org.glassfish.jersey.media:jersey-media-multipart:2.23.2:jar:provided</include>
- <include>org.javassist:javassist:3.20.0-GA:jar:provided</include>
- <include>org.json:json:20090211:jar:provided</include>
- <include>org.jvnet.mimepull:mimepull:1.9.6:jar:provided</include>
- <include>org.scala-lang.modules:scala-parser-combinators_2.11:1.0.1:jar:provided</include>
- <include>org.slf4j:jcl-over-slf4j:1.7.5:jar:provided</include>
- <include>org.slf4j:log4j-over-slf4j:1.7.5:jar:provided</include>
- <include>org.slf4j:slf4j-api:1.7.5:jar:provided</include>
- <include>org.slf4j:slf4j-jdk14:1.7.5:jar:provided</include>
- <include>xml-apis:xml-apis:1.4.01:jar:provided</include>
- </includes>
- </bannedDependencies>
- </rules>
- <fail>true</fail>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
</project>
diff --git a/container-dev/pom.xml b/container-dev/pom.xml
index 35417a314f8..8bb759bf867 100644
--- a/container-dev/pom.xml
+++ b/container-dev/pom.xml
@@ -37,6 +37,16 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>jdisc_http_service</artifactId>
<version>${project.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcpkix-jdk15on</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk15on</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
diff --git a/container-di/CMakeLists.txt b/container-di/CMakeLists.txt
new file mode 100644
index 00000000000..c2b033baa92
--- /dev/null
+++ b/container-di/CMakeLists.txt
@@ -0,0 +1,5 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_config_definition(src/main/resources/configdefinitions/bundles.def container.bundles.def)
+install_config_definition(src/main/resources/configdefinitions/components.def container.components.def)
+install_config_definition(src/main/resources/configdefinitions/jersey-bundles.def container.di.config.jersey-bundles.def)
+install_config_definition(src/main/resources/configdefinitions/jersey-injection.def container.di.config.jersey-injection.def)
diff --git a/container-di/src/main/scala/com/yahoo/container/di/CloudSubscriberFactory.scala b/container-di/src/main/scala/com/yahoo/container/di/CloudSubscriberFactory.scala
index 23ae21ae84e..6c09ef80b85 100644
--- a/container-di/src/main/scala/com/yahoo/container/di/CloudSubscriberFactory.scala
+++ b/container-di/src/main/scala/com/yahoo/container/di/CloudSubscriberFactory.scala
@@ -7,7 +7,6 @@ import com.yahoo.config.ConfigInstance
import com.yahoo.config.subscription.{ConfigHandle, ConfigSource, ConfigSourceSet, ConfigSubscriber}
import com.yahoo.container.di.CloudSubscriberFactory._
import com.yahoo.container.di.config.{Subscriber, SubscriberFactory}
-import com.yahoo.log.LogLevel
import com.yahoo.vespa.config.ConfigKey
import scala.collection.JavaConverters._
@@ -65,7 +64,7 @@ object CloudSubscriberFactory {
override def waitNextGeneration() = {
require(!handles.isEmpty)
- /* Catch and ignore config exceptions due to missing config values for parameters that do
+ /* Catch and just log config exceptions due to missing config values for parameters that do
* not have a default value. These exceptions occur when the user has removed a component
* from services.xml, and the component takes a config that has parameters without a
* default value in the def-file. There is a new 'components' config underway, where the
@@ -79,7 +78,8 @@ object CloudSubscriberFactory {
} catch {
case e: IllegalArgumentException =>
numExceptions += 1
- log.log(LogLevel.DEBUG, "Ignoring exception from the config library: " + e.getMessage + "\n" + e.getStackTrace)
+ log.warning("Got exception from the config system (please ignore the exception if you just removed "
+ + "a component from your application that used the mentioned config): " + e.getMessage)
if (numExceptions >= 5)
throw new IllegalArgumentException("Failed retrieving the next config generation.", e)
}
diff --git a/container-disc/CMakeLists.txt b/container-disc/CMakeLists.txt
new file mode 100644
index 00000000000..1b661020166
--- /dev/null
+++ b/container-disc/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(container-disc)
+
+vespa_install_script(src/main/sh/vespa-start-container-daemon.sh vespa-start-container-daemon bin)
+
+install_config_definition(src/main/resources/configdefinitions/container.jdisc.config.http-server.def)
+install_config_definition(src/main/resources/configdefinitions/jdisc-bindings.def container.jdisc.jdisc-bindings.def)
+install_config_definition(src/main/resources/configdefinitions/jersey-connection.def container.config.jersey.jersey-connection.def)
+install_config_definition(src/main/resources/configdefinitions/jersey-init.def container.config.jersey.jersey-init.def)
+install_config_definition(src/main/resources/configdefinitions/jersey-web-app-pool.def container.config.jersey.jersey-web-app-pool.def)
+install_config_definition(src/main/resources/configdefinitions/metric-defaults.def container.jdisc.config.metric-defaults.def)
+install_config_definition(src/main/resources/configdefinitions/score-board.def jdisc.metrics.yamasconsumer.cloud.score-board.def)
diff --git a/container-disc/pom.xml b/container-disc/pom.xml
index 85f52bff5cc..44afe74459d 100644
--- a/container-disc/pom.xml
+++ b/container-disc/pom.xml
@@ -168,7 +168,6 @@
container-search-and-docproc-jar-with-dependencies.jar,
docprocs-jar-with-dependencies.jar,
jdisc_http_service-jar-with-dependencies.jar,
- persistence-jar-with-dependencies.jar,
vespaclient-container-plugin-jar-with-dependencies.jar,
simplemetrics-jar-with-dependencies.jar,
defaults-jar-with-dependencies.jar,
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java b/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
index e59f012856a..fa2ee8e89a9 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/ConfiguredApplication.java
@@ -42,7 +42,6 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -80,7 +79,7 @@ public final class ConfiguredApplication implements Application {
new ComponentRegistry<>(),
new ComponentRegistry<>());
private final OsgiFramework restrictedOsgiFramework;
- private final AtomicInteger applicationSerialNo = new AtomicInteger(0);
+ private volatile int applicationSerialNo = 0;
private HandlersConfigurerDi configurer;
private ScheduledThreadPoolExecutor shutdownDeadlineExecutor;
private Thread reconfigurerThread;
@@ -173,12 +172,10 @@ public final class ConfiguredApplication implements Application {
startAndStopServers();
log.info("Switching to the latest deployed set of configurations and components. " +
- "Application switch number: " + applicationSerialNo.getAndIncrement());
+ "Application switch number: " + (applicationSerialNo++));
}
private ContainerBuilder createBuilderWithGuiceBindings() {
- log.info("Initializing new set of configurations and components. " +
- "Application switch number: " + applicationSerialNo.get());
ContainerBuilder builder = activator.newContainerBuilder();
setupGuiceBindings(builder.guiceModules());
return builder;
diff --git a/container-disc/src/main/java/com/yahoo/container/jdisc/SslKeyStoreFactoryProvider.java b/container-disc/src/main/java/com/yahoo/container/jdisc/SslKeyStoreFactoryProvider.java
index 8f760fcfab1..64773af905e 100644
--- a/container-disc/src/main/java/com/yahoo/container/jdisc/SslKeyStoreFactoryProvider.java
+++ b/container-disc/src/main/java/com/yahoo/container/jdisc/SslKeyStoreFactoryProvider.java
@@ -8,7 +8,7 @@ import com.yahoo.jdisc.http.ssl.SslKeyStoreFactory;
/**
* An SSL key store provider which provides a factory which throws exception on
- * invocation - as no SSL key store is currently provided by default.
+ * invocation as no SSL key store is currently provided by default.
* The purpose of this is to provide a ssl store factory for injection in the case where
* no secret store component is provided.
*
diff --git a/container-disc/src/main/sh/vespa-start-container-daemon.sh b/container-disc/src/main/sh/vespa-start-container-daemon.sh
index 33f61c3660c..6f0c97f6177 100755
--- a/container-disc/src/main/sh/vespa-start-container-daemon.sh
+++ b/container-disc/src/main/sh/vespa-start-container-daemon.sh
@@ -186,6 +186,7 @@ exec_jsvc () {
configure_preload
exec $numactlcmd $envcmd $jsvc_binary_name \
-Dconfig.id="${VESPA_CONFIG_ID}" \
+ -XX:+PreserveFramePointer \
${jsvc_opts} \
${memory_options} \
${jvm_gcopts} \
@@ -258,6 +259,7 @@ maybe_use_jsvc
exec $numactlcmd $envcmd java \
-Dconfig.id="${VESPA_CONFIG_ID}" \
+ -XX:+PreserveFramePointer \
${memory_options} \
${jvm_gcopts} \
-XX:MaxJavaStackTraceDepth=-1 \
diff --git a/container-jersey2/CMakeLists.txt b/container-jersey2/CMakeLists.txt
new file mode 100644
index 00000000000..d2490563372
--- /dev/null
+++ b/container-jersey2/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(container-jersey2)
diff --git a/container-messagebus/CMakeLists.txt b/container-messagebus/CMakeLists.txt
new file mode 100644
index 00000000000..3cd62d83b73
--- /dev/null
+++ b/container-messagebus/CMakeLists.txt
@@ -0,0 +1,3 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_config_definition(src/main/resources/configdefinitions/container-mbus.def container.jdisc.container-mbus.def)
+install_config_definition(src/main/resources/configdefinitions/session.def container.jdisc.config.session.def)
diff --git a/container-search-and-docproc/CMakeLists.txt b/container-search-and-docproc/CMakeLists.txt
new file mode 100644
index 00000000000..29bbe5bdb0f
--- /dev/null
+++ b/container-search-and-docproc/CMakeLists.txt
@@ -0,0 +1,4 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(container-search-and-docproc)
+
+install_config_definition(src/main/resources/configdefinitions/application-userdata.def container.handler.observability.application-userdata.def)
diff --git a/container-search/CMakeLists.txt b/container-search/CMakeLists.txt
new file mode 100644
index 00000000000..dcf6c3461d3
--- /dev/null
+++ b/container-search/CMakeLists.txt
@@ -0,0 +1,28 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_config_definition(src/main/resources/configdefinitions/cluster.def search.config.cluster.def)
+install_config_definition(src/main/resources/configdefinitions/documentdb-info.def prelude.fastsearch.documentdb-info.def)
+install_config_definition(src/main/resources/configdefinitions/emulation.def prelude.emulation.def)
+install_config_definition(src/main/resources/configdefinitions/federation.def search.federation.federation.def)
+install_config_definition(src/main/resources/configdefinitions/fs4.def container.search.fs4.def)
+install_config_definition(src/main/resources/configdefinitions/index-info.def search.config.index-info.def)
+install_config_definition(src/main/resources/configdefinitions/keyvalue.def prelude.searcher.keyvalue.def)
+install_config_definition(src/main/resources/configdefinitions/legacy-emulation.def container.search.legacy-emulation.def)
+install_config_definition(src/main/resources/configdefinitions/lowercasing.def search.querytransform.lowercasing.def)
+install_config_definition(src/main/resources/configdefinitions/measure-qps.def search.statistics.measure-qps.def)
+install_config_definition(src/main/resources/configdefinitions/page-templates.def search.pagetemplates.page-templates.def)
+install_config_definition(src/main/resources/configdefinitions/provider.def search.federation.provider.def)
+install_config_definition(src/main/resources/configdefinitions/qr-binary-cache-region.def search.cache.qr-binary-cache-region.def)
+install_config_definition(src/main/resources/configdefinitions/qr-binary-cache.def search.cache.qr-binary-cache.def)
+install_config_definition(src/main/resources/configdefinitions/qr-monitor.def prelude.cluster.qr-monitor.def)
+install_config_definition(src/main/resources/configdefinitions/qr-quotetable.def prelude.searcher.qr-quotetable.def)
+install_config_definition(src/main/resources/configdefinitions/qr-start.def search.config.qr-start.def)
+install_config_definition(src/main/resources/configdefinitions/query-profiles.def search.query.profile.config.query-profiles.def)
+install_config_definition(src/main/resources/configdefinitions/rate-limiting.def search.config.rate-limiting.def)
+install_config_definition(src/main/resources/configdefinitions/resolvers.def search.pagetemplates.resolvers.def)
+install_config_definition(src/main/resources/configdefinitions/rewrites.def search.query.rewrite.rewrites.def)
+install_config_definition(src/main/resources/configdefinitions/search-nodes.def search.config.dispatchprototype.search-nodes.def)
+install_config_definition(src/main/resources/configdefinitions/search-with-renderer-handler.def search.handler.search-with-renderer-handler.def)
+install_config_definition(src/main/resources/configdefinitions/searchchain-forward.def search.federation.searchchain-forward.def)
+install_config_definition(src/main/resources/configdefinitions/semantic-rules.def prelude.semantics.semantic-rules.def)
+install_config_definition(src/main/resources/configdefinitions/strict-contracts.def search.federation.strict-contracts.def)
+install_config_definition(src/main/resources/configdefinitions/timing-searcher.def search.statistics.timing-searcher.def)
diff --git a/container/pom.xml b/container/pom.xml
index 3884305e091..4726071db55 100644
--- a/container/pom.xml
+++ b/container/pom.xml
@@ -25,11 +25,28 @@
<groupId>com.yahoo.vespa</groupId>
<artifactId>vespaclient-container-plugin</artifactId>
<version>${project.version}</version>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>application</artifactId>
- <version>${project.version}</version>
+ <exclusions>
+ <exclusion>
+ <groupId>com.fasterxml.jackson.dataformat</groupId>
+ <artifactId>jackson-dataformat-xml</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>metrics</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>vespaclient-core</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>io.airlift</groupId>
+ <artifactId>airline</artifactId>
+ </exclusion>
+ <exclusion>
+ <groupId>org.apache.commons</groupId>
+ <artifactId>commons-lang3</artifactId>
+ </exclusion>
+ </exclusions>
</dependency>
</dependencies>
</project>
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/InstanceInformation.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/InstanceInformation.java
index e862bd744dc..391aaa4639e 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/InstanceInformation.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/application/v4/model/InstanceInformation.java
@@ -2,7 +2,6 @@
package com.yahoo.vespa.hosted.controller.api.application.v4.model;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.yahoo.vespa.hosted.controller.api.cost.CostJsonModel;
import com.yahoo.vespa.hosted.controller.api.identifiers.GitBranch;
import com.yahoo.vespa.hosted.controller.api.identifiers.GitCommit;
import com.yahoo.vespa.hosted.controller.api.identifiers.GitRepository;
@@ -29,6 +28,4 @@ public class InstanceInformation {
public GitRepository gitRepository;
public GitBranch gitBranch;
public GitCommit gitCommit;
-
- public CostJsonModel.Application cost;
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/CostJsonModel.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/CostJsonModel.java
deleted file mode 100644
index bfc451946f6..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/CostJsonModel.java
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.cost;
-
-import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
-import com.fasterxml.jackson.annotation.JsonProperty;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * JSON datamodel for the cost api.
- *
- * @author smorgrav
- */
-public class CostJsonModel {
-
- @JsonIgnoreProperties(ignoreUnknown = true)
- public static class Application {
-
- @JsonProperty
- public String zone;
- @JsonProperty
- public String tenant;
- @JsonProperty
- public String app;
- @JsonProperty
- public int tco;
- @JsonProperty
- public float utilization;
- @JsonProperty
- public float waste;
- @JsonProperty
- public Map<String, Cluster> cluster;
- }
-
- @JsonIgnoreProperties(ignoreUnknown = true)
- public static class Cluster {
-
- @JsonProperty
- public int count;
- @JsonProperty
- public String resource;
- @JsonProperty
- public float utilization;
- @JsonProperty
- public int tco;
- @JsonProperty
- public String flavor;
- @JsonProperty
- public int waste;
- @JsonProperty
- public String type;
- @JsonProperty
- public HardwareResources util;
- @JsonProperty
- public HardwareResources usage;
- @JsonProperty
- public List<String> hostnames;
- }
-
- @JsonIgnoreProperties(ignoreUnknown = true)
- public static class HardwareResources {
-
- @JsonProperty
- public float mem;
- @JsonProperty
- public float disk;
- @JsonProperty
- public float cpu;
- @JsonProperty("diskbusy")
- public float diskBusy;
- }
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/CostResource.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/CostResource.java
deleted file mode 100644
index 3cc6d682f4a..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/CostResource.java
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.cost;
-
-import javax.ws.rs.GET;
-import javax.ws.rs.Path;
-import javax.ws.rs.PathParam;
-import javax.ws.rs.Produces;
-import javax.ws.rs.core.MediaType;
-import java.util.List;
-
-/**
- * Cost and Utilization APi for hosted Vespa.
- *
- * Used to give insight to PEG and application owners about
- * TOC and if the application is reasonable scaled.
- *
- * @author smorgrav
- */
-@Path("v1")
-@Produces(MediaType.APPLICATION_JSON)
-public interface CostResource {
-
- @GET
- @Path("/analysis/cpu")
- List<CostJsonModel.Application> getCPUAnalysis();
-
- @GET
- @Produces("text/csv")
- @Path("/csv")
- String getCSV();
-
- @GET
- @Path("/apps")
- List<CostJsonModel.Application> getApplicationsCost();
-
- @GET
- @Path("/apps/{environment}/{region}/{application}")
- CostJsonModel.Application getApplicationCost(@PathParam("application") String appName,
- @PathParam("region") String regionName,
- @PathParam("environment") String envName);
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/package-info.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/package-info.java
deleted file mode 100644
index 8e95bd4f6f1..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/cost/package-info.java
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-package com.yahoo.vespa.hosted.controller.api.cost;
-
-import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java
index bbd15707cde..7933a23c45f 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/BuildService.java
@@ -8,7 +8,8 @@ public interface BuildService {
/**
* Enqueue a job defined by "buildJob in an external build system, and return the outcome of the enqueue request.
- * This method should return @false only when a retry is in order, and @true otherwise, e.g., on succes, or for invalid jobs.
+ * This method should return @false only when a retry is in order, and @true otherwise, e.g., on success, or for
+ * invalid jobs.
*/
boolean trigger(BuildJob buildJob);
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java
index 5547f94c8cf..1ceb064ad44 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/MetricsService.java
@@ -4,7 +4,7 @@ package com.yahoo.vespa.hosted.controller.api.integration;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Zone;
-import java.util.List;
+import java.util.Map;
/**
* A service which returns metric values on request
@@ -17,7 +17,7 @@ public interface MetricsService {
DeploymentMetrics getDeploymentMetrics(ApplicationId application, Zone zone);
- List<ClusterCostMetrics> getClusterCostMetrics(ApplicationId application, Zone zone);
+ Map<String, SystemMetrics> getSystemMetrics(ApplicationId application, Zone zone);
class DeploymentMetrics {
@@ -67,22 +67,30 @@ public interface MetricsService {
}
- class CostMetrics {
+ class SystemMetrics {
private final double cpuUtil;
private final double memUtil;
private final double diskUtil;
- public CostMetrics(double cpuUtil, double memUtil, double diskUtil) {
+ /**
+ * @param cpuUtil percentage of system cpu utilization
+ * @param memUtil percentage of system memory utilization
+ * @param diskUtil percentage of system disk utilization
+ */
+ public SystemMetrics(double cpuUtil, double memUtil, double diskUtil) {
this.cpuUtil = cpuUtil;
this.memUtil = memUtil;
this.diskUtil = diskUtil;
}
+ /** @return the percentage of cpu utilization **/
public double cpuUtil() { return cpuUtil; }
+ /** @return the percentage of memory utilization **/
public double memUtil() { return memUtil; }
+ /** @return the percentage of disk utilization **/
public double diskUtil() { return diskUtil; }
public static class Builder {
@@ -102,25 +110,9 @@ public interface MetricsService {
this.diskUtil = diskUtil;
}
- public CostMetrics build() { return new CostMetrics(cpuUtil, memUtil, diskUtil); }
+ public SystemMetrics build() { return new SystemMetrics(cpuUtil, memUtil, diskUtil); }
}
}
- class ClusterCostMetrics {
-
- private final String clusterId;
- private final CostMetrics costMetrics;
-
- public ClusterCostMetrics(String clusterId, CostMetrics costMetrics) {
- this.clusterId = clusterId;
- this.costMetrics = costMetrics;
- }
-
- public String clusterId() { return clusterId; }
-
- public CostMetrics costMetrics() { return costMetrics; }
-
- }
-
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerClient.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerClient.java
index 1958c5bd0ff..8c8b5fdf22e 100644
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerClient.java
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/ConfigServerClient.java
@@ -66,4 +66,13 @@ public interface ConfigServerClient {
* @throws IOException If trouble contacting the server
*/
EndpointStatus getGlobalRotationStatus(DeploymentId deployment, String endpoint) throws IOException;
+
+ /**
+ * The nodes allocated to the deployment
+ *
+ * @param deployment The application/zone pair
+ * @return The nodes for this deployment
+ * @throws IOException If unable to retrieve the node list
+ */
+ NodeList getNodeList(DeploymentId deployment) throws IOException;
}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeList.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeList.java
new file mode 100644
index 00000000000..755bec2fcec
--- /dev/null
+++ b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/configserver/NodeList.java
@@ -0,0 +1,36 @@
+package com.yahoo.vespa.hosted.controller.api.integration.configserver;// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+import java.util.List;
+
+/**
+ * @author smorgrav
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class NodeList {
+
+ @JsonProperty("nodes")
+ public List<Node> nodes;
+
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ public static class Node {
+ @JsonProperty("hostname")
+ public String hostname;
+ @JsonProperty("flavor")
+ public String flavor;
+ @JsonProperty("membership")
+ public Membership membership;
+ @JsonProperty("cost")
+ public int cost;
+
+ @JsonIgnoreProperties(ignoreUnknown = true)
+ public static class Membership {
+ @JsonProperty("clustertype")
+ public String clusterType;
+ @JsonProperty("clusterid")
+ public String clusterId;
+ }
+ }
+} \ No newline at end of file
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/ApplicationCost.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/ApplicationCost.java
deleted file mode 100644
index 9bc9cfa8ed0..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/ApplicationCost.java
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.cost;
-
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Cost data model for an application instance. I.e one running vespa application in one zone.
- *
- * @author smorgrav
- */
-// TODO: Make immutable
-// TODO: Make the Application own this and rename to Cost
-// TODO: Enforce constraints
-// TODO: Remove application id elements
-// TODO: Model zone as Zone
-// TODO: Cost per zone + total
-// TODO: Use doubles
-public class ApplicationCost {
-
- /** This contains environment.region */
- private String zone;
-
- private String tenant;
-
- // This must contain applicationName.instanceName. TODO: Fix
- private String app;
-
- private int tco;
- private float utilization;
- private float waste;
- Map<String, ClusterCost> cluster;
-
- /** Create an empty (invalid) application cost */
- public ApplicationCost() {}
-
- public ApplicationCost(String zone, String tenant, String app, int tco, float utilization, float waste,
- Map<String, ClusterCost> clusterCost) {
- this.zone = zone;
- this.tenant = tenant;
- this.app = app;
- this.tco = tco;
- this.utilization = utilization;
- this.waste = waste;
- cluster = new HashMap<>(clusterCost);
- }
-
- public String getZone() {
- return zone;
- }
-
- public void setZone(String zone) {
- this.zone = zone;
- }
-
- public String getApp() {
- return app;
- }
-
- public void setApp(String app) {
- this.app = app;
- }
-
- public Map<String, ClusterCost> getCluster() {
- return cluster;
- }
-
- public void setCluster(Map<String, ClusterCost> cluster) {
- this.cluster = cluster;
- }
-
- public int getTco() {
- return tco;
- }
-
- public void setTco(int tco) {
- if (tco < 0) throw new IllegalArgumentException("TCO cannot be negative");
- this.tco = tco;
- }
-
- public String getTenant() {
- return tenant;
- }
-
- public void setTenant(String tenant) {
- this.tenant = tenant;
- }
-
- public float getUtilization() {
- return utilization;
- }
-
- public void setUtilization(float utilization) {
- if (utilization < 0) throw new IllegalArgumentException("Utilization cannot be negative");
- this.utilization = utilization;
- }
-
- public float getWaste() {
- return waste;
- }
-
- public void setWaste(float waste) {
- this.waste = waste;
- }
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/Backend.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/Backend.java
deleted file mode 100644
index d9edf22d42c..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/Backend.java
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.cost;
-
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.vespa.hosted.controller.common.NotFoundCheckedException;
-
-import java.util.List;
-
-/**
- * Interface for retrieving cost data directly or indirectly from yamas and
- * the noderepository.
- *
- *
- * @author smorgrav
- */
-public interface Backend {
- List<ApplicationCost> getApplicationCost();
- ApplicationCost getApplicationCost(Environment env, RegionName region, ApplicationId appId) throws NotFoundCheckedException;
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/ClusterCost.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/ClusterCost.java
deleted file mode 100644
index 1e41325a4fd..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/ClusterCost.java
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.cost;
-
-import java.util.List;
-
-/**
- * Cost data model for a cluster. I.e one cluster within one vespa application in one zone.
- *
- * @author smorgrav
- */
-// TODO: Use doubles
-// TODO: Make immutable
-// TODO: Enforce constraints
-// TODO: Document content
-public class ClusterCost {
-
- private int count;
- private String resource;
- private float utilization;
- private int tco;
- private String flavor;
- private int waste;
- private String type;
- private float utilMem;
- private float utilCpu;
- private float utilDisk;
- private float utilDiskBusy;
- private float usageMem;
- private float usageCpu;
- private float usageDisk;
- private float usageDiskBusy;
- private List<String> hostnames;
-
- /** Create an empty (invalid) cluster cost */
- public ClusterCost() {}
-
- public int getCount() {
- return count;
- }
-
- public void setCount(int count) {
- this.count = count;
- }
-
- public String getFlavor() {
- return flavor;
- }
-
- public void setFlavor(String flavor) {
- this.flavor = flavor;
- }
-
- public List<String> getHostnames() {
- return hostnames;
- }
-
- public void setHostnames(List<String> hostnames) {
- this.hostnames = hostnames;
- }
-
- public String getResource() {
- return resource;
- }
-
- public void setResource(String resource) {
- this.resource = resource;
- }
-
- public int getTco() {
- return tco;
- }
-
- public void setTco(int tco) {
- this.tco = tco;
- }
-
- public String getType() {
- return type;
- }
-
- public void setType(String type) {
- this.type = type;
- }
-
- public float getUtilization() {
- return utilization;
- }
-
- public void setUtilization(float utilization) {
- validateUtilRatio(utilization);
- this.utilization = utilization;
- }
-
- public int getWaste() {
- return waste;
- }
-
- public void setWaste(int waste) {
- this.waste = waste;
- }
-
- public float getUsageCpu() {
- return usageCpu;
- }
-
- public void setUsageCpu(float usageCpu) {
- validateUsageRatio(usageCpu);
- this.usageCpu = usageCpu;
- }
-
- public float getUsageDisk() {
- return usageDisk;
- }
-
- public void setUsageDisk(float usageDisk) {
- validateUsageRatio(usageDisk);
- this.usageDisk = usageDisk;
- }
-
- public float getUsageMem() {
- return usageMem;
- }
-
- public void setUsageMem(float usageMem) {
- validateUsageRatio(usageMem);
- this.usageMem = usageMem;
- }
-
- public float getUtilCpu() {
- return utilCpu;
- }
-
- public void setUtilCpu(float utilCpu) {
- validateUtilRatio(utilCpu);
- this.utilCpu = utilCpu;
- }
-
- public float getUtilDisk() {
- return utilDisk;
- }
-
- public void setUtilDisk(float utilDisk) {
- validateUtilRatio(utilDisk);
- this.utilDisk = utilDisk;
- }
-
- public float getUtilMem() {
- return utilMem;
- }
-
- public void setUtilMem(float utilMem) {
- validateUsageRatio(utilMem);
- this.utilMem = utilMem;
- }
-
- public float getUsageDiskBusy() {
- return usageDiskBusy;
- }
-
- public void setUsageDiskBusy(float usageDiskBusy) {
- validateUsageRatio(usageDiskBusy);
- this.usageDiskBusy = usageDiskBusy;
- }
-
- public float getUtilDiskBusy() {
- return utilDiskBusy;
- }
-
- public void setUtilDiskBusy(float utilDiskBusy) {
- validateUtilRatio(utilDiskBusy);
- this.utilDiskBusy = utilDiskBusy;
- }
-
- private void validateUsageRatio(float ratio) {
- if (ratio < 0) throw new IllegalArgumentException("Usage cannot be negative");
- if (ratio > 1) throw new IllegalArgumentException("Usage exceed 1 (using more than it has available)");
- }
-
- private void validateUtilRatio(float ratio) {
- if (ratio < 0) throw new IllegalArgumentException("Utilization cannot be negative");
- }
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/Cost.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/Cost.java
deleted file mode 100644
index 7297b60de5c..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/Cost.java
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.cost;
-
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.vespa.hosted.controller.common.NotFoundCheckedException;
-
-import java.util.List;
-
-/**
- * Cost domain model declaration
- *
- * @author smorgrav
- */
-public interface Cost {
-
- /**
- * Calculate a list of the applications that is wasting most
- * in absolute terms. To improve utilization, it should make
- * sense to focus on this list.
- *
- * @return An ordered set of applications with the highest potential for
- * improved CPU utilization across all environments and regions.
- */
- List<ApplicationCost> getCPUAnalysis(int nofApplications);
-
- /**
- * Collect all information and format it as a Cvs blob for download.
- *
- * @return A String with comma separated values. Can be big!
- */
- String getCsvForLocalAnalysis();
-
- /**
- * Get application costs for all applications across all regions and environments
- *
- * @return A list of applications in given zone
- */
- List<ApplicationCost> getApplicationCost();
-
- /**
- * Get application costs for a given application instance in a given zone.
- *
- * @param env Environment like test, dev, perf, staging or prod
- * @param region Region name like us-east-1
- * @param app ApplicationId like tenant:application:instance
- *
- * @return A list of applications in given zone
- */
- ApplicationCost getApplicationCost(Environment env, RegionName region, ApplicationId app)
- throws NotFoundCheckedException;
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/CostJsonModelAdapter.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/CostJsonModelAdapter.java
deleted file mode 100644
index 088b1fa12bc..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/CostJsonModelAdapter.java
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.api.integration.cost;
-
-import com.yahoo.slime.Cursor;
-import com.yahoo.vespa.hosted.controller.api.cost.CostJsonModel;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Map;
-
-/**
- * Converting from cost data model to the JSON data model used in the cost REST API.
- *
- * @author smorgrav
- */
-public class CostJsonModelAdapter {
-
- public static CostJsonModel.Application toJsonModel(ApplicationCost appCost) {
- CostJsonModel.Application app = new CostJsonModel.Application();
- app.zone = appCost.getZone();
- app.tenant = appCost.getTenant();
- app.app = appCost.getApp();
- app.tco = appCost.getTco();
- app.utilization = appCost.getUtilization();
- app.waste = appCost.getWaste();
- app.cluster = new HashMap<>();
- Map<String, ClusterCost> clusterMap = appCost.getCluster();
- for (String key : clusterMap.keySet()) {
- app.cluster.put(key, toJsonModel(clusterMap.get(key)));
- }
-
- return app;
- }
-
- public static void toSlime(ApplicationCost appCost, Cursor object) {
- object.setString("zone", appCost.getZone());
- object.setString("tenant", appCost.getTenant());
- object.setString("app", appCost.getApp());
- object.setLong("tco", appCost.getTco());
- object.setDouble("utilization", appCost.getUtilization());
- object.setDouble("waste", appCost.getWaste());
- Cursor clustersObject = object.setObject("cluster");
- for (Map.Entry<String, ClusterCost> clusterEntry : appCost.getCluster().entrySet())
- toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
- }
-
- public static CostJsonModel.Cluster toJsonModel(ClusterCost clusterCost) {
- CostJsonModel.Cluster cluster = new CostJsonModel.Cluster();
- cluster.count = clusterCost.getCount();
- cluster.resource = clusterCost.getResource();
- cluster.utilization = clusterCost.getUtilization();
- cluster.tco = clusterCost.getTco();
- cluster.flavor = clusterCost.getFlavor();
- cluster.waste = clusterCost.getWaste();
- cluster.type = clusterCost.getType();
- cluster.util = new CostJsonModel.HardwareResources();
- cluster.util.cpu = clusterCost.getUtilCpu();
- cluster.util.mem = clusterCost.getUtilMem();
- cluster.util.disk = clusterCost.getUtilDisk();
- cluster.usage = new CostJsonModel.HardwareResources();
- cluster.usage.cpu = clusterCost.getUsageCpu();
- cluster.usage.mem = clusterCost.getUsageMem();
- cluster.usage.disk = clusterCost.getUsageDisk();
- cluster.hostnames = new ArrayList<>(clusterCost.getHostnames());
- cluster.usage.diskBusy = clusterCost.getUsageDiskBusy();
- cluster.util.diskBusy = clusterCost.getUtilDiskBusy();
- return cluster;
- }
-
- private static void toSlime(ClusterCost clusterCost, Cursor object) {
- object.setLong("count", clusterCost.getCount());
- object.setString("resource", clusterCost.getResource());
- object.setDouble("utilization", clusterCost.getUtilization());
- object.setLong("tco", clusterCost.getTco());
- object.setString("flavor", clusterCost.getFlavor());
- object.setLong("waste", clusterCost.getWaste());
- object.setString("type", clusterCost.getType());
- Cursor utilObject = object.setObject("util");
- utilObject.setDouble("cpu", clusterCost.getUtilCpu());
- utilObject.setDouble("mem", clusterCost.getUtilMem());
- utilObject.setDouble("disk", clusterCost.getUtilDisk());
- utilObject.setDouble("diskBusy", clusterCost.getUtilDiskBusy());
- Cursor usageObject = object.setObject("usage");
- usageObject.setDouble("cpu", clusterCost.getUsageCpu());
- usageObject.setDouble("mem", clusterCost.getUsageMem());
- usageObject.setDouble("disk", clusterCost.getUsageDisk());
- usageObject.setDouble("diskBusy", clusterCost.getUsageDiskBusy());
- Cursor hostnamesArray = object.setArray("hostnames");
- for (String hostname : clusterCost.getHostnames())
- hostnamesArray.addString(hostname);
- }
-
-}
diff --git a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/package-info.java b/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/package-info.java
deleted file mode 100644
index f08e6cc9b36..00000000000
--- a/controller-api/src/main/java/com/yahoo/vespa/hosted/controller/api/integration/cost/package-info.java
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-package com.yahoo.vespa.hosted.controller.api.integration.cost;
-
-import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
index 5cc4c441c3b..8a49ab1083e 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Application.java
@@ -16,6 +16,7 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobReport;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobType;
import java.time.Instant;
+import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.List;
@@ -43,8 +44,9 @@ public class Application {
/** Creates an empty application */
public Application(ApplicationId id) {
- this(id, DeploymentSpec.empty, ValidationOverrides.empty, ImmutableMap.of(), new DeploymentJobs(0L),
- Optional.empty(), false); // TODO: Get rid of the 0
+ this(id, DeploymentSpec.empty, ValidationOverrides.empty, ImmutableMap.of(),
+ new DeploymentJobs(Optional.empty(), Collections.emptyList(), Optional.empty()),
+ Optional.empty(), false);
}
/** Used from persistence layer: Do not use */
@@ -52,7 +54,7 @@ public class Application {
List<Deployment> deployments,
DeploymentJobs deploymentJobs, Optional<Change> deploying, boolean outstandingChange) {
this(id, deploymentSpec, validationOverrides,
- deployments.stream().collect(Collectors.toMap(d -> d.zone(), d -> d)),
+ deployments.stream().collect(Collectors.toMap(Deployment::zone, d -> d)),
deploymentJobs, deploying, outstandingChange);
}
@@ -274,4 +276,20 @@ public class Application {
return true;
}
+ /** Returns true if there is no current change to deploy - i.e deploying is empty or completely deployed */
+ public boolean deployingCompleted() {
+ if ( ! deploying.isPresent()) return true;
+ return deploymentJobs().isDeployed(deploying.get());
+ }
+
+ /** Returns true if there is a current change which is blocked from being deployed to production at this instant */
+ public boolean deployingBlocked(Instant instant) {
+ if ( ! deploying.isPresent()) return false;
+ return deploying.get().blockedBy(deploymentSpec, instant);
+ }
+
+ public boolean isBlocked(Instant instant) {
+ return ! deploymentSpec.canUpgradeAt(instant) || ! deploymentSpec.canChangeRevisionAt(instant);
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
index c04e68b7a50..672f50f83d7 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/ApplicationController.java
@@ -274,22 +274,16 @@ public class ApplicationController {
// Determine what we are doing
Application application = get(applicationId).orElse(new Application(applicationId));
- // Decide version to deploy, if applicable.
Version version;
if (options.deployCurrentVersion)
version = application.currentVersion(controller, zone);
+ else if (canDeployDirectlyTo(zone, options))
+ version = options.vespaVersion.map(Version::new).orElse(controller.systemVersion());
else if ( ! application.deploying().isPresent() && ! zone.environment().isManuallyDeployed())
return unexpectedDeployment(applicationId, zone, applicationPackage);
else
version = application.currentDeployVersion(controller, zone);
- // Ensure that the deploying change is tested
- if ( ! zone.environment().isManuallyDeployed() &&
- ! application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying()))
- throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
- " as pending " + application.deploying().get() +
- " is untested");
-
DeploymentJobs.JobType jobType = DeploymentJobs.JobType.from(controller.zoneRegistry().system(), zone);
ApplicationRevision revision = toApplicationPackageRevision(applicationPackage, options.screwdriverBuildJob);
@@ -301,9 +295,8 @@ public class ApplicationController {
application = application.withProjectId(options.screwdriverBuildJob.get().screwdriverId.value());
if (application.deploying().isPresent() && application.deploying().get() instanceof Change.ApplicationChange)
application = application.withDeploying(Optional.of(Change.ApplicationChange.of(revision)));
- if ( ! triggeredWith(revision, application, jobType) && !zone.environment().isManuallyDeployed() && jobType != null) {
+ if ( ! triggeredWith(revision, application, jobType) && !canDeployDirectlyTo(zone, options) && jobType != null) {
// Triggering information is used to store which changes were made or attempted
- // - For self-triggered applications we don't have any trigger information, so we add it here.
// - For all applications, we don't have complete control over which revision is actually built,
// so we update it here with what we actually triggered if necessary
application = application.with(application.deploymentJobs()
@@ -322,6 +315,12 @@ public class ApplicationController {
store(application, lock); // store missing information even if we fail deployment below
}
+ // Ensure that the deploying change is tested
+ if (! canDeployDirectlyTo(zone, options) &&
+ ! application.deploymentJobs().isDeployableTo(zone.environment(), application.deploying()))
+ throw new IllegalArgumentException("Rejecting deployment of " + application + " to " + zone +
+ " as " + application.deploying().get() + " is not tested");
+
// Carry out deployment
DeploymentId deploymentId = new DeploymentId(applicationId, zone);
ApplicationRotation rotationInDns = registerRotationInDns(deploymentId, getOrAssignRotation(deploymentId,
@@ -331,7 +330,16 @@ public class ApplicationController {
configserverClient.prepare(deploymentId, options, rotationInDns.cnames(), rotationInDns.rotations(),
applicationPackage.zippedContent());
preparedApplication.activate();
- application = application.with(new Deployment(zone, revision, version, clock.instant()));
+
+ Deployment previousDeployment = application.deployments().get(zone);
+ Deployment newDeployment = previousDeployment;
+ if (previousDeployment == null) {
+ newDeployment = new Deployment(zone, revision, version, clock.instant(), new HashMap<>(), new HashMap<>());
+ } else {
+ newDeployment = new Deployment(zone, revision, version, clock.instant(), previousDeployment.clusterUtils(), previousDeployment.clusterInfo());
+ }
+
+ application = application.with(newDeployment);
store(application, lock);
return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse());
@@ -586,6 +594,13 @@ public class ApplicationController {
return curator.lock(application, Duration.ofMinutes(10));
}
+ /** Returns whether a direct deployment to given zone is allowed */
+ private static boolean canDeployDirectlyTo(Zone zone, DeployOptions options) {
+ return !options.screwdriverBuildJob.isPresent() ||
+ options.screwdriverBuildJob.get().screwdriverId == null ||
+ zone.environment().isManuallyDeployed();
+ }
+
private static final class ApplicationRotation {
private final ImmutableSet<String> cnames;
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
index 68912ac55ef..4f5c2ead4da 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/Controller.java
@@ -18,8 +18,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.athens.Athens;
import com.yahoo.vespa.hosted.controller.api.integration.athens.ZmsClient;
import com.yahoo.vespa.hosted.controller.api.integration.chef.Chef;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerClient;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.ApplicationCost;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.Cost;
import com.yahoo.vespa.hosted.controller.api.integration.dns.NameService;
import com.yahoo.vespa.hosted.controller.api.integration.entity.EntityService;
import com.yahoo.vespa.hosted.controller.api.integration.github.GitHub;
@@ -28,7 +26,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.routing.GlobalRoutingSe
import com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RoutingGenerator;
import com.yahoo.vespa.hosted.controller.api.integration.zone.ZoneRegistry;
-import com.yahoo.vespa.hosted.controller.common.NotFoundCheckedException;
import com.yahoo.vespa.hosted.controller.persistence.ControllerDb;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
@@ -81,7 +78,6 @@ public class Controller extends AbstractComponent {
private final EntityService entityService;
private final GlobalRoutingService globalRoutingService;
private final ZoneRegistry zoneRegistry;
- private final Cost cost;
private final ConfigServerClient configServerClient;
private final MetricsService metricsService;
private final Chef chefClient;
@@ -98,19 +94,19 @@ public class Controller extends AbstractComponent {
public Controller(ControllerDb db, CuratorDb curator, RotationRepository rotationRepository,
GitHub gitHub, Jira jiraClient, EntityService entityService,
GlobalRoutingService globalRoutingService,
- ZoneRegistry zoneRegistry, Cost cost, ConfigServerClient configServerClient,
+ ZoneRegistry zoneRegistry, ConfigServerClient configServerClient,
MetricsService metricsService, NameService nameService,
RoutingGenerator routingGenerator, Chef chefClient, Athens athens) {
this(db, curator, rotationRepository,
gitHub, jiraClient, entityService, globalRoutingService, zoneRegistry,
- cost, configServerClient, metricsService, nameService, routingGenerator, chefClient,
+ configServerClient, metricsService, nameService, routingGenerator, chefClient,
Clock.systemUTC(), athens);
}
public Controller(ControllerDb db, CuratorDb curator, RotationRepository rotationRepository,
GitHub gitHub, Jira jiraClient, EntityService entityService,
GlobalRoutingService globalRoutingService,
- ZoneRegistry zoneRegistry, Cost cost, ConfigServerClient configServerClient,
+ ZoneRegistry zoneRegistry, ConfigServerClient configServerClient,
MetricsService metricsService, NameService nameService,
RoutingGenerator routingGenerator, Chef chefClient, Clock clock, Athens athens) {
Objects.requireNonNull(db, "Controller db cannot be null");
@@ -121,7 +117,6 @@ public class Controller extends AbstractComponent {
Objects.requireNonNull(entityService, "EntityService cannot be null");
Objects.requireNonNull(globalRoutingService, "GlobalRoutingService cannot be null");
Objects.requireNonNull(zoneRegistry, "ZoneRegistry cannot be null");
- Objects.requireNonNull(cost, "Cost cannot be null");
Objects.requireNonNull(configServerClient, "ConfigServerClient cannot be null");
Objects.requireNonNull(metricsService, "MetricsService cannot be null");
Objects.requireNonNull(nameService, "NameService cannot be null");
@@ -136,7 +131,6 @@ public class Controller extends AbstractComponent {
this.entityService = entityService;
this.globalRoutingService = globalRoutingService;
this.zoneRegistry = zoneRegistry;
- this.cost = cost;
this.configServerClient = configServerClient;
this.metricsService = metricsService;
this.chefClient = chefClient;
@@ -175,12 +169,6 @@ public class Controller extends AbstractComponent {
public Clock clock() { return clock; }
- public ApplicationCost getApplicationCost(com.yahoo.config.provision.ApplicationId application,
- com.yahoo.config.provision.Zone zone)
- throws NotFoundCheckedException {
- return cost.getApplicationCost(zone.environment(), zone.region(), application);
- }
-
public URI getElkUri(Environment environment, RegionName region, DeploymentId deploymentId) {
return elkUrl(zoneRegistry.getLogServerUri(environment, region), deploymentId);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationList.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationList.java
index 5af4523b579..7ff5a23e178 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationList.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ApplicationList.java
@@ -5,6 +5,7 @@ import com.google.common.collect.ImmutableList;
import com.yahoo.component.Version;
import com.yahoo.config.application.api.DeploymentSpec.UpgradePolicy;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Environment;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.ApplicationController;
@@ -111,6 +112,23 @@ public class ApplicationList {
return listOf(list.stream().filter(a -> ! a.id().instance().value().startsWith("default-pr")));
}
+ /** Returns the subset of applications which have at least one production deployment */
+ public ApplicationList hasProductionDeployment() {
+ return listOf(list.stream().filter(a -> a.deployments().keySet().stream()
+ .anyMatch(zone -> zone.environment() == Environment.prod)));
+ }
+
+ /** Returns the subset of applications that are allowed to upgrade at the given time */
+ public ApplicationList canUpgradeAt(Instant instant) {
+ return listOf(list.stream().filter(a -> a.deploymentSpec().canUpgradeAt(instant)));
+ }
+
+ /** Returns the first n application in this (or all, if there are less than n). */
+ public ApplicationList first(int n) {
+ if (list.size() < n) return this;
+ return new ApplicationList(list.subList(0, n));
+ }
+
// ----------------------------------- Sorting
/**
@@ -122,9 +140,9 @@ public class ApplicationList {
return listOf(list.stream().sorted(Comparator.comparing(application -> application.deployedVersion().orElse(Version.emptyVersion))));
}
- /** Returns the subset of applications which currently do not have any job in progress for the given change */
- public ApplicationList notRunningJobFor(Change.VersionChange change) {
- return listOf(list.stream().filter(a -> !hasRunningJob(a, change)));
+ /** Returns the subset of applications that are not currently upgrading */
+ public ApplicationList notCurrentlyUpgrading(Change.VersionChange change, Instant jobTimeoutLimit) {
+ return listOf(list.stream().filter(a -> !currentlyUpgrading(change, a, jobTimeoutLimit)));
}
// ----------------------------------- Internal helpers
@@ -152,11 +170,11 @@ public class ApplicationList {
return false;
}
- private static boolean hasRunningJob(Application application, Change.VersionChange change) {
+ private static boolean currentlyUpgrading(Change.VersionChange change, Application application, Instant jobTimeoutLimit) {
return application.deploymentJobs().jobStatus().values().stream()
- .filter(JobStatus::inProgress)
- .filter(jobStatus -> jobStatus.lastTriggered().isPresent())
- .map(jobStatus -> jobStatus.lastTriggered().get())
+ .filter(status -> status.isRunning(jobTimeoutLimit))
+ .filter(status -> status.lastTriggered().isPresent())
+ .map(status -> status.lastTriggered().get())
.anyMatch(jobRun -> jobRun.version().equals(change.version()));
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java
index 596cbbebd45..bf067a39bbb 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Change.java
@@ -2,7 +2,9 @@
package com.yahoo.vespa.hosted.controller.application;
import com.yahoo.component.Version;
+import com.yahoo.config.application.api.DeploymentSpec;
+import java.time.Instant;
import java.util.Objects;
import java.util.Optional;
@@ -13,6 +15,9 @@ import java.util.Optional;
*/
public abstract class Change {
+ /** Returns true if this change is blocked by the given spec at the given instant */
+ public abstract boolean blockedBy(DeploymentSpec deploymentSpec, Instant instant);
+
/** A change to the application package revision of an application */
public static class ApplicationChange extends Change {
@@ -27,6 +32,11 @@ public abstract class Change {
public Optional<ApplicationRevision> revision() { return revision; }
@Override
+ public boolean blockedBy(DeploymentSpec deploymentSpec, Instant instant) {
+ return ! deploymentSpec.canChangeRevisionAt(instant);
+ }
+
+ @Override
public int hashCode() { return revision.hashCode(); }
@Override
@@ -71,6 +81,11 @@ public abstract class Change {
public Version version() { return version; }
@Override
+ public boolean blockedBy(DeploymentSpec deploymentSpec, Instant instant) {
+ return ! deploymentSpec.canUpgradeAt(instant);
+ }
+
+ @Override
public int hashCode() { return version.hashCode(); }
@Override
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterCost.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterCost.java
new file mode 100644
index 00000000000..03d0cd28ca1
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterCost.java
@@ -0,0 +1,86 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.application;
+
+/**
+ * Calculates utilization relative to the target utilization,
+ * as well as TCO and waste, for one cluster of one deployment.
+ *
+ * The target utilization is defined by the following assumptions:
+ * 1. CPU contention starts to cause problems on 0.8
+ * 2. Memory management starts to cause problems on 0.7
+ * 3. Load is evenly divided between two deployments - each deployment can handle the other's load.
+ * 4. Memory and disk are agnostic to query load.
+ * 5. Peak utilization (daily variations) is twice the size of the average.
+ *
+ * With this in mind we get:
+ * CPU: 0.8/2/2 = 0.2
+ * Mem: 0.7
+ * Disk: 0.7
+ * Disk busy: 0.3
+ *
+ * @author smorgrav
+ */
+public class ClusterCost {
+ private final double tco;
+ private final double waste;
+ private final ClusterInfo clusterInfo;
+ private final ClusterUtilization systemUtilization;
+ private final ClusterUtilization targetUtilization;
+ private final ClusterUtilization resultUtilization;
+
+ /**
+ * @param clusterInfo Value object with cluster info e.g. the TCO for the hardware used
+ * @param systemUtilization Utilization of system resources (as ratios)
+ */
+ public ClusterCost(ClusterInfo clusterInfo,
+ ClusterUtilization systemUtilization) {
+
+ this.clusterInfo = clusterInfo;
+ this.systemUtilization = systemUtilization;
+ this.targetUtilization = new ClusterUtilization(0.7,0.2, 0.7, 0.3);
+ this.resultUtilization = calculateResultUtilization(systemUtilization, targetUtilization);
+
+ this.tco = clusterInfo.getCost() * Math.min(1, this.resultUtilization.getMaxUtilization());
+ this.waste = clusterInfo.getCost() - tco;
+ }
+
+ /** @return TCO in dollars */
+ public double getTco() {
+ return tco;
+ }
+
+ /** @return Waste in dollars */
+ public double getWaste() {
+ return waste;
+ }
+
+ public ClusterInfo getClusterInfo() {
+ return clusterInfo;
+ }
+
+ public ClusterUtilization getSystemUtilization() {
+ return systemUtilization;
+ }
+
+ public ClusterUtilization getTargetUtilization() {
+ return targetUtilization;
+ }
+
+ public ClusterUtilization getResultUtilization() {
+ return resultUtilization;
+ }
+
+ static ClusterUtilization calculateResultUtilization(ClusterUtilization system, ClusterUtilization target) {
+ double cpu = ratio(system.getCpu(),target.getCpu());
+ double mem = ratio(system.getMemory(),target.getMemory());
+ double disk = ratio(system.getDisk(),target.getDisk());
+ double diskbusy = ratio(system.getDiskBusy(),target.getDiskBusy());
+
+ return new ClusterUtilization(mem, cpu, disk, diskbusy);
+ }
+
+ private static double ratio(double a, double b) {
+ if (b == 0) return 1;
+ return a/b;
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterInfo.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterInfo.java
new file mode 100644
index 00000000000..cb39177c811
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterInfo.java
@@ -0,0 +1,40 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.application;
+
+import com.yahoo.config.provision.ClusterSpec;
+
+import java.util.List;
+
+/**
+ * Value object of static cluster information, in particular the TCO
+ * of the hardware used for this cluster.
+ *
+ * @author smorgrav
+ */
+public class ClusterInfo {
+ private final String flavor;
+ private final int cost;
+ private final ClusterSpec.Type clusterType;
+ private final List<String> hostnames;
+
+ public ClusterInfo(String flavor, int cost, ClusterSpec.Type clusterType, List<String> hostnames) {
+ this.flavor = flavor;
+ this.cost = cost;
+ this.clusterType = clusterType;
+ this.hostnames = hostnames;
+ }
+
+ public String getFlavor() {
+ return flavor;
+ }
+
+ public int getCost() { return cost; }
+
+ public ClusterSpec.Type getClusterType() {
+ return clusterType;
+ }
+
+ public List<String> getHostnames() {
+ return hostnames;
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterUtilization.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterUtilization.java
new file mode 100644
index 00000000000..ff92ce36d1b
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/ClusterUtilization.java
@@ -0,0 +1,63 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.application;
+
+/**
+ * System resources as _ratios_ of available resources.
+ *
+ * Can be for actual readings or target numbers.
+ *
+ * @author smorgrav
+ */
+public class ClusterUtilization {
+
+ private final double memory;
+ private final double cpu;
+ private final double disk;
+ private final double diskBusy;
+ private final double maxUtilization;
+
+ /**
+ * Resource utilization as ratios. The ratio is normally between 0 and 1 where
+ * one is fully utilized - but can be higher if it consumes more than it is guaranteed.
+ *
+ * @param memory Memory utilization
+ * @param cpu CPU utilization
+ * @param disk Disk utilization
+ * @param diskBusy Disk busy
+ */
+ public ClusterUtilization(double memory, double cpu, double disk, double diskBusy) {
+ this.memory = memory;
+ this.cpu = cpu;
+ this.disk = disk;
+ this.diskBusy = diskBusy;
+
+ double maxUtil = Math.max(cpu, disk);
+ maxUtil = Math.max(maxUtil, memory);
+ this.maxUtilization = Math.max(maxUtil, diskBusy);
+ }
+
+ /** @return The utilization ratio of the resource that is utilized the most. */
+ public double getMaxUtilization() {
+ return maxUtilization;
+ }
+
+ /** @return The utilization ratio for memory */
+ public double getMemory() {
+ return memory;
+ }
+
+ /** @return The utilization ratio for cpu */
+ public double getCpu() {
+ return cpu;
+ }
+
+ /** @return The utilization ratio for disk */
+ public double getDisk() {
+ return disk;
+ }
+
+ /** @return The disk busy ratio */
+ public double getDiskBusy() {
+ return diskBusy;
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java
index 75e0f82cdcf..01219e940a3 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/Deployment.java
@@ -2,9 +2,12 @@
package com.yahoo.vespa.hosted.controller.application;
import com.yahoo.component.Version;
+import com.yahoo.config.provision.ClusterSpec.Id;
import com.yahoo.config.provision.Zone;
import java.time.Instant;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Objects;
/**
@@ -18,16 +21,26 @@ public class Deployment {
private final ApplicationRevision revision;
private final Version version;
private final Instant deployTime;
+ private final Map<Id, ClusterUtilization> clusterUtils;
+ private final Map<Id, ClusterInfo> clusterInfo;
public Deployment(Zone zone, ApplicationRevision revision, Version version, Instant deployTime) {
+ this(zone, revision, version, deployTime, new HashMap<>(), new HashMap<>());
+ }
+
+ public Deployment(Zone zone, ApplicationRevision revision, Version version, Instant deployTime, Map<Id, ClusterUtilization> clusterUtils, Map<Id, ClusterInfo> clusterInfo) {
Objects.requireNonNull(zone, "zone cannot be null");
Objects.requireNonNull(revision, "revision cannot be null");
Objects.requireNonNull(version, "version cannot be null");
Objects.requireNonNull(deployTime, "deployTime cannot be null");
+ Objects.requireNonNull(clusterUtils, "clusterUtils cannot be null");
+ Objects.requireNonNull(clusterInfo, "clusterInfo cannot be null");
this.zone = zone;
this.revision = revision;
this.version = version;
this.deployTime = deployTime;
+ this.clusterUtils = clusterUtils;
+ this.clusterInfo = clusterInfo;
}
/** Returns the zone this was deployed to */
@@ -42,9 +55,39 @@ public class Deployment {
/** Returns the time this was deployed */
public Instant at() { return deployTime; }
+ public Map<Id, ClusterInfo> clusterInfo() {
+ return clusterInfo;
+ }
+
+ public Map<Id, ClusterUtilization> clusterUtils() {
+ return clusterUtils;
+ }
+
+ public Deployment withClusterUtils(Map<Id, ClusterUtilization> clusterUtilization) {
+ return new Deployment(zone, revision, version, deployTime, clusterUtilization, clusterInfo);
+ }
+
+ public Deployment withClusterInfo(Map<Id, ClusterInfo> newClusterInfo) {
+ return new Deployment(zone, revision, version, deployTime, clusterUtils, newClusterInfo);
+ }
+
+ /**
+ * Calculate cost for this deployment.
+ *
+ * This is based on cluster utilization and cluster info.
+ */
+ public DeploymentCost calculateCost() {
+
+ Map<String, ClusterCost> costClusters = new HashMap<>();
+ for (Id clusterId : clusterUtils.keySet()) {
+ costClusters.put(clusterId.value(), new ClusterCost(clusterInfo.get(clusterId), clusterUtils.get(clusterId)));
+ }
+
+ return new DeploymentCost(costClusters);
+ }
+
@Override
public String toString() {
return "deployment to " + zone + " of " + revision + " on version " + version + " at " + deployTime;
}
-
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentCost.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentCost.java
new file mode 100644
index 00000000000..fce825bd99e
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentCost.java
@@ -0,0 +1,54 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.application;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Calculates cost for an application deployment.
+ *
+ * @author smorgrav
+ */
+public class DeploymentCost {
+
+ private final double utilization;
+ private final double waste;
+ private final double tco;
+
+ private final Map<String, ClusterCost> clusters;
+
+ DeploymentCost(Map<String, ClusterCost> clusterCosts) {
+ clusters = new HashMap<>(clusterCosts);
+
+ double tco = 0;
+ double util = 0;
+ double waste = 0;
+
+ for (ClusterCost costCluster : clusterCosts.values()) {
+ tco += costCluster.getTco();
+ waste += costCluster.getWaste();
+ int nodesInCluster = costCluster.getClusterInfo().getHostnames().size();
+ util = Math.max(util, nodesInCluster*costCluster.getResultUtilization().getMaxUtilization());
+ }
+
+ this.utilization = util;
+ this.waste = waste;
+ this.tco = tco;
+ }
+
+ public Map<String, ClusterCost> getCluster() {
+ return clusters;
+ }
+
+ public double getTco() {
+ return tco;
+ }
+
+ public double getUtilization() {
+ return utilization;
+ }
+
+ public double getWaste() {
+ return waste;
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java
index 722e8d10f40..1ffa06bb624 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/DeploymentJobs.java
@@ -32,24 +32,20 @@ public class DeploymentJobs {
private final ImmutableMap<JobType, JobStatus> status;
private final Optional<String> jiraIssueId;
- /** Creates an empty set of deployment jobs */
- public DeploymentJobs(long projectId) {
- this(Optional.of(projectId), ImmutableMap.of(), Optional.empty());
- }
-
- public DeploymentJobs(Optional<Long> projectId, Collection<JobStatus> jobStatusEntries, Optional<String> jiraIssueId) {
+ public DeploymentJobs(Optional<Long> projectId, Collection<JobStatus> jobStatusEntries,
+ Optional<String> jiraIssueId) {
this(projectId, asMap(jobStatusEntries), jiraIssueId);
}
-
+
private DeploymentJobs(Optional<Long> projectId, Map<JobType, JobStatus> status, Optional<String> jiraIssueId) {
- Objects.requireNonNull(projectId, "projectId cannot be null");
+ requireId(projectId, "projectId cannot be null or <= 0");
Objects.requireNonNull(status, "status cannot be null");
Objects.requireNonNull(jiraIssueId, "jiraIssueId cannot be null");
this.projectId = projectId;
this.status = ImmutableMap.copyOf(status);
this.jiraIssueId = jiraIssueId;
}
-
+
private static Map<JobType, JobStatus> asMap(Collection<JobStatus> jobStatusEntries) {
ImmutableMap.Builder<JobType, JobStatus> b = new ImmutableMap.Builder<>();
for (JobStatus jobStatusEntry : jobStatusEntries)
@@ -95,10 +91,6 @@ public class DeploymentJobs {
status.remove(job);
return new DeploymentJobs(projectId, status, jiraIssueId);
}
-
- public DeploymentJobs asSelfTriggering(boolean selfTriggering) {
- return new DeploymentJobs(projectId, status, jiraIssueId);
- }
/** Returns an immutable map of the status entries in this */
public Map<JobType, JobStatus> jobStatus() { return status; }
@@ -109,8 +101,15 @@ public class DeploymentJobs {
}
/** Returns whether any job is currently in progress */
- public boolean inProgress() {
- return status.values().stream().anyMatch(JobStatus::inProgress);
+ public boolean isRunning(Instant timeoutLimit) {
+ return status.values().stream().anyMatch(job -> job.isRunning(timeoutLimit));
+ }
+
+ /** Returns whether the given job type is currently running and was started after timeoutLimit */
+ public boolean isRunning(JobType jobType, Instant timeoutLimit) {
+ JobStatus jobStatus = status.get(jobType);
+ if ( jobStatus == null) return false;
+ return jobStatus.isRunning(timeoutLimit);
}
/** Returns whether change can be deployed to the given environment */
@@ -126,14 +125,11 @@ public class DeploymentJobs {
return true; // other environments do not have any preconditions
}
- /** Returns whether change has been deployed completely */
- public boolean isDeployed(Optional<Change> change) {
- if (!change.isPresent()) {
- return true;
- }
+ /** Returns whether the given change has been deployed completely */
+ public boolean isDeployed(Change change) {
return status.values().stream()
.filter(status -> status.type().isProduction())
- .allMatch(status -> isSuccessful(change.get(), status.type()));
+ .allMatch(status -> isSuccessful(change, status.type()));
}
/** Returns whether job has completed successfully */
@@ -310,4 +306,15 @@ public class DeploymentJobs {
}
}
-}
+ private static Optional<Long> requireId(Optional<Long> id, String message) {
+ Objects.requireNonNull(id, message);
+ if (!id.isPresent()) {
+ return id;
+ }
+ if (id.get() <= 0) {
+ throw new IllegalArgumentException(message);
+ }
+ return id;
+ }
+
+} \ No newline at end of file
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobStatus.java
index 96e91d41584..1c8aa2adada 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/application/JobStatus.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.controller.application;
import com.yahoo.component.Version;
import com.yahoo.vespa.hosted.controller.Controller;
+import java.time.Duration;
import java.time.Instant;
import java.util.Objects;
import java.util.Optional;
@@ -98,21 +99,18 @@ public class JobStatus {
/** Returns true unless this job last completed with a failure */
public boolean isSuccess() { return ! jobError.isPresent(); }
+
+ /** Returns true if last triggered is newer than last completed and was started after timeoutLimit */
+ public boolean isRunning(Instant timeoutLimit) {
+ if ( ! lastTriggered.isPresent()) return false;
+ if (lastTriggered.get().at().isBefore(timeoutLimit)) return false;
+ if ( ! lastCompleted.isPresent()) return true;
+ return lastTriggered.get().at().isAfter(lastCompleted.get().at());
+ }
/** The error of the last completion, or empty if the last run succeeded */
public Optional<DeploymentJobs.JobError> jobError() { return jobError; }
- /** Returns true if job is in progress */
- public boolean inProgress() {
- if (!lastTriggered().isPresent()) {
- return false;
- }
- if (!lastCompleted().isPresent()) {
- return true;
- }
- return lastTriggered().get().at().isAfter(lastCompleted().get().at());
- }
-
/**
* Returns the last triggering of this job, or empty if the controller has never triggered it
* and not seen a deployment for it
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentOrder.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentOrder.java
index 82e6ca919e1..0ae69b4f2ca 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentOrder.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentOrder.java
@@ -1,9 +1,13 @@
package com.yahoo.vespa.hosted.controller.deployment;
import com.yahoo.config.application.api.DeploymentSpec;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobType;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
@@ -11,10 +15,15 @@ import java.time.Clock;
import java.time.Duration;
import java.time.Instant;
import java.util.Collections;
+import java.util.Comparator;
+import java.util.LinkedHashMap;
import java.util.List;
+import java.util.Map;
import java.util.Objects;
import java.util.Optional;
+import java.util.function.Function;
import java.util.logging.Logger;
+import java.util.stream.Collector;
import java.util.stream.Collectors;
import static java.util.stream.Collectors.collectingAndThen;
@@ -43,8 +52,8 @@ public class DeploymentOrder {
return Collections.emptyList();
}
- // Always trigger system test after component as deployment spec might not be available yet (e.g. if this is a
- // new application with no previous deployments)
+ // Always trigger system test after component as deployment spec might not be available yet
+ // (e.g. if this is a new application with no previous deployments)
if (job == JobType.component) {
return Collections.singletonList(JobType.systemTest);
}
@@ -104,6 +113,27 @@ public class DeploymentOrder {
.collect(Collectors.collectingAndThen(Collectors.toList(), Collections::unmodifiableList));
}
+ /** Returns job status sorted according to deployment spec */
+ public Map<JobType, JobStatus> sortBy(DeploymentSpec deploymentSpec, Map<JobType, JobStatus> jobStatus) {
+ List<DeploymentJobs.JobType> jobs = jobsFrom(deploymentSpec);
+ return jobStatus.entrySet().stream()
+ .sorted(Comparator.comparingInt(kv -> jobs.indexOf(kv.getKey())))
+ .collect(Collectors.collectingAndThen(toLinkedMap(Map.Entry::getKey, Map.Entry::getValue),
+ Collections::unmodifiableMap));
+ }
+
+ /** Returns deployments sorted according to declared zones */
+ public Map<Zone, Deployment> sortBy(List<DeploymentSpec.DeclaredZone> zones, Map<Zone, Deployment> deployments) {
+ List<Zone> productionZones = zones.stream()
+ .filter(z -> z.environment() == Environment.prod && z.region().isPresent())
+ .map(z -> new Zone(z.environment(), z.region().get()))
+ .collect(Collectors.toList());
+ return deployments.entrySet().stream()
+ .sorted(Comparator.comparingInt(kv -> productionZones.indexOf(kv.getKey())))
+ .collect(Collectors.collectingAndThen(toLinkedMap(Map.Entry::getKey, Map.Entry::getValue),
+ Collections::unmodifiableMap));
+ }
+
/** Returns jobs for the given step */
private List<JobType> jobsFrom(DeploymentSpec.Step step) {
return step.zones().stream()
@@ -166,4 +196,13 @@ public class DeploymentOrder {
return totalDelay;
}
+ private static <T, K, U> Collector<T, ?, Map<K,U>> toLinkedMap(Function<? super T, ? extends K> keyMapper,
+ Function<? super T, ? extends U> valueMapper) {
+ return Collectors.toMap(keyMapper, valueMapper,
+ (u, v) -> {
+ throw new IllegalStateException(String.format("Duplicate key %s", u));
+ },
+ LinkedHashMap::new);
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
index c0e73a44444..407d28264d1 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTrigger.java
@@ -3,12 +3,16 @@ package com.yahoo.vespa.hosted.controller.deployment;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.ApplicationController;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobReport;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobType;
@@ -31,9 +35,13 @@ import java.util.logging.Logger;
* This class is multithread safe.
*
* @author bratseth
+ * @author mpolden
*/
public class DeploymentTrigger {
-
+
+ /** The max duration a job may run before we consider it dead/hanging */
+ private final Duration jobTimeout;
+
private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());
private final Controller controller;
@@ -43,13 +51,18 @@ public class DeploymentTrigger {
public DeploymentTrigger(Controller controller, CuratorDb curator, Clock clock) {
Objects.requireNonNull(controller,"controller cannot be null");
+ Objects.requireNonNull(curator,"curator cannot be null");
Objects.requireNonNull(clock,"clock cannot be null");
this.controller = controller;
this.clock = clock;
this.buildSystem = new PolledBuildSystem(controller, curator);
this.order = new DeploymentOrder(controller);
+ this.jobTimeout = controller.system().equals(SystemName.main) ? Duration.ofHours(12) : Duration.ofHours(1);
}
+ /** Returns the time in the past before which jobs are at this moment considered unresponsive */
+ public Instant jobTimeoutLimit() { return clock.instant().minus(jobTimeout); }
+
//--- Start of methods which triggers deployment jobs -------------------------
/**
@@ -63,17 +76,24 @@ public class DeploymentTrigger {
Application application = applications().require(report.applicationId());
application = application.withJobCompletion(report, clock.instant(), controller);
- // Handle successful first and last job
- if (order.isFirst(report.jobType()) && report.success()) { // the first job tells us that a change occurred
- if (application.deploying().isPresent() && ! application.deploymentJobs().hasFailures()) { // postpone until the current deployment is done
- applications().store(application.withOutstandingChange(true), lock);
- return;
- }
- else { // start a new change deployment
- application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
+ // Handle successful starting and ending
+ if (report.success()) {
+ if (order.isFirst(report.jobType())) { // the first job tells us that a change occurred
+ if (acceptNewRevisionNow(application)) {
+ // Set this as the change we are doing, unless we are already pushing a platform change
+ if ( ! ( application.deploying().isPresent() &&
+ (application.deploying().get() instanceof Change.VersionChange)))
+ application = application.withDeploying(Optional.of(Change.ApplicationChange.unknown()));
+ }
+ else { // postpone
+ applications().store(application.withOutstandingChange(true), lock);
+ return;
+ }
+ }
+ else if (order.isLast(report.jobType(), application) && application.deployingCompleted()) {
+ // change completed
+ application = application.withDeploying(Optional.empty());
}
- } else if (order.isLast(report.jobType(), application) && report.success() && application.deploymentJobs().isDeployed(application.deploying())) {
- application = application.withDeploying(Optional.empty());
}
// Trigger next
@@ -95,29 +115,84 @@ public class DeploymentTrigger {
}
/**
+ * Find jobs that can and should run but are currently not.
+ */
+ public void triggerReadyJobs() {
+ ApplicationList applications = ApplicationList.from(applications().asList());
+ applications = applications.notPullRequest();
+ for (Application application : applications.asList()) {
+ try (Lock lock = applications().lock(application.id())) {
+ triggerReadyJobs(application, lock);
+ }
+ }
+ }
+
+ private void triggerReadyJobs(Application application, Lock lock) {
+ if ( ! application.deploying().isPresent()) return;
+ for (JobType jobType : order.jobsFrom(application.deploymentSpec())) {
+ // TODO: Do this for all jobs not just staging, and (with more work) remove triggerFailing and triggerDelayed
+ if (jobType.environment().equals(Environment.staging)) {
+ JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
+ if (jobStatus.isRunning(jobTimeoutLimit())) continue;
+
+ for (JobType nextJobType : order.nextAfter(jobType, application)) {
+ JobStatus nextStatus = application.deploymentJobs().jobStatus().get(nextJobType);
+
+ // Attempt to trigger if there are changes available - is rejected if the change is in progress,
+ // or is currently blocked
+ if (changesAvailable(jobStatus, nextStatus))
+ trigger(nextJobType, application, false, "Triggering previously blocked job", lock);
+ }
+
+ }
+ }
+ }
+
+ /**
+ * Returns true if the previous job has completed successfully with a revision and/or version which is
+ * newer (different) than the one last completed successfully in next
+ */
+ private boolean changesAvailable(JobStatus previous, JobStatus next) {
+ if ( ! previous.lastSuccess().isPresent()) return false;
+ if (next == null) return true;
+ if ( ! next.lastSuccess().isPresent()) return true;
+
+ JobStatus.JobRun previousSuccess = previous.lastSuccess().get();
+ JobStatus.JobRun nextSuccess = next.lastSuccess().get();
+ if (previousSuccess.revision().isPresent() && ! previousSuccess.revision().get().equals(nextSuccess.revision().get()))
+ return true;
+ if (! previousSuccess.version().equals(nextSuccess.version()))
+ return true;
+ return false;
+ }
+
+ /**
* Called periodically to cause triggering of jobs in the background
*/
- public void triggerFailing(ApplicationId applicationId, String cause) {
+ public void triggerFailing(ApplicationId applicationId) {
try (Lock lock = applications().lock(applicationId)) {
Application application = applications().require(applicationId);
- if (shouldRetryFromBeginning(application)) {
- // failed for a long time: Discard existing change and restart from the component job
- application = application.withDeploying(Optional.empty());
- application = trigger(JobType.component, application, false, "Retrying failing deployment from beginning: " + cause, lock);
- applications().store(application, lock);
- } else {
- // retry the failed job (with backoff)
- for (JobType jobType : order.jobsFrom(application.deploymentSpec())) { // retry the *first* failing job
- JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
- if (isFailing(jobStatus)) {
- if (shouldRetryNow(jobStatus)) {
- application = trigger(jobType, application, false, "Retrying failing job: " + cause, lock);
- applications().store(application, lock);
- }
- break;
+ if ( ! application.deploying().isPresent()) return; // No ongoing change, no need to retry
+
+ // Retry first failing job
+ for (JobType jobType : order.jobsFrom(application.deploymentSpec())) {
+ JobStatus jobStatus = application.deploymentJobs().jobStatus().get(jobType);
+ if (isFailing(application.deploying().get(), jobStatus)) {
+ if (shouldRetryNow(jobStatus)) {
+ application = trigger(jobType, application, false, "Retrying failing job", lock);
+ applications().store(application, lock);
}
+ break;
}
}
+
+ // Retry dead job
+ Optional<JobStatus> firstDeadJob = firstDeadJob(application.deploymentJobs());
+ if (firstDeadJob.isPresent()) {
+ application = trigger(firstDeadJob.get().type(), application, false, "Retrying dead job",
+ lock);
+ applications().store(application, lock);
+ }
}
}
@@ -126,7 +201,7 @@ public class DeploymentTrigger {
for (Application application : applications().asList()) {
if ( ! application.deploying().isPresent() ) continue;
if (application.deploymentJobs().hasFailures()) continue;
- if (application.deploymentJobs().inProgress()) continue;
+ if (application.deploymentJobs().isRunning(controller.applications().deploymentTrigger().jobTimeoutLimit())) continue;
if (application.deploymentSpec().steps().stream().noneMatch(step -> step instanceof DeploymentSpec.Delay)) {
continue; // Application does not have any delayed deployments
}
@@ -184,39 +259,39 @@ public class DeploymentTrigger {
//--- End of methods which triggers deployment jobs ----------------------------
private ApplicationController applications() { return controller.applications(); }
-
- private boolean isFailing(JobStatus jobStatusOrNull) {
- return jobStatusOrNull != null && !jobStatusOrNull.isSuccess();
+
+ /** Returns whether a job is failing for the current change in the given application */
+ private boolean isFailing(Change change, JobStatus status) {
+ return status != null &&
+ !status.isSuccess() &&
+ status.lastCompletedFor(change);
}
private boolean isCapacityConstrained(JobType jobType) {
return jobType == JobType.stagingTest || jobType == JobType.systemTest;
}
- private boolean shouldRetryFromBeginning(Application application) {
- Instant eightHoursAgo = clock.instant().minus(Duration.ofHours(8));
- Instant failingSince = application.deploymentJobs().failingSince();
- if (failingSince != null && failingSince.isAfter(eightHoursAgo)) return false;
-
- JobStatus componentJobStatus = application.deploymentJobs().jobStatus().get(JobType.component);
- if (componentJobStatus == null) return true;
- if ( ! componentJobStatus.lastCompleted().isPresent() ) return true;
- return componentJobStatus.lastCompleted().get().at().isBefore(eightHoursAgo);
+ /** Returns the first job that has been running for more than the given timeout */
+ private Optional<JobStatus> firstDeadJob(DeploymentJobs jobs) {
+ Optional<JobStatus> oldestRunningJob = jobs.jobStatus().values().stream()
+ .filter(job -> job.isRunning(Instant.ofEpochMilli(0)))
+ .sorted(Comparator.comparing(status -> status.lastTriggered().get().at()))
+ .findFirst();
+ return oldestRunningJob.filter(job -> job.lastTriggered().get().at().isBefore(jobTimeoutLimit()));
}
/** Decide whether the job should be triggered by the periodic trigger */
private boolean shouldRetryNow(JobStatus job) {
if (job.isSuccess()) return false;
+ if (job.isRunning(jobTimeoutLimit())) return false;
- if ( ! job.lastCompleted().isPresent()) return true; // Retry when we don't hear back
+ // Retry after 10% of the time since it started failing
+ Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
+ if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true;
- // Always retry if we haven't tried in 4 hours
+ // ... or retry anyway if we haven't tried in 4 hours
if (job.lastCompleted().get().at().isBefore(clock.instant().minus(Duration.ofHours(4)))) return true;
- // Wait for 10% of the time since it started failing
- Duration aTenthOfFailTime = Duration.ofMillis( (clock.millis() - job.firstFailing().get().at().toEpochMilli()) / 10);
- if (job.lastCompleted().get().at().isBefore(clock.instant().minus(aTenthOfFailTime))) return true;
-
return false;
}
@@ -238,12 +313,12 @@ public class DeploymentTrigger {
.isAfter(clock.instant().minus(Duration.ofMinutes(15)));
}
- /** Decide whether job type should be triggered according to deployment spec */
+ /** Returns whether the given job type should be triggered according to deployment spec */
private boolean deploysTo(Application application, JobType jobType) {
Optional<Zone> zone = jobType.zone(controller.system());
if (zone.isPresent() && jobType.isProduction()) {
// Skip triggering of jobs for zones where the application should not be deployed
- if (!application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
+ if ( ! application.deploymentSpec().includes(jobType.environment(), Optional.of(zone.get().region()))) {
return false;
}
}
@@ -260,26 +335,46 @@ public class DeploymentTrigger {
* @return the application in the triggered state, which *must* be stored by the caller
*/
private Application trigger(JobType jobType, Application application, boolean first, String cause, Lock lock) {
- if (jobType == null) return application; // previous was last job
+ if (jobType == null) { // previous was last job
+ return application;
+ }
+
+ // Note: We could make a more fine-grained and more correct determination about whether to block
+ // by instead basing the decision on what is currently deployed in the zone. However,
+ // this leads to some additional corner cases, and the possibility of blocking an application
+ // fix to a version upgrade, so not doing it now
+ if (jobType.isProduction() && application.deployingBlocked(clock.instant())) {
+ return application;
+ }
+
+ if (application.deploymentJobs().isRunning(jobType, jobTimeoutLimit())) {
+ return application;
+ }
// TODO: Remove when we can determine why this occurs
- if (jobType != JobType.component && !application.deploying().isPresent()) {
+ if (jobType != JobType.component && ! application.deploying().isPresent()) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but this application is not " +
"currently deploying a change",
jobType, application, cause));
return application;
}
- if (!deploysTo(application, jobType)) {
+ if ( ! deploysTo(application, jobType)) {
return application;
}
- if (!application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
+ // Note that this allows a new change to catch up and prevent an older one from continuing
+ if ( ! application.deploymentJobs().isDeployableTo(jobType.environment(), application.deploying())) {
log.warning(String.format("Want to trigger %s for %s with reason %s, but change is untested", jobType,
application, cause));
return application;
}
+ // Ignore applications that are not associated with a project
+ if ( ! application.deploymentJobs().projectId().isPresent()) {
+ return application;
+ }
+
log.info(String.format("Triggering %s for %s, %s: %s", jobType, application,
application.deploying().map(d -> "deploying " + d).orElse("restarted deployment"),
cause));
@@ -289,12 +384,22 @@ public class DeploymentTrigger {
}
private Application trigger(List<JobType> jobs, Application application, String cause, Lock lock) {
- for (JobType job : jobs) {
+ for (JobType job : jobs)
application = trigger(job, application, false, cause, lock);
- }
return application;
}
+ private boolean acceptNewRevisionNow(Application application) {
+ if ( ! application.deploying().isPresent()) return true;
+ if ( application.deploying().get() instanceof Change.ApplicationChange) return true; // more changes are ok
+
+ if ( application.deploymentJobs().hasFailures()) return true; // allow changes to fix upgrade problems
+ if ( application.isBlocked(clock.instant())) return true; // allow testing changes while upgrade blocked (debatable)
+ return false;
+ }
+
public BuildSystem buildSystem() { return buildSystem; }
+ public DeploymentOrder deploymentOrder() { return order; }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystem.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystem.java
index 41adb4abe6a..56b4023f932 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystem.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystem.java
@@ -12,6 +12,8 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.List;
+import java.util.Optional;
+import java.util.logging.Logger;
/**
* Stores a queue for each type of job, and offers jobs from each of these to a periodic
@@ -22,10 +24,14 @@ import java.util.List;
*/
public class PolledBuildSystem implements BuildSystem {
- private final Controller controller;
+ private static final Logger log = Logger.getLogger(PolledBuildSystem.class.getName());
+
+ // The number of jobs to offer, on each poll, for zones that have limited capacity
+ private static final int maxCapacityConstrainedJobsToOffer = 2;
+ private final Controller controller;
private final CuratorDb curator;
-
+
public PolledBuildSystem(Controller controller, CuratorDb curator) {
this.controller = controller;
this.curator = curator;
@@ -71,16 +77,27 @@ public class PolledBuildSystem implements BuildSystem {
}
private List<BuildJob> getJobs(boolean removeFromQueue) {
+ int capacityConstrainedJobsOffered = 0;
try (Lock lock = curator.lockJobQueues()) {
List<BuildJob> jobsToRun = new ArrayList<>();
for (JobType jobType : JobType.values()) {
Deque<ApplicationId> queue = curator.readJobQueue(jobType);
for (ApplicationId a : queue) {
ApplicationId application = removeFromQueue ? queue.poll() : a;
- jobsToRun.add(new BuildJob(projectIdFor(application), jobType.id()));
- // Return only one job at a time for capacity constrained queues
- if (removeFromQueue && isCapacityConstrained(jobType)) break;
+ Optional<Long> projectId = projectId(application);
+ if (projectId.isPresent()) {
+ jobsToRun.add(new BuildJob(projectId.get(), jobType.id()));
+ } else {
+ log.warning("Not queuing " + jobType.id() + " for " + application.toShortString() +
+ " because project ID is missing");
+ }
+
+ // Return a limited number of jobs at a time for capacity constrained zones
+ if (removeFromQueue && isCapacityConstrained(jobType) &&
+ ++capacityConstrainedJobsOffered >= maxCapacityConstrainedJobsToOffer) {
+ break;
+ }
}
if (removeFromQueue)
curator.writeJobQueue(jobType, queue);
@@ -89,8 +106,8 @@ public class PolledBuildSystem implements BuildSystem {
}
}
- private Long projectIdFor(ApplicationId applicationId) {
- return controller.applications().require(applicationId).deploymentJobs().projectId().get();
+ private Optional<Long> projectId(ApplicationId applicationId) {
+ return controller.applications().require(applicationId).deploymentJobs().projectId();
}
private static boolean isCapacityConstrained(JobType jobType) {
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BlockedChangeDeployer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BlockedChangeDeployer.java
new file mode 100644
index 00000000000..4a68fd6cfab
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/BlockedChangeDeployer.java
@@ -0,0 +1,28 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
+import com.yahoo.vespa.hosted.controller.application.Change;
+
+import java.time.Duration;
+
+/**
+ * Deploys application changes which have not made it to production because of a revision change block.
+ *
+ * @author bratseth
+ */
+@SuppressWarnings("unused")
+public class BlockedChangeDeployer extends Maintainer {
+
+ public BlockedChangeDeployer(Controller controller, Duration interval, JobControl jobControl) {
+ super(controller, interval, jobControl);
+ }
+
+ @Override
+ protected void maintain() {
+ controller().applications().deploymentTrigger().triggerReadyJobs();
+ }
+
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainer.java
new file mode 100644
index 00000000000..8f5db8832fa
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainer.java
@@ -0,0 +1,84 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.vespa.curator.Lock;
+import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeList;
+import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+/**
+ * Maintain info about hardware, hostnames and cluster specifications.
+ * <p>
+ * This is used to calculate cost metrics for the application api.
+ *
+ * @author smorgrav
+ */
+public class ClusterInfoMaintainer extends Maintainer {
+
+ private final Controller controller;
+
+ ClusterInfoMaintainer(Controller controller, Duration duration, JobControl jobControl) {
+ super(controller, duration, jobControl);
+ this.controller = controller;
+ }
+
+ private static String clusterid(NodeList.Node node) {
+ return node.membership.clusterId;
+ }
+
+ private Map<ClusterSpec.Id, ClusterInfo> getClusterInfo(NodeList nodes) {
+ Map<ClusterSpec.Id, ClusterInfo> infoMap = new HashMap<>();
+
+ // Group nodes by clusterid
+ Map<String, List<NodeList.Node>> clusters = nodes.nodes.stream()
+ .filter(node -> node.membership != null)
+ .collect(Collectors.groupingBy(ClusterInfoMaintainer::clusterid));
+
+ // For each cluster - get info
+ for (String id : clusters.keySet()) {
+ List<NodeList.Node> clusterNodes = clusters.get(id);
+
+        //Assume they are all equal and use first node as a representative for the cluster
+ NodeList.Node node = clusterNodes.get(0);
+
+ // Add to map
+ List<String> hostnames = clusterNodes.stream().map(node1 -> node1.hostname).collect(Collectors.toList());
+ ClusterInfo inf = new ClusterInfo(node.flavor, node.cost, ClusterSpec.Type.from(node.membership.clusterType), hostnames);
+ infoMap.put(new ClusterSpec.Id(id), inf);
+ }
+
+ return infoMap;
+ }
+
+ @Override
+ protected void maintain() {
+
+ for (Application application : controller().applications().asList()) {
+ try (Lock lock = controller().applications().lock(application.id())) {
+ for (Deployment deployment : application.deployments().values()) {
+ DeploymentId deploymentId = new DeploymentId(application.id(), deployment.zone());
+ try {
+ NodeList nodes = controller().applications().configserverClient().getNodeList(deploymentId);
+ Map<ClusterSpec.Id, ClusterInfo> clusterInfo = getClusterInfo(nodes);
+ Application app = application.with(deployment.withClusterInfo(clusterInfo));
+ controller.applications().store(app, lock);
+ } catch (IOException ioe) {
+ Logger.getLogger(ClusterInfoMaintainer.class.getName()).fine(ioe.getMessage());
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterUtilizationMaintainer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterUtilizationMaintainer.java
new file mode 100644
index 00000000000..8806651c60d
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterUtilizationMaintainer.java
@@ -0,0 +1,58 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.curator.Lock;
+import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.api.integration.MetricsService;
+import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
+
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Fetch utilization metrics and update applications with this data.
+ *
+ * @author smorgrav
+ */
+public class ClusterUtilizationMaintainer extends Maintainer {
+
+ Controller controller;
+
+ public ClusterUtilizationMaintainer(Controller controller, Duration duration, JobControl jobControl) {
+ super(controller, duration, jobControl);
+ this.controller = controller;
+ }
+
+ private Map<ClusterSpec.Id, ClusterUtilization> getUpdatedClusterUtilizations(ApplicationId app, Zone zone) {
+ Map<String, MetricsService.SystemMetrics> systemMetrics = controller.metricsService().getSystemMetrics(app, zone);
+
+ Map<ClusterSpec.Id, ClusterUtilization> utilizationMap = new HashMap<>();
+ for (Map.Entry<String, MetricsService.SystemMetrics> metrics : systemMetrics.entrySet()) {
+ MetricsService.SystemMetrics systemMetric = metrics.getValue();
+ ClusterUtilization utilization = new ClusterUtilization(systemMetric.memUtil() / 100, systemMetric.cpuUtil() / 100, systemMetric.diskUtil() / 100, 0);
+ utilizationMap.put(new ClusterSpec.Id(metrics.getKey()), utilization);
+ }
+
+ return utilizationMap;
+ }
+
+ @Override
+ protected void maintain() {
+
+ for (Application application : controller().applications().asList()) {
+ try (Lock lock = controller().applications().lock(application.id())) {
+ for (Deployment deployment : application.deployments().values()) {
+ Map<ClusterSpec.Id, ClusterUtilization> clusterUtilization = getUpdatedClusterUtilizations(application.id(), deployment.zone());
+ Application app = application.with(deployment.withClusterUtils(clusterUtilization));
+ controller.applications().store(app, lock);
+ }
+ }
+ }
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
index 016ea66cb1a..880abaaa6f9 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/ControllerMaintenance.java
@@ -9,6 +9,7 @@ import com.yahoo.vespa.hosted.controller.api.integration.Issues;
import com.yahoo.vespa.hosted.controller.api.integration.Properties;
import com.yahoo.vespa.hosted.controller.api.integration.chef.Chef;
import com.yahoo.vespa.hosted.controller.maintenance.config.MaintainerConfig;
+import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import java.time.Duration;
@@ -31,9 +32,12 @@ public class ControllerMaintenance extends AbstractComponent {
private final VersionStatusUpdater versionStatusUpdater;
private final Upgrader upgrader;
private final DelayedDeployer delayedDeployer;
+ private final BlockedChangeDeployer blockedChangeDeployer;
+ private final ClusterInfoMaintainer clusterInfoMaintainer;
+ private final ClusterUtilizationMaintainer clusterUtilizationMaintainer;
@SuppressWarnings("unused") // instantiated by Dependency Injection
- public ControllerMaintenance(MaintainerConfig maintainerConfig, Controller controller,
+ public ControllerMaintenance(MaintainerConfig maintainerConfig, Controller controller, CuratorDb curator,
JobControl jobControl, Metric metric, Chef chefClient,
Contacts contactsClient, Properties propertiesClient, Issues issuesClient) {
Duration maintenanceInterval = Duration.ofMinutes(maintainerConfig.intervalMinutes());
@@ -45,9 +49,14 @@ public class ControllerMaintenance extends AbstractComponent {
failureRedeployer = new FailureRedeployer(controller, maintenanceInterval, jobControl);
outstandingChangeDeployer = new OutstandingChangeDeployer(controller, maintenanceInterval, jobControl);
versionStatusUpdater = new VersionStatusUpdater(controller, Duration.ofMinutes(3), jobControl);
- upgrader = new Upgrader(controller, maintenanceInterval, jobControl);
+ upgrader = new Upgrader(controller, maintenanceInterval, jobControl, curator);
delayedDeployer = new DelayedDeployer(controller, maintenanceInterval, jobControl);
+ blockedChangeDeployer = new BlockedChangeDeployer(controller, maintenanceInterval, jobControl);
+ clusterInfoMaintainer = new ClusterInfoMaintainer(controller, Duration.ofHours(2), jobControl);
+ clusterUtilizationMaintainer = new ClusterUtilizationMaintainer(controller, Duration.ofHours(2), jobControl);
}
+
+ public Upgrader upgrader() { return upgrader; }
/** Returns control of the maintenance jobs of this */
public JobControl jobControl() { return jobControl; }
@@ -62,6 +71,9 @@ public class ControllerMaintenance extends AbstractComponent {
versionStatusUpdater.deconstruct();
upgrader.deconstruct();
delayedDeployer.deconstruct();
+ blockedChangeDeployer.deconstruct();
+ clusterUtilizationMaintainer.deconstruct();
+ clusterInfoMaintainer.deconstruct();
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployer.java
index 2c048bfa3ce..72f8faa5180 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployer.java
@@ -4,13 +4,9 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.application.ApplicationList;
-import com.yahoo.vespa.hosted.controller.application.JobStatus;
import java.time.Duration;
-import java.time.Instant;
-import java.util.Comparator;
import java.util.List;
-import java.util.Optional;
/**
* Attempts redeployment of failed jobs and deployments.
@@ -20,8 +16,6 @@ import java.util.Optional;
*/
public class FailureRedeployer extends Maintainer {
- private final static Duration jobTimeout = Duration.ofHours(12);
-
public FailureRedeployer(Controller controller, Duration interval, JobControl jobControl) {
super(controller, interval, jobControl);
}
@@ -31,57 +25,11 @@ public class FailureRedeployer extends Maintainer {
List<Application> applications = ApplicationList.from(controller().applications().asList())
.notPullRequest()
.asList();
- retryFailingJobs(applications);
- retryStuckJobs(applications);
+ applications.forEach(application -> triggerFailing(application));
}
- private void retryFailingJobs(List<Application> applications) {
- for (Application application : applications) {
- if (!application.deploying().isPresent()) {
- continue;
- }
- if (application.deploymentJobs().inProgress()) {
- continue;
- }
- Optional<JobStatus> failingJob = jobFailingFor(application);
- failingJob.ifPresent(job -> triggerFailing(application, "Job " + job.type().id() +
- " has been failing since " + job.firstFailing().get()));
- }
- }
-
- private void retryStuckJobs(List<Application> applications) {
- Instant maxAge = controller().clock().instant().minus(jobTimeout);
- for (Application application : applications) {
- Optional<JobStatus> job = oldestRunningJob(application);
- if (!job.isPresent()) {
- continue;
- }
- // Ignore job if it doesn't belong to a zone in this system
- if (!job.get().type().zone(controller().system()).isPresent()) {
- continue;
- }
- if (job.get().lastTriggered().get().at().isBefore(maxAge)) {
- triggerFailing(application, "Job " + job.get().type().id() +
- " has been running for more than " + jobTimeout);
- }
- }
- }
-
- private Optional<JobStatus> jobFailingFor(Application application) {
- return application.deploymentJobs().jobStatus().values().stream()
- .filter(status -> !status.isSuccess() && status.lastCompletedFor(application.deploying().get()))
- .findFirst();
- }
-
- private Optional<JobStatus> oldestRunningJob(Application application) {
- return application.deploymentJobs().jobStatus().values().stream()
- .filter(JobStatus::inProgress)
- .sorted(Comparator.comparing(status -> status.lastTriggered().get().at()))
- .findFirst();
- }
-
- private void triggerFailing(Application application, String cause) {
- controller().applications().deploymentTrigger().triggerFailing(application.id(), cause);
+ private void triggerFailing(Application application) {
+ controller().applications().deploymentTrigger().triggerFailing(application.id());
}
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobControl.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobControl.java
index e05612aaf57..d7396cb2acb 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobControl.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/JobControl.java
@@ -13,14 +13,12 @@ import java.util.logging.Logger;
* Provides status and control over running maintenance jobs.
* This is multithread safe.
*
- * Job deactivation is stored in a local file.
+ * Job deactivation is stored in zookeeper.
*
* @author bratseth
*/
public class JobControl {
- private static final Logger log = Logger.getLogger(JobControl.class.getName());
-
private final CuratorDb curator;
/** This is not stored in ZooKeeper as all nodes start all jobs */
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java
index a4d50d0c150..0722a58e18d 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/maintenance/Upgrader.java
@@ -7,10 +7,13 @@ import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.deployment.BuildSystem;
+import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import com.yahoo.yolean.Exceptions;
import java.time.Duration;
+import java.time.Instant;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -18,13 +21,19 @@ import java.util.logging.Logger;
* Maintenance job which schedules applications for Vespa version upgrade
*
* @author bratseth
+ * @author mpolden
*/
public class Upgrader extends Maintainer {
+ private static final Duration upgradeTimeout = Duration.ofHours(12);
+
private static final Logger log = Logger.getLogger(Upgrader.class.getName());
- public Upgrader(Controller controller, Duration interval, JobControl jobControl) {
+ private final CuratorDb curator;
+
+ public Upgrader(Controller controller, Duration interval, JobControl jobControl, CuratorDb curator) {
super(controller, interval, jobControl);
+ this.curator = curator;
}
/**
@@ -34,7 +43,7 @@ public class Upgrader extends Maintainer {
public void maintain() {
VespaVersion target = controller().versionStatus().version(controller().systemVersion());
if (target == null) return; // we don't have information about the current system version at this time
-
+
switch (target.confidence()) {
case broken:
ApplicationList toCancel = applications().upgradingTo(target.versionNumber())
@@ -64,11 +73,15 @@ public class Upgrader extends Maintainer {
Change.VersionChange change = new Change.VersionChange(version);
cancelUpgradesOf(applications.upgradingToLowerThan(version));
applications = applications.notPullRequest(); // Pull requests are deployed as separate applications to test then deleted; No need to upgrade
+ applications = applications.hasProductionDeployment();
applications = applications.onLowerVersionThan(version);
applications = applications.notDeployingApplication(); // wait with applications deploying an application change
applications = applications.notFailingOn(version); // try to upgrade only if it hasn't failed on this version
- applications = applications.notRunningJobFor(change); // do not trigger multiple jobs simultaneously for same upgrade
- for (Application application : applications.byIncreasingDeployedVersion().asList()) {
+ applications = applications.notCurrentlyUpgrading(change, controller().applications().deploymentTrigger().jobTimeoutLimit());
+ applications = applications.canUpgradeAt(controller().clock().instant()); // wait with applications that are currently blocking upgrades
+ applications = applications.byIncreasingDeployedVersion(); // start with lowest versions
+ applications = applications.first(numberOfApplicationsToUpgrade()); // throttle upgrades
+ for (Application application : applications.asList()) {
try {
controller().applications().deploymentTrigger().triggerChange(application.id(), change);
} catch (IllegalArgumentException e) {
@@ -83,4 +96,19 @@ public class Upgrader extends Maintainer {
}
}
+ /** Returns the number of applications to upgrade in this run */
+ private int numberOfApplicationsToUpgrade() {
+ return Math.max(1, (int)(maintenanceInterval().getSeconds() * (upgradesPerMinute() / 60)));
+ }
+
+ /** Returns number upgrades per minute */
+ public double upgradesPerMinute() {
+ return curator.readUpgradesPerMinute();
+ }
+
+ /** Sets the number upgrades per minute */
+ public void setUpgradesPerMinute(double n) {
+ curator.writeUpgradesPerMinute(n);
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
index 36363dd052e..859e322b227 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializer.java
@@ -5,6 +5,7 @@ import com.yahoo.component.Version;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
@@ -16,6 +17,8 @@ import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.application.ApplicationRevision;
import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
+import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
@@ -25,7 +28,9 @@ import com.yahoo.vespa.hosted.controller.application.SourceRevision;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collection;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
/**
@@ -61,7 +66,6 @@ public class ApplicationSerializer {
private final String projectIdField = "projectId";
private final String jobStatusField = "jobStatus";
private final String jiraIssueIdField = "jiraIssueId";
- private final String selfTriggeringField = "selfTriggering";
// JobStatus field
private final String jobTypeField = "jobType";
@@ -76,6 +80,21 @@ public class ApplicationSerializer {
private final String revisionField = "revision";
private final String atField = "at";
private final String upgradeField = "upgrade";
+
+ // ClusterInfo fields
+ private final String clusterInfoField = "clusterInfo";
+ private final String clusterInfoFlavorField = "flavor";
+ private final String clusterInfoCostField = "cost";
+ private final String clusterInfoTypeField = "clusterType";
+ private final String clusterInfoHostnamesField = "hostnames";
+
+ // ClusterUtils fields
+ private final String clusterUtilsField = "clusterUtils";
+ private final String clusterUtilsCpuField = "cpu";
+ private final String clusterUtilsMemField = "mem";
+ private final String clusterUtilsDiskField = "disk";
+ private final String clusterUtilsDiskBusyField = "diskbusy";
+
// ------------------ Serialization
@@ -102,8 +121,41 @@ public class ApplicationSerializer {
object.setString(versionField, deployment.version().toString());
object.setLong(deployTimeField, deployment.at().toEpochMilli());
toSlime(deployment.revision(), object.setObject(applicationPackageRevisionField));
+ clusterInfoToSlime(deployment.clusterInfo(), object);
+ clusterUtilsToSlime(deployment.clusterUtils(), object);
}
-
+
+ private void clusterInfoToSlime(Map<ClusterSpec.Id, ClusterInfo> clusters, Cursor object) {
+ Cursor root = object.setObject(clusterInfoField);
+ for (Map.Entry<ClusterSpec.Id, ClusterInfo> entry : clusters.entrySet()) {
+ toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
+ }
+ }
+
+ private void toSlime(ClusterInfo info, Cursor object) {
+ object.setString(clusterInfoFlavorField, info.getFlavor());
+ object.setLong(clusterInfoCostField, info.getCost());
+ object.setString(clusterInfoTypeField, info.getClusterType().name());
+ Cursor array = object.setArray(clusterInfoHostnamesField);
+ for (String host : info.getHostnames()) {
+ array.addString(host);
+ }
+ }
+
+ private void clusterUtilsToSlime(Map<ClusterSpec.Id, ClusterUtilization> clusters, Cursor object) {
+ Cursor root = object.setObject(clusterUtilsField);
+ for (Map.Entry<ClusterSpec.Id, ClusterUtilization> entry : clusters.entrySet()) {
+ toSlime(entry.getValue(), root.setObject(entry.getKey().value()));
+ }
+ }
+
+ private void toSlime(ClusterUtilization utils, Cursor object) {
+ object.setDouble(clusterUtilsCpuField, utils.getCpu());
+ object.setDouble(clusterUtilsMemField, utils.getMemory());
+ object.setDouble(clusterUtilsDiskField, utils.getDisk());
+ object.setDouble(clusterUtilsDiskBusyField, utils.getDiskBusy());
+ }
+
private void zoneToSlime(Zone zone, Cursor object) {
object.setString(environmentField, zone.environment().value());
object.setString(regionField, zone.region().value());
@@ -122,7 +174,9 @@ public class ApplicationSerializer {
}
private void toSlime(DeploymentJobs deploymentJobs, Cursor cursor) {
- deploymentJobs.projectId().ifPresent(projectId -> cursor.setLong(projectIdField, projectId));
+ deploymentJobs.projectId()
+ .filter(id -> id > 0) // TODO: Discards invalid data. Remove filter after October 2017
+ .ifPresent(projectId -> cursor.setLong(projectIdField, projectId));
jobStatusToSlime(deploymentJobs.jobStatus().values(), cursor.setArray(jobStatusField));
deploymentJobs.jiraIssueId().ifPresent(jiraIssueId -> cursor.setString(jiraIssueIdField, jiraIssueId));
}
@@ -190,9 +244,42 @@ public class ApplicationSerializer {
return new Deployment(zoneFromSlime(deploymentObject.field(zoneField)),
applicationRevisionFromSlime(deploymentObject.field(applicationPackageRevisionField)).get(),
Version.fromString(deploymentObject.field(versionField).asString()),
- Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()));
+ Instant.ofEpochMilli(deploymentObject.field(deployTimeField).asLong()),
+ clusterUtilsMapFromSlime(deploymentObject.field(clusterUtilsField)),
+ clusterInfoMapFromSlime(deploymentObject.field(clusterInfoField)));
}
-
+
+ private Map<ClusterSpec.Id, ClusterInfo> clusterInfoMapFromSlime(Inspector object) {
+ Map<ClusterSpec.Id, ClusterInfo> map = new HashMap<>();
+ object.traverse((String name, Inspector obect) -> map.put(new ClusterSpec.Id(name), clusterInfoFromSlime(obect)));
+ return map;
+ }
+
+ private Map<ClusterSpec.Id, ClusterUtilization> clusterUtilsMapFromSlime(Inspector object) {
+ Map<ClusterSpec.Id, ClusterUtilization> map = new HashMap<>();
+ object.traverse((String name, Inspector value) -> map.put(new ClusterSpec.Id(name), clusterUtililzationFromSlime(value)));
+ return map;
+ }
+
+ private ClusterUtilization clusterUtililzationFromSlime(Inspector object) {
+ double cpu = object.field(clusterUtilsCpuField).asDouble();
+ double mem = object.field(clusterUtilsMemField).asDouble();
+ double disk = object.field(clusterUtilsDiskField).asDouble();
+ double diskBusy = object.field(clusterUtilsDiskBusyField).asDouble();
+
+ return new ClusterUtilization(mem, cpu, disk, diskBusy);
+ }
+
+ private ClusterInfo clusterInfoFromSlime(Inspector inspector) {
+ String flavor = inspector.field(clusterInfoFlavorField).asString();
+ int cost = (int)inspector.field(clusterInfoCostField).asLong();
+ String type = inspector.field(clusterInfoTypeField).asString();
+
+ List<String> hostnames = new ArrayList<>();
+ inspector.field(clusterInfoHostnamesField).traverse((ArrayTraverser)(int index, Inspector value) -> hostnames.add(value.asString()));
+ return new ClusterInfo(flavor, cost, ClusterSpec.Type.from(type), hostnames);
+ }
+
private Zone zoneFromSlime(Inspector object) {
return new Zone(Environment.from(object.field(environmentField).asString()),
RegionName.from(object.field(regionField).asString()));
@@ -214,10 +301,10 @@ public class ApplicationSerializer {
}
private DeploymentJobs deploymentJobsFromSlime(Inspector object) {
- Optional<Long> projectId = optionalLong(object.field(projectIdField));
+ Optional<Long> projectId = optionalLong(object.field(projectIdField))
+ .filter(id -> id > 0); // TODO: Discards invalid data. Remove filter after October 2017
List<JobStatus> jobStatusList = jobStatusListFromSlime(object.field(jobStatusField));
Optional<String> jiraIssueKey = optionalString(object.field(jiraIssueIdField));
- boolean selfTriggering = object.field(selfTriggeringField).asBool();
return new DeploymentJobs(projectId, jobStatusList, jiraIssueKey);
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
index a83a24764ce..a70e31d9de8 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/persistence/CuratorDb.java
@@ -16,12 +16,14 @@ import com.yahoo.vespa.hosted.controller.api.identifiers.TenantId;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.zookeeper.ZooKeeperServer;
+import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.ArrayDeque;
import java.util.Collections;
import java.util.Deque;
import java.util.HashSet;
+import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@@ -140,6 +142,10 @@ public class CuratorDb {
return lock(root.append("locks").append("maintenanceJobLocks").append(jobName), Duration.ofSeconds(1));
}
+ public Lock lockProvisionState(String provisionStateId) {
+ return lock(lockPath(provisionStateId), Duration.ofMinutes(30));
+ }
+
// -------------- Read and write --------------------------------------------------
public Version readSystemVersion() {
@@ -192,6 +198,35 @@ public class CuratorDb {
transaction.commit();
}
+ public double readUpgradesPerMinute() {
+ Optional<byte[]> n = curator.getData(upgradePerMinutePath());
+ if (!n.isPresent() || n.get().length == 0) {
+ return 0.5; // Default if value has never been written
+ }
+ return ByteBuffer.wrap(n.get()).getDouble();
+ }
+
+ public void writeUpgradesPerMinute(double n) {
+ if (n < 0) {
+ throw new IllegalArgumentException("Upgrades per minute must be >= 0");
+ }
+ NestedTransaction transaction = new NestedTransaction();
+ curator.set(upgradePerMinutePath(), ByteBuffer.allocate(Double.BYTES).putDouble(n).array());
+ transaction.commit();
+ }
+
+ public Optional<byte[]> readProvisionState(String provisionId) {
+ return curator.getData(provisionStatePath().append(provisionId));
+ }
+
+ public void writeProvisionState(String provisionId, byte[] data) {
+ curator.set(provisionStatePath().append(provisionId), data);
+ }
+
+ public List<String> readProvisionStateIds() {
+ return curator.getChildren(provisionStatePath());
+ }
+
// -------------- Paths --------------------------------------------------
private Path systemVersionPath() {
@@ -214,6 +249,13 @@ public class CuratorDb {
return lockPath;
}
+ private Path lockPath(String provisionId) {
+ Path lockPath = root.append("locks")
+ .append(provisionStatePath());
+ curator.create(lockPath);
+ return lockPath;
+ }
+
private Path inactiveJobsPath() {
return root.append("inactiveJobs");
}
@@ -222,4 +264,15 @@ public class CuratorDb {
return root.append("jobQueues").append(jobType.name());
}
+ private Path upgradePerMinutePath() {
+ return root.append("upgrader").append("upgradesPerMinute");
+ }
+
+ private Path provisionStatePath() {
+ return root.append("provisioning").append("states");
+ }
+
+ private Path provisionStatePath(String provisionId) {
+ return provisionStatePath().append(provisionId);
+ }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/RootHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/RootHandler.java
index 9283b1c3018..50c4efc2d27 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/RootHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/RootHandler.java
@@ -66,7 +66,6 @@ public class RootHandler extends LoggingRequestHandler {
jerseyService(services, "statuspage", "/statuspage/v1/", "/statuspage/application.wadl");
jerseyService(services, "zone", "/zone/v1/", "/zone/application.wadl");
jerseyService(services, "zone", "/zone/v2/", "/zone/application.wadl");
- jerseyService(services, "cost", "/cost/v1/", "/cost/application.wadl");
handlerService(services, "application", "/application/v4/");
handlerService(services, "deployment", "/deployment/v1/");
handlerService(services, "screwdriver", "/screwdriver/v1/release/vespa");
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
index 9d6aa7d3632..42e89e7893f 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiHandler.java
@@ -55,16 +55,17 @@ import com.yahoo.vespa.hosted.controller.api.integration.athens.NToken;
import com.yahoo.vespa.hosted.controller.api.integration.athens.ZmsException;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Log;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.ApplicationCost;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.CostJsonModelAdapter;
import com.yahoo.vespa.hosted.controller.api.integration.routing.RotationStatus;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.ApplicationRevision;
import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.ClusterCost;
+import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
+import com.yahoo.vespa.hosted.controller.application.DeploymentCost;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.application.SourceRevision;
-import com.yahoo.vespa.hosted.controller.common.NotFoundCheckedException;
import com.yahoo.vespa.hosted.controller.restapi.ErrorResponse;
import com.yahoo.vespa.hosted.controller.restapi.MessageResponse;
import com.yahoo.vespa.hosted.controller.restapi.Path;
@@ -97,7 +98,9 @@ import java.util.logging.Level;
* on hosted Vespa.
*
* @author bratseth
+ * @author mpolden
*/
+@SuppressWarnings("unused") // created by injection
public class ApplicationApiHandler extends LoggingRequestHandler {
private final Controller controller;
@@ -122,7 +125,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
case PUT: return handlePUT(request);
case POST: return handlePOST(request);
case DELETE: return handleDELETE(request);
- case OPTIONS: return handleOPTIONS(request);
+ case OPTIONS: return handleOPTIONS();
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
@@ -151,7 +154,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
if (path.matches("/application/v4/tenant")) return tenants(request);
if (path.matches("/application/v4/tenant-pipeline")) return tenantPipelines();
if (path.matches("/application/v4/athensDomain")) return athensDomains(request);
- if (path.matches("/application/v4/property")) return properties(request);
+ if (path.matches("/application/v4/property")) return properties();
if (path.matches("/application/v4/cookiefreshness")) return cookieFreshness(request);
if (path.matches("/application/v4/tenant/{tenant}")) return tenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application")) return applications(path.get("tenant"), request);
@@ -194,13 +197,14 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
Path path = new Path(request.getUri().getPath());
if (path.matches("/application/v4/tenant/{tenant}")) return deleteTenant(path.get("tenant"), request);
if (path.matches("/application/v4/tenant/{tenant}/application/{application}")) return deleteApplication(path.get("tenant"), path.get("application"), request);
+ if (path.matches("/application/v4/tenant/{tenant}/application/{application}/deploying")) return cancelDeploy(path.get("tenant"), path.get("application"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}")) return deactivate(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"));
if (path.matches("/application/v4/tenant/{tenant}/application/{application}/environment/{environment}/region/{region}/instance/{instance}/global-rotation/override"))
return setGlobalRotationOverride(path.get("tenant"), path.get("application"), path.get("instance"), path.get("environment"), path.get("region"), true, request);
return ErrorResponse.notFoundError("Nothing at " + path);
}
- private HttpResponse handleOPTIONS(HttpRequest request) {
+ private HttpResponse handleOPTIONS() {
// We implement this to avoid redirect loops on OPTIONS requests from browsers, but do not really bother
// spelling out the methods supported at each path, which we should
EmptyJsonResponse response = new EmptyJsonResponse();
@@ -209,8 +213,8 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
}
private HttpResponse root(HttpRequest request) {
- return new ResourceResponse(request,
- "user", "tenant", "tenant-pipeline", "athensDomain", "property", "cookiefreshness");
+ return new ResourceResponse(request, "user", "tenant", "tenant-pipeline", "athensDomain",
+ "property", "cookiefreshness");
}
private HttpResponse authenticatedUser(HttpRequest request) {
@@ -268,7 +272,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
return new SlimeJsonResponse(slime);
}
- private HttpResponse properties(HttpRequest request) {
+ private HttpResponse properties() {
Slime slime = new Slime();
Cursor response = slime.setObject();
Cursor array = response.setArray("properties");
@@ -322,9 +326,13 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
toSlime(((Change.ApplicationChange)application.deploying().get()).revision().get(), deployingObject.setObject("revision"));
}
- // Deployment jobs
+ // Jobs sorted according to deployment spec
+ Map<DeploymentJobs.JobType, JobStatus> jobStatus = controller.applications().deploymentTrigger()
+ .deploymentOrder()
+ .sortBy(application.deploymentSpec(), application.deploymentJobs().jobStatus());
+
Cursor deploymentsArray = response.setArray("deploymentJobs");
- for (JobStatus job : application.deploymentJobs().jobStatus().values()) {
+ for (JobStatus job : jobStatus.values()) {
Cursor jobObject = deploymentsArray.addObject();
jobObject.setString("type", job.type().id());
jobObject.setBool("success", job.isSuccess());
@@ -346,9 +354,12 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
for (URI rotation : rotations)
globalRotationsArray.addString(rotation.toString());
- // Deployments
+ // Deployments sorted according to deployment spec
+ Map<Zone, Deployment> deployments = controller.applications().deploymentTrigger()
+ .deploymentOrder()
+ .sortBy(application.deploymentSpec().zones(), application.deployments());
Cursor instancesArray = response.setArray("instances");
- for (Deployment deployment : application.deployments().values()) {
+ for (Deployment deployment : deployments.values()) {
Cursor deploymentObject = instancesArray.addObject();
deploymentObject.setString("environment", deployment.zone().environment().value());
deploymentObject.setString("region", deployment.zone().region().value());
@@ -416,19 +427,14 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
application.deploymentJobs().projectId().ifPresent(i -> response.setString("screwdriverId", String.valueOf(i)));
sourceRevisionToSlime(deployment.revision().source(), response);
- com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName);
- Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region));
-
// Cost
- try {
- ApplicationCost appCost = controller.getApplicationCost(applicationId, zoneId);
- Cursor costObject = response.setObject("cost");
- CostJsonModelAdapter.toSlime(appCost, costObject);
- } catch (NotFoundCheckedException nfce) {
- log.log(Level.FINE, "Application cost data not found. " + nfce.getMessage());
- }
+ DeploymentCost appCost = deployment.calculateCost();
+ Cursor costObject = response.setObject("cost");
+ toSlime(appCost, costObject);
// Metrics
+ com.yahoo.config.provision.ApplicationId applicationId = com.yahoo.config.provision.ApplicationId.from(tenantName, applicationName, instanceName);
+ Zone zoneId = new Zone(Environment.from(environment), RegionName.from(region));
try {
MetricsService.DeploymentMetrics metrics = controller.metricsService().getDeploymentMetrics(applicationId, zoneId);
Cursor metricsObject = response.setObject("metrics");
@@ -679,7 +685,7 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
Application application = controller.applications().require(id);
if (application.deploying().isPresent())
throw new IllegalArgumentException("Can not start a deployment of " + application + " at this time: " +
- application.deploying() + " is in progress");
+ application.deploying().get() + " is in progress");
Version version = decideDeployVersion(request);
if ( ! systemHasVersion(version))
@@ -687,14 +693,26 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
"Version is not active in this system. " +
"Active versions: " + controller.versionStatus().versions());
- // Since we manually triggered it we don't want this to be self-triggering for the time being
- controller.applications().store(application.with(application.deploymentJobs().asSelfTriggering(false)), lock);
-
controller.applications().deploymentTrigger().triggerChange(application.id(), new Change.VersionChange(version));
return new MessageResponse("Triggered deployment of " + application + " on version " + version);
}
}
-
+
+ /** Cancel any ongoing change for given application */
+ private HttpResponse cancelDeploy(String tenantName, String applicationName) {
+ ApplicationId id = ApplicationId.from(tenantName, applicationName, "default");
+ try (Lock lock = controller.applications().lock(id)) {
+ Application application = controller.applications().require(id);
+ Optional<Change> change = application.deploying();
+ if (!change.isPresent()) {
+ return new MessageResponse("No deployment in progress for " + application + " at this time");
+ }
+ controller.applications().deploymentTrigger().cancelChange(id);
+ return new MessageResponse("Cancelled " + change.get() + " for " + application);
+ }
+ }
+
+ /** Schedule restart of deployment, or specific host in a deployment */
private HttpResponse restart(String tenantName, String applicationName, String instanceName, String environment, String region, HttpRequest request) {
DeploymentId deploymentId = new DeploymentId(ApplicationId.from(tenantName, applicationName, instanceName),
new Zone(Environment.from(environment), RegionName.from(region)));
@@ -1064,4 +1082,50 @@ public class ApplicationApiHandler extends LoggingRequestHandler {
return controller.systemVersion();
}
+ public static void toSlime(DeploymentCost deploymentCost, Cursor object) {
+ object.setLong("tco", (long)deploymentCost.getTco());
+ object.setDouble("utilization", deploymentCost.getUtilization());
+ object.setDouble("waste", deploymentCost.getWaste());
+ Cursor clustersObject = object.setObject("cluster");
+ for (Map.Entry<String, ClusterCost> clusterEntry : deploymentCost.getCluster().entrySet())
+ toSlime(clusterEntry.getValue(), clustersObject.setObject(clusterEntry.getKey()));
+ }
+
+ private static void toSlime(ClusterCost clusterCost, Cursor object) {
+ object.setLong("count", clusterCost.getClusterInfo().getHostnames().size());
+ object.setString("resource", getResourceName(clusterCost.getResultUtilization()));
+ object.setDouble("utilization", clusterCost.getResultUtilization().getMaxUtilization());
+ object.setLong("tco", (int)clusterCost.getTco());
+ object.setString("flavor", clusterCost.getClusterInfo().getFlavor());
+ object.setLong("waste", (int)clusterCost.getWaste());
+ object.setString("type", clusterCost.getClusterInfo().getClusterType().name());
+ Cursor utilObject = object.setObject("util");
+ utilObject.setDouble("cpu", clusterCost.getResultUtilization().getCpu());
+ utilObject.setDouble("mem", clusterCost.getResultUtilization().getMemory());
+ utilObject.setDouble("disk", clusterCost.getResultUtilization().getDisk());
+ utilObject.setDouble("diskBusy", clusterCost.getResultUtilization().getDiskBusy());
+ Cursor usageObject = object.setObject("usage");
+ usageObject.setDouble("cpu", clusterCost.getSystemUtilization().getCpu());
+ usageObject.setDouble("mem", clusterCost.getSystemUtilization().getMemory());
+ usageObject.setDouble("disk", clusterCost.getSystemUtilization().getDisk());
+ usageObject.setDouble("diskBusy", clusterCost.getSystemUtilization().getDiskBusy());
+ Cursor hostnamesArray = object.setArray("hostnames");
+ for (String hostname : clusterCost.getClusterInfo().getHostnames())
+ hostnamesArray.addString(hostname);
+ }
+
+ private static String getResourceName(ClusterUtilization utilization) {
+ String name = "cpu";
+ double max = utilization.getMaxUtilization();
+
+ if (utilization.getMemory() == max) {
+ name = "mem";
+ } else if (utilization.getDisk() == max) {
+ name = "disk";
+ } else if (utilization.getDiskBusy() == max) {
+ name = "diskbusy";
+ }
+
+ return name;
+ }
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java
index e02a31440ce..03ac073a34a 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiHandler.java
@@ -5,6 +5,10 @@ import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
import com.yahoo.container.logging.AccessLog;
+import com.yahoo.io.IOUtils;
+import com.yahoo.slime.Inspector;
+import com.yahoo.slime.Slime;
+import com.yahoo.vespa.config.SlimeUtils;
import com.yahoo.vespa.hosted.controller.maintenance.ControllerMaintenance;
import com.yahoo.vespa.hosted.controller.restapi.ErrorResponse;
import com.yahoo.vespa.hosted.controller.restapi.MessageResponse;
@@ -12,6 +16,9 @@ import com.yahoo.vespa.hosted.controller.restapi.Path;
import com.yahoo.vespa.hosted.controller.restapi.ResourceResponse;
import com.yahoo.yolean.Exceptions;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.UncheckedIOException;
import java.util.concurrent.Executor;
import java.util.logging.Level;
@@ -21,6 +28,7 @@ import java.util.logging.Level;
*
* @author bratseth
*/
+@SuppressWarnings("unused") // Created by injection
public class ControllerApiHandler extends LoggingRequestHandler {
private final ControllerMaintenance maintenance;
@@ -34,9 +42,10 @@ public class ControllerApiHandler extends LoggingRequestHandler {
public HttpResponse handle(HttpRequest request) {
try {
switch (request.getMethod()) {
- case GET: return handleGET(request);
- case POST: return handlePOST(request);
- case DELETE: return handleDELETE(request);
+ case GET: return get(request);
+ case POST: return post(request);
+ case DELETE: return delete(request);
+ case PATCH: return patch(request);
default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
}
}
@@ -49,27 +58,36 @@ public class ControllerApiHandler extends LoggingRequestHandler {
}
}
- private HttpResponse handleGET(HttpRequest request) {
+ private HttpResponse get(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/controller/v1/")) return root(request);
if (path.matches("/controller/v1/maintenance/")) return new JobsResponse(maintenance.jobControl());
- return ErrorResponse.notFoundError("Nothing at " + path);
+ if (path.matches("/controller/v1/jobs/upgrader")) return new UpgraderResponse(maintenance.upgrader().upgradesPerMinute());
+ return notFound(path);
}
- private HttpResponse handlePOST(HttpRequest request) {
+ private HttpResponse post(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/controller/v1/maintenance/inactive/{jobName}"))
return setActive(path.get("jobName"), false);
- return ErrorResponse.notFoundError("Nothing at " + path);
+ return notFound(path);
}
- private HttpResponse handleDELETE(HttpRequest request) {
+ private HttpResponse delete(HttpRequest request) {
Path path = new Path(request.getUri().getPath());
if (path.matches("/controller/v1/maintenance/inactive/{jobName}"))
return setActive(path.get("jobName"), true);
- return ErrorResponse.notFoundError("Nothing at " + path);
+ return notFound(path);
}
+ private HttpResponse patch(HttpRequest request) {
+ Path path = new Path(request.getUri().getPath());
+ if (path.matches("/controller/v1/jobs/upgrader")) return configureUpgrader(request);
+ return notFound(path);
+ }
+
+ private HttpResponse notFound(Path path) { return ErrorResponse.notFoundError("Nothing at " + path); }
+
private HttpResponse root(HttpRequest request) {
return new ResourceResponse(request, "maintenance");
}
@@ -81,4 +99,23 @@ public class ControllerApiHandler extends LoggingRequestHandler {
return new MessageResponse((active ? "Re-activated" : "Deactivated" ) + " job '" + jobName + "'");
}
+ private HttpResponse configureUpgrader(HttpRequest request) {
+ String upgradesPerMinuteField = "upgradesPerMinute";
+ Slime slime = toSlime(request.getData());
+ Inspector inspect = slime.get();
+ if (inspect.field(upgradesPerMinuteField).valid()) {
+ maintenance.upgrader().setUpgradesPerMinute(inspect.field(upgradesPerMinuteField).asDouble());
+ }
+ return new UpgraderResponse(maintenance.upgrader().upgradesPerMinute());
+ }
+
+ private Slime toSlime(InputStream jsonStream) {
+ try {
+ byte[] jsonBytes = IOUtils.readBytes(jsonStream, 1000 * 1000);
+ return SlimeUtils.jsonToSlime(jsonBytes);
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/UpgraderResponse.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/UpgraderResponse.java
new file mode 100644
index 00000000000..fe88a0f1f22
--- /dev/null
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/controller/UpgraderResponse.java
@@ -0,0 +1,35 @@
+package com.yahoo.vespa.hosted.controller.restapi.controller;
+
+import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.slime.Cursor;
+import com.yahoo.slime.JsonFormat;
+import com.yahoo.slime.Slime;
+
+import java.io.IOException;
+import java.io.OutputStream;
+
+/**
+ * @author mpolden
+ */
+public class UpgraderResponse extends HttpResponse {
+
+ private final double upgradesPerMinute;
+
+ public UpgraderResponse(double upgradesPerMinute) {
+ super(200);
+ this.upgradesPerMinute = upgradesPerMinute;
+ }
+
+ @Override
+ public void render(OutputStream outputStream) throws IOException {
+ Slime slime = new Slime();
+ Cursor root = slime.setObject();
+ root.setDouble("upgradesPerMinute", upgradesPerMinute);
+ new JsonFormat(true).encode(outputStream, slime);
+ }
+
+ @Override
+ public String getContentType() {
+ return "application/json";
+ }
+}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiHandler.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiHandler.java
index c2695554da7..7ea82881014 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiHandler.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiHandler.java
@@ -12,12 +12,14 @@ import com.yahoo.slime.Cursor;
import com.yahoo.slime.Inspector;
import com.yahoo.slime.Slime;
import com.yahoo.vespa.config.SlimeUtils;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
import com.yahoo.vespa.hosted.controller.api.integration.BuildService.BuildJob;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobReport;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobType;
import com.yahoo.vespa.hosted.controller.restapi.ErrorResponse;
+import com.yahoo.vespa.hosted.controller.restapi.Path;
import com.yahoo.vespa.hosted.controller.restapi.SlimeJsonResponse;
import com.yahoo.vespa.hosted.controller.restapi.StringResponse;
import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
@@ -27,6 +29,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import java.util.Optional;
+import java.util.Scanner;
import java.util.concurrent.Executor;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -36,13 +39,14 @@ import java.util.logging.Logger;
* on completion.
*
* @author bratseth
+ * @author mpolden
*/
+@SuppressWarnings("unused") // Handler
public class ScrewdriverApiHandler extends LoggingRequestHandler {
private final static Logger log = Logger.getLogger(ScrewdriverApiHandler.class.getName());
private final Controller controller;
- // TODO: Remember to distinguish between PR jobs and component ones, by adding reports to the right jobs?
public ScrewdriverApiHandler(Executor executor, AccessLog accessLog, Controller controller) {
super(executor, accessLog);
@@ -51,24 +55,13 @@ public class ScrewdriverApiHandler extends LoggingRequestHandler {
@Override
public HttpResponse handle(HttpRequest request) {
+ Method method = request.getMethod();
try {
- Method method = request.getMethod();
- String path = request.getUri().getPath();
switch (method) {
- case GET: switch (path) {
- case "/screwdriver/v1/release/vespa": return vespaVersion();
- case "/screwdriver/v1/jobsToRun": return buildJobResponse(controller.applications().deploymentTrigger().buildSystem().jobs());
- default: return ErrorResponse.notFoundError(String.format( "No '%s' handler at '%s'", method, path));
- }
- case POST: switch (path) {
- case "/screwdriver/v1/jobreport": return handleJobReportPost(request);
- default: return ErrorResponse.notFoundError(String.format( "No '%s' handler at '%s'", method, path));
- }
- case DELETE: switch (path) {
- case "/screwdriver/v1/jobsToRun": return buildJobResponse(controller.applications().deploymentTrigger().buildSystem().takeJobsToRun());
- default: return ErrorResponse.notFoundError(String.format( "No '%s' handler at '%s'", method, path));
- }
- default: return ErrorResponse.methodNotAllowed("Method '" + request.getMethod() + "' is not supported");
+ case GET: return get(request);
+ case POST: return post(request);
+ case DELETE: return delete(request);
+ default: return ErrorResponse.methodNotAllowed("Method '" + method + "' is unsupported");
}
} catch (IllegalArgumentException|IllegalStateException e) {
return ErrorResponse.badRequest(Exceptions.toMessageString(e));
@@ -77,7 +70,57 @@ public class ScrewdriverApiHandler extends LoggingRequestHandler {
return ErrorResponse.internalServerError(Exceptions.toMessageString(e));
}
}
-
+
+ private HttpResponse get(HttpRequest request) {
+ Path path = new Path(request.getUri().getPath());
+ if (path.matches("/screwdriver/v1/release/vespa")) {
+ return vespaVersion();
+ }
+ if (path.matches("/screwdriver/v1/jobsToRun")) {
+ return buildJobs(controller.applications().deploymentTrigger().buildSystem().jobs());
+ }
+ return notFound(request);
+ }
+
+ private HttpResponse post(HttpRequest request) {
+ Path path = new Path(request.getUri().getPath());
+ if (path.matches("/screwdriver/v1/jobreport")) {
+ return notifyJobCompletion(request);
+ }
+ if (path.matches("/screwdriver/v1/trigger/tenant/{tenant}/application/{application}")) {
+ return trigger(request, path.get("tenant"), path.get("application"));
+ }
+ return notFound(request);
+ }
+
+ private HttpResponse delete(HttpRequest request) {
+ Path path = new Path(request.getUri().getPath());
+ if (path.matches("/screwdriver/v1/jobsToRun")) {
+ return buildJobs(controller.applications().deploymentTrigger().buildSystem().takeJobsToRun());
+ }
+ return notFound(request);
+ }
+
+ private HttpResponse trigger(HttpRequest request, String tenantName, String applicationName) {
+ ApplicationId applicationId = ApplicationId.from(tenantName, applicationName, "default");
+ Optional<Application> application = controller.applications().get(applicationId);
+ if (!application.isPresent()) {
+ return ErrorResponse.notFoundError("No such application '" + applicationId.toShortString() + "'");
+ }
+ JobType jobType = Optional.of(asString(request.getData()))
+ .filter(s -> !s.isEmpty())
+ .map(JobType::fromId)
+ .orElse(JobType.component);
+ // Since this is a manual operation we likely want it to trigger as soon as possible, so we add it to the
+ // front of the queue
+ controller.applications().deploymentTrigger().buildSystem().addJob(application.get().id(), jobType, true);
+
+ Slime slime = new Slime();
+ Cursor cursor = slime.setObject();
+ cursor.setString("message", "Triggered " + jobType.id() + " for " + application.get().id());
+ return new SlimeJsonResponse(slime);
+ }
+
private HttpResponse vespaVersion() {
VespaVersion version = controller.versionStatus().version(controller.systemVersion());
if (version == null)
@@ -92,7 +135,7 @@ public class ScrewdriverApiHandler extends LoggingRequestHandler {
}
- private HttpResponse buildJobResponse(List<BuildJob> buildJobs) {
+ private HttpResponse buildJobs(List<BuildJob> buildJobs) {
Slime slime = new Slime();
Cursor buildJobArray = slime.setArray();
for (BuildJob buildJob : buildJobs) {
@@ -103,24 +146,7 @@ public class ScrewdriverApiHandler extends LoggingRequestHandler {
return new SlimeJsonResponse(slime);
}
- /**
- * Parse a JSON blob of the form:
- * {
- * "tenant" : String
- * "application" : String
- * "instance" : String
- * "jobName" : String
- * "projectId" : long
- * "success" : boolean
- * "selfTriggering": boolean
- * "vespaVersion" : String
- * }
- * and notify the controller of the report.
- *
- * @param request The JSON blob.
- * @return 200
- */
- private HttpResponse handleJobReportPost(HttpRequest request) {
+ private HttpResponse notifyJobCompletion(HttpRequest request) {
controller.applications().notifyJobCompletion(toJobReport(toSlime(request.getData()).get()));
return new StringResponse("ok");
}
@@ -151,4 +177,17 @@ public class ScrewdriverApiHandler extends LoggingRequestHandler {
);
}
+ private static String asString(InputStream in) {
+ Scanner scanner = new Scanner(in).useDelimiter("\\A");
+ if (scanner.hasNext()) {
+ return scanner.next();
+ }
+ return "";
+ }
+
+ private static HttpResponse notFound(HttpRequest request) {
+ return ErrorResponse.notFoundError(String.format("No '%s' handler at '%s'", request.getMethod(),
+ request.getUri().getPath()));
+ }
+
}
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
index 948301929cf..c3f3773cde6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VersionStatus.java
@@ -8,6 +8,7 @@ import com.yahoo.component.Vtag;
import com.yahoo.vespa.hosted.controller.api.integration.github.GitSha;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.Controller;
+import com.yahoo.vespa.hosted.controller.application.ApplicationList;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
@@ -132,7 +133,7 @@ public class VersionStatus {
versionMap.put(infrastructureVersion, DeploymentStatistics.empty(infrastructureVersion));
}
- for (Application application : applications) {
+ for (Application application : ApplicationList.from(applications).notPullRequest().asList()) {
DeploymentJobs jobs = application.deploymentJobs();
// Note that each version deployed on this application exists
diff --git a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java
index ce5533bd0bc..3208f4d09c6 100644
--- a/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java
+++ b/controller-server/src/main/java/com/yahoo/vespa/hosted/controller/versions/VespaVersion.java
@@ -49,8 +49,8 @@ public class VespaVersion implements Comparable<VespaVersion> {
.notFailing();
ApplicationList failingOnThis = ApplicationList.from(statistics.failing(), controller.applications());
ApplicationList all = ApplicationList.from(controller.applications().asList())
- .hasDeployment()
- .notPullRequest();
+ .hasDeployment()
+ .notPullRequest();
// 'broken' if any Canary fails
if ( ! failingOnThis.with(UpgradePolicy.canary).isEmpty())
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ConfigServerClientMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ConfigServerClientMock.java
index 6018c99206e..9db852374b8 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ConfigServerClientMock.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ConfigServerClientMock.java
@@ -9,6 +9,7 @@ import com.yahoo.component.Version;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerClient;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.Log;
+import com.yahoo.vespa.hosted.controller.api.integration.configserver.NodeList;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.PrepareResponse;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeployOptions;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus;
@@ -21,6 +22,7 @@ import com.yahoo.vespa.serviceview.bindings.ApplicationView;
import com.yahoo.vespa.serviceview.bindings.ClusterView;
import com.yahoo.vespa.serviceview.bindings.ServiceView;
+import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
@@ -51,8 +53,17 @@ public class ConfigServerClientMock extends AbstractComponent implements ConfigS
/** The exception to throw on the next prepare run, or null to continue normally */
private RuntimeException prepareException = null;
- /** The version given in the previous prepare call, or null if no call has been made */
- public Optional<Version> lastPrepareVersion = null;
+ private Optional<Version> lastPrepareVersion = Optional.empty();
+
+ /** The version given in the previous prepare call, or empty if no call has been made */
+ public Optional<Version> lastPrepareVersion() {
+ return lastPrepareVersion;
+ }
+
+ /** Return map of applications that may have been activated */
+ public Map<ApplicationId, Boolean> activated() {
+ return Collections.unmodifiableMap(applicationActivated);
+ }
@Override
public PreparedApplication prepare(DeploymentId deployment, DeployOptions deployOptions, Set<String> rotationCnames, Set<Rotation> rotations, byte[] content) {
@@ -201,4 +212,29 @@ public class ConfigServerClientMock extends AbstractComponent implements ConfigS
? endpoints.get(endpoint)
: result;
}
+
+ @Override
+ public NodeList getNodeList(DeploymentId deployment) throws IOException {
+ NodeList list = new NodeList();
+ list.nodes = new ArrayList<>();
+ NodeList.Node hostA = new NodeList.Node();
+ hostA.hostname = "hostA";
+ hostA.cost = 10;
+ hostA.flavor = "C-2B/24/500";
+ hostA.membership = new NodeList.Node.Membership();
+ hostA.membership.clusterId = "clusterA";
+ hostA.membership.clusterType = "container";
+ list.nodes.add(hostA);
+
+ NodeList.Node hostB = new NodeList.Node();
+ hostB.hostname = "hostB";
+ hostB.cost = 20;
+ hostB.flavor = "C-2C/24/500";
+ hostB.membership = new NodeList.Node.Membership();
+ hostB.membership.clusterId = "clusterB";
+ hostB.membership.clusterType = "content";
+ list.nodes.add(hostB);
+
+ return list;
+ }
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
index 7a6dce9f8fa..6fc787d940e 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTest.java
@@ -17,16 +17,10 @@ import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.api.Tenant;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.DeployOptions;
import com.yahoo.vespa.hosted.controller.api.application.v4.model.EndpointStatus;
-import com.yahoo.vespa.hosted.controller.api.application.v4.model.GitRevision;
-import com.yahoo.vespa.hosted.controller.api.application.v4.model.ScrewdriverBuildJob;
import com.yahoo.vespa.hosted.controller.api.identifiers.AthensDomain;
import com.yahoo.vespa.hosted.controller.api.identifiers.DeploymentId;
-import com.yahoo.vespa.hosted.controller.api.identifiers.GitBranch;
-import com.yahoo.vespa.hosted.controller.api.identifiers.GitCommit;
-import com.yahoo.vespa.hosted.controller.api.identifiers.GitRepository;
import com.yahoo.vespa.hosted.controller.api.identifiers.Property;
import com.yahoo.vespa.hosted.controller.api.identifiers.PropertyId;
-import com.yahoo.vespa.hosted.controller.api.identifiers.ScrewdriverId;
import com.yahoo.vespa.hosted.controller.api.identifiers.TenantId;
import com.yahoo.vespa.hosted.controller.api.identifiers.UserGroup;
import com.yahoo.vespa.hosted.controller.api.integration.BuildService.BuildJob;
@@ -207,7 +201,6 @@ public class ControllerTest {
Version systemVersion = tester.controller().versionStatus().systemVersion().get().versionNumber();
Application app1 = tester.createApplication("application1", "tenant1", 1, 1L);
- applications.store(app1.with(app1.deploymentJobs().asSelfTriggering(false)), applications.lock(app1.id()));
// First deployment: An application change
applications.notifyJobCompletion(mockReport(app1, component, true));
@@ -217,7 +210,7 @@ public class ControllerTest {
app1 = applications.require(app1.id());
assertEquals("First deployment gets system version", systemVersion, app1.deployedVersion().get());
- assertEquals(systemVersion, tester.configServerClientMock().lastPrepareVersion.get());
+ assertEquals(systemVersion, tester.configServer().lastPrepareVersion().get());
// Unexpected deployment
tester.deploy(productionUsWest1, app1, applicationPackage);
@@ -240,18 +233,14 @@ public class ControllerTest {
app1 = applications.require(app1.id());
assertEquals("Application change preserves version", systemVersion, app1.deployedVersion().get());
- assertEquals(systemVersion, tester.configServerClientMock().lastPrepareVersion.get());
+ assertEquals(systemVersion, tester.configServer().lastPrepareVersion().get());
// A deployment to the new region gets the same version
- applicationPackage = new ApplicationPackageBuilder()
- .environment(Environment.prod)
- .region("us-west-1")
- .region("us-east-3")
- .build();
tester.deployAndNotify(app1, applicationPackage, true, productionUsEast3);
app1 = applications.require(app1.id());
assertEquals("Application change preserves version", systemVersion, app1.deployedVersion().get());
- assertEquals(systemVersion, tester.configServerClientMock().lastPrepareVersion.get());
+ assertEquals(systemVersion, tester.configServer().lastPrepareVersion().get());
+ assertFalse("Change deployed", app1.deploying().isPresent());
// Version upgrade changes system version
Change.VersionChange change = new Change.VersionChange(newSystemVersion);
@@ -263,7 +252,7 @@ public class ControllerTest {
app1 = applications.require(app1.id());
assertEquals("Version upgrade changes version", newSystemVersion, app1.deployedVersion().get());
- assertEquals(newSystemVersion, tester.configServerClientMock().lastPrepareVersion.get());
+ assertEquals(newSystemVersion, tester.configServer().lastPrepareVersion().get());
}
/** Adds a new version, higher than the current system version, makes it the system version and returns it */
@@ -407,64 +396,76 @@ public class ControllerTest {
public void requeueOutOfCapacityStagingJob() {
DeploymentTester tester = new DeploymentTester();
- long fooProjectId = 1;
- long barProjectId = 2;
- Application foo = tester.createApplication("app1", "foo", fooProjectId, 1L);
- Application bar = tester.createApplication("app2", "bar", barProjectId, 1L);
+ long project1 = 1;
+ long project2 = 2;
+ long project3 = 3;
+ Application app1 = tester.createApplication("app1", "tenant1", project1, 1L);
+ Application app2 = tester.createApplication("app2", "tenant2", project2, 1L);
+ Application app3 = tester.createApplication("app3", "tenant3", project3, 1L);
BuildSystem buildSystem = tester.controller().applications().deploymentTrigger().buildSystem();
- // foo: passes system test
- tester.notifyJobCompletion(component, foo, true);
- tester.deployAndNotify(foo, applicationPackage, true, systemTest);
+ // all applications: system-test completes successfully
+ tester.notifyJobCompletion(component, app1, true);
+ tester.deployAndNotify(app1, applicationPackage, true, systemTest);
- // bar: passes system test
- tester.notifyJobCompletion(component, bar, true);
- tester.deployAndNotify(bar, applicationPackage, true, systemTest);
+ tester.notifyJobCompletion(component, app2, true);
+ tester.deployAndNotify(app2, applicationPackage, true, systemTest);
- // foo and bar: staging test jobs queued
- assertEquals(2, buildSystem.jobs().size());
+ tester.notifyJobCompletion(component, app3, true);
+ tester.deployAndNotify(app3, applicationPackage, true, systemTest);
- // foo: staging-test job fails with out of capacity and is added to the front of the queue
- {
- tester.deploy(stagingTest, foo, applicationPackage);
- tester.notifyJobCompletion(stagingTest, foo, Optional.of(JobError.outOfCapacity));
- List<BuildJob> nextJobs = buildSystem.takeJobsToRun();
- assertEquals("staging-test jobs are returned one at a time",1, nextJobs.size());
- assertEquals(stagingTest.id(), nextJobs.get(0).jobName());
- assertEquals(fooProjectId, nextJobs.get(0).projectId());
- }
+ // all applications: staging test jobs queued
+ assertEquals(3, buildSystem.jobs().size());
- // bar: Completes deployment
- tester.deployAndNotify(bar, applicationPackage, true, stagingTest);
- tester.deployAndNotify(bar, applicationPackage, true, productionCorpUsEast1);
+ // app1: staging-test job fails with out of capacity and is added to the front of the queue
+ tester.deploy(stagingTest, app1, applicationPackage);
+ tester.notifyJobCompletion(stagingTest, app1, Optional.of(JobError.outOfCapacity));
+ assertEquals(stagingTest.id(), buildSystem.jobs().get(0).jobName());
+ assertEquals(project1, buildSystem.jobs().get(0).projectId());
- // foo: 15 minutes pass, staging-test job is still failing due out of capacity, but is no longer re-queued by
+ // app2 and app3: Completes deployment
+ tester.deployAndNotify(app2, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(app2, applicationPackage, true, productionCorpUsEast1);
+ tester.deployAndNotify(app3, applicationPackage, true, stagingTest);
+ tester.deployAndNotify(app3, applicationPackage, true, productionCorpUsEast1);
+
+ // app1: 15 minutes pass, staging-test job is still failing due out of capacity, but is no longer re-queued by
// out of capacity retry mechanism
tester.clock().advance(Duration.ofMinutes(15));
- tester.notifyJobCompletion(component, foo, true);
- tester.deployAndNotify(foo, applicationPackage, true, systemTest);
- tester.deploy(stagingTest, foo, applicationPackage);
+ tester.notifyJobCompletion(component, app1, true);
+ tester.deployAndNotify(app1, applicationPackage, true, systemTest);
+ tester.deploy(stagingTest, app1, applicationPackage);
assertEquals(1, buildSystem.takeJobsToRun().size());
- tester.notifyJobCompletion(stagingTest, foo, Optional.of(JobError.outOfCapacity));
+ tester.notifyJobCompletion(stagingTest, app1, Optional.of(JobError.outOfCapacity));
assertTrue("No jobs queued", buildSystem.jobs().isEmpty());
- // bar: New change triggers another staging-test job
- tester.notifyJobCompletion(component, bar, true);
- tester.deployAndNotify(bar, applicationPackage, true, systemTest);
- assertEquals(1, buildSystem.jobs().size());
+ // app2 and app3: New change triggers staging-test jobs
+ tester.notifyJobCompletion(component, app2, true);
+ tester.deployAndNotify(app2, applicationPackage, true, systemTest);
+
+ tester.notifyJobCompletion(component, app3, true);
+ tester.deployAndNotify(app3, applicationPackage, true, systemTest);
+
+ assertEquals(2, buildSystem.jobs().size());
- // foo: 4 hours pass in total, staging-test job is re-queued by periodic trigger mechanism and added at the
+ // app1: 4 hours pass in total, staging-test job is re-queued by periodic trigger mechanism and added at the
// back of the queue
tester.clock().advance(Duration.ofHours(3));
tester.clock().advance(Duration.ofMinutes(50));
tester.failureRedeployer().maintain();
List<BuildJob> nextJobs = buildSystem.takeJobsToRun();
+ assertEquals(2, nextJobs.size());
assertEquals(stagingTest.id(), nextJobs.get(0).jobName());
- assertEquals(barProjectId, nextJobs.get(0).projectId());
+ assertEquals(project2, nextJobs.get(0).projectId());
+ assertEquals(stagingTest.id(), nextJobs.get(1).jobName());
+ assertEquals(project3, nextJobs.get(1).projectId());
+
+ // And finally the requeued job for app1
nextJobs = buildSystem.takeJobsToRun();
+ assertEquals(1, nextJobs.size());
assertEquals(stagingTest.id(), nextJobs.get(0).jobName());
- assertEquals(fooProjectId, nextJobs.get(0).projectId());
+ assertEquals(project1, nextJobs.get(0).projectId());
}
private void assertStatus(JobStatus expectedStatus, ApplicationId id, Controller controller) {
@@ -517,41 +518,31 @@ public class ControllerTest {
@Test
public void testDeployUntestedChangeFails() {
ControllerTester tester = new ControllerTester();
- ApplicationController applications = tester.controller().applications();TenantId tenant = tester.createTenant("tenant1", "domain1", 11L);
+ ApplicationController applications = tester.controller().applications();
+ TenantId tenant = tester.createTenant("tenant1", "domain1", 11L);
Application app = tester.createApplication(tenant, "app1", "default", 1);
- app = app.withDeploying(Optional.of(new Change.VersionChange(Version.fromString("6.3"))))
- .with(app.deploymentJobs().asSelfTriggering(false));
+ app = app.withDeploying(Optional.of(new Change.VersionChange(Version.fromString("6.3"))));
applications.store(app, applications.lock(app.id()));
try {
tester.deploy(app, new Zone(Environment.prod, RegionName.from("us-east-3")));
fail("Expected exception");
} catch (IllegalArgumentException e) {
- assertEquals("Rejecting deployment of application 'tenant1.app1' to zone prod.us-east-3 as pending version change to 6.3 is untested", e.getMessage());
+ assertEquals("Rejecting deployment of application 'tenant1.app1' to zone prod.us-east-3 as version change to 6.3 is not tested", e.getMessage());
}
}
- private void legacyDeploy(Controller controller, Application application, ApplicationPackage applicationPackage, Zone zone, Optional<Version> version, boolean deployCurrentVersion) {
- ScrewdriverId app1ScrewdriverId = new ScrewdriverId(String.valueOf(application.deploymentJobs().projectId().get()));
- GitRevision app1RevisionId = new GitRevision(new GitRepository("repo"), new GitBranch("master"), new GitCommit("commit1"));
- controller.applications().deployApplication(application.id(),
- zone,
- applicationPackage,
- new DeployOptions(Optional.of(new ScrewdriverBuildJob(app1ScrewdriverId, app1RevisionId)), version, false, deployCurrentVersion));
-
- }
-
@Test
public void testCleanupOfStaleDeploymentData() throws IOException {
DeploymentTester tester = new DeploymentTester();
- tester.controllerTester().getZoneRegistryMock().setSystem(SystemName.cd);
+ tester.controllerTester().zoneRegistry().setSystem(SystemName.cd);
Supplier<Map<JobType, JobStatus>> statuses = () ->
tester.application(ApplicationId.from("vespa", "canary", "default")).deploymentJobs().jobStatus();
// Current system version, matches version in test data
Version version = Version.fromString("6.141.117");
- tester.configServerClientMock().setDefaultConfigServerVersion(version);
+ tester.configServer().setDefaultConfigServerVersion(version);
tester.updateVersionStatus(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
@@ -581,7 +572,7 @@ public class ControllerTest {
// New version is released
version = Version.fromString("6.142.1");
- tester.configServerClientMock().setDefaultConfigServerVersion(version);
+ tester.configServer().setDefaultConfigServerVersion(version);
tester.updateVersionStatus(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
@@ -623,4 +614,31 @@ public class ControllerTest {
assertEquals("fake-global-rotation-tenant1.app1", record.get().value());
}
+ @Test
+ public void testDeployWithoutProjectId() {
+ DeploymentTester tester = new DeploymentTester();
+ tester.controllerTester().zoneRegistry().setSystem(SystemName.cd);
+ ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
+ .environment(Environment.prod)
+ .region("cd-us-central-1")
+ .build();
+
+ // Create application
+ Application app = tester.createApplication("app1", "tenant1", 1, 2L);
+
+ // Direct deploy is allowed when project ID is missing
+ Zone zone = new Zone(Environment.prod, RegionName.from("cd-us-central-1"));
+ // Same options as used in our integration tests
+ DeployOptions options = new DeployOptions(Optional.empty(), Optional.empty(), false,
+ false);
+ tester.controller().applications().deployApplication(app.id(), zone, applicationPackage, options);
+
+ assertTrue("Application deployed and activated",
+ tester.controllerTester().configServer().activated().getOrDefault(app.id(), false));
+
+ assertTrue("No job status added",
+ tester.applications().require(app.id()).deploymentJobs().jobStatus().isEmpty());
+
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
index 70e370da502..5184cde79c2 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/ControllerTester.java
@@ -30,8 +30,6 @@ import com.yahoo.vespa.hosted.controller.api.integration.github.GitHubMock;
import com.yahoo.vespa.hosted.controller.api.integration.jira.JiraMock;
import com.yahoo.vespa.hosted.controller.api.integration.routing.MemoryGlobalRoutingService;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
-import com.yahoo.vespa.hosted.controller.cost.CostMock;
-import com.yahoo.vespa.hosted.controller.cost.MockInsightBackend;
import com.yahoo.vespa.hosted.controller.integration.MockMetricsService;
import com.yahoo.vespa.hosted.controller.persistence.ControllerDb;
import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
@@ -48,64 +46,68 @@ import static org.junit.Assert.assertTrue;
/**
* Convenience methods for controller tests.
- * This completely wraps TestEnvironment to make it easier to get rid of that in the future.
- *
+ *
* @author bratseth
+ * @author mpolden
*/
public final class ControllerTester {
- private final ControllerDb db = new MemoryControllerDb();
- private final AthensDbMock athensDb = new AthensDbMock();
- private final ManualClock clock = new ManualClock();
- private final ConfigServerClientMock configServerClientMock = new ConfigServerClientMock();
- private final ZoneRegistryMock zoneRegistryMock = new ZoneRegistryMock();
- private final GitHubMock gitHubMock = new GitHubMock();
- private final CuratorDb curator = new MockCuratorDb();
- private final MemoryNameService memoryNameService = new MemoryNameService();
- private Controller controller = createController(db, curator, configServerClientMock, clock, gitHubMock,
- zoneRegistryMock, athensDb, memoryNameService);
-
- private static final Controller createController(ControllerDb db, CuratorDb curator,
- ConfigServerClientMock configServerClientMock, ManualClock clock,
- GitHubMock gitHubClientMock, ZoneRegistryMock zoneRegistryMock,
- AthensDbMock athensDb, MemoryNameService nameService) {
- Controller controller = new Controller(db,
- curator,
- new MemoryRotationRepository(),
- gitHubClientMock,
- new JiraMock(),
- new MemoryEntityService(),
- new MemoryGlobalRoutingService(),
- zoneRegistryMock,
- new CostMock(new MockInsightBackend()),
- configServerClientMock,
- new MockMetricsService(),
- nameService,
- new MockRoutingGenerator(),
- new ChefMock(),
- clock,
- new AthensMock(athensDb));
- controller.updateVersionStatus(VersionStatus.compute(controller));
- return controller;
+ private final ControllerDb db;
+ private final AthensDbMock athensDb;
+ private final ManualClock clock;
+ private final ConfigServerClientMock configServer;
+ private final ZoneRegistryMock zoneRegistry;
+ private final GitHubMock gitHub;
+ private final CuratorDb curator;
+ private final MemoryNameService nameService;
+
+ private Controller controller;
+
+ public ControllerTester() {
+ this(new MemoryControllerDb(), new AthensDbMock(), new ManualClock(), new ConfigServerClientMock(),
+ new ZoneRegistryMock(), new GitHubMock(), new MockCuratorDb(), new MemoryNameService());
+ }
+
+ public ControllerTester(ManualClock clock) {
+ this(new MemoryControllerDb(), new AthensDbMock(), clock, new ConfigServerClientMock(),
+ new ZoneRegistryMock(), new GitHubMock(), new MockCuratorDb(), new MemoryNameService());
+ }
+
+ private ControllerTester(ControllerDb db, AthensDbMock athensDb, ManualClock clock,
+ ConfigServerClientMock configServer, ZoneRegistryMock zoneRegistry,
+ GitHubMock gitHub, CuratorDb curator, MemoryNameService nameService) {
+ this.db = db;
+ this.athensDb = athensDb;
+ this.clock = clock;
+ this.configServer = configServer;
+ this.zoneRegistry = zoneRegistry;
+ this.gitHub = gitHub;
+ this.curator = curator;
+ this.nameService = nameService;
+ this.controller = createController(db, curator, configServer, clock, gitHub, zoneRegistry,
+ athensDb, nameService);
}
public Controller controller() { return controller; }
+
public CuratorDb curator() { return curator; }
+
public ManualClock clock() { return clock; }
+
public AthensDbMock athensDb() { return athensDb; }
- public MemoryNameService nameService() { return memoryNameService; }
- /** Create a new controller instance. Useful to verify that controller state is rebuilt from persistence */
- public final void createNewController() {
- controller = createController(db, curator, configServerClientMock, clock, gitHubMock, zoneRegistryMock,
- athensDb, memoryNameService);
- }
+ public MemoryNameService nameService() { return nameService; }
- public ZoneRegistryMock getZoneRegistryMock() { return zoneRegistryMock; }
+ public ZoneRegistryMock zoneRegistry() { return zoneRegistry; }
- public ConfigServerClientMock configServerClientMock() { return configServerClientMock; }
+ public ConfigServerClientMock configServer() { return configServer; }
- public GitHubMock gitHubClientMock () { return gitHubMock; }
+ public GitHubMock gitHub() { return gitHub; }
+
+ /** Create a new controller instance. Useful to verify that controller state is rebuilt from persistence */
+ public final void createNewController() {
+ controller = createController(db, curator, configServer, clock, gitHub, zoneRegistry, athensDb, nameService);
+ }
/** Creates the given tenant and application and deploys it */
public Application createAndDeploy(String tenantName, String domainName, String applicationName, Environment environment, long projectId, Long propertyId) {
@@ -113,7 +115,7 @@ public final class ControllerTester {
}
/** Creates the given tenant and application and deploys it */
- public Application createAndDeploy(String tenantName, String domainName, String applicationName,
+ public Application createAndDeploy(String tenantName, String domainName, String applicationName,
String instanceName, Zone zone, long projectId, Long propertyId) {
TenantId tenant = createTenant(tenantName, domainName, propertyId);
Application application = createApplication(tenant, applicationName, instanceName, projectId);
@@ -136,7 +138,7 @@ public final class ControllerTester {
public Application createAndDeploy(String tenantName, String domainName, String applicationName, Environment environment, long projectId) {
return createAndDeploy(tenantName, domainName, applicationName, environment, projectId, null);
}
-
+
public Zone toZone(Environment environment) {
switch (environment) {
case dev: case test: return new Zone(environment, RegionName.from("us-east-1"));
@@ -150,7 +152,7 @@ public final class ControllerTester {
athensDb.addDomain(new AthensDbMock.Domain(domain));
return domain;
}
-
+
public TenantId createTenant(String tenantName, String domainName, Long propertyId) {
TenantId id = new TenantId(tenantName);
Optional<Tenant> existing = controller().tenants().tenant(id);
@@ -162,7 +164,7 @@ public final class ControllerTester {
assertNotNull(controller().tenants().tenant(id));
return id;
}
-
+
public Application createApplication(TenantId tenant, String applicationName, String instanceName, long projectId) {
ApplicationId applicationId = applicationId(tenant.id(), applicationName, instanceName);
Application application = controller().applications().createApplication(applicationId, Optional.of(TestIdentities.userNToken))
@@ -194,4 +196,27 @@ public final class ControllerTester {
InstanceName.from(instance));
}
+ private static Controller createController(ControllerDb db, CuratorDb curator,
+ ConfigServerClientMock configServerClientMock, ManualClock clock,
+ GitHubMock gitHubClientMock, ZoneRegistryMock zoneRegistryMock,
+ AthensDbMock athensDb, MemoryNameService nameService) {
+ Controller controller = new Controller(db,
+ curator,
+ new MemoryRotationRepository(),
+ gitHubClientMock,
+ new JiraMock(),
+ new MemoryEntityService(),
+ new MemoryGlobalRoutingService(),
+ zoneRegistryMock,
+ configServerClientMock,
+ new MockMetricsService(),
+ nameService,
+ new MockRoutingGenerator(),
+ new ChefMock(),
+ clock,
+ new AthensMock(athensDb));
+ controller.updateVersionStatus(VersionStatus.compute(controller));
+ return controller;
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ClusterUtilizationTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ClusterUtilizationTest.java
new file mode 100644
index 00000000000..ca67e605682
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/application/ClusterUtilizationTest.java
@@ -0,0 +1,29 @@
+package com.yahoo.vespa.hosted.controller.application;
+
+import org.junit.Assert;
+import org.junit.Test;
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+/**
+ * @author smorgrav
+ */
+public class ClusterUtilizationTest {
+
+ private static final double delta = Double.MIN_NORMAL;
+
+ @Test
+ public void getMaxUtilization() throws Exception {
+ ClusterUtilization resources = new ClusterUtilization(0.3, 0.1, 0.4, 0.5);
+ Assert.assertEquals(0.5, resources.getMaxUtilization(), delta);
+
+ resources = new ClusterUtilization(0.3, 0.1, 0.4, 0.0);
+ Assert.assertEquals(0.4, resources.getMaxUtilization(), delta);
+
+ resources = new ClusterUtilization(0.4, 0.3, 0.3, 0.0);
+ Assert.assertEquals(0.4, resources.getMaxUtilization(), delta);
+
+ resources = new ClusterUtilization(0.1, 0.3, 0.3, 0.0);
+ Assert.assertEquals(0.3, resources.getMaxUtilization(), delta);
+ }
+
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/cost/CostMock.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/cost/CostMock.java
deleted file mode 100644
index 0a5ddfb5efc..00000000000
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/cost/CostMock.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.cost;
-
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.ApplicationCost;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.Backend;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.Cost;
-import com.yahoo.vespa.hosted.controller.common.NotFoundCheckedException;
-
-import java.util.List;
-
-/**
- * @author mpolden
- */
-public class CostMock implements Cost {
-
- private final Backend backend;
-
- public CostMock(Backend backend) {
- this.backend = backend;
- }
-
- @Override
- public List<ApplicationCost> getCPUAnalysis(int nofApplications) {
- return null;
- }
-
- @Override
- public String getCsvForLocalAnalysis() {
- return null;
- }
-
- @Override
- public List<ApplicationCost> getApplicationCost() {
- return backend.getApplicationCost();
- }
-
- @Override
- public ApplicationCost getApplicationCost(Environment env, RegionName region, ApplicationId app) throws NotFoundCheckedException {
- return backend.getApplicationCost(env, region, app);
- }
-}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/cost/MockInsightBackend.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/cost/MockInsightBackend.java
deleted file mode 100644
index c4ba5fa4fc5..00000000000
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/cost/MockInsightBackend.java
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.controller.cost;
-
-import com.yahoo.component.AbstractComponent;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.ApplicationCost;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.Backend;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * @author bratseth
- */
-public class MockInsightBackend extends AbstractComponent implements Backend {
-
- private final Map<ApplicationId, ApplicationCost> applicationCost = new HashMap<>();
-
- @Override
- public List<ApplicationCost> getApplicationCost() {
- return new ArrayList<>(applicationCost.values());
- }
-
- /**
- * Get cost for a specific application in one zone or null if this application is not known.
- * The zone information is ignored in the dummy backend.
- */
- @Override
- public ApplicationCost getApplicationCost(Environment env, RegionName region, ApplicationId application) {
- return applicationCost.get(application);
- }
-
- public void setApplicationCost(ApplicationId application, ApplicationCost cost) {
- applicationCost.put(application, cost);
- }
-
-}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
index 29cd35d1efc..72bfa238094 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/ApplicationPackageBuilder.java
@@ -28,6 +28,8 @@ public class ApplicationPackageBuilder {
private Environment environment = Environment.prod;
private final StringBuilder environmentBody = new StringBuilder();
private final StringBuilder validationOverridesBody = new StringBuilder();
+ private final StringBuilder blockChange = new StringBuilder();
+ private String searchDefinition = "search test { }";
public ApplicationPackageBuilder upgradePolicy(String upgradePolicy) {
this.upgradePolicy = upgradePolicy;
@@ -60,6 +62,18 @@ public class ApplicationPackageBuilder {
return this;
}
+ public ApplicationPackageBuilder blockChange(boolean revision, boolean version,
+ String daySpec, String hourSpec, String zoneSpec) {
+ blockChange.append(" <block-change");
+ blockChange.append(" revision='").append(revision).append("'");
+ blockChange.append(" version='").append(version).append("'");
+ blockChange.append(" days='").append(daySpec).append("'");
+ blockChange.append(" hours='").append(hourSpec).append("'");
+ blockChange.append(" time-zone='").append(zoneSpec).append("'");
+ blockChange.append("/>\n");
+ return this;
+ }
+
public ApplicationPackageBuilder allow(ValidationId validationId) {
validationOverridesBody.append(" <allow until='");
validationOverridesBody.append(asIso8601Date(Instant.now().plus(Duration.ofDays(29))));
@@ -69,6 +83,12 @@ public class ApplicationPackageBuilder {
return this;
}
+ /** Sets the content of the search definition test.sd */
+ public ApplicationPackageBuilder searchDefinition(String testSearchDefinition) {
+ this.searchDefinition = testSearchDefinition;
+ return this;
+ }
+
private byte[] deploymentSpec() {
StringBuilder xml = new StringBuilder("<deployment version='1.0'>\n");
if (upgradePolicy != null) {
@@ -76,6 +96,7 @@ public class ApplicationPackageBuilder {
xml.append(upgradePolicy);
xml.append("'/>\n");
}
+ xml.append(blockChange);
xml.append(" <");
xml.append(environment.value());
xml.append(">\n");
@@ -85,7 +106,7 @@ public class ApplicationPackageBuilder {
xml.append(">\n</deployment>");
return xml.toString().getBytes(StandardCharsets.UTF_8);
}
-
+
private byte[] validationOverrides() {
String xml = "<validation-overrides version='1.0'>\n" +
validationOverridesBody +
@@ -93,6 +114,10 @@ public class ApplicationPackageBuilder {
return xml.getBytes(StandardCharsets.UTF_8);
}
+ private byte[] searchDefinition() {
+ return searchDefinition.getBytes(StandardCharsets.UTF_8);
+ }
+
public ApplicationPackage build() {
ByteArrayOutputStream zip = new ByteArrayOutputStream();
ZipOutputStream out = new ZipOutputStream(zip);
@@ -103,6 +128,9 @@ public class ApplicationPackageBuilder {
out.putNextEntry(new ZipEntry("validation-overrides.xml"));
out.write(validationOverrides());
out.closeEntry();
+ out.putNextEntry(new ZipEntry("search-definitions/test.sd"));
+ out.write(searchDefinition());
+ out.closeEntry();
} catch (IOException e) {
throw new UncheckedIOException(e);
} finally {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java
index dc62de5cb52..be14947de2b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTester.java
@@ -22,34 +22,61 @@ import com.yahoo.vespa.hosted.controller.maintenance.Upgrader;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
import java.time.Duration;
+import java.util.List;
import java.util.Optional;
import java.util.UUID;
+import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
/**
+ * This class provides convenience methods for testing deployments
+ *
* @author bratseth
+ * @author mpolden
*/
public class DeploymentTester {
- private ControllerTester tester = new ControllerTester();
-
- private Upgrader upgrader = new Upgrader(tester.controller(), Duration.ofMinutes(2),
- new JobControl(tester.curator()));
- private FailureRedeployer failureRedeployer = new FailureRedeployer(tester.controller(), Duration.ofMinutes(2),
- new JobControl(tester.curator()));
+ // Set a long interval so that maintainers never do scheduled runs during tests
+ private static final Duration maintenanceInterval = Duration.ofDays(1);
+
+ private final ControllerTester tester;
+ private final Upgrader upgrader;
+ private final FailureRedeployer failureRedeployer;
+
+ public DeploymentTester() {
+ this(new ControllerTester());
+ }
+
+ public DeploymentTester(ControllerTester tester) {
+ this.tester = tester;
+ tester.curator().writeUpgradesPerMinute(100);
+ this.upgrader = new Upgrader(tester.controller(), maintenanceInterval, new JobControl(tester.curator()),
+ tester.curator());
+ this.failureRedeployer = new FailureRedeployer(tester.controller(), maintenanceInterval,
+ new JobControl(tester.curator()));
+ }
public Upgrader upgrader() { return upgrader; }
+
public FailureRedeployer failureRedeployer() { return failureRedeployer; }
+
public Controller controller() { return tester.controller(); }
+
public ApplicationController applications() { return tester.controller().applications(); }
+
public BuildSystem buildSystem() { return tester.controller().applications().deploymentTrigger().buildSystem(); }
+
public DeploymentTrigger deploymentTrigger() { return tester.controller().applications().deploymentTrigger(); }
+
public ManualClock clock() { return tester.clock(); }
+
public ControllerTester controllerTester() { return tester; }
+ public ConfigServerClientMock configServer() { return tester.configServer(); }
+
public Application application(String name) {
return application(ApplicationId.from("tenant1", name, "default"));
}
@@ -63,8 +90,6 @@ public class DeploymentTester {
.filter(c -> c instanceof Change.VersionChange)
.map(Change.VersionChange.class::cast);
}
-
- public ConfigServerClientMock configServerClientMock() { return tester.configServerClientMock(); }
public void updateVersionStatus(Version currentVersion) {
controller().updateVersionStatus(VersionStatus.compute(controller(), currentVersion));
@@ -105,12 +130,23 @@ public class DeploymentTester {
public void deployCompletely(Application application, ApplicationPackage applicationPackage) {
notifyJobCompletion(JobType.component, application, true);
assertTrue(applications().require(application.id()).deploying().isPresent());
- completeDeployment(application, applicationPackage, Optional.empty());
+ completeDeployment(application, applicationPackage, Optional.empty(), true);
}
- private void completeDeployment(Application application, ApplicationPackage applicationPackage, Optional<JobType> failOnJob) {
+ /** Deploy application using the given application package, but expecting to stop after test phases */
+ public void deployTestOnly(Application application, ApplicationPackage applicationPackage) {
+ notifyJobCompletion(JobType.component, application, true);
+ assertTrue(applications().require(application.id()).deploying().isPresent());
+ completeDeployment(application, applicationPackage, Optional.empty(), false);
+ }
+
+ private void completeDeployment(Application application, ApplicationPackage applicationPackage,
+ Optional<JobType> failOnJob, boolean includingProductionZones) {
DeploymentOrder order = new DeploymentOrder(controller());
- for (JobType job : order.jobsFrom(applicationPackage.deploymentSpec())) {
+ List<JobType> jobs = order.jobsFrom(applicationPackage.deploymentSpec());
+ if ( ! includingProductionZones)
+ jobs = jobs.stream().filter(job -> ! job.isProduction()).collect(Collectors.toList());
+ for (JobType job : jobs) {
boolean failJob = failOnJob.map(j -> j.equals(job)).orElse(false);
deployAndNotify(application, applicationPackage, !failJob, job);
if (failJob) {
@@ -120,9 +156,12 @@ public class DeploymentTester {
if (failOnJob.isPresent()) {
assertTrue(applications().require(application.id()).deploying().isPresent());
assertTrue(applications().require(application.id()).deploymentJobs().hasFailures());
- } else {
+ } else if (includingProductionZones) {
assertFalse(applications().require(application.id()).deploying().isPresent());
}
+ else {
+ assertTrue(applications().require(application.id()).deploying().isPresent());
+ }
}
public void notifyJobCompletion(JobType jobType, Application application, boolean success) {
@@ -136,7 +175,7 @@ public class DeploymentTester {
public void completeUpgrade(Application application, Version version, String upgradePolicy) {
assertTrue(applications().require(application.id()).deploying().isPresent());
assertEquals(new Change.VersionChange(version), applications().require(application.id()).deploying().get());
- completeDeployment(application, applicationPackage(upgradePolicy), Optional.empty());
+ completeDeployment(application, applicationPackage(upgradePolicy), Optional.empty(), true);
}
public void completeUpgradeWithError(Application application, Version version, String upgradePolicy, JobType failOnJob) {
@@ -150,7 +189,7 @@ public class DeploymentTester {
private void completeUpgradeWithError(Application application, Version version, ApplicationPackage applicationPackage, Optional<JobType> failOnJob) {
assertTrue(applications().require(application.id()).deploying().isPresent());
assertEquals(new Change.VersionChange(version), applications().require(application.id()).deploying().get());
- completeDeployment(application, applicationPackage, failOnJob);
+ completeDeployment(application, applicationPackage, failOnJob, true);
}
public void deploy(JobType job, Application application, ApplicationPackage applicationPackage) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
index 0c0cc0485c8..65ed5eeb95b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/DeploymentTriggerTest.java
@@ -3,14 +3,20 @@ package com.yahoo.vespa.hosted.controller.deployment;
import com.yahoo.component.Version;
import com.yahoo.config.provision.Environment;
+import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.api.identifiers.TenantId;
+import com.yahoo.vespa.hosted.controller.api.integration.BuildService;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobType;
+import com.yahoo.vespa.hosted.controller.maintenance.BlockedChangeDeployer;
+import com.yahoo.vespa.hosted.controller.maintenance.JobControl;
import org.junit.Test;
import java.time.Duration;
+import java.time.Instant;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -47,22 +53,23 @@ public class DeploymentTriggerTest {
tester.updateVersionStatus(version);
tester.upgrader().maintain();
+ // system-test fails and is retried
tester.deployAndNotify(app, applicationPackage, false, JobType.systemTest);
assertEquals("Retried immediately", 1, tester.buildSystem().jobs().size());
-
tester.buildSystem().takeJobsToRun();
assertEquals("Job removed", 0, tester.buildSystem().jobs().size());
- tester.clock().advance(Duration.ofHours(2));
- tester.failureRedeployer().maintain();
- assertEquals("Retried job", 1, tester.buildSystem().jobs().size());
- assertEquals(JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName());
+ tester.clock().advance(Duration.ofHours(4).plus(Duration.ofSeconds(1)));
+ tester.failureRedeployer().maintain(); // Causes retry of systemTests
+
+ assertEquals("Scheduled retry", 1, tester.buildSystem().jobs().size());
+ tester.deployAndNotify(app, applicationPackage, true, JobType.systemTest);
+ // staging-test times out and is retried
tester.buildSystem().takeJobsToRun();
- assertEquals("Job removed", 0, tester.buildSystem().jobs().size());
tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1)));
tester.failureRedeployer().maintain();
- assertEquals("Retried from the beginning", 1, tester.buildSystem().jobs().size());
- assertEquals(JobType.component.id(), tester.buildSystem().jobs().get(0).jobName());
+ assertEquals("Retried dead job", 1, tester.buildSystem().jobs().size());
+ assertEquals(JobType.stagingTest.id(), tester.buildSystem().jobs().get(0).jobName());
}
@Test
@@ -256,6 +263,50 @@ public class DeploymentTriggerTest {
}
@Test
+ public void testBlockRevisionChange() {
+ ManualClock clock = new ManualClock(Instant.parse("2017-09-26T17:30:00.00Z")); // Tuesday, 17:30
+ DeploymentTester tester = new DeploymentTester(new ControllerTester(clock));
+ BlockedChangeDeployer blockedChangeDeployer = new BlockedChangeDeployer(tester.controller(),
+ Duration.ofHours(1),
+ new JobControl(tester.controllerTester().curator()));
+ Version version = Version.fromString("5.0");
+ tester.updateVersionStatus(version);
+
+ ApplicationPackageBuilder applicationPackageBuilder = new ApplicationPackageBuilder()
+ .upgradePolicy("canary")
+ // Block revision changes on tuesday in hours 18 and 19
+ .blockChange(true, false, "tue", "18-19", "UTC")
+ .region("us-west-1");
+
+ Application app = tester.createAndDeploy("app1", 1, applicationPackageBuilder.build());
+
+ tester.clock().advance(Duration.ofHours(1)); // --------------- Enter block window: 18:30
+
+ blockedChangeDeployer.run();
+ assertEquals(0, tester.buildSystem().jobs().size());
+
+ String searchDefinition =
+ "search test {\n" +
+ " document test {\n" +
+ " field test type string {\n" +
+ " }\n" +
+ " }\n" +
+ "}\n";
+ ApplicationPackage changedApplication = applicationPackageBuilder.searchDefinition(searchDefinition).build();
+
+ tester.deployTestOnly(app, changedApplication);
+
+ blockedChangeDeployer.run();
+ assertEquals(0, tester.buildSystem().jobs().size());
+
+ tester.clock().advance(Duration.ofHours(2)); // ---------------- Exit block window: 20:30
+ tester.deploymentTrigger().triggerReadyJobs(); // Schedules the blocked production job(s)
+ assertEquals(1, tester.buildSystem().jobs().size());
+ BuildService.BuildJob productionJob = tester.buildSystem().takeJobsToRun().get(0);
+ assertEquals("production-us-west-1", productionJob.jobName());
+ }
+
+ @Test
public void testHandleMultipleNotificationsFromLastJob() {
DeploymentTester tester = new DeploymentTester();
BuildSystem buildSystem = tester.buildSystem();
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystemTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystemTest.java
index c869bd90924..e66d7e9168d 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystemTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/deployment/PolledBuildSystemTest.java
@@ -22,7 +22,7 @@ import static org.junit.Assert.assertEquals;
public class PolledBuildSystemTest {
@Parameterized.Parameters(name = "jobType={0}")
- public static Iterable<? extends Object> capacityConstrainedJobs() {
+ public static Iterable<?> capacityConstrainedJobs() {
return Arrays.asList(JobType.systemTest, JobType.stagingTest);
}
@@ -37,26 +37,32 @@ public class PolledBuildSystemTest {
DeploymentTester tester = new DeploymentTester();
BuildSystem buildSystem = new PolledBuildSystem(tester.controller(), new MockCuratorDb());
- int fooProjectId = 1;
- int barProjectId = 2;
+ int project1 = 1;
+ int project2 = 2;
+ int project3 = 3;
ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
.region("us-west-1")
.build();
- ApplicationId foo = tester.createAndDeploy("app1", fooProjectId, applicationPackage).id();
- ApplicationId bar = tester.createAndDeploy("app2", barProjectId, applicationPackage).id();
+ ApplicationId app1 = tester.createAndDeploy("app1", project1, applicationPackage).id();
+ ApplicationId app2 = tester.createAndDeploy("app2", project2, applicationPackage).id();
+ ApplicationId app3 = tester.createAndDeploy("app3", project3, applicationPackage).id();
// Trigger jobs in capacity constrained environment
- buildSystem.addJob(foo, jobType, false);
- buildSystem.addJob(bar, jobType, false);
+ buildSystem.addJob(app1, jobType, false);
+ buildSystem.addJob(app2, jobType, false);
+ buildSystem.addJob(app3, jobType, false);
- // Capacity constrained jobs are returned one a at a time
+ // A limited number of jobs are offered at a time:
+ // First offer
List<BuildJob> nextJobs = buildSystem.takeJobsToRun();
- assertEquals(1, nextJobs.size());
- assertEquals(fooProjectId, nextJobs.get(0).projectId());
+ assertEquals(2, nextJobs.size());
+ assertEquals(project1, nextJobs.get(0).projectId());
+ assertEquals(project2, nextJobs.get(1).projectId());
+ // Second offer
nextJobs = buildSystem.takeJobsToRun();
assertEquals(1, nextJobs.size());
- assertEquals(barProjectId, nextJobs.get(0).projectId());
+ assertEquals(project3, nextJobs.get(0).projectId());
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MockMetricsService.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MockMetricsService.java
index 79b4c5f6d6a..2dc6471effb 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MockMetricsService.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/integration/MockMetricsService.java
@@ -4,8 +4,8 @@ package com.yahoo.vespa.hosted.controller.integration;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.Zone;
-import java.util.Collections;
-import java.util.List;
+import java.util.HashMap;
+import java.util.Map;
/**
* @author bratseth
@@ -23,10 +23,11 @@ public class MockMetricsService implements com.yahoo.vespa.hosted.controller.api
}
@Override
- public List<ClusterCostMetrics> getClusterCostMetrics(ApplicationId application, Zone zone) {
- CostMetrics costMetrics = new CostMetrics(55.54, 69.90, 34.59);
- ClusterCostMetrics clusterCostMetrics = new ClusterCostMetrics("default", costMetrics);
- return Collections.singletonList(clusterCostMetrics);
+ public Map<String, SystemMetrics> getSystemMetrics(ApplicationId application, Zone zone) {
+ Map<String, SystemMetrics> result = new HashMap<>();
+ SystemMetrics system = new SystemMetrics(55.54, 69.90, 34.59);
+ result.put("default", system);
+ return result;
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainerTest.java
new file mode 100644
index 00000000000..7ae89082660
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterInfoMaintainerTest.java
@@ -0,0 +1,38 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.vespa.hosted.controller.ControllerTester;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.time.Duration;
+
+/**
+ * @author smorgrav
+ */
+public class ClusterInfoMaintainerTest {
+
+ @Test
+ public void maintain() {
+ ControllerTester tester = new ControllerTester();
+ ApplicationId app = tester.createAndDeploy("tenant1", "domain1", "app1", Environment.dev, 123).id();
+
+ // Precondition: no cluster info attached to the deployments
+ Deployment deployment = tester.controller().applications().get(app).get().deployments().values().stream().findAny().get();
+ Assert.assertEquals(0, deployment.clusterInfo().size());
+
+ ClusterInfoMaintainer mainainer = new ClusterInfoMaintainer(tester.controller(), Duration.ofHours(1), new JobControl(new MockCuratorDb()));
+ mainainer.maintain();
+
+ deployment = tester.controller().applications().get(app).get().deployments().values().stream().findAny().get();
+ Assert.assertEquals(2, deployment.clusterInfo().size());
+ Assert.assertEquals(10, deployment.clusterInfo().get(ClusterSpec.Id.from("clusterA")).getCost());
+
+ }
+
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterUtilizationMaintainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterUtilizationMaintainerTest.java
new file mode 100644
index 00000000000..da97c1122c3
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/ClusterUtilizationMaintainerTest.java
@@ -0,0 +1,38 @@
+package com.yahoo.vespa.hosted.controller.maintenance;
+
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.vespa.hosted.controller.ControllerTester;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
+import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.time.Duration;
+
+/**
+ * @author smorgrav
+ */
+public class ClusterUtilizationMaintainerTest {
+
+ @Test
+ public void maintain() {
+ ControllerTester tester = new ControllerTester();
+ ApplicationId app = tester.createAndDeploy("tenant1", "domain1", "app1", Environment.dev, 123).id();
+
+ // Precondition: no cluster info attached to the deployments
+ Deployment deployment = tester.controller().applications().get(app).get().deployments().values().stream().findAny().get();
+ Assert.assertEquals(0, deployment.clusterUtils().size());
+
+ ClusterUtilizationMaintainer mainainer = new ClusterUtilizationMaintainer(tester.controller(), Duration.ofHours(1), new JobControl(new MockCuratorDb()));
+ mainainer.maintain();
+
+ deployment = tester.controller().applications().get(app).get().deployments().values().stream().findAny().get();
+ Assert.assertEquals(1, deployment.clusterUtils().size());
+ Assert.assertEquals(0.5554, deployment.clusterUtils().get(ClusterSpec.Id.from("default")).getCpu(), Double.MIN_VALUE);
+ }
+
+} \ No newline at end of file
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
index 5b758957571..ef0b05f9bb2 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/DeploymentExpirerTest.java
@@ -34,7 +34,10 @@ public class DeploymentExpirerTest {
@Test
public void testDeploymentExpiry() throws IOException, InterruptedException {
- tester.controllerTester().getZoneRegistryMock().setDeploymentTimeToLive(new Zone(Environment.dev, RegionName.from("us-east-1")), Duration.ofDays(14));
+ tester.controllerTester().zoneRegistry().setDeploymentTimeToLive(
+ new Zone(Environment.dev, RegionName.from("us-east-1")),
+ Duration.ofDays(14)
+ );
DeploymentExpirer expirer = new DeploymentExpirer(tester.controller(), Duration.ofDays(10),
tester.clock(), new JobControl(new MockCuratorDb()));
Application devApp = tester.createApplication("app1", "tenant1", 123L, 1L);
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployerTest.java
index fd7a3605766..286db864c22 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/FailureRedeployerTest.java
@@ -13,7 +13,6 @@ import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.deployment.DeploymentTester;
import com.yahoo.vespa.hosted.controller.persistence.ApplicationSerializer;
-import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import org.junit.Test;
import java.nio.file.Files;
@@ -116,81 +115,16 @@ public class FailureRedeployerTest {
tester.failureRedeployer().maintain();
assertTrue("No jobs retried", tester.buildSystem().jobs().isEmpty());
- // Just over 12 hours pass, deployment is retried from beginning
+ // Just over 12 hours pass, job is retried
tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1)));
tester.failureRedeployer().maintain();
- assertEquals(DeploymentJobs.JobType.component.id(), tester.buildSystem().takeJobsToRun().get(0).jobName());
-
- // Ensure that system-test is triggered after component. Triggering component records a new change, but in this
- // case there's already a change in progress which we want to discard and start over
- tester.notifyJobCompletion(DeploymentJobs.JobType.component, app, true);
- assertEquals(DeploymentJobs.JobType.systemTest.id(), tester.buildSystem().jobs().get(0).jobName());
- }
-
- @Test
- public void testAlwaysRestartsDeploymentOfApplicationsWithStuckJobs() {
- DeploymentTester tester = new DeploymentTester();
- Version version = Version.fromString("5.0");
- tester.updateVersionStatus(version);
-
- ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
- .environment(Environment.prod)
- .region("us-west-1")
- .build();
-
- // Setup applications
- Application canary0 = tester.createAndDeploy("canary0", 0, "canary");
- Application canary1 = tester.createAndDeploy("canary1", 1, "canary");
- Application default0 = tester.createAndDeploy("default0", 2, "default");
- Application default1 = tester.createAndDeploy("default1", 3, "default");
- Application default2 = tester.createAndDeploy("default2", 4, "default");
- Application default3 = tester.createAndDeploy("default3", 5, "default");
- Application default4 = tester.createAndDeploy("default4", 6, "default");
-
- // New version is released
- version = Version.fromString("5.1");
- tester.updateVersionStatus(version);
- assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
- tester.upgrader().maintain();
-
- // Canaries upgrade and raise confidence
- tester.completeUpgrade(canary0, version, "canary");
- tester.completeUpgrade(canary1, version, "canary");
- tester.updateVersionStatus(version);
- assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
-
- // Applications with default policy start upgrading
- tester.clock().advance(Duration.ofMinutes(1));
- tester.upgrader().maintain();
- assertEquals("Upgrade scheduled for remaining apps", 5, tester.buildSystem().jobs().size());
-
- // 4/5 applications fail, confidence is lowered and upgrade is cancelled
- tester.completeUpgradeWithError(default0, version, "default", DeploymentJobs.JobType.systemTest);
- tester.completeUpgradeWithError(default1, version, "default", DeploymentJobs.JobType.systemTest);
- tester.completeUpgradeWithError(default2, version, "default", DeploymentJobs.JobType.systemTest);
- tester.completeUpgradeWithError(default3, version, "default", DeploymentJobs.JobType.systemTest);
- tester.updateVersionStatus(version);
- assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
- tester.upgrader().maintain();
-
- // 5th app never reports back and has a dead locked job, but no ongoing change
- Application deadLocked = tester.applications().require(default4.id());
- assertTrue("Jobs in progress", deadLocked.deploymentJobs().inProgress());
- assertFalse("No change present", deadLocked.deploying().isPresent());
-
- // 4/5 applications are repaired and confidence is restored
- tester.deployCompletely(default0, applicationPackage);
- tester.deployCompletely(default1, applicationPackage);
- tester.deployCompletely(default2, applicationPackage);
- tester.deployCompletely(default3, applicationPackage);
- tester.updateVersionStatus(version);
- assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
+ assertEquals(DeploymentJobs.JobType.stagingTest.id(), tester.buildSystem().takeJobsToRun().get(0).jobName());
- // Over 12 hours pass and failure redeployer restarts deployment of 5th app
- tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1)));
- tester.failureRedeployer().maintain();
- assertEquals("Deployment is restarted", DeploymentJobs.JobType.component.id(),
- tester.buildSystem().jobs().get(0).jobName());
+ // Deployment completes
+ tester.deploy(DeploymentJobs.JobType.stagingTest, app, applicationPackage, true);
+ tester.notifyJobCompletion(DeploymentJobs.JobType.stagingTest, app, true);
+ tester.deployAndNotify(app, applicationPackage, true, DeploymentJobs.JobType.productionUsEast3);
+ assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty());
}
@Test
@@ -243,11 +177,11 @@ public class FailureRedeployerTest {
@Test
public void retryIgnoresStaleJobData() throws Exception {
DeploymentTester tester = new DeploymentTester();
- tester.controllerTester().getZoneRegistryMock().setSystem(SystemName.cd);
+ tester.controllerTester().zoneRegistry().setSystem(SystemName.cd);
// Current system version, matches version in test data
Version version = Version.fromString("6.141.117");
- tester.configServerClientMock().setDefaultConfigServerVersion(version);
+ tester.configServer().setDefaultConfigServerVersion(version);
tester.updateVersionStatus(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
@@ -266,7 +200,7 @@ public class FailureRedeployerTest {
// New version is released
version = Version.fromString("6.142.1");
- tester.configServerClientMock().setDefaultConfigServerVersion(version);
+ tester.configServer().setDefaultConfigServerVersion(version);
tester.updateVersionStatus(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
tester.upgrader().maintain();
@@ -299,11 +233,11 @@ public class FailureRedeployerTest {
@Test
public void ignoresPullRequestInstances() throws Exception {
DeploymentTester tester = new DeploymentTester();
- tester.controllerTester().getZoneRegistryMock().setSystem(SystemName.cd);
+ tester.controllerTester().zoneRegistry().setSystem(SystemName.cd);
// Current system version, matches version in test data
Version version = Version.fromString("6.42.1");
- tester.configServerClientMock().setDefaultConfigServerVersion(version);
+ tester.configServer().setDefaultConfigServerVersion(version);
tester.updateVersionStatus(version);
assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
@@ -322,4 +256,29 @@ public class FailureRedeployerTest {
assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty());
}
+ @Test
+ public void applicationWithoutProjectIdIsNotTriggered() throws Exception {
+ DeploymentTester tester = new DeploymentTester();
+
+ // Current system version, matches version in test data
+ Version version = Version.fromString("6.42.1");
+ tester.configServer().setDefaultConfigServerVersion(version);
+ tester.updateVersionStatus(version);
+ assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
+
+ // Load test data
+ ApplicationSerializer serializer = new ApplicationSerializer();
+ byte[] json = Files.readAllBytes(Paths.get("src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/application-without-project-id.json"));
+ Slime slime = SlimeUtils.jsonToSlime(json);
+ Application application = serializer.fromSlime(slime);
+
+ try (Lock lock = tester.controller().applications().lock(application.id())) {
+ tester.controller().applications().store(application, lock);
+ }
+
+ // Failure redeployer does not restart deployment
+ tester.failureRedeployer().maintain();
+ assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty());
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
index 3046a89efe6..29f1bce5ebe 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/UpgraderTest.java
@@ -3,7 +3,11 @@ package com.yahoo.vespa.hosted.controller.maintenance;
import com.yahoo.component.Version;
import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.test.ManualClock;
import com.yahoo.vespa.hosted.controller.Application;
+import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
@@ -12,6 +16,7 @@ import com.yahoo.vespa.hosted.controller.versions.VespaVersion;
import org.junit.Test;
import java.time.Duration;
+import java.time.Instant;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@@ -36,12 +41,12 @@ public class UpgraderTest {
assertEquals("No applications: Nothing to do", 0, tester.buildSystem().jobs().size());
// Setup applications
- Application canary0 = tester.createAndDeploy("canary0", 0, "canary");
- Application canary1 = tester.createAndDeploy("canary1", 1, "canary");
- Application default0 = tester.createAndDeploy("default0", 2, "default");
- Application default1 = tester.createAndDeploy("default1", 3, "default");
- Application default2 = tester.createAndDeploy("default2", 4, "default");
- Application conservative0 = tester.createAndDeploy("conservative0", 5, "conservative");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application conservative0 = tester.createAndDeploy("conservative0", 6, "conservative");
tester.upgrader().maintain();
assertEquals("All already on the right version: Nothing to do", 0, tester.buildSystem().jobs().size());
@@ -54,7 +59,7 @@ public class UpgraderTest {
assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
- assertEquals(version, tester.configServerClientMock().lastPrepareVersion.get());
+ assertEquals(version, tester.configServer().lastPrepareVersion().get());
tester.updateVersionStatus(version);
tester.upgrader().maintain();
@@ -104,7 +109,7 @@ public class UpgraderTest {
assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
- assertEquals(version, tester.configServerClientMock().lastPrepareVersion.get());
+ assertEquals(version, tester.configServer().lastPrepareVersion().get());
tester.updateVersionStatus(version);
tester.upgrader().maintain();
@@ -131,9 +136,6 @@ public class UpgraderTest {
// --- Failing application is repaired by changing the application, causing confidence to move above 'high' threshold
// Deploy application change
tester.deployCompletely("default0");
- // Complete upgrade
- tester.upgrader().maintain();
- tester.completeUpgrade(default0, version, "default");
tester.updateVersionStatus(version);
assertEquals(VespaVersion.Confidence.high, tester.controller().versionStatus().systemVersion().get().confidence());
@@ -160,18 +162,18 @@ public class UpgraderTest {
assertEquals("No applications: Nothing to do", 0, tester.buildSystem().jobs().size());
// Setup applications
- Application canary0 = tester.createAndDeploy("canary0", 0, "canary");
- Application canary1 = tester.createAndDeploy("canary1", 1, "canary");
- Application default0 = tester.createAndDeploy("default0", 2, "default");
- Application default1 = tester.createAndDeploy("default1", 3, "default");
- Application default2 = tester.createAndDeploy("default2", 4, "default");
- Application default3 = tester.createAndDeploy("default3", 5, "default");
- Application default4 = tester.createAndDeploy("default4", 6, "default");
- Application default5 = tester.createAndDeploy("default5", 7, "default");
- Application default6 = tester.createAndDeploy("default6", 8, "default");
- Application default7 = tester.createAndDeploy("default7", 9, "default");
- Application default8 = tester.createAndDeploy("default8", 10, "default");
- Application default9 = tester.createAndDeploy("default9", 11, "default");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application default3 = tester.createAndDeploy("default3", 6, "default");
+ Application default4 = tester.createAndDeploy("default4", 7, "default");
+ Application default5 = tester.createAndDeploy("default5", 8, "default");
+ Application default6 = tester.createAndDeploy("default6", 9, "default");
+ Application default7 = tester.createAndDeploy("default7", 10, "default");
+ Application default8 = tester.createAndDeploy("default8", 11, "default");
+ Application default9 = tester.createAndDeploy("default9", 12, "default");
tester.upgrader().maintain();
assertEquals("All already on the right version: Nothing to do", 0, tester.buildSystem().jobs().size());
@@ -184,7 +186,7 @@ public class UpgraderTest {
assertEquals("New system version: Should upgrade Canaries", 2, tester.buildSystem().jobs().size());
tester.completeUpgrade(canary0, version, "canary");
- assertEquals(version, tester.configServerClientMock().lastPrepareVersion.get());
+ assertEquals(version, tester.configServer().lastPrepareVersion().get());
tester.updateVersionStatus(version);
tester.upgrader().maintain();
@@ -272,13 +274,13 @@ public class UpgraderTest {
tester.updateVersionStatus(version);
// Setup applications
- Application canary0 = tester.createAndDeploy("canary0", 0, "canary");
- Application canary1 = tester.createAndDeploy("canary1", 1, "canary");
- Application default0 = tester.createAndDeploy("default0", 2, "default");
- Application default1 = tester.createAndDeploy("default1", 3, "default");
- Application default2 = tester.createAndDeploy("default2", 4, "default");
- Application default3 = tester.createAndDeploy("default3", 5, "default");
- Application default4 = tester.createAndDeploy("default4", 6, "default");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application default3 = tester.createAndDeploy("default3", 6, "default");
+ Application default4 = tester.createAndDeploy("default4", 7, "default");
// New version is released
version = Version.fromString("5.1");
@@ -318,13 +320,13 @@ public class UpgraderTest {
tester.updateVersionStatus(version);
// Setup applications
- Application canary0 = tester.createAndDeploy("canary0", 0, "canary");
- Application canary1 = tester.createAndDeploy("canary1", 1, "canary");
- Application default0 = tester.createAndDeploy("default0", 2, "default");
- Application default1 = tester.createAndDeploy("default1", 3, "default");
- Application default2 = tester.createAndDeploy("default2", 4, "default");
- Application default3 = tester.createAndDeploy("default3", 5, "default");
- Application default4 = tester.createAndDeploy("default4", 5, "default");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application default3 = tester.createAndDeploy("default3", 6, "default");
+ Application default4 = tester.createAndDeploy("default4", 7, "default");
// New version is released
version = Version.fromString("5.1");
@@ -360,4 +362,176 @@ public class UpgraderTest {
assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
}
+ @Test
+ public void testBlockVersionChange() {
+ ManualClock clock = new ManualClock(Instant.parse("2017-09-26T18:00:00.00Z")); // A Tuesday
+ DeploymentTester tester = new DeploymentTester(new ControllerTester(clock));
+ Version version = Version.fromString("5.0");
+ tester.updateVersionStatus(version);
+
+ ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
+ .upgradePolicy("canary")
+ // Block upgrades on tuesday in hours 18 and 19
+ .blockChange(false, true, "tue", "18-19", "UTC")
+ .region("us-west-1")
+ .build();
+
+ Application app = tester.createAndDeploy("app1", 1, applicationPackage);
+
+ // New version is released
+ version = Version.fromString("5.1");
+ tester.updateVersionStatus(version);
+
+ // Application is not upgraded at this time
+ tester.upgrader().maintain();
+ assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty());
+
+ // One hour passes, time is 19:00, still no upgrade
+ tester.clock().advance(Duration.ofHours(1));
+ tester.upgrader().maintain();
+ assertTrue("No jobs scheduled", tester.buildSystem().jobs().isEmpty());
+
+ // Two hours pass in total, time is 20:00 and application upgrades
+ tester.clock().advance(Duration.ofHours(1));
+ tester.upgrader().maintain();
+ assertFalse("Job is scheduled", tester.buildSystem().jobs().isEmpty());
+ tester.completeUpgrade(app, version, "canary");
+ assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty());
+ }
+
+ @Test
+ public void testReschedulesUpgradeAfterTimeout() {
+ DeploymentTester tester = new DeploymentTester();
+ Version version = Version.fromString("5.0");
+ tester.updateVersionStatus(version);
+
+ ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
+ .environment(Environment.prod)
+ .region("us-west-1")
+ .build();
+
+ // Setup applications
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application default3 = tester.createAndDeploy("default3", 6, "default");
+ Application default4 = tester.createAndDeploy("default4", 7, "default");
+
+ assertEquals(version, default0.deployedVersion().get());
+
+ // New version is released
+ version = Version.fromString("5.1");
+ tester.updateVersionStatus(version);
+ assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
+ tester.upgrader().maintain();
+
+ // Canaries upgrade and raise confidence
+ tester.completeUpgrade(canary0, version, "canary");
+ tester.completeUpgrade(canary1, version, "canary");
+ tester.updateVersionStatus(version);
+ assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
+
+ // Applications with default policy start upgrading
+ tester.clock().advance(Duration.ofMinutes(1));
+ tester.upgrader().maintain();
+ assertEquals("Upgrade scheduled for remaining apps", 5, tester.buildSystem().jobs().size());
+
+ // 4/5 applications fail, confidence is lowered and upgrade is cancelled
+ tester.completeUpgradeWithError(default0, version, "default", DeploymentJobs.JobType.systemTest);
+ tester.completeUpgradeWithError(default1, version, "default", DeploymentJobs.JobType.systemTest);
+ tester.completeUpgradeWithError(default2, version, "default", DeploymentJobs.JobType.systemTest);
+ tester.completeUpgradeWithError(default3, version, "default", DeploymentJobs.JobType.systemTest);
+ tester.updateVersionStatus(version);
+ assertEquals(VespaVersion.Confidence.broken, tester.controller().versionStatus().systemVersion().get().confidence());
+ tester.upgrader().maintain();
+
+ // 5th app never reports back and has a dead job, but no ongoing change
+ Application deadLocked = tester.applications().require(default4.id());
+ assertTrue("Jobs in progress", deadLocked.deploymentJobs().isRunning(tester.controller().applications().deploymentTrigger().jobTimeoutLimit()));
+ assertFalse("No change present", deadLocked.deploying().isPresent());
+
+ // 4/5 applications are repaired and confidence is restored
+ tester.deployCompletely(default0, applicationPackage);
+ tester.deployCompletely(default1, applicationPackage);
+ tester.deployCompletely(default2, applicationPackage);
+ tester.deployCompletely(default3, applicationPackage);
+
+ tester.updateVersionStatus(version);
+ assertEquals(VespaVersion.Confidence.normal, tester.controller().versionStatus().systemVersion().get().confidence());
+
+ tester.upgrader().maintain();
+ assertEquals("Upgrade scheduled for previously failing apps", 4, tester.buildSystem().jobs().size());
+ tester.completeUpgrade(default0, version, "default");
+ tester.completeUpgrade(default1, version, "default");
+ tester.completeUpgrade(default2, version, "default");
+ tester.completeUpgrade(default3, version, "default");
+
+ assertEquals(version, tester.application(default0.id()).deployedVersion().get());
+ assertEquals(version, tester.application(default1.id()).deployedVersion().get());
+ assertEquals(version, tester.application(default2.id()).deployedVersion().get());
+ assertEquals(version, tester.application(default3.id()).deployedVersion().get());
+
+ // Over 12 hours pass and upgrade is rescheduled for 5th app
+ assertEquals(0, tester.buildSystem().jobs().size());
+ tester.clock().advance(Duration.ofHours(12).plus(Duration.ofSeconds(1)));
+ tester.upgrader().maintain();
+ assertEquals(1, tester.buildSystem().jobs().size());
+ assertEquals("Upgrade is rescheduled", DeploymentJobs.JobType.systemTest.id(),
+ tester.buildSystem().jobs().get(0).jobName());
+ tester.deployCompletely(default4, applicationPackage);
+ assertEquals(version, tester.application(default4.id()).deployedVersion().get());
+ }
+
+ @Test
+ public void testThrottlesUpgrades() {
+ DeploymentTester tester = new DeploymentTester();
+ Version version = Version.fromString("5.0");
+ tester.updateVersionStatus(version);
+
+ // Setup our own upgrader as we need to control the interval
+ Upgrader upgrader = new Upgrader(tester.controller(), Duration.ofMinutes(10),
+ new JobControl(tester.controllerTester().curator()),
+ tester.controllerTester().curator());
+ upgrader.setUpgradesPerMinute(0.2);
+
+ // Setup applications
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application default0 = tester.createAndDeploy("default0", 3, "default");
+ Application default1 = tester.createAndDeploy("default1", 4, "default");
+ Application default2 = tester.createAndDeploy("default2", 5, "default");
+ Application default3 = tester.createAndDeploy("default3", 6, "default");
+
+ // Dev deployment which should be ignored
+ Application dev0 = tester.createApplication("dev0", "tenant1", 7, 1L);
+ tester.controllerTester().deploy(dev0, new Zone(Environment.dev, RegionName.from("dev-region")));
+
+ // New version is released and canaries upgrade
+ version = Version.fromString("5.1");
+ tester.updateVersionStatus(version);
+ assertEquals(version, tester.controller().versionStatus().systemVersion().get().versionNumber());
+ upgrader.maintain();
+
+ assertEquals(2, tester.buildSystem().jobs().size());
+ tester.completeUpgrade(canary0, version, "canary");
+ tester.completeUpgrade(canary1, version, "canary");
+ tester.updateVersionStatus(version);
+
+ // Next run upgrades a subset
+ upgrader.maintain();
+ assertEquals(2, tester.buildSystem().jobs().size());
+ tester.completeUpgrade(default0, version, "default");
+ tester.completeUpgrade(default2, version, "default");
+
+ // Remaining applications upgraded
+ upgrader.maintain();
+ assertEquals(2, tester.buildSystem().jobs().size());
+ tester.completeUpgrade(default1, version, "default");
+ tester.completeUpgrade(default3, version, "default");
+ upgrader.maintain();
+ assertTrue("All jobs consumed", tester.buildSystem().jobs().isEmpty());
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/application-without-project-id.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/application-without-project-id.json
new file mode 100644
index 00000000000..63832531c7d
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/application-without-project-id.json
@@ -0,0 +1,19 @@
+{
+ "id": "tenant1:app1:default",
+ "deploymentSpecField": "<deployment version='1.0'>\n <test />\n <staging />\n <prod>\n <region active=\"true\">cd-us-central-1</region>\n <region active=\"true\">cd-us-central-2</region>\n </prod>\n</deployment>\n",
+ "validationOverrides": "<deployment version='1.0'/>",
+ "deployments": [],
+ "deploymentJobs": {
+ "jobStatus": [
+ {
+ "jobType": "system-test",
+ "lastTriggered": {
+ "version": "6.42.1",
+ "upgrade": false,
+ "at": 1506330088050
+ }
+ }
+ ]
+ },
+ "outstandingChangeField": false
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/canary-with-stale-data.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/canary-with-stale-data.json
index 323889c7c45..9864ce502f6 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/canary-with-stale-data.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/maintenance/testdata/canary-with-stale-data.json
@@ -288,8 +288,7 @@
"at": 1493033800469
}
}
- ],
- "selfTriggering": false
+ ]
},
"outstandingChangeField": false
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
index 8babd181d36..3e73bf4445b 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/persistence/ApplicationSerializerTest.java
@@ -5,6 +5,7 @@ import com.yahoo.component.Version;
import com.yahoo.config.application.api.DeploymentSpec;
import com.yahoo.config.application.api.ValidationOverrides;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
@@ -14,6 +15,8 @@ import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.ControllerTester;
import com.yahoo.vespa.hosted.controller.application.ApplicationRevision;
import com.yahoo.vespa.hosted.controller.application.Change;
+import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
+import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
@@ -25,7 +28,9 @@ import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
import static org.junit.Assert.assertEquals;
@@ -37,10 +42,10 @@ import static org.junit.Assert.assertFalse;
public class ApplicationSerializerTest {
private static final ApplicationSerializer applicationSerializer = new ApplicationSerializer();
-
+
private static final Zone zone1 = new Zone(Environment.from("prod"), RegionName.from("us-west-1"));
private static final Zone zone2 = new Zone(Environment.from("prod"), RegionName.from("us-east-3"));
-
+
@Test
public void testSerialization() {
ControllerTester tester = new ControllerTester();
@@ -50,12 +55,13 @@ public class ApplicationSerializerTest {
ValidationOverrides validationOverrides = ValidationOverrides.fromXml("<validation-overrides version='1.0'>" +
" <allow until='2017-06-15'>deployment-removal</allow>" +
"</validation-overrides>");
-
+
List<Deployment> deployments = new ArrayList<>();
ApplicationRevision revision1 = ApplicationRevision.from("appHash1");
ApplicationRevision revision2 = ApplicationRevision.from("appHash2", new SourceRevision("repo1", "branch1", "commit1"));
- deployments.add(new Deployment(zone1, revision1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3)));
- deployments.add(new Deployment(zone2, revision2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5)));
+ deployments.add(new Deployment(zone1, revision1, Version.fromString("1.2.3"), Instant.ofEpochMilli(3))); // One deployment without cluster info and utils
+ deployments.add(new Deployment(zone2, revision2, Version.fromString("1.2.3"), Instant.ofEpochMilli(5),
+ createClusterUtils(3, 0.2), createClusterInfo(3, 4)));
Optional<Long> projectId = Optional.of(123L);
List<JobStatus> statusList = new ArrayList<>();
@@ -69,20 +75,20 @@ public class ApplicationSerializerTest {
DeploymentJobs deploymentJobs = new DeploymentJobs(projectId, statusList, Optional.empty());
- Application original = new Application(ApplicationId.from("t1", "a1", "i1"),
- deploymentSpec,
+ Application original = new Application(ApplicationId.from("t1", "a1", "i1"),
+ deploymentSpec,
validationOverrides,
- deployments, deploymentJobs,
- Optional.of(new Change.VersionChange(Version.fromString("6.7"))),
+ deployments, deploymentJobs,
+ Optional.of(new Change.VersionChange(Version.fromString("6.7"))),
true);
Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original));
-
+
assertEquals(original.id(), serialized.id());
-
+
assertEquals(original.deploymentSpec().xmlForm(), serialized.deploymentSpec().xmlForm());
assertEquals(original.validationOverrides().xmlForm(), serialized.validationOverrides().xmlForm());
-
+
assertEquals(2, serialized.deployments().size());
assertEquals(original.deployments().get(zone1).revision(), serialized.deployments().get(zone1).revision());
assertEquals(original.deployments().get(zone2).revision(), serialized.deployments().get(zone2).revision());
@@ -98,11 +104,25 @@ public class ApplicationSerializerTest {
assertEquals( original.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.stagingTest),
serialized.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.stagingTest));
assertEquals(original.deploymentJobs().failingSince(), serialized.deploymentJobs().failingSince());
-
+
assertEquals(original.hasOutstandingChange(), serialized.hasOutstandingChange());
-
+
assertEquals(original.deploying(), serialized.deploying());
+ // Test cluster utilization
+ assertEquals(0, serialized.deployments().get(zone1).clusterUtils().size());
+ assertEquals(3, serialized.deployments().get(zone2).clusterUtils().size());
+ assertEquals(0.4, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id2")).getCpu(), 0.01);
+ assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getCpu(), 0.01);
+ assertEquals(0.2, serialized.deployments().get(zone2).clusterUtils().get(ClusterSpec.Id.from("id1")).getMemory(), 0.01);
+
+ // Test cluster info
+ assertEquals(3, serialized.deployments().get(zone2).clusterInfo().size());
+ assertEquals(10, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getCost());
+ assertEquals(ClusterSpec.Type.content, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getClusterType());
+ assertEquals("flavor2", serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getFlavor());
+ assertEquals(4, serialized.deployments().get(zone2).clusterInfo().get(ClusterSpec.Id.from("id2")).getHostnames().size());
+
{ // test more deployment serialization cases
Application original2 = original.withDeploying(Optional.of(Change.ApplicationChange.of(ApplicationRevision.from("hash1"))));
Application serialized2 = applicationSerializer.fromSlime(applicationSerializer.toSlime(original2));
@@ -123,6 +143,36 @@ public class ApplicationSerializerTest {
}
}
+ private Map<ClusterSpec.Id, ClusterInfo> createClusterInfo(int clusters, int hosts) {
+ Map<ClusterSpec.Id, ClusterInfo> result = new HashMap<>();
+
+ for (int cluster = 0; cluster < clusters; cluster++) {
+ List<String> hostnames = new ArrayList<>();
+ for (int host = 0; host < hosts; host++) {
+ hostnames.add("hostname" + cluster*host + host);
+ }
+
+ result.put(ClusterSpec.Id.from("id" + cluster), new ClusterInfo("flavor" + cluster, 10,
+ ClusterSpec.Type.content, hostnames));
+ }
+ return result;
+ }
+
+ private Map<ClusterSpec.Id, ClusterUtilization> createClusterUtils(int clusters, double inc) {
+ Map<ClusterSpec.Id, ClusterUtilization> result = new HashMap<>();
+
+ ClusterUtilization util = new ClusterUtilization(0,0,0,0);
+ for (int cluster = 0; cluster < clusters; cluster++) {
+ double agg = cluster*inc;
+ result.put(ClusterSpec.Id.from("id" + cluster), new ClusterUtilization(
+ util.getMemory()+ agg,
+ util.getCpu()+ agg,
+ util.getDisk() + agg,
+ util.getDiskBusy() + agg));
+ }
+ return result;
+ }
+
@Test
public void testLegacySerialization() throws IOException {
Application applicationWithSuccessfulJob = applicationSerializer.fromSlime(applicationSlime(false));
@@ -138,17 +188,30 @@ public class ApplicationSerializerTest {
assertFalse(application.deploymentJobs().jobStatus().get(DeploymentJobs.JobType.systemTest).lastCompleted().get().upgrade());
}
+ // TODO: Remove after October 2017
+ @Test
+ public void testLegacySerializationWithZeroProjectId() {
+ Application original = applicationSerializer.fromSlime(applicationSlime(0, false));
+ assertFalse(original.deploymentJobs().projectId().isPresent());
+ Application serialized = applicationSerializer.fromSlime(applicationSerializer.toSlime(original));
+ assertFalse(serialized.deploymentJobs().projectId().isPresent());
+ }
+
private Slime applicationSlime(boolean error) {
- return SlimeUtils.jsonToSlime(applicationJson(error).getBytes(StandardCharsets.UTF_8));
+ return applicationSlime(123, error);
}
- private String applicationJson(boolean error) {
+ private Slime applicationSlime(long projectId, boolean error) {
+ return SlimeUtils.jsonToSlime(applicationJson(projectId, error).getBytes(StandardCharsets.UTF_8));
+ }
+
+ private String applicationJson(long projectId, boolean error) {
return
"{\n" +
" \"id\": \"t1:a1:i1\",\n" +
" \"deploymentSpecField\": \"<deployment version='1.0'/>\",\n" +
" \"deploymentJobs\": {\n" +
- " \"projectId\": 123,\n" +
+ " \"projectId\": " + projectId + ",\n" +
" \"jobStatus\": [\n" +
" {\n" +
" \"jobType\": \"system-test\",\n" +
@@ -166,10 +229,8 @@ public class ApplicationSerializerTest {
" \"at\": 1505725189469\n" +
" }\n" +
" }\n" +
- " ],\n" +
- " \"selfTriggering\": false\n" +
+ " ]\n" +
" }\n" +
"}\n";
}
-
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java
index ed7378ac6b5..a792626d691 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerControllerTester.java
@@ -23,13 +23,14 @@ import com.yahoo.vespa.hosted.controller.api.identifiers.TenantId;
import com.yahoo.vespa.hosted.controller.api.identifiers.UserId;
import com.yahoo.vespa.hosted.controller.api.integration.athens.Athens;
import com.yahoo.vespa.hosted.controller.api.integration.athens.AthensPrincipal;
-import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
-import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
-import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.AthensMock;
import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.AthensDbMock;
+import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.AthensMock;
import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.ZmsClientFactoryMock;
+import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
import com.yahoo.vespa.hosted.controller.maintenance.JobControl;
import com.yahoo.vespa.hosted.controller.maintenance.Upgrader;
+import com.yahoo.vespa.hosted.controller.persistence.CuratorDb;
import com.yahoo.vespa.hosted.controller.persistence.MockCuratorDb;
import java.io.File;
@@ -51,7 +52,9 @@ public class ContainerControllerTester {
public ContainerControllerTester(JDisc container, String responseFilePath) {
containerTester = new ContainerTester(container, responseFilePath);
controller = (Controller)container.components().getComponent("com.yahoo.vespa.hosted.controller.Controller");
- upgrader = new Upgrader(controller, Duration.ofMinutes(2), new JobControl(new MockCuratorDb()));
+ CuratorDb curatorDb = new MockCuratorDb();
+ curatorDb.writeUpgradesPerMinute(100);
+ upgrader = new Upgrader(controller, Duration.ofDays(1), new JobControl(curatorDb), curatorDb);
}
public Controller controller() { return controller; }
@@ -112,4 +115,8 @@ public class ContainerControllerTester {
containerTester.assertResponse(request, expectedResponse);
}
+ public void assertResponse(Request request, String expectedResponse, int expectedStatusCode) throws IOException {
+ containerTester.assertResponse(request, expectedResponse, expectedStatusCode);
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java
index 4fc6e91039c..b55ee9a195f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ContainerTester.java
@@ -78,8 +78,8 @@ public class ContainerTester {
public void assertResponse(Request request, String expectedResponse, int expectedStatusCode) throws IOException {
Response response = container.handleRequest(request);
- assertEquals("Status code", expectedStatusCode, response.getStatus());
assertEquals(expectedResponse, response.getBodyAsString());
+ assertEquals("Status code", expectedStatusCode, response.getStatus());
}
private Set<String> fieldsToCensor(String fieldNameOrNull, Inspector value, Set<String> fieldsToCensor) {
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
index 8b2595c6254..fd07428126a 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/ControllerContainerTest.java
@@ -49,8 +49,6 @@ public class ControllerContainerTest {
" <component id='com.yahoo.vespa.hosted.controller.ConfigServerClientMock'/>" +
" <component id='com.yahoo.vespa.hosted.controller.ZoneRegistryMock'/>" +
" <component id='com.yahoo.vespa.hosted.controller.Controller'/>" +
- " <component id='com.yahoo.vespa.hosted.controller.cost.MockInsightBackend'/>" +
- " <component id='com.yahoo.vespa.hosted.controller.cost.CostMock'/>" +
" <component id='com.yahoo.vespa.hosted.controller.integration.MockMetricsService'/>" +
" <component id='com.yahoo.vespa.hosted.controller.maintenance.ControllerMaintenance'/>" +
" <component id='com.yahoo.vespa.hosted.controller.maintenance.JobControl'/>" +
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
index 7d1700270ea..13b1165ccb2 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/ApplicationApiTest.java
@@ -3,25 +3,28 @@ package com.yahoo.vespa.hosted.controller.restapi.application;
import com.yahoo.application.container.handler.Request;
import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.Environment;
+import com.yahoo.vespa.curator.Lock;
+import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.ConfigServerClientMock;
import com.yahoo.vespa.hosted.controller.api.identifiers.AthensDomain;
import com.yahoo.vespa.hosted.controller.api.identifiers.UserId;
import com.yahoo.vespa.hosted.controller.api.integration.athens.Athens;
import com.yahoo.vespa.hosted.controller.api.integration.athens.AthensPrincipal;
+import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.AthensDbMock;
+import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.AthensMock;
+import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.ZmsClientFactoryMock;
import com.yahoo.vespa.hosted.controller.api.integration.configserver.ConfigServerException;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.ApplicationCost;
-import com.yahoo.vespa.hosted.controller.api.integration.cost.ClusterCost;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
+import com.yahoo.vespa.hosted.controller.application.ClusterInfo;
+import com.yahoo.vespa.hosted.controller.application.ClusterUtilization;
+import com.yahoo.vespa.hosted.controller.application.Deployment;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs;
-import com.yahoo.vespa.hosted.controller.cost.MockInsightBackend;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
import com.yahoo.vespa.hosted.controller.restapi.ContainerControllerTester;
import com.yahoo.vespa.hosted.controller.restapi.ContainerTester;
import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest;
-import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.AthensMock;
-import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.AthensDbMock;
-import com.yahoo.vespa.hosted.controller.api.integration.athens.mock.ZmsClientFactoryMock;
import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.mime.MultipartEntityBuilder;
@@ -32,7 +35,6 @@ import java.io.File;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.URI;
-import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
@@ -42,6 +44,7 @@ import java.util.Optional;
/**
* @author bratseth
+ * @author mpolden
*/
public class ApplicationApiTest extends ControllerContainerTest {
@@ -53,8 +56,9 @@ public class ApplicationApiTest extends ControllerContainerTest {
private static final String athensUserDomain = "domain1";
private static final String athensScrewdriverDomain = "screwdriver-domain";
+
@Test
- public void testApplicationApi() throws IOException {
+ public void testApplicationApi() throws Exception {
ContainerControllerTester controllerTester = new ContainerControllerTester(container, responseFiles);
ContainerTester tester = controllerTester.containerTester();
tester.updateSystemVersion();
@@ -104,6 +108,14 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", "6.1.0", Request.Method.POST),
new File("application-deployment.json"));
+ // DELETE (cancel) ongoing change
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", "", Request.Method.DELETE),
+ new File("application-deployment-cancelled.json"));
+
+ // DELETE (cancel) again is a no-op
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", "", Request.Method.DELETE),
+ new File("application-deployment-cancelled-no-op.json"));
+
// POST (deploy) an application to a zone - manual user deployment
HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty());
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/dev/region/us-west-1/instance/default/deploy",
@@ -118,28 +130,32 @@ public class ApplicationApiTest extends ControllerContainerTest {
addScrewdriverUserToDomain("screwdriveruser1", "domain1"); // (Necessary but not provided in this API)
+ // Trigger deployment
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/deploying", "6.1.0", Request.Method.POST),
+ new File("application-deployment.json"));
+
// ... systemtest
- tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/test-region/instance/default/",
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default/",
createApplicationDeployData(applicationPackage, Optional.of(screwdriverProjectId)),
Request.Method.POST,
athensScrewdriverDomain, "screwdriveruser1"),
new File("deploy-result.json"));
- tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/test-region/instance/default",
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default",
"",
Request.Method.DELETE),
- "Deactivated tenant/tenant1/application/application1/environment/test/region/test-region/instance/default");
+ "Deactivated tenant/tenant1/application/application1/environment/test/region/us-east-1/instance/default");
controllerTester.notifyJobCompletion(id, screwdriverProjectId, true, DeploymentJobs.JobType.systemTest); // Called through the separate screwdriver/v1 API
// ... staging
- tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/staging-region/instance/default/",
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/default/",
createApplicationDeployData(applicationPackage, Optional.of(screwdriverProjectId)),
Request.Method.POST,
athensScrewdriverDomain, "screwdriveruser1"),
new File("deploy-result.json"));
- tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/staging-region/instance/default",
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/default",
"",
Request.Method.DELETE),
- "Deactivated tenant/tenant1/application/application1/environment/staging/region/staging-region/instance/default");
+ "Deactivated tenant/tenant1/application/application1/environment/staging/region/us-east-3/instance/default");
controllerTester.notifyJobCompletion(id, screwdriverProjectId, true, DeploymentJobs.JobType.stagingTest);
// ... prod zone
@@ -157,7 +173,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", "", Request.Method.GET),
new File("application.json"));
// GET an application deployment
- addMockObservedApplicationCost("tenant1", "application1", "default");
+ addMockObservedApplicationCost(controllerTester);
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default", "", Request.Method.GET),
new File("deployment.json"));
// POST a 'restart application' command
@@ -266,10 +282,110 @@ public class ApplicationApiTest extends ControllerContainerTest {
"{\"message\":\"Successfully copied environment hosted-verified-prod to hosted-instance_tenant1_application1_placeholder_component_default\"}");
tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/promote", "", Request.Method.POST),
"{\"message\":\"Successfully copied environment hosted-instance_tenant1_application1_placeholder_component_default to hosted-instance_tenant1_application1_us-west-1_prod_default\"}");
+
+ controllerTester.controller().deconstruct();
+ }
+
+ @Test
+ public void testDeployDirectly() throws Exception {
+ // Setup
+ ContainerControllerTester controllerTester = new ContainerControllerTester(container, responseFiles);
+ ContainerTester tester = controllerTester.containerTester();
+ tester.updateSystemVersion();
+ addTenantAthensDomain(athensUserDomain, "mytenant");
+ addScrewdriverUserToDomain("screwdriveruser1", "domain1");
+
+ // Create tenant
+ tester.assertResponse(request("/application/v4/tenant/tenant1",
+ "{\"athensDomain\":\"domain1\", \"property\":\"property1\"}",
+ Request.Method.POST),
+ new File("tenant-without-applications.json"));
+
+ // Create application
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1",
+ "",
+ Request.Method.POST),
+ new File("application-reference.json"));
+
+ // POST (deploy) an application to a prod zone - allowed when project ID is not specified
+ HttpEntity entity = createApplicationDeployData(applicationPackage, Optional.empty());
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default/deploy",
+ entity,
+ Request.Method.POST,
+ athensScrewdriverDomain, "screwdriveruser1"),
+ new File("deploy-result.json"));
+ }
+
+ @Test
+ public void testSortsDeploymentsAndJobs() throws Exception {
+ // Setup
+ ContainerControllerTester controllerTester = new ContainerControllerTester(container, responseFiles);
+ ContainerTester tester = controllerTester.containerTester();
+ tester.updateSystemVersion();
+ addTenantAthensDomain(athensUserDomain, "mytenant");
+ addScrewdriverUserToDomain("screwdriveruser1", "domain1");
+
+ // Create tenant
+ tester.assertResponse(request("/application/v4/tenant/tenant1",
+ "{\"athensDomain\":\"domain1\", \"property\":\"property1\"}",
+ Request.Method.POST),
+ new File("tenant-without-applications.json"));
+
+ // Create application
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1",
+ "",
+ Request.Method.POST),
+ new File("application-reference.json"));
+
+ // Deploy
+ ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
+ .region("us-east-3")
+ .build();
+ ApplicationId id = ApplicationId.from("tenant1", "application1", "default");
+ long projectId = 1;
+ HttpEntity deployData = createApplicationDeployData(applicationPackage, Optional.of(projectId));
+
+ startAndTestChange(controllerTester, id, projectId, deployData);
+
+ // us-east-3
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east-3/instance/default/deploy",
+ deployData,
+ Request.Method.POST,
+ athensScrewdriverDomain, "screwdriveruser1"),
+ new File("deploy-result.json"));
+ controllerTester.notifyJobCompletion(id, projectId, true, DeploymentJobs.JobType.productionUsEast3);
+
+ // New zone is added before us-east-3
+ applicationPackage = new ApplicationPackageBuilder()
+ // These decides the ordering of deploymentJobs and instances in the response
+ .region("us-west-1")
+ .region("us-east-3")
+ .build();
+ deployData = createApplicationDeployData(applicationPackage, Optional.of(projectId));
+ startAndTestChange(controllerTester, id, projectId, deployData);
+
+ // us-west-1
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default/deploy",
+ deployData,
+ Request.Method.POST,
+ athensScrewdriverDomain, "screwdriveruser1"),
+ new File("deploy-result.json"));
+ controllerTester.notifyJobCompletion(id, projectId, true, DeploymentJobs.JobType.productionUsWest1);
+
+ // us-east-3
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east-3/instance/default/deploy",
+ deployData,
+ Request.Method.POST,
+ athensScrewdriverDomain, "screwdriveruser1"),
+ new File("deploy-result.json"));
+ controllerTester.notifyJobCompletion(id, projectId, true, DeploymentJobs.JobType.productionUsEast3);
+
+ tester.assertResponse(request("/application/v4/tenant/tenant1/application/application1", "", Request.Method.GET),
+ new File("application-without-change-multiple-deployments.json"));
}
@Test
- public void testErrorResponses() throws IOException, URISyntaxException {
+ public void testErrorResponses() throws Exception {
ContainerTester tester = new ContainerTester(container, responseFiles);
tester.updateSystemVersion();
addTenantAthensDomain("domain1", "mytenant");
@@ -385,7 +501,7 @@ public class ApplicationApiTest extends ControllerContainerTest {
}
@Test
- public void testAuthorization() throws IOException, URISyntaxException {
+ public void testAuthorization() throws Exception {
ContainerTester tester = new ContainerTester(container, responseFiles);
String authorizedUser = "mytenant";
String unauthorizedUser = "othertenant";
@@ -579,27 +695,60 @@ public class ApplicationApiTest extends ControllerContainerTest {
AthensDbMock.Domain domain = mock.getSetup().domains.get(new AthensDomain(domainName));
domain.admin(new AthensPrincipal(new AthensDomain(athensScrewdriverDomain), new UserId(screwdriverUserId)));
}
-
- private void addMockObservedApplicationCost(String tenant, String application, String instance) {
- MockInsightBackend mock = (MockInsightBackend) container.components().getComponent("com.yahoo.vespa.hosted.controller.cost.MockInsightBackend");
-
- ClusterCost cost = new ClusterCost();
- cost.setCount(2);
- cost.setResource("cpu");
- cost.setUtilization(1.0f);
- cost.setTco(25);
- cost.setFlavor("flavor1");
- cost.setWaste(10);
- cost.setType("content");
- List<String> hostnames = new ArrayList<>();
- hostnames.add("host1");
- hostnames.add("host2");
- cost.setHostnames(hostnames);
- Map<String, ClusterCost> clusterCosts = new HashMap<>();
- clusterCosts.put("cluster1", cost);
-
- mock.setApplicationCost(new ApplicationId.Builder().tenant(tenant).applicationName(application).instanceName(instance).build(),
- new ApplicationCost("prod.us-west-1", tenant, application + "." + instance, 37, 1.0f, 0.0f, clusterCosts));
+
+ private void startAndTestChange(ContainerControllerTester controllerTester, ApplicationId application, long projectId,
+ HttpEntity deployData) throws IOException {
+ ContainerTester tester = controllerTester.containerTester();
+
+ // Trigger application change
+ controllerTester.notifyJobCompletion(application, projectId, true, DeploymentJobs.JobType.component);
+
+ // system-test
+ String testPath = String.format("/application/v4/tenant/%s/application/%s/environment/test/region/us-east-1/instance/default",
+ application.tenant().value(), application.application().value());
+ tester.assertResponse(request(testPath,
+ deployData,
+ Request.Method.POST,
+ athensScrewdriverDomain, "screwdriveruser1"),
+ new File("deploy-result.json"));
+ tester.assertResponse(request(testPath,
+ "",
+ Request.Method.DELETE),
+ "Deactivated " + testPath.replaceFirst("/application/v4/", ""));
+ controllerTester.notifyJobCompletion(application, projectId, true, DeploymentJobs.JobType.systemTest);
+
+ // staging
+ String stagingPath = String.format("/application/v4/tenant/%s/application/%s/environment/staging/region/us-east-3/instance/default",
+ application.tenant().value(), application.application().value());
+ tester.assertResponse(request(stagingPath,
+ deployData,
+ Request.Method.POST,
+ athensScrewdriverDomain, "screwdriveruser1"),
+ new File("deploy-result.json"));
+ tester.assertResponse(request(stagingPath,
+ "",
+ Request.Method.DELETE),
+ "Deactivated " + stagingPath.replaceFirst("/application/v4/", ""));
+ controllerTester.notifyJobCompletion(application, projectId, true, DeploymentJobs.JobType.stagingTest);
+ }
+
+ private void addMockObservedApplicationCost(ContainerControllerTester controllerTester) {
+ for (Application application : controllerTester.controller().applications().asList()) {
+ try (Lock lock = controllerTester.controller().applications().lock(application.id())) {
+ for (Deployment deployment : application.deployments().values()) {
+ Map<ClusterSpec.Id, ClusterInfo> clusterInfo = new HashMap<>();
+ List<String> hostnames = new ArrayList<>();
+ hostnames.add("host1");
+ hostnames.add("host2");
+ clusterInfo.put(ClusterSpec.Id.from("cluster1"), new ClusterInfo("flavor1", 37, ClusterSpec.Type.content, hostnames));
+ Map<ClusterSpec.Id, ClusterUtilization> clusterUtils = new HashMap<>();
+ clusterUtils.put(ClusterSpec.Id.from("cluster1"), new ClusterUtilization(0.3, 0.6, 0.4, 0.3));
+ deployment = deployment.withClusterInfo(clusterInfo);
+ deployment = deployment.withClusterUtils(clusterUtils);
+ application = application.with(deployment);
+ controllerTester.controller().applications().store(application, lock);
+ }
+ }
+ }
}
-
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-deployment-cancelled-no-op.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-deployment-cancelled-no-op.json
new file mode 100644
index 00000000000..91d3e64d6db
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-deployment-cancelled-no-op.json
@@ -0,0 +1 @@
+{"message":"No deployment in progress for application 'tenant1.application1' at this time"}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-deployment-cancelled.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-deployment-cancelled.json
new file mode 100644
index 00000000000..d1e1ebe94fd
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-deployment-cancelled.json
@@ -0,0 +1 @@
+{"message":"Cancelled version change to 6.1 for application 'tenant1.application1'"}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-without-change-multiple-deployments.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-without-change-multiple-deployments.json
new file mode 100644
index 00000000000..a82bdaa454a
--- /dev/null
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application-without-change-multiple-deployments.json
@@ -0,0 +1,204 @@
+{
+ "deploymentJobs": [
+ {
+ "type": "component",
+ "success": true,
+ "lastCompleted": {
+ "version": "(ignore)",
+ "at": "(ignore)"
+ },
+ "lastSuccess": {
+ "version": "(ignore)",
+ "at": "(ignore)"
+ }
+ },
+ {
+ "type": "system-test",
+ "success": true,
+ "lastTriggered": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ },
+ "lastCompleted": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ },
+ "lastSuccess": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ }
+ },
+ {
+ "type": "staging-test",
+ "success": true,
+ "lastTriggered": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ },
+ "lastCompleted": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ },
+ "lastSuccess": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ }
+ },
+ {
+ "type": "production-us-west-1",
+ "success": true,
+ "lastTriggered": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ },
+ "lastCompleted": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ },
+ "lastSuccess": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ }
+ },
+ {
+ "type": "production-us-east-3",
+ "success": true,
+ "lastTriggered": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ },
+ "lastCompleted": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ },
+ "lastSuccess": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
+ }
+ }
+ ],
+ "compileVersion": "(ignore)",
+ "globalRotations": [
+ "http://fake-global-rotation-tenant1.application1"
+ ],
+ "instances": [
+ {
+ "environment": "prod",
+ "region": "us-west-1",
+ "instance": "default",
+ "bcpStatus": {
+ "rotationStatus": "IN"
+ },
+ "url": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-west-1/instance/default"
+ },
+ {
+ "environment": "prod",
+ "region": "us-east-3",
+ "instance": "default",
+ "bcpStatus": {
+ "rotationStatus": "UNKNOWN"
+ },
+ "url": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/environment/prod/region/us-east-3/instance/default"
+ }
+ ],
+ "metrics": {
+ "queryServiceQuality": 0.5,
+ "writeServiceQuality": 0.7
+ }
+}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json
index 5df690f5bc7..cc17e76642f 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/application.json
@@ -8,71 +8,119 @@
"success": true,
"lastTriggered": {
"version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
"at": "(ignore)"
},
"lastCompleted": {
"version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
"at": "(ignore)"
},
"lastSuccess": {
"version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
"at": "(ignore)"
}
},
{
- "type":"staging-test",
- "success":true,
- "lastTriggered":{
- "version":"(ignore)",
- "at":"(ignore)"
+ "type": "staging-test",
+ "success": true,
+ "lastTriggered": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
},
- "lastCompleted":{
- "version":"(ignore)",
- "at":"(ignore)"
+ "lastCompleted": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
},
- "lastSuccess":{
- "version":"(ignore)",
- "at":"(ignore)"
+ "lastSuccess": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
+ }
+ },
+ "at": "(ignore)"
}
},
{
- "type":"production-corp-us-east-1",
- "success":false,
- "lastTriggered":{
- "version":"(ignore)",
- "revision":{
- "hash":"(ignore)",
- "source":{
- "gitRepository":"repository1",
- "gitBranch":"master",
- "gitCommit":"commit1"
+ "type": "production-corp-us-east-1",
+ "success": false,
+ "lastTriggered": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
}
},
- "at":"(ignore)"
+ "at": "(ignore)"
},
- "lastCompleted":{
- "version":"(ignore)",
- "revision":{
- "hash":"(ignore)",
- "source":{
- "gitRepository":"repository1",
- "gitBranch":"master",
- "gitCommit":"commit1"
+ "lastCompleted": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
}
},
- "at":"(ignore)"
+ "at": "(ignore)"
},
- "firstFailing":{
- "version":"(ignore)",
- "revision":{
- "hash":"(ignore)",
- "source":{
- "gitRepository":"repository1",
- "gitBranch":"master",
- "gitCommit":"commit1"
+ "firstFailing": {
+ "version": "(ignore)",
+ "revision": {
+ "hash": "(ignore)",
+ "source": {
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1"
}
},
- "at":"(ignore)"
+ "at": "(ignore)"
}
}
],
@@ -91,14 +139,14 @@
"environment": "prod",
"region": "corp-us-east-1",
"instance": "default",
- "bcpStatus": {"rotationStatus":"UNKNOWN"},
+ "bcpStatus": {
+ "rotationStatus": "UNKNOWN"
+ },
"url": "http://localhost:8080/application/v4/tenant/tenant1/application/application1/environment/prod/region/corp-us-east-1/instance/default"
}
- ]
- ,
- "metrics":
- {
- "queryServiceQuality":0.5,
- "writeServiceQuality":0.7
- }
+ ],
+ "metrics": {
+ "queryServiceQuality": 0.5,
+ "writeServiceQuality": 0.7
+ }
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deploy-result.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deploy-result.json
index d1ae5253a00..06b48064b94 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deploy-result.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deploy-result.json
@@ -1,6 +1,6 @@
{
"revisionId":"(ignore)",
- "applicationZipSize":412,
+ "applicationZipSize":"(ignore)",
"prepareMessages":[],
"configChangeActions":{
"restart":[],
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json
index e47e85f67a4..67fc48d4646 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/application/responses/deployment.json
@@ -1,6 +1,10 @@
{
"serviceUrls": [
- "http://old-endpoint.vespa.yahooapis.com:4080","http://qrs-endpoint.vespa.yahooapis.com:4080","http://feeding-endpoint.vespa.yahooapis.com:4080","http://global-endpoint.vespa.yahooapis.com:4080","http://alias-endpoint.vespa.yahooapis.com:4080"
+ "http://old-endpoint.vespa.yahooapis.com:4080",
+ "http://qrs-endpoint.vespa.yahooapis.com:4080",
+ "http://feeding-endpoint.vespa.yahooapis.com:4080",
+ "http://global-endpoint.vespa.yahooapis.com:4080",
+ "http://alias-endpoint.vespa.yahooapis.com:4080"
],
"nodes": "http://localhost:8080/zone/v2/prod/corp-us-east-1/nodes/v2/node/%3F&recursive=true&application=tenant1.application1.default",
"elkUrl": "http://log.prod.corp-us-east-1.test/#/discover?_g=()&_a=(columns:!(_source),index:'logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'HV-tenant:%22tenant1%22%20AND%20HV-application:%22application1%22%20AND%20HV-region:%22corp-us-east-1%22%20AND%20HV-instance:%22default%22%20AND%20HV-environment:%22prod%22')),sort:!('@timestamp',desc))",
@@ -8,37 +12,34 @@
"version": "(ignore)",
"revision": "(ignore)",
"deployTimeEpochMs": "(ignore)",
- "screwdriverId":"123",
- "gitRepository":"repository1",
- "gitBranch":"master",
- "gitCommit":"commit1",
+ "screwdriverId": "123",
+ "gitRepository": "repository1",
+ "gitBranch": "master",
+ "gitCommit": "commit1",
"cost": {
- "zone": "prod.us-west-1",
- "tenant": "tenant1",
- "app": "application1.default",
"tco": 37,
- "utilization": 1.0,
+ "utilization": 5.999999999999999,
"waste": 0.0,
"cluster": {
"cluster1": {
"count": 2,
"resource": "cpu",
- "utilization": 1.0,
- "tco": 25,
+ "utilization": 2.999999999999999,
+ "tco": 37,
"flavor": "flavor1",
- "waste": 10,
+ "waste": 0,
"type": "content",
"util": {
- "cpu": 0.0,
- "mem": 0.0,
- "disk": 0.0,
- "diskBusy": 0.0
+ "cpu": 2.999999999999999,
+ "mem": 0.4285714285714286,
+ "disk": 0.5714285714285715,
+ "diskBusy": 1.0
},
"usage": {
- "cpu": 0.0,
- "mem": 0.0,
- "disk": 0.0,
- "diskBusy": 0.0
+ "cpu": 0.6,
+ "mem": 0.3,
+ "disk": 0.4,
+ "diskBusy": 0.3
},
"hostnames": [
"host1",
@@ -48,10 +49,10 @@
}
},
"metrics": {
- "queriesPerSecond":1.0,
- "writesPerSecond":2.0,
- "documentCount":3.0,
- "queryLatencyMillis":4.0,
- "writeLatencyMillis":5.0
+ "queriesPerSecond": 1.0,
+ "writesPerSecond": 2.0,
+ "documentCount": 3.0,
+ "queryLatencyMillis": 4.0,
+ "writeLatencyMillis": 5.0
}
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java
index 011bbadb91c..e1c5cdb7742 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/ControllerApiTest.java
@@ -7,7 +7,6 @@ import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest;
import org.junit.Test;
import java.io.File;
-import java.io.IOException;
/**
* @author bratseth
@@ -17,7 +16,7 @@ public class ControllerApiTest extends ControllerContainerTest {
private final static String responseFiles = "src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/";
@Test
- public void testControllerApi() throws IOException {
+ public void testControllerApi() throws Exception {
ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles);
tester.assertResponse(new Request("http://localhost:8080/controller/v1/"), new File("root.json"));
@@ -37,4 +36,32 @@ public class ControllerApiTest extends ControllerContainerTest {
"{\"message\":\"Re-activated job 'DeploymentExpirer'\"}");
}
+ @Test
+ public void testUpgraderApi() throws Exception {
+ ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles);
+
+ // Get current configuration
+ tester.assertResponse(new Request("http://localhost:8080/controller/v1/jobs/upgrader"),
+ "{\"upgradesPerMinute\":0.5}",
+ 200);
+
+ // Set invalid configuration
+ tester.assertResponse(new Request("http://localhost:8080/controller/v1/jobs/upgrader",
+ "{\"upgradesPerMinute\":-1}", Request.Method.PATCH),
+ "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Upgrades per minute must be >= 0\"}",
+ 400);
+
+ // Unrecognized fields are ignored
+ tester.assertResponse(new Request("http://localhost:8080/controller/v1/jobs/upgrader",
+ "{\"foo\":bar}", Request.Method.PATCH),
+ "{\"upgradesPerMinute\":0.5}",
+ 200);
+
+ // Set configuration
+ tester.assertResponse(new Request("http://localhost:8080/controller/v1/jobs/upgrader",
+ "{\"upgradesPerMinute\":42}", Request.Method.PATCH),
+ "{\"upgradesPerMinute\":42.0}",
+ 200);
+ }
+
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
index d8ca5e59b4f..e974c315eb2 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/controller/responses/maintenance.json
@@ -1,31 +1,40 @@
{
- "jobs":[
+ "jobs": [
{
- "name":"DelayedDeployer"
+ "name": "DelayedDeployer"
},
{
- "name":"Upgrader"
+ "name": "ClusterUtilizationMaintainer"
},
{
- "name":"FailureRedeployer"
+ "name":"BlockedChangeDeployer"
},
{
- "name":"DeploymentExpirer"
+ "name": "Upgrader"
},
{
- "name":"MetricsReporter"
+ "name": "FailureRedeployer"
},
{
- "name":"VersionStatusUpdater"
+ "name": "ClusterInfoMaintainer"
},
{
- "name":"DeploymentIssueReporter"
+ "name": "DeploymentExpirer"
},
{
- "name":"OutstandingChangeDeployer"
+ "name": "MetricsReporter"
+ },
+ {
+ "name": "VersionStatusUpdater"
+ },
+ {
+ "name": "DeploymentIssueReporter"
+ },
+ {
+ "name": "OutstandingChangeDeployer"
}
],
- "inactive":[
+ "inactive": [
"DeploymentExpirer"
]
}
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/root-response.json b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/root-response.json
index 90b1b027529..f4a524a5943 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/root-response.json
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/root-response.json
@@ -21,11 +21,6 @@
"wadl":"http://localhost:8080/zone/application.wadl"
},
{
- "name":"cost",
- "url":"http://localhost:8080/cost/v1/",
- "wadl":"http://localhost:8080/cost/application.wadl"
- },
- {
"name":"application",
"url":"http://localhost:8080/application/v4/"
},
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiTest.java
index bdfd0f9794f..fcabaa28652 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/restapi/screwdriver/ScrewdriverApiTest.java
@@ -9,12 +9,14 @@ import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.config.SlimeUtils;
+import com.yahoo.vespa.curator.Lock;
import com.yahoo.vespa.hosted.controller.Application;
import com.yahoo.vespa.hosted.controller.application.ApplicationPackage;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobError;
import com.yahoo.vespa.hosted.controller.application.DeploymentJobs.JobType;
import com.yahoo.vespa.hosted.controller.application.JobStatus;
import com.yahoo.vespa.hosted.controller.deployment.ApplicationPackageBuilder;
+import com.yahoo.vespa.hosted.controller.deployment.BuildSystem;
import com.yahoo.vespa.hosted.controller.restapi.ContainerControllerTester;
import com.yahoo.vespa.hosted.controller.restapi.ControllerContainerTest;
import com.yahoo.vespa.hosted.controller.versions.VersionStatus;
@@ -74,7 +76,7 @@ public class ScrewdriverApiTest extends ControllerContainerTest {
// Notifying about unknown job fails
tester.containerTester().assertResponse(new Request("http://localhost:8080/screwdriver/v1/jobreport",
jsonReport(app.id(), JobType.productionUsEast3, projectId, 1L,
- Optional.empty(), false, true)
+ Optional.empty())
.getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
new File("unexpected-completion.json"), 400);
@@ -138,24 +140,66 @@ public class ScrewdriverApiTest extends ControllerContainerTest {
assertFalse(jobStatus.isSuccess());
assertEquals(JobError.outOfCapacity, jobStatus.jobError().get());
}
+
+ @Test
+ public void testTriggerJobForApplication() throws Exception {
+ ContainerControllerTester tester = new ContainerControllerTester(container, responseFiles);
+ BuildSystem buildSystem = tester.controller().applications().deploymentTrigger().buildSystem();
+ tester.containerTester().updateSystemVersion();
+
+ Application app = tester.createApplication();
+ try (Lock lock = tester.controller().applications().lock(app.id())) {
+ app = app.withProjectId(1);
+ tester.controller().applications().store(app, lock);
+ }
+
+ // Unknown application
+ assertResponse(new Request("http://localhost:8080/screwdriver/v1/trigger/tenant/foo/application/bar",
+ new byte[0], Request.Method.POST),
+ 404, "{\"error-code\":\"NOT_FOUND\",\"message\":\"No such application 'foo.bar'\"}");
+
+ // Invalid job
+ assertResponse(new Request("http://localhost:8080/screwdriver/v1/trigger/tenant/" +
+ app.id().tenant().value() + "/application/" + app.id().application().value(),
+ "invalid".getBytes(StandardCharsets.UTF_8), Request.Method.POST),
+ 400, "{\"error-code\":\"BAD_REQUEST\",\"message\":\"Unknown job id 'invalid'\"}");
+
+ // component is triggered if no job is specified in request body
+ assertResponse(new Request("http://localhost:8080/screwdriver/v1/trigger/tenant/" +
+ app.id().tenant().value() + "/application/" + app.id().application().value(),
+ new byte[0], Request.Method.POST),
+ 200, "{\"message\":\"Triggered component for tenant1.application1\"}");
+
+ assertFalse(buildSystem.jobs().isEmpty());
+ assertEquals(JobType.component.id(), buildSystem.jobs().get(0).jobName());
+ assertEquals(1L, buildSystem.jobs().get(0).projectId());
+ buildSystem.takeJobsToRun();
+
+ // Triggers specific job when given
+ assertResponse(new Request("http://localhost:8080/screwdriver/v1/trigger/tenant/" +
+ app.id().tenant().value() + "/application/" + app.id().application().value(),
+ "staging-test".getBytes(StandardCharsets.UTF_8), Request.Method.POST),
+ 200, "{\"message\":\"Triggered staging-test for tenant1.application1\"}");
+ assertFalse(buildSystem.jobs().isEmpty());
+ assertEquals(JobType.stagingTest.id(), buildSystem.jobs().get(0).jobName());
+ assertEquals(1L, buildSystem.jobs().get(0).projectId());
+ }
private void notifyCompletion(ApplicationId app, long projectId, JobType jobType, Optional<JobError> error) throws IOException {
assertResponse(new Request("http://localhost:8080/screwdriver/v1/jobreport",
- jsonReport(app, jobType, projectId, 1L, error, false, true).getBytes(StandardCharsets.UTF_8),
+ jsonReport(app, jobType, projectId, 1L, error).getBytes(StandardCharsets.UTF_8),
Request.Method.POST),
200, "ok");
}
private static String jsonReport(ApplicationId applicationId, JobType jobType, long projectId, long buildNumber,
- Optional<JobError> jobError, boolean selfTriggering, boolean gitChanges) {
+ Optional<JobError> jobError) {
return
"{\n" +
" \"projectId\" : " + projectId + ",\n" +
" \"jobName\" :\"" + jobType.id() + "\",\n" +
" \"buildNumber\" : " + buildNumber + ",\n" +
jobError.map(message -> " \"jobError\" : \"" + message + "\",\n").orElse("") +
- " \"selfTriggering\": " + selfTriggering + ",\n" +
- " \"gitChanges\" : " + gitChanges + ",\n" +
" \"tenant\" :\"" + applicationId.tenant().value() + "\",\n" +
" \"application\" :\"" + applicationId.application().value() + "\",\n" +
" \"instance\" :\"" + applicationId.instance().value() + "\"\n" +
diff --git a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
index 1de9603bde0..7bbbf8f0499 100644
--- a/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
+++ b/controller-server/src/test/java/com/yahoo/vespa/hosted/controller/versions/VersionStatusTest.java
@@ -46,7 +46,7 @@ public class VersionStatusTest {
public void testSystemVersionIsControllerVersionIfConfigserversAreNewer() {
ControllerTester tester = new ControllerTester();
Version largerThanCurrent = new Version(Vtag.currentVersion.getMajor() + 1);
- tester.configServerClientMock().setDefaultConfigServerVersion(largerThanCurrent);
+ tester.configServer().setDefaultConfigServerVersion(largerThanCurrent);
VersionStatus versionStatus = VersionStatus.compute(tester.controller());
assertEquals(Vtag.currentVersion, versionStatus.systemVersion().get().versionNumber());
}
@@ -55,7 +55,7 @@ public class VersionStatusTest {
public void testSystemVersionIsVersionOfOldestConfigServer() throws URISyntaxException {
ControllerTester tester = new ControllerTester();
Version oldest = new Version(5);
- tester.configServerClientMock().configServerVersions().put(new URI("http://cfg.prod.corp-us-east-1.test"), oldest);
+ tester.configServer().configServerVersions().put(new URI("http://cfg.prod.corp-us-east-1.test"), oldest);
VersionStatus versionStatus = VersionStatus.compute(tester.controller());
assertEquals(oldest, versionStatus.systemVersion().get().versionNumber());
}
@@ -95,7 +95,7 @@ public class VersionStatusTest {
assertEquals("The version of this controller, the default config server version, plus the two versions above exist", 4, versions.size());
VespaVersion v0 = versions.get(2);
- assertEquals(tester.configServerClientMock().getDefaultConfigServerVersion(), v0.versionNumber());
+ assertEquals(tester.configServer().getDefaultConfigServerVersion(), v0.versionNumber());
assertEquals(0, v0.statistics().failing().size());
assertEquals(0, v0.statistics().production().size());
@@ -131,20 +131,20 @@ public class VersionStatusTest {
tester.upgradeSystem(version0);
// Setup applications
- Application canary0 = tester.createAndDeploy("canary0", 0, "canary");
- Application canary1 = tester.createAndDeploy("canary1", 1, "canary");
- Application canary2 = tester.createAndDeploy("canary2", 2, "canary");
- Application default0 = tester.createAndDeploy("default0", 00, "default");
- Application default1 = tester.createAndDeploy("default1", 11, "default");
- Application default2 = tester.createAndDeploy("default2", 22, "default");
- Application default3 = tester.createAndDeploy("default3", 33, "default");
- Application default4 = tester.createAndDeploy("default4", 44, "default");
- Application default5 = tester.createAndDeploy("default5", 55, "default");
- Application default6 = tester.createAndDeploy("default6", 66, "default");
- Application default7 = tester.createAndDeploy("default7", 77, "default");
- Application default8 = tester.createAndDeploy("default8", 88, "default");
- Application default9 = tester.createAndDeploy("default9", 99, "default");
- Application conservative0 = tester.createAndDeploy("conservative1", 000, "conservative");
+ Application canary0 = tester.createAndDeploy("canary0", 1, "canary");
+ Application canary1 = tester.createAndDeploy("canary1", 2, "canary");
+ Application canary2 = tester.createAndDeploy("canary2", 3, "canary");
+ Application default0 = tester.createAndDeploy("default0", 4, "default");
+ Application default1 = tester.createAndDeploy("default1", 5, "default");
+ Application default2 = tester.createAndDeploy("default2", 6, "default");
+ Application default3 = tester.createAndDeploy("default3", 7, "default");
+ Application default4 = tester.createAndDeploy("default4", 8, "default");
+ Application default5 = tester.createAndDeploy("default5", 9, "default");
+ Application default6 = tester.createAndDeploy("default6", 10, "default");
+ Application default7 = tester.createAndDeploy("default7", 11, "default");
+ Application default8 = tester.createAndDeploy("default8", 12, "default");
+ Application default9 = tester.createAndDeploy("default9", 13, "default");
+ Application conservative0 = tester.createAndDeploy("conservative1", 14, "conservative");
// The following applications should not affect confidence calculation:
@@ -242,7 +242,7 @@ public class VersionStatusTest {
ControllerTester tester = new ControllerTester();
ApplicationController applications = tester.controller().applications();
- tester.gitHubClientMock()
+ tester.gitHub()
.mockAny(false)
.knownTag(Vtag.currentVersion.toFullString(), "foo") // controller
.knownTag("6.1.0", "bar"); // config server
diff --git a/defaults/CMakeLists.txt b/defaults/CMakeLists.txt
index 2b7f719d297..c42e5402688 100644
--- a/defaults/CMakeLists.txt
+++ b/defaults/CMakeLists.txt
@@ -6,3 +6,5 @@ vespa_define_module(
APPS
src/apps/printdefault
)
+
+install_fat_java_artifact(defaults)
diff --git a/dist.sh b/dist.sh
index 6396ce35368..c1fd499fdf9 100755
--- a/dist.sh
+++ b/dist.sh
@@ -9,5 +9,5 @@ fi
VERSION="$1"
mkdir -p ~/rpmbuild/{SOURCES,SPECS}
-GZIP=-1 tar -zcf ~/rpmbuild/SOURCES/vespa-$VERSION.tar.gz --transform "flags=r;s,^,vespa-$VERSION/," *
+GZIP=-1 tar -zcf ~/rpmbuild/SOURCES/vespa-$VERSION.tar.gz --exclude target --exclude cmake-build-debug --transform "flags=r;s,^,vespa-$VERSION/," *
sed -e "s,VESPA_VERSION,$VERSION," < dist/vespa.spec > ~/rpmbuild/SPECS/vespa-$VERSION.spec
diff --git a/dist/post_install.sh b/dist/post_install.sh
deleted file mode 100755
index fb0e19370ee..00000000000
--- a/dist/post_install.sh
+++ /dev/null
@@ -1,76 +0,0 @@
-#!/bin/bash
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-if [ $# -ne 1 ]; then
- echo "Usage: $0 <install prefix>"
- exit 1
-fi
-
-declare -r PREFIX="$1"
-declare -r INSTALLPATH="$DESTDIR/$PREFIX"
-
-# Rewrite config def file names
-for path in $INSTALLPATH/var/db/vespa/config_server/serverdb/classes/*.def; do
- dir=$(dirname $path)
- filename=$(basename $path)
- namespace=$(grep '^ *namespace *=' $path | sed 's/ *namespace *= *//')
- if [ "$namespace" ]; then
- case $filename in
- $namespace.*)
- ;;
- *)
- mv $path $dir/$namespace.$filename ;;
- esac
- fi
-done
-
-mkdir -p $INSTALLPATH/conf/configserver/
-mkdir -p $INSTALLPATH/conf/configserver-app/
-mkdir -p $INSTALLPATH/conf/configserver-app/config-models/
-mkdir -p $INSTALLPATH/conf/configserver-app/components/
-mkdir -p $INSTALLPATH/conf/filedistributor/
-mkdir -p $INSTALLPATH/conf/node-admin-app/
-mkdir -p $INSTALLPATH/conf/node-admin-app/components/
-mkdir -p $INSTALLPATH/conf/zookeeper/
-mkdir -p $INSTALLPATH/libexec/jdisc_core/
-mkdir -p $INSTALLPATH/libexec/vespa/modelplugins/
-mkdir -p $INSTALLPATH/libexec/vespa/plugins/qrs/
-mkdir -p $INSTALLPATH/logs/jdisc_core/
-mkdir -p $INSTALLPATH/logs/vespa/
-mkdir -p $INSTALLPATH/logs/vespa/
-mkdir -p $INSTALLPATH/logs/vespa/configserver/
-mkdir -p $INSTALLPATH/logs/vespa/search/
-mkdir -p $INSTALLPATH/logs/vespa/qrs/
-mkdir -p $INSTALLPATH/share/vespa/
-mkdir -p $INSTALLPATH/share/vespa/schema/version/6.x/schema/
-mkdir -p $INSTALLPATH/tmp/vespa/
-mkdir -p $INSTALLPATH/var/db/jdisc/logcontrol/
-mkdir -p $INSTALLPATH/var/db/vespa/
-mkdir -p $INSTALLPATH/var/db/vespa/config_server/serverdb/applications/
-mkdir -p $INSTALLPATH/var/db/vespa/logcontrol/
-mkdir -p $INSTALLPATH/var/jdisc_container/
-mkdir -p $INSTALLPATH/var/jdisc_core/
-mkdir -p $INSTALLPATH/var/run/
-mkdir -p $INSTALLPATH/var/spool/vespa/
-mkdir -p $INSTALLPATH/var/spool/master/inbox/
-mkdir -p $INSTALLPATH/var/vespa/bundlecache/
-mkdir -p $INSTALLPATH/var/vespa/cache/config/
-mkdir -p $INSTALLPATH/var/vespa/cmdlines/
-mkdir -p $INSTALLPATH/var/zookeeper/version-2/
-mkdir -p $INSTALLPATH/sbin
-
-ln -sf $PREFIX/lib/jars/config-model-fat.jar $INSTALLPATH/conf/configserver-app/components/config-model-fat.jar
-ln -sf $PREFIX/lib/jars/configserver-jar-with-dependencies.jar $INSTALLPATH/conf/configserver-app/components/configserver.jar
-ln -sf $PREFIX/lib/jars/orchestrator-jar-with-dependencies.jar $INSTALLPATH/conf/configserver-app/components/orchestrator.jar
-ln -sf $PREFIX/lib/jars/node-repository-jar-with-dependencies.jar $INSTALLPATH/conf/configserver-app/components/node-repository.jar
-ln -sf $PREFIX/lib/jars/zkfacade-jar-with-dependencies.jar $INSTALLPATH/conf/configserver-app/components/zkfacade.jar
-ln -snf $PREFIX/conf/configserver-app/components $INSTALLPATH/lib/jars/config-models
-ln -sf vespa-storaged-bin $INSTALLPATH/sbin/vespa-distributord-bin
-
-# Setup default enviroment
-mkdir -p $INSTALLPATH/conf/vespa
-cat > $INSTALLPATH/conf/vespa/default-env.txt <<EOF
-fallback VESPA_HOME $PREFIX
-override VESPA_USER vespa
-EOF
-
diff --git a/dist/vespa.spec b/dist/vespa.spec
index eae6a0b3817..bf6e49fdf85 100644
--- a/dist/vespa.spec
+++ b/dist/vespa.spec
@@ -16,12 +16,14 @@ URL: http://vespa.ai
Source0: vespa-%{version}.tar.gz
%if 0%{?centos}
-BuildRequires: epel-release
+BuildRequires: epel-release
BuildRequires: centos-release-scl
BuildRequires: devtoolset-6-gcc-c++
BuildRequires: devtoolset-6-libatomic-devel
BuildRequires: devtoolset-6-binutils
+BuildRequires: rh-maven33
%define _devtoolset_enable /opt/rh/devtoolset-6/enable
+%define _rhmaven33_enable /opt/rh/rh-maven33/enable
%endif
%if 0%{?fedora}
BuildRequires: gcc-c++
@@ -36,6 +38,7 @@ BuildRequires: vespa-zookeeper-c-client-devel >= 3.4.9-6
%endif
%if 0%{?fedora}
BuildRequires: cmake >= 3.9.1
+BuildRequires: maven
%if 0%{?fc25}
BuildRequires: llvm-devel >= 3.9.1
BuildRequires: boost-devel >= 1.60
@@ -49,7 +52,6 @@ BuildRequires: zookeeper-devel >= 3.4.9
BuildRequires: lz4-devel
BuildRequires: libzstd-devel
BuildRequires: zlib-devel
-BuildRequires: maven
BuildRequires: libicu-devel
BuildRequires: java-1.8.0-openjdk-devel
BuildRequires: openssl-devel
@@ -90,7 +92,6 @@ Requires: zookeeper >= 3.4.9
%define _extra_link_directory /opt/vespa-libtorrent/lib;/opt/vespa-cppunit/lib
%define _extra_include_directory /opt/vespa-libtorrent/include;/opt/vespa-cppunit/include
%define _vespa_boost_lib_suffix %{nil}
-%define _vespa_cxx_abi_flags -D_GLIBCXX_USE_CXX11_ABI=1
%endif
Requires: java-1.8.0-openjdk
Requires: openssl
@@ -113,8 +114,11 @@ Vespa - The open big data serving engine
%if 0%{?_devtoolset_enable:1}
source %{_devtoolset_enable} || true
%endif
+%if 0%{?_rhmaven33_enable:1}
+source %{_rhmaven33_enable} || true
+%endif
sh bootstrap.sh java
-mvn -nsu -T 2C install -DskipTests -Dmaven.javadoc.skip=true
+mvn -nsu -T 2C install -Dmaven.test.skip=true -Dmaven.javadoc.skip=true
cmake3 -DCMAKE_INSTALL_PREFIX=%{_prefix} \
-DJAVA_HOME=/usr/lib/jvm/java-openjdk \
-DEXTRA_LINK_DIRECTORY="%{_extra_link_directory}" \
@@ -122,7 +126,6 @@ cmake3 -DCMAKE_INSTALL_PREFIX=%{_prefix} \
-DCMAKE_INSTALL_RPATH="%{_prefix}/lib64%{?_extra_link_directory:;%{_extra_link_directory}};/usr/lib/jvm/java-1.8.0/jre/lib/amd64/server" \
%{?_vespa_llvm_version:-DVESPA_LLVM_VERSION="%{_vespa_llvm_version}"} \
%{?_vespa_boost_lib_suffix:-DVESPA_BOOST_LIB_SUFFIX="%{_vespa_boost_lib_suffix}"} \
- %{?_vespa_cxx_abi_flags:-DVESPA_CXX_ABI_FLAGS="%{_vespa_cxx_abi_flags}"} \
.
make %{_smp_mflags}
@@ -149,18 +152,18 @@ chmod +x /etc/profile.d/vespa.sh
exit 0
%post
-%systemd_post vespa-configserver.service
-%systemd_post vespa.service
+%systemd_post vespa-configserver.service
+%systemd_post vespa.service
%preun
%systemd_preun vespa.service
%systemd_preun vespa-configserver.service
%postun
-%systemd_postun_with_restart vespa.service
-%systemd_postun_with_restart vespa-configserver.service
+%systemd_postun_with_restart vespa.service
+%systemd_postun_with_restart vespa-configserver.service
rm -f /etc/profile.d/vespa.sh
-userdel vespa
+userdel vespa
%files
%defattr(-,vespa,vespa,-)
diff --git a/docker-api/CMakeLists.txt b/docker-api/CMakeLists.txt
new file mode 100644
index 00000000000..25957c81e4c
--- /dev/null
+++ b/docker-api/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_config_definition(src/main/resources/configdefinitions/docker.def vespa.hosted.dockerapi.docker.def)
diff --git a/docker-api/pom.xml b/docker-api/pom.xml
index b3f6d1ec12f..fc374a12dd2 100644
--- a/docker-api/pom.xml
+++ b/docker-api/pom.xml
@@ -26,7 +26,7 @@
<dependency>
<groupId>com.github.docker-java</groupId>
<artifactId>docker-java</artifactId>
- <version>3.0.8</version>
+ <version>3.0.13</version>
<exclusions>
<exclusion>
<groupId>org.slf4j</groupId>
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java
index e676a86d9fd..8e8a650d906 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImpl.java
@@ -9,6 +9,7 @@ import com.github.dockerjava.api.model.Ulimit;
import java.net.Inet6Address;
import java.net.InetAddress;
+import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.HashMap;
@@ -19,6 +20,7 @@ import java.util.Optional;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
+import java.util.stream.IntStream;
class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
private final DockerClient docker;
@@ -153,7 +155,7 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
networkMode
.filter(mode -> ! mode.toLowerCase().equals("host"))
- .ifPresent(mode -> containerCmd.withMacAddress(generateRandomMACAddress()));
+ .ifPresent(mode -> containerCmd.withMacAddress(generateMACAddress(hostName, ipv4Address, ipv6Address)));
memoryInB.ifPresent(containerCmd::withMemory);
cpuShares.ifPresent(containerCmd::withCpuShares);
@@ -204,18 +206,29 @@ class CreateContainerCommandImpl implements Docker.CreateContainerCommand {
+ dockerImage.asString();
}
- private String generateRandomMACAddress() {
- Random rand = new SecureRandom();
+ /**
+ * Generates a pseudo-random MAC address based on the hostname, IPv4- and IPv6-address.
+ */
+ static String generateMACAddress(String hostname, Optional<String> ipv4Address, Optional<String> ipv6Address) {
+ final String seed = hostname + ipv4Address.orElse("") + ipv6Address.orElse("");
+ Random rand = getPRNG(seed);
byte[] macAddr = new byte[6];
rand.nextBytes(macAddr);
// Set second-last bit (locally administered MAC address), unset last bit (unicast)
macAddr[0] = (byte) ((macAddr[0] | 2) & 254);
- StringBuilder sb = new StringBuilder(18);
- for (byte b : macAddr) {
- sb.append(":").append(String.format("%02x", b));
- }
+ return IntStream.range(0, macAddr.length)
+ .mapToObj(i -> String.format("%02x", macAddr[i]))
+ .collect(Collectors.joining(":"));
+ }
- return sb.substring(1);
+ private static Random getPRNG(String seed) {
+ try {
+ SecureRandom rand = SecureRandom.getInstance("SHA1PRNG");
+ rand.setSeed(seed.getBytes());
+ return rand;
+ } catch (NoSuchAlgorithmException e) {
+ throw new RuntimeException("Failed to get pseudo-random number generator", e);
+ }
}
}
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
index db10a85bb45..a6f8783a22c 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
@@ -419,7 +419,7 @@ public class DockerImpl implements Docker {
@Override
public void buildImage(File dockerfile, DockerImage image) {
try {
- dockerClient.buildImageCmd(dockerfile).withTag(image.asString())
+ dockerClient.buildImageCmd(dockerfile).withTags(Collections.singleton(image.asString()))
.exec(new BuildImageResultCallback()).awaitImageId();
} catch (RuntimeException e) {
numberOfDockerDaemonFails.add();
diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImplTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImplTest.java
new file mode 100644
index 00000000000..1bc73d981c3
--- /dev/null
+++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/CreateContainerCommandImplTest.java
@@ -0,0 +1,29 @@
+package com.yahoo.vespa.hosted.dockerapi;
+
+
+import org.junit.Test;
+
+import java.util.Optional;
+import java.util.stream.Stream;
+
+import static org.junit.Assert.assertEquals;
+
+public class CreateContainerCommandImplTest {
+
+ @Test
+ public void generateMacAddressTest() {
+ String[][] addresses = {
+ {"test123.host.yahoo.com", null, "abcd:1234::1", "ee:ae:a9:de:ad:c2"},
+ {"test123.host.yahoo.com", null, "abcd:1234::2", "fa:81:11:1b:ff:fb"},
+ {"unique.host.yahoo.com", null, "abcd:1234::1", "96:a4:00:77:90:3b"},
+ {"test123.host.yahoo.com", "10.0.0.1", null, "7e:de:b3:7c:9e:96"},
+ {"test123.host.yahoo.com", "10.0.0.1", "abcd:1234::1", "6a:06:af:16:25:95"}};
+
+ Stream.of(addresses).forEach(address -> {
+ String generatedMac = CreateContainerCommandImpl.generateMACAddress(
+ address[0], Optional.ofNullable(address[1]), Optional.ofNullable(address[2]));
+ String expectedMac = address[3];
+ assertEquals(expectedMac, generatedMac);
+ });
+ }
+} \ No newline at end of file
diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/RunSystemTests.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/RunSystemTests.java
index 915b3b53867..9613470a735 100644
--- a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/RunSystemTests.java
+++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/RunSystemTests.java
@@ -155,7 +155,7 @@ public class RunSystemTests {
logger.info("Pulling " + vespaBaseImage.asString() + " (This may take a while)");
while (docker.pullImageAsyncIfNeeded(vespaBaseImage)) {
Thread.sleep(5000);
- };
+ }
}
Path systestBuildDirectory = pathToVespaRepoInHost.resolve("docker-api/src/test/resources/systest/");
diff --git a/docker/build-vespa.sh b/docker/build-vespa.sh
index 15a0df6a9e9..d19c1fc8dc4 100755
--- a/docker/build-vespa.sh
+++ b/docker/build-vespa.sh
@@ -11,7 +11,7 @@ DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)
cd $DIR
VESPA_VERSION=$1
-DOCKER_IMAGE="vespaengine/vespa-dev:latest"
+DOCKER_IMAGE="centos:latest"
docker pull ${DOCKER_IMAGE}
docker run --rm -v $(pwd)/..:/vespa --entrypoint /vespa/docker/build/build-vespa-internal.sh "$DOCKER_IMAGE" "$VESPA_VERSION" "$(id -u)" "$(id -g)"
diff --git a/docker/build/build-vespa-internal.sh b/docker/build/build-vespa-internal.sh
index 9da687a2e00..7f448f087c5 100755
--- a/docker/build/build-vespa-internal.sh
+++ b/docker/build/build-vespa-internal.sh
@@ -13,6 +13,12 @@ CALLER_GID=$3
cd /vespa
./dist.sh ${VESPA_VERSION}
+
+yum -y install epel-release
+yum -y install centos-release-scl
+yum-config-manager --add-repo https://copr.fedorainfracloud.org/coprs/g/vespa/vespa/repo/epel-7/group_vespa-vespa-epel-7.repo
+
+yum-builddep -y ~/rpmbuild/SPECS/vespa-${VESPA_VERSION}.spec
rpmbuild -bb ~/rpmbuild/SPECS/vespa-${VESPA_VERSION}.spec
chown ${CALLER_UID}:${CALLER_GID} ~/rpmbuild/RPMS/x86_64/*.rpm
mv ~/rpmbuild/RPMS/x86_64/*.rpm /vespa/docker
diff --git a/docproc/CMakeLists.txt b/docproc/CMakeLists.txt
new file mode 100644
index 00000000000..bacb45df319
--- /dev/null
+++ b/docproc/CMakeLists.txt
@@ -0,0 +1,4 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_config_definition(src/main/resources/configdefinitions/docproc.def config.docproc.docproc.def)
+install_config_definition(src/main/resources/configdefinitions/schemamapping.def config.docproc.schemamapping.def)
+install_config_definition(src/main/resources/configdefinitions/splitter-joiner-document-processor.def config.docproc.splitter-joiner-document-processor.def)
diff --git a/docprocs/CMakeLists.txt b/docprocs/CMakeLists.txt
new file mode 100644
index 00000000000..8786a77cbf8
--- /dev/null
+++ b/docprocs/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(docprocs)
diff --git a/document/CMakeLists.txt b/document/CMakeLists.txt
index ca2ee029c87..72f4ee445fb 100644
--- a/document/CMakeLists.txt
+++ b/document/CMakeLists.txt
@@ -40,3 +40,5 @@ vespa_define_module(
src/tests/struct_anno
src/tests/tensor_fieldvalue
)
+
+install_java_artifact(document)
diff --git a/document/src/vespa/document/annotation/spantree.h b/document/src/vespa/document/annotation/spantree.h
index 2518db32fe3..e2839c4793b 100644
--- a/document/src/vespa/document/annotation/spantree.h
+++ b/document/src/vespa/document/annotation/spantree.h
@@ -2,7 +2,7 @@
#pragma once
-#include <vespa/document/annotation/annotation.h>
+#include "annotation.h"
#include <vector>
#include <cassert>
diff --git a/document/src/vespa/document/bucket/bucketspace.h b/document/src/vespa/document/bucket/bucketspace.h
index 8f93526afa7..c13c81dbd73 100644
--- a/document/src/vespa/document/bucket/bucketspace.h
+++ b/document/src/vespa/document/bucket/bucketspace.h
@@ -32,6 +32,11 @@ public:
return std::hash<Type>()(bs.getId());
}
};
+
+ /*
+ * Temporary placeholder value while wiring in use of BucketSpace in APIs.
+ */
+ static BucketSpace placeHolder() { return BucketSpace(0); }
private:
Type _id;
};
diff --git a/document/src/vespa/document/config/CMakeLists.txt b/document/src/vespa/document/config/CMakeLists.txt
index 32bbce210a5..fc711c5e57a 100644
--- a/document/src/vespa/document/config/CMakeLists.txt
+++ b/document/src/vespa/document/config/CMakeLists.txt
@@ -4,7 +4,5 @@ vespa_add_library(document_documentconfig OBJECT
DEPENDS
)
vespa_generate_config(document_documentconfig documenttypes.def)
-install(FILES documenttypes.def RENAME document.documenttypes.def
- DESTINATION var/db/vespa/config_server/serverdb/classes)
-install(FILES documentmanager.def RENAME document.config.documentmanager.def
- DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(documenttypes.def document.documenttypes.def)
+install_config_definition(documentmanager.def document.config.documentmanager.def)
diff --git a/document/src/vespa/document/fieldvalue/structfieldvalue.cpp b/document/src/vespa/document/fieldvalue/structfieldvalue.cpp
index 0a05ae60600..2526976f69d 100644
--- a/document/src/vespa/document/fieldvalue/structfieldvalue.cpp
+++ b/document/src/vespa/document/fieldvalue/structfieldvalue.cpp
@@ -252,9 +252,6 @@ StructFieldValue::setFieldValue(const Field& field, FieldValue::UP value)
{
int fieldId = field.getId();
std::unique_ptr<ByteBuffer> serialized(value->serialize());
- if (serialized->getLength() >= 0x4000000) { // Max 64 MB fields.
- throw SerializeException(make_string("Field value for field %i larger than 64 MB", fieldId), VESPA_STRLOC);
- }
serialized->flip();
if (_chunks.empty()) {
_chunks.push_back(SerializableArray::UP(new SerializableArray()));
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/VisitorDataHandler.java b/documentapi/src/main/java/com/yahoo/documentapi/VisitorDataHandler.java
index 782a788b9c5..fc27af2c306 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/VisitorDataHandler.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/VisitorDataHandler.java
@@ -20,9 +20,10 @@ import java.util.List;
* <p>
* Use a data handler that fits your needs to be able to use visiting easily.
*
- * @author <a href="mailto:humbe@yahoo-inc.com">H&aring;kon Humberset</a>
+ * @author HÃ¥kon Humberset
*/
-public abstract class VisitorDataHandler {
+public abstract class VisitorDataHandler {
+
protected VisitorControlSession session;
/** Creates a new visitor data handler. */
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java b/documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java
index 5ba87d971df..01abd6454a9 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/VisitorParameters.java
@@ -15,7 +15,7 @@ import java.util.TreeMap;
/**
* Parameters for creating or opening a visitor session
*
- * @author <a href="mailto:humbe@yahoo-inc.com">H&aring;kon Humberset</a>
+ * @author HÃ¥kon Humberset
*/
public class VisitorParameters extends Parameters {
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/SearchColumnPolicy.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/SearchColumnPolicy.java
index fc32ab4e71e..aabb6407d14 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/SearchColumnPolicy.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/protocol/SearchColumnPolicy.java
@@ -42,7 +42,7 @@ import java.util.logging.Logger;
* to all recipients receives more than "maxbadparts" out-of-service replies,
* according to (2.a) above.</p>
*
- * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a>
+ * @author Simon Thoresen
*/
public class SearchColumnPolicy implements DocumentProtocolRoutingPolicy {
diff --git a/documentapi/src/vespa/documentapi/messagebus/documentprotocol.h b/documentapi/src/vespa/documentapi/messagebus/documentprotocol.h
index b2d1456fd98..c3417d85197 100644
--- a/documentapi/src/vespa/documentapi/messagebus/documentprotocol.h
+++ b/documentapi/src/vespa/documentapi/messagebus/documentprotocol.h
@@ -24,7 +24,7 @@ class SystemState;
class IRoutingPolicyFactory;
class IRoutableFactory;
-class DocumentProtocol : public mbus::IProtocol {
+class DocumentProtocol final : public mbus::IProtocol {
private:
std::unique_ptr<RoutingPolicyRepository> _routingPolicyRepository;
std::unique_ptr<RoutableRepository> _routableRepository;
@@ -264,8 +264,7 @@ public:
* @param buf A byte buffer that contains a serialized routable.
* @return The deserialized routable.
*/
- mbus::Routable::UP deserialize(uint32_t type,
- document::ByteBuffer &buf) const;
+ mbus::Routable::UP deserialize(uint32_t type, document::ByteBuffer &buf) const;
/**
* This is a convenient entry to the {@link #merge(RoutingContext,std::set)} method by way of a routing
@@ -307,7 +306,7 @@ public:
mbus::IRoutingPolicy::UP createPolicy(const mbus::string &name, const mbus::string &param) const override;
mbus::Blob encode(const vespalib::Version &version, const mbus::Routable &routable) const override;
mbus::Routable::UP decode(const vespalib::Version &version, mbus::BlobRef data) const override;
+ bool requireSequencing() const override { return false; }
};
}
-
diff --git a/documentapi/src/vespa/documentapi/messagebus/policies/CMakeLists.txt b/documentapi/src/vespa/documentapi/messagebus/policies/CMakeLists.txt
index 143310d1f67..f1a691bc46d 100644
--- a/documentapi/src/vespa/documentapi/messagebus/policies/CMakeLists.txt
+++ b/documentapi/src/vespa/documentapi/messagebus/policies/CMakeLists.txt
@@ -20,4 +20,4 @@ vespa_add_library(documentapi_documentapipolicies OBJECT
DEPENDS
)
vespa_generate_config(documentapi_documentapipolicies ../../../../main/resources/configdefinitions/documentrouteselectorpolicy.def)
-install(FILES ../../../../main/resources/configdefinitions/documentrouteselectorpolicy.def RENAME documentapi.messagebus.protocol.documentrouteselectorpolicy.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(../../../../main/resources/configdefinitions/documentrouteselectorpolicy.def documentapi.messagebus.protocol.documentrouteselectorpolicy.def)
diff --git a/dummy-persistence/.gitignore b/dummy-persistence/.gitignore
deleted file mode 100644
index 12251442258..00000000000
--- a/dummy-persistence/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/target
-/pom.xml.build
diff --git a/dummy-persistence/OWNERS b/dummy-persistence/OWNERS
deleted file mode 100644
index dbcff24b338..00000000000
--- a/dummy-persistence/OWNERS
+++ /dev/null
@@ -1 +0,0 @@
-vekterli
diff --git a/dummy-persistence/pom.xml b/dummy-persistence/pom.xml
deleted file mode 100644
index 4be954fa88a..00000000000
--- a/dummy-persistence/pom.xml
+++ /dev/null
@@ -1,59 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>parent</artifactId>
- <version>6-SNAPSHOT</version>
- </parent>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>dummy-persistence</artifactId> <!-- Also used as Bundle-SymbolicName -->
- <version>6-SNAPSHOT</version> <!-- Also used as the Bundle-Version -->
- <packaging>container-plugin</packaging>
- <dependencies>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- <scope>test</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>container-dev</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>document</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>persistence</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- </dependencies>
- <build>
- <plugins>
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
- <configuration>
- <compilerArgs>
- <arg>-Xlint:all</arg>
- <arg>-Werror</arg>
- </compilerArgs>
- </configuration>
- </plugin>
- <plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>bundle-plugin</artifactId>
- <extensions>true</extensions>
- </plugin>
- </plugins>
- </build>
-</project>
diff --git a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/BucketContents.java b/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/BucketContents.java
deleted file mode 100644
index 40251405858..00000000000
--- a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/BucketContents.java
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.dummy;
-
-import com.yahoo.collections.Pair;
-import com.yahoo.document.BucketId;
-import com.yahoo.document.BucketIdFactory;
-import com.yahoo.document.Document;
-import com.yahoo.document.DocumentId;
-import com.yahoo.persistence.spi.BucketInfo;
-import com.yahoo.persistence.spi.DocEntry;
-import com.yahoo.persistence.spi.result.GetResult;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * Class used by DummyPersistence to store its contents.
- */
-public class BucketContents {
- List<DocEntry> entries = new ArrayList<DocEntry>();
-
- BucketInfo.ActiveState active;
-
- public void setActiveState(BucketInfo.ActiveState state) {
- active = state;
- }
-
- public boolean isActive() {
- return active == BucketInfo.ActiveState.ACTIVE;
- }
-
- public BucketInfo getBucketInfo() {
- int count = 0;
- int meta = 0;
- int checksum = 0;
-
- for (DocEntry e : entries) {
- if (e.getType() == DocEntry.Type.PUT_ENTRY) {
- ++count;
- checksum ^= e.getTimestamp();
- }
- ++meta;
- }
-
-
- return new BucketInfo(checksum,
- count,
- meta,
- meta,
- meta,
- BucketInfo.ReadyState.READY,
- active);
- }
-
- public void put(long timestamp, Document doc) {
- for (DocEntry e : entries) {
- if (e.getDocumentId().equals(doc.getId())) {
- if (e.getTimestamp() > timestamp) {
- return;
- }
-
- entries.remove(e);
- break;
- }
- }
-
- entries.add(new DocEntry(timestamp, doc));
- }
-
- public boolean remove(long timestamp, DocumentId docId) {
- DocEntry found = null;
-
- for (DocEntry e : entries) {
- if (
- e.getType() == DocEntry.Type.PUT_ENTRY &&
- e.getDocumentId().equals(docId) &&
- e.getTimestamp() <= timestamp)
- {
- found = e;
- entries.remove(e);
- break;
- }
- }
-
- entries.add(new DocEntry(timestamp, docId));
- return found != null;
- }
-
- public GetResult get(DocumentId id) {
- for (DocEntry e : entries) {
- if (e.getType() == DocEntry.Type.PUT_ENTRY && e.getDocumentId().equals(id)) {
- return new GetResult(e.getDocument(), e.getTimestamp());
- }
- }
-
- return new GetResult();
- }
-
- public Pair<BucketContents, BucketContents> split(BucketId target1, BucketId target2) {
- BucketContents a = new BucketContents();
- BucketContents b = new BucketContents();
-
- for (DocEntry e : entries) {
- BucketId bucketId = new BucketIdFactory().getBucketId(e.getDocumentId());
- if (target1.contains(bucketId)) {
- a.entries.add(e);
- } else {
- b.entries.add(e);
- }
- }
-
- return new Pair<BucketContents, BucketContents>(a, b);
- }
-
- public BucketContents() {}
-
- public BucketContents(BucketContents a, BucketContents b) {
- if (a != null) {
- entries.addAll(a.entries);
- }
- if (b != null) {
- entries.addAll(b.entries);
- }
- }
-
-}
diff --git a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/DummyPersistenceProvider.java b/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/DummyPersistenceProvider.java
deleted file mode 100644
index 4eee730a28b..00000000000
--- a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/DummyPersistenceProvider.java
+++ /dev/null
@@ -1,223 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.dummy;
-
-import com.yahoo.collections.Pair;
-import com.yahoo.document.fieldset.FieldSet;
-import com.yahoo.persistence.spi.AbstractPersistenceProvider;
-import com.yahoo.persistence.spi.*;
-import com.yahoo.persistence.spi.conformance.ConformanceTest;
-import com.yahoo.persistence.spi.result.*;
-import java.util.*;
-import com.yahoo.document.*;
-
-/**
- * Simple memory-based implementation of the persistence provider interface.
- * Intended as an example for future implementations.
- */
-public class DummyPersistenceProvider extends AbstractPersistenceProvider
-{
- Map<BucketId, BucketContents> bucketContents = new TreeMap<BucketId, BucketContents>();
- long nextIteratorId = 1;
- Map<Long, IteratorContext> iteratorContexts = new TreeMap<Long, IteratorContext>();
-
- @Override
- public synchronized Result initialize() {
- bucketContents.clear();
- iteratorContexts.clear();
- return new Result();
- }
-
- @Override
- public synchronized BucketIdListResult listBuckets(short partition) {
- return new BucketIdListResult(new ArrayList<BucketId>(bucketContents.keySet()));
- }
-
- @Override
- public synchronized BucketInfoResult getBucketInfo(Bucket bucket) {
- BucketContents contents = bucketContents.get(bucket.getBucketId());
- if (contents == null) {
- return new BucketInfoResult(new BucketInfo());
- }
- return new BucketInfoResult(contents.getBucketInfo());
- }
-
- @Override
- public synchronized Result put(Bucket bucket, long timestamp, Document doc) {
- bucketContents.get(bucket.getBucketId()).put(timestamp, doc);
- return new Result();
- }
-
- @Override
- public synchronized RemoveResult remove(Bucket bucket, long timestamp, DocumentId id) {
- return new RemoveResult(bucketContents.get(bucket.getBucketId()).remove(timestamp, id));
- }
-
- @Override
- public synchronized GetResult get(Bucket bucket, FieldSet fieldSet, DocumentId id) {
- BucketContents contents = bucketContents.get(bucket.getBucketId());
- if (contents == null) {
- return new GetResult();
- }
-
- return contents.get(id);
- }
-
- @Override
- public synchronized CreateIteratorResult createIterator(Bucket bucket, FieldSet fieldSet, Selection selection, PersistenceProvider.IncludedVersions versions) {
- nextIteratorId++;
-
- List<Long> timestamps = new ArrayList<Long>();
- if (selection.getTimestampSubset() == null) {
- for (DocEntry e : bucketContents.get(bucket.getBucketId()).entries) {
- timestamps.add(e.getTimestamp());
- }
- } else {
- timestamps.addAll(selection.getTimestampSubset());
- // Explicitly specifying a timestamp subset implies that any version may
- // be returned. This is essential for merging to work correctly.
- versions = IncludedVersions.ALL_VERSIONS;
- }
-
- iteratorContexts.put(nextIteratorId - 1, new IteratorContext(bucket, fieldSet, selection, timestamps, versions));
- return new CreateIteratorResult(nextIteratorId - 1);
- }
-
- @Override
- public synchronized IterateResult iterate(long iteratorId, long maxByteSize) {
- IteratorContext context = iteratorContexts.get(iteratorId);
-
- if (context == null) {
- return new IterateResult(Result.ErrorType.PERMANENT_ERROR, "Iterator id not found");
- }
-
- ArrayList<DocEntry> entries = new ArrayList<DocEntry>();
- for (DocEntry e : bucketContents.get(context.getBucket().getBucketId()).entries) {
- if (maxByteSize < 0) {
- return new IterateResult(entries, false);
- }
-
- if (context.getTimestamps().contains(e.getTimestamp())) {
- context.getTimestamps().remove(e.getTimestamp());
- } else {
- continue;
- }
-
- if (e.getType() == DocEntry.Type.PUT_ENTRY) {
-
- if (context.getSelection() != null && !context.getSelection().match(e.getDocument(), e.getTimestamp())) {
- continue;
- }
- entries.add(e);
- maxByteSize -= e.getDocument().getSerializedSize();
- } else if (context.getIncludedVersions() == PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE
- || context.getIncludedVersions() == PersistenceProvider.IncludedVersions.ALL_VERSIONS)
- {
-
- if (context.getSelection() != null && !context.getSelection().match(e.getTimestamp())) {
- continue;
- }
-
- entries.add(e);
- maxByteSize -= e.getDocumentId().toString().length();
- }
- }
-
- return new IterateResult(entries, true);
- }
-
- @Override
- public synchronized Result destroyIterator(long iteratorId) {
- iteratorContexts.remove(iteratorId);
- return new Result();
- }
-
- @Override
- public synchronized Result createBucket(Bucket bucket) {
- bucketContents.put(bucket.getBucketId(), new BucketContents());
- return new Result();
- }
-
- @Override
- public synchronized Result deleteBucket(Bucket bucket) {
- bucketContents.remove(bucket.getBucketId());
- return new Result();
- }
-
- private void mergeExistingBucketContentsIntoNew(BucketContents newC, BucketContents oldC) {
- if (oldC == null) {
- return;
- }
- Set<Long> newTimestamps = new HashSet<Long>();
- for (DocEntry entry : newC.entries) {
- newTimestamps.add(entry.getTimestamp());
- }
- // Don't overwrite new entries with old ones
- for (DocEntry oldEntry : oldC.entries) {
- if (newTimestamps.contains(oldEntry.getTimestamp())) {
- continue;
- }
- newC.entries.add(oldEntry);
- }
- }
-
- @Override
- public synchronized Result split(Bucket source, Bucket target1, Bucket target2) {
- BucketContents sourceContent = bucketContents.get(source.getBucketId());
- BucketContents existingTarget1 = bucketContents.get(target1.getBucketId());
- BucketContents existingTarget2 = bucketContents.get(target2.getBucketId());
-
- Pair<BucketContents, BucketContents> contents
- = sourceContent.split(target1.getBucketId(), target2.getBucketId());
-
- bucketContents.remove(source.getBucketId());
- mergeExistingBucketContentsIntoNew(contents.getFirst(), existingTarget1);
- mergeExistingBucketContentsIntoNew(contents.getSecond(), existingTarget2);
-
- BucketInfo.ActiveState targetActiveState
- = (sourceContent.getBucketInfo().isActive()
- ? BucketInfo.ActiveState.ACTIVE
- : BucketInfo.ActiveState.NOT_ACTIVE);
- contents.getFirst().setActiveState(targetActiveState);
- contents.getSecond().setActiveState(targetActiveState);
-
- bucketContents.put(target1.getBucketId(), contents.getFirst());
- bucketContents.put(target2.getBucketId(), contents.getSecond());
-
- return new Result();
- }
-
- @Override
- public synchronized Result join(Bucket source1, Bucket source2, Bucket target) {
- BucketInfo.ActiveState activeState = BucketInfo.ActiveState.NOT_ACTIVE;
- BucketContents targetExisting = bucketContents.get(target.getBucketId());
- BucketContents sourceExisting1 = bucketContents.get(source1.getBucketId());
- BucketContents sourceExisting2 = null;
- boolean singleBucketJoin = source2.getBucketId().equals(source1.getBucketId());
- if (!singleBucketJoin) {
- sourceExisting2 = bucketContents.get(source2.getBucketId());
- }
-
- if (sourceExisting1 != null && sourceExisting1.isActive()) {
- activeState = BucketInfo.ActiveState.ACTIVE;
- }
- if (sourceExisting2 != null && sourceExisting2.isActive()) {
- activeState = BucketInfo.ActiveState.ACTIVE;
- }
-
- BucketContents contents = new BucketContents(sourceExisting1, sourceExisting2);
- bucketContents.remove(source1.getBucketId());
- if (sourceExisting2 != null) {
- bucketContents.remove(source2.getBucketId());
- }
- mergeExistingBucketContentsIntoNew(contents, targetExisting);
- contents.setActiveState(activeState);
- bucketContents.put(target.getBucketId(), contents);
- return new Result();
- }
-
- @Override
- public synchronized Result setActiveState(Bucket bucket, BucketInfo.ActiveState active) {
- bucketContents.get(bucket.getBucketId()).setActiveState(active);
- return new Result();
- }
-}
diff --git a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/DummyPersistenceProviderHandler.java b/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/DummyPersistenceProviderHandler.java
deleted file mode 100644
index 34e7a948b76..00000000000
--- a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/DummyPersistenceProviderHandler.java
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.dummy;
-
-import com.yahoo.document.DocumentTypeManager;
-import com.yahoo.document.config.DocumentmanagerConfig;
-import com.yahoo.persistence.rpc.PersistenceProviderHandler;
-
-public class DummyPersistenceProviderHandler {
- DummyPersistenceProvider provider;
-
- public DummyPersistenceProviderHandler(PersistenceProviderHandler rpcHandler, DocumentmanagerConfig docManConfig) {
- provider = new DummyPersistenceProvider();
- rpcHandler.initialize(provider, new DocumentTypeManager(docManConfig));
- }
-}
diff --git a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/IteratorContext.java b/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/IteratorContext.java
deleted file mode 100644
index dd9b734216a..00000000000
--- a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/IteratorContext.java
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.dummy;
-
-import com.yahoo.document.fieldset.FieldSet;
-import com.yahoo.persistence.spi.Bucket;
-import com.yahoo.persistence.spi.PersistenceProvider;
-import com.yahoo.persistence.spi.Selection;
-
-import java.util.List;
-
-/**
- * Class to represent an ongoing iterator in dummy persistence.
- */
-public class IteratorContext {
- List<Long> timestamps;
-
- public FieldSet getFieldSet() {
- return fieldSet;
- }
-
- private FieldSet fieldSet;
-
- public Bucket getBucket() {
- return bucket;
- }
-
- private Bucket bucket;
-
- public Selection getSelection() {
- return selection;
- }
-
- private Selection selection;
-
- public PersistenceProvider.IncludedVersions getIncludedVersions() {
- return includedVersions;
- }
-
- private PersistenceProvider.IncludedVersions includedVersions;
-
- IteratorContext(Bucket bucket, FieldSet fieldSet, Selection selection,
- List<Long> timestamps,
- PersistenceProvider.IncludedVersions versions) {
- this.fieldSet = fieldSet;
- this.bucket = bucket;
- this.selection = selection;
- this.includedVersions = versions;
- this.timestamps = timestamps;
- }
-
- public List<Long> getTimestamps() { return timestamps; }
-}
diff --git a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/package-info.java b/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/package-info.java
deleted file mode 100644
index 8abb7de8d67..00000000000
--- a/dummy-persistence/src/main/java/com/yahoo/persistence/dummy/package-info.java
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-@PublicApi
-package com.yahoo.persistence.dummy;
-
-import com.yahoo.api.annotations.PublicApi;
-import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/dummy-persistence/src/test/config/.gitignore b/dummy-persistence/src/test/config/.gitignore
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/dummy-persistence/src/test/config/.gitignore
+++ /dev/null
diff --git a/dummy-persistence/src/test/java/com/yahoo/persistence/dummy/DummyPersistenceTest.java b/dummy-persistence/src/test/java/com/yahoo/persistence/dummy/DummyPersistenceTest.java
deleted file mode 100644
index edd1dc12d8b..00000000000
--- a/dummy-persistence/src/test/java/com/yahoo/persistence/dummy/DummyPersistenceTest.java
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.dummy;
-
-import com.yahoo.document.DocumentTypeManager;
-import com.yahoo.persistence.PersistenceRpcConfig;
-import com.yahoo.persistence.rpc.PersistenceProviderHandler;
-import com.yahoo.persistence.spi.PersistenceProvider;
-import com.yahoo.persistence.spi.conformance.ConformanceTest;
-
-public class DummyPersistenceTest extends ConformanceTest {
-
- class DummyPersistenceFactory implements PersistenceProviderFactory {
-
- @Override
- public PersistenceProvider createProvider(DocumentTypeManager manager) {
- return new DummyPersistenceProvider();
- }
-
- @Override
- public boolean supportsActiveState() {
- return true;
- }
- }
-
- public void testConstruct() {
- DummyPersistenceProviderHandler provider = new DummyPersistenceProviderHandler(
- new PersistenceProviderHandler(new PersistenceRpcConfig(new PersistenceRpcConfig.Builder())), null);
- }
-
- public void testConformance() throws Exception {
- doConformanceTest(new DummyPersistenceFactory());
- }
-}
diff --git a/eval/CMakeLists.txt b/eval/CMakeLists.txt
index 19a614464a1..89e8a72e330 100644
--- a/eval/CMakeLists.txt
+++ b/eval/CMakeLists.txt
@@ -6,6 +6,7 @@ vespa_define_module(
APPS
src/apps/eval_expr
src/apps/make_tensor_binary_format_test_spec
+ src/apps/tensor_conformance
TESTS
src/tests/eval/aggr
diff --git a/eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp b/eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp
index 5040ae35ff9..a7695408a85 100644
--- a/eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp
+++ b/eval/src/apps/make_tensor_binary_format_test_spec/make_tensor_binary_format_test_spec.cpp
@@ -188,7 +188,6 @@ void make_matrix_test(Cursor &test, size_t x_size, size_t y_size) {
//-----------------------------------------------------------------------------
void make_map_test(Cursor &test, const Dict &x_dict_in) {
- TensorSpec spec("tensor(x{})");
nbostream sparse_base = make_sparse();
sparse_base.putInt1_4Bytes(1);
sparse_base.writeSmallString("x");
@@ -200,6 +199,7 @@ void make_map_test(Cursor &test, const Dict &x_dict_in) {
mixed_base.putInt1_4Bytes(x_dict_in.size());
auto x_perm = make_permutations(x_dict_in);
for (const Dict &x_dict: x_perm) {
+ TensorSpec spec("tensor(x{})");
nbostream sparse = sparse_base;
nbostream mixed = mixed_base;
for (vespalib::string x: x_dict) {
@@ -214,13 +214,13 @@ void make_map_test(Cursor &test, const Dict &x_dict_in) {
add_binary(test, {sparse, mixed});
}
if (x_dict_in.empty()) {
+ TensorSpec spec("tensor(x{})");
set_tensor(test, spec);
add_binary(test, {sparse_base, mixed_base});
}
}
void make_mesh_test(Cursor &test, const Dict &x_dict_in, const vespalib::string &y) {
- TensorSpec spec("tensor(x{},y{})");
nbostream sparse_base = make_sparse();
sparse_base.putInt1_4Bytes(2);
sparse_base.writeSmallString("x");
@@ -234,6 +234,7 @@ void make_mesh_test(Cursor &test, const Dict &x_dict_in, const vespalib::string
mixed_base.putInt1_4Bytes(x_dict_in.size() * 1);
auto x_perm = make_permutations(x_dict_in);
for (const Dict &x_dict: x_perm) {
+ TensorSpec spec("tensor(x{},y{})");
nbostream sparse = sparse_base;
nbostream mixed = mixed_base;
for (vespalib::string x: x_dict) {
@@ -250,6 +251,7 @@ void make_mesh_test(Cursor &test, const Dict &x_dict_in, const vespalib::string
add_binary(test, {sparse, mixed});
}
if (x_dict_in.empty()) {
+ TensorSpec spec("tensor(x{},y{})");
set_tensor(test, spec);
add_binary(test, {sparse_base, mixed_base});
}
@@ -264,7 +266,6 @@ void make_vector_map_test(Cursor &test,
auto type_str = vespalib::make_string("tensor(%s{},%s[%zu])",
mapped_name.c_str(), indexed_name.c_str(), indexed_size);
ValueType type = ValueType::from_spec(type_str);
- TensorSpec spec(type.to_spec()); // ensures type string is normalized
nbostream mixed_base = make_mixed();
mixed_base.putInt1_4Bytes(1);
mixed_base.writeSmallString(mapped_name);
@@ -274,6 +275,7 @@ void make_vector_map_test(Cursor &test,
mixed_base.putInt1_4Bytes(mapped_dict.size());
auto mapped_perm = make_permutations(mapped_dict);
for (const Dict &dict: mapped_perm) {
+ TensorSpec spec(type.to_spec()); // ensures type string is normalized
nbostream mixed = mixed_base;
for (vespalib::string label: dict) {
mixed.writeSmallString(label);
@@ -287,6 +289,7 @@ void make_vector_map_test(Cursor &test,
add_binary(test, mixed);
}
if (mapped_dict.empty()) {
+ TensorSpec spec(type.to_spec()); // ensures type string is normalized
set_tensor(test, spec);
add_binary(test, mixed_base);
}
diff --git a/eval/src/apps/tensor_conformance/.gitignore b/eval/src/apps/tensor_conformance/.gitignore
new file mode 100644
index 00000000000..3e87a05826c
--- /dev/null
+++ b/eval/src/apps/tensor_conformance/.gitignore
@@ -0,0 +1 @@
+/vespa-tensor-conformance
diff --git a/persistence/src/vespa/persistence/proxy/CMakeLists.txt b/eval/src/apps/tensor_conformance/CMakeLists.txt
index fdebad2fe49..76ababd9f5e 100644
--- a/persistence/src/vespa/persistence/proxy/CMakeLists.txt
+++ b/eval/src/apps/tensor_conformance/CMakeLists.txt
@@ -1,8 +1,8 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_library(persistence_proxy OBJECT
+vespa_add_executable(vespa-tensor-conformance
SOURCES
- buildid.cpp
- providerproxy.cpp
- providerstub.cpp
+ generate.cpp
+ tensor_conformance.cpp
DEPENDS
+ vespaeval
)
diff --git a/eval/src/apps/tensor_conformance/generate.cpp b/eval/src/apps/tensor_conformance/generate.cpp
new file mode 100644
index 00000000000..45ff6243d81
--- /dev/null
+++ b/eval/src/apps/tensor_conformance/generate.cpp
@@ -0,0 +1,18 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "generate.h"
+
+using TensorSpec = vespalib::eval::TensorSpec;
+
+TensorSpec spec(double value) { return TensorSpec("double").add({}, value); }
+
+void
+Generator::generate(TestBuilder &dst)
+{
+ // smoke tests with expected result
+ dst.add("a+a", {{"a", spec(2.0)}}, spec(4.0));
+ dst.add("a*b", {{"a", spec(2.0)}, {"b", spec(3.0)}}, spec(6.0));
+ dst.add("(a+b)*(a-b)", {{"a", spec(5.0)}, {"b", spec(2.0)}}, spec(21.0));
+ // smoke test without expected result
+ dst.add("(a-b)/(a+b)", {{"a", spec(5.0)}, {"b", spec(2.0)}});
+}
diff --git a/eval/src/apps/tensor_conformance/generate.h b/eval/src/apps/tensor_conformance/generate.h
new file mode 100644
index 00000000000..0f74ce924b3
--- /dev/null
+++ b/eval/src/apps/tensor_conformance/generate.h
@@ -0,0 +1,22 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/eval/eval/tensor_spec.h>
+#include <map>
+
+struct TestBuilder {
+ using TensorSpec = vespalib::eval::TensorSpec;
+ // add test with pre-defined expected result
+ virtual void add(const vespalib::string &expression,
+ const std::map<vespalib::string,TensorSpec> &inputs,
+ const TensorSpec &expect) = 0;
+ // add test with undefined expected result
+ virtual void add(const vespalib::string &expression,
+ const std::map<vespalib::string,TensorSpec> &inputs) = 0;
+ virtual ~TestBuilder() {}
+};
+
+struct Generator {
+ static void generate(TestBuilder &out);
+};
diff --git a/eval/src/apps/tensor_conformance/tensor_conformance.cpp b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
new file mode 100644
index 00000000000..cfe9542ecda
--- /dev/null
+++ b/eval/src/apps/tensor_conformance/tensor_conformance.cpp
@@ -0,0 +1,354 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/vespalib/testkit/test_kit.h>
+#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/data/slime/json_format.h>
+#include <vespa/vespalib/objects/nbostream.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/eval/eval/tensor_spec.h>
+#include <vespa/eval/eval/tensor.h>
+#include <vespa/eval/eval/function.h>
+#include <vespa/eval/eval/interpreted_function.h>
+#include <vespa/eval/eval/tensor_engine.h>
+#include <vespa/eval/eval/simple_tensor_engine.h>
+#include <vespa/eval/tensor/default_tensor_engine.h>
+#include <vespa/eval/eval/value_type.h>
+#include <vespa/eval/eval/value.h>
+#include <unistd.h>
+
+#include "generate.h"
+
+using namespace vespalib;
+using namespace vespalib::eval;
+using namespace vespalib::slime::convenience;
+using slime::JsonFormat;
+using tensor::DefaultTensorEngine;
+
+constexpr size_t CHUNK_SIZE = 16384;
+
+//-----------------------------------------------------------------------------
+
+class StdIn : public Input {
+private:
+ bool _eof = false;
+ SimpleBuffer _input;
+public:
+ ~StdIn() {}
+ Memory obtain() override {
+ if ((_input.get().size == 0) && !_eof) {
+ WritableMemory buf = _input.reserve(CHUNK_SIZE);
+ ssize_t res = read(STDIN_FILENO, buf.data, buf.size);
+ _eof = (res == 0);
+ assert(res >= 0); // fail on stdio read errors
+ _input.commit(res);
+ }
+ return _input.obtain();
+ }
+ Input &evict(size_t bytes) override {
+ _input.evict(bytes);
+ return *this;
+ }
+};
+
+class StdOut : public Output {
+private:
+ SimpleBuffer _output;
+public:
+ ~StdOut() {}
+ WritableMemory reserve(size_t bytes) override {
+ return _output.reserve(bytes);
+ }
+ Output &commit(size_t bytes) override {
+ _output.commit(bytes);
+ Memory buf = _output.obtain();
+ ssize_t res = write(STDOUT_FILENO, buf.data, buf.size);
+ assert(res == ssize_t(buf.size)); // fail on stdout write failures
+ _output.evict(res);
+ return *this;
+ }
+};
+
+void write_compact(const Slime &slime, Output &out) {
+ JsonFormat::encode(slime, out, true);
+ out.reserve(1).data[0] = '\n';
+ out.commit(1);
+}
+
+void write_readable(const Slime &slime, Output &out) {
+ JsonFormat::encode(slime, out, false);
+}
+
+//-----------------------------------------------------------------------------
+
+uint8_t unhex(char c) {
+ if (c >= '0' && c <= '9') {
+ return (c - '0');
+ }
+ if (c >= 'A' && c <= 'F') {
+ return ((c - 'A') + 10);
+ }
+ TEST_ERROR("bad hex char");
+ return 0;
+}
+
+void extract_data_from_string(Memory hex_dump, nbostream &data) {
+ if ((hex_dump.size > 2) && (hex_dump.data[0] == '0') && (hex_dump.data[1] == 'x')) {
+ for (size_t i = 2; i < (hex_dump.size - 1); i += 2) {
+ data << uint8_t((unhex(hex_dump.data[i]) << 4) | unhex(hex_dump.data[i + 1]));
+ }
+ }
+}
+
+nbostream extract_data(const Inspector &value) {
+ nbostream data;
+ if (value.asString().size > 0) {
+ extract_data_from_string(value.asString(), data);
+ } else {
+ Memory buf = value.asData();
+ data.write(buf.data, buf.size);
+ }
+ return data;
+}
+
+//-----------------------------------------------------------------------------
+
+TensorSpec to_spec(const Value &value) {
+ if (value.is_error()) {
+ return TensorSpec("error");
+ } else if (value.is_double()) {
+ return TensorSpec("double").add({}, value.as_double());
+ } else {
+ ASSERT_TRUE(value.is_tensor());
+ auto tensor = value.as_tensor();
+ return tensor->engine().to_spec(*tensor);
+ }
+}
+
+const Value &to_value(const TensorSpec &spec, const TensorEngine &engine, Stash &stash) {
+ if (spec.type() == "error") {
+ return stash.create<ErrorValue>();
+ } else if (spec.type() == "double") {
+ double value = 0.0;
+ for (const auto &cell: spec.cells()) {
+ value += cell.second;
+ }
+ return stash.create<DoubleValue>(value);
+ } else {
+ ASSERT_TRUE(starts_with(spec.type(), "tensor("));
+ return stash.create<TensorValue>(engine.create(spec));
+ }
+}
+
+void insert_value(Cursor &cursor, const vespalib::string &name, const TensorSpec &spec) {
+ Stash stash;
+ nbostream data;
+ const Value &value = to_value(spec, SimpleTensorEngine::ref(), stash);
+ SimpleTensorEngine::ref().encode(value, data, stash);
+ cursor.setData(name, Memory(data.peek(), data.size()));
+}
+
+TensorSpec extract_value(const Inspector &inspector) {
+ Stash stash;
+ nbostream data = extract_data(inspector);
+ return to_spec(SimpleTensorEngine::ref().decode(data, stash));
+}
+
+//-----------------------------------------------------------------------------
+
+TensorSpec eval_expr(const Inspector &test, const TensorEngine &engine) {
+ Stash stash;
+ Function fun = Function::parse(test["expression"].asString().make_string());
+ std::vector<Value::CREF> param_values;
+ std::vector<ValueType> param_types;
+ for (size_t i = 0; i < fun.num_params(); ++i) {
+ param_values.emplace_back(to_value(extract_value(test["inputs"][fun.param_name(i)]), engine, stash));
+ }
+ for (size_t i = 0; i < fun.num_params(); ++i) {
+ param_types.emplace_back(param_values[i].get().type());
+ }
+ NodeTypes types(fun, param_types);
+ InterpretedFunction ifun(engine, fun, types);
+ InterpretedFunction::Context ctx(ifun);
+ InterpretedFunction::SimpleObjectParams params(param_values);
+ return to_spec(ifun.eval(ctx, params));
+}
+
+//-----------------------------------------------------------------------------
+
+std::vector<vespalib::string> extract_fields(const Inspector &object) {
+ struct FieldExtractor : slime::ObjectTraverser {
+ std::vector<vespalib::string> result;
+ void field(const Memory &symbol, const Inspector &) override {
+ result.push_back(symbol.make_string());
+ }
+ } extractor;
+ object.traverse(extractor);
+ return std::move(extractor.result);
+};
+
+//-----------------------------------------------------------------------------
+
+class MyTestBuilder : public TestBuilder {
+private:
+ Output &_out;
+ size_t _num_tests;
+ void make_test(const vespalib::string &expression,
+ const std::map<vespalib::string,TensorSpec> &input_map,
+ const TensorSpec *expect = nullptr)
+ {
+ Slime slime;
+ Cursor &test = slime.setObject();
+ test.setString("expression", expression);
+ Cursor &inputs = test.setObject("inputs");
+ for (const auto &input: input_map) {
+ insert_value(inputs, input.first, input.second);
+ }
+ if (expect != nullptr) {
+ insert_value(test.setObject("result"), "expect", *expect);
+ } else {
+ insert_value(test.setObject("result"), "expect",
+ eval_expr(slime.get(), SimpleTensorEngine::ref()));
+ }
+ write_compact(slime, _out);
+ ++_num_tests;
+ }
+public:
+ MyTestBuilder(Output &out) : _out(out), _num_tests(0) {}
+ void add(const vespalib::string &expression,
+ const std::map<vespalib::string,TensorSpec> &inputs,
+ const TensorSpec &expect) override
+ {
+ make_test(expression, inputs, &expect);
+ }
+ void add(const vespalib::string &expression,
+ const std::map<vespalib::string,TensorSpec> &inputs) override
+ {
+ make_test(expression, inputs);
+ }
+ void make_summary() {
+ Slime slime;
+ Cursor &summary = slime.setObject();
+ summary.setLong("num_tests", _num_tests);
+ write_compact(slime, _out);
+ }
+};
+
+void generate(Output &out) {
+ MyTestBuilder my_test_builder(out);
+ Generator::generate(my_test_builder);
+ my_test_builder.make_summary();
+}
+
+//-----------------------------------------------------------------------------
+
+void for_each_test(Input &in,
+ const std::function<void(Slime&)> &handle_test,
+ const std::function<void(Slime&)> &handle_summary)
+{
+ size_t num_tests = 0;
+ bool got_summary = false;
+ while (in.obtain().size > 0) {
+ Slime slime;
+ if (JsonFormat::decode(in, slime)) {
+ bool is_test = slime.get()["expression"].valid();
+ bool is_summary = slime.get()["num_tests"].valid();
+ ASSERT_TRUE(is_test != is_summary);
+ if (is_test) {
+ ++num_tests;
+ ASSERT_TRUE(!got_summary);
+ handle_test(slime);
+ } else {
+ got_summary = true;
+ ASSERT_EQUAL(slime.get()["num_tests"].asLong(), int64_t(num_tests));
+ handle_summary(slime);
+ }
+ } else {
+ ASSERT_EQUAL(in.obtain().size, 0u);
+ }
+ }
+ ASSERT_TRUE(got_summary);
+}
+
+//-----------------------------------------------------------------------------
+
+void evaluate(Input &in, Output &out) {
+ auto handle_test = [&out](Slime &slime)
+ {
+ insert_value(slime.get()["result"], "prod_cpp",
+ eval_expr(slime.get(), DefaultTensorEngine::ref()));
+ write_compact(slime, out);
+ };
+ auto handle_summary = [&out](Slime &slime)
+ {
+ write_compact(slime, out);
+ };
+ for_each_test(in, handle_test, handle_summary);
+}
+
+//-----------------------------------------------------------------------------
+
+void dump_test(const Inspector &test) {
+ fprintf(stderr, "expression: '%s'\n", test["expression"].asString().make_string().c_str());
+ for (const auto &input: extract_fields(test["inputs"])) {
+ auto value = extract_value(test["inputs"][input]);
+ fprintf(stderr, "input '%s': %s\n", input.c_str(), value.to_string().c_str());
+ }
+}
+
+void verify(Input &in, Output &out) {
+ std::map<vespalib::string,size_t> result_map;
+ auto handle_test = [&out,&result_map](Slime &slime)
+ {
+ TensorSpec reference_result = eval_expr(slime.get(), SimpleTensorEngine::ref());
+ for (const auto &result: extract_fields(slime.get()["result"])) {
+ ++result_map[result];
+ TEST_STATE(make_string("verifying result: '%s'", result.c_str()).c_str());
+ if (!EXPECT_EQUAL(reference_result, extract_value(slime.get()["result"][result]))) {
+ dump_test(slime.get());
+ }
+ }
+ };
+ auto handle_summary = [&out,&result_map](Slime &slime)
+ {
+ Cursor &stats = slime.get().setObject("stats");
+ for (const auto &entry: result_map) {
+ stats.setLong(entry.first, entry.second);
+ }
+ write_readable(slime, out);
+ };
+ for_each_test(in, handle_test, handle_summary);
+}
+
+//-----------------------------------------------------------------------------
+
+int usage(const char *self) {
+ fprintf(stderr, "usage: %s <mode>\n", self);
+ fprintf(stderr, " <mode>: which mode to activate\n");
+ fprintf(stderr, " 'generate': write test cases to stdout\n");
+ fprintf(stderr, " 'evaluate': read test cases from stdin, annotate them with\n");
+ fprintf(stderr, " results from various implementations and write\n");
+ fprintf(stderr, " them to stdout\n");
+ fprintf(stderr, " 'verify': read annotated test cases from stdin and verify\n");
+ fprintf(stderr, " that all results are as expected\n");
+ return 1;
+}
+
+int main(int argc, char **argv) {
+ StdIn std_in;
+ StdOut std_out;
+ if (argc != 2) {
+ return usage(argv[0]);
+ }
+ vespalib::string mode = argv[1];
+ TEST_MASTER.init(make_string("vespa-tensor-conformance-%s", mode.c_str()).c_str());
+ if (mode == "generate") {
+ generate(std_out);
+ } else if (mode == "evaluate") {
+ evaluate(std_in, std_out);
+ } else if (mode == "verify") {
+ verify(std_in, std_out);
+ } else {
+ TEST_ERROR(make_string("unknown mode: %s", mode.c_str()).c_str());
+ }
+ return (TEST_MASTER.fini() ? 0 : 1);
+}
diff --git a/eval/src/apps/tensor_conformance/test_spec.json b/eval/src/apps/tensor_conformance/test_spec.json
new file mode 100644
index 00000000000..c66931c2df8
--- /dev/null
+++ b/eval/src/apps/tensor_conformance/test_spec.json
@@ -0,0 +1,5 @@
+{"expression":"a+a","inputs":{"a":"0x02004000000000000000"},"result":{"expect":"0x02004010000000000000"}}
+{"expression":"a*b","inputs":{"a":"0x02004000000000000000","b":"0x02004008000000000000"},"result":{"expect":"0x02004018000000000000"}}
+{"expression":"(a+b)*(a-b)","inputs":{"a":"0x02004014000000000000","b":"0x02004000000000000000"},"result":{"expect":"0x02004035000000000000"}}
+{"expression":"(a-b)/(a+b)","inputs":{"a":"0x02004014000000000000","b":"0x02004000000000000000"},"result":{"expect":"0x02003FDB6DB6DB6DB6DB"}}
+{"num_tests":4}
diff --git a/eval/src/tests/tensor/tensor_conformance/.gitignore b/eval/src/tests/tensor/tensor_conformance/.gitignore
new file mode 100644
index 00000000000..60177365cf7
--- /dev/null
+++ b/eval/src/tests/tensor/tensor_conformance/.gitignore
@@ -0,0 +1,2 @@
+/binary_test_spec.json
+/conformance_test_spec.json
diff --git a/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp b/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
index daf641143e8..ec9fc396f0c 100644
--- a/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
+++ b/eval/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
@@ -3,20 +3,42 @@
#include <vespa/eval/eval/test/tensor_conformance.h>
#include <vespa/eval/eval/simple_tensor_engine.h>
#include <vespa/eval/tensor/default_tensor_engine.h>
+#include <vespa/vespalib/util/stringfmt.h>
using vespalib::eval::SimpleTensorEngine;
using vespalib::eval::test::TensorConformance;
using vespalib::tensor::DefaultTensorEngine;
+using vespalib::make_string;
-vespalib::string module_path(TEST_PATH("../../../../"));
-
+vespalib::string module_src_path(TEST_PATH("../../../../"));
+vespalib::string module_build_path("../../../../");
TEST("require that reference tensor implementation passes all conformance tests") {
- TEST_DO(TensorConformance::run_tests(module_path, SimpleTensorEngine::ref()));
+ TEST_DO(TensorConformance::run_tests(module_src_path, SimpleTensorEngine::ref()));
}
TEST("require that production tensor implementation passes all conformance tests") {
- TEST_DO(TensorConformance::run_tests(module_path, DefaultTensorEngine::ref()));
+ TEST_DO(TensorConformance::run_tests(module_src_path, DefaultTensorEngine::ref()));
+}
+
+TEST("require that tensor serialization test spec can be generated") {
+ vespalib::string spec = module_src_path + "src/apps/make_tensor_binary_format_test_spec/test_spec.json";
+ vespalib::string binary = module_build_path + "src/apps/make_tensor_binary_format_test_spec/eval_make_tensor_binary_format_test_spec_app";
+ EXPECT_EQUAL(system(make_string("%s > binary_test_spec.json", binary.c_str()).c_str()), 0);
+ EXPECT_EQUAL(system(make_string("diff -u %s binary_test_spec.json", spec.c_str()).c_str()), 0);
+}
+
+TEST("require that cross-language tensor conformance test spec can be generated") {
+ vespalib::string spec = module_src_path + "src/apps/tensor_conformance/test_spec.json";
+ vespalib::string binary = module_build_path + "src/apps/tensor_conformance/vespa-tensor-conformance";
+ EXPECT_EQUAL(system(make_string("%s generate > conformance_test_spec.json", binary.c_str()).c_str()), 0);
+ EXPECT_EQUAL(system(make_string("diff -u %s conformance_test_spec.json", spec.c_str()).c_str()), 0);
+}
+
+TEST("require that cross-language tensor conformance tests pass with production C++ expression evaluation") {
+ vespalib::string spec = module_src_path + "src/apps/tensor_conformance/test_spec.json";
+ vespalib::string binary = module_build_path + "src/apps/tensor_conformance/vespa-tensor-conformance";
+ EXPECT_EQUAL(system(make_string("cat %s | %s evaluate | %s verify", spec.c_str(), binary.c_str(), binary.c_str()).c_str()), 0);
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/eval/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp b/eval/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp
index 916b6ad4462..e6e6c7de686 100644
--- a/eval/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp
+++ b/eval/src/tests/tensor/tensor_slime_serialization/tensor_slime_serialization_test.cpp
@@ -35,7 +35,7 @@ struct Fixture
vespalib::Memory memory_exp(exp);
vespalib::Slime expSlime;
size_t used = vespalib::slime::JsonFormat::decode(memory_exp, expSlime);
- EXPECT_EQUAL(used, memory_exp.size);
+ EXPECT_TRUE(used > 0);
EXPECT_EQUAL(expSlime, *slime);
}
};
@@ -135,7 +135,7 @@ struct DenseFixture
vespalib::Memory memory_exp(exp);
vespalib::Slime expSlime;
size_t used = vespalib::slime::JsonFormat::decode(memory_exp, expSlime);
- EXPECT_EQUAL(used, memory_exp.size);
+ EXPECT_TRUE(used > 0);
EXPECT_EQUAL(expSlime, *slime);
}
};
diff --git a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
index 4e321083252..bd8bdf8cd11 100644
--- a/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
+++ b/eval/src/vespa/eval/eval/test/tensor_conformance.cpp
@@ -1267,7 +1267,7 @@ struct TestContext {
MappedFileInput file(path);
Slime slime;
EXPECT_TRUE(file.valid());
- EXPECT_EQUAL(JsonFormat::decode(file, slime), file.get().size);
+ EXPECT_TRUE(JsonFormat::decode(file, slime) > 0);
int64_t num_tests = slime.get()["num_tests"].asLong();
Cursor &tests = slime.get()["tests"];
EXPECT_GREATER(num_tests, 0u);
diff --git a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp
index 8dbb31c032d..35ae6b7544b 100644
--- a/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp
+++ b/eval/src/vespa/eval/tensor/sparse/sparse_tensor_match.cpp
@@ -112,7 +112,12 @@ SparseTensorMatch::SparseTensorMatch(const TensorImplType &lhs,
{
if ((lhs.type().dimensions().size() == rhs.type().dimensions().size()) &&
(lhs.type().dimensions().size() == _builder.type().dimensions().size())) {
- fastMatch(lhs, rhs);
+ // Ensure that first tensor to fastMatch has fewest cells.
+ if (lhs.cells().size() <= rhs.cells().size()) {
+ fastMatch(lhs, rhs);
+ } else {
+ fastMatch(rhs, lhs);
+ }
} else {
slowMatch(lhs, rhs);
}
diff --git a/fileacquirer/CMakeLists.txt b/fileacquirer/CMakeLists.txt
index 1ae83a6f6c2..7a366aa3882 100644
--- a/fileacquirer/CMakeLists.txt
+++ b/fileacquirer/CMakeLists.txt
@@ -9,3 +9,5 @@ vespa_define_module(
LIBS
src/vespa/fileacquirer
)
+
+install_config_definition(src/main/resources/configdefinitions/filedistributorrpc.def cloud.config.filedistribution.filedistributorrpc.def)
diff --git a/filedistribution/src/vespa/filedistribution/distributor/CMakeLists.txt b/filedistribution/src/vespa/filedistribution/distributor/CMakeLists.txt
index 5345cba1e3f..f85ab85fb39 100644
--- a/filedistribution/src/vespa/filedistribution/distributor/CMakeLists.txt
+++ b/filedistribution/src/vespa/filedistribution/distributor/CMakeLists.txt
@@ -12,4 +12,4 @@ vespa_add_library(filedistribution_distributor STATIC
)
target_compile_options(filedistribution_distributor PRIVATE -DTORRENT_DISABLE_ENCRYPTION -DTORRENT_DISABLE_DHT -DWITH_SHIPPED_GEOIP_H -DBOOST_ASIO_HASH_MAP_BUCKETS=1021 -DBOOST_EXCEPTION_DISABLE -DBOOST_ASIO_ENABLE_CANCELIO -DBOOST_ASIO_DYN_LINK -DTORRENT_LINKING_SHARED)
vespa_generate_config(filedistribution_distributor filedistributor.def)
-install(FILES filedistributor.def RENAME cloud.config.filedistribution.filedistributor.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(filedistributor.def cloud.config.filedistribution.filedistributor.def)
diff --git a/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp b/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp
index 0d11c0c1528..669cc550003 100644
--- a/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp
+++ b/filedistribution/src/vespa/filedistribution/distributor/filedownloadermanager.cpp
@@ -1,8 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "filedownloadermanager.h"
-#include <iterator>
-#include <sstream>
+#include "filedownloadermanager.h"
#include <thread>
#include <vespa/log/log.h>
diff --git a/filedistribution/src/vespa/filedistribution/manager/filedistributionmanager.cpp b/filedistribution/src/vespa/filedistribution/manager/filedistributionmanager.cpp
index f1c74e4a000..954cce23205 100644
--- a/filedistribution/src/vespa/filedistribution/manager/filedistributionmanager.cpp
+++ b/filedistribution/src/vespa/filedistribution/manager/filedistributionmanager.cpp
@@ -9,8 +9,6 @@
#include <vespa/filedistribution/model/filedistributionmodel.h>
#include <vespa/filedistribution/model/zkfiledbmodel.h>
#include <vespa/filedistribution/model/mockfiledistributionmodel.h>
-#include <vespa/filedistribution/model/zkfacade.h>
-#include <memory>
using namespace filedistribution;
diff --git a/filedistribution/src/vespa/filedistribution/model/CMakeLists.txt b/filedistribution/src/vespa/filedistribution/model/CMakeLists.txt
index 7136aba475b..5b92aa4086d 100644
--- a/filedistribution/src/vespa/filedistribution/model/CMakeLists.txt
+++ b/filedistribution/src/vespa/filedistribution/model/CMakeLists.txt
@@ -17,4 +17,4 @@ vespa_add_library(filedistribution_filedistributionmodel STATIC
vespa_generate_config(filedistribution_filedistributionmodel filereferences.def)
vespa_add_target_external_dependency(filedistribution_filedistributionmodel zookeeper_mt)
-install(FILES filereferences.def RENAME cloud.config.filedistribution.filereferences.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(filereferences.def cloud.config.filedistribution.filereferences.def)
diff --git a/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.cpp b/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.cpp
index 9443d256c92..13daecbe5c1 100644
--- a/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.cpp
+++ b/filedistribution/src/vespa/filedistribution/model/deployedfilestodownload.cpp
@@ -2,8 +2,6 @@
#include "deployedfilestodownload.h"
#include <vespa/filedistribution/common/logfwd.h>
-#include <sstream>
-#include <iterator>
using filedistribution::DeployedFilesToDownload;
using filedistribution::Path;
diff --git a/filedistribution/src/vespa/filedistribution/model/filedbmodel.h b/filedistribution/src/vespa/filedistribution/model/filedbmodel.h
index 9326e822dc7..c556c703b6d 100644
--- a/filedistribution/src/vespa/filedistribution/model/filedbmodel.h
+++ b/filedistribution/src/vespa/filedistribution/model/filedbmodel.h
@@ -1,8 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <string>
-#include <vector>
#include <vespa/filedistribution/common/buffer.h>
#include <vespa/filedistribution/common/exception.h>
diff --git a/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp b/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp
index ad415c11928..9ce31e0dd3a 100644
--- a/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp
+++ b/filedistribution/src/vespa/filedistribution/model/filedistributionmodelimpl.cpp
@@ -4,7 +4,6 @@
#include "zkfiledbmodel.h"
#include "deployedfilestodownload.h"
#include "filedistributionmodelimpl.h"
-#include <vespa/vespalib/util/stringfmt.h>
#include <boost/filesystem.hpp>
#include <zookeeper/zookeeper.h>
diff --git a/filedistribution/src/vespa/filedistribution/model/zkfacade.cpp b/filedistribution/src/vespa/filedistribution/model/zkfacade.cpp
index ffc82caf4b2..e07f0684584 100644
--- a/filedistribution/src/vespa/filedistribution/model/zkfacade.cpp
+++ b/filedistribution/src/vespa/filedistribution/model/zkfacade.cpp
@@ -6,7 +6,6 @@
#include <vespa/defaults.h>
#include <vespa/vespalib/util/sync.h>
#include <vespa/vespalib/text/stringtokenizer.h>
-#include <vespa/vespalib/util/stringfmt.h>
#include <zookeeper/zookeeper.h>
#include <sstream>
#include <thread>
diff --git a/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.cpp b/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.cpp
index 92777ef7b18..9931b104010 100644
--- a/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.cpp
+++ b/filedistribution/src/vespa/filedistribution/model/zkfiledbmodel.cpp
@@ -5,10 +5,7 @@
#include "zkfiledbmodel.h"
#include "deployedfilestodownload.h"
#include <vespa/filedistribution/common/logfwd.h>
-#include <vespa/vespalib/util/stringfmt.h>
#include <sys/file.h>
-#include <ostream>
-#include <algorithm>
namespace fs = boost::filesystem;
diff --git a/functions.cmake b/functions.cmake
index 38ae339cd53..1c3d1ff7a6e 100644
--- a/functions.cmake
+++ b/functions.cmake
@@ -536,3 +536,23 @@ function(__export_include_directories TARGET)
target_include_directories(${TARGET} PUBLIC ${LOCAL_INCLUDE_DIRS})
endif()
endfunction()
+
+function(install_config_definition)
+ if(ARGC GREATER 1)
+ install(FILES ${ARGV0} RENAME ${ARGV1} DESTINATION var/db/vespa/config_server/serverdb/classes)
+ else()
+ install(FILES ${ARGV0} DESTINATION var/db/vespa/config_server/serverdb/classes)
+ endif()
+endfunction()
+
+function(install_java_artifact NAME)
+ install(FILES "target/${NAME}.jar" DESTINATION lib/jars/)
+endfunction()
+
+function(install_java_artifact_dependencies NAME)
+ install(DIRECTORY "target/dependency/" DESTINATION lib/jars FILES_MATCHING PATTERN "*.jar")
+endfunction()
+
+function(install_fat_java_artifact NAME)
+ install(FILES "target/${NAME}-jar-with-dependencies.jar" DESTINATION lib/jars/)
+endfunction()
diff --git a/install_java.cmake b/install_java.cmake
deleted file mode 100644
index e0611306b5f..00000000000
--- a/install_java.cmake
+++ /dev/null
@@ -1,161 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-function(install_java_artifact NAME)
- install(FILES "${NAME}/target/${NAME}.jar" DESTINATION lib/jars/)
-endfunction()
-
-function(install_java_artifact_dependencies NAME)
- install(DIRECTORY "${NAME}/target/dependency/" DESTINATION lib/jars FILES_MATCHING PATTERN "*.jar")
-endfunction()
-
-function(install_fat_java_artifact NAME)
- install(FILES "${NAME}/target/${NAME}-jar-with-dependencies.jar" DESTINATION lib/jars/)
-endfunction()
-
-install_java_artifact(config-model-fat)
-install_java_artifact(document)
-install_java_artifact(jdisc_jetty)
-install_java_artifact_dependencies(jdisc_jetty)
-install_java_artifact_dependencies(vespa_jersey2)
-install_java_artifact(searchlib)
-install_java_artifact(vespajlib)
-
-install_fat_java_artifact(application-preprocessor)
-install_fat_java_artifact(clustercontroller-apps)
-install_fat_java_artifact(clustercontroller-apputil)
-install_fat_java_artifact(clustercontroller-utils)
-install_fat_java_artifact(clustercontroller-core)
-install_fat_java_artifact(component)
-install_fat_java_artifact(config-bundle)
-install_fat_java_artifact(config-model-api)
-install_fat_java_artifact(config-model)
-install_fat_java_artifact(config-provisioning)
-install_fat_java_artifact(config-proxy)
-install_fat_java_artifact(configdefinitions)
-install_fat_java_artifact(configserver)
-install_fat_java_artifact(container-disc)
-install_fat_java_artifact(container-jersey2)
-install_fat_java_artifact(container-search-and-docproc)
-install_fat_java_artifact(defaults)
-install_fat_java_artifact(docprocs)
-install_fat_java_artifact(jdisc_core)
-install_fat_java_artifact(jdisc_http_service)
-install_fat_java_artifact(logserver)
-install_fat_java_artifact(node-repository)
-install_fat_java_artifact(orchestrator)
-install_fat_java_artifact(persistence)
-install_fat_java_artifact(searchlib)
-install_fat_java_artifact(simplemetrics)
-install_fat_java_artifact(standalone-container)
-install_fat_java_artifact(vespa-http-client)
-install_fat_java_artifact(vespaclient-container-plugin)
-install_fat_java_artifact(vespaclient-java)
-install_fat_java_artifact(zkfacade)
-
-vespa_install_script(application-preprocessor/src/main/sh/vespa-preprocess-application bin)
-vespa_install_script(config-proxy/src/main/sh/vespa-config-ctl.sh vespa-config-ctl bin)
-vespa_install_script(config-proxy/src/main/sh/vespa-config-loadtester.sh vespa-config-loadtester bin)
-vespa_install_script(config-proxy/src/main/sh/vespa-config-verification.sh vespa-config-verification bin)
-vespa_install_script(config-model/src/main/perl/vespa-deploy bin)
-vespa_install_script(config-model/src/main/perl/vespa-expand-config.pl bin)
-vespa_install_script(config-model/src/main/perl/vespa-replicate-log-stream bin)
-vespa_install_script(config-model/src/main/sh/vespa-validate-application bin)
-vespa_install_script(container-disc/src/main/sh/vespa-start-container-daemon.sh vespa-start-container-daemon bin)
-vespa_install_script(searchlib/src/main/sh/vespa-gbdt-converter bin)
-vespa_install_script(searchlib/src/main/sh/vespa-treenet-converter bin)
-vespa_install_script(vespaclient-java/src/main/sh/vespa-document-statistics.sh vespa-document-statistics bin)
-vespa_install_script(vespaclient-java/src/main/sh/vespa-stat.sh vespa-stat bin)
-vespa_install_script(vespaclient-java/src/main/sh/vespa-query-profile-dump-tool.sh vespa-query-profile-dump-tool bin)
-vespa_install_script(vespaclient-java/src/main/sh/vespa-summary-benchmark.sh vespa-summary-benchmark bin)
-vespa_install_script(vespaclient-java/src/main/sh/vespa-destination.sh vespa-destination bin)
-vespa_install_script(vespaclient-java/src/main/sh/vespa-feeder.sh vespa-feeder bin)
-vespa_install_script(vespaclient-java/src/main/sh/vespa-get.sh vespa-get bin)
-vespa_install_script(vespaclient-java/src/main/sh/vespa-visit.sh vespa-visit bin)
-vespa_install_script(vespaclient-java/src/main/sh/vespa-visit-target.sh vespa-visit-target bin)
-
-vespa_install_script(logserver/bin/logserver-start.sh vespa-logserver-start bin)
-
-install(DIRECTORY config-model/src/main/resources/schema DESTINATION share/vespa PATTERN ".gitignore" EXCLUDE)
-install(DIRECTORY config-model/src/main/resources/schema DESTINATION share/vespa/schema/version/6.x PATTERN ".gitignore" EXCLUDE)
-
-install(FILES jdisc_core/src/main/perl/vespa-jdisc-logfmt.1 DESTINATION man/man1)
-
-install(FILES
- config-model-fat/src/main/resources/config-models.xml
- node-repository/src/main/config/node-repository.xml
- DESTINATION conf/configserver-app)
-
-install(FILES
- chain/src/main/resources/configdefinitions/chains.def
- config-provisioning/src/main/resources/configdefinitions/flavors.def
- container-accesslogging/src/main/resources/configdefinitions/access-log.def
- container-core/src/main/resources/configdefinitions/application-metadata.def
- container-core/src/main/resources/configdefinitions/container-document.def
- container-core/src/main/resources/configdefinitions/container-http.def
- container-core/src/main/resources/configdefinitions/diagnostics.def
- container-core/src/main/resources/configdefinitions/health-monitor.def
- container-core/src/main/resources/configdefinitions/http-filter.def
- container-core/src/main/resources/configdefinitions/metrics-presentation.def
- container-core/src/main/resources/configdefinitions/mockservice.def
- container-core/src/main/resources/configdefinitions/qr-logging.def
- container-core/src/main/resources/configdefinitions/qr-searchers.def
- container-core/src/main/resources/configdefinitions/qr-templates.def
- container-core/src/main/resources/configdefinitions/qr.def
- container-core/src/main/resources/configdefinitions/servlet-config.def
- container-core/src/main/resources/configdefinitions/threadpool.def
- container-core/src/main/resources/configdefinitions/vip-status.def
- container-di/src/main/resources/configdefinitions/bundles.def
- container-di/src/main/resources/configdefinitions/components.def
- container-di/src/main/resources/configdefinitions/jersey-bundles.def
- container-di/src/main/resources/configdefinitions/jersey-injection.def
- container-disc/src/main/resources/configdefinitions/container.jdisc.config.http-server.def
- container-disc/src/main/resources/configdefinitions/jdisc-bindings.def
- container-disc/src/main/resources/configdefinitions/jersey-connection.def
- container-disc/src/main/resources/configdefinitions/jersey-init.def
- container-disc/src/main/resources/configdefinitions/jersey-web-app-pool.def
- container-disc/src/main/resources/configdefinitions/metric-defaults.def
- container-disc/src/main/resources/configdefinitions/score-board.def
- container-messagebus/src/main/resources/configdefinitions/container-mbus.def
- container-messagebus/src/main/resources/configdefinitions/session.def
- container-search-and-docproc/src/main/resources/configdefinitions/application-userdata.def
- container-search/src/main/resources/configdefinitions/cluster.def
- container-search/src/main/resources/configdefinitions/documentdb-info.def
- container-search/src/main/resources/configdefinitions/emulation.def
- container-search/src/main/resources/configdefinitions/federation.def
- container-search/src/main/resources/configdefinitions/fs4.def
- container-search/src/main/resources/configdefinitions/index-info.def
- container-search/src/main/resources/configdefinitions/keyvalue.def
- container-search/src/main/resources/configdefinitions/legacy-emulation.def
- container-search/src/main/resources/configdefinitions/lowercasing.def
- container-search/src/main/resources/configdefinitions/measure-qps.def
- container-search/src/main/resources/configdefinitions/page-templates.def
- container-search/src/main/resources/configdefinitions/provider.def
- container-search/src/main/resources/configdefinitions/qr-binary-cache-region.def
- container-search/src/main/resources/configdefinitions/qr-binary-cache.def
- container-search/src/main/resources/configdefinitions/qr-monitor.def
- container-search/src/main/resources/configdefinitions/qr-quotetable.def
- container-search/src/main/resources/configdefinitions/qr-start.def
- container-search/src/main/resources/configdefinitions/query-profiles.def
- container-search/src/main/resources/configdefinitions/rate-limiting.def
- container-search/src/main/resources/configdefinitions/resolvers.def
- container-search/src/main/resources/configdefinitions/rewrites.def
- container-search/src/main/resources/configdefinitions/search-nodes.def
- container-search/src/main/resources/configdefinitions/search-with-renderer-handler.def
- container-search/src/main/resources/configdefinitions/searchchain-forward.def
- container-search/src/main/resources/configdefinitions/semantic-rules.def
- container-search/src/main/resources/configdefinitions/strict-contracts.def
- container-search/src/main/resources/configdefinitions/timing-searcher.def
- docproc/src/main/resources/configdefinitions/docproc.def
- docproc/src/main/resources/configdefinitions/schemamapping.def
- docproc/src/main/resources/configdefinitions/splitter-joiner-document-processor.def
- fileacquirer/src/main/resources/configdefinitions/filedistributorrpc.def
- jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.client.http-client.def
- jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def
- jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.server.def
- jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.servlet-paths.def
- persistence/src/main/resources/configdefinitions/persistence-rpc.def
- simplemetrics/src/main/resources/configdefinitions/manager.def
- statistics/src/main/resources/configdefinitions/statistics.def
- vespaclient-core/src/main/resources/configdefinitions/feeder.def
- vespaclient-core/src/main/resources/configdefinitions/spooler.def
- docker-api/src/main/resources/configdefinitions/docker.def
- DESTINATION var/db/vespa/config_server/serverdb/classes)
diff --git a/jaxrs_client_utils/src/main/java/com/yahoo/vespa/jaxrs/client/RetryingJaxRsStrategy.java b/jaxrs_client_utils/src/main/java/com/yahoo/vespa/jaxrs/client/RetryingJaxRsStrategy.java
index 2e6cbdd466d..a73297780c6 100644
--- a/jaxrs_client_utils/src/main/java/com/yahoo/vespa/jaxrs/client/RetryingJaxRsStrategy.java
+++ b/jaxrs_client_utils/src/main/java/com/yahoo/vespa/jaxrs/client/RetryingJaxRsStrategy.java
@@ -5,6 +5,9 @@ import com.yahoo.vespa.applicationmodel.HostName;
import javax.ws.rs.ProcessingException;
import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
@@ -20,7 +23,7 @@ public class RetryingJaxRsStrategy<T> implements JaxRsStrategy<T> {
private static final Logger logger = Logger.getLogger(RetryingJaxRsStrategy.class.getName());
private static final int NUM_LOOP_ATTEMPTS = 2;
- private final Set<HostName> hostNames;
+ private final List<HostName> hostNames;
private final int port;
private final JaxRsClientFactory jaxRsClientFactory;
private final Class<T> apiClass;
@@ -38,7 +41,8 @@ public class RetryingJaxRsStrategy<T> implements JaxRsStrategy<T> {
Objects.requireNonNull(jaxRsClientFactory, "jaxRsClientFactory argument may not be null");
Objects.requireNonNull(apiClass, "apiClass argument may not be null");
Objects.requireNonNull(pathPrefix, "pathPrefix argument may not be null");
- this.hostNames = hostNames;
+ this.hostNames = new ArrayList<>(hostNames);
+ Collections.shuffle(this.hostNames);
this.port = port;
this.jaxRsClientFactory = jaxRsClientFactory;
this.apiClass = apiClass;
diff --git a/jdisc_core/CMakeLists.txt b/jdisc_core/CMakeLists.txt
new file mode 100644
index 00000000000..b56d6ce1e5b
--- /dev/null
+++ b/jdisc_core/CMakeLists.txt
@@ -0,0 +1,4 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(jdisc_core)
+
+install(FILES src/main/perl/vespa-jdisc-logfmt.1 DESTINATION man/man1)
diff --git a/jdisc_http_service/CMakeLists.txt b/jdisc_http_service/CMakeLists.txt
new file mode 100644
index 00000000000..8ac0b5e80fb
--- /dev/null
+++ b/jdisc_http_service/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(jdisc_http_service)
+
+install_config_definition(src/main/resources/configdefinitions/jdisc.http.client.http-client.def)
+install_config_definition(src/main/resources/configdefinitions/jdisc.http.connector.def)
+install_config_definition(src/main/resources/configdefinitions/jdisc.http.server.def)
+install_config_definition(src/main/resources/configdefinitions/jdisc.http.servlet-paths.def)
+
diff --git a/jdisc_http_service/pom.xml b/jdisc_http_service/pom.xml
index 9da06709533..d8c0b0bc29c 100644
--- a/jdisc_http_service/pom.xml
+++ b/jdisc_http_service/pom.xml
@@ -16,6 +16,16 @@
<name>${project.artifactId}</name>
<dependencies>
<dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcpkix-jdk15on</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk15on</artifactId>
+ <scope>compile</scope>
+ </dependency>
+ <dependency>
<groupId>com.google.inject</groupId>
<artifactId>guice</artifactId>
<scope>provided</scope>
diff --git a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java
index 166194de505..a46d35f8e70 100644
--- a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java
+++ b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/filter/DiscFilterRequest.java
@@ -36,6 +36,7 @@ public abstract class DiscFilterRequest {
protected static final String HTTPS_PREFIX = "https";
protected static final int DEFAULT_HTTP_PORT = 80;
protected static final int DEFAULT_HTTPS_PORT = 443;
+ private static final String JDISC_REQUEST_PRINCIPAL = "jdisc.request.principal";
private final ServletOrJdiscHttpRequest parent;
protected final InetSocketAddress localAddress;
@@ -330,7 +331,7 @@ public abstract class DiscFilterRequest {
}
public Principal getUserPrincipal() {
- return userPrincipal;
+ return (Principal) getAttribute(JDISC_REQUEST_PRINCIPAL);
}
public boolean isSecure() {
@@ -375,7 +376,7 @@ public abstract class DiscFilterRequest {
}
public void setUserPrincipal(Principal principal) {
- this.userPrincipal = principal;
+ setAttribute(JDISC_REQUEST_PRINCIPAL, principal);
}
public void setUserRoles(String[] roles) {
diff --git a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
index bcc48ed56ae..96180f48229 100644
--- a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
+++ b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactory.java
@@ -10,14 +10,14 @@ import com.yahoo.jdisc.http.ConnectorConfig.Ssl.PemKeyStore;
import com.yahoo.jdisc.http.SecretStore;
import com.yahoo.jdisc.http.ssl.ReaderForPath;
import com.yahoo.jdisc.http.ssl.SslKeyStore;
-import com.yahoo.jdisc.http.ssl.SslKeyStoreFactory;
+import com.yahoo.jdisc.http.ssl.pem.PemSslKeyStore;
import org.eclipse.jetty.http.HttpVersion;
import org.eclipse.jetty.server.ConnectionFactory;
-import org.eclipse.jetty.server.ServerConnectionStatistics;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.SecureRequestCustomizer;
import org.eclipse.jetty.server.Server;
+import org.eclipse.jetty.server.ServerConnectionStatistics;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.server.SslConnectionFactory;
import org.eclipse.jetty.util.ssl.SslContextFactory;
@@ -49,20 +49,17 @@ import static com.yahoo.jdisc.http.ConnectorConfig.Ssl.KeyStoreType.Enum.PEM;
import static com.yahoo.jdisc.http.server.jetty.Exceptions.throwUnchecked;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.21.0
+ * @author Einar M R Rosenvinge
*/
public class ConnectorFactory {
private final static Logger log = Logger.getLogger(ConnectorFactory.class.getName());
private final ConnectorConfig connectorConfig;
- private final SslKeyStoreFactory sslKeyStoreFactory;
private final SecretStore secretStore;
@Inject
- public ConnectorFactory(ConnectorConfig connectorConfig, SslKeyStoreFactory sslKeyStoreFactory, SecretStore secretStore) {
+ public ConnectorFactory(ConnectorConfig connectorConfig, SecretStore secretStore) {
this.connectorConfig = connectorConfig;
- this.sslKeyStoreFactory = sslKeyStoreFactory;
this.secretStore = secretStore;
if (connectorConfig.ssl().enabled())
@@ -74,15 +71,12 @@ public class ConnectorFactory {
ConnectorConfig.Ssl ssl = config.ssl();
if (ssl.keyStoreType() == JKS) {
- if (! ssl.pemKeyStore().keyPath().isEmpty()
- || ! ssl.pemKeyStore().certificatePath().isEmpty())
- throw new IllegalArgumentException(
- "Setting pemKeyStore attributes does not make sense when keyStoreType==JKS.");
+ if (! ssl.pemKeyStore().keyPath().isEmpty() || ! ssl.pemKeyStore().certificatePath().isEmpty())
+ throw new IllegalArgumentException("pemKeyStore attributes can not be set when keyStoreType is JKS.");
}
if (ssl.keyStoreType() == PEM) {
if (! ssl.keyStorePath().isEmpty())
- throw new IllegalArgumentException(
- "Setting keyStorePath does not make sense when keyStoreType==PEM");
+ throw new IllegalArgumentException("keyStorePath can not be set when keyStoreType is PEM");
}
}
@@ -91,7 +85,7 @@ public class ConnectorFactory {
}
public ServerConnector createConnector(final Metric metric, final Server server, final ServerSocketChannel ch, Map<Path, FileChannel> keyStoreChannels) {
- final ServerConnector connector;
+ ServerConnector connector;
if (connectorConfig.ssl().enabled()) {
connector = new JDiscServerConnector(connectorConfig, metric, server, ch,
newSslConnectionFactory(keyStoreChannels),
@@ -116,7 +110,7 @@ public class ConnectorFactory {
}
private HttpConnectionFactory newHttpConnectionFactory() {
- final HttpConfiguration httpConfig = new HttpConfiguration();
+ HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setSendDateHeader(true);
httpConfig.setSendServerVersion(false);
httpConfig.setSendXPoweredBy(false);
@@ -134,7 +128,7 @@ public class ConnectorFactory {
private SslConnectionFactory newSslConnectionFactory(Map<Path, FileChannel> keyStoreChannels) {
Ssl sslConfig = connectorConfig.ssl();
- final SslContextFactory factory = new SslContextFactory();
+ SslContextFactory factory = new SslContextFactory();
switch (sslConfig.clientAuth()) {
case NEED_AUTH:
factory.setNeedClientAuth(true);
@@ -149,21 +143,21 @@ public class ConnectorFactory {
}
if (!sslConfig.excludeProtocol().isEmpty()) {
- final String[] prots = new String[sslConfig.excludeProtocol().size()];
+ String[] prots = new String[sslConfig.excludeProtocol().size()];
for (int i = 0; i < prots.length; i++) {
prots[i] = sslConfig.excludeProtocol(i).name();
}
factory.setExcludeProtocols(prots);
}
if (!sslConfig.includeProtocol().isEmpty()) {
- final String[] prots = new String[sslConfig.includeProtocol().size()];
+ String[] prots = new String[sslConfig.includeProtocol().size()];
for (int i = 0; i < prots.length; i++) {
prots[i] = sslConfig.includeProtocol(i).name();
}
factory.setIncludeProtocols(prots);
}
if (!sslConfig.excludeCipherSuite().isEmpty()) {
- final String[] ciphs = new String[sslConfig.excludeCipherSuite().size()];
+ String[] ciphs = new String[sslConfig.excludeCipherSuite().size()];
for (int i = 0; i < ciphs.length; i++) {
ciphs[i] = sslConfig.excludeCipherSuite(i).name();
}
@@ -171,36 +165,32 @@ public class ConnectorFactory {
}
if (!sslConfig.includeCipherSuite().isEmpty()) {
- final String[] ciphs = new String[sslConfig.includeCipherSuite().size()];
+ String[] ciphs = new String[sslConfig.includeCipherSuite().size()];
for (int i = 0; i < ciphs.length; i++) {
ciphs[i] = sslConfig.includeCipherSuite(i).name();
}
factory.setIncludeCipherSuites(ciphs);
-
}
-
- Optional<String> password = Optional.of(sslConfig.keyDbKey()).
- filter(key -> !key.isEmpty()).map(secretStore::getSecret);
-
+ Optional<String> keyDbPassword = secret(sslConfig.keyDbKey());
switch (sslConfig.keyStoreType()) {
case PEM:
factory.setKeyStore(getKeyStore(sslConfig.pemKeyStore(), keyStoreChannels));
- if (password.isPresent()) {
+ if (keyDbPassword.isPresent())
log.warning("Encrypted PEM key stores are not supported.");
- }
break;
case JKS:
factory.setKeyStorePath(sslConfig.keyStorePath());
factory.setKeyStoreType(sslConfig.keyStoreType().toString());
- factory.setKeyStorePassword(password.orElseThrow(passwordRequiredForJKSKeyStore("key")));
+ factory.setKeyStorePassword(keyDbPassword.orElseThrow(passwordRequiredForJKSKeyStore("key")));
break;
}
if (!sslConfig.trustStorePath().isEmpty()) {
factory.setTrustStorePath(sslConfig.trustStorePath());
- factory.setTrustStoreType(sslConfig.trustStoreType().toString());
- factory.setTrustStorePassword(password.orElseThrow(passwordRequiredForJKSKeyStore("trust")));
+ factory.setTrustStoreType(sslConfig.trustStoreType().toString());
+ if (sslConfig.useTrustStorePassword())
+ factory.setTrustStorePassword(keyDbPassword.orElseThrow(passwordRequiredForJKSKeyStore("trust")));
}
factory.setKeyManagerFactoryAlgorithm(sslConfig.sslKeyManagerFactoryAlgorithm());
@@ -208,6 +198,11 @@ public class ConnectorFactory {
return new SslConnectionFactory(factory, HttpVersion.HTTP_1_1.asString());
}
+ /** Returns the secret password with the given name, or empty if the password name is null or empty */
+ private Optional<String> secret(String keyname) {
+ return Optional.of(keyname).filter(key -> !key.isEmpty()).map(secretStore::getSecret);
+ }
+
@SuppressWarnings("ThrowableInstanceNeverThrown")
private Supplier<RuntimeException> passwordRequiredForJKSKeyStore(String type) {
return () -> new RuntimeException(String.format("Password is required for JKS %s store", type));
@@ -225,9 +220,7 @@ public class ConnectorFactory {
KeyStoreReaderForPath(String pathString) {
Path path = Paths.get(pathString);
channel = Optional.ofNullable(keyStoreChannels.get(path));
- readerForPath = new ReaderForPath(
- channel.map(this::getReader).orElseGet(() -> getReader(path)),
- path);
+ readerForPath = new ReaderForPath(channel.map(this::getReader).orElseGet(() -> getReader(path)), path);
}
private Reader getReader(FileChannel channel) {
@@ -259,8 +252,9 @@ public class ConnectorFactory {
try (KeyStoreReaderForPath certificateReader = new KeyStoreReaderForPath(pemKeyStore.certificatePath());
KeyStoreReaderForPath keyReader = new KeyStoreReaderForPath(pemKeyStore.keyPath())) {
- SslKeyStore keyStore = sslKeyStoreFactory.createKeyStore(certificateReader.readerForPath,
- keyReader.readerForPath);
+ SslKeyStore keyStore = new PemSslKeyStore(
+ new com.yahoo.jdisc.http.ssl.pem.PemKeyStore.KeyStoreLoadParameter(
+ certificateReader.readerForPath, keyReader.readerForPath));
return keyStore.loadJavaKeyStore();
} catch (Exception e) {
throw new RuntimeException("Failed setting up key store for " + pemKeyStore.keyPath() + ", " + pemKeyStore.certificatePath(), e);
@@ -276,12 +270,8 @@ public class ConnectorFactory {
private final boolean tcpNoDelay;
private final ServerSocketChannel channelOpenedByActivator;
- private JDiscServerConnector(
- final ConnectorConfig config,
- final Metric metric,
- final Server server,
- final ServerSocketChannel channelOpenedByActivator,
- final ConnectionFactory... factories) {
+ private JDiscServerConnector(ConnectorConfig config, Metric metric, Server server,
+ ServerSocketChannel channelOpenedByActivator, ConnectionFactory... factories) {
super(server, factories);
this.channelOpenedByActivator = channelOpenedByActivator;
this.tcpKeepAlive = config.tcpKeepAliveEnabled();
@@ -305,8 +295,7 @@ public class ConnectorFactory {
try {
socket.setKeepAlive(tcpKeepAlive);
socket.setTcpNoDelay(tcpNoDelay);
- } catch (final SocketException ignored) {
-
+ } catch (SocketException ignored) {
}
}
@@ -351,7 +340,8 @@ public class ConnectorFactory {
localPortField.set(this, localPort);
}
- private void uglySetChannel(ServerSocketChannel channelOpenedByActivator) throws NoSuchFieldException, IllegalAccessException {
+ private void uglySetChannel(ServerSocketChannel channelOpenedByActivator) throws NoSuchFieldException,
+ IllegalAccessException {
Field acceptChannelField = ServerConnector.class.getDeclaredField("_acceptChannel");
acceptChannelField.setAccessible(true);
acceptChannelField.set(this, channelOpenedByActivator);
@@ -365,4 +355,5 @@ public class ConnectorFactory {
return (JDiscServerConnector)request.getAttribute(REQUEST_ATTRIBUTE);
}
}
+
}
diff --git a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/SslKeyStoreFactory.java b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/SslKeyStoreFactory.java
index 2d659df1cce..6bffd080cf9 100644
--- a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/SslKeyStoreFactory.java
+++ b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/SslKeyStoreFactory.java
@@ -1,8 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http.ssl;
-import java.nio.file.Paths;
-
/**
* A factory for SSL key stores.
*
diff --git a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemKeyStore.java b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemKeyStore.java
new file mode 100644
index 00000000000..21272f202ea
--- /dev/null
+++ b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemKeyStore.java
@@ -0,0 +1,316 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.ssl.pem;
+
+import com.google.common.base.Preconditions;
+import com.yahoo.jdisc.http.ssl.ReaderForPath;
+import org.bouncycastle.asn1.pkcs.PrivateKeyInfo;
+import org.bouncycastle.cert.X509CertificateHolder;
+import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+import org.bouncycastle.openssl.PEMException;
+import org.bouncycastle.openssl.PEMKeyPair;
+import org.bouncycastle.openssl.PEMParser;
+import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter;
+
+import javax.annotation.concurrent.GuardedBy;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.security.Key;
+import java.security.KeyStore.LoadStoreParameter;
+import java.security.KeyStore.ProtectionParameter;
+import java.security.KeyStoreException;
+import java.security.KeyStoreSpi;
+import java.security.NoSuchAlgorithmException;
+import java.security.UnrecoverableKeyException;
+import java.security.cert.Certificate;
+import java.security.cert.CertificateException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.Enumeration;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.function.Consumer;
+
+import static com.yahoo.jdisc.http.server.jetty.Exceptions.throwUnchecked;
+
+/**
+ * Exposes keys and certificates from unencrypted PEM keystore.
+ *
+ * @author Tony Vaagenes
+ * @author bjorncs
+ */
+public class PemKeyStore extends KeyStoreSpi {
+
+ private static String KEY_ALIAS = "KEY";
+
+ static List<String> aliases = Collections.emptyList();
+ static Map<String, String> attributes = Collections.emptyMap();
+ private static final BouncyCastleProvider bouncyCastleProvider = new BouncyCastleProvider();
+
+ @GuardedBy("this")
+ private StoreRole storeRole;
+ @GuardedBy("this")
+ private Key privateKey;
+ @GuardedBy("this")
+ private final Map<String, Certificate> aliasToCertificate = new LinkedHashMap<>();
+
+
+ public PemKeyStore() {}
+
+
+ /**
+ * The user is responsible for closing any readers given in the parameter.
+ */
+ @Override
+ public synchronized void engineLoad(LoadStoreParameter parameter) throws IOException {
+ if (storeRole != null)
+ throw new IllegalStateException("Already initialized.");
+
+ if (parameter instanceof KeyStoreLoadParameter) {
+ storeRole = new KeyStoreRole();
+ loadKeyStore((KeyStoreLoadParameter) parameter);
+ } else if (parameter instanceof TrustStoreLoadParameter) {
+ storeRole = new TrustStoreRole();
+ loadTrustStore((TrustStoreLoadParameter) parameter);
+ } else {
+ throw new IllegalArgumentException("Expected key store or trust store load parameter, got " + parameter.getClass());
+ }
+ }
+
+ private void loadTrustStore(TrustStoreLoadParameter parameter) throws IOException {
+ withPemParser(parameter.certificateReader, this::loadCertificates);
+ }
+
+ private void loadKeyStore(KeyStoreLoadParameter parameter) throws IOException{
+ withPemParser(parameter.keyReader, this::loadPrivateKey);
+ withPemParser(parameter.certificateReader, this::loadCertificates);
+ }
+
+ private static void withPemParser(ReaderForPath reader, Consumer<PEMParser> f) throws IOException {
+ try {
+ //parser.close() will close the underlying reader,
+ //which we want to avoid.
+ //See engineLoad comment.
+ PEMParser parser = new PEMParser(reader.reader);
+ f.accept(parser);
+ } catch (Exception e) {
+ throw new RuntimeException("Failed loading pem key store " + reader.path, e);
+ }
+ }
+
+ private void loadPrivateKey(PEMParser parser) {
+ try {
+ Object object = parser.readObject();
+ PrivateKeyInfo privateKeyInfo;
+ if (object instanceof PEMKeyPair) { // Legacy PKCS1
+ privateKeyInfo = ((PEMKeyPair) object).getPrivateKeyInfo();
+ } else if (object instanceof PrivateKeyInfo) { // PKCS8
+ privateKeyInfo = (PrivateKeyInfo) object;
+ } else {
+ throw new UnsupportedOperationException(
+ "Expected " + PrivateKeyInfo.class + " or " + PEMKeyPair.class + ", got " + object.getClass());
+ }
+
+ Object nextObject = parser.readObject();
+ if (nextObject != null) {
+ throw new UnsupportedOperationException(
+ "Expected a single private key, but found a second element " + nextObject.getClass());
+ }
+
+ setPrivateKey(privateKeyInfo);
+ } catch (Exception e) {
+ throw throwUnchecked(e);
+ }
+ }
+
+ private synchronized void setPrivateKey(PrivateKeyInfo privateKey) throws PEMException {
+ JcaPEMKeyConverter converter = new JcaPEMKeyConverter().setProvider(bouncyCastleProvider);
+ this.privateKey = converter.getPrivateKey(privateKey);
+ }
+
+ private void loadCertificates(PEMParser parser) {
+ try {
+ Object pemObject;
+ while ((pemObject = parser.readObject()) != null) {
+ addCertificate(pemObject);
+ }
+
+ if (aliasToCertificate.isEmpty())
+ throw new RuntimeException("No certificates available");
+ } catch (Exception e) {
+ throw throwUnchecked(e);
+ }
+ }
+
+ private synchronized void addCertificate(Object pemObject) throws CertificateException {
+ if (pemObject instanceof X509CertificateHolder) {
+ JcaX509CertificateConverter converter = new JcaX509CertificateConverter().setProvider(bouncyCastleProvider);
+ String alias = "cert-" + aliasToCertificate.size();
+ aliasToCertificate.put(alias, converter.getCertificate((X509CertificateHolder) pemObject));
+ } else {
+ throw new UnsupportedOperationException("Expected X509 certificate, got " + pemObject.getClass());
+ }
+ }
+
+ @Override
+ public synchronized Enumeration<String> engineAliases() {
+ return Collections.enumeration(storeRole.engineAliases());
+
+ }
+
+ @Override
+ public synchronized boolean engineIsKeyEntry(String alias) {
+ return KEY_ALIAS.equals(alias);
+ }
+
+ @Override
+ public synchronized Key engineGetKey(String alias, char[] password) throws NoSuchAlgorithmException, UnrecoverableKeyException {
+ Preconditions.checkArgument(KEY_ALIAS.equals(alias));
+ return privateKey;
+ }
+
+ @Override
+ public synchronized boolean engineIsCertificateEntry(String alias) {
+ return aliasToCertificate.containsKey(alias);
+ }
+
+
+ @Override
+ public synchronized Certificate engineGetCertificate(String alias) {
+ return aliasToCertificate.get(alias);
+ }
+
+ @Override
+ public synchronized Certificate[] engineGetCertificateChain(String alias) {
+ Preconditions.checkArgument(KEY_ALIAS.equals(alias));
+ return aliasToCertificate.values().toArray(new Certificate[aliasToCertificate.size()]);
+ }
+
+
+ @Override
+ public synchronized boolean engineContainsAlias(String alias) {
+ return storeRole.engineContainsAlias(alias);
+ }
+
+ @Override
+ public synchronized int engineSize() {
+ return storeRole.engineSize();
+ }
+
+ @Override
+ public synchronized String engineGetCertificateAlias(final Certificate certificate) {
+ for (Entry<String, Certificate> entry : aliasToCertificate.entrySet()) {
+ if (entry.getValue() == certificate)
+ return entry.getKey();
+ }
+
+ return null;
+ }
+
+ @Override
+ public synchronized Date engineGetCreationDate(String alias) {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized void engineSetKeyEntry(String alias, Key key, char[] password, Certificate[] chain) throws KeyStoreException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized void engineSetKeyEntry(String alias, byte[] key, Certificate[] chain) throws KeyStoreException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized void engineSetCertificateEntry(String alias, Certificate cert) throws KeyStoreException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized void engineDeleteEntry(String alias) throws KeyStoreException {
+ throw new UnsupportedOperationException();
+ }
+
+
+ @Override
+ public synchronized void engineStore(OutputStream stream, char[] password) throws IOException, NoSuchAlgorithmException, CertificateException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public synchronized void engineLoad(InputStream stream, char[] password) throws IOException, NoSuchAlgorithmException, CertificateException {
+ throw new UnsupportedOperationException();
+ }
+
+ private interface StoreRole {
+ Collection<String> engineAliases();
+ boolean engineContainsAlias(String alias);
+ int engineSize();
+ }
+
+ private class KeyStoreRole implements StoreRole {
+ @Override
+ public Collection<String> engineAliases() {
+ return Collections.singletonList(KEY_ALIAS);
+ }
+
+ @Override
+ public boolean engineContainsAlias(String alias) {
+ return KEY_ALIAS.equals(alias);
+ }
+
+ @Override
+ public int engineSize() {
+ return 1;
+ }
+ }
+
+ private class TrustStoreRole implements StoreRole{
+ @Override
+ public Collection<String> engineAliases() {
+ return aliasToCertificate.keySet();
+ }
+
+ @Override
+ public boolean engineContainsAlias(String alias) {
+ return aliasToCertificate.containsKey(alias);
+ }
+
+ @Override
+ public int engineSize() {
+ return aliasToCertificate.size();
+ }
+ }
+
+ public static class PemLoadStoreParameter implements LoadStoreParameter {
+ private PemLoadStoreParameter() {}
+
+ @Override
+ public ProtectionParameter getProtectionParameter() {
+ return null;
+ }
+ }
+
+ public static final class KeyStoreLoadParameter extends PemLoadStoreParameter {
+ public final ReaderForPath certificateReader;
+ public final ReaderForPath keyReader;
+
+ public KeyStoreLoadParameter(ReaderForPath certificateReader, ReaderForPath keyReader) {
+ this.certificateReader = certificateReader;
+ this.keyReader = keyReader;
+ }
+ }
+
+ public static final class TrustStoreLoadParameter extends PemLoadStoreParameter {
+ public final ReaderForPath certificateReader;
+
+ public TrustStoreLoadParameter(ReaderForPath certificateReader) {
+ this.certificateReader = certificateReader;
+ }
+ }
+}
diff --git a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemKeyStoreProvider.java b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemKeyStoreProvider.java
new file mode 100644
index 00000000000..c1fcf8c33bf
--- /dev/null
+++ b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemKeyStoreProvider.java
@@ -0,0 +1,20 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.ssl.pem;
+
+import java.security.Provider;
+
+/**
+ * @author Tony Vaagenes
+ */
+public class PemKeyStoreProvider extends Provider {
+
+ public static final String name = "PEMKeyStoreProvider";
+ public static final double version = 1;
+ public static final String description = "Provides PEM keystore support";
+
+ public PemKeyStoreProvider() {
+ super(name, version, description);
+ putService(new Service(this, "KeyStore", "PEM", PemKeyStore. class.getName(), PemKeyStore.aliases, PemKeyStore.attributes));
+ }
+
+}
diff --git a/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemSslKeyStore.java b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemSslKeyStore.java
new file mode 100644
index 00000000000..bf91f0eb259
--- /dev/null
+++ b/jdisc_http_service/src/main/java/com/yahoo/jdisc/http/ssl/pem/PemSslKeyStore.java
@@ -0,0 +1,53 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.jdisc.http.ssl.pem;
+
+import com.yahoo.jdisc.http.ssl.SslKeyStore;
+import com.yahoo.jdisc.http.ssl.pem.PemKeyStore.KeyStoreLoadParameter;
+import com.yahoo.jdisc.http.ssl.pem.PemKeyStore.PemLoadStoreParameter;
+import com.yahoo.jdisc.http.ssl.pem.PemKeyStore.TrustStoreLoadParameter;
+
+import java.io.IOException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.Security;
+import java.security.cert.CertificateException;
+
+/**
+ * Responsible for creating pem key stores.
+ *
+ * @author Tony Vaagenes
+ */
+public class PemSslKeyStore extends SslKeyStore {
+
+ static {
+ Security.addProvider(new PemKeyStoreProvider());
+ }
+
+ private static final String keyStoreType = "PEM";
+ private final PemLoadStoreParameter loadParameter;
+ private KeyStore keyStore;
+
+ public PemSslKeyStore(KeyStoreLoadParameter loadParameter) {
+ this.loadParameter = loadParameter;
+ }
+
+ public PemSslKeyStore(TrustStoreLoadParameter loadParameter) {
+ this.loadParameter = loadParameter;
+ }
+
+ @Override
+ public KeyStore loadJavaKeyStore() throws KeyStoreException, IOException, NoSuchAlgorithmException, CertificateException {
+ if (getKeyStorePassword().isPresent()) {
+ throw new UnsupportedOperationException("PEM key store with password is currently not supported. Please file a feature request.");
+ }
+
+ //cached since Reader(in loadParameter) can only be used one time.
+ if (keyStore == null) {
+ keyStore = KeyStore.getInstance(keyStoreType);
+ keyStore.load(loadParameter);
+ }
+ return keyStore;
+ }
+
+}
diff --git a/jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def b/jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def
index 1c059fff2e7..8d709cb8ab1 100644
--- a/jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def
+++ b/jdisc_http_service/src/main/resources/configdefinitions/jdisc.http.connector.def
@@ -43,7 +43,9 @@ tcpNoDelay bool default=true
# Whether to enable SSL for this connector.
ssl.enabled bool default=false
-# The KeyDB key.
+# The name of the key to the password to the key store if in the secret store, if JKS is used.
+# Must be empty with PEM
+# By default this is also used to look up the password to the trust store.
ssl.keyDbKey string default=""
# Names of protocols to exclude.
@@ -67,11 +69,15 @@ ssl.keyStorePath string default=""
ssl.pemKeyStore.keyPath string default=""
ssl.pemKeyStore.certificatePath string default=""
-ssl.trustStoreType enum { JKS } default="JKS"
+ssl.trustStoreType enum { JKS } default=JKS
# JKS only - the path to the truststore.
ssl.trustStorePath string default=""
+# Whether we should use keyDbKey as password to the trust store (true, default),
+# or use no password with the trust store (false)
+ssl.useTrustStorePassword bool default=true
+
# The algorithm name used by the KeyManagerFactory.
ssl.sslKeyManagerFactoryAlgorithm string default="SunX509"
diff --git a/jdisc_http_service/src/test/java/com/yahoo/jdisc/http/guiceModules/ConnectorFactoryRegistryModule.java b/jdisc_http_service/src/test/java/com/yahoo/jdisc/http/guiceModules/ConnectorFactoryRegistryModule.java
index 5e4614a7804..1200a06be2c 100644
--- a/jdisc_http_service/src/test/java/com/yahoo/jdisc/http/guiceModules/ConnectorFactoryRegistryModule.java
+++ b/jdisc_http_service/src/test/java/com/yahoo/jdisc/http/guiceModules/ConnectorFactoryRegistryModule.java
@@ -11,9 +11,6 @@ import com.yahoo.jdisc.http.ConnectorConfig.Builder;
import com.yahoo.jdisc.http.SecretStore;
import com.yahoo.jdisc.http.server.jetty.ConnectorFactory;
import com.yahoo.jdisc.http.server.jetty.TestDrivers;
-import com.yahoo.jdisc.http.ssl.ReaderForPath;
-import com.yahoo.jdisc.http.ssl.SslKeyStore;
-import com.yahoo.jdisc.http.ssl.SslKeyStoreFactory;
/**
* Guice module for test ConnectorFactories
@@ -49,21 +46,7 @@ public class ConnectorFactoryRegistryModule implements Module {
private static class StaticKeyDbConnectorFactory extends ConnectorFactory {
public StaticKeyDbConnectorFactory(ConnectorConfig connectorConfig) {
- super(connectorConfig, new ThrowingSslKeyStoreFactory(), new MockSecretStore());
- }
-
- }
-
- private static final class ThrowingSslKeyStoreFactory implements SslKeyStoreFactory {
-
- @Override
- public SslKeyStore createKeyStore(ReaderForPath certificateFile, ReaderForPath keyFile) {
- throw new UnsupportedOperationException("A SSL key store factory component is not available");
- }
-
- @Override
- public SslKeyStore createTrustStore(ReaderForPath certificateFile) {
- throw new UnsupportedOperationException("A SSL key store factory component is not available");
+ super(connectorConfig, new MockSecretStore());
}
}
diff --git a/jdisc_http_service/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java b/jdisc_http_service/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
index 49656775dc0..7a03d805864 100644
--- a/jdisc_http_service/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
+++ b/jdisc_http_service/src/test/java/com/yahoo/jdisc/http/server/jetty/ConnectorFactoryTest.java
@@ -1,29 +1,36 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jdisc.http.server.jetty;
+import com.google.common.collect.ImmutableMap;
import com.yahoo.jdisc.Metric;
-import com.yahoo.jdisc.http.CertificateStore;
import com.yahoo.jdisc.http.ConnectorConfig;
-import com.yahoo.jdisc.http.HttpRequest;
import com.yahoo.jdisc.http.SecretStore;
import com.yahoo.jdisc.http.ssl.ReaderForPath;
+import com.yahoo.jdisc.http.ssl.SslContextFactory;
import com.yahoo.jdisc.http.ssl.SslKeyStore;
-import com.yahoo.jdisc.http.ssl.SslKeyStoreFactory;
+import com.yahoo.jdisc.http.ssl.pem.PemKeyStore;
+import com.yahoo.jdisc.http.ssl.pem.PemSslKeyStore;
import org.eclipse.jetty.server.Request;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.handler.AbstractHandler;
import org.testng.annotations.Test;
+import javax.net.ssl.SSLContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.net.InetSocketAddress;
+import java.nio.channels.FileChannel;
import java.nio.channels.ServerSocketChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
import java.util.Collections;
import java.util.Map;
-import static com.yahoo.jdisc.http.ConnectorConfig.*;
+import static com.yahoo.jdisc.http.ConnectorConfig.Ssl;
import static com.yahoo.jdisc.http.ConnectorConfig.Ssl.KeyStoreType.Enum.JKS;
import static com.yahoo.jdisc.http.ConnectorConfig.Ssl.KeyStoreType.Enum.PEM;
import static org.hamcrest.CoreMatchers.equalTo;
@@ -44,8 +51,7 @@ public class ConnectorFactoryTest {
new Ssl.PemKeyStore.Builder()
.keyPath("nonEmpty"))));
- ConnectorFactory willThrowException = new ConnectorFactory(config, new ThrowingSslKeyStoreFactory(),
- new ThrowingSecretStore());
+ ConnectorFactory willThrowException = new ConnectorFactory(config, new ThrowingSecretStore());
}
@Test(expectedExceptions = IllegalArgumentException.class)
@@ -57,8 +63,7 @@ public class ConnectorFactoryTest {
.keyStoreType(PEM)
.keyStorePath("nonEmpty")));
- ConnectorFactory willThrowException = new ConnectorFactory(config, new ThrowingSslKeyStoreFactory(),
- new ThrowingSecretStore());
+ ConnectorFactory willThrowException = new ConnectorFactory(config, new ThrowingSecretStore());
}
@Test
@@ -66,7 +71,6 @@ public class ConnectorFactoryTest {
Server server = new Server();
try {
ConnectorFactory factory = new ConnectorFactory(new ConnectorConfig(new ConnectorConfig.Builder()),
- new ThrowingSslKeyStoreFactory(),
new ThrowingSecretStore());
ConnectorFactory.JDiscServerConnector connector =
(ConnectorFactory.JDiscServerConnector)factory.createConnector(new DummyMetric(), server, null, Collections.emptyMap());
@@ -94,7 +98,7 @@ public class ConnectorFactoryTest {
ServerSocketChannel serverChannel = ServerSocketChannel.open();
serverChannel.socket().bind(new InetSocketAddress(0));
- ConnectorFactory factory = new ConnectorFactory(new ConnectorConfig(new ConnectorConfig.Builder()), new ThrowingSslKeyStoreFactory(), new ThrowingSecretStore());
+ ConnectorFactory factory = new ConnectorFactory(new ConnectorConfig(new ConnectorConfig.Builder()), new ThrowingSecretStore());
ConnectorFactory.JDiscServerConnector connector = (ConnectorFactory.JDiscServerConnector) factory.createConnector(new DummyMetric(), server, serverChannel, Collections.emptyMap());
server.addConnector(connector);
server.setHandler(new HelloWorldHandler());
@@ -113,6 +117,63 @@ public class ConnectorFactoryTest {
}
}
+ @Test
+ public void pre_bound_keystore_file_channels_are_used() throws Exception {
+ Path pemKeyStoreDirectory = Paths.get("src/test/resources/pem/");
+
+ Path certificateFile = pemKeyStoreDirectory.resolve("test.crt");
+ Path privateKeyFile = pemKeyStoreDirectory.resolve("test.key");
+
+ Server server = new Server();
+ try {
+ ServerSocketChannel serverChannel = ServerSocketChannel.open();
+ serverChannel.socket().bind(new InetSocketAddress(0));
+
+ String fakeCertificatePath = "ensure-certificate-path-is-not-used-to-open-the-file";
+ String fakeKeyPath = "ensure-key-path-is-not-used-to-open-the-file";
+
+ ConnectorConfig.Builder builder = new ConnectorConfig.Builder();
+ builder.ssl(
+ new Ssl.Builder().
+ enabled(true).
+ keyStoreType(PEM).
+ pemKeyStore(new Ssl.PemKeyStore.Builder().
+ certificatePath(fakeCertificatePath).
+ keyPath(fakeKeyPath)));
+
+ FileChannel certificateChannel = FileChannel.open(certificateFile, StandardOpenOption.READ);
+ FileChannel privateKeyChannel = FileChannel.open(privateKeyFile, StandardOpenOption.READ);
+
+ Map<Path, FileChannel> keyStoreChannels = ImmutableMap.<Path, FileChannel>builder().
+ put(Paths.get(fakeCertificatePath), certificateChannel).
+ put(Paths.get(fakeKeyPath), privateKeyChannel).
+ build();
+
+
+ ConnectorFactory factory = new ConnectorFactory(new ConnectorConfig(builder), new ThrowingSecretStore());
+ ConnectorFactory.JDiscServerConnector connector = (ConnectorFactory.JDiscServerConnector) factory.createConnector(new DummyMetric(), server, serverChannel, keyStoreChannels);
+ server.addConnector(connector);
+ server.setHandler(new HelloWorldHandler());
+ server.start();
+
+ SslKeyStore trustStore = new PemSslKeyStore(
+ new PemKeyStore.TrustStoreLoadParameter(
+ new ReaderForPath(Files.newBufferedReader(certificateFile), certificateFile)));
+
+ SSLContext clientSslContext = SslContextFactory.newInstanceFromTrustStore(trustStore).getServerSSLContext();
+ SimpleHttpClient client = new SimpleHttpClient(clientSslContext, connector.getLocalPort(), false);
+ SimpleHttpClient.RequestExecutor ex = client.newGet("/ignored");
+ SimpleHttpClient.ResponseValidator val = ex.execute();
+ val.expectContent(equalTo("Hello world"));
+ } finally {
+ try {
+ server.stop();
+ } catch (Exception e) {
+ //ignore
+ }
+ }
+ }
+
private static class HelloWorldHandler extends AbstractHandler {
@Override
public void handle(String target, Request baseRequest, HttpServletRequest request, HttpServletResponse response) throws IOException, ServletException {
@@ -139,20 +200,6 @@ public class ConnectorFactoryTest {
private static class DummyContext implements Metric.Context {
}
- private static final class ThrowingSslKeyStoreFactory implements SslKeyStoreFactory {
-
- @Override
- public SslKeyStore createKeyStore(ReaderForPath certificateFile, ReaderForPath keyFile) {
- throw new UnsupportedOperationException("A SSL key store factory component is not available");
- }
-
- @Override
- public SslKeyStore createTrustStore(ReaderForPath certificateFile) {
- throw new UnsupportedOperationException("A SSL key store factory component is not available");
- }
-
- }
-
private static final class ThrowingSecretStore implements SecretStore {
@Override
diff --git a/jdisc_http_service/src/test/resources/pem/test.crt b/jdisc_http_service/src/test/resources/pem/test.crt
new file mode 100644
index 00000000000..fb132a454e2
--- /dev/null
+++ b/jdisc_http_service/src/test/resources/pem/test.crt
@@ -0,0 +1,88 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 4660 (0x1234)
+ Signature Algorithm: md5WithRSAEncryption
+ Issuer: C=US, ST=California, L=Sunnyvale, O=Yahoo Inc., OU=Information Technology, CN=darkmoist-lm.trondheim.corp.yahoo.com
+ Validity
+ Not Before: Sep 2 10:32:37 2014 GMT
+ Not After : Aug 7 10:32:37 2019 GMT
+ Subject: C=US, ST=California, O=Yahoo Inc., OU=Information Technology, CN=darkmoist-lm.trondheim.corp.yahoo.com
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:d4:cd:7b:e0:29:9e:cd:01:21:26:ae:60:e3:3a:
+ 0d:19:6e:b1:ae:49:f3:9f:37:45:7d:77:95:b0:6d:
+ 63:ef:b3:7c:e8:29:15:ad:8b:b6:40:b6:c5:12:1e:
+ 7e:c9:6b:75:15:2f:31:30:2a:6b:1c:00:bb:b3:a7:
+ 31:ab:84:e5:32:52:1d:3e:bb:7d:71:f0:ff:9f:21:
+ b8:9d:cb:6a:65:34:de:cc:22:81:a2:53:0f:7b:9c:
+ d8:a9:b6:a5:3d:8b:31:5e:b1:cb:da:51:12:0e:68:
+ 64:6a:2e:4a:c1:50:ee:0c:6d:a1:30:6b:3f:1c:97:
+ 37:76:fd:03:8a:1a:55:1d:7e:2d:14:fb:24:09:4e:
+ a6:04:cf:f8:f9:bb:01:78:f5:7f:c7:b5:3a:52:76:
+ ce:4d:79:4f:83:59:84:90:a5:ef:58:25:bd:95:d6:
+ f5:90:bf:fa:8b:4b:9f:d1:63:d1:75:2c:c8:00:de:
+ 2d:72:0e:a6:d8:48:ed:36:87:63:21:7d:77:d3:93:
+ 9e:12:f0:69:11:a1:90:63:2f:f9:6b:5d:a6:d2:65:
+ 91:7c:ad:5d:6a:4f:63:79:21:a4:7b:7d:8c:2c:a4:
+ 48:3c:d1:9e:a7:66:6c:d8:9c:ce:c9:54:fa:0e:1f:
+ fd:28:25:7a:ea:e7:4c:2c:86:11:45:a5:dc:b7:5e:
+ fa:97
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Basic Constraints:
+ CA:FALSE
+ Netscape Comment:
+ OpenSSL Generated Certificate
+ X509v3 Subject Key Identifier:
+ 64:31:6B:9A:8E:FF:27:ED:E2:F4:4F:30:D5:A6:0D:45:9C:29:D3:81
+ X509v3 Authority Key Identifier:
+ DirName:/C=US/ST=California/L=Sunnyvale/O=Yahoo Inc./OU=Information Technology/CN=darkmoist-lm.trondheim.corp.yahoo.com
+ serial:9B:F0:C8:38:83:81:2B:C3
+
+ Signature Algorithm: md5WithRSAEncryption
+ 81:82:99:e9:b1:04:d3:f4:49:c3:b4:49:8a:0a:9a:49:29:51:
+ d3:f0:03:0e:2f:d5:7a:2c:44:65:74:15:de:36:41:e3:d3:c3:
+ 69:ff:99:0a:dc:fb:a7:26:c2:3f:a0:40:a6:51:32:47:02:d8:
+ c5:35:ac:f6:e5:c2:65:7a:90:cc:a1:58:4f:1e:8b:7c:e7:77:
+ 07:c2:15:41:38:0f:f7:ca:bd:fb:3e:22:27:0d:90:b5:6f:a7:
+ 2c:10:1c:31:d6:9b:c0:53:db:a8:65:5a:06:97:1a:62:4e:e5:
+ 7f:98:57:8a:60:d6:db:f8:57:ca:ea:f0:44:d0:9e:4c:bb:48:
+ 1c:b4:5f:0f:b4:26:c7:f1:ca:61:f3:7b:21:03:4f:f2:e6:46:
+ 04:ea:88:7d:0f:41:24:32:a5:07:57:3c:6f:e1:a6:ca:12:b0:
+ c1:8c:50:a7:e1:68:80:9b:63:83:e2:de:e5:3c:30:2e:06:12:
+ 66:4c:6c:f8:55:88:62:00:1e:72:4b:ea:78:88:0c:31:95:e5:
+ 38:fa:78:78:a8:e9:80:3f:42:63:e6:37:f7:4b:47:ff:38:0a:
+ 3e:83:7c:ef:70:ea:43:24:06:45:51:3e:f5:ef:6e:ef:99:bc:
+ 47:70:3f:8b:d0:8f:a8:e7:50:3f:c7:94:27:fb:24:bf:c4:8c:
+ db:a5:86:6c
+-----BEGIN CERTIFICATE-----
+MIIEvjCCA6agAwIBAgICEjQwDQYJKoZIhvcNAQEEBQAwgZwxCzAJBgNVBAYTAlVT
+MRMwEQYDVQQIEwpDYWxpZm9ybmlhMRIwEAYDVQQHEwlTdW5ueXZhbGUxEzARBgNV
+BAoTCllhaG9vIEluYy4xHzAdBgNVBAsTFkluZm9ybWF0aW9uIFRlY2hub2xvZ3kx
+LjAsBgNVBAMTJWRhcmttb2lzdC1sbS50cm9uZGhlaW0uY29ycC55YWhvby5jb20w
+HhcNMTQwOTAyMTAzMjM3WhcNMTkwODA3MTAzMjM3WjCBiDELMAkGA1UEBhMCVVMx
+EzARBgNVBAgTCkNhbGlmb3JuaWExEzARBgNVBAoTCllhaG9vIEluYy4xHzAdBgNV
+BAsTFkluZm9ybWF0aW9uIFRlY2hub2xvZ3kxLjAsBgNVBAMTJWRhcmttb2lzdC1s
+bS50cm9uZGhlaW0uY29ycC55YWhvby5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IB
+DwAwggEKAoIBAQDUzXvgKZ7NASEmrmDjOg0ZbrGuSfOfN0V9d5WwbWPvs3zoKRWt
+i7ZAtsUSHn7Ja3UVLzEwKmscALuzpzGrhOUyUh0+u31x8P+fIbidy2plNN7MIoGi
+Uw97nNiptqU9izFescvaURIOaGRqLkrBUO4MbaEwaz8clzd2/QOKGlUdfi0U+yQJ
+TqYEz/j5uwF49X/HtTpSds5NeU+DWYSQpe9YJb2V1vWQv/qLS5/RY9F1LMgA3i1y
+DqbYSO02h2MhfXfTk54S8GkRoZBjL/lrXabSZZF8rV1qT2N5IaR7fYwspEg80Z6n
+ZmzYnM7JVPoOH/0oJXrq50wshhFFpdy3XvqXAgMBAAGjggEaMIIBFjAJBgNVHRME
+AjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0
+ZTAdBgNVHQ4EFgQUZDFrmo7/J+3i9E8w1aYNRZwp04EwgbsGA1UdIwSBszCBsKGB
+oqSBnzCBnDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExEjAQBgNV
+BAcTCVN1bm55dmFsZTETMBEGA1UEChMKWWFob28gSW5jLjEfMB0GA1UECxMWSW5m
+b3JtYXRpb24gVGVjaG5vbG9neTEuMCwGA1UEAxMlZGFya21vaXN0LWxtLnRyb25k
+aGVpbS5jb3JwLnlhaG9vLmNvbYIJAJvwyDiDgSvDMA0GCSqGSIb3DQEBBAUAA4IB
+AQCBgpnpsQTT9EnDtEmKCppJKVHT8AMOL9V6LERldBXeNkHj08Np/5kK3PunJsI/
+oECmUTJHAtjFNaz25cJlepDMoVhPHot853cHwhVBOA/3yr37PiInDZC1b6csEBwx
+1pvAU9uoZVoGlxpiTuV/mFeKYNbb+FfK6vBE0J5Mu0gctF8PtCbH8cph83shA0/y
+5kYE6oh9D0EkMqUHVzxv4abKErDBjFCn4WiAm2OD4t7lPDAuBhJmTGz4VYhiAB5y
+S+p4iAwxleU4+nh4qOmAP0Jj5jf3S0f/OAo+g3zvcOpDJAZFUT71727vmbxHcD+L
+0I+o51A/x5Qn+yS/xIzbpYZs
+-----END CERTIFICATE----- \ No newline at end of file
diff --git a/jdisc_http_service/src/test/resources/pem/test.key b/jdisc_http_service/src/test/resources/pem/test.key
new file mode 100644
index 00000000000..91335afc9a7
--- /dev/null
+++ b/jdisc_http_service/src/test/resources/pem/test.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA1M174CmezQEhJq5g4zoNGW6xrknznzdFfXeVsG1j77N86CkV
+rYu2QLbFEh5+yWt1FS8xMCprHAC7s6cxq4TlMlIdPrt9cfD/nyG4nctqZTTezCKB
+olMPe5zYqbalPYsxXrHL2lESDmhkai5KwVDuDG2hMGs/HJc3dv0DihpVHX4tFPsk
+CU6mBM/4+bsBePV/x7U6UnbOTXlPg1mEkKXvWCW9ldb1kL/6i0uf0WPRdSzIAN4t
+cg6m2EjtNodjIX1305OeEvBpEaGQYy/5a12m0mWRfK1dak9jeSGke32MLKRIPNGe
+p2Zs2JzOyVT6Dh/9KCV66udMLIYRRaXct176lwIDAQABAoIBAQCd9opls391nckF
+9ZtmEMl4f3rVbX+ySE0E/afX9tugKxQlIZo94N/A2esfsBNdYK7gss9IebRYbRLo
+IMv2Dgg0ek/LKVHNKqAVd+qa90xbJAvebB7eZ9muYJdUI4g1TwWuzTwNKvDEUSl4
+yDQlm/WYtCha0MFgb790TAw8j59u68f2qPJVtIQ+EAEB7dTvIt7tOHV9dlcNWL/8
+uPx3NXsi4Nq0m06zF6TTjUmvQjks+Ai/GHLWeNwUPBTfmR7QrCYLFlyuECYt6p2J
+aJMwGYhVORlRMa3LCxkk/7s/Ebxif3qtjZBe19NdePa8zxX/kKDE4/L1LUXaC2aN
++l8rMIxBAoGBAOoX6yOghHC6nRMkVJaPIEaBbyjV/2yeFo56Xe6sGnFGGgv4ZDRB
+0DAiiCKYKenfAcijZ+YmHazSIYI+EB9A6PK1j67JudyG55wtx85sBAGag9pJT0Ep
+lYfWfJZThgTm1kQobp3oblQo6ZAH8NLeH2084OFhoMiQr8z+ObGfDNr7AoGBAOi3
+guH6tXGE3I8Z0OPrhJLRM5Kno5pqDQOFi+85cm5+AcV06wM+Je33K9LJGQYHJ04N
+LJii5aOG+Vs/n2SplYl/3u52fEia+N9u1sc4iXeBi9e7COidjFPeIAX0CI9gGIt7
+x2sa8/WMZiQTqa9MbQF4psYcyWyK3WDQfWbNo8wVAoGBAOS15bhzNbJlwN1Y24QV
+5jS8dPxyyBE5C1S83VU4tMUC9qPHVS9xNZQxyMvz2s9yYG3EqNhFWSzmSHLVbC78
+3htzpCPjV0HMVDFU0SguhGOEsVnt0g8aL8v9lM/SXtgfKCyDTD/fPRvgtQFRoMqE
+1jOGDThmiA4svnYL1BZkDM1NAoGBANH6CvlVmnO0GsJv28BbGILUikEwS3kfaWCd
+Fhci8XJq9bQxe3+wis69b+hAFPkQaVGOp4eNq8AyIDpKHMraDRhErWTiud9VHWuU
++exFwht3YzOjCjXBOgXObXyRpUugvGTWqaelaSxMozi4GSoXvl9OesRU4xWx8m/R
+juS8dafFAoGASRntDyZBQR58yGDVGTIK6QDIRdmN6QcBQS0wiCSsHl4b9Q7Ve/em
+/qRf7xMdzFejAWkB2LD68HbskzVQmAN0VCPMTZjKsPPmxxgcXfIdghBfWNhXXzal
+KV1kiIb8cHHdXZxGRZpOQFCs2oOrQE99jMgYtVmuIXEErz9pssaEhxo=
+-----END RSA PRIVATE KEY-----
diff --git a/jdisc_jetty/CMakeLists.txt b/jdisc_jetty/CMakeLists.txt
new file mode 100644
index 00000000000..9059f68d953
--- /dev/null
+++ b/jdisc_jetty/CMakeLists.txt
@@ -0,0 +1,3 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_java_artifact(jdisc_jetty)
+install_java_artifact_dependencies(jdisc_jetty)
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
index 256d1c4fec4..d67b6efed31 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
@@ -70,8 +70,8 @@ public class Mirror implements IMirror {
}
/**
- * Shut down the Mirror. This will close any open connections and
- * stop the regular mirror updates.
+ * Shut down the Mirror. This will close any open connections,
+ * stop the regular mirror updates, and discard all entries.
*/
public void shutdown() {
updateTask.kill();
@@ -293,6 +293,7 @@ public class Mirror implements IMirror {
target.close();
target = null;
}
+ specs = new Entry[0];
}
/**
diff --git a/logd/src/apps/logd/main.cpp b/logd/src/apps/logd/main.cpp
index caeaf83c7cc..00654060107 100644
--- a/logd/src/apps/logd/main.cpp
+++ b/logd/src/apps/logd/main.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <logd/errhandle.h>
-#include <logd/sigterm.h>
#include <logd/service.h>
#include <logd/forward.h>
#include <logd/conf.h>
@@ -9,6 +8,7 @@
#include <vespa/config/common/exceptions.h>
#include <csignal>
#include <unistd.h>
+#include <vespa/vespalib/util/sig_catch.h>
#include <vespa/log/log.h>
LOG_SETUP("logdemon");
@@ -24,7 +24,7 @@ int main(int, char**)
EV_STARTED("logdemon");
- hook_signals();
+ vespalib::SigCatch catcher;
const char *cfid = getenv("VESPA_CONFIG_ID");
@@ -50,7 +50,7 @@ int main(int, char**)
LOG(debug, "connection exception: %s", ex.what());
subscriber.closeConn();
}
- if (gotSignaled()) {
+ if (catcher.receivedStopSignal()) {
throw SigTermException("caught signal");
}
if (sleepcount < 60) {
@@ -60,10 +60,10 @@ int main(int, char**)
}
LOG(debug, "sleep %d...", sleepcount);
for (int i = 0; i < sleepcount; i++) {
- sleep(1);
- if (gotSignaled()) {
- throw SigTermException("caught signal");
- }
+ sleep(1);
+ if (catcher.receivedStopSignal()) {
+ throw SigTermException("caught signal");
+ }
}
}
} catch (config::ConfigRuntimeException & ex) {
@@ -75,15 +75,8 @@ int main(int, char**)
EV_STOPPING("logdemon", "bad config");
return 1;
} catch (SigTermException& ex) {
- if (gotSignalNumber() == SIGTERM) {
- LOG(debug, "stopping on SIGTERM");
- EV_STOPPING("logdemon", "done ok.");
- } else {
- LOG(warning, "stopping on signal %d", gotSignalNumber());
- char buf[100];
- snprintf(buf, sizeof buf, "got signal %d", gotSignalNumber());
- EV_STOPPING("logdemon", buf);
- }
+ LOG(debug, "stopping on SIGTERM");
+ EV_STOPPING("logdemon", "done ok.");
return 0;
} catch (MsgException& ex) {
LOG(error, "stopping on error: %s", ex.what());
diff --git a/logd/src/logd/CMakeLists.txt b/logd/src/logd/CMakeLists.txt
index 03ade1d66c8..b436ef52876 100644
--- a/logd/src/logd/CMakeLists.txt
+++ b/logd/src/logd/CMakeLists.txt
@@ -8,8 +8,7 @@ vespa_add_library(logd STATIC
service.cpp
cmdbuf.cpp
perform.cpp
- sigterm.cpp
DEPENDS
)
vespa_generate_config(logd ../main/resources/configdefinitions/logd.def)
-install(FILES ../main/resources/configdefinitions/logd.def RENAME cloud.config.log.logd.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(../main/resources/configdefinitions/logd.def cloud.config.log.logd.def)
diff --git a/logd/src/logd/sigterm.cpp b/logd/src/logd/sigterm.cpp
deleted file mode 100644
index 8fe7665865f..00000000000
--- a/logd/src/logd/sigterm.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "sigterm.h"
-#include <vespa/vespalib/util/signalhandler.h>
-
-void hook_signals()
-{
- vespalib::SignalHandler::INT.hook();
- vespalib::SignalHandler::TERM.hook();
- vespalib::SignalHandler::PIPE.ignore();
-}
-
-bool gotSignaled()
-{
- return (vespalib::SignalHandler::INT.check() ||
- vespalib::SignalHandler::TERM.check());
-}
-
-int gotSignalNumber()
-{
- if (vespalib::SignalHandler::TERM.check()) {
- return SIGTERM;
- }
- if (vespalib::SignalHandler::INT.check()) {
- return SIGINT;
- }
- return 0;
-}
diff --git a/logd/src/logd/sigterm.h b/logd/src/logd/sigterm.h
deleted file mode 100644
index 79b0eb7b781..00000000000
--- a/logd/src/logd/sigterm.h
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#pragma once
-// setup for catching signals
-extern void hook_signals();
-extern bool gotSignaled();
-extern int gotSignalNumber();
diff --git a/logd/src/logd/watch.cpp b/logd/src/logd/watch.cpp
index 0558d0b4548..ad866938ab6 100644
--- a/logd/src/logd/watch.cpp
+++ b/logd/src/logd/watch.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "errhandle.h"
-#include "sigterm.h"
#include "service.h"
#include "forward.h"
#include "conf.h"
@@ -13,6 +12,7 @@
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/time.h>
+#include <vespa/vespalib/util/sig_catch.h>
LOG_SETUP("");
@@ -196,6 +196,7 @@ Watcher::watchfile()
_forwarder.sendMode();
+ vespalib::SigCatch catcher;
int sleepcount = 0;
time_t created = 0;
@@ -342,11 +343,11 @@ Watcher::watchfile()
}
}
- if (gotSignaled()) {
+ if (catcher.receivedStopSignal()) {
throw SigTermException("caught signal");
}
snooze(tickStart);
- if (gotSignaled()) {
+ if (catcher.receivedStopSignal()) {
throw SigTermException("caught signal");
}
if (++sleepcount > 99) {
diff --git a/logforwarder/CMakeLists.txt b/logforwarder/CMakeLists.txt
new file mode 100644
index 00000000000..bd1e480a074
--- /dev/null
+++ b/logforwarder/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_define_module(
+ DEPENDS
+ vespalog
+ vespalib
+ config_cloudconfig
+
+ APPS
+ src/apps/vespa-logforwarder-start
+)
diff --git a/logforwarder/OWNERS b/logforwarder/OWNERS
new file mode 100644
index 00000000000..67cd2820bb8
--- /dev/null
+++ b/logforwarder/OWNERS
@@ -0,0 +1 @@
+arnej27959
diff --git a/logforwarder/README b/logforwarder/README
new file mode 100644
index 00000000000..30f5992157b
--- /dev/null
+++ b/logforwarder/README
@@ -0,0 +1,5 @@
+Utility for log forwarding management
+
+currently this only supports "splunk", and
+just saves splunk config in a file using the
+appropriate format.
diff --git a/logforwarder/src/apps/vespa-logforwarder-start/.gitignore b/logforwarder/src/apps/vespa-logforwarder-start/.gitignore
new file mode 100644
index 00000000000..bfa5aca99b1
--- /dev/null
+++ b/logforwarder/src/apps/vespa-logforwarder-start/.gitignore
@@ -0,0 +1 @@
+vespa-logforwarder-start
diff --git a/logforwarder/src/apps/vespa-logforwarder-start/CMakeLists.txt b/logforwarder/src/apps/vespa-logforwarder-start/CMakeLists.txt
new file mode 100644
index 00000000000..9a402bb58da
--- /dev/null
+++ b/logforwarder/src/apps/vespa-logforwarder-start/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(logforwarder-start_app
+ SOURCES
+ main.cpp
+ cf-handler.cpp
+ OUTPUT_NAME vespa-logforwarder-start
+ INSTALL bin
+ DEPENDS
+ config_cloudconfig
+ configdefinitions
+ vespalib
+)
diff --git a/logforwarder/src/apps/vespa-logforwarder-start/cf-handler.cpp b/logforwarder/src/apps/vespa-logforwarder-start/cf-handler.cpp
new file mode 100644
index 00000000000..e34a83030e8
--- /dev/null
+++ b/logforwarder/src/apps/vespa-logforwarder-start/cf-handler.cpp
@@ -0,0 +1,91 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "cf-handler.h"
+#include <dirent.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <vespa/defaults.h>
+#include <vespa/config/common/configsystem.h>
+#include <vespa/config/common/exceptions.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP(".cf-handler");
+
+CfHandler::CfHandler() : _subscriber() {}
+
+CfHandler::~CfHandler()
+{
+}
+
+void
+CfHandler::subscribe(const std::string & configId, uint64_t timeoutMS)
+{
+ _handle = _subscriber.subscribe<LogforwarderConfig>(configId, timeoutMS);
+}
+
+namespace {
+std::string
+cfFilePath() {
+ std::string path = vespa::Defaults::underVespaHome("var/db/vespa/splunk");
+ DIR *dp = opendir(path.c_str());
+ if (dp == NULL) {
+ if (errno != ENOENT || mkdir(path.c_str(), 0755) != 0) {
+ perror(path.c_str());
+ }
+ } else {
+ closedir(dp);
+ }
+ path += "/deploymentclient.conf";
+ return path;
+}
+}
+
+void
+CfHandler::doConfigure()
+{
+ std::unique_ptr<LogforwarderConfig> cfg(_handle->getConfig());
+ const LogforwarderConfig& config(*cfg);
+
+ std::string path = cfFilePath();
+ std::string tmpPath = path + ".new";
+ FILE *fp = fopen(tmpPath.c_str(), "w");
+ if (fp == NULL) return;
+
+ fprintf(fp, "[deployment-client]\n");
+ fprintf(fp, "clientName = %s\n", config.clientName.c_str());
+ fprintf(fp, "\n");
+ fprintf(fp, "[target-broker:deploymentServer]\n");
+ fprintf(fp, "targetUri = %s\n", config.deploymentServer.c_str());
+
+ fclose(fp);
+ rename(tmpPath.c_str(), path.c_str());
+}
+
+void
+CfHandler::check()
+{
+ if (_subscriber.nextConfig(0)) {
+ doConfigure();
+ }
+}
+
+constexpr uint64_t CONFIG_TIMEOUT_MS = 30 * 1000;
+
+void
+CfHandler::start(const char *configId)
+{
+ LOG(debug, "Reading configuration with id '%s'", configId);
+ try {
+ subscribe(configId, CONFIG_TIMEOUT_MS);
+ } catch (config::ConfigTimeoutException & ex) {
+ LOG(warning, "Timout getting config, please check your setup. Will exit and restart: %s", ex.getMessage().c_str());
+ exit(EXIT_FAILURE);
+ } catch (config::InvalidConfigException& ex) {
+ LOG(error, "Fatal: Invalid configuration, please check your setup: %s", ex.getMessage().c_str());
+ exit(EXIT_FAILURE);
+ } catch (config::ConfigRuntimeException& ex) {
+ LOG(error, "Fatal: Could not get config, please check your setup: %s", ex.getMessage().c_str());
+ exit(EXIT_FAILURE);
+ }
+}
diff --git a/logforwarder/src/apps/vespa-logforwarder-start/cf-handler.h b/logforwarder/src/apps/vespa-logforwarder-start/cf-handler.h
new file mode 100644
index 00000000000..99f0a6cd6d5
--- /dev/null
+++ b/logforwarder/src/apps/vespa-logforwarder-start/cf-handler.h
@@ -0,0 +1,20 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/config/config.h>
+#include <vespa/config-logforwarder.h>
+
+using cloud::config::LogforwarderConfig;
+
+class CfHandler {
+private:
+ config::ConfigSubscriber _subscriber;
+ config::ConfigHandle<LogforwarderConfig>::UP _handle;
+ void subscribe(const std::string & configId, uint64_t timeoutMS);
+ void doConfigure();
+public:
+ CfHandler();
+ virtual ~CfHandler();
+ void start(const char *configId);
+ void check();
+};
diff --git a/logforwarder/src/apps/vespa-logforwarder-start/main.cpp b/logforwarder/src/apps/vespa-logforwarder-start/main.cpp
new file mode 100644
index 00000000000..8fc74fcac8e
--- /dev/null
+++ b/logforwarder/src/apps/vespa-logforwarder-start/main.cpp
@@ -0,0 +1,38 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <csignal>
+#include <unistd.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP("vespa-logforwarder-start");
+
+#include "cf-handler.h"
+#include <vespa/vespalib/util/sig_catch.h>
+
+class Wrapper {
+ const char *_configId;
+public:
+ Wrapper(const char *cfid) : _configId(cfid) {}
+ void run() {
+ vespalib::SigCatch catcher;
+ CfHandler handler;
+ handler.start(_configId);
+ while (! catcher.receivedStopSignal()) {
+ handler.check();
+ usleep(12500); // Avoid busy looping;
+ }
+ }
+};
+
+int
+main(int argc, char** argv)
+{
+ int c = getopt(argc, argv, "c:");
+ if (c != 'c') {
+ LOG(error, "Usage: %s -c <config-id>", argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ Wrapper wrapper(optarg);
+ wrapper.run();
+ return 0;
+}
diff --git a/logserver/CMakeLists.txt b/logserver/CMakeLists.txt
new file mode 100644
index 00000000000..736878948fb
--- /dev/null
+++ b/logserver/CMakeLists.txt
@@ -0,0 +1,4 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(logserver)
+
+vespa_install_script(bin/logserver-start.sh vespa-logserver-start bin)
diff --git a/memfilepersistence/src/tests/spi/basicoperationhandlertest.cpp b/memfilepersistence/src/tests/spi/basicoperationhandlertest.cpp
index 8011255324b..07bff2038e0 100644
--- a/memfilepersistence/src/tests/spi/basicoperationhandlertest.cpp
+++ b/memfilepersistence/src/tests/spi/basicoperationhandlertest.cpp
@@ -5,8 +5,10 @@
#include "options_builder.h"
#include <vespa/document/fieldset/fieldsetrepo.h>
#include <vespa/document/fieldset/fieldsets.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/vdstestlib/cppunit/macros.h>
+using storage::spi::test::makeBucket;
namespace storage {
namespace memfile {
@@ -142,8 +144,7 @@ BasicOperationHandlerTest::testRemove()
Timestamp(2),
OperationHandler::PERSIST_REMOVE_IF_FOUND));
- getPersistenceProvider().flush(
- spi::Bucket(bucketId, spi::PartitionId(0)), context);
+ getPersistenceProvider().flush(makeBucket(bucketId), context);
env()._cache.clear();
@@ -177,8 +178,7 @@ BasicOperationHandlerTest::doTestRemoveWithNonMatchingTimestamp(
Timestamp(1233),
persistRemove));
- getPersistenceProvider().flush(
- spi::Bucket(bucketId, spi::PartitionId(0)), context);
+ getPersistenceProvider().flush(makeBucket(bucketId), context);
MemFilePtr file(getMemFile(bucketId));
CPPUNIT_ASSERT_EQUAL(
@@ -241,8 +241,7 @@ BasicOperationHandlerTest::testRemoveForExistingRemoveSameTimestamp()
Timestamp(1235),
OperationHandler::PERSIST_REMOVE_IF_FOUND));
- getPersistenceProvider().flush(
- spi::Bucket(bucketId, spi::PartitionId(0)), context);
+ getPersistenceProvider().flush(makeBucket(bucketId), context);
// Should only be one remove entry still
MemFilePtr file(getMemFile(bucketId));
@@ -272,8 +271,7 @@ BasicOperationHandlerTest::doTestRemoveForExistingRemoveNewTimestamp(
Timestamp(1236),
persistRemove));
- getPersistenceProvider().flush(
- spi::Bucket(bucketId, spi::PartitionId(0)), context);
+ getPersistenceProvider().flush(makeBucket(bucketId), context);
MemFilePtr file(getMemFile(bucketId));
CPPUNIT_ASSERT_EQUAL(
@@ -327,16 +325,14 @@ BasicOperationHandlerTest::testRemoveExistingOlderDocumentVersion()
Timestamp(1235),
OperationHandler::ALWAYS_PERSIST_REMOVE));
- getPersistenceProvider().flush(
- spi::Bucket(bucketId, spi::PartitionId(0)), context);
+ getPersistenceProvider().flush(makeBucket(bucketId), context);
CPPUNIT_ASSERT_EQUAL(true, doRemove(bucketId,
doc->getId(),
Timestamp(1234),
OperationHandler::ALWAYS_PERSIST_REMOVE));
- getPersistenceProvider().flush(
- spi::Bucket(bucketId, spi::PartitionId(0)), context);
+ getPersistenceProvider().flush(makeBucket(bucketId), context);
// Should now be two remove entries.
MemFilePtr file(getMemFile(bucketId));
@@ -366,8 +362,7 @@ BasicOperationHandlerTest::doTestRemoveDocumentNotFound(
Timestamp(1235),
persistRemove));
- getPersistenceProvider().flush(
- spi::Bucket(bucketId, spi::PartitionId(0)), context);
+ getPersistenceProvider().flush(makeBucket(bucketId), context);
MemFilePtr file(getMemFile(bucketId));
CPPUNIT_ASSERT_EQUAL(
@@ -581,12 +576,8 @@ BasicOperationHandlerTest::testRemoveEntry()
Document::SP doc = doPut(4, Timestamp(2345));
doPut(4, Timestamp(3456));
- getPersistenceProvider().removeEntry(
- spi::Bucket(bucketId, spi::PartitionId(0)),
- spi::Timestamp(1234), context);
- getPersistenceProvider().removeEntry(
- spi::Bucket(bucketId, spi::PartitionId(0)),
- spi::Timestamp(3456), context);
+ getPersistenceProvider().removeEntry(makeBucket(bucketId), spi::Timestamp(1234), context);
+ getPersistenceProvider().removeEntry(makeBucket(bucketId), spi::Timestamp(3456), context);
flush(bucketId);
memfile::MemFilePtr file(getMemFile(bucketId));
@@ -683,9 +674,7 @@ BasicOperationHandlerTest::testEraseFromCacheOnMaintainException()
std::unique_ptr<Environment::LazyFileFactory>(
new SimulatedFailureLazyFile::Factory);
- spi::Result result = getPersistenceProvider().maintain(
- spi::Bucket(bucketId, spi::PartitionId(0)),
- spi::HIGH);
+ spi::Result result = getPersistenceProvider().maintain(makeBucket(bucketId), spi::HIGH);
CPPUNIT_ASSERT(result.hasError());
CPPUNIT_ASSERT(result.getErrorMessage().find("A simulated I/O write")
!= vespalib::string::npos);
@@ -720,8 +709,7 @@ BasicOperationHandlerTest::testEraseFromCacheOnDeleteBucketException()
std::unique_ptr<Environment::LazyFileFactory>(factory);
// loadFile will fail
- spi::Result result = getPersistenceProvider().deleteBucket(
- spi::Bucket(bucketId, spi::PartitionId(0)), context);
+ spi::Result result = getPersistenceProvider().deleteBucket(makeBucket(bucketId), context);
CPPUNIT_ASSERT(result.hasError());
CPPUNIT_ASSERT(result.getErrorMessage().find("A simulated I/O read")
!= vespalib::string::npos);
diff --git a/memfilepersistence/src/tests/spi/iteratorhandlertest.cpp b/memfilepersistence/src/tests/spi/iteratorhandlertest.cpp
index 622625be864..6deabc3dfe1 100644
--- a/memfilepersistence/src/tests/spi/iteratorhandlertest.cpp
+++ b/memfilepersistence/src/tests/spi/iteratorhandlertest.cpp
@@ -4,8 +4,11 @@
#include <tests/spi/memfiletestutils.h>
#include <tests/spi/simulatedfailurefile.h>
#include <tests/spi/options_builder.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/document/select/parser.h>
+using storage::spi::test::makeBucket;
+
namespace storage {
namespace memfile {
namespace {
@@ -119,7 +122,7 @@ IteratorHandlerTest::createSelection(const std::string& docSel) const
void
IteratorHandlerTest::testCreateIterator()
{
- spi::Bucket b(BucketId(16, 1234), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 1234)));
spi::CreateIteratorResult iter1(create(b, createSelection("true")));
CPPUNIT_ASSERT_EQUAL(spi::IteratorId(1), iter1.getIteratorId());
@@ -287,7 +290,7 @@ IteratorHandlerTest::testSomeSlotsRemovedBetweenInvocations()
{
std::vector<DocAndTimestamp> docs = feedDocs(100, 4096, 4096);
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
spi::Selection sel(createSelection("true"));
spi::CreateIteratorResult iter(create(b, sel));
@@ -327,7 +330,7 @@ IteratorHandlerTest::testAllSlotsRemovedBetweenInvocations()
{
std::vector<DocAndTimestamp> docs = feedDocs(100, 4096, 4096);
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
spi::Selection sel(createSelection("true"));
spi::CreateIteratorResult iter(create(b, sel));
@@ -359,7 +362,7 @@ IteratorHandlerTest::testAllSlotsRemovedBetweenInvocations()
void
IteratorHandlerTest::testIterateMetadataOnly()
{
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
std::vector<DocAndTimestamp> docs = feedDocs(10);
CPPUNIT_ASSERT(
@@ -415,7 +418,7 @@ IteratorHandlerTest::testIterateHeadersOnly()
clearBody(*docs[i].first);
}
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
spi::Selection sel(createSelection("true"));
spi::CreateIteratorResult iter(create(b, sel, spi::NEWEST_DOCUMENT_ONLY,
@@ -436,7 +439,7 @@ IteratorHandlerTest::testIterateLargeDocument()
std::vector<DocAndTimestamp> largedoc;
largedoc.push_back(docs.back());
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
spi::Selection sel(createSelection("true"));
spi::CreateIteratorResult iter(create(b, sel));
@@ -455,7 +458,7 @@ IteratorHandlerTest::testDocumentsRemovedBetweenInvocations()
int docCount = 100;
std::vector<DocAndTimestamp> docs = feedDocs(docCount);
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
spi::Selection sel(createSelection("true"));
spi::CreateIteratorResult iter(create(b, sel));
@@ -497,7 +500,7 @@ IteratorHandlerTest::doTestUnrevertableRemoveBetweenInvocations(bool includeRemo
int docCount = 100;
std::vector<DocAndTimestamp> docs = feedDocs(docCount);
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
spi::Selection sel(createSelection("true"));
spi::CreateIteratorResult iter(
create(b, sel,
@@ -591,7 +594,7 @@ IteratorHandlerTest::testMatchTimestampRangeDocAltered()
OperationHandler::PERSIST_REMOVE_IF_FOUND));
flush(bucketId);
- spi::Bucket b(bucketId, spi::PartitionId(0));
+ spi::Bucket b(makeBucket(bucketId));
{
spi::Selection sel(createSelection("true"));
@@ -696,7 +699,7 @@ IteratorHandlerTest::testMatchTimestampRangeDocAltered()
void
IteratorHandlerTest::testIterateAllVersions()
{
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
std::vector<DocAndTimestamp> docs;
Document::SP originalDoc(createRandomDocumentAtLocation(
@@ -733,7 +736,7 @@ IteratorHandlerTest::testIterateAllVersions()
void
IteratorHandlerTest::testFieldSetFiltering()
{
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
Document::SP doc(createRandomDocumentAtLocation(
4, 1001, 110, 110));
doc->setValue(doc->getField("headerval"), document::IntFieldValue(42));
@@ -760,7 +763,7 @@ IteratorHandlerTest::testFieldSetFiltering()
void
IteratorHandlerTest::testIteratorInactiveOnException()
{
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
feedDocs(10);
env()._cache.clear();
@@ -787,7 +790,7 @@ IteratorHandlerTest::testIteratorInactiveOnException()
void
IteratorHandlerTest::testDocsCachedBeforeDocumentSelection()
{
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
std::vector<DocAndTimestamp> docs = feedDocs(100, 4096, 4096);
env()._cache.clear();
@@ -816,7 +819,7 @@ IteratorHandlerTest::testDocsCachedBeforeDocumentSelection()
void
IteratorHandlerTest::testTimestampRangeLimitedPrefetch()
{
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
// Feed docs with timestamp range [1000, 1100)
feedDocs(100, 4096, 4096);
@@ -904,7 +907,7 @@ IteratorHandlerTest::testCachePrefetchRequirements()
void
IteratorHandlerTest::testBucketEvictedFromCacheOnIterateException()
{
- spi::Bucket b(BucketId(16, 4), spi::PartitionId(0));
+ spi::Bucket b(makeBucket(BucketId(16, 4)));
feedDocs(10);
env()._cache.clear();
diff --git a/memfilepersistence/src/tests/spi/joinoperationhandlertest.cpp b/memfilepersistence/src/tests/spi/joinoperationhandlertest.cpp
index ce11667f75f..07c2bab5bae 100644
--- a/memfilepersistence/src/tests/spi/joinoperationhandlertest.cpp
+++ b/memfilepersistence/src/tests/spi/joinoperationhandlertest.cpp
@@ -2,8 +2,10 @@
#include "memfiletestutils.h"
#include <vespa/document/datatype/documenttype.h>
+#include <vespa/persistence/spi/test.h>
using document::DocumentType;
+using storage::spi::test::makeBucket;
namespace storage {
namespace memfile {
@@ -138,9 +140,9 @@ JoinOperationHandlerTest::doJoin(const document::BucketId to,
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
return getPersistenceProvider().join(
- spi::Bucket(from1, spi::PartitionId(0)),
- spi::Bucket(from2, spi::PartitionId(0)),
- spi::Bucket(to, spi::PartitionId(0)),
+ makeBucket(from1),
+ makeBucket(from2),
+ makeBucket(to),
context);
}
@@ -250,9 +252,9 @@ JoinOperationHandlerTest::testMultiDisk()
setupDisks(10);
feedMultiDisk();
- getPersistenceProvider().join(spi::Bucket(SOURCE2, spi::PartitionId(7)),
- spi::Bucket(SOURCE1, spi::PartitionId(4)),
- spi::Bucket(TARGET, spi::PartitionId(3)),
+ getPersistenceProvider().join(makeBucket(SOURCE2, spi::PartitionId(7)),
+ makeBucket(SOURCE1, spi::PartitionId(4)),
+ makeBucket(TARGET, spi::PartitionId(3)),
context);
CPPUNIT_ASSERT_EQUAL(
@@ -276,9 +278,9 @@ JoinOperationHandlerTest::testMultiDiskFlushed()
env()._cache.flushDirtyEntries();
env()._cache.clear();
- getPersistenceProvider().join(spi::Bucket(SOURCE2, spi::PartitionId(7)),
- spi::Bucket(SOURCE1, spi::PartitionId(4)),
- spi::Bucket(TARGET, spi::PartitionId(3)),
+ getPersistenceProvider().join(makeBucket(SOURCE2, spi::PartitionId(7)),
+ makeBucket(SOURCE1, spi::PartitionId(4)),
+ makeBucket(TARGET, spi::PartitionId(3)),
context);
CPPUNIT_ASSERT_EQUAL(
@@ -322,9 +324,9 @@ JoinOperationHandlerTest::testInternalJoin()
mon->overrideRealStat(512, 100000, 50000);
CPPUNIT_ASSERT(!mon->isFull(0, .80f));
- getPersistenceProvider().join(spi::Bucket(SOURCE1, spi::PartitionId(4)),
- spi::Bucket(SOURCE1, spi::PartitionId(4)),
- spi::Bucket(SOURCE1, spi::PartitionId(5)),
+ getPersistenceProvider().join(makeBucket(SOURCE1, spi::PartitionId(4)),
+ makeBucket(SOURCE1, spi::PartitionId(4)),
+ makeBucket(SOURCE1, spi::PartitionId(5)),
context);
env()._cache.clear();
@@ -368,9 +370,9 @@ JoinOperationHandlerTest::testInternalJoinDiskFull()
CPPUNIT_ASSERT(mon->isFull(0, .08f));
spi::Result result =
- getPersistenceProvider().join(spi::Bucket(SOURCE1, spi::PartitionId(4)),
- spi::Bucket(SOURCE1, spi::PartitionId(4)),
- spi::Bucket(SOURCE1, spi::PartitionId(5)),
+ getPersistenceProvider().join(makeBucket(SOURCE1, spi::PartitionId(4)),
+ makeBucket(SOURCE1, spi::PartitionId(4)),
+ makeBucket(SOURCE1, spi::PartitionId(5)),
context);
CPPUNIT_ASSERT(result.hasError());
diff --git a/memfilepersistence/src/tests/spi/memfileautorepairtest.cpp b/memfilepersistence/src/tests/spi/memfileautorepairtest.cpp
index 9984452caed..1bae9d929e3 100644
--- a/memfilepersistence/src/tests/spi/memfileautorepairtest.cpp
+++ b/memfilepersistence/src/tests/spi/memfileautorepairtest.cpp
@@ -4,6 +4,9 @@
#include <vespa/memfilepersistence/mapper/memfile_v1_serializer.h>
#include <vespa/memfilepersistence/mapper/memfile_v1_verifier.h>
#include <tests/spi/memfiletestutils.h>
+#include <vespa/persistence/spi/test.h>
+
+using storage::spi::test::makeBucket;
namespace storage {
namespace memfile {
@@ -223,7 +226,7 @@ MemFileAutoRepairTest::testRepairFailureInMaintainEvictsBucketFromCache()
prepareBucket(*this, *_file);
corruptBodyBlock();
spi::Result result(getPersistenceProvider().maintain(
- spi::Bucket(_bucket, spi::PartitionId(0)), spi::HIGH));
+ makeBucket(_bucket), spi::HIGH));
// File being successfully repaired does not constitute a failure of
// the maintain() call.
CPPUNIT_ASSERT_EQUAL(spi::Result::NONE, result.getErrorCode());
@@ -240,7 +243,7 @@ MemFileAutoRepairTest::testZeroLengthFileIsDeleted()
// No way to deal with zero-length files aside from deleting them.
spi::Result result(getPersistenceProvider().maintain(
- spi::Bucket(_bucket, spi::PartitionId(0)), spi::HIGH));
+ makeBucket(_bucket), spi::HIGH));
CPPUNIT_ASSERT_EQUAL(spi::Result::NONE, result.getErrorCode());
CPPUNIT_ASSERT(!env()._cache.contains(_bucket));
CPPUNIT_ASSERT(!vespalib::fileExists(_file->getPath()));
@@ -272,7 +275,7 @@ MemFileAutoRepairTest::assertDocumentIsSilentlyRemoved(
{
// Corrupted (truncated) slot should be transparently removed during
// loadFile and it should be as if it was never there!
- spi::Bucket spiBucket(bucket, spi::PartitionId(0));
+ spi::Bucket spiBucket(makeBucket(bucket));
spi::GetResult res(doGet(spiBucket, docId, document::AllFields()));
CPPUNIT_ASSERT_EQUAL(spi::Result::NONE, res.getErrorCode());
CPPUNIT_ASSERT(!res.hasDocument());
diff --git a/memfilepersistence/src/tests/spi/memfiletestutils.cpp b/memfilepersistence/src/tests/spi/memfiletestutils.cpp
index 28e8987a2fa..71a3e6a0999 100644
--- a/memfilepersistence/src/tests/spi/memfiletestutils.cpp
+++ b/memfilepersistence/src/tests/spi/memfiletestutils.cpp
@@ -7,11 +7,14 @@
#include <vespa/memfilepersistence/memfile/memfilecache.h>
#include <vespa/storageframework/defaultimplementation/memory/simplememorylogic.h>
#include <vespa/document/update/assignvalueupdate.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/util/exceptions.h>
#include <sys/time.h>
using document::DocumentType;
+using storage::spi::test::makeBucket;
+using storage::spi::test::makeBucketSpace;
namespace storage {
namespace memfile {
@@ -112,7 +115,7 @@ std::string
MemFileTestUtils::getModifiedBuckets()
{
spi::BucketIdListResult result(
- getPersistenceProvider().getModifiedBuckets());
+ getPersistenceProvider().getModifiedBuckets(makeBucketSpace()));
const spi::BucketIdListResult::List& list(result.getList());
std::ostringstream ss;
for (size_t i = 0; i < list.size(); ++i) {
@@ -136,7 +139,7 @@ MemFileTestUtils::flush(const document::BucketId& id, uint16_t disk)
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
return getPersistenceProvider().flush(
- spi::Bucket(id, spi::PartitionId(disk)), context);
+ makeBucket(id, spi::PartitionId(disk)), context);
}
document::Document::SP
@@ -152,7 +155,7 @@ MemFileTestUtils::doPutOnDisk(
document::Document::SP doc(createRandomDocumentAtLocation(
location, timestamp.getTime(), minSize, maxSize));
getPersistenceProvider().put(
- spi::Bucket(document::BucketId(16, location), spi::PartitionId(disk)),
+ makeBucket(document::BucketId(16, location), spi::PartitionId(disk)),
spi::Timestamp(timestamp.getTime()),
doc,
context);
@@ -171,14 +174,14 @@ MemFileTestUtils::doRemoveOnDisk(
spi::Trace::TraceLevel(0));
if (persistRemove == OperationHandler::PERSIST_REMOVE_IF_FOUND) {
spi::RemoveResult result = getPersistenceProvider().removeIfFound(
- spi::Bucket(bucketId, spi::PartitionId(disk)),
+ makeBucket(bucketId, spi::PartitionId(disk)),
spi::Timestamp(timestamp.getTime()),
docId,
context);
return result.wasFound();
}
spi::RemoveResult result = getPersistenceProvider().remove(
- spi::Bucket(bucketId, spi::PartitionId(disk)),
+ makeBucket(bucketId, spi::PartitionId(disk)),
spi::Timestamp(timestamp.getTime()),
docId,
context);
@@ -197,7 +200,7 @@ MemFileTestUtils::doUnrevertableRemoveOnDisk(
spi::Trace::TraceLevel(0));
spi::RemoveResult result =
getPersistenceProvider().remove(
- spi::Bucket(bucketId, spi::PartitionId(disk)),
+ makeBucket(bucketId, spi::PartitionId(disk)),
spi::Timestamp(timestamp.getTime()),
docId, context);
@@ -214,7 +217,7 @@ MemFileTestUtils::doGetOnDisk(
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
return getPersistenceProvider().get(
- spi::Bucket(bucketId, spi::PartitionId(disk)),
+ makeBucket(bucketId, spi::PartitionId(disk)),
fields, docId, context);
}
@@ -272,7 +275,7 @@ MemFileTestUtils::doPut(const document::Document::SP& doc,
{
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
- getPersistenceProvider().put(spi::Bucket(bid, spi::PartitionId(disk)),
+ getPersistenceProvider().put(makeBucket(bid, spi::PartitionId(disk)),
spi::Timestamp(time.getTime()), doc, context);
}
@@ -285,7 +288,7 @@ MemFileTestUtils::doUpdate(document::BucketId bid,
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
return getPersistenceProvider().update(
- spi::Bucket(bid, spi::PartitionId(disk)),
+ makeBucket(bid, spi::PartitionId(disk)),
spi::Timestamp(time.getTime()), update, context);
}
@@ -301,12 +304,12 @@ MemFileTestUtils::doRemove(const document::DocumentId& id, Timestamp time,
if (unrevertableRemove) {
getPersistenceProvider().remove(
- spi::Bucket(bucket, spi::PartitionId(disk)),
+ makeBucket(bucket, spi::PartitionId(disk)),
spi::Timestamp(time.getTime()),
id, context);
} else {
spi::RemoveResult result = getPersistenceProvider().removeIfFound(
- spi::Bucket(bucket, spi::PartitionId(disk)),
+ makeBucket(bucket, spi::PartitionId(disk)),
spi::Timestamp(time.getTime()),
id, context);
diff --git a/memfilepersistence/src/tests/spi/splitoperationhandlertest.cpp b/memfilepersistence/src/tests/spi/splitoperationhandlertest.cpp
index 43094d80928..98f722e749c 100644
--- a/memfilepersistence/src/tests/spi/splitoperationhandlertest.cpp
+++ b/memfilepersistence/src/tests/spi/splitoperationhandlertest.cpp
@@ -2,8 +2,10 @@
#include "memfiletestutils.h"
#include <vespa/document/datatype/documenttype.h>
+#include <vespa/persistence/spi/test.h>
using document::DocumentType;
+using storage::spi::test::makeBucket;
namespace storage {
namespace memfile {
@@ -60,9 +62,9 @@ SplitOperationHandlerTest::testSimple()
SplitOperationHandler handler(env());
spi::Result result = getPersistenceProvider().split(
- spi::Bucket(sourceBucket, spi::PartitionId(0)),
- spi::Bucket(target1, spi::PartitionId(0)),
- spi::Bucket(target2, spi::PartitionId(0)),
+ makeBucket(sourceBucket),
+ makeBucket(target1),
+ makeBucket(target2),
context);
env()._cache.clear();
@@ -116,9 +118,9 @@ SplitOperationHandlerTest::doTestMultiDisk(uint16_t sourceDisk,
SplitOperationHandler handler(env());
spi::Result result = getPersistenceProvider().split(
- spi::Bucket(sourceBucket, spi::PartitionId(sourceDisk)),
- spi::Bucket(target1, spi::PartitionId(targetDisk0)),
- spi::Bucket(target2, spi::PartitionId(targetDisk1)),
+ makeBucket(sourceBucket, spi::PartitionId(sourceDisk)),
+ makeBucket(target1, spi::PartitionId(targetDisk0)),
+ makeBucket(target2, spi::PartitionId(targetDisk1)),
context);
env()._cache.clear();
@@ -183,9 +185,9 @@ SplitOperationHandlerTest::testExceptionDuringSplittingEvictsAllBuckets()
try {
SplitOperationHandler handler(env());
spi::Result result = getPersistenceProvider().split(
- spi::Bucket(sourceBucket, spi::PartitionId(0)),
- spi::Bucket(target1, spi::PartitionId(0)),
- spi::Bucket(target2, spi::PartitionId(0)),
+ makeBucket(sourceBucket),
+ makeBucket(target1),
+ makeBucket(target2),
context);
CPPUNIT_FAIL("Exception not thrown on flush failure");
} catch (std::exception&) {
diff --git a/memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.cpp b/memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.cpp
index ab9a329e92a..e75df3fc29f 100644
--- a/memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.cpp
+++ b/memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.cpp
@@ -210,7 +210,8 @@ private:
void
MemFilePersistenceProvider::handleBucketCorruption(const FileSpecification& file) const
{
- spi::Bucket fixBucket(file.getBucketId(),
+ spi::Bucket fixBucket(document::Bucket(document::BucketSpace::placeHolder(),
+ file.getBucketId()),
spi::PartitionId(file.getDirectory().getIndex()));
// const_cast is nasty, but maintain() must necessarily be able to
@@ -410,7 +411,7 @@ MemFilePersistenceProvider::getPartitionStates() const
}
spi::BucketIdListResult
-MemFilePersistenceProvider::listBuckets(spi::PartitionId partition) const
+MemFilePersistenceProvider::listBuckets(BucketSpace, spi::PartitionId partition) const
{
spi::BucketIdListResult::List buckets;
_fileScanner->buildBucketList(buckets, partition, 0, 1);
@@ -418,7 +419,7 @@ MemFilePersistenceProvider::listBuckets(spi::PartitionId partition) const
}
spi::BucketIdListResult
-MemFilePersistenceProvider::getModifiedBuckets() const
+MemFilePersistenceProvider::getModifiedBuckets(BucketSpace) const
{
document::BucketId::List modified;
_env->swapModifiedBuckets(modified); // Atomic op
diff --git a/memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.h b/memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.h
index 28af44c504e..f706fabc20c 100644
--- a/memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.h
+++ b/memfilepersistence/src/vespa/memfilepersistence/spi/memfilepersistenceprovider.h
@@ -41,8 +41,8 @@ public:
~MemFilePersistenceProvider();
spi::PartitionStateListResult getPartitionStates() const override;
- spi::BucketIdListResult listBuckets(spi::PartitionId) const override;
- spi::BucketIdListResult getModifiedBuckets() const override;
+ spi::BucketIdListResult listBuckets(BucketSpace bucketSpace, spi::PartitionId) const override;
+ spi::BucketIdListResult getModifiedBuckets(BucketSpace bucketSpace) const override;
spi::BucketInfoResult getBucketInfo(const spi::Bucket&) const override;
spi::Result put(const spi::Bucket&, spi::Timestamp,
const spi::DocumentSP&, spi::Context&) override;
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java b/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java
index 96a0a5fecef..bb0b7bdd878 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java
@@ -67,7 +67,7 @@ public class RPCNetwork implements Network, MethodHandler {
private NetworkOwner owner;
private final SlobrokConfigSubscriber slobroksConfig;
private final LinkedHashMap<String, Route> lruRouteMap = new LinkedHashMap<>(10000, 0.5f, true);
- private final ExecutorService sendService =
+ private final ExecutorService executor =
new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(), Runtime.getRuntime().availableProcessors(),
0L, TimeUnit.SECONDS,
new SynchronousQueue<>(false),
@@ -258,7 +258,7 @@ public class RPCNetwork implements Network, MethodHandler {
} else if (ctx.hasError) {
replyError(ctx, ErrorCode.HANDSHAKE_FAILED, "An error occured while resolving version.");
} else {
- sendService.execute(new SendTask(owner.getProtocol(ctx.msg.getProtocol()), ctx));
+ executor.execute(new SendTask(owner.getProtocol(ctx.msg.getProtocol()), ctx));
}
}
@@ -278,7 +278,7 @@ public class RPCNetwork implements Network, MethodHandler {
listener.shutdown().join();
orb.transport().shutdown().join();
targetPool.flushTargets(true);
- sendService.shutdown();
+ executor.shutdown();
return true;
}
return false;
@@ -411,6 +411,10 @@ public class RPCNetwork implements Network, MethodHandler {
return oosManager;
}
+ ExecutorService getExecutor() {
+ return executor;
+ }
+
private class SendTask implements Runnable {
final Protocol protocol;
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCSend.java b/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCSend.java
index d7b4887bd36..daa31ae2701 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCSend.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCSend.java
@@ -107,6 +107,10 @@ public abstract class RPCSend implements MethodHandler, ReplyHandler, RequestWai
@Override
public final void handleRequestDone(Request req) {
+ net.getExecutor().execute(() -> doRequestDone(req));
+ }
+
+ private void doRequestDone(Request req) {
SendContext ctx = (SendContext)req.getContext();
String serviceName = ((RPCServiceAddress)ctx.recipient.getServiceAddress()).getServiceName();
Reply reply = null;
@@ -157,6 +161,10 @@ public abstract class RPCSend implements MethodHandler, ReplyHandler, RequestWai
@Override
public final void invoke(Request request) {
request.detach();
+ net.getExecutor().execute(() -> doInvoke(request));
+ }
+
+ private void doInvoke(Request request) {
Params p = toParams(request.parameters());
request.discardParameters(); // allow garbage collection of request parameters
diff --git a/messagebus/src/tests/protocolrepository/protocolrepository.cpp b/messagebus/src/tests/protocolrepository/protocolrepository.cpp
index b2454eb272a..b6178449918 100644
--- a/messagebus/src/tests/protocolrepository/protocolrepository.cpp
+++ b/messagebus/src/tests/protocolrepository/protocolrepository.cpp
@@ -48,6 +48,7 @@ public:
(void)data;
throw std::exception();
}
+ bool requireSequencing() const override { return false; }
};
int
diff --git a/messagebus/src/tests/throttling/throttling.cpp b/messagebus/src/tests/throttling/throttling.cpp
index 279f31201c4..5d3525e8ba6 100644
--- a/messagebus/src/tests/throttling/throttling.cpp
+++ b/messagebus/src/tests/throttling/throttling.cpp
@@ -197,6 +197,7 @@ Test::testMaxPendingSize()
EXPECT_EQUAL(2u, SimpleMessage("12").getApproxSize());
EXPECT_TRUE(ss->send(Message::UP(new SimpleMessage("1")), "dst").isAccepted());
+ EXPECT_TRUE(waitQueueSize(dstQ, 1));
EXPECT_TRUE(ss->send(Message::UP(new SimpleMessage("12")), "dst").isAccepted());
EXPECT_TRUE(!ss->send(Message::UP(new SimpleMessage("1")), "dst").isAccepted());
diff --git a/messagebus/src/vespa/messagebus/CMakeLists.txt b/messagebus/src/vespa/messagebus/CMakeLists.txt
index 7c922f36a8c..9ff3bae67c8 100644
--- a/messagebus/src/vespa/messagebus/CMakeLists.txt
+++ b/messagebus/src/vespa/messagebus/CMakeLists.txt
@@ -37,4 +37,4 @@ vespa_add_library(messagebus
DEPENDS
)
vespa_generate_config(messagebus ../../main/config/messagebus.def)
-install(FILES ../../main/config/messagebus.def RENAME messagebus.messagebus.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(../../main/config/messagebus.def messagebus.messagebus.def)
diff --git a/messagebus/src/vespa/messagebus/emptyreply.cpp b/messagebus/src/vespa/messagebus/emptyreply.cpp
index d87cffe52d5..4e858f4332d 100644
--- a/messagebus/src/vespa/messagebus/emptyreply.cpp
+++ b/messagebus/src/vespa/messagebus/emptyreply.cpp
@@ -21,9 +21,4 @@ EmptyReply::getType() const {
return 0;
}
-Blob
-EmptyReply::encode() const {
- return Blob(0);
-}
-
} // namespace mbus
diff --git a/messagebus/src/vespa/messagebus/emptyreply.h b/messagebus/src/vespa/messagebus/emptyreply.h
index 648598201b2..909fb197697 100644
--- a/messagebus/src/vespa/messagebus/emptyreply.h
+++ b/messagebus/src/vespa/messagebus/emptyreply.h
@@ -2,7 +2,6 @@
#pragma once
#include "reply.h"
-#include "blob.h"
namespace mbus {
@@ -16,34 +15,10 @@ namespace mbus {
*/
class EmptyReply : public Reply {
public:
- /**
- * Constructs a new instance of this class.
- */
EmptyReply();
-
- /**
- * This method returns the empty string to signal that it does not belong to
- * a protocol.
- *
- * @return ""
- */
const string & getProtocol() const override;
-
- /**
- * This method returns the message type id reserved for empty replies: 0
- *
- * @return 0
- */
uint32_t getType() const override;
-
- /**
- * Encodes this reply into an empty blob.
- *
- * @return empty blob
- */
- Blob encode() const;
-
uint8_t priority() const override { return 8; }
};
-} // namespace mbus
+}
diff --git a/messagebus/src/vespa/messagebus/iprotocol.h b/messagebus/src/vespa/messagebus/iprotocol.h
index 40cfc779c36..8a4129d1976 100644
--- a/messagebus/src/vespa/messagebus/iprotocol.h
+++ b/messagebus/src/vespa/messagebus/iprotocol.h
@@ -52,8 +52,7 @@ public:
* @param param Ppolicy specific parameter.
* @return A newly created routing policy.
*/
- virtual IRoutingPolicy::UP createPolicy(const string &name,
- const string &param) const = 0;
+ virtual IRoutingPolicy::UP createPolicy(const string &name, const string &param) const = 0;
/**
* Encodes the protocol specific data of a routable into a byte array.
@@ -80,6 +79,9 @@ public:
* @return The decoded routable.
*/
virtual Routable::UP decode(const vespalib::Version &version, BlobRef data) const = 0; // throw()
+
+
+ virtual bool requireSequencing() const = 0;
};
} // namespace mbus
diff --git a/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp b/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp
index e5c292fedd9..8ff7ac87edc 100644
--- a/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp
+++ b/messagebus/src/vespa/messagebus/network/rpcnetwork.cpp
@@ -15,6 +15,7 @@
#include <vespa/slobrok/sbmirror.h>
#include <vespa/vespalib/component/vtag.h>
#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/fnet/scheduler.h>
#include <vespa/fnet/transport.h>
#include <vespa/fnet/frt/supervisor.h>
@@ -119,6 +120,7 @@ RPCNetwork::RPCNetwork(const RPCNetworkParams &params) :
_regAPI(std::make_unique<slobrok::api::RegisterAPI>(*_orb, *_slobrokCfgFactory)),
_oosManager(std::make_unique<OOSManager>(*_orb, *_mirror, params.getOOSServerPattern())),
_requestedPort(params.getListenPort()),
+ _executor(std::make_unique<vespalib::ThreadStackExecutor>(4,65536)),
_sendV1(std::make_unique<RPCSendV1>()),
_sendV2(std::make_unique<RPCSendV2>()),
_sendAdapters(),
@@ -222,7 +224,10 @@ RPCNetwork::start()
return true;
}
-
+vespalib::Executor &
+RPCNetwork::getExecutor() {
+ return *_executor;
+}
bool
RPCNetwork::waitUntilReady(double seconds) const
@@ -371,6 +376,7 @@ void
RPCNetwork::sync()
{
SyncTask task(_scheduler);
+ _executor->sync();
task.await();
}
@@ -379,6 +385,8 @@ RPCNetwork::shutdown()
{
_transport->ShutDown(false);
_threadPool->Close();
+ _executor->shutdown();
+ _executor->sync();
}
void
diff --git a/messagebus/src/vespa/messagebus/network/rpcnetwork.h b/messagebus/src/vespa/messagebus/network/rpcnetwork.h
index 5e762f1a2a9..13fab018c3b 100644
--- a/messagebus/src/vespa/messagebus/network/rpcnetwork.h
+++ b/messagebus/src/vespa/messagebus/network/rpcnetwork.h
@@ -60,24 +60,25 @@ private:
using SendAdapterMap = std::map<vespalib::Version, RPCSendAdapter*>;
- INetworkOwner *_owner;
- Identity _ident;
- std::unique_ptr<FastOS_ThreadPool> _threadPool;
- std::unique_ptr<FNET_Transport> _transport;
- std::unique_ptr<FRT_Supervisor> _orb;
- FNET_Scheduler &_scheduler;
- std::unique_ptr<RPCTargetPool> _targetPool;
- TargetPoolTask _targetPoolTask;
- std::unique_ptr<RPCServicePool> _servicePool;
- std::unique_ptr<slobrok::ConfiguratorFactory> _slobrokCfgFactory;
- std::unique_ptr<slobrok::api::IMirrorAPI> _mirror;
- std::unique_ptr<slobrok::api::RegisterAPI> _regAPI;
- std::unique_ptr<OOSManager> _oosManager;
- int _requestedPort;
- std::unique_ptr<RPCSendAdapter> _sendV1;
- std::unique_ptr<RPCSendAdapter> _sendV2;
- SendAdapterMap _sendAdapters;
- CompressionConfig _compressionConfig;
+ INetworkOwner *_owner;
+ Identity _ident;
+ std::unique_ptr<FastOS_ThreadPool> _threadPool;
+ std::unique_ptr<FNET_Transport> _transport;
+ std::unique_ptr<FRT_Supervisor> _orb;
+ FNET_Scheduler &_scheduler;
+ std::unique_ptr<RPCTargetPool> _targetPool;
+ TargetPoolTask _targetPoolTask;
+ std::unique_ptr<RPCServicePool> _servicePool;
+ std::unique_ptr<slobrok::ConfiguratorFactory> _slobrokCfgFactory;
+ std::unique_ptr<slobrok::api::IMirrorAPI> _mirror;
+ std::unique_ptr<slobrok::api::RegisterAPI> _regAPI;
+ std::unique_ptr<OOSManager> _oosManager;
+ int _requestedPort;
+ std::unique_ptr<vespalib::ThreadStackExecutor> _executor;
+ std::unique_ptr<RPCSendAdapter> _sendV1;
+ std::unique_ptr<RPCSendAdapter> _sendV2;
+ SendAdapterMap _sendAdapters;
+ CompressionConfig _compressionConfig;
/**
* Resolves and assigns a service address for the given recipient using the
@@ -235,6 +236,7 @@ public:
const slobrok::api::IMirrorAPI &getMirror() const override;
CompressionConfig getCompressionConfig() { return _compressionConfig; }
void invoke(FRT_RPCRequest *req);
+ vespalib::Executor & getExecutor();
};
} // namespace mbus
diff --git a/messagebus/src/vespa/messagebus/network/rpcsend.cpp b/messagebus/src/vespa/messagebus/network/rpcsend.cpp
index 705b8648442..e23f4dc29d9 100644
--- a/messagebus/src/vespa/messagebus/network/rpcsend.cpp
+++ b/messagebus/src/vespa/messagebus/network/rpcsend.cpp
@@ -7,13 +7,15 @@
#include <vespa/messagebus/tracelevel.h>
#include <vespa/messagebus/emptyreply.h>
#include <vespa/messagebus/errorcode.h>
-#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/fnet/channel.h>
#include <vespa/fnet/frt/reflection.h>
+#include <vespa/vespalib/util/stringfmt.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/data/slime/cursor.h>
using vespalib::make_string;
+using vespalib::makeLambdaTask;
namespace mbus {
@@ -146,6 +148,11 @@ RPCSend::send(RoutingNode &recipient, const vespalib::Version &version,
void
RPCSend::RequestDone(FRT_RPCRequest *req)
{
+ doRequestDone(req);
+}
+
+void
+RPCSend::doRequestDone(FRT_RPCRequest *req) {
SendContext::UP ctx(static_cast<SendContext*>(req->GetContext()._value.VOIDP));
const string &serviceName = static_cast<RPCServiceAddress&>(ctx->getRecipient().getServiceAddress()).getServiceName();
Reply::UP reply;
@@ -213,6 +220,19 @@ RPCSend::decode(vespalib::stringref protocolName, const vespalib::Version & vers
void
RPCSend::handleReply(Reply::UP reply)
{
+ const IProtocol * protocol = _net->getOwner().getProtocol(reply->getProtocol());
+ if (!protocol || protocol->requireSequencing()) {
+ doHandleReply(protocol, std::move(reply));
+ } else {
+ auto rejected = _net->getExecutor().execute(makeLambdaTask([this, protocol, reply = std::move(reply)]() mutable {
+ doHandleReply(protocol, std::move(reply));
+ }));
+ assert (!rejected);
+ }
+}
+
+void
+RPCSend::doHandleReply(const IProtocol * protocol, Reply::UP reply) {
ReplyContext::UP ctx(static_cast<ReplyContext*>(reply->getContext().value.PTR));
FRT_RPCRequest &req = ctx->getRequest();
string version = ctx->getVersion().toString();
@@ -222,7 +242,7 @@ RPCSend::handleReply(Reply::UP reply)
}
Blob payload(0);
if (reply->getType() != 0) {
- payload = _net->getOwner().getProtocol(reply->getProtocol())->encode(ctx->getVersion(), *reply);
+ payload = protocol->encode(ctx->getVersion(), *reply);
if (payload.size() == 0) {
reply->addError(Error(ErrorCode::ENCODE_ERROR, "An error occured while encoding the reply, see log."));
}
@@ -236,6 +256,12 @@ void
RPCSend::invoke(FRT_RPCRequest *req)
{
req->Detach();
+ doRequest(req);
+}
+
+void
+RPCSend::doRequest(FRT_RPCRequest *req)
+{
FRT_Values &args = *req->GetParams();
std::unique_ptr<Params> params = toParams(args);
diff --git a/messagebus/src/vespa/messagebus/network/rpcsend.h b/messagebus/src/vespa/messagebus/network/rpcsend.h
index c707b47f548..11a042b91c0 100644
--- a/messagebus/src/vespa/messagebus/network/rpcsend.h
+++ b/messagebus/src/vespa/messagebus/network/rpcsend.h
@@ -19,6 +19,7 @@ class Error;
class Route;
class Message;
class RPCServiceAddress;
+class IProtocol;
class PayLoadFiller
{
@@ -82,6 +83,9 @@ public:
void invoke(FRT_RPCRequest *req);
private:
+ void doRequest(FRT_RPCRequest *req);
+ void doRequestDone(FRT_RPCRequest *req);
+ void doHandleReply(const IProtocol * protocol, std::unique_ptr<Reply> reply);
void attach(RPCNetwork &net) final override;
void handleDiscard(Context ctx) final override;
void sendByHandover(RoutingNode &recipient, const vespalib::Version &version,
diff --git a/messagebus/src/vespa/messagebus/routable.h b/messagebus/src/vespa/messagebus/routable.h
index 48154839994..50cb4e090ce 100644
--- a/messagebus/src/vespa/messagebus/routable.h
+++ b/messagebus/src/vespa/messagebus/routable.h
@@ -171,4 +171,3 @@ public:
};
} // namespace mbus
-
diff --git a/messagebus/src/vespa/messagebus/testlib/simpleprotocol.cpp b/messagebus/src/vespa/messagebus/testlib/simpleprotocol.cpp
index 0be33033538..29136a276bd 100644
--- a/messagebus/src/vespa/messagebus/testlib/simpleprotocol.cpp
+++ b/messagebus/src/vespa/messagebus/testlib/simpleprotocol.cpp
@@ -5,8 +5,6 @@
#include "simplereply.h"
#include <vespa/messagebus/emptyreply.h>
#include <vespa/messagebus/routing/routingcontext.h>
-#include <vespa/messagebus/routing/routingnodeiterator.h>
-#include <cstdlib>
namespace mbus {
diff --git a/messagebus/src/vespa/messagebus/testlib/simpleprotocol.h b/messagebus/src/vespa/messagebus/testlib/simpleprotocol.h
index 8931e1b46f9..09e1ee9febe 100644
--- a/messagebus/src/vespa/messagebus/testlib/simpleprotocol.h
+++ b/messagebus/src/vespa/messagebus/testlib/simpleprotocol.h
@@ -72,6 +72,7 @@ public:
IRoutingPolicy::UP createPolicy(const string &name, const string &param) const override;
Blob encode(const vespalib::Version &version, const Routable &routable) const override;
Routable::UP decode(const vespalib::Version &version, BlobRef data) const override;
+ virtual bool requireSequencing() const override { return false; }
};
} // namespace mbus
diff --git a/metrics/src/tests/metricmanagertest.cpp b/metrics/src/tests/metricmanagertest.cpp
index a086cf70ca9..e9934b6dbbb 100644
--- a/metrics/src/tests/metricmanagertest.cpp
+++ b/metrics/src/tests/metricmanagertest.cpp
@@ -779,7 +779,7 @@ void MetricManagerTest::testJsonOutput()
using namespace vespalib::slime;
vespalib::Slime slime;
size_t parsed = JsonFormat::decode(vespalib::Memory(jsonData), slime);
- if (jsonData.size() != parsed) {
+ if (parsed == 0) {
vespalib::SimpleBuffer buffer;
JsonFormat::encode(slime, buffer, false);
std::ostringstream ost;
diff --git a/metrics/src/vespa/metrics/CMakeLists.txt b/metrics/src/vespa/metrics/CMakeLists.txt
index 27a1f25e887..6eae8cd75e4 100644
--- a/metrics/src/vespa/metrics/CMakeLists.txt
+++ b/metrics/src/vespa/metrics/CMakeLists.txt
@@ -24,4 +24,4 @@ vespa_add_library(metrics
DEPENDS
)
vespa_generate_config(metrics metricsmanager.def)
-install(FILES metricsmanager.def RENAME metrics.metricsmanager.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(metricsmanager.def metrics.metricsmanager.def)
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
index cac924cbf18..a3a647e1d14 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
@@ -399,7 +399,7 @@ public class StorageMaintainer {
}
void updateNextHandleCoredumpsTime() {
- nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofHours(1));
+ nextHandleOldCoredumpsAt = clock.instant().plus(Duration.ofMinutes(5));
}
boolean shouldHandleCoredumpsNow() {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java
index b202e615341..2947ef68ba4 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainer.java
@@ -18,6 +18,7 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
+import java.util.stream.Stream;
/**
* The responsibility of this class is to configure ACLs for all running containers. The ACLs are fetched from the Node
@@ -63,15 +64,13 @@ public class AclMaintainer implements Runnable {
}
final Command flush = new FlushCommand(Chain.INPUT);
final Command rollback = new PolicyCommand(Chain.INPUT, Action.ACCEPT);
- log.info("Start modifying ACL rules for " + containerName.asString());
try {
- log.info("Running ACL command '" + flush.asString() + "'");
- dockerOperations.executeCommandInNetworkNamespace(containerName, flush.asArray(IPTABLES_COMMAND));
- acl.toCommands().forEach(command -> {
- log.info("Running ACL command '" + command.asString() + "' for " + containerName.asString());
- dockerOperations.executeCommandInNetworkNamespace(containerName,
- command.asArray(IPTABLES_COMMAND));
- });
+ String commands = Stream.concat(Stream.of(flush), acl.toCommands().stream())
+ .map(command -> command.asString(IPTABLES_COMMAND))
+ .collect(Collectors.joining("; "));
+
+ log.debug("Running ACL command '" + commands + "' in " + containerName.asString());
+ dockerOperations.executeCommandInNetworkNamespace(containerName, "/bin/sh", "-c", commands);
containerAcls.put(containerName, acl);
} catch (Exception e) {
log.error("Exception occurred while configuring ACLs for " + containerName.asString() + ", attempting rollback", e);
@@ -81,7 +80,6 @@ public class AclMaintainer implements Runnable {
log.error("Rollback of ACLs for " + containerName.asString() + " failed, giving up", ne);
}
}
- log.info("Finished modifying ACL rules for " + containerName.asString());
}
private synchronized void configureAcls() {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java
index cf70963eee1..6bc3b6d2a46 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdmin.java
@@ -60,6 +60,11 @@ public interface NodeAdmin {
Map<String, Object> debugInfo();
/**
+ * Start node-admin schedulers.
+ */
+ void start();
+
+ /**
* Stop the NodeAgent. Will not delete the storage or stop the container.
*/
void stop();
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
index f227a166034..ddb04f1249d 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
@@ -47,6 +47,7 @@ public class NodeAdminImpl implements NodeAdmin {
private final DockerOperations dockerOperations;
private final Function<String, NodeAgent> nodeAgentFactory;
private final StorageMaintainer storageMaintainer;
+ private final AclMaintainer aclMaintainer;
private final Clock clock;
private boolean previousWantFrozen;
@@ -67,6 +68,7 @@ public class NodeAdminImpl implements NodeAdmin {
this.dockerOperations = dockerOperations;
this.nodeAgentFactory = nodeAgentFactory;
this.storageMaintainer = storageMaintainer;
+ this.aclMaintainer = aclMaintainer;
this.clock = clock;
this.previousWantFrozen = true;
@@ -76,18 +78,6 @@ public class NodeAdminImpl implements NodeAdmin {
Dimensions dimensions = new Dimensions.Builder().add("role", "docker").build();
this.numberOfContainersInLoadImageState = metricReceiver.declareGauge(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.image.loading");
this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter(MetricReceiverWrapper.APPLICATION_DOCKER, dimensions, "nodes.unhandled_exceptions");
-
- metricsScheduler.scheduleAtFixedRate(() -> {
- try {
- nodeAgents.values().forEach(NodeAgent::updateContainerNodeMetrics);
- } catch (Throwable e) {
- logger.warning("Metric fetcher scheduler failed", e);
- }
- }, 0, 55, TimeUnit.SECONDS);
-
- aclScheduler.scheduleWithFixedDelay(() -> {
- if (!isFrozen()) aclMaintainer.run();
- }, 30, 60, TimeUnit.SECONDS);
}
@Override
@@ -179,6 +169,21 @@ public class NodeAdminImpl implements NodeAdmin {
}
@Override
+ public void start() {
+ metricsScheduler.scheduleAtFixedRate(() -> {
+ try {
+ nodeAgents.values().forEach(NodeAgent::updateContainerNodeMetrics);
+ } catch (Throwable e) {
+ logger.warning("Metric fetcher scheduler failed", e);
+ }
+ }, 0, 55, TimeUnit.SECONDS);
+
+ aclScheduler.scheduleWithFixedDelay(() -> {
+ if (!isFrozen()) aclMaintainer.run();
+ }, 30, 60, TimeUnit.SECONDS);
+ }
+
+ @Override
public void stop() {
metricsScheduler.shutdown();
aclScheduler.shutdown();
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
index d1f23b13e6c..e5e19ff69e4 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdater.java
@@ -2,6 +2,9 @@
package com.yahoo.vespa.hosted.node.admin.nodeadmin;
import com.yahoo.concurrent.ThreadFactoryFactory;
+import com.yahoo.concurrent.classlock.ClassLock;
+import com.yahoo.concurrent.classlock.ClassLocking;
+import com.yahoo.concurrent.classlock.LockInterruptException;
import com.yahoo.log.LogLevel;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
@@ -9,6 +12,7 @@ import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAttributes;
import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator;
import com.yahoo.vespa.hosted.node.admin.orchestrator.OrchestratorException;
+import com.yahoo.vespa.hosted.node.admin.util.HttpException;
import com.yahoo.vespa.hosted.provision.Node;
import java.io.IOException;
@@ -19,6 +23,7 @@ import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
+import java.util.Optional;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
@@ -48,16 +53,17 @@ public class NodeAdminStateUpdater {
private final Logger log = Logger.getLogger(NodeAdminStateUpdater.class.getName());
private final ScheduledExecutorService specVerifierScheduler =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("specverifier"));
- private Thread loopThread;
+ private final Thread loopThread;
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
- private final StorageMaintainer storageMaintainer;
private final NodeAdmin nodeAdmin;
private final Clock clock;
private final String dockerHostHostName;
private final Duration nodeAdminConvergeStateInterval;
+ private final ClassLocking classLocking;
+ private Optional<ClassLock> classLock;
private Instant lastTick;
public NodeAdminStateUpdater(
@@ -67,15 +73,41 @@ public class NodeAdminStateUpdater {
NodeAdmin nodeAdmin,
String dockerHostHostName,
Clock clock,
- Duration nodeAdminConvergeStateInterval) {
+ Duration nodeAdminConvergeStateInterval,
+ ClassLocking classLocking) {
+ log.info(objectToString() + ": Creating object");
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
- this.storageMaintainer = storageMaintainer;
this.nodeAdmin = nodeAdmin;
this.dockerHostHostName = dockerHostHostName;
this.clock = clock;
this.nodeAdminConvergeStateInterval = nodeAdminConvergeStateInterval;
+ this.classLocking = classLocking;
this.lastTick = clock.instant();
+
+ this.loopThread = new Thread(() -> {
+ log.info(objectToString() + ": Acquiring lock");
+ try {
+ classLock = Optional.of(classLocking.lockWhile(NodeAdminStateUpdater.class, () -> !terminated.get()));
+ } catch (LockInterruptException e) {
+ classLock = Optional.empty();
+ return;
+ }
+
+ log.info(objectToString() + ": Starting threads and schedulers");
+ nodeAdmin.start();
+ specVerifierScheduler.scheduleWithFixedDelay(() ->
+ updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES);
+
+ while (! terminated.get()) {
+ tick();
+ }
+ });
+ this.loopThread.setName("tick-NodeAdminStateUpdater");
+ }
+
+ private String objectToString() {
+ return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this));
}
public enum State { RESUMED, SUSPENDED_NODE_ADMIN, SUSPENDED}
@@ -147,7 +179,7 @@ public class NodeAdminStateUpdater {
try {
convergeState(wantedStateCopy);
- } catch (OrchestratorException | ConvergenceException e) {
+ } catch (OrchestratorException | ConvergenceException | HttpException e) {
log.info("Unable to converge to " + wantedStateCopy + ": " + e.getMessage());
} catch (Exception e) {
log.log(LogLevel.ERROR, "Error while trying to converge to " + wantedStateCopy, e);
@@ -257,28 +289,20 @@ public class NodeAdminStateUpdater {
}
public void start() {
- if (loopThread != null) {
- throw new RuntimeException("Can not restart NodeAdminStateUpdater");
- }
-
- loopThread = new Thread(() -> {
- while (! terminated.get()) tick();
- });
- loopThread.setName("tick-NodeAdminStateUpdater");
loopThread.start();
-
- specVerifierScheduler.scheduleWithFixedDelay(() ->
- updateHardwareDivergence(storageMaintainer), 5, 60, TimeUnit.MINUTES);
}
public void stop() {
- specVerifierScheduler.shutdown();
+ log.info(objectToString() + ": Stop called");
if (!terminated.compareAndSet(false, true)) {
throw new RuntimeException("Can not re-stop a node agent.");
}
+ classLocking.interrupt();
+
// First we need to stop NodeAdminStateUpdater thread to make sure no new NodeAgents are spawned
signalWorkToBeDone();
+ specVerifierScheduler.shutdown();
do {
try {
@@ -291,5 +315,11 @@ public class NodeAdminStateUpdater {
// Finally, stop NodeAdmin and all the NodeAgents
nodeAdmin.stop();
+
+ classLock.ifPresent(lock -> {
+ log.info(objectToString() + ": Releasing lock");
+ lock.close();
+ });
+ log.info(objectToString() + ": Stop complete");
}
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index 77348a9dc45..453012e9791 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -81,7 +81,7 @@ public class NodeAgentImpl implements NodeAgent {
private int numberOfUnhandledException = 0;
private Instant lastConverge;
- private Thread loopThread;
+ private final Thread loopThread;
private final ScheduledExecutorService filebeatRestarter =
Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
@@ -131,6 +131,11 @@ public class NodeAgentImpl implements NodeAgent {
this.clock = clock;
this.timeBetweenEachConverge = timeBetweenEachConverge;
this.lastConverge = clock.instant();
+
+ this.loopThread = new Thread(() -> {
+ while (!terminated.get()) tick();
+ });
+ this.loopThread.setName("tick-" + hostname);
}
@Override
@@ -178,14 +183,6 @@ public class NodeAgentImpl implements NodeAgent {
logger.info(message);
addDebugMessage(message);
- if (loopThread != null) {
- throw new RuntimeException("Can not restart a node agent.");
- }
-
- loopThread = new Thread(() -> {
- while (!terminated.get()) tick();
- });
- loopThread.setName("tick-" + hostname);
loopThread.start();
serviceRestarter = service -> {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImpl.java
index 08957a489b6..8283c90e43d 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/noderepository/NodeRepositoryImpl.java
@@ -12,6 +12,7 @@ import com.yahoo.vespa.hosted.node.admin.noderepository.bindings.NodeMessageResp
import com.yahoo.vespa.hosted.node.admin.noderepository.bindings.UpdateNodeAttributesRequestBody;
import com.yahoo.vespa.hosted.node.admin.noderepository.bindings.UpdateNodeAttributesResponse;
import com.yahoo.vespa.hosted.node.admin.util.ConfigServerHttpRequestExecutor;
+import com.yahoo.vespa.hosted.node.admin.util.HttpException;
import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger;
import com.yahoo.vespa.hosted.provision.Node;
@@ -76,7 +77,7 @@ public class NodeRepositoryImpl implements NodeRepository {
return Optional.empty();
}
return Optional.of(createContainerNodeSpec(nodeResponse));
- } catch (ConfigServerHttpRequestExecutor.NotFoundException e) {
+ } catch (HttpException.NotFoundException e) {
return Optional.empty();
}
}
@@ -90,7 +91,7 @@ public class NodeRepositoryImpl implements NodeRepository {
.map(node -> new ContainerAclSpec(
node.hostname, node.ipAddress, ContainerName.fromHostname(node.trustedBy)))
.collect(Collectors.toList());
- } catch (ConfigServerHttpRequestExecutor.NotFoundException e) {
+ } catch (HttpException.NotFoundException e) {
return Collections.emptyList();
}
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java
index bd9df486e7b..d1e996f8e93 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java
@@ -3,6 +3,7 @@ package com.yahoo.vespa.hosted.node.admin.orchestrator;
import com.yahoo.vespa.hosted.node.admin.util.ConfigServerHttpRequestExecutor;
+import com.yahoo.vespa.hosted.node.admin.util.HttpException;
import com.yahoo.vespa.orchestrator.restapi.HostApi;
import com.yahoo.vespa.orchestrator.restapi.HostSuspensionApi;
import com.yahoo.vespa.orchestrator.restapi.wire.BatchHostSuspendRequest;
@@ -40,8 +41,11 @@ public class OrchestratorImpl implements Orchestrator {
port,
Optional.empty(), /* body */
UpdateHostResponse.class);
- } catch (ConfigServerHttpRequestExecutor.NotFoundException n) {
+ } catch (HttpException.NotFoundException n) {
throw new OrchestratorNotFoundException("Failed to suspend " + hostName + ", host not found");
+ } catch (HttpException e) {
+ throw new OrchestratorException("Failed to suspend " + hostName + ": " +
+ e.toString());
} catch (Exception e) {
throw new RuntimeException("Got error on suspend", e);
}
@@ -55,11 +59,14 @@ public class OrchestratorImpl implements Orchestrator {
public void suspend(String parentHostName, List<String> hostNames) {
final BatchOperationResult batchOperationResult;
try {
- batchOperationResult = requestExecutor.put(
+ batchOperationResult = requestExecutor.put(
ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API,
- port,
+ port,
Optional.of(new BatchHostSuspendRequest(parentHostName, hostNames)),
BatchOperationResult.class);
+ } catch (HttpException e) {
+ throw new OrchestratorException("Failed to batch suspend for " +
+ parentHostName + ": " + e.toString());
} catch (Exception e) {
throw new RuntimeException("Got error on batch suspend for " + parentHostName + ", with nodes " + hostNames, e);
}
@@ -75,8 +82,11 @@ public class OrchestratorImpl implements Orchestrator {
try {
String path = getSuspendPath(hostName);
response = requestExecutor.delete(path, port, UpdateHostResponse.class);
- } catch (ConfigServerHttpRequestExecutor.NotFoundException n) {
+ } catch (HttpException.NotFoundException n) {
throw new OrchestratorNotFoundException("Failed to resume " + hostName + ", host not found");
+ } catch (HttpException e) {
+ throw new OrchestratorException("Failed to suspend " + hostName + ": " +
+ e.toString());
} catch (Exception e) {
throw new RuntimeException("Got error on resume", e);
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java
deleted file mode 100644
index 93a77a13bf9..00000000000
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.node.admin.provider;
-
-import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater;
-
-/**
- * Class for setting up instances of classes; enables testing.
- *
- * @author dybis
- */
-public interface ComponentsProvider {
- NodeAdminStateUpdater getNodeAdminStateUpdater();
-}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/NodeAdminProvider.java
index a18325672be..109dbab924c 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/NodeAdminProvider.java
@@ -2,7 +2,8 @@
package com.yahoo.vespa.hosted.node.admin.provider;
import com.google.inject.Inject;
-import com.yahoo.component.AbstractComponent;
+import com.yahoo.concurrent.classlock.ClassLocking;
+import com.yahoo.container.di.componentgraph.Provider;
import com.yahoo.net.HostName;
import com.yahoo.system.ProcessExecuter;
@@ -27,7 +28,6 @@ import com.yahoo.vespa.hosted.node.admin.util.Environment;
import java.time.Clock;
import java.time.Duration;
import java.util.function.Function;
-import java.util.logging.Logger;
import static com.yahoo.vespa.defaults.Defaults.getDefaults;
@@ -36,19 +36,16 @@ import static com.yahoo.vespa.defaults.Defaults.getDefaults;
*
* @author dybis
*/
-public class ComponentsProviderImpl extends AbstractComponent implements ComponentsProvider {
+public class NodeAdminProvider implements Provider<NodeAdminStateUpdater> {
private static final int WEB_SERVICE_PORT = getDefaults().vespaWebServicePort();
private static final Duration NODE_AGENT_SCAN_INTERVAL = Duration.ofSeconds(30);
private static final Duration NODE_ADMIN_CONVERGE_STATE_INTERVAL = Duration.ofSeconds(30);
- private final Logger log = Logger.getLogger(ComponentsProviderImpl.class.getName());
private final NodeAdminStateUpdater nodeAdminStateUpdater;
@Inject
- public ComponentsProviderImpl(Docker docker, MetricReceiverWrapper metricReceiver) {
- log.info(objectToString() + ": Creating object");
-
+ public NodeAdminProvider(Docker docker, MetricReceiverWrapper metricReceiver, ClassLocking classLocking) {
Clock clock = Clock.systemUTC();
String dockerHostHostName = HostName.getLocalhost();
ProcessExecuter processExecuter = new ProcessExecuter();
@@ -69,24 +66,18 @@ public class ComponentsProviderImpl extends AbstractComponent implements Compone
metricReceiver, clock);
nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeRepository, orchestrator, storageMaintainer, nodeAdmin,
- dockerHostHostName, clock, NODE_ADMIN_CONVERGE_STATE_INTERVAL);
+ dockerHostHostName, clock, NODE_ADMIN_CONVERGE_STATE_INTERVAL, classLocking);
+
nodeAdminStateUpdater.start();
}
@Override
- public NodeAdminStateUpdater getNodeAdminStateUpdater() {
+ public NodeAdminStateUpdater get() {
return nodeAdminStateUpdater;
}
@Override
public void deconstruct() {
- log.info(objectToString() + ": Stop called");
nodeAdminStateUpdater.stop();
- log.info(objectToString() + ": Stop complete");
- }
-
-
- private String objectToString() {
- return this.getClass().getSimpleName() + "@" + Integer.toString(System.identityHashCode(this));
}
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java
index 73c544c8c80..42b36d95374 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java
@@ -10,7 +10,6 @@ import com.yahoo.container.logging.AccessLog;
import com.yahoo.vespa.hosted.dockerapi.metrics.DimensionMetrics;
import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater;
-import com.yahoo.vespa.hosted.node.admin.provider.ComponentsProvider;
import javax.inject.Inject;
import javax.ws.rs.core.MediaType;
@@ -39,10 +38,10 @@ public class RestApiHandler extends LoggingRequestHandler{
@Inject
public RestApiHandler(Executor executor, AccessLog accessLog,
- ComponentsProvider componentsProvider,
+ NodeAdminStateUpdater nodeAdminStateUpdater,
MetricReceiverWrapper metricReceiverWrapper) {
super(executor, accessLog);
- this.refresher = componentsProvider.getNodeAdminStateUpdater();
+ this.refresher = nodeAdminStateUpdater;
this.metricReceiverWrapper = metricReceiverWrapper;
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutor.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutor.java
index 9c8dc198388..f7d6b86ce69 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutor.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutor.java
@@ -17,7 +17,6 @@ import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
-import javax.ws.rs.core.Response;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
@@ -72,13 +71,6 @@ public class ConfigServerHttpRequestExecutor {
HttpUriRequest createRequest(String configserver) throws JsonProcessingException, UnsupportedEncodingException;
}
- public class NotFoundException extends RuntimeException {
- private static final long serialVersionUID = 4791511887L;
- public NotFoundException(String message) {
- super(message);
- }
- }
-
private <T> T tryAllConfigServers(CreateRequest requestFactory, Class<T> wantedReturnType) {
Exception lastException = null;
for (int loopRetry = 0; loopRetry < MAX_LOOPS; loopRetry++) {
@@ -99,9 +91,9 @@ public class ConfigServerHttpRequestExecutor {
}
try {
- if (response.getStatusLine().getStatusCode() == Response.Status.NOT_FOUND.getStatusCode()) {
- throw new NotFoundException("Not found returned from " + configServer);
- }
+ HttpException.throwOnFailure(
+ response.getStatusLine().getStatusCode(),
+ "Config server " + configServer);
try {
return mapper.readValue(response.getEntity().getContent(), wantedReturnType);
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/HttpException.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/HttpException.java
new file mode 100644
index 00000000000..fd7f6308593
--- /dev/null
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/util/HttpException.java
@@ -0,0 +1,35 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.util;
+
+import javax.ws.rs.core.Response;
+
+@SuppressWarnings("serial")
+public class HttpException extends RuntimeException {
+ public static class NotFoundException extends HttpException {
+ private static final long serialVersionUID = 4791511887L;
+ public NotFoundException(String message) {
+ super(Response.Status.NOT_FOUND, message);
+ }
+ }
+
+ static void throwOnFailure(int statusCode, String message) {
+ Response.Status status = Response.Status.fromStatusCode(statusCode);
+ if (status == null) {
+ throw new HttpException(statusCode, message);
+ }
+
+ if (status == Response.Status.NOT_FOUND) {
+ throw new NotFoundException(message);
+ } else if (status.getFamily() != Response.Status.Family.SUCCESSFUL) {
+ throw new HttpException(status, message);
+ }
+ }
+
+ private HttpException(int statusCode, String message) {
+ super("HTTP status code " + statusCode + ": " + message);
+ }
+
+ private HttpException(Response.Status status, String message) {
+ super(status.toString() + " (" + status.getStatusCode() + "): " + message);
+ }
+}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java
deleted file mode 100644
index 522ad07a558..00000000000
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.node.admin.integrationTests;
-
-import com.yahoo.metrics.simple.MetricReceiver;
-import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
-import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations;
-import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
-import com.yahoo.vespa.hosted.node.admin.maintenance.acl.AclMaintainer;
-import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin;
-import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl;
-import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater;
-import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent;
-import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl;
-import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository;
-import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator;
-import com.yahoo.vespa.hosted.node.admin.provider.ComponentsProvider;
-import com.yahoo.vespa.hosted.node.admin.util.Environment;
-
-import java.time.Clock;
-import java.time.Duration;
-import java.util.function.Function;
-
-import static org.mockito.Mockito.mock;
-
-/**
- * For setting up test with mocks.
- *
- * @author dybis
- */
-public class ComponentsProviderWithMocks implements ComponentsProvider {
- private static final Duration NODE_AGENT_SCAN_INTERVAL = Duration.ofMillis(100);
- private static final Duration NODE_ADMIN_CONVERGE_STATE_INTERVAL = Duration.ofMillis(5);
-
- static final NodeRepository nodeRepositoryMock = mock(NodeRepository.class);
- static final Orchestrator orchestratorMock = mock(Orchestrator.class);
- static final DockerOperations dockerOperationsMock = mock(DockerOperations.class);
-
- private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
- private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
- private final Environment environment = new Environment.Builder().build();
- private final MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
- private final Function<String, NodeAgent> nodeAgentFactory =
- (hostName) -> new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, dockerOperationsMock,
- storageMaintainer, aclMaintainer, environment, Clock.systemUTC(), NODE_AGENT_SCAN_INTERVAL);
- private final NodeAdmin nodeAdmin = new NodeAdminImpl(dockerOperationsMock, nodeAgentFactory, storageMaintainer, aclMaintainer, mr, Clock.systemUTC());
- private final NodeAdminStateUpdater nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeRepositoryMock,
- orchestratorMock, storageMaintainer, nodeAdmin, "localhost.test.yahoo.com", Clock.systemUTC(), NODE_ADMIN_CONVERGE_STATE_INTERVAL);
-
- public ComponentsProviderWithMocks() {
- nodeAdminStateUpdater.start();
- }
-
- @Override
- public NodeAdminStateUpdater getNodeAdminStateUpdater() {
- return nodeAdminStateUpdater;
- }
-}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
index ab752bbe4c0..b4a5b552738 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerTester.java
@@ -1,6 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.integrationTests;
+import com.yahoo.concurrent.classlock.ClassLocking;
import com.yahoo.metrics.simple.MetricReceiver;
import com.yahoo.vespa.hosted.dockerapi.Docker;
import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
@@ -66,7 +67,7 @@ public class DockerTester implements AutoCloseable {
orchestratorMock, dockerOperations, storageMaintainer, aclMaintainer, environment, clock, NODE_AGENT_SCAN_INTERVAL);
nodeAdmin = new NodeAdminImpl(dockerOperations, nodeAgentFactory, storageMaintainer, aclMaintainer, mr, Clock.systemUTC());
nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeRepositoryMock, orchestratorMock, storageMaintainer,
- nodeAdmin, "basehostname", clock, NODE_ADMIN_CONVERGE_STATE_INTERVAL);
+ nodeAdmin, "basehostname", clock, NODE_ADMIN_CONVERGE_STATE_INTERVAL, new ClassLocking());
nodeAdminStateUpdater.start();
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java
index 036a53a9654..23d55bd947c 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/RunInContainerTest.java
@@ -3,11 +3,25 @@ package com.yahoo.vespa.hosted.node.admin.integrationTests;
import com.yahoo.application.Networking;
import com.yahoo.application.container.JDisc;
+import com.yahoo.concurrent.classlock.ClassLocking;
+import com.yahoo.container.di.componentgraph.Provider;
+import com.yahoo.metrics.simple.MetricReceiver;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
+import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations;
+import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
+import com.yahoo.vespa.hosted.node.admin.maintenance.acl.AclMaintainer;
+import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin;
+import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl;
+import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater;
+import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent;
+import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl;
+import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator;
import com.yahoo.vespa.hosted.node.admin.orchestrator.OrchestratorException;
+import com.yahoo.vespa.hosted.node.admin.util.Environment;
import com.yahoo.vespa.hosted.provision.Node;
import org.apache.commons.io.IOUtils;
import org.apache.http.HttpEntity;
@@ -26,15 +40,19 @@ import java.io.IOException;
import java.io.StringWriter;
import java.net.ServerSocket;
import java.nio.charset.StandardCharsets;
+import java.time.Clock;
+import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collections;
+import java.util.function.Function;
import java.util.logging.Logger;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
/**
@@ -42,7 +60,11 @@ import static org.mockito.Mockito.when;
*/
public class RunInContainerTest {
private final Logger logger = Logger.getLogger("RunInContainerTest");
- private final Orchestrator orchestrator = ComponentsProviderWithMocks.orchestratorMock;
+
+ private static final NodeRepository nodeRepositoryMock = mock(NodeRepository.class);
+ private static final Orchestrator orchestratorMock = mock(Orchestrator.class);
+ private static final DockerOperations dockerOperationsMock = mock(DockerOperations.class);
+
private final String parentHostname = "localhost.test.yahoo.com";
private JDisc container;
private int port;
@@ -58,7 +80,7 @@ public class RunInContainerTest {
// To test the initial NodeAdminStateUpdater convergence towards RESUME, orchestrator should
// deny permission to resume for parent host, otherwise it'll converge to RESUME before REST
// handler comes up
- doThrow(new RuntimeException()).when(orchestrator).resume(parentHostname);
+ doThrow(new RuntimeException()).when(orchestratorMock).resume(parentHostname);
port = findRandomOpenPort();
System.out.println("PORT IS " + port);
logger.info("PORT IS " + port);
@@ -117,22 +139,22 @@ public class RunInContainerTest {
@Ignore
@Test
public void testGetContainersToRunAPi() throws IOException, InterruptedException {
- doThrow(new OrchestratorException("Cannot suspend because...")).when(orchestrator).suspend(parentHostname);
- when(ComponentsProviderWithMocks.nodeRepositoryMock.getContainersToRun(eq(parentHostname))).thenReturn(Collections.emptyList());
+ doThrow(new OrchestratorException("Cannot suspend because...")).when(orchestratorMock).suspend(parentHostname);
+ when(nodeRepositoryMock.getContainersToRun(eq(parentHostname))).thenReturn(Collections.emptyList());
waitForJdiscContainerToServe();
assertTrue("The initial resume command should fail because it needs to converge first",
verifyWithRetries("resume", false));
- doNothing().when(orchestrator).resume(parentHostname);
+ doNothing().when(orchestratorMock).resume(parentHostname);
assertTrue(verifyWithRetries("resume", true));
doThrow(new OrchestratorException("Cannot suspend because..."))
- .when(orchestrator).suspend(parentHostname, Collections.singletonList(parentHostname));
+ .when(orchestratorMock).suspend(parentHostname, Collections.singletonList(parentHostname));
assertTrue("Should fail because orchestrator does not allow node-admin to suspend",
verifyWithRetries("suspend/node-admin", false));
// Orchestrator changes its mind, allows node-admin to suspend
- doNothing().when(orchestrator).suspend(parentHostname, Collections.singletonList(parentHostname));
+ doNothing().when(orchestratorMock).suspend(parentHostname, Collections.singletonList(parentHostname));
assertTrue(verifyWithRetries("suspend/node-admin", true));
// Lets try to suspend everything now, should be trivial as we have no active containers to stop services at
@@ -144,7 +166,7 @@ public class RunInContainerTest {
assertTrue(verifyWithRetries("resume", true));
// Lets try the same, but with an active container running on this host
- when(ComponentsProviderWithMocks.nodeRepositoryMock.getContainersToRun(eq(parentHostname))).thenReturn(
+ when(nodeRepositoryMock.getContainersToRun(eq(parentHostname))).thenReturn(
Collections.singletonList(new ContainerNodeSpec.Builder()
.hostname("host1.test.yahoo.com")
.wantedDockerImage(new DockerImage("dockerImage"))
@@ -152,7 +174,7 @@ public class RunInContainerTest {
.nodeType("tenant")
.nodeFlavor("docker")
.build()));
- doThrow(new OrchestratorException("Cannot suspend because...")).when(orchestrator)
+ doThrow(new OrchestratorException("Cannot suspend because...")).when(orchestratorMock)
.suspend("localhost.test.yahoo.com", Arrays.asList("host1.test.yahoo.com", parentHostname));
// Initially we are denied to suspend because we have to freeze all the node-agents
@@ -160,16 +182,16 @@ public class RunInContainerTest {
// At this point they should be frozen, but Orchestrator doesn't allow to suspend either the container or the node-admin
assertTrue(verifyWithRetries("suspend/node-admin", false));
- doNothing().when(orchestrator)
+ doNothing().when(orchestratorMock)
.suspend("localhost.test.yahoo.com", Arrays.asList("host1.test.yahoo.com", parentHostname));
// Orchestrator successfully suspended everything
assertTrue(verifyWithRetries("suspend/node-admin", true));
// Allow stopping services in active nodes
- doNothing().when(ComponentsProviderWithMocks.dockerOperationsMock)
+ doNothing().when(dockerOperationsMock)
.trySuspendNode(eq(new ContainerName("host1")));
- doNothing().when(ComponentsProviderWithMocks.dockerOperationsMock)
+ doNothing().when(dockerOperationsMock)
.stopServicesOnNode(eq(new ContainerName("host1")));
assertTrue(verifyWithRetries("suspend", false));
@@ -191,11 +213,49 @@ public class RunInContainerTest {
" <handler id=\"com.yahoo.vespa.hosted.node.admin.restapi.RestApiHandler\" bundle=\"node-admin\">\n" +
" <binding>http://*/rest/*</binding>\n" +
" </handler>\n" +
- " <component id=\"node-admin\" class=\"com.yahoo.vespa.hosted.node.admin.integrationTests.ComponentsProviderWithMocks\" bundle=\"node-admin\"/>\n" +
+ " <component id=\"metric-receiver\" class=\"com.yahoo.vespa.hosted.node.admin.integrationTests.RunInContainerTest$MetricReceiverWrapperMock\" bundle=\"node-admin\"/>\n" +
+ " <component id=\"node-admin\" class=\"com.yahoo.vespa.hosted.node.admin.integrationTests.RunInContainerTest$NodeAdminProviderWithMocks\" bundle=\"node-admin\"/>\n" +
" <http>" +
" <server id=\'myServer\' port=\'" + port + "\' />" +
" </http>" +
" </jdisc>\n" +
"</services>\n";
}
+
+
+ public static class MetricReceiverWrapperMock extends MetricReceiverWrapper {
+ public MetricReceiverWrapperMock() {
+ super(MetricReceiver.nullImplementation);
+ }
+ }
+
+ public class NodeAdminProviderWithMocks implements Provider<NodeAdminStateUpdater> {
+ private final Duration NODE_AGENT_SCAN_INTERVAL = Duration.ofMillis(100);
+ private final Duration NODE_ADMIN_CONVERGE_STATE_INTERVAL = Duration.ofMillis(5);
+
+ private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
+ private final AclMaintainer aclMaintainer = mock(AclMaintainer.class);
+ private final Environment environment = new Environment.Builder().build();
+ private final MetricReceiverWrapper mr = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
+ private final Function<String, NodeAgent> nodeAgentFactory =
+ (hostName) -> new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, dockerOperationsMock,
+ storageMaintainer, aclMaintainer, environment, Clock.systemUTC(), NODE_AGENT_SCAN_INTERVAL);
+ private final NodeAdmin nodeAdmin = new NodeAdminImpl(dockerOperationsMock, nodeAgentFactory, storageMaintainer, aclMaintainer, mr, Clock.systemUTC());
+ private final NodeAdminStateUpdater nodeAdminStateUpdater = new NodeAdminStateUpdater(nodeRepositoryMock,
+ orchestratorMock, storageMaintainer, nodeAdmin, "localhost.test.yahoo.com", Clock.systemUTC(), NODE_ADMIN_CONVERGE_STATE_INTERVAL, new ClassLocking());
+
+ public NodeAdminProviderWithMocks() {
+ nodeAdminStateUpdater.start();
+ }
+
+ @Override
+ public NodeAdminStateUpdater get() {
+ return nodeAdminStateUpdater;
+ }
+
+ @Override
+ public void deconstruct() {
+ nodeAdminStateUpdater.stop();
+ }
+ }
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java
index 17e5637c0eb..9ce48dac55b 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/acl/AclMaintainerTest.java
@@ -116,83 +116,23 @@ public class AclMaintainerTest {
private void assertAclsApplied(ContainerName containerName, List<ContainerAclSpec> containerAclSpecs,
VerificationMode verificationMode) {
+ StringBuilder expectedCommand = new StringBuilder()
+ .append("ip6tables -F INPUT; ")
+ .append("ip6tables -P INPUT DROP; ")
+ .append("ip6tables -P FORWARD DROP; ")
+ .append("ip6tables -P OUTPUT ACCEPT; ")
+ .append("ip6tables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT; ")
+ .append("ip6tables -A INPUT -i lo -j ACCEPT; ")
+ .append("ip6tables -A INPUT -p ipv6-icmp -j ACCEPT; ");
+
+ containerAclSpecs.forEach(aclSpec ->
+ expectedCommand.append("ip6tables -A INPUT -s " + aclSpec.ipAddress() + "/128 -j ACCEPT; "));
+
+ expectedCommand.append("ip6tables -A INPUT -j REJECT");
+
+
verify(dockerOperations, verificationMode).executeCommandInNetworkNamespace(
- eq(containerName),
- eq("ip6tables"),
- eq("-F"),
- eq("INPUT")
- );
- verify(dockerOperations, verificationMode).executeCommandInNetworkNamespace(
- eq(containerName),
- eq("ip6tables"),
- eq("-P"),
- eq("INPUT"),
- eq("DROP")
- );
- verify(dockerOperations, verificationMode).executeCommandInNetworkNamespace(
- eq(containerName),
- eq("ip6tables"),
- eq("-P"),
- eq("FORWARD"),
- eq("DROP")
- );
- verify(dockerOperations, verificationMode).executeCommandInNetworkNamespace(
- eq(containerName),
- eq("ip6tables"),
- eq("-P"),
- eq("OUTPUT"),
- eq("ACCEPT")
- );
- verify(dockerOperations, verificationMode).executeCommandInNetworkNamespace(
- eq(containerName),
- eq("ip6tables"),
- eq("-A"),
- eq("INPUT"),
- eq("-m"),
- eq("state"),
- eq("--state"),
- eq("RELATED,ESTABLISHED"),
- eq("-j"),
- eq("ACCEPT")
- );
- verify(dockerOperations, verificationMode).executeCommandInNetworkNamespace(
- eq(containerName),
- eq("ip6tables"),
- eq("-A"),
- eq("INPUT"),
- eq("-i"),
- eq("lo"),
- eq("-j"),
- eq("ACCEPT")
- );
- verify(dockerOperations, verificationMode).executeCommandInNetworkNamespace(
- eq(containerName),
- eq("ip6tables"),
- eq("-A"),
- eq("INPUT"),
- eq("-p"),
- eq("ipv6-icmp"),
- eq("-j"),
- eq("ACCEPT")
- );
- containerAclSpecs.forEach(aclSpec -> verify(dockerOperations, verificationMode).executeCommandInNetworkNamespace(
- eq(containerName),
- eq("ip6tables"),
- eq("-A"),
- eq("INPUT"),
- eq("-s"),
- eq(aclSpec.ipAddress() + "/128"),
- eq("-j"),
- eq("ACCEPT")
- ));
- verify(dockerOperations, verificationMode).executeCommandInNetworkNamespace(
- eq(containerName),
- eq("ip6tables"),
- eq("-A"),
- eq("INPUT"),
- eq("-j"),
- eq("REJECT")
- );
+ eq(containerName), eq("/bin/sh"), eq("-c"), eq(expectedCommand.toString()));
}
private Container makeContainer(String hostname) {
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
index e1501cf59fe..ce062702a3b 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminStateUpdaterTest.java
@@ -45,7 +45,7 @@ public class NodeAdminStateUpdaterTest {
private final Duration convergeStateInterval = Duration.ofSeconds(30);
private final NodeAdminStateUpdater refresher = spy(new NodeAdminStateUpdater(
- nodeRepository, orchestrator, storageMaintainer, nodeAdmin, parentHostname, clock, convergeStateInterval));
+ nodeRepository, orchestrator, storageMaintainer, nodeAdmin, parentHostname, clock, convergeStateInterval, null));
@Test
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java
index cef43a058c0..0a11ddd1e62 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java
@@ -2,6 +2,7 @@
package com.yahoo.vespa.hosted.node.admin.orchestrator;
import com.yahoo.vespa.hosted.node.admin.util.ConfigServerHttpRequestExecutor;
+import com.yahoo.vespa.hosted.node.admin.util.HttpException;
import com.yahoo.vespa.orchestrator.restapi.wire.BatchHostSuspendRequest;
import com.yahoo.vespa.orchestrator.restapi.wire.BatchOperationResult;
import com.yahoo.vespa.orchestrator.restapi.wire.HostStateChangeDenialReason;
@@ -12,7 +13,9 @@ import java.util.Arrays;
import java.util.List;
import java.util.Optional;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
/**
* @author freva
@@ -56,7 +59,7 @@ public class OrchestratorImplTest {
any(Integer.class),
any(),
any()
- )).thenThrow(requestExecutor.new NotFoundException("Not Found"));
+ )).thenThrow(new HttpException.NotFoundException("Not Found"));
orchestrator.suspend(hostName);
}
@@ -102,7 +105,7 @@ public class OrchestratorImplTest {
any(String.class),
any(Integer.class),
any()
- )).thenThrow(requestExecutor.new NotFoundException("Not Found"));
+ )).thenThrow(new HttpException.NotFoundException("Not Found"));
orchestrator.resume(hostName);
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutorTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutorTest.java
index f0d81f2aaf7..e197dd8bc54 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutorTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/util/ConfigServerHttpRequestExecutorTest.java
@@ -78,8 +78,8 @@ public class ConfigServerHttpRequestExecutorTest {
assertLogStringContainsGETForAHost();
}
- @Test
- public void testBasicFailureWithNoRetries() throws Exception {
+ @Test(expected = HttpException.class)
+ public void testBasicFailure() throws Exception {
Set<String> configServers = new ArraySet<>(2);
configServers.add("host1");
configServers.add("host2");
@@ -93,6 +93,20 @@ public class ConfigServerHttpRequestExecutorTest {
}
@Test
+ public void testBasicSuccessWithNoRetries() throws Exception {
+ Set<String> configServers = new ArraySet<>(2);
+ configServers.add("host1");
+ configServers.add("host2");
+ // Server is returning 201, no retries.
+ mockReturnCode = 201;
+ ConfigServerHttpRequestExecutor executor = new ConfigServerHttpRequestExecutor(configServers, createClientMock());
+
+ TestPojo testPojo = executor.get("/path", 666, TestPojo.class);
+ assertEquals(testPojo.errorCode.intValue(), mockReturnCode);
+ assertLogStringContainsGETForAHost();
+ }
+
+ @Test
public void testRetries() throws Exception {
Set<String> configServers = new ArraySet<>(2);
configServers.add("host1");
@@ -123,7 +137,7 @@ public class ConfigServerHttpRequestExecutorTest {
try {
executor.get("/path", 666, TestPojo.class);
fail("Expected exception");
- } catch (ConfigServerHttpRequestExecutor.NotFoundException e) {
+ } catch (HttpException.NotFoundException e) {
// ignore
}
assertLogStringContainsGETForAHost();
diff --git a/node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandler.java b/node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandler.java
index 21584aee18d..99dfdb48334 100644
--- a/node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandler.java
+++ b/node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandler.java
@@ -15,6 +15,7 @@ import java.io.InputStreamReader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
@@ -28,10 +29,10 @@ import java.util.stream.Collectors;
*
* @author freva
*/
-public class CoredumpHandler {
+class CoredumpHandler {
- public static final String PROCESSING_DIRECTORY_NAME = "processing";
- public static final String METADATA_FILE_NAME = "metadata.json";
+ static final String PROCESSING_DIRECTORY_NAME = "processing";
+ static final String METADATA_FILE_NAME = "metadata.json";
private final Logger logger = Logger.getLogger(CoredumpHandler.class.getName());
private final ObjectMapper objectMapper = new ObjectMapper();
@@ -57,7 +58,7 @@ public class CoredumpHandler {
public void processAll() throws IOException {
removeJavaCoredumps();
- processAndReportCoredumps();
+ handleNewCoredumps();
removeOldCoredumps();
}
@@ -71,21 +72,27 @@ public class CoredumpHandler {
FileHelper.deleteDirectories(doneCoredumpsPath, Duration.ofDays(10), Optional.empty());
}
- private void processAndReportCoredumps() throws IOException {
- Path processingCoredumps = processCoredumps();
- reportCoredumps(processingCoredumps);
+ private void handleNewCoredumps() throws IOException {
+ Path processingCoredumps = enqueueCoredumps();
+ processAndReportCoredumps(processingCoredumps);
}
- Path processCoredumps() throws IOException {
+ /**
+ * Moves a coredump to a new directory under the processing/ directory. Limit to only processing
+ * one coredump at the time, starting with the oldest.
+ */
+ Path enqueueCoredumps() throws IOException {
Path processingCoredumpsPath = coredumpsPath.resolve(PROCESSING_DIRECTORY_NAME);
processingCoredumpsPath.toFile().mkdirs();
+ if (Files.list(processingCoredumpsPath).count() > 0) return processingCoredumpsPath;
Files.list(coredumpsPath)
.filter(path -> path.toFile().isFile() && ! path.getFileName().toString().startsWith("."))
- .forEach(coredumpPath -> {
+ .min((Comparator.comparingLong(o -> o.toFile().lastModified())))
+ .ifPresent(coredumpPath -> {
try {
- startProcessing(coredumpPath, processingCoredumpsPath);
+ enqueueCoredumpForProcessing(coredumpPath, processingCoredumpsPath);
} catch (Throwable e) {
logger.log(Level.WARNING, "Failed to process coredump " + coredumpPath, e);
}
@@ -94,7 +101,7 @@ public class CoredumpHandler {
return processingCoredumpsPath;
}
- void reportCoredumps(Path processingCoredumpsPath) throws IOException {
+ void processAndReportCoredumps(Path processingCoredumpsPath) throws IOException {
doneCoredumpsPath.toFile().mkdirs();
Files.list(processingCoredumpsPath)
@@ -110,7 +117,7 @@ public class CoredumpHandler {
});
}
- Path startProcessing(Path coredumpPath, Path processingCoredumpsPath) throws IOException {
+ Path enqueueCoredumpForProcessing(Path coredumpPath, Path processingCoredumpsPath) throws IOException {
// Make coredump readable
coredumpPath.toFile().setReadable(true, false);
diff --git a/node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/verification/commons/noderepo/NodeRepoInfoRetriever.java b/node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/verification/commons/noderepo/NodeRepoInfoRetriever.java
index 57506e44961..465d42a1f1d 100644
--- a/node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/verification/commons/noderepo/NodeRepoInfoRetriever.java
+++ b/node-maintainer/src/main/java/com/yahoo/vespa/hosted/node/verification/commons/noderepo/NodeRepoInfoRetriever.java
@@ -5,7 +5,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.net.URL;
-import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
@@ -27,7 +26,6 @@ public class NodeRepoInfoRetriever {
try {
nodeRepoJsonModel = objectMapper.readValue(nodeInfoURL, NodeRepoJsonModel.class);
return nodeRepoJsonModel;
-
} catch (IOException e) {
logger.log(Level.WARNING, "Failed to parse JSON from config server: " + nodeInfoURL.toString(), e);
}
diff --git a/node-maintainer/src/test/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandlerTest.java b/node-maintainer/src/test/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandlerTest.java
index d469e5efc46..2cc96567e20 100644
--- a/node-maintainer/src/test/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandlerTest.java
+++ b/node-maintainer/src/test/java/com/yahoo/vespa/hosted/node/maintainer/CoredumpHandlerTest.java
@@ -22,7 +22,9 @@ import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.time.Instant;
import java.util.Arrays;
+import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@@ -84,8 +86,8 @@ public class CoredumpHandlerTest {
@Test
public void ignoresIncompleteCoredumps() throws IOException {
- Path coredumpPath = createCoredump(".core.dump");
- Path processingPath = coredumpHandler.processCoredumps();
+ Path coredumpPath = createCoredump(".core.dump", Instant.now());
+ Path processingPath = coredumpHandler.enqueueCoredumps();
// The 'processing' directory should be empty
assertFolderContents(processingPath);
@@ -96,9 +98,9 @@ public class CoredumpHandlerTest {
@Test
public void startProcessingTest() throws IOException {
- Path coredumpPath = createCoredump("core.dump");
+ Path coredumpPath = createCoredump("core.dump", Instant.now());
Path processingPath = crashPath.resolve("processing_dir");
- coredumpHandler.startProcessing(coredumpPath, processingPath);
+ coredumpHandler.enqueueCoredumpForProcessing(coredumpPath, processingPath);
// Contents of 'crash' should be only the 'processing' directory
assertFolderContents(crashPath, processingPath.getFileName().toString());
@@ -112,9 +114,36 @@ public class CoredumpHandlerTest {
}
@Test
+ public void limitToProcessingOneCoredumpAtTheTimeTest() throws IOException {
+ final String oldestCoredump = "core.dump0";
+ final Instant startTime = Instant.now();
+ createCoredump(oldestCoredump, startTime.minusSeconds(3600));
+ createCoredump("core.dump1", startTime.minusSeconds(1000));
+ createCoredump("core.dump2", startTime);
+ Path processingPath = coredumpHandler.enqueueCoredumps();
+
+ List<Path> processingCoredumps = Files.list(processingPath).collect(Collectors.toList());
+ assertEquals(1, processingCoredumps.size());
+
+ // Make sure that the 1 coredump that we are processing is the oldest one
+ Set<String> filenamesInProcessingDirectory = Files.list(processingCoredumps.get(0))
+ .map(file -> file.getFileName().toString())
+ .collect(Collectors.toSet());
+ assertEquals(Collections.singleton(oldestCoredump), filenamesInProcessingDirectory);
+
+ // Running enqueueCoredumps should not start processing any new coredumps as we already are processing one
+ coredumpHandler.enqueueCoredumps();
+ assertEquals(processingCoredumps, Files.list(processingPath).collect(Collectors.toList()));
+ filenamesInProcessingDirectory = Files.list(processingCoredumps.get(0))
+ .map(file -> file.getFileName().toString())
+ .collect(Collectors.toSet());
+ assertEquals(Collections.singleton(oldestCoredump), filenamesInProcessingDirectory);
+ }
+
+ @Test
public void coredumpMetadataCollectAndWriteTest() throws IOException, InterruptedException {
- createCoredump("core.dump");
- Path processingPath = coredumpHandler.processCoredumps();
+ createCoredump("core.dump", Instant.now());
+ Path processingPath = coredumpHandler.enqueueCoredumps();
Path processingCoredumpPath = Files.list(processingPath).findFirst().orElseThrow(() ->
new RuntimeException("Expected to find directory with coredump in processing dir"));
when(coreCollector.collect(eq(processingCoredumpPath.resolve("core.dump")), any())).thenReturn(metadata);
@@ -153,7 +182,7 @@ public class CoredumpHandlerTest {
Path metadataPath = createProcessedCoredump(documentId);
setNextHttpResponse(500, Optional.of("Internal server error"));
- coredumpHandler.reportCoredumps(crashPath.resolve(CoredumpHandler.PROCESSING_DIRECTORY_NAME));
+ coredumpHandler.processAndReportCoredumps(crashPath.resolve(CoredumpHandler.PROCESSING_DIRECTORY_NAME));
validateNextHttpPost(documentId, expectedMetadataFileContents);
// The coredump should not have been moved out of 'processing' and into 'done' as the report failed
@@ -182,9 +211,10 @@ public class CoredumpHandlerTest {
assertEquals(expectedContentsOfFolder, actualContentsOfFolder);
}
- private Path createCoredump(String coredumpName) throws IOException {
+ private Path createCoredump(String coredumpName, Instant lastModified) throws IOException {
Path coredumpPath = crashPath.resolve(coredumpName);
coredumpPath.toFile().createNewFile();
+ coredumpPath.toFile().setLastModified(lastModified.toEpochMilli());
return coredumpPath;
}
diff --git a/node-repository/CMakeLists.txt b/node-repository/CMakeLists.txt
new file mode 100644
index 00000000000..24a8f7e4177
--- /dev/null
+++ b/node-repository/CMakeLists.txt
@@ -0,0 +1,5 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(node-repository)
+
+install(FILES src/main/config/node-repository.xml
+ DESTINATION conf/configserver-app)
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
index a86ba955a9a..b7971e61117 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirer.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.hosted.provision.maintenance;
import com.yahoo.config.provision.Environment;
import com.yahoo.config.provision.Flavor;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.SystemName;
import com.yahoo.config.provision.Zone;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
@@ -80,7 +79,6 @@ public class FailedExpirer extends Expirer {
private boolean failCountIndicatesHwFail(Zone zone, Node node) {
if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER) return false;
- if (zone.system() == SystemName.cd) return false;
return zone.environment() == Environment.prod || zone.environment() == Environment.staging;
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodes.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodes.java
index 4fb58b3714b..ae4cfbd27f3 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodes.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/retire/RetireIPv4OnlyNodes.java
@@ -33,6 +33,10 @@ public class RetireIPv4OnlyNodes implements RetirementPolicy {
return zone.environment() == Environment.perf || zone.environment() == Environment.prod;
} else if (zone.region().equals(RegionName.from("us-west-1"))) {
return zone.environment() == Environment.prod;
+ } else if (zone.region().equals(RegionName.from("us-central-1"))) {
+ return zone.environment() == Environment.prod;
+ } else if (zone.region().equals(RegionName.from("ap-southeast-1"))) {
+ return zone.environment() == Environment.prod;
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/ErrorResponse.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/ErrorResponse.java
index 12016bb3d77..6bbf89a906e 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/ErrorResponse.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/restapi/v2/ErrorResponse.java
@@ -11,6 +11,11 @@ import java.io.OutputStream;
import static com.yahoo.jdisc.Response.Status.*;
+/**
+ * Error responses with JSON bodies
+ *
+ * @author bratseth
+ */
public class ErrorResponse extends HttpResponse {
private final Slime slime = new Slime();
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
index 3ccacb3ff02..51991a844d7 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/FailedExpirerTest.java
@@ -75,8 +75,8 @@ public class FailedExpirerTest {
clock.advance(Duration.ofDays(5));
failedExpirer.run();
+ assertNodeHostnames(Node.State.failed, "node1");
assertNodeHostnames(Node.State.parked, "node2", "node3");
- assertNodeHostnames(Node.State.dirty, "node1");
}
@Test
diff --git a/orchestrator/CMakeLists.txt b/orchestrator/CMakeLists.txt
new file mode 100644
index 00000000000..5ff6a7ac576
--- /dev/null
+++ b/orchestrator/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(orchestrator)
diff --git a/orchestrator/pom.xml b/orchestrator/pom.xml
index c2091e74435..d70e5006c54 100644
--- a/orchestrator/pom.xml
+++ b/orchestrator/pom.xml
@@ -35,6 +35,12 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
+ <artifactId>config-model-api</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
<artifactId>jaxrs_client_utils</artifactId>
<version>${project.version}</version>
<scope>compile</scope>
diff --git a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/ServiceMonitorInstanceLookupService.java b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/ServiceMonitorInstanceLookupService.java
index b7f1ec56541..31f4f1430d3 100644
--- a/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/ServiceMonitorInstanceLookupService.java
+++ b/orchestrator/src/main/java/com/yahoo/vespa/orchestrator/ServiceMonitorInstanceLookupService.java
@@ -2,11 +2,11 @@
package com.yahoo.vespa.orchestrator;
import com.google.inject.Inject;
-import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
import com.yahoo.vespa.applicationmodel.ApplicationInstance;
+import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
import com.yahoo.vespa.applicationmodel.HostName;
+import com.yahoo.vespa.service.monitor.ServiceMonitor;
import com.yahoo.vespa.service.monitor.ServiceMonitorStatus;
-import com.yahoo.vespa.service.monitor.SlobrokAndConfigIntersector;
import java.util.List;
import java.util.Map;
@@ -21,24 +21,24 @@ import java.util.stream.Collectors;
*/
public class ServiceMonitorInstanceLookupService implements InstanceLookupService {
- private final SlobrokAndConfigIntersector slobrokAndConfigIntersector;
+ private final ServiceMonitor serviceMonitor;
@Inject
- public ServiceMonitorInstanceLookupService(SlobrokAndConfigIntersector slobrokAndConfigIntersector) {
- this.slobrokAndConfigIntersector = slobrokAndConfigIntersector;
+ public ServiceMonitorInstanceLookupService(ServiceMonitor serviceMonitor) {
+ this.serviceMonitor = serviceMonitor;
}
@Override
public Optional<ApplicationInstance<ServiceMonitorStatus>> findInstanceById(ApplicationInstanceReference applicationInstanceReference) {
Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> instanceMap
- = slobrokAndConfigIntersector.queryStatusOfAllApplicationInstances();
+ = serviceMonitor.queryStatusOfAllApplicationInstances();
return Optional.ofNullable(instanceMap.get(applicationInstanceReference));
}
@Override
public Optional<ApplicationInstance<ServiceMonitorStatus>> findInstanceByHost(HostName hostName) {
Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> instanceMap
- = slobrokAndConfigIntersector.queryStatusOfAllApplicationInstances();
+ = serviceMonitor.queryStatusOfAllApplicationInstances();
List<ApplicationInstance<ServiceMonitorStatus>> applicationInstancesUsingHost = instanceMap.entrySet().stream()
.filter(entry -> applicationInstanceUsesHost(entry.getValue(), hostName))
.map(Map.Entry::getValue)
@@ -56,7 +56,7 @@ public class ServiceMonitorInstanceLookupService implements InstanceLookupServic
@Override
public Set<ApplicationInstanceReference> knownInstances() {
- return slobrokAndConfigIntersector.queryStatusOfAllApplicationInstances().keySet();
+ return serviceMonitor.queryStatusOfAllApplicationInstances().keySet();
}
private static boolean applicationInstanceUsesHost(ApplicationInstance<ServiceMonitorStatus> applicationInstance,
diff --git a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionResourceTest.java b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionResourceTest.java
index 8595c8a31a4..95fad5e56ed 100644
--- a/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionResourceTest.java
+++ b/orchestrator/src/test/java/com/yahoo/vespa/orchestrator/resources/ApplicationSuspensionResourceTest.java
@@ -28,6 +28,7 @@ import static org.junit.Assert.assertEquals;
*
* @author smorgrav
*/
+@Ignore("Fails with JVM crashing with OoM on CentOS")
public class ApplicationSuspensionResourceTest {
static final String BASE_PATH = "/orchestrator/v1/suspensions/applications";
diff --git a/persistence/CMakeLists.txt b/persistence/CMakeLists.txt
index b773fccc09a..983a0a390a0 100644
--- a/persistence/CMakeLists.txt
+++ b/persistence/CMakeLists.txt
@@ -17,7 +17,6 @@ vespa_define_module(
src/vespa/persistence
src/vespa/persistence/conformancetest
src/vespa/persistence/dummyimpl
- src/vespa/persistence/proxy
src/vespa/persistence/spi
TEST_DEPENDS
@@ -26,6 +25,5 @@ vespa_define_module(
TESTS
src/tests
src/tests/dummyimpl
- src/tests/proxy
src/tests/spi
)
diff --git a/persistence/pom.xml b/persistence/pom.xml
deleted file mode 100644
index 00734aa2f89..00000000000
--- a/persistence/pom.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. -->
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
- <modelVersion>4.0.0</modelVersion>
- <parent>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>parent</artifactId>
- <version>6-SNAPSHOT</version>
- </parent>
- <artifactId>persistence</artifactId>
- <packaging>container-plugin</packaging>
- <build>
- <plugins>
- <plugin>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>bundle-plugin</artifactId>
- <extensions>true</extensions>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-compiler-plugin</artifactId>
- <configuration>
- <compilerArgs>
- <arg>-Xlint:rawtypes</arg>
- <arg>-Xlint:unchecked</arg>
- <arg>-Xlint:deprecation</arg>
- <arg>-Werror</arg>
- </compilerArgs>
- </configuration>
- </plugin>
-
- <plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-javadoc-plugin</artifactId>
- <executions>
- <execution>
- <id>attach-javadocs</id>
- <goals>
- <goal>jar</goal>
- </goals>
- <configuration>
- <finalName>${project.artifactId}</finalName>
- <additionalparam>-Xdoclint:${doclint} -Xdoclint:-missing</additionalparam>
- </configuration>
- </execution>
- </executions>
- </plugin>
- <plugin>
- <artifactId>maven-source-plugin</artifactId>
- <version>2.1.2</version>
- <executions>
- <execution>
- <id>attach-sources</id>
- <goals>
- <goal>jar-no-fork</goal>
- </goals>
- <configuration>
- <finalName>${project.artifactId}</finalName>
- </configuration>
- </execution>
- </executions>
- </plugin>
- </plugins>
- </build>
-
- <dependencies>
- <dependency>
- <groupId>junit</groupId>
- <artifactId>junit</artifactId>
- </dependency>
-
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>container-dev</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- <exclusions>
- <exclusion>
- <groupId>org.antlr</groupId>
- <artifactId>antlr4-runtime</artifactId>
- </exclusion>
- </exclusions>
- </dependency>
-
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>document</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
-
- <dependency>
- <groupId>com.yahoo.vespa</groupId>
- <artifactId>jrt</artifactId>
- <version>${project.version}</version>
- <scope>provided</scope>
- </dependency>
- </dependencies>
-
- <properties>
- <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
- </properties>
-</project>
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/BucketProviderMethod.java b/persistence/src/main/java/com/yahoo/persistence/rpc/BucketProviderMethod.java
deleted file mode 100644
index a5a13d7bf0e..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/rpc/BucketProviderMethod.java
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.rpc;
-
-/**
- * Class to represent a persistence provider method that has a bucket
- * as its first parameter.
- */
-public class BucketProviderMethod extends PersistenceProviderMethod {
- public BucketProviderMethod(String name, PersistenceProviderHandler owner) {
- this(name, owner, "", "");
- }
-
- public BucketProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes) {
- this(name, owner, paramTypes, "");
- }
-
- public BucketProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes, String returnTypes) {
- super(name, owner, "ll" + paramTypes, returnTypes);
- paramDesc("bucketId", "The bucket id to perform operation on");
- paramDesc("partitionId", "The partition to perform operation on");
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderHandler.java b/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderHandler.java
deleted file mode 100644
index e4a5dc3067f..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderHandler.java
+++ /dev/null
@@ -1,401 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.rpc;
-
-import com.yahoo.document.*;
-import com.yahoo.document.fieldset.AllFields;
-import com.yahoo.document.fieldset.FieldSet;
-import com.yahoo.document.select.parser.ParseException;
-import com.yahoo.document.serialization.*;
-import com.yahoo.io.GrowableByteBuffer;
-import com.yahoo.jrt.*;
-import com.yahoo.persistence.PersistenceRpcConfig;
-import com.yahoo.persistence.spi.*;
-import com.yahoo.persistence.spi.result.*;
-
-import java.nio.ByteBuffer;
-import java.util.TreeSet;
-
-/**
- * @author thomasg
- */
-public class PersistenceProviderHandler extends RPCHandler {
- DocumentTypeManager docTypeManager;
- PersistenceProvider provider = null;
- boolean started = false;
-
- int magic_number = 0xf00ba2;
-
- public PersistenceProviderHandler(PersistenceRpcConfig config) {
- super(config.port());
- }
-
- public void initialize(PersistenceProvider provider, DocumentTypeManager manager) {
- this.provider = provider;
- this.docTypeManager = manager;
-
- if (!started) {
- addMethod(new Method("vespa.persistence.connect", "s", "", this, "RPC_connect")
- .paramDesc(0, "buildId", "Id to make sure client and server come from the same build"));
- addMethod(new PersistenceProviderMethod("initialize", this));
- addMethod(new PersistenceProviderMethod("getPartitionStates", this, "", "IS"));
- addMethod(new PersistenceProviderMethod("listBuckets", this, "l", "L")
- .paramDesc("partitionId", "The partition to list buckets for")
- .returnDesc("bucketIds", "An array of bucketids"));
- addMethod(new PersistenceProviderMethod("getModifiedBuckets", this, "", "L")
- .returnDesc("bucketIds", "An array of bucketids"));
- addMethod(new PersistenceProviderMethod("setClusterState", this, "x")
- .paramDesc("clusterState", "The updated cluster state"));
- addMethod(new BucketProviderMethod("setActiveState", this, "b")
- .paramDesc("bucketState", "The new state (active/not active)"));
- addMethod(new BucketProviderMethod("getBucketInfo", this, "", "iiiiibb")
- .returnDesc("checksum", "The bucket checksum")
- .returnDesc("documentCount", "The number of unique documents stored in the bucket")
- .returnDesc("documentSize", "The size of the unique documents")
- .returnDesc("entryCount", "The number of entries (inserts/removes) in the bucket")
- .returnDesc("usedSize", "The number of bytes used by the bucket in total")
- .returnDesc("ready", "Whether the bucket is \"ready\" for external reads or not")
- .returnDesc("active", "Whether the bucket has been activated for external reads or not"));
- addMethod(new TimestampedProviderMethod("put", this, "x")
- .paramDesc("document", "The serialized document"));
- addMethod(new TimestampedProviderMethod("removeById", this, "s", "b")
- .paramDesc("documentId", "The ID of the document to remove")
- .returnDesc("existed", "Whether or not the document existed"));
- addMethod(new TimestampedProviderMethod("removeIfFound", this, "s", "b")
- .paramDesc("documentId", "The ID of the document to remove")
- .returnDesc("existed", "Whether or not the document existed"));
- addMethod(new TimestampedProviderMethod("update", this, "x", "l")
- .paramDesc("update", "The document update to apply")
- .returnDesc("existingTimestamp", "The timestamp of the document that the update was applied to, or 0 if it didn't exist"));
- addMethod(new BucketProviderMethod("flush", this));
- addMethod(new BucketProviderMethod("get", this, "ss", "lx")
- .paramDesc("fieldSet", "A set of fields to return")
- .paramDesc("documentId", "The document ID to fetch")
- .returnDesc("timestamp", "The timestamp of the document fetched")
- .returnDesc("document", "A serialized document"));
- addMethod(new BucketProviderMethod("createIterator", this, "ssllLb", "l")
- .paramDesc("fieldSet", "A set of fields to return")
- .paramDesc("documentSelectionString", "Document selection to match with")
- .paramDesc("timestampFrom", "lowest timestamp to include")
- .paramDesc("timestampTo", "Highest timestamp to include")
- .paramDesc("timestampSubset", "Array of timestamps to include")
- .paramDesc("includedVersions", "Document versions to include")
- .returnDesc("iteratorId", "An iterator id to use for further calls to iterate and destroyIterator"));
- addMethod(new PersistenceProviderMethod("iterate", this, "ll", "LISXb")
- .paramDesc("iteratorId", "An iterator id previously returned by createIterator")
- .paramDesc("maxByteSize", "The maximum number of bytes to return in this call (approximate)")
- .returnDesc("timestampArray", "Array of timestamps for DocEntries")
- .returnDesc("flagArray", "Array of flags for DocEntries")
- .returnDesc("docIdArray", "Array of document ids for DocEntries")
- .returnDesc("docArray", "Array of documents for DocEntries")
- .returnDesc("completed", "Whether or not iteration completed"));
- addMethod(new PersistenceProviderMethod("destroyIterator", this, "l")
- .paramDesc("iteratorId", "An iterator id previously returned by createIterator"));
- addMethod(new BucketProviderMethod("createBucket", this));
- addMethod(new BucketProviderMethod("deleteBucket", this));
- addMethod(new BucketProviderMethod("split", this, "llll")
- .paramDesc("target1Bucket", "Bucket id of first split target")
- .paramDesc("target1Partition", "Partition id of first split target")
- .paramDesc("target2Bucket", "Bucket id of second split target")
- .paramDesc("target2Partition", "Partition id of second split target"));
- addMethod(new PersistenceProviderMethod("join", this, "llllll")
- .paramDesc("source1Bucket", "Bucket id of first source bucket")
- .paramDesc("source1Partition", "Partition id of first source bucket")
- .paramDesc("source1Bucket", "Bucket id of second source bucket")
- .paramDesc("source1Partition", "Partition id of second source bucket")
- .paramDesc("source1Bucket", "Bucket id of target bucket")
- .paramDesc("source1Partition", "Partition id of target bucket"));
- addMethod(new BucketProviderMethod("move", this, "l")
- .paramDesc("partitionId", "The partition to move the bucket to"));
- addMethod(new BucketProviderMethod("maintain", this, "b")
- .paramDesc("maintenanceLevel", "LOW or HIGH maintenance"));
- addMethod(new TimestampedProviderMethod("removeEntry", this));
-
- start();
- started = false;
- }
- }
-
- public void RPC_connect(Request req) {
- }
-
- public void addResult(Result result, Request req) {
- req.returnValues().add(new Int8Value((byte) result.getErrorType().ordinal()));
- req.returnValues().add(new StringValue(result.getErrorMessage()));
- }
-
- public void RPC_initialize(Request req) {
- addResult(provider.initialize(), req);
- }
-
- public void RPC_getPartitionStates(Request req) {
- PartitionStateListResult result = provider.getPartitionStates();
- addResult(result, req);
-
- int[] states = new int[result.getPartitionStates().size()];
- String[] reasons = new String[result.getPartitionStates().size()];
-
- for (int i = 0; i < states.length; ++i) {
- states[i] = result.getPartitionStates().get(i).getState().ordinal();
- reasons[i] = result.getPartitionStates().get(i).getReason();
- }
-
- req.returnValues().add(new Int32Array(states));
- req.returnValues().add(new StringArray(reasons));
- }
-
- void addBucketIdListResult(BucketIdListResult result, Request req) {
- addResult(result, req);
-
- long[] retVal = new long[result.getBuckets().size()];
- for (int i = 0; i < retVal.length; ++i) {
- retVal[i] = result.getBuckets().get(i).getRawId();
- }
-
- req.returnValues().add(new Int64Array(retVal));
- }
-
- public void RPC_listBuckets(Request req) {
- addBucketIdListResult(provider.listBuckets((short) req.parameters().get(0).asInt64()), req);
- }
-
- public void RPC_setClusterState(Request req) throws java.text.ParseException {
- ClusterStateImpl state = new ClusterStateImpl(req.parameters().get(0).asData());
- addResult(provider.setClusterState(state), req);
- }
-
- Bucket getBucket(Request req, int index) {
- return new Bucket((short)req.parameters().get(index + 1).asInt64(),
- new BucketId(req.parameters().get(index).asInt64()));
- }
-
- public void RPC_setActiveState(Request req) {
- try {
- addResult(provider.setActiveState(getBucket(req, 0),
- BucketInfo.ActiveState.values()[req.parameters().get(2).asInt8()]), req);
- } catch (Exception e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- }
-
- public void RPC_getBucketInfo(Request req) {
- BucketInfoResult result = provider.getBucketInfo(getBucket(req, 0));
-
- addResult(result, req);
- req.returnValues().add(new Int32Value(result.getBucketInfo().getChecksum()));
- req.returnValues().add(new Int32Value(result.getBucketInfo().getDocumentCount()));
- req.returnValues().add(new Int32Value(result.getBucketInfo().getDocumentSize()));
- req.returnValues().add(new Int32Value(result.getBucketInfo().getEntryCount()));
- req.returnValues().add(new Int32Value(result.getBucketInfo().getUsedSize()));
- req.returnValues().add(new Int8Value(result.getBucketInfo().isReady() ? (byte)1 : (byte)0));
- req.returnValues().add(new Int8Value(result.getBucketInfo().isActive() ? (byte)1 : (byte)0));
- }
-
- public void RPC_put(Request req) {
- try {
- GrowableByteBuffer buffer = new GrowableByteBuffer(ByteBuffer.wrap(req.parameters().get(3).asData()));
- Document doc = new Document(DocumentDeserializerFactory.create42(docTypeManager, buffer));
- addResult(provider.put(getBucket(req, 0), req.parameters().get(2).asInt64(), doc), req);
- } catch (Exception e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- }
-
- public void addRemoveResult(RemoveResult result, Request req) {
- addResult(result, req);
- req.returnValues().add(new Int8Value(result.wasFound() ? (byte)1 : (byte)0));
- }
-
- public void RPC_removeById(Request req) {
- addRemoveResult(
- provider.remove(
- getBucket(req, 0),
- req.parameters().get(2).asInt64(),
- new DocumentId(req.parameters().get(3).asString())), req);
- }
-
- public void RPC_removeIfFound(Request req) {
- addRemoveResult(
- provider.removeIfFound(
- getBucket(req, 0),
- req.parameters().get(2).asInt64(),
- new DocumentId(req.parameters().get(3).asString())), req);
- }
-
- public void RPC_removeEntry(Request req) {
- addResult(
- provider.removeEntry(
- getBucket(req, 0),
- req.parameters().get(2).asInt64()), req);
- }
-
- public void RPC_update(Request req) {
- try {
- GrowableByteBuffer buffer = new GrowableByteBuffer(ByteBuffer.wrap(req.parameters().get(3).asData()));
- DocumentUpdate update = new DocumentUpdate(DocumentDeserializerFactory.createHead(docTypeManager, buffer));
- UpdateResult result = provider.update(getBucket(req, 0), req.parameters().get(2).asInt64(), update);
- addResult(result, req);
-
- req.returnValues().add(new Int64Value(result.getExistingTimestamp()));
- } catch (Exception e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- }
-
- public void RPC_flush(Request req) {
- addResult(provider.flush(getBucket(req, 0)), req);
- }
-
- FieldSet getFieldSet(Request req, int index) {
- return new AllFields();
-
- //return new FieldSetRepo().parse(docTypeManager, req.parameters().get(index).asString());
- }
-
- byte[] serializeDocument(Document doc) {
- if (doc != null) {
- GrowableByteBuffer buf = new GrowableByteBuffer();
- DocumentSerializer serializer = DocumentSerializerFactory.create42(buf);
- doc.serialize(serializer);
- buf.flip();
- return buf.array();
- } else {
- return new byte[0];
- }
- }
-
- public void RPC_get(Request req) {
- GetResult result = provider.get(getBucket(req, 0),
- getFieldSet(req, 2),
- new DocumentId(req.parameters().get(3).asString()));
- addResult(result, req);
- req.returnValues().add(new Int64Value(result.getLastModifiedTimestamp()));
- req.returnValues().add(new DataValue(serializeDocument(result.getDocument())));
- }
-
- public void RPC_createIterator(Request req) {
- try {
- TreeSet<Long> timestampSet = new TreeSet<Long>();
- long[] timestamps = req.parameters().get(6).asInt64Array();
- for (long l : timestamps) {
- timestampSet.add(l);
- }
-
- Selection selection;
- if (timestamps.length > 0) {
- selection = new Selection(timestampSet);
- } else {
- selection = new Selection(
- req.parameters().get(3).asString(),
- req.parameters().get(4).asInt64(),
- req.parameters().get(5).asInt64());
- }
-
- CreateIteratorResult result = provider.createIterator(
- getBucket(req, 0),
- getFieldSet(req, 2),
- selection,
- PersistenceProvider.IncludedVersions.values()[req.parameters().get(7).asInt8()]);
-
- addResult(result, req);
- req.returnValues().add(new Int64Value(result.getIteratorId()));
- } catch (ParseException e) {
- addResult(new Result(Result.ErrorType.PERMANENT_ERROR, "Unparseable document selection expression"), req);
- req.returnValues().add(new Int64Value(0));
- } catch (Exception e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- }
-
- public void RPC_iterate(Request req) {
- try {
- long iteratorId = req.parameters().get(0).asInt64();
- long maxByteSize = req.parameters().get(1).asInt64();
-
- IterateResult result = provider.iterate(iteratorId, maxByteSize);
-
- addResult(result, req);
-
- int count = result.getEntries() != null ? result.getEntries().size() : 0;
- long[] timestamps = new long[count];
- int[] flags = new int[count];
- String[] docIds = new String[count];
- byte[][] documents = new byte[count][];
-
- for (int i = 0; i < count; ++i) {
- DocEntry entry = result.getEntries().get(i);
- timestamps[i] = entry.getTimestamp();
- flags[i] = entry.getType().ordinal();
-
- if (entry.getDocumentId() != null) {
- docIds[i] = entry.getDocumentId().toString();
- } else {
- docIds[i] = "";
- }
-
- if (entry.getDocument() != null) {
- documents[i] = serializeDocument(entry.getDocument());
- } else {
- documents[i] = (new byte[0]);
- }
- }
-
- req.returnValues().add(new Int64Array(timestamps));
- req.returnValues().add(new Int32Array(flags));
- req.returnValues().add(new StringArray(docIds));
- req.returnValues().add(new DataArray(documents));
- req.returnValues().add(new Int8Value(result.isCompleted() ? (byte)1 : (byte)0));
- } catch (Exception e) {
- e.printStackTrace();
- throw new RuntimeException(e);
- }
- }
-
- public void RPC_destroyIterator(Request req) {
- try {
- addResult(provider.destroyIterator(req.parameters().get(0).asInt64()), req);
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
-
- public void RPC_createBucket(Request req) {
- addResult(provider.createBucket(getBucket(req, 0)), req);
- }
-
- public void RPC_deleteBucket(Request req) {
- addResult(provider.deleteBucket(getBucket(req, 0)), req);
- }
-
- public void RPC_getModifiedBuckets(Request req) {
- addBucketIdListResult(provider.getModifiedBuckets(), req);
- }
-
- public void RPC_maintain(Request req) {
- addResult(provider.maintain(getBucket(req, 0),
- PersistenceProvider.MaintenanceLevel.values()[req.parameters().get(2).asInt8()]), req);
- }
-
- public void RPC_split(Request req) {
- addResult(provider.split(
- getBucket(req, 0),
- getBucket(req, 2),
- getBucket(req, 4)), req);
- }
-
- public void RPC_join(Request req) {
- addResult(provider.join(
- getBucket(req, 0),
- getBucket(req, 2),
- getBucket(req, 4)), req);
- }
-
- public void RPC_move(Request req) {
- addResult(provider.move(
- getBucket(req, 0),
- (short)req.parameters().get(2).asInt64()), req);
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderMethod.java b/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderMethod.java
deleted file mode 100644
index da0bf786a29..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/rpc/PersistenceProviderMethod.java
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.rpc;
-
-import com.yahoo.jrt.*;
-
-/**
- * Class to represent a JRT method used by PersistenceProviderHandler.
- */
-public class PersistenceProviderMethod extends Method {
- int nextReturnDesc = 0;
- int nextParamDesc;
-
- PersistenceProviderMethod returnDesc(String code, String text) {
- returnDesc(nextReturnDesc, code, text);
- ++nextReturnDesc;
- return this;
- }
-
- PersistenceProviderMethod paramDesc(String code, String text) {
- paramDesc(nextParamDesc, code, text);
- ++nextParamDesc;
- return this;
- }
-
- public PersistenceProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes) {
- this(name, owner, paramTypes, "");
- }
-
- public PersistenceProviderMethod(String name, PersistenceProviderHandler owner) {
- this(name, owner, "", "");
- }
-
- public PersistenceProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes, String returnTypes) {
- super("vespa.persistence." + name, paramTypes, "bs" + returnTypes, owner, "RPC_" + name);
- returnDesc("code", "Error code, or 0 if successful");
- returnDesc("message", "Error message");
- }
-
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/RPCHandler.java b/persistence/src/main/java/com/yahoo/persistence/rpc/RPCHandler.java
deleted file mode 100644
index b28579cd28e..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/rpc/RPCHandler.java
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.rpc;
-
-import com.yahoo.jrt.*;
-
-import java.util.logging.Logger;
-
-
-/**
- * A handler that can be used to register RPC function calls,
- * using Vespa JRT. To enable an RPC server, first call addMethod() any number of times,
- * then start().
- */
-public class RPCHandler {
- private final static Logger log = Logger.getLogger(RPCHandler.class.getName());
-
- private final int port;
- private final Supervisor supervisor;
- private Acceptor acceptor;
-
- public RPCHandler(int port) {
- supervisor = new Supervisor(new Transport());
- this.port = port;
- }
-
- public void start() {
- try {
- acceptor = supervisor.listen(new Spec(port));
- log.info("Listening for RPC requests on port " + port);
- } catch (Exception e) {
- throw new RuntimeException(e);
- }
- }
-
- public void addMethod(Method method) {
- supervisor.addMethod(method);
- }
-
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/TimestampedProviderMethod.java b/persistence/src/main/java/com/yahoo/persistence/rpc/TimestampedProviderMethod.java
deleted file mode 100644
index 00639601c4d..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/rpc/TimestampedProviderMethod.java
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.rpc;
-
-/**
- * Represents a JRT persistence provider method that includes a timestamp in its request
- */
-public class TimestampedProviderMethod extends BucketProviderMethod {
- public TimestampedProviderMethod(String name, PersistenceProviderHandler owner) {
- this(name, owner, "", "");
- }
-
- public TimestampedProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes) {
- this(name, owner, paramTypes, "");
- }
-
- public TimestampedProviderMethod(String name, PersistenceProviderHandler owner, String paramTypes, String returnTypes) {
- super(name, owner, "l" + paramTypes, returnTypes);
- paramDesc("timestamp", "The timestamp of the operation");
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/rpc/package-info.java b/persistence/src/main/java/com/yahoo/persistence/rpc/package-info.java
deleted file mode 100644
index 2d0b9fea6b7..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/rpc/package-info.java
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-@PublicApi
-package com.yahoo.persistence.rpc;
-
-import com.yahoo.api.annotations.PublicApi;
-import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/AbstractPersistenceProvider.java b/persistence/src/main/java/com/yahoo/persistence/spi/AbstractPersistenceProvider.java
deleted file mode 100644
index bd7c2bd7823..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/AbstractPersistenceProvider.java
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi;
-
-import com.yahoo.document.*;
-import com.yahoo.document.fieldset.AllFields;
-import com.yahoo.persistence.spi.*;
-import com.yahoo.persistence.spi.result.*;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- * An abstract class that implements persistence provider functionality that some providers
- * may not have use for.
- */
-public abstract class AbstractPersistenceProvider implements PersistenceProvider {
- @Override
- public Result initialize() {
- return new Result();
- }
-
- @Override
- public PartitionStateListResult getPartitionStates() {
- List<PartitionState> partitionStates = new ArrayList<PartitionState>();
- partitionStates.add(new PartitionState(PartitionState.State.UP, ""));
- return new PartitionStateListResult(partitionStates);
- }
-
- @Override
- public Result setClusterState(ClusterState state) {
- return new Result();
- }
-
- @Override
- public Result setActiveState(Bucket bucket, BucketInfo.ActiveState active) {
- return new Result();
- }
-
-
- @Override
- public RemoveResult removeIfFound(Bucket bucket, long timestamp, DocumentId id) {
- return remove(bucket, timestamp, id);
- }
-
- @Override
- public Result removeEntry(Bucket bucket, long timestampToRemove) {
- return new Result();
- }
-
- @Override
- public Result flush(Bucket bucket) {
- return new Result();
- }
-
- @Override
- public BucketIdListResult getModifiedBuckets() {
- return new BucketIdListResult(new ArrayList<BucketId>());
- }
-
- @Override
- public Result maintain(Bucket bucket, MaintenanceLevel level) {
- return new Result();
- }
-
- @Override
- public Result move(Bucket bucket, short partitionId) {
- return new Result();
- }
-
- @Override
- public UpdateResult update(Bucket bucket, long timestamp, DocumentUpdate update) {
- GetResult result = get(bucket, new AllFields(), update.getId());
- if (result.wasFound()) {
- Document doc = result.getDocument().clone();
- update.applyTo(doc);
- put(bucket, timestamp, doc);
- return new UpdateResult(result.getLastModifiedTimestamp());
- } else {
- return new UpdateResult();
- }
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/Bucket.java b/persistence/src/main/java/com/yahoo/persistence/spi/Bucket.java
deleted file mode 100644
index ed443fa9100..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/Bucket.java
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi;
-
-import com.yahoo.document.BucketId;
-
-/**
- * @author thomasg
- */
-public class Bucket {
- BucketId bucketId;
- short partitionId;
-
- /**
- * @param partition The partition (i.e. disk) where the bucket is located
- * @param bucketId The bucket id of the bucket
- */
- public Bucket(short partition, BucketId bucketId) {
- this.partitionId = partition;
- this.bucketId = bucketId;
- }
-
- public BucketId getBucketId() { return bucketId; }
-
- public short getPartitionId() { return partitionId; }
-
- @Override
- public String toString() {
- return partitionId + "/" + bucketId;
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/BucketInfo.java b/persistence/src/main/java/com/yahoo/persistence/spi/BucketInfo.java
deleted file mode 100644
index 2105ade6206..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/BucketInfo.java
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi;
-
-/**
- * Class to represents information about the buckets stored by the persistence provider.
- */
-public class BucketInfo {
- public enum ReadyState {
- NOT_READY,
- READY
- }
-
- public enum ActiveState {
- NOT_ACTIVE,
- ACTIVE
- }
-
- /** Create an empty bucket info object. */
- public BucketInfo() {
- }
-
- /**
- * @param checksum The checksum of the bucket contents.
- * @param docCount The number of documents stored
- * @param docSize The total size of the documents stored
- * @param metaEntryCount The number of different versions of documents that are stored (including document removes)
- * @param size The total size of entries in this bucket.
- * @param ready Whether the bucket is ready or not
- * @param active Whether the bucket is active or not
- */
- public BucketInfo(int checksum,
- int docCount,
- int docSize,
- int metaEntryCount,
- int size,
- ReadyState ready,
- ActiveState active) {
- this.checksum = checksum;
- this.documentCount = docCount;
- this.documentSize = docSize;
- this.entryCount = metaEntryCount;
- this.size = size;
- this.ready = ready;
- this.active = active;
- }
-
- /**
- * Constructor for bucketinfo for providers that don't care about the READY/ACTIVE paradigm.
- *
- * @param checksum The checksum of the bucket contents.
- * @param docCount The number of documents stored
- * @param docSize The total size of the documents stored
- * @param metaEntryCount The number of different versions of documents that are stored (including document removes)
- * @param size The total size of entries in this bucket.
- */
- public BucketInfo(int checksum,
- int docCount,
- int docSize,
- int metaEntryCount,
- int size) {
- this(checksum, docCount, docSize, metaEntryCount, size, ReadyState.NOT_READY, ActiveState.NOT_ACTIVE);
- }
-
- public boolean equals(BucketInfo other) {
- return checksum == other.checksum &&
- documentCount == other.documentCount &&
- documentSize == other.documentSize &&
- entryCount == other.entryCount &&
- size == other.size &&
- ready == other.ready &&
- active == other.active;
- }
-
- @Override
- public String toString() {
- String retVal = "BucketInfo(";
- if (valid()) {
- retVal += "crc " + checksum + ", uniqueCount " + documentCount +
- ", uniqueSize " + documentSize + ", entry count " + entryCount +
- ", usedSize " + size + ", ready " + isReady() +
- ", active " + isActive();
- } else {
- retVal += "invalid";
- }
- retVal += ")";
- return retVal;
- }
-
- /**
- * @return Get the checksum of the bucket. An empty bucket should have checksum of
- * zero. The checksum should only include data from the latest versions of
- * non-removed documents. Otherwise, the checksum implementation is up to
- * the persistence implementation. (Unless one wants to run multiple
- * persistence implementations in the same cluster, in which case they have
- * to match).
- */
- public int getChecksum() { return checksum; }
-
- /**
- * The number of unique documents that have not been removed from the
- * bucket. A unique document count above the splitting threshold will cause
- * the bucket to be split.
- */
- public int getDocumentCount() { return documentCount; }
-
- /**
- * The total size of all the unique documents in this bucket. A size above
- * the splitting threshold will cause the bucket to be split. Knowing size
- * is optional, but a bucket with more than zero unique documents should
- * always return a non-zero value for size. If splitting on size is not
- * required or desired, a simple solution here is to just set the number
- * of unique documents as the size.
- */
- public int getDocumentSize() { return documentSize; }
-
- /**
- * The number of meta entries in the bucket. For a persistence layer
- * keeping history of data (multiple versions of a document or remove
- * entries), it may use more meta entries in the bucket than it has unique
- * documents If the sum of meta entries from a pair of joinable buckets go
- * below the join threshold, the buckets will be joined.
- */
- public int getEntryCount() { return entryCount; }
-
- /**
- * The total size used by the persistence layer to store all the documents
- * for a given bucket. Possibly excluding pre-allocated space not currently
- * in use. Knowing size is optional, but if the bucket contains more than
- * zero meta entries, it should return a non-zero value for used size.
- */
- public int getUsedSize() { return size; }
-
- /**
- * @return Returns true if this bucket is considered "ready". Ready buckets
- * are prioritized before non-ready buckets to be set active.
- */
- public boolean isReady() { return ready == ReadyState.READY; }
-
- /**
- * @return Returns true if this bucket is "active". If it is, the bucket should
- * be included in read operations outside of the persistence provider API.
- */
- public boolean isActive() { return active == ActiveState.ACTIVE; }
-
- public boolean valid()
- { return (documentCount > 0 || documentSize == 0); }
-
- int checksum = 0;
- int documentCount = 0;
- int documentSize = 0;
- int entryCount = 0;
- int size = 0;
- ReadyState ready = ReadyState.READY;
- ActiveState active = ActiveState.NOT_ACTIVE;
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/ClusterState.java b/persistence/src/main/java/com/yahoo/persistence/spi/ClusterState.java
deleted file mode 100644
index dbf7a90f49c..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/ClusterState.java
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi;
-
-/**
- * Class that allows a provider to figure out if the node is currently up, the cluster is up and/or a
- * given bucket should be "ready" given the state.
- */
-public interface ClusterState {
- /**
- * Returns true if the system has been set up to have
- * "ready" nodes, and the given bucket is in the ideal state
- * for readiness.
- *
- * @param bucket The bucket to check.
- * @return Returns true if the bucket should be set to "ready".
- */
- public boolean shouldBeReady(Bucket bucket);
-
- /**
- * @return Returns false if the cluster has been deemed down. This can happen
- * if the fleet controller has detected that too many nodes are down
- * compared to the complete list of nodes, and deigns the system to be
- * unusable.
- */
- public boolean clusterUp();
-
- /**
- * @return Returns false if this node has been set in a state where it should not
- * receive external load.
- */
- public boolean nodeUp();
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/ClusterStateImpl.java b/persistence/src/main/java/com/yahoo/persistence/spi/ClusterStateImpl.java
deleted file mode 100644
index 5d20f14dcc8..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/ClusterStateImpl.java
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi;
-
-import com.yahoo.vdslib.distribution.Distribution;
-import com.yahoo.vdslib.state.Node;
-import com.yahoo.vdslib.state.NodeType;
-
-import java.nio.ByteBuffer;
-import java.text.ParseException;
-
-/**
- * Implementation of the cluster state interface for deserialization from RPC.
- */
-public class ClusterStateImpl implements com.yahoo.persistence.spi.ClusterState {
- com.yahoo.vdslib.state.ClusterState clusterState;
- short clusterIndex;
- Distribution distribution;
-
- public ClusterStateImpl(byte[] serialized) throws ParseException {
- ByteBuffer buf = ByteBuffer.wrap(serialized);
-
- int clusterStateLength = buf.getInt();
- byte[] clusterState = new byte[clusterStateLength];
- buf.get(clusterState);
-
- clusterIndex = buf.getShort();
-
- int distributionLength = buf.getInt();
- byte[] distribution = new byte[distributionLength];
- buf.get(distribution);
-
- this.clusterState = new com.yahoo.vdslib.state.ClusterState(new String(clusterState));
- this.distribution = new Distribution("raw:" + new String(distribution));
- }
-
- @Override
- public boolean shouldBeReady(Bucket bucket) {
- return true;
- }
-
- @Override
- public boolean clusterUp() {
- return clusterState != null && clusterState.getClusterState().oneOf("u");
- }
-
- @Override
- public boolean nodeUp() {
- return !clusterUp() && clusterState.getNodeState(new Node(NodeType.STORAGE, clusterIndex)).getState().oneOf("uir");
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/DocEntry.java b/persistence/src/main/java/com/yahoo/persistence/spi/DocEntry.java
deleted file mode 100644
index 2217bbcb0f7..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/DocEntry.java
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi;
-
-import com.yahoo.document.Document;
-import com.yahoo.document.DocumentId;
-
-/**
- * Class that represents an entry retrieved by iterating.
- */
-public class DocEntry implements Comparable<DocEntry> {
-
-
- @Override
- public int compareTo(DocEntry docEntry) {
- return new Long(timestamp).compareTo(docEntry.getTimestamp());
- }
-
- public enum Type {
- PUT_ENTRY,
- REMOVE_ENTRY
- }
-
- long timestamp;
- Type type;
-
- DocumentId docId;
- Document document;
-
- public DocEntry(long timestamp, Document doc, Type type, DocumentId docId) {
- this.timestamp = timestamp;
- this.type = type;
- this.docId = docId;
- document = doc;
- }
-
-
- public DocEntry(long timestamp, Document doc) {
- this(timestamp, doc, Type.PUT_ENTRY, doc.getId());
- }
-
- public DocEntry(long timestamp, DocumentId docId) {
- this(timestamp, null, Type.REMOVE_ENTRY, docId);
- }
-
- public DocEntry(long timestamp, Type type) {
- this(timestamp, null, type, null);
- }
-
- public Type getType() { return type; }
-
- public long getTimestamp() { return timestamp; }
-
- public DocumentId getDocumentId() { return docId; }
-
- public Document getDocument() { return document; }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/PartitionState.java b/persistence/src/main/java/com/yahoo/persistence/spi/PartitionState.java
deleted file mode 100644
index b998989fce7..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/PartitionState.java
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi;
-
-/**
-* @author thomasg
-*/
-public class PartitionState {
- public PartitionState(State state, String reason) {
- this.state = state;
- this.reason = reason;
-
- if (reason == null || state == null) {
- throw new IllegalArgumentException("State and reason must be non-null");
- }
- }
-
- public State getState() { return state; }
- public String getReason() { return reason; }
-
- State state;
- String reason;
-
- public enum State {
- UP,
- DOWN
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/PersistenceProvider.java b/persistence/src/main/java/com/yahoo/persistence/spi/PersistenceProvider.java
deleted file mode 100644
index b33e631aadb..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/PersistenceProvider.java
+++ /dev/null
@@ -1,382 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi;
-
-import com.yahoo.document.Document;
-import com.yahoo.document.DocumentId;
-import com.yahoo.document.DocumentUpdate;
-import com.yahoo.document.fieldset.FieldSet;
-import com.yahoo.persistence.spi.result.*;
-
-/**
- * <p>
- * This interface is the basis for a persistence provider in Vespa.
- * A persistence provider is used by Vespa Storage to provide an elastic stateful system.
- * </p>
- * <p>
- * The Vespa distribution mechanisms are based on distributing "buckets"
- * between the nodes in the system. A bucket is an abstract concept that
- * groups a set of documents. The persistence provider can choose freely
- * how to implement a bucket, but it needs to be able to access a bucket as
- * a unit. The placement of these units is controlled by the distributors.
- * </p>
- * <p>
- * A persistence provider may support multiple "partitions". One example of
- * a partition is a physical disk, but the exact meaning of "partitions"
- * is left to the provider. It must be able to report to the service layer though.
- * </p>
- * <p>
- * All operations return a Result object. The base Result class only encapsulates
- * potential errors, which can be <i>transient</i>, <i>permanent</i> or <i>fatal</i>.
- * Transient errors are errors where it's conceivable that retrying the operation
- * would lead to success, either on this data copy or on others. Permanent errors
- * are errors where the request itself is faulty. Fatal errors are transient errors
- * that have uncovered a problem with this instance of the provider (such as a failing disk),
- * and where the provider wants the process to be shut down.
- * </p>
- * <p>
- * All write operations have a timestamp. This timestamp is generated
- * by the distributor, and is guaranteed to be unique for the bucket we're
- * writing to. A persistence provider is required to store "entries" for each of
- * these operations, and associate the timestamp with that entry.
- * Iteration code can retrieve these entries, including entries
- * for remove operations. The provider is not required to keep any history beyond
- * the last operation that was performed on a given document.
- * </p>
- * <p>
- * The contract for all write operations is that after returning from the function,
- * provider read methods (get, iterate) should reflect the modified state.
- * </p>
- */
-public interface PersistenceProvider
-{
- /**
- * The different types of entries that can be returned
- * from an iterator.
- */
- public enum IncludedVersions {
- NEWEST_DOCUMENT_ONLY,
- NEWEST_DOCUMENT_OR_REMOVE,
- ALL_VERSIONS
- }
-
- /**
- * The different kinds of maintenance we can do.
- * LOW maintenance may be run more often than HIGH.
- */
- public enum MaintenanceLevel {
- LOW,
- HIGH
- }
-
- /**
- * Initializes the persistence provider. This function is called exactly once when
- * the persistence provider starts. If any error is returned here, the service layer
- * will shut down.
- */
- Result initialize();
-
- /**
- * Returns a list of the partitions available,
- * and which are up and down.
- */
- PartitionStateListResult getPartitionStates();
-
- /**
- * Return list of buckets that provider has stored on the given partition.
- */
- BucketIdListResult listBuckets(short partition);
-
- /**
- * Updates the persistence provider with the last cluster state.
- * Only cluster states that are relevant for the provider are supplied (changes
- * that relate to the distributor will not cause an update here).
- */
- Result setClusterState(ClusterState state);
-
- /**
- * Sets the bucket state to active or inactive. After this returns,
- * other buckets may be deactivated, so the node must be able to serve
- * the data from its secondary index or get reduced coverage.
- */
- Result setActiveState(Bucket bucket, BucketInfo.ActiveState active);
-
- /**
- * If the bucket doesn't exist, return empty bucket info.
- */
- BucketInfoResult getBucketInfo(Bucket bucket);
-
- /**
- * Stores the given document.
- *
- * @param timestamp The timestamp for the new bucket entry.
- */
- Result put(Bucket bucket, long timestamp, Document doc);
-
- /**
- * <p>
- * Removes the document referenced by the document id.
- * It is strongly recommended to keep entries for the removes for
- * some period of time. For recovery to work properly, a node that
- * has been down for a longer period of time than that should be totally
- * erased. If not, documents that have been removed but have documents
- * on nodes that have been down will be reinserted.
- * </p>
- * <p>
- * Postconditions:
- * A successful invocation of this function must add the remove to the
- * bucket regardless of whether the document existed. More specifically,
- * iterating over the bucket while including removes after this call
- * shall yield a remove-entry at the given timestamp for the given
- * document identifier as part of its result set. The remove entry
- * shall be added even if there exist removes for the same document id
- * at other timestamps in the bucket.
- * </p>
- * <p>
- * Also, if the given timestamp is higher to or equal than any
- * existing put entry, those entries should not be returned in subsequent
- * get calls. If the timestamp is lower than an existing put entry,
- * those entries should still be available.
- * </p>
- * @param timestamp The timestamp for the new bucket entry.
- * @param id The ID to remove
- */
- RemoveResult remove(Bucket bucket, long timestamp, DocumentId id);
- /**
- * <p>
- * See remove()
- * </p>
- * <p>
- * Used for external remove operations. removeIfFound() has no extra
- * postconditions than remove, but it may choose to <i>not</i> include
- * a remove entry if there didn't already exist a put entry for the given
- * entry. It is recommended, but not required, to not insert entries in this
- * case, though if remove entries are considered critical it might be better
- * to insert them in both cases.
- * </p>
- * @param timestamp The timestamp for the new bucket entry.
- * @param id The ID to remove
- */
- RemoveResult removeIfFound(Bucket bucket, long timestamp, DocumentId id);
-
- /**
- * Removes the entry with the given timestamp. This is usually used to revert
- * previously performed operations. This operation should be
- * successful even if there doesn't exist such an entry.
- */
- Result removeEntry(Bucket bucket, long timestampToRemove);
-
- /**
- * Partially modifies a document referenced by the document update.
- *
- * @param timestamp The timestamp to use for the new update entry.
- * @param update The document update to apply to the stored document.
- */
- UpdateResult update(Bucket bucket, long timestamp, DocumentUpdate update);
-
- /**
- * <p>
- * For providers that store data persistently on disk, the contract of
- * flush is that data has been stored persistently so that if the node should
- * restart, the data will be available.
- * </p>
- * <p>
- * The service layer may choose to batch certain commands. This means
- * that the service layer will lock the bucket only once, then perform several
- * commands, and finally get the bucket info from the bucket, and then flush it.
- * This can be used to improve performance by caching the modifications, and
- * persisting them to disk only when flush is called. The service layer guarantees
- * that after one of these operations, flush() is called, regardless of whether
- * the operation succeeded or not, before another bucket is processed in the same
- * worker thread. The following operations can be batched and have the guarantees
- * above:
- * - put
- * - get
- * - remove
- * - removeIfFound
- * - update
- * - removeEntry
- * </p>
- */
- Result flush(Bucket bucket);
-
- /**
- * Retrieves the latest version of the document specified by the
- * document id. If no versions were found, or the document was removed,
- * the result should be successful, but contain no document (see GetResult).
- *
- * @param fieldSet A set of fields that should be retrieved.
- * @param id The document id to retrieve.
- */
- GetResult get(Bucket bucket, FieldSet fieldSet, DocumentId id);
-
- /**
- * Create an iterator for a given bucket and selection criteria, returning
- * a unique, non-zero iterator identifier that can be used by the caller as
- * an argument to iterate and destroyIterator.
- *
- * Each successful invocation of createIterator shall be paired with
- * a later invocation of destroyIterator by the caller to ensure
- * resources are freed up. NOTE: this may not apply in a shutdown
- * situation due to service layer communication channels closing down.
- *
- * It is assumed that a successful invocation of this function will result
- * in some state being established in the persistence provider, holding
- * the information required to match iterator ids up to their current
- * iteration progress and selection criteria. destroyIterator will NOT
- * be called when createIterator returns an error.
- *
- * @param selection Selection criteria used to limit the subset of
- * the bucket's documents that will be returned by the iterator. The
- * provider implementation may use these criteria to optimize its
- * operation as it sees fit, as long as doing so does not violate
- * selection correctness.
- * @return A process-globally unique iterator identifier iff the result
- * is successful and internal state has been created, otherwise an
- * error. Identifier must be non-zero, as zero is used internally to
- * signify an invalid iterator ID.
- */
- CreateIteratorResult createIterator(Bucket bucket,
- FieldSet fieldSet,
- Selection selection,
- IncludedVersions versions);
-
- /**
- * Iterate over a bucket's document space using a valid iterator id
- * received from createIterator. Each invocation of iterate upon an
- * iterator that has not yet fully exhausted its document space shall
- * return a minimum of 1 document entry per IterateResult to ensure progress.
- * An implementation shall limit the result set per invocation to document
- * entries whose combined in-memory/serialized size is a "soft" maximum of
- * maxByteSize. More specifically, the sum of getSize() over all returned
- * DocEntry instances should be &lt;= (maxByteSize + the size of the last
- * document in the result set). This special case allows for limiting the
- * result set both by observing "before the fact" that the next potential
- * document to include would exceed the max size and by observing "after
- * the fact" that the document that was just added caused the max size to
- * be exceeded.
- * However, if a document exceeds maxByteSize and not including it implies
- * the result set would be empty, it must be included in the result anyway
- * in order to not violate the progress requirement.
- *
- * The caller shall not make any assumptions on whether or not documents
- * that arrive to--or are removed from--the bucket in the time between
- * separate invocations of iterate for the same iterator id will show up
- * in the results, assuming that these documents do not violate the
- * selection criteria. This means that there is no requirement for
- * maintaining a "snapshot" view of the bucket's state as it existed upon
- * the initial createIterator call. Neither shall the caller make any
- * assumptions on the ordering of the returned documents.
- *
- * The IterateResult shall--for each document entry that matches the
- * selection criteria and falls within the maxByteSize limit mentioned
- * above--return the following information in its result:
- *
- * -- For non-removed entries: A DocEntry where getDocumentOperation() will
- * return a valid DocumentPut instance and getSize() will return the
- * serialized size of the document.
- * -- For removed entries: A DocEntry where getDocumentId() will
- * return a valid document identifier. Remove entries shall not
- * contain document instances.
- * -- For meta entries: A DocEntry that shall not contain a document
- * instance nor should it include a document id instance (if
- * included, would be ignored by the service layer in any context
- * where metadata-only is requested).
- *
- * The service layer shall guarantee that no two invocations of iterate
- * will happen simultaneously/concurrently for the same iterator id.
- *
- * Upon a successful invocation of iterate, the persistence provider shall
- * update its internal state to account for the progress made so that new
- * invocations will cover a new subset of the document space. When an
- * IterateResult contains the final documents for the iteration, i.e. the
- * iterator has reached its end, setCompleted() must be set on the result
- * to indicate this to the caller. Calling iterate on an already completed
- * iterator must only set this flag on the result and return without any
- * documents.
- *
- * @param iteratorId An iterator ID returned by a previous call to createIterator
- * @param maxByteSize An indication of the maximum number of bytes that should be returned.
- */
- IterateResult iterate(long iteratorId, long maxByteSize);
-
- /**
- * <p>
- * Destroys the iterator specified by the given id.
- * </p>
- * <p>
- * IMPORTANT: this method has different invocation semantics than
- * the other provider methods! It may be called from the context of
- * ANY service layer thread, NOT just from the thread in which
- * createIterator was invoked! The reason for this is because internal
- * iterator destroy messages aren't mapped to partition threads in the
- * way other messages are due to their need for guaranteed execution.
- * </p>
- * <p>
- * This in turn implies that iterator states must be shared between
- * partitions (and thus protected against cross-partition concurrent
- * access).
- * </p>
- * @param iteratorId The iterator id previously returned by createIterator.
- */
- Result destroyIterator(long iteratorId);
-
- /**
- * Tells the provider that the given bucket has been created in the
- * service layer. There is no requirement to do anything here.
- */
- Result createBucket(Bucket bucket);
-
- /**
- * Deletes the given bucket and all entries contained in that bucket.
- * After this operation has succeeded, a restart of the provider should
- * not yield the bucket in getBucketList().
- */
- Result deleteBucket(Bucket bucket);
-
- /**
- * This function is called continuously by the service layer. It allows
- * the provider to signify whether it has done any out-of-band changes to
- * buckets that need to be recognized by the rest of the system. The service
- * layer will proceed to call getBucketInfo() on each of the returned buckets.
- * After a call to getModifiedBuckets(), the provider should clear it's list
- * of modified buckets, so that the next call does not return the same buckets.
- */
- BucketIdListResult getModifiedBuckets();
-
- /**
- * Allows the provider to do periodic maintenance and verification.
- *
- * @param level The level of maintenance to do. LOW maintenance is scheduled more
- * often than HIGH maintenance, so should be cheaper.
- */
- Result maintain(Bucket bucket, MaintenanceLevel level);
-
- /**
- * <p>
- * Splits the source bucket into the two target buckets.
- * After the split, all documents belonging to target1 should be
- * in that bucket, and all documents belonging to target2 should be
- * there. The information in SplitResult should reflect
- * this.
- * </p>
- * <p>
- * Before calling this function, the service layer will iterate the bucket
- * to figure out which buckets the source should be split into. This may
- * result in splitting more than one bucket bit at a time.
- * </p>
- */
- Result split(Bucket source, Bucket target1, Bucket target2);
-
- /**
- * Joins two buckets into one. After the join, all documents from
- * source1 and source2 should be stored in the target bucket.
- */
- Result join(Bucket source1, Bucket source2, Bucket target);
-
- /**
- * Moves a bucket from one partition to another.
- *
- * @param partitionId The partition to move to.
- */
- Result move(Bucket bucket, short partitionId);
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/Selection.java b/persistence/src/main/java/com/yahoo/persistence/spi/Selection.java
deleted file mode 100644
index 4bcf75fa322..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/Selection.java
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi;
-
-import com.yahoo.document.Document;
-import com.yahoo.document.DocumentPut;
-import com.yahoo.document.select.DocumentSelector;
-import com.yahoo.document.select.Result;
-import com.yahoo.document.select.parser.ParseException;
-
-import java.util.Set;
-
-/**
- * Class used when iterating to represent a selection of entries to be returned.
- *
- * This class is likely to be replaced by a more generic selection AST in the near future.
- */
-public class Selection {
- DocumentSelector documentSelection = null;
- long fromTimestamp = 0;
- long toTimestamp = Long.MAX_VALUE;
- Set<Long> timestampSubset = null;
-
- public Selection(String documentSelection, long fromTimestamp, long toTimestamp) throws ParseException {
- this.documentSelection = new DocumentSelector(documentSelection);
- this.fromTimestamp = fromTimestamp;
- this.toTimestamp = toTimestamp;
- }
-
- public Selection(Set<Long> timestampSubset) {
- this.timestampSubset = timestampSubset;
- }
-
- public boolean requiresFields() {
- return documentSelection != null;
- }
-
- public Set<Long> getTimestampSubset() {
- return timestampSubset;
- }
-
- /**
- * Returns true if the entry matches the selection criteria given.
- */
- public boolean match(Document doc, long timestamp) {
- if (timestamp < fromTimestamp) {
- return false;
- }
-
- if (timestamp > toTimestamp) {
- return false;
- }
-
- if (timestampSubset != null && !timestampSubset.contains(timestamp)) {
- return false;
- }
-
- if (documentSelection != null && doc != null && !documentSelection.accepts(new DocumentPut(doc)).equals(Result.TRUE)) {
- return false;
- }
-
- return true;
- }
-
- /**
- * Returns true if the entry matches the timestamp ranges/subsets specified in the selection.
- */
- public boolean match(long timestamp) {
- return match(null, timestamp);
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/conformance/ConformanceTest.java b/persistence/src/main/java/com/yahoo/persistence/spi/conformance/ConformanceTest.java
deleted file mode 100644
index f3e7166b078..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/conformance/ConformanceTest.java
+++ /dev/null
@@ -1,1605 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.conformance;
-
-import com.yahoo.document.BucketId;
-import com.yahoo.document.*;
-import com.yahoo.document.datatypes.IntegerFieldValue;
-import com.yahoo.document.datatypes.StringFieldValue;
-import com.yahoo.document.fieldset.AllFields;
-import com.yahoo.document.fieldset.FieldSet;
-import com.yahoo.document.update.AssignValueUpdate;
-import com.yahoo.document.update.FieldUpdate;
-import com.yahoo.persistence.spi.*;
-import com.yahoo.persistence.spi.result.*;
-import junit.framework.TestCase;
-
-import java.util.*;
-
-public abstract class ConformanceTest extends TestCase {
- TestDocMan testDocMan = new TestDocMan();
-
- public interface PersistenceProviderFactory {
- public PersistenceProvider createProvider(DocumentTypeManager manager);
- public boolean supportsActiveState();
- }
-
- PersistenceProvider init(PersistenceProviderFactory factory) {
- return factory.createProvider(testDocMan);
- }
-
- // TODO: should invoke some form of destruction method on the provider after a test
- protected void doConformanceTest(PersistenceProviderFactory factory) throws Exception {
- testBasics(init(factory));
- testPut(init(factory));
- testRemove(init(factory));
- testGet(init(factory));
- testUpdate(init(factory));
-
- testListBuckets(init(factory));
- testBucketInfo(init(factory));
- testOrderIndependentBucketInfo(init(factory));
- testPutNewDocumentVersion(init(factory));
- testPutOlderDocumentVersion(init(factory));
- testPutDuplicate(init(factory));
- testDeleteBucket(init(factory));
- testSplitNormalCase(init(factory));
- testSplitTargetExists(init(factory));
- testJoinNormalCase(init(factory));
- testJoinTargetExists(init(factory));
- testJoinOneBucket(init(factory));
-
- testMaintain(init(factory));
- testGetModifiedBuckets(init(factory));
-
- if (factory.supportsActiveState()) {
- testBucketActivation(init(factory));
- testBucketActivationSplitAndJoin(init(factory));
- }
-
- testIterateAllDocs(init(factory));
- testIterateAllDocsNewestVersionOnly(init(factory));
- testIterateCreateIterator(init(factory));
- testIterateDestroyIterator(init(factory));
- testIterateWithUnknownId(init(factory));
- testIterateChunked(init(factory));
- testIterateMatchTimestampRange(init(factory));
- testIterateMaxByteSize(init(factory));
- testIterateExplicitTimestampSubset(init(factory));
- testIterateMatchSelection(init(factory));
- testIterateRemoves(init(factory));
- testIterationRequiringDocumentIdOnlyMatching(init(factory));
- testIterateAlreadyCompleted(init(factory));
- testIterateEmptyBucket(init(factory));
-
- testRemoveMerge(init(factory));
- }
-
- List<DocEntry> iterateBucket(PersistenceProvider spi, Bucket bucket, PersistenceProvider.IncludedVersions versions) throws Exception {
- List<DocEntry> ret = new ArrayList<DocEntry>();
-
- CreateIteratorResult iter = spi.createIterator(
- bucket,
- new AllFields(),
- new Selection("", 0, Long.MAX_VALUE),
- versions);
-
- assertFalse(iter.hasError());
-
- while (true) {
- IterateResult result = spi.iterate(iter.getIteratorId(), Long.MAX_VALUE);
- assertFalse(result.hasError());
-
- ret.addAll(result.getEntries());
-
- if (result.isCompleted()) {
- break;
- }
- }
-
- Collections.sort(ret);
-
- return ret;
- }
-
- void testBasicsIteration(PersistenceProvider provider, Bucket bucket, Document doc1, Document doc2, boolean includeRemoves) throws Exception {
- Selection selection = new Selection("true", 0, Long.MAX_VALUE);
-
- CreateIteratorResult iter = provider.createIterator(
- bucket,
- new AllFields(),
- selection,
- includeRemoves ? PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE : PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- assertTrue(!iter.hasError());
-
- IterateResult result = provider.iterate(iter.getIteratorId(), Long.MAX_VALUE);
- assertTrue(!result.hasError());
- assertTrue(result.isCompleted());
- assertEquals(new Result(), provider.destroyIterator(iter.getIteratorId()));
-
- long timeRemoveDoc1 = 0;
- long timeDoc1 = 0;
- long timeDoc2 = 0;
-
- for (DocEntry entry : result.getEntries()) {
- assertNotNull(entry.getDocumentId());
-
- if (entry.getDocumentId().equals(doc1.getId())) {
- assertTrue("Got removed document 1 when iterating without removes", includeRemoves);
-
- if (entry.getType() == DocEntry.Type.REMOVE_ENTRY) {
- timeRemoveDoc1 = entry.getTimestamp();
- } else {
- timeDoc1 = entry.getTimestamp();
- }
- } else if (entry.getDocumentId().equals(doc2.getId())) {
- assertEquals(DocEntry.Type.PUT_ENTRY, entry.getType());
- timeDoc2 = entry.getTimestamp();
- } else {
- assertFalse("Unknown document " + entry.getDocumentId(), false);
- }
- }
-
- assertEquals(2, timeDoc2);
- assertTrue(timeDoc1 == 0 || timeRemoveDoc1 != 0);
- }
-
- void testBasics(PersistenceProvider provider) throws Exception {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Document doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
-
- assertEquals(new Result(), provider.createBucket(bucket));
- assertEquals(new Result(), provider.put(bucket, 1, doc1));
- assertEquals(new Result(), provider.put(bucket, 2, doc2));
-
- assertEquals(new RemoveResult(true), provider.remove(bucket, 3, doc1.getId()));
- assertEquals(new Result(), provider.flush(bucket));
-
- testBasicsIteration(provider, bucket, doc1, doc2, false);
- testBasicsIteration(provider, bucket, doc1, doc2, true);
- }
-
- void testListBuckets(PersistenceProvider provider) {
- BucketId bucketId1 = new BucketId(8, 0x01);
- BucketId bucketId2 = new BucketId(8, 0x02);
- BucketId bucketId3 = new BucketId(8, 0x03);
-
- Bucket bucket1 = new Bucket((short)0, bucketId1);
- Bucket bucket2 = new Bucket((short)0, bucketId2);
- Bucket bucket3 = new Bucket((short)0, bucketId3);
-
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Document doc2 = testDocMan.createRandomDocumentAtLocation(0x02, 2);
- Document doc3 = testDocMan.createRandomDocumentAtLocation(0x03, 3);
-
- provider.createBucket(bucket1);
- provider.createBucket(bucket2);
- provider.createBucket(bucket3);
-
- provider.put(bucket1, 1, doc1);
- provider.flush(bucket1);
-
- provider.put(bucket2, 2, doc2);
- provider.flush(bucket2);
-
- provider.put(bucket3, 3, doc3);
- provider.flush(bucket3);
-
- BucketIdListResult result = provider.listBuckets((short)0);
- assertEquals(3, result.getBuckets().size());
- assertTrue(result.getBuckets().contains(bucketId1));
- assertTrue(result.getBuckets().contains(bucketId2));
- assertTrue(result.getBuckets().contains(bucketId3));
- }
-
- void testBucketInfo(PersistenceProvider provider) {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Document doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
-
- provider.createBucket(bucket);
- provider.put(bucket, 2, doc2);
-
- BucketInfo info = provider.getBucketInfo(bucket).getBucketInfo();
- provider.flush(bucket);
-
- assertEquals(1, info.getDocumentCount());
- assertTrue(info.getChecksum() != 0);
-
- provider.put(bucket, 3, doc1);
- BucketInfo info2 = provider.getBucketInfo(bucket).getBucketInfo();
- provider.flush(bucket);
-
- assertEquals(2, info2.getDocumentCount());
- assertTrue(info2.getChecksum() != 0);
- assertTrue(info.getChecksum() != info2.getChecksum());
-
- provider.put(bucket, 4, doc1);
- BucketInfo info3 = provider.getBucketInfo(bucket).getBucketInfo();
- provider.flush(bucket);
-
- assertEquals(2, info3.getDocumentCount());
- assertTrue(info3.getChecksum() != 0);
- assertTrue(info2.getChecksum() != info3.getChecksum());
-
- provider.remove(bucket, 5, doc1.getId());
- BucketInfo info4 = provider.getBucketInfo(bucket).getBucketInfo();
- provider.flush(bucket);
-
- assertEquals(1, info4.getDocumentCount());
- assertTrue(info4.getChecksum() != 0);
- }
-
-
- void testOrderIndependentBucketInfo(PersistenceProvider spi)
- {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Document doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
- spi.createBucket(bucket);
-
- int checksumOrdered = 0;
-
- {
- spi.put(bucket, 2, doc1);
- spi.put(bucket, 3, doc2);
- spi.flush(bucket);
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
-
- checksumOrdered = info.getChecksum();
- assertTrue(checksumOrdered != 0);
- }
-
- spi.deleteBucket(bucket);
- spi.createBucket(bucket);
- assertEquals(0, spi.getBucketInfo(bucket).getBucketInfo().getChecksum());
-
- int checksumUnordered = 0;
-
- {
- // Swap order of puts
- spi.put(bucket, 3, doc2);
- spi.put(bucket, 2, doc1);
- spi.flush(bucket);
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
-
- checksumUnordered = info.getChecksum();
- assertTrue(checksumUnordered != 0);
- }
-
- assertEquals(checksumOrdered, checksumUnordered);
- }
-
- void testPut(PersistenceProvider spi) {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- spi.createBucket(bucket);
-
- assertEquals(new Result(), spi.put(bucket, 3, doc1));
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
-
- assertEquals(1, (int)info.getDocumentCount());
- assertTrue(info.getEntryCount() >= info.getDocumentCount());
- assertTrue(info.getChecksum() != 0);
- assertTrue(info.getDocumentSize() > 0);
- assertTrue(info.getUsedSize() >= info.getDocumentSize());
- }
-
- void testPutNewDocumentVersion(PersistenceProvider spi) {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Document doc2 = doc1.clone();
-
-
- doc2.setFieldValue("content", new StringFieldValue("hiho silver"));
- spi.createBucket(bucket);
-
- Result result = spi.put(bucket, 3, doc1);
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
-
- assertEquals(1, (int)info.getDocumentCount());
- assertTrue(info.getEntryCount() >= info.getDocumentCount());
- assertTrue(info.getChecksum() != 0);
- assertTrue(info.getDocumentSize() > 0);
- assertTrue(info.getUsedSize() >= info.getDocumentSize());
- }
-
- result = spi.put(bucket, 4, doc2);
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
-
- assertEquals(1, (int)info.getDocumentCount());
- assertTrue(info.getEntryCount() >= info.getDocumentCount());
- assertTrue(info.getChecksum() != 0);
- assertTrue(info.getDocumentSize() > 0);
- assertTrue(info.getUsedSize() >= info.getDocumentSize());
- }
-
- GetResult gr = spi.get(bucket, new AllFields(), doc1.getId());
-
- assertEquals(Result.ErrorType.NONE, gr.getErrorType());
- assertEquals(4, gr.getLastModifiedTimestamp());
- assertEquals(doc2, gr.getDocument());
- }
-
- void testPutOlderDocumentVersion(PersistenceProvider spi) {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Document doc2 = doc1.clone();
- doc2.setFieldValue("content", new StringFieldValue("hiho silver"));
- spi.createBucket(bucket);
-
- Result result = spi.put(bucket, 5, doc1);
- BucketInfo info1 = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
- {
- assertEquals(1, info1.getDocumentCount());
- assertTrue(info1.getEntryCount() >= info1.getDocumentCount());
- assertTrue(info1.getChecksum() != 0);
- assertTrue(info1.getDocumentSize() > 0);
- assertTrue(info1.getUsedSize() >= info1.getDocumentSize());
- }
-
- result = spi.put(bucket, 4, doc2);
- {
- BucketInfo info2 = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
-
- assertEquals(1, info2.getDocumentCount());
- assertTrue(info2.getEntryCount() >= info1.getDocumentCount());
- assertEquals(info1.getChecksum(), info2.getChecksum());
- assertEquals(info1.getDocumentSize(), info2.getDocumentSize());
- assertTrue(info2.getUsedSize() >= info1.getDocumentSize());
- }
-
- GetResult gr = spi.get(bucket, new AllFields(), doc1.getId());
-
- assertEquals(Result.ErrorType.NONE, gr.getErrorType());
- assertEquals(5, gr.getLastModifiedTimestamp());
- assertEquals(doc1, gr.getDocument());
- }
-
- void testPutDuplicate(PersistenceProvider spi) throws Exception {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- spi.createBucket(bucket);
- assertEquals(new Result(), spi.put(bucket, 3, doc1));
-
- int checksum;
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
- assertEquals(1, (int)info.getDocumentCount());
- checksum = info.getChecksum();
- }
- assertEquals(new Result(), spi.put(bucket, 3, doc1));
-
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
- assertEquals(1, (int)info.getDocumentCount());
- assertEquals(checksum, info.getChecksum());
- }
-
- List<DocEntry> entries = iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.ALL_VERSIONS);
- assertEquals(1, entries.size());
- }
-
-
- void testRemove(PersistenceProvider spi) throws Exception {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- spi.createBucket(bucket);
-
- Result result = spi.put(bucket, 3, doc1);
-
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
-
- assertEquals(1, (int)info.getDocumentCount());
- assertTrue(info.getChecksum() != 0);
-
- List<DocEntry> entries = iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
- assertEquals(1, entries.size());
- }
-
- RemoveResult result2 = spi.removeIfFound(bucket, 5, doc1.getId());
-
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
-
- assertEquals(0, info.getDocumentCount());
- assertEquals(0, info.getChecksum());
- assertEquals(true, result2.wasFound());
- }
-
- assertEquals(0, iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY).size());
- assertEquals(1, iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE).size());
-
- RemoveResult result3 = spi.remove(bucket, 7, doc1.getId());
-
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
-
- assertEquals(0, (int)info.getDocumentCount());
- assertEquals(0, (int)info.getChecksum());
- assertEquals(false, result3.wasFound());
- }
-
- Result result4 = spi.put(bucket, 9, doc1);
- spi.flush(bucket);
-
- assertTrue(!result4.hasError());
-
- RemoveResult result5 = spi.remove(bucket, 9, doc1.getId());
-
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- spi.flush(bucket);
-
- assertEquals(0, (int)info.getDocumentCount());
- assertEquals(0, (int)info.getChecksum());
- assertEquals(true, result5.wasFound());
- assertTrue(!result5.hasError());
- }
-
- GetResult getResult = spi.get(bucket, new AllFields(), doc1.getId());
- assertEquals(Result.ErrorType.NONE, getResult.getErrorType());
- assertEquals(0, getResult.getLastModifiedTimestamp());
- assertNull(getResult.getDocument());
- }
-
- void testRemoveMerge(PersistenceProvider spi) throws Exception {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- DocumentId removeId = new DocumentId("userdoc:fraggle:1:rock");
- spi.createBucket(bucket);
-
- Result result = spi.put(bucket, 3, doc1);
-
- // Remove a document that does not exist
- {
- RemoveResult removeResult = spi.remove(bucket, 10, removeId);
- spi.flush(bucket);
- assertEquals(Result.ErrorType.NONE, removeResult.getErrorType());
- assertEquals(false, removeResult.wasFound());
- }
- // In a merge case, there might be multiple removes for the same document
- // if resending et al has taken place. These must all be added.
- {
- RemoveResult removeResult = spi.remove(bucket,
- 5,
- removeId);
- spi.flush(bucket);
- assertEquals(Result.ErrorType.NONE, removeResult.getErrorType());
- assertEquals(false, removeResult.wasFound());
- }
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
-
- assertEquals(1, info.getDocumentCount());
- assertEquals(3, info.getEntryCount());
- assertTrue(info.getChecksum() != 0);
- }
- assertFalse(spi.flush(bucket).hasError());
-
- List<DocEntry> entries = iterateBucket(spi, bucket, PersistenceProvider.IncludedVersions.ALL_VERSIONS);
- // Remove entries should exist afterwards
- assertEquals(3, entries.size());
- for (int i = 2; i > 0; --i) {
- assertEquals((i == 2) ? 10 : 5, entries.get(i).getTimestamp());
- assertTrue(entries.get(i).getType() == DocEntry.Type.REMOVE_ENTRY);
- assertNotNull(entries.get(i).getDocumentId());
- assertEquals(removeId, entries.get(i).getDocumentId());
- }
-
- // Result tagged as document not found if CONVERT_PUT_TO_REMOVE flag is given and
- // timestamp does not exist, and PERSIST_NONEXISTING is not set
-
- // CONVERTED_REMOVE flag should be set if CONVERT_PUT_TO_REMOVE is set.
-
- // Trying to turn a remove without CONVERTED_REMOVE flag set into
- // unrevertable remove should work. (Should likely log warning, but we want
- // to do it anyways, in order to get bucket copies in sync if it happens)
-
- // Timestamps should not have been changed.
-
- // Verify that a valid and altered bucket info is returned on success
- }
-
- void testUpdate(PersistenceProvider spi) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
- spi.createBucket(bucket);
-
- DocumentType docType = testDocMan.getDocumentType("testdoctype1");
-
- DocumentUpdate update = new DocumentUpdate(docType, doc1.getId());
- FieldUpdate fieldUpdate = FieldUpdate.create(docType.getField("headerval"));
- fieldUpdate.addValueUpdate(AssignValueUpdate.createAssign(new IntegerFieldValue(42)));
- update.addFieldUpdate(fieldUpdate);
-
- {
- UpdateResult result = spi.update(bucket, 3, update);
- spi.flush(bucket);
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(0, result.getExistingTimestamp());
- }
-
- spi.put(bucket, 3, doc1);
- {
- UpdateResult result = spi.update(bucket, 4, update);
- spi.flush(bucket);
-
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(3, result.getExistingTimestamp());
- }
-
- {
- GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
-
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(4, result.getLastModifiedTimestamp());
- assertEquals(new IntegerFieldValue(42), result.getDocument().getFieldValue("headerval"));
- }
-
- spi.remove(bucket, 5, doc1.getId());
- spi.flush(bucket);
-
- {
- GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
-
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(0, result.getLastModifiedTimestamp());
- assertNull(result.getDocument());
- }
-
-
- {
- UpdateResult result = spi.update(bucket, 6, update);
- spi.flush(bucket);
-
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(0, result.getExistingTimestamp());
- }
- }
-
- void testGet(PersistenceProvider spi) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- spi.createBucket(bucket);
-
- {
- GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
-
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(0, result.getLastModifiedTimestamp());
- }
-
- spi.put(bucket, 3, doc1);
- spi.flush(bucket);
-
- {
- GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
- assertEquals(doc1, result.getDocument());
- assertEquals(3, result.getLastModifiedTimestamp());
- }
-
- spi.remove(bucket,
- 4,
- doc1.getId());
- spi.flush(bucket);
-
- {
- GetResult result = spi.get(bucket, new AllFields(), doc1.getId());
-
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(0, result.getLastModifiedTimestamp());
- }
- }
-
- void
- testIterateCreateIterator(PersistenceProvider spi) throws Exception
- {
-
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- spi.createBucket(bucket);
-
- CreateIteratorResult result = spi.createIterator(bucket, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- // Iterator ID 0 means invalid iterator, so cannot be returned
- // from a successful createIterator call.
- assertTrue(result.getIteratorId() != 0);
-
- spi.destroyIterator(result.getIteratorId());
- }
-
- void
- testIterateDestroyIterator(PersistenceProvider spi) throws Exception
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- {
- IterateResult result = spi.iterate(iter.getIteratorId(), 1024);
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- }
-
- {
- Result destroyResult = spi.destroyIterator(iter.getIteratorId());
- assertTrue(!destroyResult.hasError());
- }
- // Iteration should now fail
- {
- IterateResult result = spi.iterate(iter.getIteratorId(), 1024);
- assertEquals(Result.ErrorType.PERMANENT_ERROR, result.getErrorType());
- }
- {
- Result destroyResult = spi.destroyIterator(iter.getIteratorId());
- assertTrue(!destroyResult.hasError());
- }
- }
-
- List<DocEntry> feedDocs(PersistenceProvider spi, Bucket bucket,
- int numDocs,
- int minSize,
- int maxSize)
- {
- List<DocEntry> docs = new ArrayList<DocEntry>();
-
- for (int i = 0; i < numDocs; ++i) {
- Document doc = testDocMan.createRandomDocumentAtLocation(
- bucket.getBucketId().getId(),
- i,
- minSize,
- maxSize);
- Result result = spi.put(bucket, 1000 + i, doc);
- assertTrue(!result.hasError());
- docs.add(new DocEntry(1000 + i, doc));
- }
- assertEquals(new Result(), spi.flush(bucket));
- return docs;
- }
-
- void
- testIterateWithUnknownId(PersistenceProvider spi)
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- IterateResult result = spi.iterate(123, 1024);
- assertEquals(Result.ErrorType.PERMANENT_ERROR, result.getErrorType());
- }
-
- /**
- * Do a full bucket iteration, returning a vector of DocEntry chunks.
- */
- List<IterateResult> doIterate(PersistenceProvider spi,
- long id,
- long maxByteSize,
- int maxChunks)
- {
- List<IterateResult> chunks = new ArrayList<IterateResult>();
-
- while (true) {
- IterateResult result = spi.iterate(id, maxByteSize);
- assertFalse(result.hasError());
-
- assertTrue(result.getEntries().size() > 0);
- chunks.add(result);
-
- if (result.isCompleted()
- || (maxChunks != 0 && chunks.size() >= maxChunks))
- {
- break;
- }
- }
- return chunks;
- }
-
- boolean containsDocument(List<IterateResult> chunks, Document doc) {
- for (IterateResult i : chunks) {
- for (DocEntry e : i.getEntries()) {
- if (e.getType() == DocEntry.Type.PUT_ENTRY && e.getDocument() != null && e.getDocument().equals(doc)) {
- return true;
- }
- }
- }
-
- return false;
- }
-
- boolean containsRemove(List<IterateResult> chunks, String docId) {
- for (IterateResult i : chunks) {
- for (DocEntry e : i.getEntries()) {
- if (e.getType() == DocEntry.Type.REMOVE_ENTRY && e.getDocumentId() != null && e.getDocumentId().toString().equals(docId)) {
- return true;
- }
- }
- }
-
- return false;
- }
-
- void verifyDocs(List<DocEntry> docs, List<IterateResult> chunks, List<String> removes) {
- int docCount = 0;
- int removeCount = 0;
- for (IterateResult result : chunks) {
- for (DocEntry e : result.getEntries()) {
- if (e.getType() == DocEntry.Type.PUT_ENTRY) {
- ++docCount;
- } else {
- ++removeCount;
- }
- }
- }
-
- assertEquals(docs.size(), docCount);
-
- for (DocEntry e : docs) {
- assertTrue(e.getDocument().toString(), containsDocument(chunks, e.getDocument()));
- }
-
- if (removes != null) {
- assertEquals(removes.size(), removeCount);
-
- for (String docId : removes) {
- assertTrue(docId, containsRemove(chunks, docId));
- }
- }
- }
-
- void verifyDocs(List<DocEntry> docs, List<IterateResult> chunks) {
- verifyDocs(docs, chunks, null);
- }
-
-
- void testIterateAllDocs(PersistenceProvider spi) throws Exception {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- List<DocEntry> docs = feedDocs(spi, b, 100, 110, 110);
-
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
- verifyDocs(docs, chunks);
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
- void testIterateAllDocsNewestVersionOnly(PersistenceProvider spi) throws Exception {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- List<DocEntry> docs = feedDocs(spi, b, 100, 110, 110);
- List<DocEntry> newDocs = new ArrayList<DocEntry>();
-
- for (DocEntry e : docs) {
- Document newDoc = e.getDocument().clone();
- newDoc.setFieldValue("headerval", new IntegerFieldValue(5678 + (int)e.getTimestamp()));
- spi.put(b, 1000 + e.getTimestamp(), newDoc);
- newDocs.add(new DocEntry(1000 + e.getTimestamp(), newDoc));
- }
-
- spi.flush(b);
-
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
-
- verifyDocs(newDocs, chunks);
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
-
- void testIterateChunked(PersistenceProvider spi) throws Exception
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- List<DocEntry> docs = feedDocs(spi, b, 100, 110, 110);
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 1, 0);
- assertEquals(100, chunks.size());
- verifyDocs(docs, chunks);
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
- void
- testIterateMaxByteSize(PersistenceProvider spi) throws Exception
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- List<DocEntry> docs = feedDocs(spi, b, 100, 4096, 4096);
-
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE), PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- // Docs are 4k each and iterating with max combined size of 10k.
- // Should receive no more than 3 docs in each chunk
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 10000, 0);
- assertTrue("Expected >= 33 chunks, got " + chunks.size(), chunks.size() >= 33);
- verifyDocs(docs, chunks);
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
- void
- testIterateMatchTimestampRange(PersistenceProvider spi) throws Exception
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- List<DocEntry> docsToVisit = new ArrayList<DocEntry>();
-
- long fromTimestamp = 1010;
- long toTimestamp = 1060;
-
- for (int i = 0; i < 99; i++) {
- long timestamp = 1000 + i;
-
- Document doc = testDocMan.createRandomDocumentAtLocation(1, timestamp);
-
- spi.put(b, timestamp, doc);
- if (timestamp >= fromTimestamp && timestamp <= toTimestamp) {
- docsToVisit.add(new DocEntry(timestamp, doc));
- }
- }
- spi.flush(b);
-
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", fromTimestamp, toTimestamp),
- PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 2048, 0);
- verifyDocs(docsToVisit, chunks);
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
- void testIterateExplicitTimestampSubset(PersistenceProvider spi) throws Exception {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- List<DocEntry> docsToVisit = new ArrayList<DocEntry>();
- Set<Long> timestampsToVisit = new TreeSet<Long>();
- List<String> removes = new ArrayList<String>();
-
-
- for (int i = 0; i < 99; i++) {
- long timestamp = 1000 + i;
- Document doc = testDocMan.createRandomDocumentAtLocation(1, timestamp, 110, 110);
-
- spi.put(b, timestamp, doc);
- if (timestamp % 3 == 0) {
- docsToVisit.add(new DocEntry(timestamp, doc));
- timestampsToVisit.add(timestamp);
- }
- }
-
- assertTrue(spi.remove(b, 2000, docsToVisit.get(0).getDocument().getId()).wasFound());
- spi.flush(b);
-
- timestampsToVisit.add(2000l);
- removes.add(docsToVisit.get(0).getDocument().getId().toString());
- timestampsToVisit.remove(docsToVisit.get(0).getTimestamp());
- docsToVisit.remove(docsToVisit.get(0));
-
- // When selecting a timestamp subset, we should ignore IncludedVersions, and return all matches regardless.
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection(timestampsToVisit),
- PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 2048, 0);
-
- verifyDocs(docsToVisit, chunks, removes);
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
- void testIterateRemoves(PersistenceProvider spi) throws Exception {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- int docCount = 10;
- List<DocEntry> docs = feedDocs(spi, b, docCount, 100, 100);
- List<String> removedDocs = new ArrayList<String>();
- List<DocEntry> nonRemovedDocs = new ArrayList<DocEntry>();
-
- for (int i = 0; i < docCount; ++i) {
- if (i % 3 == 0) {
- removedDocs.add(docs.get(i).getDocument().getId().toString());
- assertTrue(spi.remove(b, 2000 + i, docs.get(i).getDocument().getId()).wasFound());
- } else {
- nonRemovedDocs.add(docs.get(i));
- }
- }
- spi.flush(b);
-
- // First, test iteration without removes
- {
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE),
- PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
- verifyDocs(nonRemovedDocs, chunks);
- spi.destroyIterator(iter.getIteratorId());
- }
-
- {
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE),
- PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
- verifyDocs(nonRemovedDocs, chunks, removedDocs);
- spi.destroyIterator(iter.getIteratorId());
- }
- }
-
- void testIterateMatchSelection(PersistenceProvider spi) throws Exception
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- List<DocEntry> docsToVisit = new ArrayList<DocEntry>();
-
- for (int i = 0; i < 99; i++) {
- Document doc = testDocMan.createRandomDocumentAtLocation(1, 1000 + i, 110, 110);
- doc.setFieldValue("headerval", new IntegerFieldValue(i));
-
- spi.put(b, 1000 + i, doc);
- if ((i % 3) == 0) {
- docsToVisit.add(new DocEntry(1000 + i, doc));
- }
- }
- spi.flush(b);
-
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("testdoctype1.headerval % 3 == 0", 0, Long.MAX_VALUE),
- PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_ONLY);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 2048, 0);
- verifyDocs(docsToVisit, chunks);
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
- void testIterationRequiringDocumentIdOnlyMatching(PersistenceProvider spi) throws Exception
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- feedDocs(spi, b, 100, 100, 100);
- DocumentId removedId = new DocumentId("userdoc:blarg:1:unknowndoc");
-
- // Document does not already exist, remove should create a
- // remove entry for it regardless.
- assertFalse(spi.remove(b, 2000, removedId).wasFound());
- spi.flush(b);
-
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("id == '" + removedId.toString() + "'", 0, Long.MAX_VALUE),
- PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
- List<String> removes = new ArrayList<String>();
- List<DocEntry> docs = new ArrayList<DocEntry>();
-
- removes.add(removedId.toString());
- verifyDocs(docs, chunks, removes);
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
- void testIterateAlreadyCompleted(PersistenceProvider spi) throws Exception
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- List<DocEntry> docs = feedDocs(spi, b, 10, 100, 100);
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE),
- PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE);
-
- List<IterateResult> chunks = doIterate(spi, iter.getIteratorId(), 4096, 0);
- verifyDocs(docs, chunks);
-
- IterateResult result = spi.iterate(iter.getIteratorId(), 4096);
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(0, result.getEntries().size());
- assertTrue(result.isCompleted());
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
- void testIterateEmptyBucket(PersistenceProvider spi) throws Exception
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
-
- CreateIteratorResult iter = spi.createIterator(b, new AllFields(), new Selection("", 0, Long.MAX_VALUE),
- PersistenceProvider.IncludedVersions.NEWEST_DOCUMENT_OR_REMOVE);
-
- IterateResult result = spi.iterate(iter.getIteratorId(), 4096);
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(0, result.getEntries().size());
- assertTrue(result.isCompleted());
-
- spi.destroyIterator(iter.getIteratorId());
- }
-
- void testDeleteBucket(PersistenceProvider spi) throws Exception
- {
- Bucket b = new Bucket((short)0, new BucketId(8, 0x1));
- spi.createBucket(b);
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
-
- spi.put(b, 3, doc1);
- spi.flush(b);
-
- spi.deleteBucket(b);
- GetResult result = spi.get(b, new AllFields(), doc1.getId());
-
- assertEquals(Result.ErrorType.NONE, result.getErrorType());
- assertEquals(0, result.getLastModifiedTimestamp());
- }
-
-
- void testSplitNormalCase(PersistenceProvider spi)
- {
- Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x2));
- Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x6));
-
- Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x2));
- spi.createBucket(bucketC);
-
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- spi.put(bucketC, i + 1, doc1);
- }
-
- for (int i = 10; i < 20; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- spi.put(bucketC, i + 1, doc1);
- }
-
- spi.flush(bucketC);
-
- spi.split(bucketC, bucketA, bucketB);
- testSplitNormalCasePostCondition(spi, bucketA, bucketB, bucketC);
- /*if (_factory->hasPersistence()) {
- spi.reset();
- document::TestDocMan testDocMan2;
- spi = getSpi(*_factory, testDocMan2);
- testSplitNormalCasePostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan2);
- }*/
- }
-
-
- void testSplitNormalCasePostCondition(PersistenceProvider spi, Bucket bucketA,
- Bucket bucketB, Bucket bucketC)
- {
- assertEquals(10, spi.getBucketInfo(bucketA).getBucketInfo().
- getDocumentCount());
- assertEquals(10, spi.getBucketInfo(bucketB).getBucketInfo().
- getDocumentCount());
-
- FieldSet fs = new AllFields();
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- assertTrue(spi.get(bucketA, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketC, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketB, fs, doc1.getId()).hasDocument());
- }
-
- for (int i = 10; i < 20; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- assertTrue(spi.get(bucketB, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketC, fs, doc1.getId()).hasDocument());
- }
- }
-
- void testSplitTargetExists(PersistenceProvider spi) throws Exception
- {
- Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x2));
- Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x6));
- spi.createBucket(bucketB);
-
- Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x2));
- spi.createBucket(bucketC);
-
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- spi.put(bucketC, i + 1, doc1);
- }
-
- spi.flush(bucketC);
-
- for (int i = 10; i < 20; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- spi.put(bucketB, i + 1, doc1);
- }
- spi.flush(bucketB);
- assertTrue(!spi.getBucketInfo(bucketB).getBucketInfo().isActive());
-
- for (int i = 10; i < 20; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- spi.put(bucketC, i + 1, doc1);
- }
- spi.flush(bucketC);
-
- for (int i = 20; i < 25; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- spi.put(bucketB, i + 1, doc1);
- }
-
- spi.flush(bucketB);
-
- spi.split(bucketC, bucketA, bucketB);
- testSplitTargetExistsPostCondition(spi, bucketA, bucketB, bucketC);
- /*if (_factory->hasPersistence()) {
- spi.reset();
- document::TestDocMan testDocMan2;
- spi = getSpi(*_factory, testDocMan2);
- testSplitTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan2);
- }*/
- }
-
-
- void testSplitTargetExistsPostCondition(PersistenceProvider spi, Bucket bucketA,
- Bucket bucketB, Bucket bucketC)
- {
- assertEquals(10, spi.getBucketInfo(bucketA).getBucketInfo().
- getDocumentCount());
- assertEquals(15, spi.getBucketInfo(bucketB).getBucketInfo().
- getDocumentCount());
-
- FieldSet fs = new AllFields();
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- assertTrue(spi.get(bucketA, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketC, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketB, fs, doc1.getId()).hasDocument());
- }
-
- for (int i = 10; i < 25; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- assertTrue(spi.get(bucketB, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketC, fs, doc1.getId()).hasDocument());
- }
- }
-
- void testJoinNormalCase(PersistenceProvider spi) throws Exception
- {
- Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x02));
- spi.createBucket(bucketA);
-
- Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x06));
- spi.createBucket(bucketB);
-
- Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x02));
-
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- spi.put(bucketA, i + 1, doc1);
- }
-
- spi.flush(bucketA);
-
- for (int i = 10; i < 20; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- spi.put(bucketB, i + 1, doc1);
- }
-
- spi.flush(bucketB);
-
- spi.join(bucketA, bucketB, bucketC);
- testJoinNormalCasePostCondition(spi, bucketA, bucketB, bucketC);
- /*if (_factory->hasPersistence()) {
- spi.reset();
- document::TestDocMan testDocMan2;
- spi = getSpi(*_factory, testDocMan2);
- testJoinNormalCasePostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan2);
- }*/
- }
-
- void testJoinNormalCasePostCondition(PersistenceProvider spi, Bucket bucketA,
- Bucket bucketB, Bucket bucketC)
- {
- assertEquals(20, spi.getBucketInfo(bucketC).
- getBucketInfo().getDocumentCount());
-
- FieldSet fs = new AllFields();
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
- }
-
- for (int i = 10; i < 20; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketB, fs, doc1.getId()).hasDocument());
- }
- }
-
- void testJoinTargetExists(PersistenceProvider spi) throws Exception
- {
- Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x02));
- spi.createBucket(bucketA);
-
- Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x06));
- spi.createBucket(bucketB);
-
- Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x02));
- spi.createBucket(bucketC);
-
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- spi.put(bucketA, i + 1, doc1);
- }
-
- spi.flush(bucketA);
-
- for (int i = 10; i < 20; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- spi.put(bucketB, i + 1, doc1);
- }
- spi.flush(bucketB);
-
- for (int i = 20; i < 30; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- spi.put(bucketC, i + 1, doc1);
- }
- spi.flush(bucketC);
-
- spi.join(bucketA, bucketB, bucketC);
- testJoinTargetExistsPostCondition(spi, bucketA, bucketB, bucketC);
- /*if (_factory->hasPersistence()) {
- spi.reset();
- document::TestDocMan testDocMan2;
- spi = getSpi(*_factory, testDocMan2);
- testJoinTargetExistsPostCondition(spi, bucketA, bucketB, bucketC,
- testDocMan2);
- }*/
- }
-
- void testJoinTargetExistsPostCondition(PersistenceProvider spi, Bucket bucketA,
- Bucket bucketB, Bucket bucketC)
- {
- assertEquals(30, spi.getBucketInfo(bucketC).getBucketInfo().
- getDocumentCount());
-
- FieldSet fs = new AllFields();
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
- }
-
- for (int i = 10; i < 20; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketB, fs, doc1.getId()).hasDocument());
- }
-
- for (int i = 20; i < 30; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x06, i);
- assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
- }
- }
-
- void testJoinOneBucket(PersistenceProvider spi) throws Exception
- {
- Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x02));
- spi.createBucket(bucketA);
-
- Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x06));
- Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x02));
-
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- spi.put(bucketA, i + 1, doc1);
- }
- spi.flush(bucketA);
-
- spi.join(bucketA, bucketB, bucketC);
- testJoinOneBucketPostCondition(spi, bucketA, bucketC);
- /*if (_factory->hasPersistence()) {
- spi.reset();
- document::TestDocMan testDocMan2;
- spi = getSpi(*_factory, testDocMan2);
- testJoinOneBucketPostCondition(spi, bucketA, bucketC, testDocMan2);
- }*/
- }
-
- void testJoinOneBucketPostCondition(PersistenceProvider spi, Bucket bucketA, Bucket bucketC)
- {
- assertEquals(10, spi.getBucketInfo(bucketC).getBucketInfo().
- getDocumentCount());
-
- FieldSet fs = new AllFields();
- for (int i = 0; i < 10; ++i) {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, i);
- assertTrue(spi.get(bucketC, fs, doc1.getId()).hasDocument());
- assertTrue(!spi.get(bucketA, fs, doc1.getId()).hasDocument());
- }
- }
-
-
- void testMaintain(PersistenceProvider spi) throws Exception {
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
-
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
- spi.createBucket(bucket);
-
- spi.put(bucket, 3, doc1);
- spi.flush(bucket);
-
- assertEquals(Result.ErrorType.NONE,
- spi.maintain(bucket, PersistenceProvider.MaintenanceLevel.LOW).getErrorType());
- }
-
- void testGetModifiedBuckets(PersistenceProvider spi) throws Exception {
- assertEquals(0, spi.getModifiedBuckets().getBuckets().size());
- }
-
- void testBucketActivation(PersistenceProvider spi) throws Exception {
- Bucket bucket = new Bucket((short)0, new BucketId(8, 0x01));
-
- spi.createBucket(bucket);
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- assertTrue(!info.isActive());
- }
-
- spi.setActiveState(bucket, BucketInfo.ActiveState.ACTIVE);
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- assertTrue(info.isActive());
- }
-
- spi.setActiveState(bucket, BucketInfo.ActiveState.NOT_ACTIVE);
- {
- BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
- assertTrue(!info.isActive());
- }
- }
-
- void testBucketActivationSplitAndJoin(PersistenceProvider spi) throws Exception
- {
- Bucket bucketA = new Bucket((short)0, new BucketId(3, 0x02));
- Bucket bucketB = new Bucket((short)0, new BucketId(3, 0x06));
- Bucket bucketC = new Bucket((short)0, new BucketId(2, 0x02));
- Document doc1 = testDocMan.createRandomDocumentAtLocation(0x02, 1);
- Document doc2 = testDocMan.createRandomDocumentAtLocation(0x06, 2);
-
- spi.createBucket(bucketC);
- spi.put(bucketC, 1, doc1);
- spi.put(bucketC, 2, doc2);
- spi.flush(bucketC);
-
- spi.setActiveState(bucketC, BucketInfo.ActiveState.ACTIVE);
- assertTrue(spi.getBucketInfo(bucketC).getBucketInfo().isActive());
- spi.split(bucketC, bucketA, bucketB);
- assertTrue(spi.getBucketInfo(bucketA).getBucketInfo().isActive());
- assertTrue(spi.getBucketInfo(bucketB).getBucketInfo().isActive());
- assertTrue(!spi.getBucketInfo(bucketC).getBucketInfo().isActive());
-
- spi.setActiveState(bucketA, BucketInfo.ActiveState.NOT_ACTIVE);
- spi.setActiveState(bucketB, BucketInfo.ActiveState.NOT_ACTIVE);
- spi.join(bucketA, bucketB, bucketC);
- assertTrue(!spi.getBucketInfo(bucketA).getBucketInfo().isActive());
- assertTrue(!spi.getBucketInfo(bucketB).getBucketInfo().isActive());
- assertTrue(!spi.getBucketInfo(bucketC).getBucketInfo().isActive());
-
- spi.split(bucketC, bucketA, bucketB);
- assertTrue(!spi.getBucketInfo(bucketA).getBucketInfo().isActive());
- assertTrue(!spi.getBucketInfo(bucketB).getBucketInfo().isActive());
- assertTrue(!spi.getBucketInfo(bucketC).getBucketInfo().isActive());
-
- spi.setActiveState(bucketA, BucketInfo.ActiveState.ACTIVE);
- spi.join(bucketA, bucketB, bucketC);
- assertTrue(!spi.getBucketInfo(bucketA).getBucketInfo().isActive());
- assertTrue(!spi.getBucketInfo(bucketB).getBucketInfo().isActive());
- assertTrue(spi.getBucketInfo(bucketC).getBucketInfo().isActive());
- }
-//
-// void testRemoveEntry()
-// {
-// if (!_factory->supportsRemoveEntry()) {
-// return;
-// }
-// document::TestDocMan testDocMan;
-// _factory->clear();
-// PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
-//
-// Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
-// Document doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
-// Document doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
-// spi.createBucket(bucket);
-//
-// spi.put(bucket, 3, doc1);
-// spi.flush(bucket);
-// BucketInfo info1 = spi.getBucketInfo(bucket).getBucketInfo();
-//
-// {
-// spi.put(bucket, 4, doc2);
-// spi.flush(bucket);
-// spi.removeEntry(bucket, 4);
-// spi.flush(bucket);
-// BucketInfo info2 = spi.getBucketInfo(bucket).getBucketInfo();
-// assertEquals(info1, info2);
-// }
-//
-// // Test case where there exists a previous version of the document.
-// {
-// spi.put(bucket, 5, doc1);
-// spi.flush(bucket);
-// spi.removeEntry(bucket, 5);
-// spi.flush(bucket);
-// BucketInfo info2 = spi.getBucketInfo(bucket).getBucketInfo();
-// assertEquals(info1, info2);
-// }
-//
-// // Test case where the newest document version after removeEntrying is a remove.
-// {
-// spi.remove(bucket, 6, doc1.getId());
-// spi.flush(bucket);
-// BucketInfo info2 = spi.getBucketInfo(bucket).getBucketInfo();
-// assertEquals(0, info2.getDocumentCount());
-//
-// spi.put(bucket, 7, doc1);
-// spi.flush(bucket);
-// spi.removeEntry(bucket, 7);
-// spi.flush(bucket);
-// BucketInfo info3 = spi.getBucketInfo(bucket).getBucketInfo();
-// assertEquals(info2, info3);
-// }
-// }
-//
-}
-
-//
-//// Get number of puts and removes across all chunks (i.e. all entries)
-// size_t
-// getDocCount(const std::vector<Chunk>& chunks)
-// {
-// size_t count = 0;
-// for (size_t i=0; i<chunks.size(); ++i) {
-// count += chunks[i]._entries.size();
-// }
-// return count;
-// }
-//
-// size_t
-// getRemoveEntryCount(const std::vector<spi::DocEntry::UP>& entries)
-// {
-// size_t ret = 0;
-// for (size_t i = 0; i < entries.size(); ++i) {
-// if (entries[i]->isRemove()) {
-// ++ret;
-// }
-// }
-// return ret;
-// }
-//
-// List<DocEntry>
-// getEntriesFromChunks(const std::vector<Chunk>& chunks)
-// {
-// std::vector<spi::DocEntry::UP> ret;
-// for (size_t chunk = 0; chunk < chunks.size(); ++chunk) {
-// for (size_t i = 0; i < chunks[chunk]._entries.size(); ++i) {
-// ret.push_back(chunks[chunk]._entries[i]);
-// }
-// }
-// std::sort(ret.begin(),
-// ret.end(),
-// DocEntryIndirectTimestampComparator());
-// return ret;
-// }
-//
-//
-//
-// spi.destroyIterator(iter.getIteratorId());
-// std::sort(ret.begin(),
-// ret.end(),
-// DocEntryIndirectTimestampComparator());
-// return ret;
-// }
-//
-// void
-// verifyDocs(const std::vector<DocAndTimestamp>& wanted,
-// const std::vector<Chunk>& chunks,
-// const std::set<string>& removes = std::set<string>())
-// {
-// List<DocEntry> retrieved(
-// getEntriesFromChunks(chunks));
-// size_t removeCount = getRemoveEntryCount(retrieved);
-// // Ensure that we've got the correct number of puts and removes
-// assertEquals(removes.size(), removeCount);
-// assertEquals(wanted.size(), retrieved.size() - removeCount);
-//
-// size_t wantedIdx = 0;
-// for (size_t i = 0; i < retrieved.size(); ++i) {
-// DocEntry& entry(*retrieved[i]);
-// if (entry.getDocumentOperation() != 0) {
-// if (!(*wanted[wantedIdx].doc == *entry.getDocumentOperation())) {
-// std::ostringstream ss;
-// ss << "Documents differ! Wanted:\n"
-// << wanted[wantedIdx].doc->toString(true)
-// << "\n\nGot:\n"
-// << entry.getDocumentOperation()->toString(true);
-// CPPUNIT_FAIL(ss.str());
-// }
-// assertEquals(wanted[wantedIdx].timestamp, entry.getTimestamp());
-// size_t serSize = wanted[wantedIdx].doc->serialize()->getLength();
-// assertEquals(serSize + sizeof(DocEntry), size_t(entry.getSize()));
-// assertEquals(serSize, size_t(entry.getDocumentSize()));
-// ++wantedIdx;
-// } else {
-// // Remove-entry
-// assertTrue(entry.getDocumentId() != 0);
-// size_t serSize = entry.getDocumentId()->getSerializedSize();
-// assertEquals(serSize + sizeof(DocEntry), size_t(entry.getSize()));
-// assertEquals(serSize, size_t(entry.getDocumentSize()));
-// if (removes.find(entry.getDocumentId()->toString()) == removes.end()) {
-// std::ostringstream ss;
-// ss << "Got unexpected remove entry for document id "
-// << *entry.getDocumentId();
-// CPPUNIT_FAIL(ss.str());
-// }
-// }
-// }
-// }
-//
-//// Feed numDocs documents, starting from timestamp 1000
-//
-// } // namespace
-//
-//
-//
-
-
-
-
-
-// void detectAndTestOptionalBehavior() {
-// // Report if implementation supports setting bucket size info.
-//
-// // Report if joining same bucket on multiple partitions work.
-// // (Where target equals one of the sources). (If not supported service
-// // layer must die if a bucket is found during init on multiple partitions)
-// // Test functionality if it works.
-// }
-//
-//
-// } // spi
-// } // storage
-//
-//
-//}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/conformance/TestDocMan.java b/persistence/src/main/java/com/yahoo/persistence/spi/conformance/TestDocMan.java
deleted file mode 100644
index b002eabe7b8..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/conformance/TestDocMan.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.conformance;
-
-import com.yahoo.document.*;
-import com.yahoo.document.datatypes.IntegerFieldValue;
-import com.yahoo.document.datatypes.StringFieldValue;
-
-public class TestDocMan extends DocumentTypeManager {
-
- public TestDocMan() {
- DocumentType docType = new DocumentType("testdoctype1");
- docType.addHeaderField("headerval", DataType.INT);
- docType.addField("content", DataType.STRING);
-
- registerDocumentType(docType);
- }
-
- public Document createRandomDocumentAtLocation(long location, long timestamp) {
- return createRandomDocumentAtLocation(location, timestamp, 100, 100);
- }
-
- public Document createRandomDocumentAtLocation(long location, long timestamp, int minSize, int maxSize) {
- Document document = new Document(getDocumentType("testdoctype1"),
- new DocumentId("userdoc:footype:" + location + ":" + timestamp));
-
- document.setFieldValue("headerval", new IntegerFieldValue((int)timestamp));
-
- StringBuffer value = new StringBuffer();
- int length = (int)(Math.random() * (maxSize - minSize)) + minSize;
- for (int i = 0; i < length; ++i) {
- value.append("A");
- }
-
- document.setFieldValue("content", new StringFieldValue(value.toString()));
- return document;
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/conformance/package-info.java b/persistence/src/main/java/com/yahoo/persistence/spi/conformance/package-info.java
deleted file mode 100644
index 32180a07983..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/conformance/package-info.java
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-@PublicApi
-package com.yahoo.persistence.spi.conformance;
-
-import com.yahoo.api.annotations.PublicApi;
-import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/package-info.java b/persistence/src/main/java/com/yahoo/persistence/spi/package-info.java
deleted file mode 100644
index 179e9029f73..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/package-info.java
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-@PublicApi
-package com.yahoo.persistence.spi;
-
-import com.yahoo.api.annotations.PublicApi;
-import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketIdListResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketIdListResult.java
deleted file mode 100644
index c02b7384fe5..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketIdListResult.java
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.result;
-
-import com.yahoo.document.BucketId;
-
-import java.util.List;
-
-/**
- * Result class used for bucket id list requests.
- */
-public class BucketIdListResult extends Result {
- List<BucketId> buckets;
-
- /**
- * Creates a result with an error.
- *
- * @param type The type of error
- * @param message A human-readable error message to further detail the error.
- */
- public BucketIdListResult(ErrorType type, String message) {
- super(type, message);
- }
-
- /**
- * Creates a result containing a list of all the buckets the requested partition has.
- *
- * @param buckets The list of buckets.
- */
- public BucketIdListResult(List<BucketId> buckets) {
- this.buckets = buckets;
- }
-
- public List<BucketId> getBuckets() {
- return buckets;
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketInfoResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketInfoResult.java
deleted file mode 100644
index e6e9d4a898e..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/BucketInfoResult.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.result;
-
-import com.yahoo.persistence.spi.BucketInfo;
-
-/**
- * Result class for the getBucketInfo() function.
- */
-public class BucketInfoResult extends Result {
- BucketInfo bucketInfo = null;
-
- /**
- * Constructor to use for a result where an error has been detected.
- * The service layer will not update the bucket information in this case,
- * so it should not be returned either.
- *
- * @param type The type of error.
- * @param message A human readable message further detailing the error.
- */
- public BucketInfoResult(ErrorType type, String message) {
- super(type, message);
- }
-
- /**
- * Constructor to use when the write operation was successful,
- * and the bucket info was modified.
- *
- * @param info Returns the information about the bucket.
- */
- public BucketInfoResult(BucketInfo info) {
- this.bucketInfo = info;
- }
-
- public BucketInfo getBucketInfo() {
- return bucketInfo;
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/CreateIteratorResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/CreateIteratorResult.java
deleted file mode 100644
index 28df8a7f5a3..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/CreateIteratorResult.java
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.result;
-
-/**
- * Result class for CreateIterator requests.
- */
-public class CreateIteratorResult extends Result {
- long iteratorId = 0;
-
- /**
- * Creates a result with an error.
- *
- * @param type The type of error
- * @param message A human-readable error message to further detail the error.
- */
- public CreateIteratorResult(Result.ErrorType type, String message) {
- super(type, message);
- }
-
- /**
- * Creates a successful result, containing a unique identifier for this iterator
- * (must be created and maintained by the provider).
- *
- * @param iteratorId The iterator ID to use for this iterator.
- */
- public CreateIteratorResult(long iteratorId) {
- this.iteratorId = iteratorId;
- }
-
- public long getIteratorId() {
- return iteratorId;
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/GetResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/GetResult.java
deleted file mode 100644
index c74d81730ee..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/GetResult.java
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.result;
-
-import com.yahoo.document.Document;
-
-/**
- * Result class for Get operations
- */
-public class GetResult extends Result {
- Document doc;
- long lastModifiedTimestamp = 0;
-
- /**
- * Constructor to use when there was an error retrieving the document.
- * Not finding the document is not an error in this context.
- *
- * @param type The type of error.
- * @param message A human readable message further detailing the error.
- */
- GetResult(ErrorType type, String message) {
- super(type, message);
- }
-
- /**
- * Constructor to use when we didn't find the document in question.
- */
- public GetResult() {}
-
- /**
- * Constructor to use when we found the document asked for.
- *
- * @param doc The document we found
- * @param lastModifiedTimestamp The timestamp with which the document was stored.
- */
- public GetResult(Document doc, long lastModifiedTimestamp) {
- this.doc = doc;
- this.lastModifiedTimestamp = lastModifiedTimestamp;
- }
-
- /**
- * @return Returns the timestamp at which the document was last modified, or 0 if
- * no document was found.
- */
- public long getLastModifiedTimestamp() { return lastModifiedTimestamp;}
-
- /**
- * @return Returns true if the document was found.
- */
- public boolean wasFound() {
- return doc != null;
- }
-
- public boolean hasDocument() {
- return doc != null;
- }
-
- public Document getDocument() {
- return doc;
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/IterateResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/IterateResult.java
deleted file mode 100644
index ae9ce4ecf26..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/IterateResult.java
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.result;
-
-import com.yahoo.persistence.spi.DocEntry;
-
-import java.util.List;
-
-/**
- * Result class for iterate requests
- */
-public class IterateResult extends Result {
- List<DocEntry> entries = null;
- boolean isCompleted = false;
-
- /**
- * Creates a result with an error.
- *
- * @param type The type of error
- * @param message A human-readable error message to further detail the error.
- */
- public IterateResult(Result.ErrorType type, String message) {
- super(type, message);
- }
-
- /**
- * Creates a successful result.
- *
- * @param entries The next chunk of entries that were found during iteration.
- * @param isCompleted Set to true if there are no more entries to iterate through.
- */
- public IterateResult(List<DocEntry> entries, boolean isCompleted) {
- this.entries = entries;
- this.isCompleted = isCompleted;
- }
-
- public List<DocEntry> getEntries() {
- return entries;
- }
-
- public boolean isCompleted() {
- return isCompleted;
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/PartitionStateListResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/PartitionStateListResult.java
deleted file mode 100644
index 5989b6bc3ac..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/PartitionStateListResult.java
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.result;
-
-import com.yahoo.persistence.spi.PartitionState;
-
-import java.util.List;
-
-/**
- * A result class for getPartitionState() requests.
- */
-public class PartitionStateListResult extends Result {
- List<PartitionState> partitionStates = null;
-
- /**
- * Creates a result with an error.
- *
- * @param type The type of error
- * @param message A human-readable error message to further detail the error.
- */
- public PartitionStateListResult(Result.ErrorType type, String message) {
- super(type, message);
- }
-
- /**
- * Creates a result containing a list of all the partitions this provider has,
- * and their states.
- *
- * @param partitions A map containing all the partitions
- */
- public PartitionStateListResult(List<PartitionState> partitions) {
- this.partitionStates = partitions;
- }
-
- public List<PartitionState> getPartitionStates() {
- return partitionStates;
- }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/RemoveResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/RemoveResult.java
deleted file mode 100644
index eb7abe9f5c4..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/RemoveResult.java
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.result;
-
-import com.yahoo.persistence.spi.BucketInfo;
-
-/**
- * Result class for Remove operations
- */
-public class RemoveResult extends Result {
- boolean wasFound = false;
-
- /**
- * Constructor to use when an error occurred during the update
- *
- * @param error The type of error that occurred
- * @param message A human readable message further detailing the error.
- */
- public RemoveResult(Result.ErrorType error, String message) {
- super(error, message);
- }
-
- /**
- * Constructor to use when there was no document to remove.
- */
- public RemoveResult() {}
-
- /**
- * Constructor to use when the update was successful.
- *
- * @param wasFound The timestamp of the document that was updated.
- */
- public RemoveResult(boolean wasFound) {
- this.wasFound = wasFound;
- }
-
- @Override
- public boolean equals(Object other) {
- if (other instanceof RemoveResult) {
- return super.equals((Result)other) &&
- wasFound == ((RemoveResult)other).wasFound;
- }
-
- return false;
- }
-
- public boolean wasFound() { return wasFound; }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/Result.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/Result.java
deleted file mode 100644
index 78d7326fb12..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/Result.java
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.result;
-
-/**
- * Represents a result from an SPI method invocation.
- */
-public class Result {
-
- /**
- * Enumeration of the various categories of errors that can be returned
- * in a result.
- *
- * The categories are:
- *
- * TRANSIENT_ERROR: Operation failed, but may succeed if attempted again or on other data copies
- * PERMANENT_ERROR: Operation failed because it was somehow malformed or the operation parameters were wrong. Operation won't succeed
- * on other data copies either.
- * FATAL_ERROR: Operation failed in such a way that this node should be stopped (for instance, a disk failure). Operation will be retried
- * on other data copies.
- */
- public enum ErrorType {
- NONE,
- TRANSIENT_ERROR,
- PERMANENT_ERROR,
- UNUSED_ID,
- FATAL_ERROR
- }
-
- /**
- * Constructor to use for a result where there is no error.
- */
- public Result() {
- }
-
- /**
- * Creates a result with an error.
- *
- * @param type The type of error
- * @param message A human-readable error message to further detail the error.
- */
- public Result(ErrorType type, String message) {
- this.type = type;
- this.message = message;
- }
-
- public boolean equals(Result other) {
- return type.equals(other.type) &&
- message.equals(other.message);
- }
-
- @Override
- public boolean equals(Object otherResult) {
- if (otherResult instanceof Result) {
- return equals((Result)otherResult);
- }
-
- return false;
- }
-
- public boolean hasError() {
- return type != ErrorType.NONE;
- }
-
- public ErrorType getErrorType() {
- return type;
- }
-
- public String getErrorMessage() {
- return message;
- }
-
- @Override
- public String toString() {
- if (type == null) {
- return "Result(OK)";
- }
-
- return "Result(" + type.toString() + ", " + message + ")";
- }
-
- ErrorType type = ErrorType.NONE;
- String message = "";
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/UpdateResult.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/UpdateResult.java
deleted file mode 100644
index 97bccdb21b7..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/UpdateResult.java
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.persistence.spi.result;
-
-import com.yahoo.persistence.spi.BucketInfo;
-
-/**
- * Result class for update operations.
- */
-public class UpdateResult extends Result {
- long existingTimestamp = 0;
-
- /**
- * Constructor to use when an error occurred during the update
- *
- * @param error The type of error that occurred
- * @param message A human readable message further detailing the error.
- */
- public UpdateResult(ErrorType error, String message) {
- super(error, message);
- }
-
- /**
- * Constructor to use when the document to update was not found.
- */
- public UpdateResult() {
- super();
- }
-
- /**
- * Constructor to use when the update was successful.
- *
- * @param existingTimestamp The timestamp of the document that was updated.
- */
- public UpdateResult(long existingTimestamp) {
- this.existingTimestamp = existingTimestamp;
- }
-
- public long getExistingTimestamp() { return existingTimestamp; }
-}
diff --git a/persistence/src/main/java/com/yahoo/persistence/spi/result/package-info.java b/persistence/src/main/java/com/yahoo/persistence/spi/result/package-info.java
deleted file mode 100644
index 06a40dec719..00000000000
--- a/persistence/src/main/java/com/yahoo/persistence/spi/result/package-info.java
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-@ExportPackage
-@PublicApi
-package com.yahoo.persistence.spi.result;
-
-import com.yahoo.api.annotations.PublicApi;
-import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/persistence/src/tests/proxy/.gitignore b/persistence/src/tests/proxy/.gitignore
deleted file mode 100644
index 03bce028dd9..00000000000
--- a/persistence/src/tests/proxy/.gitignore
+++ /dev/null
@@ -1,10 +0,0 @@
-/.depend
-/Makefile
-/providerstub_test
-/providerproxy_test
-/providerproxy_conformancetest
-/vespa-external-providerproxy-conformancetest
-persistence_providerproxy_conformance_test_app
-persistence_providerproxy_test_app
-persistence_providerstub_test_app
-persistence_external_providerproxy_conformancetest_app
diff --git a/persistence/src/tests/proxy/CMakeLists.txt b/persistence/src/tests/proxy/CMakeLists.txt
deleted file mode 100644
index 598a3a6a69d..00000000000
--- a/persistence/src/tests/proxy/CMakeLists.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(persistence_providerstub_test_app
- SOURCES
- providerstub_test.cpp
- DEPENDS
- persistence
-)
-vespa_add_executable(persistence_providerproxy_test_app
- SOURCES
- providerproxy_test.cpp
- DEPENDS
- persistence
-)
-vespa_add_executable(persistence_providerproxy_conformance_test_app TEST
- SOURCES
- providerproxy_conformancetest.cpp
- DEPENDS
- persistence
- persistence_persistence_conformancetest
-)
-vespa_add_executable(persistence_external_providerproxy_conformancetest_app
- SOURCES
- external_providerproxy_conformancetest.cpp
- OUTPUT_NAME vespa-external-providerproxy-conformancetest
- INSTALL bin
- DEPENDS
- persistence
- persistence_persistence_conformancetest
-)
-vespa_add_test(NAME persistence_providerproxy_conformance_test_app COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/proxy_test.sh
- DEPENDS persistence_providerstub_test_app persistence_providerproxy_test_app persistence_providerproxy_conformance_test_app)
diff --git a/persistence/src/tests/proxy/dummy_provider_factory.h b/persistence/src/tests/proxy/dummy_provider_factory.h
deleted file mode 100644
index 808bce29fac..00000000000
--- a/persistence/src/tests/proxy/dummy_provider_factory.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/persistence/proxy/providerstub.h>
-#include <memory>
-
-namespace storage {
-namespace spi {
-
-/**
- * A simple rpc server persistence provider factory that will only
- * work once, by returning a precreated persistence provider instance.
- **/
-struct DummyProviderFactory : ProviderStub::PersistenceProviderFactory
-{
- typedef std::unique_ptr<DummyProviderFactory> UP;
- typedef storage::spi::PersistenceProvider Provider;
-
- mutable std::unique_ptr<Provider> provider;
-
- DummyProviderFactory(std::unique_ptr<Provider> p) : provider(std::move(p)) {}
-
- std::unique_ptr<Provider> create() const override {
- ASSERT_TRUE(provider.get() != 0);
- std::unique_ptr<Provider> ret = std::move(provider);
- ASSERT_TRUE(provider.get() == 0);
- return ret;
- }
-};
-
-} // namespace spi
-} // namespace storage
-
diff --git a/persistence/src/tests/proxy/external_providerproxy_conformancetest.cpp b/persistence/src/tests/proxy/external_providerproxy_conformancetest.cpp
deleted file mode 100644
index adf7a84dbd4..00000000000
--- a/persistence/src/tests/proxy/external_providerproxy_conformancetest.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "proxyfactory.h"
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/document/repo/documenttyperepo.h>
-#include <vespa/persistence/conformancetest/conformancetest.h>
-#include <vespa/persistence/proxy/providerproxy.h>
-#include <vespa/persistence/proxy/providerstub.h>
-
-using namespace storage::spi;
-typedef document::DocumentTypeRepo Repo;
-typedef ConformanceTest::PersistenceFactory Factory;
-
-namespace {
-
-struct ConformanceFixture : public ConformanceTest {
- ConformanceFixture(Factory::UP f) : ConformanceTest(std::move(f)) { setUp(); }
- ~ConformanceFixture() { tearDown(); }
-};
-
-Factory::UP getFactory() {
- return Factory::UP(new ProxyFactory());
-}
-
-#define CONVERT_TEST(testFunction, makeFactory) \
-namespace ns_ ## testFunction { \
-TEST_F(TEST_STR(testFunction) " " TEST_STR(makeFactory), ConformanceFixture(makeFactory)) { \
- f.testFunction(); \
-} \
-} // namespace testFunction
-
-#undef CPPUNIT_TEST
-#define CPPUNIT_TEST(testFunction) CONVERT_TEST(testFunction, MAKE_FACTORY)
-
-#define MAKE_FACTORY getFactory()
-DEFINE_CONFORMANCE_TESTS();
-
-} // namespace
-
-TEST_MAIN() {
- TEST_RUN_ALL();
-}
diff --git a/persistence/src/tests/proxy/mockprovider.h b/persistence/src/tests/proxy/mockprovider.h
deleted file mode 100644
index fda0b4aa922..00000000000
--- a/persistence/src/tests/proxy/mockprovider.h
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#pragma once
-
-#include <vespa/persistence/spi/persistenceprovider.h>
-#include <vespa/document/fieldvalue/document.h>
-
-namespace storage {
-namespace spi {
-
-struct MockProvider : PersistenceProvider {
- enum Function { NONE, INITIALIZE, GET_PARTITION_STATES, LIST_BUCKETS,
- SET_CLUSTER_STATE,
- SET_ACTIVE_STATE, GET_BUCKET_INFO, PUT, REMOVE_BY_ID,
- REMOVE_IF_FOUND, REPLACE_WITH_REMOVE, UPDATE, FLUSH, GET,
- CREATE_ITERATOR, ITERATE, DESTROY_ITERATOR, CREATE_BUCKET,
- DELETE_BUCKET, GET_MODIFIED_BUCKETS, SPLIT, JOIN, MOVE, MAINTAIN,
- REMOVE_ENTRY };
-
- mutable Function last_called;
-
- MockProvider() : last_called(NONE) {}
-
- Result initialize() override {
- last_called = INITIALIZE;
- return Result();
- }
-
- PartitionStateListResult getPartitionStates() const override {
- last_called = GET_PARTITION_STATES;
- return PartitionStateListResult(PartitionStateList(1u));
- }
-
- BucketIdListResult listBuckets(PartitionId id) const override {
- last_called = LIST_BUCKETS;
- BucketIdListResult::List result;
- result.push_back(document::BucketId(id));
- return BucketIdListResult(result);
- }
-
- Result setClusterState(const ClusterState &) override {
- last_called = SET_CLUSTER_STATE;
- return Result();
- }
-
- Result setActiveState(const Bucket &, BucketInfo::ActiveState) override {
- last_called = SET_ACTIVE_STATE;
- return Result();
- }
-
- BucketInfoResult getBucketInfo(const Bucket &bucket) const override {
- last_called = GET_BUCKET_INFO;
- return BucketInfoResult(BucketInfo(BucketChecksum(1), 2, 3,
- bucket.getBucketId().getRawId(),
- bucket.getPartition(),
- BucketInfo::READY,
- BucketInfo::ACTIVE));
- }
-
- Result put(const Bucket &, Timestamp, const DocumentSP&, Context&) override {
- last_called = PUT;
- return Result();
- }
-
- RemoveResult remove(const Bucket &, Timestamp, const DocumentId &, Context&) override {
- last_called = REMOVE_BY_ID;
- return RemoveResult(true);
- }
-
- RemoveResult removeIfFound(const Bucket &, Timestamp, const DocumentId &, Context&) override {
- last_called = REMOVE_IF_FOUND;
- return RemoveResult(true);
- }
-
- virtual RemoveResult replaceWithRemove(const Bucket &, Timestamp,
- const DocumentId &, Context&) {
- last_called = REPLACE_WITH_REMOVE;
- return RemoveResult(true);
- }
-
- UpdateResult update(const Bucket &, Timestamp timestamp, const DocumentUpdateSP&, Context&) override {
- last_called = UPDATE;
- return UpdateResult(Timestamp(timestamp - 10));
- }
-
- Result flush(const Bucket&, Context&) override {
- last_called = FLUSH;
- return Result();
- }
-
- GetResult get(const Bucket &, const document::FieldSet&, const DocumentId&, Context&) const override {
- last_called = GET;
- return GetResult(Document::UP(new Document),
- Timestamp(6u));
- }
-
- CreateIteratorResult createIterator(const Bucket& bucket,
- const document::FieldSet&,
- const Selection&,
- IncludedVersions,
- Context&) override
- {
- last_called = CREATE_ITERATOR;
- return CreateIteratorResult(IteratorId(bucket.getPartition()));
- }
-
- IterateResult iterate(IteratorId, uint64_t, Context&) const override {
- last_called = ITERATE;
- IterateResult::List result;
- result.push_back(DocEntry::UP(new DocEntry(Timestamp(1), 0)));
- return IterateResult(std::move(result), true);
- }
-
- Result destroyIterator(IteratorId, Context&) override {
- last_called = DESTROY_ITERATOR;
- return Result();
- }
-
- Result createBucket(const Bucket&, Context&) override {
- last_called = CREATE_BUCKET;
- return Result();
- }
- Result deleteBucket(const Bucket&, Context&) override {
- last_called = DELETE_BUCKET;
- return Result();
- }
-
- BucketIdListResult getModifiedBuckets() const override {
- last_called = GET_MODIFIED_BUCKETS;
- BucketIdListResult::List list;
- list.push_back(document::BucketId(2));
- list.push_back(document::BucketId(3));
- return BucketIdListResult(list);
- }
-
- Result split(const Bucket &, const Bucket &, const Bucket &, Context&) override {
- last_called = SPLIT;
- return Result();
- }
-
- Result join(const Bucket &, const Bucket &, const Bucket &, Context&) override {
- last_called = JOIN;
- return Result();
- }
-
- Result move(const Bucket &, PartitionId, Context&) override {
- last_called = MOVE;
- return Result();
- }
-
-
- Result maintain(const Bucket &, MaintenanceLevel) override {
- last_called = MAINTAIN;
- return Result();
- }
-
- Result removeEntry(const Bucket &, Timestamp, Context&) override {
- last_called = REMOVE_ENTRY;
- return Result();
- }
-};
-
-} // namespace spi
-} // namespace storage
diff --git a/persistence/src/tests/proxy/providerproxy_conformancetest.cpp b/persistence/src/tests/proxy/providerproxy_conformancetest.cpp
deleted file mode 100644
index fda3f42f0d5..00000000000
--- a/persistence/src/tests/proxy/providerproxy_conformancetest.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/document/repo/documenttyperepo.h>
-#include <vespa/persistence/conformancetest/conformancetest.h>
-#include <vespa/persistence/dummyimpl/dummypersistence.h>
-#include <vespa/persistence/proxy/providerproxy.h>
-#include <vespa/persistence/proxy/providerstub.h>
-#include "proxy_factory_wrapper.h"
-
-using namespace storage::spi;
-typedef document::DocumentTypeRepo Repo;
-typedef ConformanceTest::PersistenceFactory Factory;
-
-namespace {
-
-struct DummyFactory : Factory {
- PersistenceProvider::UP getPersistenceImplementation(const Repo::SP& repo,
- const Repo::DocumenttypesConfig &) override {
- return PersistenceProvider::UP(new dummy::DummyPersistence(repo, 4));
- }
-
- bool supportsActiveState() const override {
- return true;
- }
-};
-
-struct ConformanceFixture : public ConformanceTest {
- ConformanceFixture(Factory::UP f) : ConformanceTest(std::move(f)) { setUp(); }
- ~ConformanceFixture() { tearDown(); }
-};
-
-Factory::UP dummyViaProxy(size_t n) {
- if (n == 0) {
- return Factory::UP(new DummyFactory());
- }
- return Factory::UP(new ProxyFactoryWrapper(dummyViaProxy(n - 1)));
-}
-
-#define CONVERT_TEST(testFunction, makeFactory) \
-namespace ns_ ## testFunction { \
-TEST_F(TEST_STR(testFunction) " " TEST_STR(makeFactory), ConformanceFixture(makeFactory)) { \
- f.testFunction(); \
-} \
-} // namespace testFunction
-
-#undef CPPUNIT_TEST
-#define CPPUNIT_TEST(testFunction) CONVERT_TEST(testFunction, MAKE_FACTORY)
-
-#define MAKE_FACTORY dummyViaProxy(1)
-DEFINE_CONFORMANCE_TESTS();
-
-#undef MAKE_FACTORY
-#define MAKE_FACTORY dummyViaProxy(7)
-DEFINE_CONFORMANCE_TESTS();
-
-} // namespace
-
-TEST_MAIN() {
- TEST_RUN_ALL();
-}
diff --git a/persistence/src/tests/proxy/providerproxy_test.cpp b/persistence/src/tests/proxy/providerproxy_test.cpp
deleted file mode 100644
index 5fcfe0b3cab..00000000000
--- a/persistence/src/tests/proxy/providerproxy_test.cpp
+++ /dev/null
@@ -1,404 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-// Unit tests for providerproxy.
-
-#include "dummy_provider_factory.h"
-#include "mockprovider.h"
-#include <vespa/document/bucket/bucketid.h>
-#include <vespa/document/datatype/datatype.h>
-#include <vespa/document/repo/documenttyperepo.h>
-#include <vespa/document/fieldvalue/document.h>
-#include <vespa/document/update/documentupdate.h>
-#include <vespa/document/datatype/documenttype.h>
-#include <vespa/persistence/proxy/providerproxy.h>
-#include <vespa/persistence/proxy/providerstub.h>
-#include <vespa/persistence/spi/abstractpersistenceprovider.h>
-#include <vespa/vespalib/testkit/testapp.h>
-#include <vespa/vespalib/util/closure.h>
-#include <vespa/vespalib/util/closuretask.h>
-#include <vespa/vespalib/util/sync.h>
-#include <vespa/vespalib/util/threadstackexecutor.h>
-#include <vespa/document/fieldset/fieldsets.h>
-#include <vespa/vdslib/state/clusterstate.h>
-#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/config-stor-distribution.h>
-
-using document::BucketId;
-using document::DataType;
-using document::DocumentTypeRepo;
-using std::ostringstream;
-using vespalib::Gate;
-using vespalib::ThreadStackExecutor;
-using vespalib::makeClosure;
-using vespalib::makeTask;
-using namespace storage::spi;
-using namespace storage;
-
-namespace {
-
-const int port = 14863;
-const string connect_spec = "tcp/localhost:14863";
-LoadType defaultLoadType(0, "default");
-
-void startServer(const DocumentTypeRepo *repo, Gate *gate) {
- DummyProviderFactory factory(MockProvider::UP(new MockProvider));
- ProviderStub stub(port, 8, *repo, factory);
- gate->await();
- EXPECT_TRUE(stub.hasClient());
-}
-
-TEST("require that client can start connecting before server is up") {
- const DocumentTypeRepo repo;
- Gate gate;
- ThreadStackExecutor executor(1, 65536);
- executor.execute(makeTask(makeClosure(startServer, &repo, &gate)));
- ProviderProxy proxy(connect_spec, repo);
- gate.countDown();
- executor.sync();
-}
-
-TEST("require that when the server goes down it causes permanent failure.") {
- const DocumentTypeRepo repo;
- DummyProviderFactory factory(MockProvider::UP(new MockProvider));
- ProviderStub::UP server(new ProviderStub(port, 8, repo, factory));
- ProviderProxy proxy(connect_spec, repo);
- server.reset(0);
-
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Result result = proxy.flush(bucket, context);
- EXPECT_EQUAL(Result::FATAL_ERROR, result.getErrorCode());
-}
-
-struct Fixture {
- MockProvider &mock_spi;
- DummyProviderFactory factory;
- DocumentTypeRepo repo;
- ProviderStub stub;
- ProviderProxy proxy;
-
- Fixture()
- : mock_spi(*(new MockProvider)),
- factory(PersistenceProvider::UP(&mock_spi)),
- repo(),
- stub(port, 8, repo, factory),
- proxy(connect_spec, repo) {}
-};
-
-TEST_F("require that client handles initialize", Fixture) {
- Result result = f.proxy.initialize();
- EXPECT_EQUAL(MockProvider::INITIALIZE, f.mock_spi.last_called);
-}
-
-TEST_F("require that client handles getPartitionStates", Fixture) {
- PartitionStateListResult result = f.proxy.getPartitionStates();
- EXPECT_EQUAL(MockProvider::GET_PARTITION_STATES, f.mock_spi.last_called);
- EXPECT_EQUAL(1u, result.getList().size());
-}
-
-TEST_F("require that client handles listBuckets", Fixture) {
- const PartitionId partition_id(42);
-
- BucketIdListResult result = f.proxy.listBuckets(partition_id);
- EXPECT_EQUAL(MockProvider::LIST_BUCKETS, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
- ASSERT_EQUAL(1u, result.getList().size());
-}
-
-TEST_F("require that client handles setClusterState", Fixture) {
- lib::ClusterState s("version:1 storage:3 distributor:3");
- lib::Distribution d(lib::Distribution::getDefaultDistributionConfig(3, 3));
- ClusterState state(s, 0, d);
-
- Result result = f.proxy.setClusterState(state);
- EXPECT_EQUAL(MockProvider::SET_CLUSTER_STATE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
-}
-
-TEST_F("require that client handles setActiveState", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- const BucketInfo::ActiveState bucket_state = BucketInfo::NOT_ACTIVE;
-
- Result result = f.proxy.setActiveState(bucket, bucket_state);
- EXPECT_EQUAL(MockProvider::SET_ACTIVE_STATE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
-}
-
-TEST_F("require that client handles getBucketInfo", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
-
- BucketInfoResult result = f.proxy.getBucketInfo(bucket);
- EXPECT_EQUAL(MockProvider::GET_BUCKET_INFO, f.mock_spi.last_called);
-
- const BucketInfo& info(result.getBucketInfo());
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
- EXPECT_EQUAL(1u, info.getChecksum());
- EXPECT_EQUAL(2u, info.getDocumentCount());
- EXPECT_EQUAL(3u, info.getDocumentSize());
- EXPECT_EQUAL(bucket_id, info.getEntryCount());
- EXPECT_EQUAL(partition_id, info.getUsedSize());
- EXPECT_EQUAL(true, info.isReady());
- EXPECT_EQUAL(true, info.isActive());
-}
-
-TEST_F("require that client handles put", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- const Timestamp timestamp(84);
- Document::SP doc(new Document());
-
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Result result = f.proxy.put(bucket, timestamp, doc, context);
- EXPECT_EQUAL(MockProvider::PUT, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
-}
-
-TEST_F("require that client handles remove by id", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- const Timestamp timestamp(84);
- const DocumentId id("doc:test:1");
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- RemoveResult result = f.proxy.remove(bucket, timestamp, id, context);
- EXPECT_EQUAL(MockProvider::REMOVE_BY_ID, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
- EXPECT_EQUAL(true, result.wasFound());
-}
-
-TEST_F("require that client handles removeIfFound", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- const Timestamp timestamp(84);
- const DocumentId id("doc:test:1");
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- RemoveResult result = f.proxy.removeIfFound(bucket, timestamp, id, context);
- EXPECT_EQUAL(MockProvider::REMOVE_IF_FOUND, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
- EXPECT_EQUAL(true, result.wasFound());
-}
-
-TEST_F("require that client handles update", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- const Timestamp timestamp(84);
- DocumentUpdate::SP update(new DocumentUpdate(*DataType::DOCUMENT, DocumentId("doc:test:1")));
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- UpdateResult result = f.proxy.update(bucket, timestamp, update, context);
- EXPECT_EQUAL(MockProvider::UPDATE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
- EXPECT_EQUAL(timestamp - 10, result.getExistingTimestamp());
-}
-
-TEST_F("require that client handles flush", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- Result result = f.proxy.flush(bucket, context);
- EXPECT_EQUAL(MockProvider::FLUSH, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
-}
-
-TEST_F("require that client handles get", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
-
- document::AllFields field_set;
- const DocumentId id("doc:test:1");
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- GetResult result = f.proxy.get(bucket, field_set, id, context);
- EXPECT_EQUAL(MockProvider::GET, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
- EXPECT_EQUAL(6u, result.getTimestamp());
- ASSERT_TRUE(result.hasDocument());
- EXPECT_EQUAL(Document(), result.getDocument());
-}
-
-TEST_F("require that client handles createIterator", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- const DocumentSelection doc_sel("docsel");
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- document::AllFields field_set;
-
- Selection selection(doc_sel);
- selection.setFromTimestamp(Timestamp(84));
- selection.setToTimestamp(Timestamp(126));
-
- CreateIteratorResult result =
- f.proxy.createIterator(bucket, field_set, selection,
- NEWEST_DOCUMENT_ONLY, context);
-
- EXPECT_EQUAL(MockProvider::CREATE_ITERATOR, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
- EXPECT_EQUAL(partition_id, result.getIteratorId());
-}
-
-TEST_F("require that client handles iterate", Fixture) {
- const IteratorId iterator_id(42);
- const uint64_t max_byte_size = 21;
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- IterateResult result = f.proxy.iterate(iterator_id, max_byte_size, context);
- EXPECT_EQUAL(MockProvider::ITERATE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
- EXPECT_EQUAL(1u, result.getEntries().size());
- EXPECT_TRUE(result.isCompleted());
-}
-
-TEST_F("require that client handles destroyIterator", Fixture) {
- const IteratorId iterator_id(42);
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- f.proxy.destroyIterator(iterator_id, context);
- EXPECT_EQUAL(MockProvider::DESTROY_ITERATOR, f.mock_spi.last_called);
-}
-
-TEST_F("require that client handles createBucket", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- f.proxy.createBucket(bucket, context);
- EXPECT_EQUAL(MockProvider::CREATE_BUCKET, f.mock_spi.last_called);
-}
-
-TEST_F("require that server accepts deleteBucket", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- f.proxy.deleteBucket(bucket, context);
- EXPECT_EQUAL(MockProvider::DELETE_BUCKET, f.mock_spi.last_called);
-}
-
-TEST_F("require that client handles getModifiedBuckets", Fixture) {
- BucketIdListResult modifiedBuckets = f.proxy.getModifiedBuckets();
- EXPECT_EQUAL(MockProvider::GET_MODIFIED_BUCKETS, f.mock_spi.last_called);
-
- EXPECT_EQUAL(2u, modifiedBuckets.getList().size());
-}
-
-TEST_F("require that client handles split", Fixture) {
- const uint64_t bucket_id_1 = 21;
- const PartitionId partition_id_1(42);
- const Bucket bucket_1(BucketId(bucket_id_1), partition_id_1);
- const uint64_t bucket_id_2 = 210;
- const PartitionId partition_id_2(420);
- const Bucket bucket_2(BucketId(bucket_id_2), partition_id_2);
- const uint64_t bucket_id_3 = 2100;
- const PartitionId partition_id_3(4200);
- const Bucket bucket_3(BucketId(bucket_id_3), partition_id_3);
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- Result result = f.proxy.split(bucket_1, bucket_2, bucket_3, context);
- EXPECT_EQUAL(MockProvider::SPLIT, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
-}
-
-TEST_F("require that client handles join", Fixture) {
- const uint64_t bucket_id_1 = 21;
- const PartitionId partition_id_1(42);
- const Bucket bucket_1(BucketId(bucket_id_1), partition_id_1);
- const uint64_t bucket_id_2 = 210;
- const PartitionId partition_id_2(420);
- const Bucket bucket_2(BucketId(bucket_id_2), partition_id_2);
- const uint64_t bucket_id_3 = 2100;
- const PartitionId partition_id_3(4200);
- const Bucket bucket_3(BucketId(bucket_id_3), partition_id_3);
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- Result result = f.proxy.join(bucket_1, bucket_2, bucket_3, context);
- EXPECT_EQUAL(MockProvider::JOIN, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
-}
-
-TEST_F("require that client handles move", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId from_partition_id(42);
- const PartitionId to_partition_id(43);
- const Bucket bucket(BucketId(bucket_id), from_partition_id);
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- Result result = f.proxy.move(bucket, to_partition_id, context);
- EXPECT_EQUAL(MockProvider::MOVE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
-}
-
-TEST_F("require that client handles maintain", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
-
- Result result = f.proxy.maintain(bucket, HIGH);
- EXPECT_EQUAL(MockProvider::MAINTAIN, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
-}
-
-TEST_F("require that client handles remove entry", Fixture) {
- const uint64_t bucket_id = 21;
- const PartitionId partition_id(42);
- const Bucket bucket(BucketId(bucket_id), partition_id);
- const Timestamp timestamp(345);
- Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
-
- Result result = f.proxy.removeEntry(bucket, timestamp, context);
- EXPECT_EQUAL(MockProvider::REMOVE_ENTRY, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0, result.getErrorCode());
- EXPECT_EQUAL("", result.getErrorMessage());
-}
-
-} // namespace
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/persistence/src/tests/proxy/providerstub_test.cpp b/persistence/src/tests/proxy/providerstub_test.cpp
deleted file mode 100644
index fa94cfa0cdc..00000000000
--- a/persistence/src/tests/proxy/providerstub_test.cpp
+++ /dev/null
@@ -1,543 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-// Unit tests for providerstub.
-
-#include <vespa/document/datatype/datatype.h>
-#include <vespa/document/repo/documenttyperepo.h>
-#include <vespa/document/serialization/vespadocumentserializer.h>
-#include <vespa/document/util/bytebuffer.h>
-#include <vespa/document/fieldvalue/document.h>
-#include <vespa/document/update/documentupdate.h>
-#include <vespa/document/datatype/documenttype.h>
-#include <vespa/persistence/proxy/buildid.h>
-#include <vespa/persistence/proxy/providerstub.h>
-#include <vespa/persistence/spi/abstractpersistenceprovider.h>
-#include <vespa/vespalib/objects/nbostream.h>
-#include <vespa/vespalib/testkit/testapp.h>
-#include <vespa/vdslib/distribution/distribution.h>
-#include <vespa/vdslib/state/clusterstate.h>
-#include <vespa/config-stor-distribution.h>
-#include <vespa/fnet/frt/supervisor.h>
-#include <vespa/fnet/frt/rpcrequest.h>
-#include <vespa/fnet/frt/target.h>
-
-using document::BucketId;
-using document::ByteBuffer;
-using document::DataType;
-using document::DocumentTypeRepo;
-using document::VespaDocumentSerializer;
-using vespalib::nbostream;
-using namespace storage::spi;
-using namespace storage;
-
-#include <tests/proxy/mockprovider.h>
-#include "dummy_provider_factory.h"
-
-namespace {
-
-const int port = 14863;
-const char connect_spec[] = "tcp/localhost:14863";
-const string build_id = getBuildId();
-
-struct Fixture {
- MockProvider &mock_spi;
- DummyProviderFactory factory;
- DocumentTypeRepo repo;
- ProviderStub stub;
- FRT_Supervisor supervisor;
- FRT_RPCRequest *current_request;
- FRT_Target *target;
-
- Fixture()
- : mock_spi(*(new MockProvider())),
- factory(PersistenceProvider::UP(&mock_spi)),
- repo(),
- stub(port, 8, repo, factory),
- supervisor(),
- current_request(0),
- target(supervisor.GetTarget(connect_spec))
- {
- supervisor.Start();
- ASSERT_TRUE(target);
- }
- ~Fixture() {
- if (current_request) {
- current_request->SubRef();
- }
- target->SubRef();
- supervisor.ShutDown(true);
- }
- FRT_RPCRequest *getRequest(const string &name) {
- FRT_RPCRequest *req = supervisor.AllocRPCRequest(current_request);
- current_request = req;
- req->SetMethodName(name.c_str());
- return req;
- }
- void callRpc(FRT_RPCRequest *req, const string &return_spec) {
- target->InvokeSync(req, 5.0);
- req->CheckReturnTypes(return_spec.c_str());
- if (!EXPECT_EQUAL(uint32_t(FRTE_NO_ERROR), req->GetErrorCode())) {
- TEST_FATAL(req->GetErrorMessage());
- }
- }
- void failRpc(FRT_RPCRequest *req, uint32_t error_code) {
- target->InvokeSync(req, 5.0);
- EXPECT_EQUAL(error_code, req->GetErrorCode());
- }
-};
-
-struct ConnectedFixture : Fixture {
- ConnectedFixture() {
- FRT_RPCRequest *req = getRequest("vespa.persistence.connect");
- req->GetParams()->AddString(build_id.data(), build_id.size());
- callRpc(req, "");
- }
-};
-
-TEST("print build id") { fprintf(stderr, "build id: '%s'\n", getBuildId()); }
-
-TEST_F("require that server accepts connect", Fixture) {
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.connect");
- req->GetParams()->AddString(build_id.data(), build_id.size());
- f.callRpc(req, "");
- EXPECT_TRUE(f.stub.hasClient());
-}
-
-TEST_F("require that connect can be called twice", ConnectedFixture) {
- EXPECT_TRUE(f.stub.hasClient());
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.connect");
- req->GetParams()->AddString(build_id.data(), build_id.size());
- f.callRpc(req, "");
- EXPECT_TRUE(f.stub.hasClient());
-}
-
-TEST_F("require that connect fails with wrong build id", Fixture) {
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.connect");
- const string wrong_id = "wrong build id";
- req->GetParams()->AddString(wrong_id.data(), wrong_id.size());
- f.failRpc(req, FRTE_RPC_METHOD_FAILED);
- string prefix("Wrong build id. Got 'wrong build id', required ");
- EXPECT_EQUAL(prefix,
- string(req->GetErrorMessage()).substr(0, prefix.size()));
- EXPECT_FALSE(f.stub.hasClient());
-}
-
-TEST_F("require that only one client can connect", ConnectedFixture) {
- EXPECT_TRUE(f.stub.hasClient());
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.connect");
- req->GetParams()->AddString(build_id.data(), build_id.size());
- FRT_Target *target = f.supervisor.GetTarget(connect_spec);
- target->InvokeSync(req, 5.0);
- target->SubRef();
- EXPECT_EQUAL(uint32_t(FRTE_RPC_METHOD_FAILED), req->GetErrorCode());
- EXPECT_EQUAL("Server is already connected",
- string(req->GetErrorMessage()));
-}
-
-TEST_F("require that server accepts getPartitionStates", ConnectedFixture) {
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.getPartitionStates");
- f.callRpc(req, "bsIS");
- EXPECT_EQUAL(MockProvider::GET_PARTITION_STATES, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
- EXPECT_EQUAL(1u, req->GetReturn()->GetValue(2)._int32_array._len);
- EXPECT_EQUAL(1u, req->GetReturn()->GetValue(3)._string_array._len);
-}
-
-TEST_F("require that server accepts listBuckets", ConnectedFixture) {
- const uint64_t partition_id = 42;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.listBuckets");
- req->GetParams()->AddInt64(partition_id);
- f.callRpc(req, "bsL");
- EXPECT_EQUAL(MockProvider::LIST_BUCKETS, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
- EXPECT_EQUAL(1u, req->GetReturn()->GetValue(2)._int64_array._len);
- EXPECT_EQUAL(partition_id,
- req->GetReturn()->GetValue(2)._int64_array._pt[0]);
-}
-
-TEST_F("require that server accepts setClusterState", ConnectedFixture) {
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.setClusterState");
-
- lib::ClusterState s("version:1 storage:3 distributor:3");
- lib::Distribution d(lib::Distribution::getDefaultDistributionConfig(3, 3));
- ClusterState state(s, 0, d);
- vespalib::nbostream o;
- state.serialize(o);
- req->GetParams()->AddData(o.c_str(), o.size());
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::SET_CLUSTER_STATE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
-}
-
-TEST_F("require that server accepts setActiveState", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
- const BucketInfo::ActiveState bucket_state = BucketInfo::NOT_ACTIVE;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.setActiveState");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- req->GetParams()->AddInt8(bucket_state);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::SET_ACTIVE_STATE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
-}
-
-TEST_F("require that server accepts getBucketInfo", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.getBucketInfo");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- f.callRpc(req, "bsiiiiibb");
- EXPECT_EQUAL(MockProvider::GET_BUCKET_INFO, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
- EXPECT_EQUAL(1u, req->GetReturn()->GetValue(2)._intval32);
- EXPECT_EQUAL(2u, req->GetReturn()->GetValue(3)._intval32);
- EXPECT_EQUAL(3u, req->GetReturn()->GetValue(4)._intval32);
- EXPECT_EQUAL(bucket_id, req->GetReturn()->GetValue(5)._intval32);
- EXPECT_EQUAL(partition_id, req->GetReturn()->GetValue(6)._intval32);
- EXPECT_EQUAL(static_cast<uint8_t>(BucketInfo::READY),
- req->GetReturn()->GetValue(7)._intval8);
- EXPECT_EQUAL(static_cast<uint8_t>(BucketInfo::ACTIVE),
- req->GetReturn()->GetValue(8)._intval8);
-}
-
-TEST_F("require that server accepts put", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
- const Timestamp timestamp(84);
- Document::UP doc(new Document);
- nbostream stream;
- VespaDocumentSerializer serializer(stream);
- serializer.write(*doc, document::COMPLETE);
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.put");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- req->GetParams()->AddInt64(timestamp);
- req->GetParams()->AddData(stream.c_str(), stream.size());
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::PUT, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
-}
-
-void testRemove(ConnectedFixture &f, const string &rpc_name,
- MockProvider::Function func) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
- const Timestamp timestamp(84);
- const DocumentId id("doc:test:1");
-
- FRT_RPCRequest *req = f.getRequest(rpc_name);
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- req->GetParams()->AddInt64(timestamp);
- req->GetParams()->AddString(id.toString().data(), id.toString().size());
- f.callRpc(req, "bsb");
- EXPECT_EQUAL(func, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
- EXPECT_TRUE(req->GetReturn()->GetValue(2)._intval8);
-}
-
-TEST_F("require that server accepts remove by id", ConnectedFixture) {
- testRemove(f, "vespa.persistence.removeById", MockProvider::REMOVE_BY_ID);
-}
-
-TEST_F("require that server accepts removeIfFound", ConnectedFixture) {
- testRemove(f, "vespa.persistence.removeIfFound",
- MockProvider::REMOVE_IF_FOUND);
-}
-
-TEST_F("require that server accepts update", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
- const Timestamp timestamp(84);
- DocumentUpdate update(*DataType::DOCUMENT, DocumentId("doc:test:1"));
- vespalib::nbostream stream;
- update.serializeHEAD(stream);
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.update");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- req->GetParams()->AddInt64(timestamp);
- req->GetParams()->AddData(stream.c_str(), stream.size());
- f.callRpc(req, "bsl");
- EXPECT_EQUAL(MockProvider::UPDATE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
- EXPECT_EQUAL(timestamp - 10, req->GetReturn()->GetValue(2)._intval64);
-}
-
-TEST_F("require that server accepts flush", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.flush");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::FLUSH, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
-}
-
-TEST_F("require that server accepts get", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
- const string field_set_1 = "[all]";
- const DocumentId id("doc:test:1");
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.get");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- req->GetParams()->AddString(field_set_1.data(), field_set_1.size());
- req->GetParams()->AddString(id.toString().data(), id.toString().size());
- f.callRpc(req, "bslx");
- EXPECT_EQUAL(MockProvider::GET, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
- EXPECT_EQUAL(6u, req->GetReturn()->GetValue(2)._intval64);
- EXPECT_EQUAL(25u, req->GetReturn()->GetValue(3)._data._len);
-}
-
-TEST_F("require that server accepts createIterator", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
- const string doc_sel = "docsel";
- const Timestamp timestamp_from(84);
- const Timestamp timestamp_to(126);
- const Timestamp timestamp_subset(168);
- const string field_set_1 = "[all]";
- const bool include_removes = false;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.createIterator");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- req->GetParams()->AddString(field_set_1.data(), field_set_1.size());
- req->GetParams()->AddString(doc_sel.data(), doc_sel.size());
- req->GetParams()->AddInt64(timestamp_from);
- req->GetParams()->AddInt64(timestamp_to);
- req->GetParams()->AddInt64Array(1)[0] = timestamp_subset;
- req->GetParams()->AddInt8(include_removes);
-
- f.callRpc(req, "bsl");
- EXPECT_EQUAL(MockProvider::CREATE_ITERATOR, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
- EXPECT_EQUAL(partition_id, req->GetReturn()->GetValue(2)._intval64);
-}
-
-TEST_F("require that server accepts iterate", ConnectedFixture) {
- const uint64_t iterator_id = 42;
- const uint64_t max_byte_size = 21;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.iterate");
- req->GetParams()->AddInt64(iterator_id);
- req->GetParams()->AddInt64(max_byte_size);
- f.callRpc(req, "bsLISXb");
- EXPECT_EQUAL(MockProvider::ITERATE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
- EXPECT_EQUAL(1u, req->GetReturn()->GetValue(2)._int64_array._len);
- EXPECT_EQUAL(1u, req->GetReturn()->GetValue(3)._int32_array._len);
- EXPECT_EQUAL(1u, req->GetReturn()->GetValue(4)._string_array._len);
- EXPECT_EQUAL(1u, req->GetReturn()->GetValue(5)._data_array._len);
- EXPECT_TRUE(req->GetReturn()->GetValue(6)._intval8);
-}
-
-TEST_F("require that server accepts destroyIterator", ConnectedFixture) {
- const uint64_t iterator_id = 42;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.destroyIterator");
- req->GetParams()->AddInt64(iterator_id);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::DESTROY_ITERATOR, f.mock_spi.last_called);
-}
-
-TEST_F("require that server accepts createBucket", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.createBucket");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::CREATE_BUCKET, f.mock_spi.last_called);
-}
-
-TEST_F("require that server accepts deleteBucket", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.deleteBucket");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::DELETE_BUCKET, f.mock_spi.last_called);
-}
-
-TEST_F("require that server accepts getModifiedBuckets", ConnectedFixture) {
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.getModifiedBuckets");
- f.callRpc(req, "bsL");
- EXPECT_EQUAL(MockProvider::GET_MODIFIED_BUCKETS, f.mock_spi.last_called);
- EXPECT_EQUAL(2u, req->GetReturn()->GetValue(2)._int64_array._len);
-}
-
-TEST_F("require that server accepts split", ConnectedFixture) {
- const uint64_t bucket_id_1 = 21;
- const uint64_t partition_id_1 = 42;
- const uint64_t bucket_id_2 = 210;
- const uint64_t partition_id_2 = 420;
- const uint64_t bucket_id_3 = 2100;
- const uint64_t partition_id_3 = 4200;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.split");
- req->GetParams()->AddInt64(bucket_id_1);
- req->GetParams()->AddInt64(partition_id_1);
- req->GetParams()->AddInt64(bucket_id_2);
- req->GetParams()->AddInt64(partition_id_2);
- req->GetParams()->AddInt64(bucket_id_3);
- req->GetParams()->AddInt64(partition_id_3);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::SPLIT, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
-}
-
-TEST_F("require that server accepts join", ConnectedFixture) {
- const uint64_t bucket_id_1 = 21;
- const uint64_t partition_id_1 = 42;
- const uint64_t bucket_id_2 = 210;
- const uint64_t partition_id_2 = 420;
- const uint64_t bucket_id_3 = 2100;
- const uint64_t partition_id_3 = 4200;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.join");
- req->GetParams()->AddInt64(bucket_id_1);
- req->GetParams()->AddInt64(partition_id_1);
- req->GetParams()->AddInt64(bucket_id_2);
- req->GetParams()->AddInt64(partition_id_2);
- req->GetParams()->AddInt64(bucket_id_3);
- req->GetParams()->AddInt64(partition_id_3);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::JOIN, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
-}
-
-TEST_F("require that server accepts move", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t from_partition_id = 42;
- const uint64_t to_partition_id = 43;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.move");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(from_partition_id);
- req->GetParams()->AddInt64(to_partition_id);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::MOVE, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
-}
-
-TEST_F("require that server accepts maintain", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
- const MaintenanceLevel verification_level = HIGH;
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.maintain");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- req->GetParams()->AddInt8(verification_level);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::MAINTAIN, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
-}
-
-TEST_F("require that server accepts remove_entry", ConnectedFixture) {
- const uint64_t bucket_id = 21;
- const uint64_t partition_id = 42;
- const Timestamp timestamp(345);
-
- FRT_RPCRequest *req = f.getRequest("vespa.persistence.removeEntry");
- req->GetParams()->AddInt64(bucket_id);
- req->GetParams()->AddInt64(partition_id);
- req->GetParams()->AddInt64(timestamp);
- f.callRpc(req, "bs");
- EXPECT_EQUAL(MockProvider::REMOVE_ENTRY, f.mock_spi.last_called);
-
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(0)._intval8);
- EXPECT_EQUAL(0u, req->GetReturn()->GetValue(1)._string._len);
-}
-
-void checkRpcFails(const string &name, const string &param_spec, Fixture &f) {
- TEST_STATE(name.c_str());
- FRT_RPCRequest *req = f.getRequest("vespa.persistence." + name);
- for (size_t i = 0; i < param_spec.size(); ++i) {
- switch(param_spec[i]) {
- case 'b': req->GetParams()->AddInt8(0); break;
- case 'l': req->GetParams()->AddInt64(0); break;
- case 'L': req->GetParams()->AddInt64Array(0); break;
- case 's': req->GetParams()->AddString(0, 0); break;
- case 'S': req->GetParams()->AddStringArray(0); break;
- case 'x': req->GetParams()->AddData(0, 0); break;
- }
- }
- f.failRpc(req, FRTE_RPC_METHOD_FAILED);
-}
-
-TEST_F("require that unconnected server fails all SPI calls.", Fixture)
-{
- checkRpcFails("initialize", "", f);
- checkRpcFails("getPartitionStates", "", f);
- checkRpcFails("listBuckets", "l", f);
- checkRpcFails("setClusterState", "x", f);
- checkRpcFails("setActiveState", "llb", f);
- checkRpcFails("getBucketInfo", "ll", f);
- checkRpcFails("put", "lllx", f);
- checkRpcFails("removeById", "llls", f);
- checkRpcFails("removeIfFound", "llls", f);
- checkRpcFails("update", "lllx", f);
- checkRpcFails("flush", "ll", f);
- checkRpcFails("get", "llss", f);
- checkRpcFails("createIterator", "llssllLb", f);
- checkRpcFails("iterate", "ll", f);
- checkRpcFails("destroyIterator", "l", f);
- checkRpcFails("createBucket", "ll", f);
- checkRpcFails("deleteBucket", "ll", f);
- checkRpcFails("getModifiedBuckets", "", f);
- checkRpcFails("split", "llllll", f);
- checkRpcFails("join", "llllll", f);
- checkRpcFails("maintain", "llb", f);
- checkRpcFails("removeEntry", "lll", f);
-}
-
-} // namespace
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/persistence/src/tests/proxy/proxy_factory_wrapper.h b/persistence/src/tests/proxy/proxy_factory_wrapper.h
deleted file mode 100644
index c42b262bcac..00000000000
--- a/persistence/src/tests/proxy/proxy_factory_wrapper.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/util/stringfmt.h>
-#include <vespa/persistence/conformancetest/conformancetest.h>
-#include <vespa/persistence/proxy/providerstub.h>
-#include <vespa/persistence/proxy/providerproxy.h>
-#include "dummy_provider_factory.h"
-
-namespace storage {
-namespace spi {
-
-/**
- * Generic wrapper for persistence conformance test factories. This
- * wrapper will take any other factory and expose a factory interface
- * that will create persistence instances that communicate with
- * persistence instances created by the wrapped factory using the RPC
- * persistence Proxy.
- **/
-struct ProxyFactoryWrapper : ConformanceTest::PersistenceFactory
-{
- typedef storage::spi::ConformanceTest::PersistenceFactory Factory;
- typedef storage::spi::PersistenceProvider Provider;
- typedef storage::spi::ProviderStub Server;
- typedef storage::spi::ProviderProxy Client;
- typedef document::DocumentTypeRepo Repo;
-
- Factory::UP factory;
- ProxyFactoryWrapper(Factory::UP f) : factory(std::move(f)) {}
-
- struct Wrapper : Client {
- DummyProviderFactory::UP provider;
- Server::UP server;
- Wrapper(DummyProviderFactory::UP p, Server::UP s, const Repo &repo)
- : Client(vespalib::make_string("tcp/localhost:%u", s->getPort()), repo),
- provider(std::move(p)),
- server(std::move(s))
- {}
- };
-
- virtual Provider::UP
- getPersistenceImplementation(const Repo::SP &repo,
- const Repo::DocumenttypesConfig &typesCfg) override{
- DummyProviderFactory::UP provider(new DummyProviderFactory(factory->getPersistenceImplementation(repo, typesCfg)));
- Server::UP server(new Server(0, 8, *repo, *provider));
- return Provider::UP(new Wrapper(std::move(provider), std::move(server), *repo));
- }
-
- bool supportsActiveState() const override {
- return factory->supportsActiveState();
- }
-};
-} // namespace spi
-} // namespace storage
-
diff --git a/persistence/src/tests/proxy/proxy_test.sh b/persistence/src/tests/proxy/proxy_test.sh
deleted file mode 100755
index 637ff192356..00000000000
--- a/persistence/src/tests/proxy/proxy_test.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-set -e
-$VALGRIND ./persistence_providerstub_test_app
-$VALGRIND ./persistence_providerproxy_test_app
-$VALGRIND ./persistence_providerproxy_conformance_test_app
diff --git a/persistence/src/tests/proxy/proxyfactory.h b/persistence/src/tests/proxy/proxyfactory.h
deleted file mode 100644
index b785fab4290..00000000000
--- a/persistence/src/tests/proxy/proxyfactory.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/persistence/conformancetest/conformancetest.h>
-#include <vespa/persistence/proxy/providerstub.h>
-#include <vespa/persistence/proxy/providerproxy.h>
-
-namespace storage {
-namespace spi {
-
-/**
- * Generic wrapper for persistence conformance test factories. This
- * wrapper will take any other factory and expose a factory interface
- * that will create persistence instances that communicate with
- * persistence instances created by the wrapped factory using the RPC
- * persistence Proxy.
- **/
-struct ProxyFactory : ConformanceTest::PersistenceFactory
-{
- using Provider = storage::spi::PersistenceProvider;
- using Client = storage::spi::ProviderProxy;
- using Repo = document::DocumentTypeRepo;
-
- ProxyFactory() {}
-
- Provider::UP
- getPersistenceImplementation(const Repo::SP &repo, const Repo::DocumenttypesConfig &) override {
- return Provider::UP(new Client("tcp/localhost:3456", *repo));
- }
-
- bool supportsActiveState() const override {
- return false;
- }
-};
-} // namespace spi
-} // namespace storage
diff --git a/persistence/src/tests/spi/clusterstatetest.cpp b/persistence/src/tests/spi/clusterstatetest.cpp
index a829035b61d..2fdaf25abf6 100644
--- a/persistence/src/tests/spi/clusterstatetest.cpp
+++ b/persistence/src/tests/spi/clusterstatetest.cpp
@@ -169,7 +169,7 @@ ClusterStateTest::testReady()
{
lib::ClusterState s("version:1 storage:3 distributor:3");
- Bucket b(document::BucketId(16, 1), PartitionId(0));
+ Bucket b(document::Bucket(document::BucketSpace::placeHolder(), document::BucketId(16, 1)), PartitionId(0));
// With 3 copies, this bucket has ideal state 0, 2, 1
diff --git a/persistence/src/vespa/persistence/CMakeLists.txt b/persistence/src/vespa/persistence/CMakeLists.txt
index 3b7920128ce..da8eda2164f 100644
--- a/persistence/src/vespa/persistence/CMakeLists.txt
+++ b/persistence/src/vespa/persistence/CMakeLists.txt
@@ -3,7 +3,6 @@ vespa_add_library(persistence
SOURCES
$<TARGET_OBJECTS:persistence_dummyimpl>
$<TARGET_OBJECTS:persistence_spi>
- $<TARGET_OBJECTS:persistence_proxy>
INSTALL lib64
DEPENDS
)
diff --git a/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp b/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp
index a0176fe88f2..662955be1b7 100644
--- a/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp
+++ b/persistence/src/vespa/persistence/conformancetest/conformancetest.cpp
@@ -2,6 +2,7 @@
#include <vespa/document/base/testdocman.h>
#include <vespa/persistence/conformancetest/conformancetest.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/document/fieldset/fieldsets.h>
#include <vespa/document/update/documentupdate.h>
@@ -16,6 +17,9 @@
#include <limits>
using document::BucketId;
+using document::BucketSpace;
+using storage::spi::test::makeBucket;
+using storage::spi::test::makeBucketSpace;
namespace storage::spi {
@@ -309,7 +313,7 @@ void ConformanceTest::testBasics() {
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
spi->createBucket(bucket, context);
@@ -395,9 +399,9 @@ void ConformanceTest::testListBuckets() {
BucketId bucketId1(8, 0x01);
BucketId bucketId2(8, 0x02);
BucketId bucketId3(8, 0x03);
- Bucket bucket1(bucketId1, partId);
- Bucket bucket2(bucketId2, partId);
- Bucket bucket3(bucketId3, partId);
+ Bucket bucket1(makeBucket(bucketId1, partId));
+ Bucket bucket2(makeBucket(bucketId2, partId));
+ Bucket bucket3(makeBucket(bucketId3, partId));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x02, 2);
@@ -415,12 +419,12 @@ void ConformanceTest::testListBuckets() {
spi->flush(bucket3, context);
{
- BucketIdListResult result = spi->listBuckets(PartitionId(1));
+ BucketIdListResult result = spi->listBuckets(makeBucketSpace(), PartitionId(1));
CPPUNIT_ASSERT(result.getList().empty());
}
{
- BucketIdListResult result = spi->listBuckets(partId);
+ BucketIdListResult result = spi->listBuckets(makeBucketSpace(), partId);
const BucketIdListResult::List &bucketList = result.getList();
CPPUNIT_ASSERT_EQUAL(3u, (uint32_t)bucketList.size());
CPPUNIT_ASSERT(std::find(bucketList.begin(), bucketList.end(), bucketId1) != bucketList.end());
@@ -435,7 +439,7 @@ void ConformanceTest::testBucketInfo() {
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
@@ -493,7 +497,7 @@ ConformanceTest::testOrderIndependentBucketInfo()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
@@ -538,7 +542,7 @@ void ConformanceTest::testPut() {
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
spi->createBucket(bucket, context);
@@ -563,7 +567,7 @@ void ConformanceTest::testPutNewDocumentVersion() {
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Document::SP doc2(doc1->clone());
doc2->setValue("content", document::StringFieldValue("hiho silver"));
@@ -614,7 +618,7 @@ void ConformanceTest::testPutOlderDocumentVersion() {
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Document::SP doc2(doc1->clone());
doc2->setValue("content", document::StringFieldValue("hiho silver"));
@@ -658,7 +662,7 @@ void ConformanceTest::testPutDuplicate() {
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
spi->createBucket(bucket, context);
CPPUNIT_ASSERT_EQUAL(Result(),
@@ -691,7 +695,7 @@ void ConformanceTest::testRemove() {
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
spi->createBucket(bucket, context);
@@ -787,7 +791,7 @@ void ConformanceTest::testRemoveMerge() {
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
DocumentId removeId("id:fraggle:testdoctype1:n=1:rock");
spi->createBucket(bucket, context);
@@ -884,7 +888,7 @@ void ConformanceTest::testUpdate() {
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
spi->createBucket(bucket, context);
const document::DocumentType *docType(
@@ -960,7 +964,7 @@ void ConformanceTest::testGet() {
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
spi->createBucket(bucket, context);
{
@@ -1000,7 +1004,7 @@ ConformanceTest::testIterateCreateIterator()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
spi::CreateIteratorResult result(
@@ -1020,7 +1024,7 @@ ConformanceTest::testIterateWithUnknownId()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
IteratorId unknownId(123);
@@ -1035,7 +1039,7 @@ ConformanceTest::testIterateDestroyIterator()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
CreateIteratorResult iter(createIterator(*spi, b, createSelection("")));
@@ -1068,7 +1072,7 @@ ConformanceTest::testIterateAllDocs()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
std::vector<DocAndTimestamp> docs(feedDocs(*spi, testDocMan, b, 100));
@@ -1087,7 +1091,7 @@ ConformanceTest::testIterateAllDocsNewestVersionOnly()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
std::vector<DocAndTimestamp> docs(feedDocs(*spi, testDocMan, b, 100));
@@ -1117,7 +1121,7 @@ ConformanceTest::testIterateChunked()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
std::vector<DocAndTimestamp> docs(feedDocs(*spi, testDocMan, b, 100));
@@ -1138,7 +1142,7 @@ ConformanceTest::testMaxByteSize()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
std::vector<DocAndTimestamp> docs(
@@ -1167,7 +1171,7 @@ ConformanceTest::testIterateMatchTimestampRange()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
std::vector<DocAndTimestamp> docsToVisit;
@@ -1207,7 +1211,7 @@ ConformanceTest::testIterateExplicitTimestampSubset()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
std::vector<DocAndTimestamp> docsToVisit;
@@ -1258,7 +1262,7 @@ ConformanceTest::testIterateRemoves()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
int docCount = 10;
@@ -1311,7 +1315,7 @@ ConformanceTest::testIterateMatchSelection()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
std::vector<DocAndTimestamp> docsToVisit;
@@ -1347,7 +1351,7 @@ ConformanceTest::testIterationRequiringDocumentIdOnlyMatching()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
feedDocs(*spi, testDocMan, b, 100);
@@ -1381,7 +1385,7 @@ ConformanceTest::testIterateBadDocumentSelection()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
{
CreateIteratorResult iter(
@@ -1423,7 +1427,7 @@ ConformanceTest::testIterateAlreadyCompleted()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
std::vector<DocAndTimestamp> docs = feedDocs(*spi, testDocMan, b, 10);
@@ -1448,7 +1452,7 @@ ConformanceTest::testIterateEmptyBucket()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket b(document::BucketId(8, 0x1), PartitionId(0));
+ Bucket b(makeBucket(BucketId(8, 0x1)));
spi->createBucket(b, context);
Selection sel(createSelection(""));
@@ -1471,7 +1475,7 @@ ConformanceTest::testDeleteBucket()
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
spi->createBucket(bucket, context);
spi->put(bucket, Timestamp(3), doc1, context);
@@ -1515,10 +1519,10 @@ ConformanceTest::testSplitNormalCase()
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
- Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
+ Bucket bucketA(makeBucket(BucketId(3, 0x02)));
+ Bucket bucketB(makeBucket(BucketId(3, 0x06)));
- Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+ Bucket bucketC(makeBucket(BucketId(2, 0x02)));
spi->createBucket(bucketC, context);
TimestampList tsList;
@@ -1591,11 +1595,11 @@ ConformanceTest::testSplitTargetExists()
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
- Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
+ Bucket bucketA(makeBucket(BucketId(3, 0x02)));
+ Bucket bucketB(makeBucket(BucketId(3, 0x06)));
spi->createBucket(bucketB, context);
- Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+ Bucket bucketC(makeBucket(BucketId(2, 0x02)));
spi->createBucket(bucketC, context);
TimestampList tsList;
@@ -1683,10 +1687,10 @@ ConformanceTest::testSplitSingleDocumentInSource()
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket target1(document::BucketId(3, 0x02), PartitionId(0));
- Bucket target2(document::BucketId(3, 0x06), PartitionId(0));
+ Bucket target1(makeBucket(BucketId(3, 0x02)));
+ Bucket target2(makeBucket(BucketId(3, 0x06)));
- Bucket source(document::BucketId(2, 0x02), PartitionId(0));
+ Bucket source(makeBucket(BucketId(2, 0x02)));
spi->createBucket(source, context);
// Create doc belonging in target2 after split.
@@ -1791,18 +1795,18 @@ ConformanceTest::doTestJoinNormalCase(const Bucket& source1,
void
ConformanceTest::testJoinNormalCase()
{
- Bucket source1(document::BucketId(3, 0x02), PartitionId(0));
- Bucket source2(document::BucketId(3, 0x06), PartitionId(0));
- Bucket target(document::BucketId(2, 0x02), PartitionId(0));
+ Bucket source1(makeBucket(BucketId(3, 0x02)));
+ Bucket source2(makeBucket(BucketId(3, 0x06)));
+ Bucket target(makeBucket(BucketId(2, 0x02)));
doTestJoinNormalCase(source1, source2, target);
}
void
ConformanceTest::testJoinNormalCaseWithMultipleBitsDecreased()
{
- Bucket source1(document::BucketId(3, 0x02), PartitionId(0));
- Bucket source2(document::BucketId(3, 0x06), PartitionId(0));
- Bucket target(document::BucketId(1, 0x00), PartitionId(0));
+ Bucket source1(makeBucket(BucketId(3, 0x02)));
+ Bucket source2(makeBucket(BucketId(3, 0x06)));
+ Bucket target(makeBucket(BucketId(1, 0x00)));
doTestJoinNormalCase(source1, source2, target);
}
@@ -1849,13 +1853,13 @@ ConformanceTest::testJoinTargetExists()
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket bucketA(makeBucket(BucketId(3, 0x02)));
spi->createBucket(bucketA, context);
- Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
+ Bucket bucketB(makeBucket(BucketId(3, 0x06)));
spi->createBucket(bucketB, context);
- Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+ Bucket bucketC(makeBucket(BucketId(2, 0x02)));
spi->createBucket(bucketC, context);
for (uint32_t i = 0; i < 10; ++i) {
@@ -1953,11 +1957,11 @@ ConformanceTest::testJoinOneBucket()
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket bucketA(makeBucket(BucketId(3, 0x02)));
spi->createBucket(bucketA, context);
- Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
- Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+ Bucket bucketB(makeBucket(BucketId(3, 0x06)));
+ Bucket bucketC(makeBucket(BucketId(2, 0x02)));
populateBucket(bucketA, *spi, context, 0, 10, testDocMan);
@@ -2030,16 +2034,16 @@ ConformanceTest::doTestJoinSameSourceBuckets(const Bucket& source,
void
ConformanceTest::testJoinSameSourceBuckets()
{
- Bucket source(document::BucketId(3, 0x02), PartitionId(0));
- Bucket target(document::BucketId(2, 0x02), PartitionId(0));
+ Bucket source(makeBucket(BucketId(3, 0x02)));
+ Bucket target(makeBucket(BucketId(2, 0x02)));
doTestJoinSameSourceBuckets(source, target);
}
void
ConformanceTest::testJoinSameSourceBucketsWithMultipleBitsDecreased()
{
- Bucket source(document::BucketId(3, 0x02), PartitionId(0));
- Bucket target(document::BucketId(1, 0x00), PartitionId(0));
+ Bucket source(makeBucket(BucketId(3, 0x02)));
+ Bucket target(makeBucket(BucketId(1, 0x00)));
doTestJoinSameSourceBuckets(source, target);
}
@@ -2072,10 +2076,10 @@ ConformanceTest::testJoinSameSourceBucketsTargetExists()
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket source(document::BucketId(3, 0x02), PartitionId(0));
+ Bucket source(makeBucket(BucketId(3, 0x02)));
spi->createBucket(source, context);
- Bucket target(document::BucketId(2, 0x02), PartitionId(0));
+ Bucket target(makeBucket(BucketId(2, 0x02)));
spi->createBucket(target, context);
populateBucket(source, *spi, context, 0, 10, testDocMan);
@@ -2101,7 +2105,7 @@ void ConformanceTest::testMaintain()
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
spi->createBucket(bucket, context);
spi->put(bucket, Timestamp(3), doc1, context);
@@ -2117,7 +2121,7 @@ void ConformanceTest::testGetModifiedBuckets()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
CPPUNIT_ASSERT_EQUAL(0,
- (int)spi->getModifiedBuckets().getList().size());
+ (int)spi->getModifiedBuckets(makeBucketSpace()).getList().size());
}
void ConformanceTest::testBucketActivation()
@@ -2130,7 +2134,7 @@ void ConformanceTest::testBucketActivation()
_factory->clear();
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
spi->setClusterState(createClusterState());
spi->createBucket(bucket, context);
@@ -2174,9 +2178,9 @@ void ConformanceTest::testBucketActivationSplitAndJoin()
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucketA(document::BucketId(3, 0x02), PartitionId(0));
- Bucket bucketB(document::BucketId(3, 0x06), PartitionId(0));
- Bucket bucketC(document::BucketId(2, 0x02), PartitionId(0));
+ Bucket bucketA(makeBucket(BucketId(3, 0x02)));
+ Bucket bucketB(makeBucket(BucketId(3, 0x06)));
+ Bucket bucketC(makeBucket(BucketId(2, 0x02)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x02, 1);
Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x06, 2);
@@ -2253,7 +2257,7 @@ void ConformanceTest::testRemoveEntry()
PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
- Bucket bucket(document::BucketId(8, 0x01), PartitionId(0));
+ Bucket bucket(makeBucket(BucketId(8, 0x01)));
Document::SP doc1 = testDocMan.createRandomDocumentAtLocation(0x01, 1);
Document::SP doc2 = testDocMan.createRandomDocumentAtLocation(0x01, 2);
spi->createBucket(bucket, context);
@@ -2297,6 +2301,72 @@ void ConformanceTest::testRemoveEntry()
}
}
+void assertBucketInfo(PersistenceProvider &spi, const Bucket &bucket, uint32_t expDocCount)
+{
+ const BucketInfo info = spi.getBucketInfo(bucket).getBucketInfo();
+ CPPUNIT_ASSERT_EQUAL(expDocCount, info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getEntryCount() >= info.getDocumentCount());
+ CPPUNIT_ASSERT(info.getChecksum() != 0);
+ CPPUNIT_ASSERT(info.getDocumentSize() > 0);
+ CPPUNIT_ASSERT(info.getUsedSize() >= info.getDocumentSize());
+}
+
+void assertBucketList(PersistenceProvider &spi,
+ BucketSpace &bucketSpace,
+ PartitionId partId,
+ const std::vector<BucketId> &expBuckets)
+{
+ BucketIdListResult result = spi.listBuckets(bucketSpace, partId);
+ const BucketIdListResult::List &bucketList = result.getList();
+ CPPUNIT_ASSERT_EQUAL(expBuckets.size(), bucketList.size());
+ for (const auto &expBucket : expBuckets) {
+ CPPUNIT_ASSERT(std::find(bucketList.begin(), bucketList.end(), expBucket) != bucketList.end());
+ }
+}
+
+void ConformanceTest::testBucketSpaces()
+{
+ if (!_factory->supportsBucketSpaces()) {
+ return;
+ }
+ document::TestDocMan testDocMan;
+ _factory->clear();
+ PersistenceProvider::UP spi(getSpi(*_factory, testDocMan));
+ Context context(defaultLoadType, Priority(0), Trace::TraceLevel(0));
+ BucketSpace bucketSpace0(makeBucketSpace("testdoctype1"));
+ BucketSpace bucketSpace1(makeBucketSpace("testdoctype2"));
+ BucketSpace bucketSpace2(makeBucketSpace("no"));
+ PartitionId partId(0);
+
+ BucketId bucketId1(8, 0x01);
+ BucketId bucketId2(8, 0x02);
+ Bucket bucket01({ bucketSpace0, bucketId1 }, partId);
+ Bucket bucket11({ bucketSpace1, bucketId1 }, partId);
+ Bucket bucket12({ bucketSpace1, bucketId2 }, partId);
+ Document::SP doc1 = testDocMan.createDocument("content", "id:test:testdoctype1:n=1:1", "testdoctype1");
+ Document::SP doc2 = testDocMan.createDocument("content", "id:test:testdoctype1:n=1:2", "testdoctype1");
+ Document::SP doc3 = testDocMan.createDocument("content", "id:test:testdoctype2:n=1:3", "testdoctype2");
+ Document::SP doc4 = testDocMan.createDocument("content", "id:test:testdoctype2:n=2:4", "testdoctype2");
+ spi->createBucket(bucket01, context);
+ spi->createBucket(bucket11, context);
+ spi->createBucket(bucket12, context);
+ spi->put(bucket01, Timestamp(3), doc1, context);
+ spi->put(bucket01, Timestamp(4), doc2, context);
+ spi->put(bucket11, Timestamp(5), doc3, context);
+ spi->put(bucket12, Timestamp(6), doc4, context);
+ spi->flush(bucket01, context);
+ spi->flush(bucket11, context);
+ spi->flush(bucket12, context);
+ // Check bucket lists
+ assertBucketList(*spi, bucketSpace0, partId, { bucketId1 });
+ assertBucketList(*spi, bucketSpace1, partId, { bucketId1, bucketId2 });
+ assertBucketList(*spi, bucketSpace2, partId, { });
+ // Check bucket info
+ assertBucketInfo(*spi, bucket01, 2);
+ assertBucketInfo(*spi, bucket11, 1);
+ assertBucketInfo(*spi, bucket12, 1);
+}
+
void ConformanceTest::detectAndTestOptionalBehavior() {
// Report if implementation supports setting bucket size info.
diff --git a/persistence/src/vespa/persistence/conformancetest/conformancetest.h b/persistence/src/vespa/persistence/conformancetest/conformancetest.h
index 46aee7a33b8..66b1dec113a 100644
--- a/persistence/src/vespa/persistence/conformancetest/conformancetest.h
+++ b/persistence/src/vespa/persistence/conformancetest/conformancetest.h
@@ -58,6 +58,7 @@
CPPUNIT_TEST(testBucketActivation); \
CPPUNIT_TEST(testBucketActivationSplitAndJoin); \
CPPUNIT_TEST(testRemoveEntry); \
+ CPPUNIT_TEST(testBucketSpaces); \
CPPUNIT_TEST(detectAndTestOptionalBehavior);
namespace document
@@ -100,6 +101,8 @@ struct ConformanceTest : public CppUnit::TestFixture {
{
return false;
}
+ // If bucket spaces are supported then testdoctype2 is in bucket space 1
+ virtual bool supportsBucketSpaces() const { return false; }
};
PersistenceFactory::UP _factory;
@@ -261,6 +264,9 @@ public:
void testRemoveEntry();
+ /** Test multiple bucket spaces */
+ void testBucketSpaces();
+
/**
* Reports what optional behavior is supported by implementation and not.
* Tests functionality if supported.
diff --git a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp
index 43e1e3e60a1..5c38679f52c 100644
--- a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp
+++ b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.cpp
@@ -341,7 +341,7 @@ DummyPersistence::getPartitionStates() const
BucketIdListResult
-DummyPersistence::listBuckets(PartitionId id) const
+DummyPersistence::listBuckets(BucketSpace, PartitionId id) const
{
DUMMYPERSISTENCE_VERIFY_INITIALIZED;
LOG(debug, "listBuckets(%u)", uint16_t(id));
@@ -363,7 +363,7 @@ DummyPersistence::setModifiedBuckets(const BucketIdListResult::List& buckets)
}
BucketIdListResult
-DummyPersistence::getModifiedBuckets() const
+DummyPersistence::getModifiedBuckets(BucketSpace) const
{
vespalib::MonitorGuard lock(_monitor);
return BucketIdListResult(_modifiedBuckets);
diff --git a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h
index 19076f053eb..50a4562ea3b 100644
--- a/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h
+++ b/persistence/src/vespa/persistence/dummyimpl/dummypersistence.h
@@ -132,7 +132,7 @@ public:
~DummyPersistence();
PartitionStateListResult getPartitionStates() const override;
- BucketIdListResult listBuckets(PartitionId) const override;
+ BucketIdListResult listBuckets(BucketSpace bucketSpace, PartitionId) const override;
void setModifiedBuckets(const BucketIdListResult::List& result);
@@ -140,7 +140,7 @@ public:
* Returns the list set by setModifiedBuckets(), then clears
* the list.
*/
- BucketIdListResult getModifiedBuckets() const override;
+ BucketIdListResult getModifiedBuckets(BucketSpace bucketSpace) const override;
Result setClusterState(const ClusterState& newState) override;
Result setActiveState(const Bucket& bucket, BucketInfo::ActiveState newState) override;
diff --git a/persistence/src/vespa/persistence/proxy/.gitignore b/persistence/src/vespa/persistence/proxy/.gitignore
deleted file mode 100644
index 7e7c0fe7fae..00000000000
--- a/persistence/src/vespa/persistence/proxy/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/.depend
-/Makefile
diff --git a/persistence/src/vespa/persistence/proxy/buildid.cpp b/persistence/src/vespa/persistence/proxy/buildid.cpp
deleted file mode 100644
index 2ac018069b8..00000000000
--- a/persistence/src/vespa/persistence/proxy/buildid.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "buildid.h"
-#include <vespa/vespalib/component/vtag.h>
-
-const char *storage::spi::getBuildId() {
- return vespalib::VersionTagComponent;
-}
diff --git a/persistence/src/vespa/persistence/proxy/buildid.h b/persistence/src/vespa/persistence/proxy/buildid.h
deleted file mode 100644
index ab32b09c533..00000000000
--- a/persistence/src/vespa/persistence/proxy/buildid.h
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace storage {
-namespace spi {
-
-const char *getBuildId();
-
-} // namespace spi
-} // namespace storage
-
diff --git a/persistence/src/vespa/persistence/proxy/providerproxy.cpp b/persistence/src/vespa/persistence/proxy/providerproxy.cpp
deleted file mode 100644
index 52e641db74f..00000000000
--- a/persistence/src/vespa/persistence/proxy/providerproxy.cpp
+++ /dev/null
@@ -1,493 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "providerproxy.h"
-#include "buildid.h"
-#include <vespa/document/repo/documenttyperepo.h>
-#include <vespa/document/update/documentupdate.h>
-#include <vespa/document/fieldvalue/document.h>
-#include <vespa/document/fieldset/fieldsetrepo.h>
-#include <vespa/document/serialization/vespadocumentdeserializer.h>
-#include <vespa/document/serialization/vespadocumentserializer.h>
-#include <vespa/vespalib/util/stringfmt.h>
-#include <vespa/vespalib/objects/nbostream.h>
-#include <vespa/fnet/frt/frt.h>
-#include <vespa/log/log.h>
-LOG_SETUP(".providerproxy");
-
-using document::BucketId;
-using document::ByteBuffer;
-using document::DocumentTypeRepo;
-using document::VespaDocumentDeserializer;
-using document::VespaDocumentSerializer;
-using vespalib::nbostream;
-
-namespace storage {
-namespace spi {
-namespace {
-void addBucket(FRT_Values &values, const Bucket &bucket) {
- values.AddInt64(bucket.getBucketId().getId());
- values.AddInt64(bucket.getPartition());
-}
-
-void addDocument(FRT_Values &values, const Document &doc) {
- nbostream stream;
- VespaDocumentSerializer serializer(stream);
- serializer.write(doc, document::COMPLETE);
- values.AddData(stream.c_str(), stream.size());
-}
-
-void addString(FRT_Values &values, const string &s) {
- values.AddString(s.data(), s.size());
-}
-
-void addSelection(FRT_Values &values, const Selection &selection) {
- addString(values, selection.getDocumentSelection().getDocumentSelection());
- values.AddInt64(selection.getFromTimestamp());
- values.AddInt64(selection.getToTimestamp());
- std::copy(selection.getTimestampSubset().begin(),
- selection.getTimestampSubset().end(),
- values.AddInt64Array(selection.getTimestampSubset().size()));
-}
-
-void addDocumentUpdate(FRT_Values &values, const DocumentUpdate &update) {
- nbostream stream;
- update.serializeHEAD(stream);
- values.AddData(stream.c_str(), stream.size());
-}
-
-Document::UP readDocument(nbostream &stream, const DocumentTypeRepo &repo) {
- const uint16_t version = 8;
- VespaDocumentDeserializer deserializer(repo, stream, version);
- Document::UP doc(new Document);
- deserializer.read(*doc);
- return doc;
-}
-
-string getString(const FRT_StringValue &str) {
- return string(str._str, str._len);
-}
-
-string getString(const FRT_Value &value) {
- return getString(value._string);
-}
-
-template <typename ResultType>
-ResultType readError(const FRT_Values &values) {
- uint8_t error_code = values[0]._intval8;
- string error_msg = getString(values[1]);
- return ResultType(Result::ErrorType(error_code), error_msg);
-}
-
-bool invokeRpc(FRT_Target *target, FRT_RPCRequest &req, const char *res_spec) {
- target->InvokeSync(&req, 0.0); // no timeout
- req.CheckReturnTypes(res_spec);
- return req.GetErrorCode() == FRTE_NO_ERROR;
-}
-
-struct RequestScopedPtr : vespalib::noncopyable {
- FRT_RPCRequest *req;
- RequestScopedPtr(FRT_RPCRequest *r) : req(r) { assert(req); }
- ~RequestScopedPtr() { req->SubRef(); }
- FRT_RPCRequest *operator->() { return req; }
- FRT_RPCRequest &operator*() { return *req; }
-};
-} // namespace
-
-template <typename ResultType>
-ResultType ProviderProxy::invokeRpc_Return(FRT_RPCRequest &req,
- const char *res_spec) const
-{
- if (!invokeRpc(_target, req, res_spec)) {
-
-
- return ResultType(Result::FATAL_ERROR,
- vespalib::make_string("Error %s when running RPC request %s",
- req.GetErrorMessage(),
- req.GetMethodName()));
- }
- return readResult<ResultType>(*req.GetReturn());
-}
-
-template <typename ResultType>
-ResultType ProviderProxy::readResult(const FRT_Values &values) const {
- if (values[0]._intval8 != Result::NONE) {
- return readError<ResultType>(values);
- }
- return readNoError<ResultType>(values);
-}
-
-template <>
-Result ProviderProxy::readNoError(const FRT_Values &) const {
- return Result();
-}
-
-template <>
-PartitionStateListResult
-ProviderProxy::readNoError(const FRT_Values &values) const {
- FRT_LPT(uint32_t) state_array = values[2]._int32_array;
- FRT_LPT(FRT_StringValue) reason_array = values[3]._string_array;
- PartitionStateList states(state_array._len);
- for (size_t i = 0; i < state_array._len; ++i) {
- PartitionState::State state =
- static_cast<PartitionState::State>(state_array._pt[i]);
- string reason = getString(reason_array._pt[i]);
- states[i] = PartitionState(state, reason);
- }
- return PartitionStateListResult(states);
-}
-
-template <>
-BucketIdListResult ProviderProxy::readNoError(const FRT_Values &values) const {
- BucketIdListResult::List list;
- for (uint32_t i = 0; i < values[2]._int64_array._len; ++i) {
- list.push_back(BucketId(values[2]._int64_array._pt[i]));
- }
- return BucketIdListResult(list);
-}
-
-template <>
-BucketInfoResult ProviderProxy::readNoError(const FRT_Values &values) const {
- BucketInfo info(BucketChecksum(values[2]._intval32),
- values[3]._intval32,
- values[4]._intval32,
- values[5]._intval32,
- values[6]._intval32,
- static_cast<BucketInfo::ReadyState>(
- values[7]._intval8),
- static_cast<BucketInfo::ActiveState>(
- values[8]._intval8));
- return BucketInfoResult(info);
-}
-
-template <>
-RemoveResult ProviderProxy::readNoError(const FRT_Values &values) const {
- return RemoveResult(values[2]._intval8);
-}
-
-template <>
-UpdateResult ProviderProxy::readNoError(const FRT_Values &values) const {
- return UpdateResult(Timestamp(values[2]._intval64));
-}
-
-template <>
-GetResult ProviderProxy::readNoError(const FRT_Values &values) const {
- nbostream stream(values[3]._data._buf, values[3]._data._len);
- if (stream.empty()) {
- return GetResult();
- }
- return GetResult(readDocument(stream, *_repo),
- Timestamp(values[2]._intval64));
-}
-
-template <>
-CreateIteratorResult ProviderProxy::readNoError(const FRT_Values &values) const
-{
- return CreateIteratorResult(IteratorId(values[2]._intval64));
-}
-
-template <>
-IterateResult ProviderProxy::readNoError(const FRT_Values &values) const {
- IterateResult::List result;
- assert(values[2]._int64_array._len == values[3]._int32_array._len &&
- values[2]._int64_array._len == values[4]._string_array._len &&
- values[2]._int64_array._len == values[5]._data_array._len);
- for (uint32_t i = 0; i < values[2]._int64_array._len; ++i) {
- Timestamp timestamp(values[2]._int64_array._pt[i]);
- uint32_t meta_flags = values[3]._int32_array._pt[i];
- string doc_id(getString(values[4]._string_array._pt[i]));
- nbostream stream(values[5]._data_array._pt[i]._buf,
- values[5]._data_array._pt[i]._len);
- DocEntry::UP entry;
- if (!stream.empty()) {
- Document::UP doc = readDocument(stream, *_repo);
- entry.reset(new DocEntry(timestamp, meta_flags, std::move(doc)));
- } else if (!doc_id.empty()) {
- entry.reset(
- new DocEntry(timestamp, meta_flags, DocumentId(doc_id)));
- } else {
- entry.reset(new DocEntry(timestamp, meta_flags));
- }
- result.push_back(std::move(entry));
- }
-
- return IterateResult(std::move(result), values[6]._intval8);
-}
-
-namespace {
-bool shouldFailFast(uint32_t error_code) {
- return error_code != FRTE_RPC_TIMEOUT
- && error_code != FRTE_RPC_CONNECTION
- && error_code != FRTE_RPC_OVERLOAD
- && error_code != FRTE_NO_ERROR;
-}
-} // namespace
-
-ProviderProxy::ProviderProxy(const vespalib::string &connect_spec,
- const DocumentTypeRepo &repo)
- : _supervisor(new FRT_Supervisor()),
- _target(0),
- _repo(&repo)
-{
- _supervisor->Start();
- bool connected = false;
- _target = _supervisor->GetTarget(connect_spec.c_str());
- for (size_t i = 0; !connected && (i < (100 + 300)); ++i) {
- FRT_RPCRequest *req = new FRT_RPCRequest();
- req->SetMethodName("vespa.persistence.connect");
- const string build_id = getBuildId();
- req->GetParams()->AddString(build_id.data(), build_id.size());
- _target->InvokeSync(req, 5.0);
- connected = req->CheckReturnTypes("");
- uint32_t error_code = req->GetErrorCode();
- req->SubRef();
- if (!connected) {
- if (shouldFailFast(error_code)) {
- break;
- }
- _target->SubRef();
- if (i < 100) {
- FastOS_Thread::Sleep(100); // retry each 100ms for 10s
- } else {
- FastOS_Thread::Sleep(1000); // retry each 1s for 5m
- }
- _target = _supervisor->GetTarget(connect_spec.c_str());
- }
- }
- if (!connected) {
- LOG(error, "could not connect to peer");
- }
-}
-
-ProviderProxy::~ProviderProxy() {
- _target->SubRef();
- _supervisor->ShutDown(true);
-}
-
-Result ProviderProxy::initialize() {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.initialize");
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-PartitionStateListResult ProviderProxy::getPartitionStates() const {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.getPartitionStates");
- return invokeRpc_Return<PartitionStateListResult>(*req, "bsIS");
-}
-
-BucketIdListResult ProviderProxy::listBuckets(PartitionId partition) const {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.listBuckets");
- req->GetParams()->AddInt64(partition);
-
- return invokeRpc_Return<BucketIdListResult>(*req, "bsL");
-}
-
-Result ProviderProxy::setClusterState(const ClusterState& clusterState) {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.setClusterState");
-
- vespalib::nbostream o;
- clusterState.serialize(o);
- req->GetParams()->AddData(o.c_str(), o.size());
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-Result ProviderProxy::setActiveState(const Bucket &bucket,
- BucketInfo::ActiveState newState) {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.setActiveState");
- addBucket(*req->GetParams(), bucket);
- req->GetParams()->AddInt8(newState);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-BucketInfoResult ProviderProxy::getBucketInfo(const Bucket &bucket) const {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.getBucketInfo");
- addBucket(*req->GetParams(), bucket);
- return invokeRpc_Return<BucketInfoResult>(*req, "bsiiiiibb");
-}
-
-Result ProviderProxy::put(const Bucket &bucket, Timestamp timestamp,
- const Document::SP& doc, Context&)
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.put");
- addBucket(*req->GetParams(), bucket);
- req->GetParams()->AddInt64(timestamp);
- addDocument(*req->GetParams(), *doc);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-RemoveResult ProviderProxy::remove(const Bucket &bucket,
- Timestamp timestamp,
- const DocumentId &id,
- Context&)
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.removeById");
- addBucket(*req->GetParams(), bucket);
- req->GetParams()->AddInt64(timestamp);
- addString(*req->GetParams(), id.toString());
- return invokeRpc_Return<RemoveResult>(*req, "bsb");
-}
-
-RemoveResult ProviderProxy::removeIfFound(const Bucket &bucket,
- Timestamp timestamp,
- const DocumentId &id,
- Context&)
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.removeIfFound");
- addBucket(*req->GetParams(), bucket);
- req->GetParams()->AddInt64(timestamp);
- addString(*req->GetParams(), id.toString());
- return invokeRpc_Return<RemoveResult>(*req, "bsb");
-}
-
-UpdateResult ProviderProxy::update(const Bucket &bucket, Timestamp timestamp,
- const DocumentUpdate::SP& doc_update,
- Context&)
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.update");
- addBucket(*req->GetParams(), bucket);
- req->GetParams()->AddInt64(timestamp);
- addDocumentUpdate(*req->GetParams(), *doc_update);
- return invokeRpc_Return<UpdateResult>(*req, "bsl");
-}
-
-Result ProviderProxy::flush(const Bucket &bucket, Context&) {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.flush");
- addBucket(*req->GetParams(), bucket);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-GetResult ProviderProxy::get(const Bucket &bucket,
- const document::FieldSet& fieldSet,
- const DocumentId &doc_id,
- Context&) const
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.get");
- document::FieldSetRepo repo;
- addBucket(*req->GetParams(), bucket);
- addString(*req->GetParams(), repo.serialize(fieldSet));
- addString(*req->GetParams(), doc_id.toString());
- return invokeRpc_Return<GetResult>(*req, "bslx");
-}
-
-CreateIteratorResult ProviderProxy::createIterator(const Bucket &bucket,
- const document::FieldSet& fieldSet,
- const Selection &select,
- IncludedVersions versions,
- Context&)
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.createIterator");
- addBucket(*req->GetParams(), bucket);
-
- document::FieldSetRepo repo;
- addString(*req->GetParams(), repo.serialize(fieldSet));
- addSelection(*req->GetParams(), select);
- req->GetParams()->AddInt8(versions);
- return invokeRpc_Return<CreateIteratorResult>(*req, "bsl");
-}
-
-IterateResult ProviderProxy::iterate(IteratorId id,
- uint64_t max_byte_size,
- Context&) const
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.iterate");
- req->GetParams()->AddInt64(id);
- req->GetParams()->AddInt64(max_byte_size);
- return invokeRpc_Return<IterateResult>(*req, "bsLISXb");
-}
-
-Result ProviderProxy::destroyIterator(IteratorId id, Context&) {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.destroyIterator");
- req->GetParams()->AddInt64(id);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-Result ProviderProxy::createBucket(const Bucket &bucket, Context&) {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.createBucket");
- addBucket(*req->GetParams(), bucket);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-Result ProviderProxy::deleteBucket(const Bucket &bucket, Context&) {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.deleteBucket");
- addBucket(*req->GetParams(), bucket);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-BucketIdListResult ProviderProxy::getModifiedBuckets() const {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.getModifiedBuckets");
- return invokeRpc_Return<BucketIdListResult>(*req, "bsL");
-}
-
-Result ProviderProxy::split(const Bucket &source,
- const Bucket &target1,
- const Bucket &target2,
- Context&)
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.split");
- addBucket(*req->GetParams(), source);
- addBucket(*req->GetParams(), target1);
- addBucket(*req->GetParams(), target2);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-Result ProviderProxy::join(const Bucket &source1,
- const Bucket &source2,
- const Bucket &target,
- Context&)
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.join");
- addBucket(*req->GetParams(), source1);
- addBucket(*req->GetParams(), source2);
- addBucket(*req->GetParams(), target);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-Result ProviderProxy::move(const Bucket &source,
- PartitionId target,
- Context&)
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.move");
- addBucket(*req->GetParams(), source);
- req->GetParams()->AddInt64(target);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-Result ProviderProxy::maintain(const Bucket &bucket, MaintenanceLevel level) {
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.maintain");
- addBucket(*req->GetParams(), bucket);
- req->GetParams()->AddInt8(level);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-Result ProviderProxy::removeEntry(const Bucket &bucket, Timestamp timestamp,
- Context&)
-{
- RequestScopedPtr req(_supervisor->AllocRPCRequest());
- req->SetMethodName("vespa.persistence.removeEntry");
- addBucket(*req->GetParams(), bucket);
- req->GetParams()->AddInt64(timestamp);
- return invokeRpc_Return<Result>(*req, "bs");
-}
-
-} // namespace spi
-} // namespace storage
diff --git a/persistence/src/vespa/persistence/proxy/providerproxy.h b/persistence/src/vespa/persistence/proxy/providerproxy.h
deleted file mode 100644
index 7fa59fe07d0..00000000000
--- a/persistence/src/vespa/persistence/proxy/providerproxy.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/persistence/spi/persistenceprovider.h>
-
-class FRT_Target;
-class FRT_Supervisor;
-class FRT_RPCRequest;
-class FRT_Values;
-
-namespace document {
- class DocumentTypeRepo;
-}
-
-namespace storage {
-namespace spi {
-
-class ProviderProxy : public PersistenceProvider {
- std::unique_ptr<FRT_Supervisor> _supervisor;
- FRT_Target *_target;
- const document::DocumentTypeRepo *_repo;
-
- template <typename ResultType>
- ResultType invokeRpc_Return(FRT_RPCRequest &req, const char *res_spec) const;
- template <typename ResultType>
- ResultType readResult(const FRT_Values &values) const;
- template <typename ResultType>
- ResultType readNoError(const FRT_Values &values) const;
-
-public:
- typedef std::unique_ptr<ProviderProxy> UP;
-
- ProviderProxy(const vespalib::string &connect_spec, const document::DocumentTypeRepo &repo);
- ~ProviderProxy();
-
- void setRepo(const document::DocumentTypeRepo &repo) {
- _repo = &repo;
- }
-
- Result initialize() override;
- PartitionStateListResult getPartitionStates() const override;
- BucketIdListResult listBuckets(PartitionId) const override;
- Result setClusterState(const ClusterState&) override;
- Result setActiveState(const Bucket&, BucketInfo::ActiveState) override;
- BucketInfoResult getBucketInfo(const Bucket &) const override;
-
- Result put(const Bucket &, Timestamp, const DocumentSP&, Context&) override;
- RemoveResult remove(const Bucket &, Timestamp, const DocumentId &, Context&) override;
- RemoveResult removeIfFound(const Bucket &, Timestamp, const DocumentId &, Context&) override;
- UpdateResult update(const Bucket &, Timestamp, const DocumentUpdateSP&, Context&) override;
-
- Result flush(const Bucket &, Context&) override;
-
- GetResult get(const Bucket &, const document::FieldSet&, const DocumentId &, Context&) const override;
-
- CreateIteratorResult createIterator(const Bucket &, const document::FieldSet&, const Selection&,
- IncludedVersions versions, Context&) override;
-
- IterateResult iterate(IteratorId, uint64_t max_byte_size, Context&) const override;
- Result destroyIterator(IteratorId, Context&) override;
-
- Result createBucket(const Bucket &, Context&) override;
- Result deleteBucket(const Bucket &, Context&) override;
- BucketIdListResult getModifiedBuckets() const override;
- Result split(const Bucket &source, const Bucket &target1, const Bucket &target2, Context&) override;
- Result join(const Bucket &source1, const Bucket &source2, const Bucket &target, Context&) override;
- Result move(const Bucket &source, PartitionId partition, Context&) override;
-
- Result maintain(const Bucket &, MaintenanceLevel) override;
- Result removeEntry(const Bucket &, Timestamp, Context&) override;
-};
-
-} // namespace spi
-} // namespace storage
-
diff --git a/persistence/src/vespa/persistence/proxy/providerstub.cpp b/persistence/src/vespa/persistence/proxy/providerstub.cpp
deleted file mode 100644
index b4137f0eb0c..00000000000
--- a/persistence/src/vespa/persistence/proxy/providerstub.cpp
+++ /dev/null
@@ -1,928 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "buildid.h"
-#include "providerstub.h"
-#include <vespa/document/serialization/vespadocumentdeserializer.h>
-#include <vespa/document/serialization/vespadocumentserializer.h>
-#include <vespa/document/util/bytebuffer.h>
-#include <vespa/document/base/documentid.h>
-#include <vespa/document/update/documentupdate.h>
-#include <vespa/document/fieldvalue/document.h>
-#include <vespa/persistence/spi/persistenceprovider.h>
-#include <vespa/vespalib/objects/nbostream.h>
-#include <vespa/vespalib/util/closuretask.h>
-#include <vespa/document/fieldset/fieldsetrepo.h>
-#include <vespa/fnet/frt/values.h>
-#include <vespa/fnet/frt/supervisor.h>
-#include <vespa/fnet/frt/rpcrequest.h>
-
-
-using document::BucketId;
-using document::ByteBuffer;
-using document::DocumentTypeRepo;
-using document::VespaDocumentDeserializer;
-using document::VespaDocumentSerializer;
-using std::vector;
-using vespalib::Closure;
-using vespalib::makeClosure;
-using vespalib::makeTask;
-using vespalib::nbostream;
-
-namespace storage::spi {
-namespace {
-
-LoadType defaultLoadType(0, "default");
-
-// Serialize return values
-void addResult(FRT_Values &ret, const Result &result) {
- ret.AddInt8(result.getErrorCode());
- ret.AddString(result.getErrorMessage().data(),
- result.getErrorMessage().size());
-}
-
-void addPartitionStateListResult(FRT_Values &ret,
- const PartitionStateListResult &result) {
- addResult(ret, result);
- PartitionStateList states = result.getList();
- uint32_t *stateValues = ret.AddInt32Array(states.size());
- FRT_StringValue *reasons = ret.AddStringArray(states.size());
- for (size_t i = 0; i < states.size(); ++i) {
- stateValues[i] = states[i].getState();
- string reason(states[i].getReason());
- ret.SetString(&reasons[i], reason.data(), reason.size());
- }
-}
-
-void addBucketInfoResult(FRT_Values &ret, const BucketInfoResult &result) {
- addResult(ret, result);
- const BucketInfo& info = result.getBucketInfo();
- ret.AddInt32(info.getChecksum());
- ret.AddInt32(info.getDocumentCount());
- ret.AddInt32(info.getDocumentSize());
- ret.AddInt32(info.getEntryCount());
- ret.AddInt32(info.getUsedSize());
- ret.AddInt8(static_cast<uint8_t>(info.isReady()));
- ret.AddInt8(static_cast<uint8_t>(info.isActive()));
-}
-
-void addRemoveResult(FRT_Values &ret, const RemoveResult &result) {
- addResult(ret, result);
- ret.AddInt8(result.wasFound());
-}
-
-void addUpdateResult(FRT_Values &ret, const UpdateResult &result) {
- addResult(ret, result);
- ret.AddInt64(result.getExistingTimestamp());
-}
-
-void addGetResult(FRT_Values &ret, const GetResult &result) {
- addResult(ret, result);
- ret.AddInt64(result.getTimestamp());
- if (result.hasDocument()) {
- nbostream stream;
- VespaDocumentSerializer serializer(stream);
- serializer.write(result.getDocument(), document::COMPLETE);
- ret.AddData(stream.c_str(), stream.size());
- } else {
- ret.AddData(0, 0);
- }
-}
-
-void addCreateIteratorResult(FRT_Values &ret,
- const CreateIteratorResult &result)
-{
- addResult(ret, result);
- ret.AddInt64(result.getIteratorId());
-}
-
-void addIterateResult(FRT_Values &ret, const IterateResult &result)
-{
- addResult(ret, result);
-
- const vector<DocEntry::UP> &entries = result.getEntries();
- uint64_t *timestamps = ret.AddInt64Array(entries.size());
- uint32_t *flags = ret.AddInt32Array(entries.size());
- assert(sizeof(DocEntry::SizeType) == sizeof(uint32_t));
- FRT_StringValue *doc_id_array = ret.AddStringArray(entries.size());
- FRT_DataValue *doc_array = ret.AddDataArray(entries.size());
-
- for (size_t i = 0; i < entries.size(); ++i) {
- string doc_id_str;
- nbostream stream;
- const DocumentId *doc_id = entries[i]->getDocumentId();
- if (doc_id) {
- doc_id_str = doc_id->toString();
- }
- const Document *doc = entries[i]->getDocument();
- if (doc) {
- VespaDocumentSerializer serializer(stream);
- serializer.write(*doc, document::COMPLETE);
- }
-
- timestamps[i] = entries[i]->getTimestamp();
- flags[i] = entries[i]->getFlags();
- ret.SetString(&doc_id_array[i], doc_id_str.data(), doc_id_str.size());
- ret.SetData(&doc_array[i], stream.c_str(), stream.size());
- }
-
- ret.AddInt8(result.isCompleted());
-}
-
-void addBucketIdListResult(FRT_Values &ret, const BucketIdListResult& result) {
- addResult(ret, result);
-
- size_t modified_bucket_size = result.getList().size();
- uint64_t *bucket_id = ret.AddInt64Array(modified_bucket_size);
- for (size_t i = 0; i < modified_bucket_size; ++i) {
- bucket_id[i] = result.getList()[i].getRawId();
- }
-}
-
-string getString(const FRT_StringValue &str) {
- return string(str._str, str._len);
-}
-
-string getString(const FRT_Value &value) {
- return getString(value._string);
-}
-
-Bucket getBucket(const FRT_Value &bucket_val, const FRT_Value &partition_val) {
- BucketId bucket_id(bucket_val._intval64);
- PartitionId partition_id(partition_val._intval64);
- return Bucket(bucket_id, partition_id);
-}
-
-Document::UP getDocument(const FRT_Value &val, const DocumentTypeRepo &repo) {
- nbostream stream(val._data._buf, val._data._len);
- const uint16_t version = 8;
- VespaDocumentDeserializer deserializer(repo, stream, version);
- Document::UP doc(new Document);
- deserializer.read(*doc);
- return doc;
-}
-
-Selection getSelection(const FRT_Values &params, int i) {
- DocumentSelection doc_sel(getString(params[i]));
- Timestamp timestamp_from(params[i + 1]._intval64);
- Timestamp timestamp_to(params[i + 2]._intval64);
- FRT_Array<uint64_t> array = params[i + 3]._int64_array;
- TimestampList timestamp_subset(array._pt, array._pt + array._len);
-
- Selection selection(doc_sel);
- selection.setFromTimestamp(timestamp_from);
- selection.setToTimestamp(timestamp_to);
- selection.setTimestampSubset(timestamp_subset);
- return selection;
-}
-
-void addConnect(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.connect",
- "s", "", true, func, obj);
- rb.MethodDesc("Set up connection to proxy.");
- rb.ParamDesc("build_id", "Id to make sure client and server come from the "
- "same build.");
-}
-
-void addGetPartitionStates(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.getPartitionStates",
- "", "bsIS", true, func, obj);
- rb.MethodDesc("???");
- rb.ReturnDesc("ret", "An array of serialized PartitionStates.");
-}
-
-void doGetPartitionStates(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &ret = *req->GetReturn();
- addPartitionStateListResult(ret, provider->getPartitionStates());
- req->Return();
-}
-
-void addInitialize(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.initialize",
- "", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doInitialize(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &ret = *req->GetReturn();
- addResult(ret, provider->initialize());
- req->Return();
-}
-
-void addListBuckets(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.listBuckets",
- "l", "bsL", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("partition_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
- rb.ReturnDesc("bucket_ids", "An array of BucketIds.");
-}
-
-void doListBuckets(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- PartitionId partition_id(params[0]._intval64);
-
- FRT_Values &ret = *req->GetReturn();
- addBucketIdListResult(ret, provider->listBuckets(partition_id));
- req->Return();
-}
-
-void addSetClusterState(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.setClusterState",
- "x", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("cluster_state", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doSetClusterState(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- vespalib::nbostream stream(params[0]._data._buf, params[0]._data._len);
-
- ClusterState state(stream);
- FRT_Values &ret = *req->GetReturn();
- addResult(ret, provider->setClusterState(state));
- req->Return();
-}
-
-void addSetActiveState(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.setActiveState",
- "llb", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ParamDesc("bucket_state", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doSetActiveState(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
- BucketInfo::ActiveState state = BucketInfo::ActiveState(params[2]._intval8);
-
- FRT_Values &ret = *req->GetReturn();
- addResult(ret, provider->setActiveState(bucket, state));
- req->Return();
-}
-
-void addGetBucketInfo(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.getBucketInfo",
- "ll", "bsiiiiibb", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
- rb.ReturnDesc("checksum", "");
- rb.ReturnDesc("document_count", "");
- rb.ReturnDesc("document_size", "");
- rb.ReturnDesc("entry_count", "");
- rb.ReturnDesc("used_size", "");
- rb.ReturnDesc("ready", "");
- rb.ReturnDesc("active", "");
-}
-
-void doGetBucketInfo(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
-
- FRT_Values &ret = *req->GetReturn();
- addBucketInfoResult(ret, provider->getBucketInfo(bucket));
- req->Return();
-}
-
-void addPut(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.put",
- "lllx", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ParamDesc("timestamp", "");
- rb.ParamDesc("document", "A serialized document");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doPut(FRT_RPCRequest *req, PersistenceProvider *provider,
- const DocumentTypeRepo *repo)
-{
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
- Timestamp timestamp(params[2]._intval64);
- Document::SP doc(getDocument(params[3], *repo).release());
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addResult(ret, provider->put(bucket, timestamp, doc, context));
- req->Return();
-}
-
-void addRemoveById(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.removeById",
- "llls", "bsb", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ParamDesc("timestamp", "");
- rb.ParamDesc("document_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
- rb.ReturnDesc("existed", "");
-}
-
-void doRemoveById(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
- Timestamp timestamp(params[2]._intval64);
- DocumentId id(getString(params[3]));
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addRemoveResult(ret, provider->remove(bucket, timestamp, id, context));
- req->Return();
-}
-
-void addRemoveIfFound(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.removeIfFound",
- "llls", "bsb", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ParamDesc("timestamp", "");
- rb.ParamDesc("document_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
- rb.ReturnDesc("existed", "");
-}
-
-void doRemoveIfFound(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
- Timestamp timestamp(params[2]._intval64);
- DocumentId id(getString(params[3]));
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addRemoveResult(ret,
- provider->removeIfFound(bucket, timestamp, id, context));
- req->Return();
-}
-
-void addUpdate(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.update",
- "lllx", "bsl", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ParamDesc("timestamp", "");
- rb.ParamDesc("document_update", "A serialized DocumentUpdate");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
- rb.ReturnDesc("existing timestamp", "");
-}
-
-void doUpdate(FRT_RPCRequest *req, PersistenceProvider *provider,
- const DocumentTypeRepo *repo)
-{
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
- Timestamp timestamp(params[2]._intval64);
- ByteBuffer buffer(params[3]._data._buf, params[3]._data._len);
- auto update = std::make_shared<DocumentUpdate>(*repo, buffer,
- DocumentUpdate::
- SerializeVersion::
- SERIALIZE_HEAD);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addUpdateResult(ret, provider->update(bucket, timestamp, update, context));
- req->Return();
-}
-
-void addFlush(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.flush", "ll", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doFlush(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addResult(ret, provider->flush(bucket, context));
- req->Return();
-}
-
-void addGet(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.get",
- "llss", "bslx", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ParamDesc("field_set", "Array of fields in the set");
- rb.ParamDesc("document_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
- rb.ReturnDesc("timestamp", "");
- rb.ReturnDesc("document", "A serialized document");
-}
-
-void doGet(FRT_RPCRequest *req,
- PersistenceProvider *provider,
- const DocumentTypeRepo* repo)
-{
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
-
- document::FieldSetRepo fsr;
- document::FieldSet::UP fieldSet = fsr.parse(*repo, getString(params[2]));
- DocumentId id(getString(params[3]));
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addGetResult(ret, provider->get(bucket, *fieldSet, id, context));
- req->Return();
-}
-
-void addCreateIterator(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.createIterator",
- "llssllLb", "bsl", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ParamDesc("field_set", "Field set string (comma-separated list of strings)");
- rb.ParamDesc("document_selection_string", "");
- rb.ParamDesc("timestamp_from", "");
- rb.ParamDesc("timestamp_to", "");
- rb.ParamDesc("timestamp_subset", "");
- rb.ParamDesc("includedversions", "");
-
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
- rb.ReturnDesc("iterator_id", "");
-}
-
-void doCreateIterator(FRT_RPCRequest *req, PersistenceProvider *provider,
- const DocumentTypeRepo* repo)
-{
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
-
- document::FieldSetRepo fsr;
- document::FieldSet::UP fieldSet = fsr.parse(*repo, getString(params[2]));
- Selection selection = getSelection(params, 3);
- IncludedVersions versions =
- static_cast<IncludedVersions>(params[7]._intval8);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addCreateIteratorResult(ret, provider->createIterator(
- bucket, *fieldSet, selection, versions, context));
- req->Return();
-}
-
-void addIterate(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.iterate",
- "ll", "bsLISXb", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("iterator_id", "");
- rb.ParamDesc("max_byte_size", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
- rb.ReturnDesc("doc_entry_timestamp", "Array of timestamps for DocEntries");
- rb.ReturnDesc("doc_entry_flags", "Array of flags for DocEntries");
- rb.ReturnDesc("doc_entry_doc_id", "Array of DocumentIds for DocEntries");
- rb.ReturnDesc("doc_entry_doc", "Array of Documents for DocEntries");
- rb.ReturnDesc("completed", "bool");
-}
-
-void doIterate(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- IteratorId id(params[0]._intval64);
- uint64_t max_byte_size = params[1]._intval64;
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addIterateResult(ret, provider->iterate(id, max_byte_size, context));
- req->Return();
-}
-
-void addDestroyIterator(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.destroyIterator",
- "l", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("iterator_id", "");
-}
-
-void doDestroyIterator(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- IteratorId id(params[0]._intval64);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addResult(ret, provider->destroyIterator(id, context));
- req->Return();
-}
-
-void addCreateBucket(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.createBucket",
- "ll", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doCreateBucket(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addResult(ret, provider->createBucket(bucket, context));
- req->Return();
-}
-
-void addDeleteBucket(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.deleteBucket",
- "ll", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doDeleteBucket(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addResult(ret, provider->deleteBucket(bucket, context));
- req->Return();
-}
-
-void addGetModifiedBuckets(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.getModifiedBuckets",
- "", "bsL", true, func, obj);
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
- rb.ReturnDesc("modified_buckets_bucket_ids", "Array of bucket ids");
-}
-
-void doGetModifiedBuckets(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &ret = *req->GetReturn();
- addBucketIdListResult(ret, provider->getModifiedBuckets());
- req->Return();
-}
-
-void addSplit(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.split",
- "llllll", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("source_bucket_id", "");
- rb.ParamDesc("source_partition_id", "");
- rb.ParamDesc("target1_bucket_id", "");
- rb.ParamDesc("target1_partition_id", "");
- rb.ParamDesc("target2_bucket_id", "");
- rb.ParamDesc("target2_partition_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doSplit(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket source = getBucket(params[0], params[1]);
- Bucket target1 = getBucket(params[2], params[3]);
- Bucket target2 = getBucket(params[4], params[5]);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addResult(ret, provider->split(source, target1, target2, context));
- req->Return();
-}
-
-void addJoin(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.join",
- "llllll", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("source1_bucket_id", "");
- rb.ParamDesc("source1_partition_id", "");
- rb.ParamDesc("source2_bucket_id", "");
- rb.ParamDesc("source2_partition_id", "");
- rb.ParamDesc("target_bucket_id", "");
- rb.ParamDesc("target_partition_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doJoin(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket source1 = getBucket(params[0], params[1]);
- Bucket source2 = getBucket(params[2], params[3]);
- Bucket target = getBucket(params[4], params[5]);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addResult(ret, provider->join(source1, source2, target, context));
- req->Return();
-}
-
-void addMove(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.move",
- "lll", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("source_bucket_id", "");
- rb.ParamDesc("source_partition_id", "");
- rb.ParamDesc("target_partition_id", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doMove(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket source = getBucket(params[0], params[1]);
- PartitionId partition_id(params[2]._intval64);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addResult(ret, provider->move(source, partition_id, context));
- req->Return();
-}
-
-
-void addMaintain(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.maintain",
- "llb", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ParamDesc("verification_level", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doMaintain(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
- MaintenanceLevel level =
- static_cast<MaintenanceLevel>(params[2]._intval8);
-
- FRT_Values &ret = *req->GetReturn();
- addResult(ret, provider->maintain(bucket, level));
- req->Return();
-}
-
-void addRemoveEntry(
- FRT_ReflectionBuilder &rb, FRT_METHOD_PT func, FRT_Invokable *obj) {
- rb.DefineMethod("vespa.persistence.removeEntry",
- "lll", "bs", true, func, obj);
- rb.MethodDesc("???");
- rb.ParamDesc("bucket_id", "");
- rb.ParamDesc("partition_id", "");
- rb.ParamDesc("timestamp", "");
- rb.ReturnDesc("error_code", "");
- rb.ReturnDesc("error_message", "");
-}
-
-void doRemoveEntry(FRT_RPCRequest *req, PersistenceProvider *provider) {
- FRT_Values &params = *req->GetParams();
- Bucket bucket = getBucket(params[0], params[1]);
- Timestamp timestamp(params[2]._intval64);
-
- FRT_Values &ret = *req->GetReturn();
- Context context(defaultLoadType, Priority(0x80), Trace::TraceLevel(0));
- addResult(ret, provider->removeEntry(bucket, timestamp, context));
- req->Return();
-}
-
-const uint32_t magic_number = 0xf00ba2;
-
-bool checkConnection(FNET_Connection *connection) {
- return connection && connection->GetContext()._value.INT == magic_number;
-}
-} //namespace
-
-void ProviderStub::HOOK_fini(FRT_RPCRequest *req) {
- FNET_Connection *connection = req->GetConnection();
- if (checkConnection(connection)) {
- assert(_provider.get() != 0);
- _providerCleanupTask.ScheduleNow();
- }
-}
-
-void ProviderStub::RPC_connect(FRT_RPCRequest *req) {
- FRT_Values &params = *req->GetParams();
- FNET_Connection *connection = req->GetConnection();
- if (checkConnection(connection)) {
- return;
- }
- string build_id = getString(params[0]);
- if (build_id != getBuildId()) {
- req->SetError(FRTE_RPC_METHOD_FAILED,
- ("Wrong build id. Got '" + build_id +
- "', required '" + getBuildId() + "'").c_str());
- return;
- } else if (_provider.get()) {
- req->SetError(FRTE_RPC_METHOD_FAILED, "Server is already connected");
- return;
- }
- if (!connection) {
- req->SetError(FRTE_RPC_METHOD_FAILED);
- return;
- }
- connection->SetContext(FNET_Context(magic_number));
- _provider = _factory.create();
-}
-
-void ProviderStub::detachAndRun(FRT_RPCRequest *req, Closure::UP closure) {
- if (!checkConnection(req->GetConnection())) {
- req->SetError(FRTE_RPC_METHOD_FAILED);
- return;
- }
- assert(_provider.get() != 0);
- req->Detach();
- _executor.execute(makeTask(std::move(closure)));
-}
-
-void ProviderStub::RPC_getPartitionStates(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doGetPartitionStates, req, _provider.get()));
-}
-
-void ProviderStub::RPC_initialize(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doInitialize, req, _provider.get()));
-}
-
-void ProviderStub::RPC_listBuckets(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doListBuckets, req, _provider.get()));
-}
-
-void ProviderStub::RPC_setClusterState(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doSetClusterState, req, _provider.get()));
-}
-
-void ProviderStub::RPC_setActiveState(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doSetActiveState, req, _provider.get()));
-}
-
-void ProviderStub::RPC_getBucketInfo(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doGetBucketInfo, req, _provider.get()));
-}
-
-void ProviderStub::RPC_put(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doPut, req, _provider.get(), _repo));
-}
-
-void ProviderStub::RPC_removeById(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doRemoveById, req, _provider.get()));
-}
-
-void ProviderStub::RPC_removeIfFound(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doRemoveIfFound, req, _provider.get()));
-}
-
-void ProviderStub::RPC_update(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doUpdate, req, _provider.get(), _repo));
-}
-
-void ProviderStub::RPC_flush(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doFlush, req, _provider.get()));
-}
-
-void ProviderStub::RPC_get(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doGet, req, _provider.get(), _repo));
-}
-
-void ProviderStub::RPC_createIterator(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doCreateIterator, req, _provider.get(), _repo));
-}
-
-void ProviderStub::RPC_iterate(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doIterate, req, _provider.get()));
-}
-
-void ProviderStub::RPC_destroyIterator(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doDestroyIterator, req, _provider.get()));
-}
-
-void ProviderStub::RPC_createBucket(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doCreateBucket, req, _provider.get()));
-}
-
-void ProviderStub::RPC_deleteBucket(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doDeleteBucket, req, _provider.get()));
-}
-
-void ProviderStub::RPC_getModifiedBuckets(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doGetModifiedBuckets, req, _provider.get()));
-}
-
-void ProviderStub::RPC_split(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doSplit, req, _provider.get()));
-}
-
-void ProviderStub::RPC_join(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doJoin, req, _provider.get()));
-}
-
-void ProviderStub::RPC_move(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doMove, req, _provider.get()));
-}
-
-void ProviderStub::RPC_maintain(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doMaintain, req, _provider.get()));
-}
-
-void ProviderStub::RPC_removeEntry(FRT_RPCRequest *req) {
- detachAndRun(req, makeClosure(doRemoveEntry, req, _provider.get()));
-}
-
-void ProviderStub::SetupRpcCalls() {
- FRT_ReflectionBuilder rb(_supervisor.get());
- addConnect(rb, FRT_METHOD(ProviderStub::RPC_connect), this);
- addInitialize(rb, FRT_METHOD(ProviderStub::RPC_initialize), this);
- addGetPartitionStates(rb, FRT_METHOD(ProviderStub::RPC_getPartitionStates), this);
- addListBuckets(rb, FRT_METHOD(ProviderStub::RPC_listBuckets), this);
- addSetClusterState(rb, FRT_METHOD(ProviderStub::RPC_setClusterState), this);
- addSetActiveState(rb, FRT_METHOD(ProviderStub::RPC_setActiveState), this);
- addGetBucketInfo(rb, FRT_METHOD(ProviderStub::RPC_getBucketInfo), this);
- addPut(rb, FRT_METHOD(ProviderStub::RPC_put), this);
- addRemoveById(rb, FRT_METHOD(ProviderStub::RPC_removeById), this);
- addRemoveIfFound(rb, FRT_METHOD(ProviderStub::RPC_removeIfFound), this);
- addUpdate(rb, FRT_METHOD(ProviderStub::RPC_update), this);
- addFlush(rb, FRT_METHOD(ProviderStub::RPC_flush), this);
- addGet(rb, FRT_METHOD(ProviderStub::RPC_get), this);
- addCreateIterator(rb, FRT_METHOD(ProviderStub::RPC_createIterator), this);
- addIterate(rb, FRT_METHOD(ProviderStub::RPC_iterate), this);
- addDestroyIterator(rb, FRT_METHOD(ProviderStub::RPC_destroyIterator), this);
- addCreateBucket(rb, FRT_METHOD(ProviderStub::RPC_createBucket), this);
- addDeleteBucket(rb, FRT_METHOD(ProviderStub::RPC_deleteBucket), this);
- addGetModifiedBuckets(rb, FRT_METHOD(ProviderStub::RPC_getModifiedBuckets), this);
- addSplit(rb, FRT_METHOD(ProviderStub::RPC_split), this);
- addJoin(rb, FRT_METHOD(ProviderStub::RPC_join), this);
- addMove(rb, FRT_METHOD(ProviderStub::RPC_move), this);
- addMaintain(rb, FRT_METHOD(ProviderStub::RPC_maintain), this);
- addRemoveEntry(rb, FRT_METHOD(ProviderStub::RPC_removeEntry), this);
-}
-
-ProviderStub::ProviderStub(int port, uint32_t threads,
- const document::DocumentTypeRepo &repo,
- PersistenceProviderFactory &factory)
- : _supervisor(std::make_unique<FRT_Supervisor>()),
- _executor(threads, 256*1024),
- _repo(&repo),
- _factory(factory),
- _provider(),
- _providerCleanupTask(_supervisor->GetScheduler(), _executor, _provider)
-{
- SetupRpcCalls();
- _supervisor->SetSessionFiniHook(FRT_METHOD(ProviderStub::HOOK_fini), this);
- _supervisor->Start();
- _supervisor->Listen(port);
-}
-
-ProviderStub::~ProviderStub() {
- _supervisor->ShutDown(true);
- sync();
-}
-
-int
-ProviderStub::getPort() const {
- return _supervisor->GetListenPort();
-}
-
-}
diff --git a/persistence/src/vespa/persistence/proxy/providerstub.h b/persistence/src/vespa/persistence/proxy/providerstub.h
deleted file mode 100644
index cd0665171b1..00000000000
--- a/persistence/src/vespa/persistence/proxy/providerstub.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/util/closure.h>
-#include <vespa/vespalib/util/threadstackexecutor.h>
-#include <vespa/fnet/frt/invokable.h>
-#include <vespa/fnet/task.h>
-#include <memory>
-
-class FRT_Supervisor;
-
-namespace document { class DocumentTypeRepo; }
-
-namespace storage {
-namespace spi {
-class PersistenceProvider;
-
-class ProviderStub : private FRT_Invokable
-{
-public:
- struct PersistenceProviderFactory {
- virtual std::unique_ptr<PersistenceProvider> create() const = 0;
- virtual ~PersistenceProviderFactory() {}
- };
-
-private:
- struct ProviderCleanupTask : FNET_Task {
- vespalib::ThreadStackExecutor &executor;
- std::unique_ptr<PersistenceProvider> &provider;
- ProviderCleanupTask(FNET_Scheduler *s,
- vespalib::ThreadStackExecutor &e,
- std::unique_ptr<PersistenceProvider> &p)
- : FNET_Task(s), executor(e), provider(p) {}
- void PerformTask() override {
- executor.sync();
- assert(provider.get() != 0);
- provider.reset();
- }
- };
-
- std::unique_ptr<FRT_Supervisor> _supervisor;
- vespalib::ThreadStackExecutor _executor;
- const document::DocumentTypeRepo *_repo;
- PersistenceProviderFactory &_factory;
- std::unique_ptr<PersistenceProvider> _provider;
- ProviderCleanupTask _providerCleanupTask;
-
- void HOOK_fini(FRT_RPCRequest *req);
-
- void detachAndRun(FRT_RPCRequest *req, vespalib::Closure::UP closure);
- void RPC_connect(FRT_RPCRequest *req);
- void RPC_initialize(FRT_RPCRequest *req);
- void RPC_getPartitionStates(FRT_RPCRequest *req);
- void RPC_listBuckets(FRT_RPCRequest *req);
- void RPC_setClusterState(FRT_RPCRequest *req);
- void RPC_setActiveState(FRT_RPCRequest *req);
- void RPC_getBucketInfo(FRT_RPCRequest *req);
- void RPC_put(FRT_RPCRequest *req);
- void RPC_removeById(FRT_RPCRequest *req);
- void RPC_removeIfFound(FRT_RPCRequest *req);
- void RPC_update(FRT_RPCRequest *req);
- void RPC_flush(FRT_RPCRequest *req);
- void RPC_get(FRT_RPCRequest *req);
- void RPC_createIterator(FRT_RPCRequest *req);
- void RPC_iterate(FRT_RPCRequest *req);
- void RPC_destroyIterator(FRT_RPCRequest *req);
- void RPC_createBucket(FRT_RPCRequest *req);
- void RPC_deleteBucket(FRT_RPCRequest *req);
- void RPC_getModifiedBuckets(FRT_RPCRequest *req);
- void RPC_split(FRT_RPCRequest *req);
- void RPC_join(FRT_RPCRequest *req);
- void RPC_move(FRT_RPCRequest *req);
- void RPC_maintain(FRT_RPCRequest *req);
- void RPC_removeEntry(FRT_RPCRequest *req);
-
- void SetupRpcCalls();
-
-public:
- typedef std::unique_ptr<ProviderStub> UP;
-
- ProviderStub(int port, uint32_t threads,
- const document::DocumentTypeRepo &repo,
- PersistenceProviderFactory &factory);
- ~ProviderStub();
-
- bool hasClient() const { return (_provider.get() != 0); }
- int getPort() const;
- void setRepo(const document::DocumentTypeRepo &repo) {
- _repo = &repo;
- }
- void sync() { _executor.sync(); }
-};
-
-} // namespace spi
-} // namespace storage
-
diff --git a/persistence/src/vespa/persistence/spi/CMakeLists.txt b/persistence/src/vespa/persistence/spi/CMakeLists.txt
index 598ed757efb..a8b1faadcd3 100644
--- a/persistence/src/vespa/persistence/spi/CMakeLists.txt
+++ b/persistence/src/vespa/persistence/spi/CMakeLists.txt
@@ -13,6 +13,7 @@ vespa_add_library(persistence_spi OBJECT
read_consistency.cpp
result
selection.cpp
+ test.cpp
docentry
DEPENDS
)
diff --git a/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.cpp b/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.cpp
index 3782d17ea50..5e6e908e042 100644
--- a/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.cpp
+++ b/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.cpp
@@ -43,7 +43,7 @@ AbstractPersistenceProvider::removeIfFound(const Bucket& b, Timestamp timestamp,
}
BucketIdListResult
-AbstractPersistenceProvider::getModifiedBuckets() const
+AbstractPersistenceProvider::getModifiedBuckets(BucketSpace) const
{
BucketIdListResult::List list;
return BucketIdListResult(list);
@@ -52,7 +52,7 @@ AbstractPersistenceProvider::getModifiedBuckets() const
Result
AbstractPersistenceProvider::move(const Bucket& source, PartitionId target, Context& context)
{
- spi::Bucket to(source.getBucketId(), spi::PartitionId(target));
+ spi::Bucket to(source.getBucket(), spi::PartitionId(target));
return join(source, source, to, context);
}
diff --git a/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.h b/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.h
index 461f31ad474..2460259cfe7 100644
--- a/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.h
+++ b/persistence/src/vespa/persistence/spi/abstractpersistenceprovider.h
@@ -64,7 +64,7 @@ public:
/**
* Default impl empty.
*/
- BucketIdListResult getModifiedBuckets() const override;
+ BucketIdListResult getModifiedBuckets(BucketSpace bucketSpace) const override;
/**
* Uses join by default.
diff --git a/persistence/src/vespa/persistence/spi/bucket.h b/persistence/src/vespa/persistence/spi/bucket.h
index e4cbbeae075..dec185b6082 100644
--- a/persistence/src/vespa/persistence/spi/bucket.h
+++ b/persistence/src/vespa/persistence/spi/bucket.h
@@ -15,25 +15,27 @@
#pragma once
#include <persistence/spi/types.h>
-#include <vespa/document/bucket/bucketid.h>
+#include <vespa/document/bucket/bucket.h>
namespace storage {
namespace spi {
class Bucket {
- document::BucketId _bucket;
+ document::Bucket _bucket;
PartitionId _partition;
public:
- Bucket() : _bucket(0), _partition(0) {}
- Bucket(const document::BucketId& b, PartitionId p)
+ Bucket() : _bucket(document::BucketSpace::placeHolder(), document::BucketId(0)), _partition(0) {}
+ Bucket(const document::Bucket& b, PartitionId p)
: _bucket(b), _partition(p) {}
- const document::BucketId& getBucketId() const { return _bucket; }
+ const document::Bucket &getBucket() const { return _bucket; }
+ document::BucketId getBucketId() const { return _bucket.getBucketId(); }
+ document::BucketSpace getBucketSpace() const { return _bucket.getBucketSpace(); }
PartitionId getPartition() const { return _partition; }
/** Convert easily to a document bucket id to make class easy to use. */
- operator const document::BucketId&() const { return _bucket; }
+ operator document::BucketId() const { return _bucket.getBucketId(); }
bool operator==(const Bucket& o) const {
return (_bucket == o._bucket && _partition == o._partition);
diff --git a/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp
index 6f8600c7ca4..e338c76fb88 100644
--- a/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp
+++ b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.cpp
@@ -104,10 +104,10 @@ Impl::getPartitionStates() const
}
BucketIdListResult
-Impl::listBuckets(PartitionId v1) const
+Impl::listBuckets(BucketSpace bucketSpace, PartitionId v1) const
{
PRE_PROCESS(2);
- BucketIdListResult r(_next->listBuckets(v1));
+ BucketIdListResult r(_next->listBuckets(bucketSpace, v1));
POST_PROCESS(2, r);
return r;
}
@@ -250,10 +250,10 @@ Impl::deleteBucket(const Bucket& v1, Context& v2)
}
BucketIdListResult
-Impl::getModifiedBuckets() const
+Impl::getModifiedBuckets(BucketSpace bucketSpace) const
{
PRE_PROCESS(18);
- BucketIdListResult r(_next->getModifiedBuckets());
+ BucketIdListResult r(_next->getModifiedBuckets(bucketSpace));
POST_PROCESS(18, r);
return r;
}
diff --git a/persistence/src/vespa/persistence/spi/metricpersistenceprovider.h b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.h
index 2a577f1234a..8ec2e2dd1bc 100644
--- a/persistence/src/vespa/persistence/spi/metricpersistenceprovider.h
+++ b/persistence/src/vespa/persistence/spi/metricpersistenceprovider.h
@@ -34,7 +34,7 @@ public:
// Implementation of the PersistenceProvider API
Result initialize() override;
PartitionStateListResult getPartitionStates() const override;
- BucketIdListResult listBuckets(PartitionId) const override;
+ BucketIdListResult listBuckets(BucketSpace bucketSpace, PartitionId) const override;
Result setClusterState(const ClusterState&) override;
Result setActiveState(const Bucket&, BucketInfo::ActiveState) override;
BucketInfoResult getBucketInfo(const Bucket&) const override;
@@ -51,7 +51,7 @@ public:
Result destroyIterator(IteratorId, Context&) override;
Result createBucket(const Bucket&, Context&) override;
Result deleteBucket(const Bucket&, Context&) override;
- BucketIdListResult getModifiedBuckets() const override;
+ BucketIdListResult getModifiedBuckets(BucketSpace bucketSpace) const override;
Result maintain(const Bucket&, MaintenanceLevel level) override;
Result split(const Bucket& source, const Bucket& target1, const Bucket& target2, Context&) override;
Result join(const Bucket& source1, const Bucket& source2, const Bucket& target, Context&) override;
diff --git a/persistence/src/vespa/persistence/spi/persistenceprovider.h b/persistence/src/vespa/persistence/spi/persistenceprovider.h
index 154a6c43469..b10ed618e88 100644
--- a/persistence/src/vespa/persistence/spi/persistenceprovider.h
+++ b/persistence/src/vespa/persistence/spi/persistenceprovider.h
@@ -59,6 +59,7 @@ namespace spi {
struct PersistenceProvider
{
typedef std::unique_ptr<PersistenceProvider> UP;
+ using BucketSpace = document::BucketSpace;
virtual ~PersistenceProvider();
@@ -84,7 +85,7 @@ struct PersistenceProvider
* Return list of buckets that provider has stored on the given partition.
* Typically called once per partition on startup.
*/
- virtual BucketIdListResult listBuckets(PartitionId) const = 0;
+ virtual BucketIdListResult listBuckets(BucketSpace bucketSpace, PartitionId) const = 0;
/**
* Updates the persistence provider with the last cluster state.
@@ -384,7 +385,7 @@ struct PersistenceProvider
* should clear it's list of modified buckets, so that the next call does
* not return the same buckets.
*/
- virtual BucketIdListResult getModifiedBuckets() const = 0;
+ virtual BucketIdListResult getModifiedBuckets(BucketSpace bucketSpace) const = 0;
/**
* Allows the provider to do periodic maintenance and verification.
diff --git a/persistence/src/vespa/persistence/spi/test.cpp b/persistence/src/vespa/persistence/spi/test.cpp
new file mode 100644
index 00000000000..d91f7fdfc1c
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/test.cpp
@@ -0,0 +1,38 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "test.h"
+
+using document::BucketId;
+using document::BucketSpace;
+
+namespace storage::spi::test {
+
+BucketSpace makeBucketSpace()
+{
+ return BucketSpace::placeHolder();
+}
+
+BucketSpace makeBucketSpace(const vespalib::string &docTypeName)
+{
+ // Used by persistence conformance test to map fron document type name
+ // to bucket space. See document::TestDocRepo for known document types.
+ if (docTypeName == "no") {
+ return BucketSpace(2);
+ } else if (docTypeName == "testdoctype2") {
+ return BucketSpace(1);
+ } else {
+ return makeBucketSpace();
+ }
+}
+
+Bucket makeBucket(BucketId bucketId, PartitionId partitionId)
+{
+ return Bucket(document::Bucket(BucketSpace::placeHolder(), bucketId), partitionId);
+}
+
+Bucket makeBucket(BucketId bucketId)
+{
+ return makeBucket(bucketId, PartitionId(0));
+}
+
+}
diff --git a/persistence/src/vespa/persistence/spi/test.h b/persistence/src/vespa/persistence/spi/test.h
new file mode 100644
index 00000000000..cbd546d6315
--- /dev/null
+++ b/persistence/src/vespa/persistence/spi/test.h
@@ -0,0 +1,16 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "bucket.h"
+
+namespace storage::spi::test {
+
+// Helper functions used by unit tests
+
+document::BucketSpace makeBucketSpace();
+document::BucketSpace makeBucketSpace(const vespalib::string &docTypeName);
+Bucket makeBucket(document::BucketId bucketId, PartitionId partitionId);
+Bucket makeBucket(document::BucketId bucketId);
+
+}
diff --git a/pom.xml b/pom.xml
index 0c41a993fdf..fc6faa58cd2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -842,6 +842,16 @@
<artifactId>xercesImpl</artifactId>
<version>2.11.0</version>
</dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcpkix-jdk15on</artifactId>
+ <version>${bouncycastle.version}</version>
+ </dependency>
+ <dependency>
+ <groupId>org.bouncycastle</groupId>
+ <artifactId>bcprov-jdk15on</artifactId>
+ <version>${bouncycastle.version}</version>
+ </dependency>
<!-- jersey 2 support -->
<dependency>
<groupId>javax.ws.rs</groupId>
@@ -959,7 +969,6 @@
<module>documentapi</module>
<module>document</module>
<module>documentgen-test</module>
- <module>dummy-persistence</module>
<module>fileacquirer</module>
<module>filedistributionmanager</module>
<module>filedistribution</module>
@@ -986,7 +995,6 @@
<module>node-maintainer</module>
<module>orchestrator-restapi</module>
<module>orchestrator</module>
- <module>persistence</module>
<module>predicate-search</module>
<module>predicate-search-core</module>
<module>processing</module>
diff --git a/searchcore/CMakeLists.txt b/searchcore/CMakeLists.txt
index 208be618b1f..2944b753dca 100644
--- a/searchcore/CMakeLists.txt
+++ b/searchcore/CMakeLists.txt
@@ -97,6 +97,7 @@ vespa_define_module(
src/tests/proton/documentdb/maintenancecontroller
src/tests/proton/documentdb/move_operation_limiter
src/tests/proton/documentdb/storeonlyfeedview
+ src/tests/proton/documentdb/threading_service_config
src/tests/proton/documentmetastore
src/tests/proton/documentmetastore/lidreusedelayer
src/tests/proton/feed_and_search
@@ -122,6 +123,7 @@ vespa_define_module(
src/tests/proton/metrics/metrics_engine
src/tests/proton/persistenceconformance
src/tests/proton/persistenceengine
+ src/tests/proton/persistenceengine/persistence_handler_map
src/tests/proton/proton
src/tests/proton/proton_config_fetcher
src/tests/proton/proton_configurer
diff --git a/searchcore/src/apps/proton/downpersistence.cpp b/searchcore/src/apps/proton/downpersistence.cpp
index b7fecb6aef1..4ee9a0635b0 100644
--- a/searchcore/src/apps/proton/downpersistence.cpp
+++ b/searchcore/src/apps/proton/downpersistence.cpp
@@ -38,7 +38,7 @@ DownPersistence::getPartitionStates() const
}
BucketIdListResult
-DownPersistence::listBuckets(PartitionId) const
+DownPersistence::listBuckets(BucketSpace, PartitionId) const
{
return BucketIdListResult(errorResult.getErrorCode(),
errorResult.getErrorMessage());
@@ -148,7 +148,7 @@ DownPersistence::deleteBucket(const Bucket&, Context&)
BucketIdListResult
-DownPersistence::getModifiedBuckets() const
+DownPersistence::getModifiedBuckets(BucketSpace) const
{
return BucketIdListResult(errorResult.getErrorCode(),
errorResult.getErrorMessage());
diff --git a/searchcore/src/apps/proton/downpersistence.h b/searchcore/src/apps/proton/downpersistence.h
index 61738a600de..9e64b89f065 100644
--- a/searchcore/src/apps/proton/downpersistence.h
+++ b/searchcore/src/apps/proton/downpersistence.h
@@ -28,7 +28,7 @@ public:
Result initialize() override;
PartitionStateListResult getPartitionStates() const override;
- BucketIdListResult listBuckets(PartitionId) const override;
+ BucketIdListResult listBuckets(BucketSpace bucketSpace, PartitionId) const override;
Result setClusterState(const ClusterState&) override;
Result setActiveState(const Bucket&, BucketInfo::ActiveState) override;
BucketInfoResult getBucketInfo(const Bucket&) const override;
@@ -47,7 +47,7 @@ public:
Result destroyIterator(IteratorId id, Context&) override;
Result createBucket(const Bucket&, Context&) override;
Result deleteBucket(const Bucket&, Context&) override;
- BucketIdListResult getModifiedBuckets() const override;
+ BucketIdListResult getModifiedBuckets(BucketSpace bucketSpace) const override;
Result maintain(const Bucket&, MaintenanceLevel level) override;
Result split(const Bucket& source, const Bucket& target1, const Bucket& target2, Context&) override;
Result join(const Bucket& source1, const Bucket& source2, const Bucket& target, Context&) override;
diff --git a/searchcore/src/apps/tests/persistenceconformance_test.cpp b/searchcore/src/apps/tests/persistenceconformance_test.cpp
index 905d0434886..0ea3b41f232 100644
--- a/searchcore/src/apps/tests/persistenceconformance_test.cpp
+++ b/searchcore/src/apps/tests/persistenceconformance_test.cpp
@@ -2,42 +2,45 @@
#include <vespa/vespalib/testkit/testapp.h>
+#include <tests/proton/common/dummydbowner.h>
#include <vespa/config-imported-fields.h>
#include <vespa/config-rank-profiles.h>
#include <vespa/config-summarymap.h>
-#include <vespa/searchsummary/config/config-juniperrc.h>
#include <vespa/document/base/testdocman.h>
+#include <vespa/fastos/file.h>
#include <vespa/persistence/conformancetest/conformancetest.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/searchcommon/common/schemaconfigurer.h>
+#include <vespa/searchcore/proton/common/hw_info.h>
#include <vespa/searchcore/proton/matching/querylimiter.h>
+#include <vespa/searchcore/proton/metrics/metricswireservice.h>
#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h>
#include <vespa/searchcore/proton/persistenceengine/persistenceengine.h>
+#include <vespa/searchcore/proton/server/bootstrapconfig.h>
#include <vespa/searchcore/proton/server/document_db_maintenance_config.h>
#include <vespa/searchcore/proton/server/documentdb.h>
#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
#include <vespa/searchcore/proton/server/fileconfigmanager.h>
#include <vespa/searchcore/proton/server/memoryconfigstore.h>
-#include <vespa/searchcore/proton/server/bootstrapconfig.h>
-#include <vespa/searchcore/proton/metrics/metricswireservice.h>
#include <vespa/searchcore/proton/server/persistencehandlerproxy.h>
#include <vespa/searchlib/index/dummyfileheadercontext.h>
#include <vespa/searchlib/transactionlog/translogserver.h>
-#include <tests/proton/common/dummydbowner.h>
+#include <vespa/searchsummary/config/config-juniperrc.h>
#include <vespa/vespalib/io/fileutil.h>
-#include <vespa/searchcore/proton/common/hw_info.h>
-#include <vespa/fastos/file.h>
#include <vespa/log/log.h>
LOG_SETUP("persistenceconformance_test");
using namespace config;
using namespace proton;
-using namespace vespa::config::search;
+using namespace cloud::config::filedistribution;
+using namespace storage::spi::test;
using namespace vespa::config::search::core;
using namespace vespa::config::search::summary;
-using namespace cloud::config::filedistribution;
+using namespace vespa::config::search;
using std::shared_ptr;
+using document::BucketSpace;
using document::DocumentType;
using document::DocumentTypeRepo;
using document::DocumenttypesConfig;
@@ -128,6 +131,7 @@ public:
std::make_shared<TuneFileDocumentDB>(),
schema,
std::make_shared<DocumentDBMaintenanceConfig>(),
+ search::LogDocumentStore::Config(),
"client",
docTypeName.getName()));
}
@@ -157,7 +161,8 @@ private:
public:
DocumentDBFactory(const vespalib::string &baseDir, int tlsListenPort);
~DocumentDBFactory();
- DocumentDB::SP create(const DocTypeName &docType,
+ DocumentDB::SP create(BucketSpace bucketSpace,
+ const DocTypeName &docType,
const ConfigFactory &factory) {
DocumentDBConfig::SP snapshot = factory.create(docType);
vespalib::mkdir(_baseDir, false);
@@ -185,15 +190,15 @@ public:
_queryLimiter,
_clock,
docType,
- ProtonConfig(),
+ bucketSpace,
+ *b->getProtonConfigSP(),
const_cast<DocumentDBFactory &>(*this),
_summaryExecutor,
_summaryExecutor,
- NULL,
+ _tls,
_metricsWireService,
_fileHeaderContext,
- _config_stores.getConfigStore(
- docType.toString()),
+ _config_stores.getConfigStore(docType.toString()),
std::make_shared<vespalib::ThreadStackExecutor>
(16, 128 * 1024),
HwInfo()));
@@ -224,7 +229,9 @@ public:
{
DocTypeVector types = cfgFactory.getDocTypes();
for (size_t i = 0; i < types.size(); ++i) {
- DocumentDB::SP docDb = docDbFactory.create(types[i],
+ BucketSpace bucketSpace(makeBucketSpace(types[i].getName()));
+ DocumentDB::SP docDb = docDbFactory.create(bucketSpace,
+ types[i],
cfgFactory);
docDb->start();
docDb->waitForOnlineState();
@@ -316,7 +323,7 @@ public:
LOG(info, "putHandler(%s)", itr->first.toString().c_str());
IPersistenceHandler::SP proxy(
new PersistenceHandlerProxy(itr->second));
- putHandler(itr->first, proxy);
+ putHandler(itr->second->getBucketSpace(), itr->first, proxy);
}
}
@@ -328,7 +335,7 @@ public:
const DocumentDBMap &docDbs = _docDbRepo->getDocDbs();
for (DocumentDBMap::const_iterator itr = docDbs.begin();
itr != docDbs.end(); ++itr) {
- IPersistenceHandler::SP proxy(removeHandler(itr->first));
+ IPersistenceHandler::SP proxy(removeHandler(itr->second->getBucketSpace(), itr->first));
}
}
@@ -381,6 +388,7 @@ public:
virtual bool hasPersistence() const override { return true; }
virtual bool supportsActiveState() const override { return true; }
+ virtual bool supportsBucketSpaces() const override { return true; }
};
@@ -582,6 +590,11 @@ TEST_F("require thant testJoinSameSourceBucketsTargetExists() works",
f.test.testJoinSameSourceBucketsTargetExists();
}
+TEST_F("require that multiple bucket spaces works", TestFixture)
+{
+ f.test.testBucketSpaces();
+}
+
// *** Run all conformance tests, but ignore the results BEGIN ***
#define CONVERT_TEST(testFunction) \
diff --git a/searchcore/src/apps/vespa-transactionlog-inspect/vespa-transactionlog-inspect.cpp b/searchcore/src/apps/vespa-transactionlog-inspect/vespa-transactionlog-inspect.cpp
index 6d57795a734..17f1faffbba 100644
--- a/searchcore/src/apps/vespa-transactionlog-inspect/vespa-transactionlog-inspect.cpp
+++ b/searchcore/src/apps/vespa-transactionlog-inspect/vespa-transactionlog-inspect.cpp
@@ -315,7 +315,6 @@ public:
}
return RPC::OK;
}
- virtual void inSync() override { }
virtual void eof() override { _eof = true; }
bool isEof() const { return _eof; }
};
diff --git a/searchcore/src/tests/proton/attribute/attributeflush_test.cpp b/searchcore/src/tests/proton/attribute/attributeflush_test.cpp
index ecb39af61bc..eb6b17a9826 100644
--- a/searchcore/src/tests/proton/attribute/attributeflush_test.cpp
+++ b/searchcore/src/tests/proton/attribute/attributeflush_test.cpp
@@ -577,7 +577,7 @@ Test::requireThatFlushedAttributeCanBeLoaded(const HwInfo &hwInfo)
{
constexpr uint32_t numDocs = 100;
BaseFixture f(hwInfo);
- vespalib::string attrName(hwInfo.slowDisk() ? "a11slow" : "a11fast");
+ vespalib::string attrName(hwInfo.disk().slow() ? "a11slow" : "a11fast");
{
AttributeManagerFixture amf(f);
AttributeManager &am = amf._m;
@@ -606,8 +606,8 @@ Test::requireThatFlushedAttributeCanBeLoaded(const HwInfo &hwInfo)
void
Test::requireThatFlushedAttributeCanBeLoaded()
{
- TEST_DO(requireThatFlushedAttributeCanBeLoaded(HwInfo(false)));
- TEST_DO(requireThatFlushedAttributeCanBeLoaded(HwInfo(true)));
+ TEST_DO(requireThatFlushedAttributeCanBeLoaded(HwInfo(HwInfo::Disk(0, false, false), HwInfo::Memory(0), HwInfo::Cpu(0))));
+ TEST_DO(requireThatFlushedAttributeCanBeLoaded(HwInfo(HwInfo::Disk(0, true, false), HwInfo::Memory(0), HwInfo::Cpu(0))));
}
int
diff --git a/searchcore/src/tests/proton/common/hw_info_sampler/hw_info_sampler_test.cpp b/searchcore/src/tests/proton/common/hw_info_sampler/hw_info_sampler_test.cpp
index f9745d7fd03..78676d6abf1 100644
--- a/searchcore/src/tests/proton/common/hw_info_sampler/hw_info_sampler_test.cpp
+++ b/searchcore/src/tests/proton/common/hw_info_sampler/hw_info_sampler_test.cpp
@@ -18,6 +18,7 @@ namespace {
const vespalib::string test_dir = "temp";
constexpr uint64_t sampleLen = 1024 * 1024 * 40;
+constexpr bool sharedDisk = false;
long time_point_to_long(Clock::time_point tp)
{
@@ -44,11 +45,11 @@ struct Fixture
TEST_F("Test that hw_info_sampler uses override info", Fixture)
{
- Config samplerCfg(75.0, 100.0, sampleLen);
+ Config samplerCfg(0, 75.0, 100.0, sampleLen, sharedDisk, 0, 0);
HwInfoSampler sampler(test_dir, samplerCfg);
EXPECT_EQUAL(75.0, sampler.diskWriteSpeed());
EXPECT_NOT_EQUAL(0, time_point_to_long(sampler.sampleTime()));
- EXPECT_TRUE(sampler.hwInfo().slowDisk());
+ EXPECT_TRUE(sampler.hwInfo().disk().slow());
}
TEST_F("Test that hw_info_sampler uses saved info", Fixture)
@@ -57,16 +58,16 @@ TEST_F("Test that hw_info_sampler uses saved info", Fixture)
builder.disk.writespeed = 72.0;
builder.disk.sampletime = time_point_to_long(Clock::now());
f.writeConfig(builder);
- Config samplerCfg(0.0, 70.0, sampleLen);
+ Config samplerCfg(0, 0.0, 70.0, sampleLen, sharedDisk, 0, 0);
HwInfoSampler sampler(test_dir, samplerCfg);
EXPECT_EQUAL(builder.disk.writespeed, sampler.diskWriteSpeed());
EXPECT_EQUAL(builder.disk.sampletime, time_point_to_long(sampler.sampleTime()));
- EXPECT_FALSE(sampler.hwInfo().slowDisk());
+ EXPECT_FALSE(sampler.hwInfo().disk().slow());
}
TEST_F("Test that hw_info_sampler can sample disk write speed", Fixture)
{
- Config samplerCfg(0.0, 100.0, sampleLen);
+ Config samplerCfg(0, 0.0, 100.0, sampleLen, sharedDisk, 0, 0);
HwInfoSampler sampler(test_dir, samplerCfg);
ASSERT_NOT_EQUAL(0.0, sampler.diskWriteSpeed());
ASSERT_NOT_EQUAL(0, time_point_to_long(sampler.sampleTime()));
@@ -76,6 +77,48 @@ TEST_F("Test that hw_info_sampler can sample disk write speed", Fixture)
time_point_to_long(sampler2.sampleTime()));
}
+TEST_F("require that disk size can be specified", Fixture)
+{
+ Config samplerCfg(1024, 1.0, 0.0, sampleLen, sharedDisk, 0, 0);
+ HwInfoSampler sampler(test_dir, samplerCfg);
+ EXPECT_EQUAL(1024u, sampler.hwInfo().disk().sizeBytes());
+}
+
+TEST_F("require that disk size can be sampled", Fixture)
+{
+ Config samplerCfg(0, 1.0, 0.0, sampleLen, sharedDisk, 0, 0);
+ HwInfoSampler sampler(test_dir, samplerCfg);
+ EXPECT_GREATER(sampler.hwInfo().disk().sizeBytes(), 0u);
+}
+
+TEST_F("require that memory size can be specified", Fixture)
+{
+ Config samplerCfg(0, 1.0, 0.0, sampleLen, sharedDisk, 1024, 0);
+ HwInfoSampler sampler(test_dir, samplerCfg);
+ EXPECT_EQUAL(1024u, sampler.hwInfo().memory().sizeBytes());
+}
+
+TEST_F("require that memory size can be sampled", Fixture)
+{
+ Config samplerCfg(0, 1.0, 0.0, sampleLen, sharedDisk, 0, 0);
+ HwInfoSampler sampler(test_dir, samplerCfg);
+ EXPECT_GREATER(sampler.hwInfo().memory().sizeBytes(), 0u);
+}
+
+TEST_F("require that num cpu cores can be specified", Fixture)
+{
+ Config samplerCfg(0, 1.0, 0.0, sampleLen, sharedDisk, 0, 8);
+ HwInfoSampler sampler(test_dir, samplerCfg);
+ EXPECT_EQUAL(8u, sampler.hwInfo().cpu().cores());
+}
+
+TEST_F("require that num cpu cores can be sampled", Fixture)
+{
+ Config samplerCfg(0, 1.0, 0.0, sampleLen, sharedDisk, 0, 0);
+ HwInfoSampler sampler(test_dir, samplerCfg);
+ EXPECT_GREATER(sampler.hwInfo().cpu().cores(), 0u);
+}
+
TEST_MAIN()
{
vespalib::rmdir(test_dir, true);
diff --git a/searchcore/src/tests/proton/docsummary/CMakeLists.txt b/searchcore/src/tests/proton/docsummary/CMakeLists.txt
index e1b0da71c34..906a1e642f5 100644
--- a/searchcore/src/tests/proton/docsummary/CMakeLists.txt
+++ b/searchcore/src/tests/proton/docsummary/CMakeLists.txt
@@ -3,6 +3,7 @@ vespa_add_executable(searchcore_docsummary_test_app TEST
SOURCES
docsummary.cpp
DEPENDS
+ searchcore_test
searchcore_server
searchcore_initializer
searchcore_reprocessing
diff --git a/searchcore/src/tests/proton/docsummary/docsummary.cpp b/searchcore/src/tests/proton/docsummary/docsummary.cpp
index f492dc44fc6..3b199d266a8 100644
--- a/searchcore/src/tests/proton/docsummary/docsummary.cpp
+++ b/searchcore/src/tests/proton/docsummary/docsummary.cpp
@@ -1,15 +1,21 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <tests/proton/common/dummydbowner.h>
+#include <vespa/config/helper/configgetter.hpp>
+#include <vespa/eval/tensor/default_tensor.h>
+#include <vespa/eval/tensor/serialization/typed_binary_format.h>
+#include <vespa/eval/tensor/tensor_factory.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/searchcore/proton/attribute/attribute_writer.h>
-#include <vespa/searchcore/proton/common/bucketfactory.h>
-#include <vespa/searchcore/proton/documentmetastore/documentmetastore.h>
+#include <vespa/searchcore/proton/test/bucketfactory.h>
#include <vespa/searchcore/proton/docsummary/docsumcontext.h>
#include <vespa/searchcore/proton/docsummary/documentstoreadapter.h>
#include <vespa/searchcore/proton/docsummary/summarymanager.h>
+#include <vespa/searchcore/proton/documentmetastore/documentmetastore.h>
#include <vespa/searchcore/proton/feedoperation/putoperation.h>
#include <vespa/searchcore/proton/metrics/metricswireservice.h>
-#include <vespa/searchcore/proton/server/documentdb.h>
#include <vespa/searchcore/proton/server/bootstrapconfig.h>
+#include <vespa/searchcore/proton/server/documentdb.h>
#include <vespa/searchcore/proton/server/documentdbconfigmanager.h>
#include <vespa/searchcore/proton/server/idocumentsubdb.h>
#include <vespa/searchcore/proton/server/memoryconfigstore.h>
@@ -21,17 +27,12 @@
#include <vespa/searchlib/engine/docsumapi.h>
#include <vespa/searchlib/index/docbuilder.h>
#include <vespa/searchlib/index/dummyfileheadercontext.h>
-#include <vespa/searchlib/transactionlog/translogserver.h>
-#include <tests/proton/common/dummydbowner.h>
-#include <vespa/vespalib/testkit/testapp.h>
-#include <vespa/searchlib/transactionlog/nosyncproxy.h>
-#include <vespa/eval/tensor/tensor_factory.h>
-#include <vespa/eval/tensor/default_tensor.h>
#include <vespa/searchlib/tensor/tensor_attribute.h>
+#include <vespa/searchlib/transactionlog/nosyncproxy.h>
+#include <vespa/searchlib/transactionlog/translogserver.h>
#include <vespa/vespalib/data/slime/slime.h>
-#include <vespa/config/helper/configgetter.hpp>
-#include <vespa/eval/tensor/serialization/typed_binary_format.h>
#include <vespa/vespalib/encoding/base64.h>
+#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/log/log.h>
LOG_SETUP("docsummary_test");
@@ -43,6 +44,7 @@ using namespace search::engine;
using namespace search::index;
using namespace search::transactionlog;
using namespace search;
+using namespace storage::spi::test;
using document::DocumenttypesConfig;
using search::TuneFileDocumentDB;
@@ -201,38 +203,21 @@ public:
_sa()
{
assert(_mkdirOk);
- auto b = std::make_shared<BootstrapConfig>(1,
- _documenttypesConfig,
- _repo,
+ auto b = std::make_shared<BootstrapConfig>(1, _documenttypesConfig, _repo,
std::make_shared<ProtonConfig>(),
std::make_shared<FiledistributorrpcConfig>(),
_tuneFileDocumentDB);
_configMgr.forwardConfig(b);
_configMgr.nextGeneration(0);
if (! FastOS_File::MakeDirectory((std::string("tmpdb/") + docTypeName).c_str())) { abort(); }
- _ddb.reset(new DocumentDB("tmpdb",
- _configMgr.getConfig(),
- "tcp/localhost:9013",
- _queryLimiter,
- _clock,
- DocTypeName(docTypeName),
- ProtonConfig(),
- *this,
- _summaryExecutor,
- _summaryExecutor,
- NULL,
- _dummy,
- _fileHeaderContext,
- ConfigStore::UP(new MemoryConfigStore),
- std::make_shared<vespalib::
- ThreadStackExecutor>
- (16, 128 * 1024),
- _hwInfo)),
+ _ddb.reset(new DocumentDB("tmpdb", _configMgr.getConfig(), "tcp/localhost:9013", _queryLimiter, _clock,
+ DocTypeName(docTypeName), makeBucketSpace(),
+ *b->getProtonConfigSP(), *this, _summaryExecutor, _summaryExecutor,
+ _tls, _dummy, _fileHeaderContext, ConfigStore::UP(new MemoryConfigStore),
+ std::make_shared<vespalib::ThreadStackExecutor>(16, 128 * 1024), _hwInfo)),
_ddb->start();
_ddb->waitForOnlineState();
- _aw = AttributeWriter::UP(new AttributeWriter(_ddb->
- getReadySubDB()->
- getAttributeManager()));
+ _aw = AttributeWriter::UP(new AttributeWriter(_ddb->getReadySubDB()->getAttributeManager()));
_sa = _ddb->getReadySubDB()->getSummaryAdapter();
}
~DBContext()
@@ -251,11 +236,8 @@ public:
typedef DocumentMetaStore::Result PutRes;
IDocumentMetaStore &dms = _ddb->getReadySubDB()->getDocumentMetaStoreContext().get();
uint32_t docSize = 1;
- PutRes putRes(dms.put(docId.getGlobalId(),
- BucketFactory::getBucketId(docId),
- Timestamp(0u),
- docSize,
- lid));
+ PutRes putRes(dms.put(docId.getGlobalId(), BucketFactory::getBucketId(docId),
+ Timestamp(0u), docSize, lid));
LOG_ASSERT(putRes.ok());
uint64_t serialNum = _ddb->getFeedHandler().incSerialNum();
_aw->put(serialNum, doc, lid, true, std::shared_ptr<IDestructorCallback>());
@@ -273,8 +255,7 @@ public:
op.setDbDocumentId(dbdId);
op.setPrevDbDocumentId(prevDbdId);
_ddb->getFeedHandler().storeOperation(op);
- SearchView *sv(dynamic_cast<SearchView *>
- (_ddb->getReadySubDB()->getSearchView().get()));
+ SearchView *sv(dynamic_cast<SearchView *>(_ddb->getReadySubDB()->getSearchView().get()));
if (sv != NULL) {
// cf. FeedView::putAttributes()
DocIdLimit &docIdLimit = sv->getDocIdLimit();
@@ -461,12 +442,12 @@ Test::assertSlime(const std::string &exp, const DocsumReply &reply, uint32_t id,
vespalib::slime::JsonFormat::encode(slime, buf, false);
vespalib::Slime tmpSlime;
size_t used = vespalib::slime::JsonFormat::decode(buf.get(), tmpSlime);
- EXPECT_EQUAL(buf.get().size, used);
+ EXPECT_TRUE(used > 0);
slime = std::move(tmpSlime);
}
vespalib::Slime expSlime;
size_t used = vespalib::slime::JsonFormat::decode(exp, expSlime);
- EXPECT_EQUAL(exp.size(), used);
+ EXPECT_TRUE(used > 0);
return EXPECT_EQUAL(expSlime, slime);
}
diff --git a/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
index 17759e353e7..2d0ff39efa4 100644
--- a/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
+++ b/searchcore/src/tests/proton/docsummary/summaryfieldconverter_test.cpp
@@ -119,7 +119,7 @@ FieldBlock::FieldBlock(const vespalib::string &jsonInput)
: input(jsonInput), slime(), binary(1024), json()
{
size_t used = vespalib::slime::JsonFormat::decode(jsonInput, slime);
- EXPECT_EQUAL(jsonInput.size(), used);
+ EXPECT_TRUE(used > 0);
{
search::SlimeOutputRawBufAdapter adapter(binary);
vespalib::slime::JsonFormat::encode(slime, adapter, true);
diff --git a/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
index 3c891ff4c11..bef65add869 100644
--- a/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
+++ b/searchcore/src/tests/proton/document_iterator/document_iterator_test.cpp
@@ -6,6 +6,7 @@
#include <vespa/persistence/spi/bucket.h>
#include <vespa/persistence/spi/docentry.h>
#include <vespa/persistence/spi/result.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/searchcore/proton/common/attrupdate.h>
#include <vespa/searchcore/proton/persistenceengine/document_iterator.h>
#include <vespa/searchcore/proton/server/commit_and_wait_document_retriever.h>
@@ -43,6 +44,7 @@ using storage::spi::IterateResult;
using storage::spi::PartitionId;
using storage::spi::Selection;
using storage::spi::Timestamp;
+using storage::spi::test::makeBucket;
using namespace proton;
using namespace search::index;
@@ -50,7 +52,7 @@ using namespace search::index;
const uint64_t largeNum = 10000000;
Bucket bucket(size_t x) {
- return Bucket(BucketId(x), PartitionId(0));
+ return makeBucket(BucketId(x));
}
Selection selectAll() {
diff --git a/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp b/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp
index 6fda983e88c..950571df6a4 100644
--- a/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/buckethandler/buckethandler_test.cpp
@@ -5,6 +5,7 @@ LOG_SETUP("buckethandler_test");
#include <vespa/searchcore/proton/server/ibucketstatechangedhandler.h>
#include <vespa/searchcore/proton/server/ibucketmodifiedhandler.h>
#include <vespa/searchcore/proton/test/test.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/vespalib/testkit/testapp.h>
using namespace proton;
@@ -14,6 +15,7 @@ using storage::spi::Bucket;
using storage::spi::BucketInfo;
using storage::spi::PartitionId;
using storage::spi::Timestamp;
+using storage::spi::test::makeBucket;
using vespalib::ThreadStackExecutor;
using proton::test::BucketStateCalculator;
@@ -144,7 +146,7 @@ struct Fixture
}
void sync() { _exec.sync(); }
void handleGetBucketInfo(const BucketId &bucket) {
- _handler.handleGetBucketInfo(Bucket(bucket, PART_ID), _bucketInfo);
+ _handler.handleGetBucketInfo(makeBucket(bucket, PART_ID), _bucketInfo);
}
void
setNodeUp(bool value)
diff --git a/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp b/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp
index 123002ecfd2..d4af7b214b6 100644
--- a/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/combiningfeedview/combiningfeedview_test.cpp
@@ -2,6 +2,7 @@
#include <vespa/log/log.h>
LOG_SETUP("combiningfeedview_test");
+#include <vespa/persistence/spi/test.h>
#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
#include <vespa/searchcore/proton/server/combiningfeedview.h>
#include <vespa/searchcore/proton/test/test.h>
@@ -13,6 +14,7 @@ using document::DocumentUpdate;
using search::IDestructorCallback;
using search::SerialNum;
using storage::spi::Timestamp;
+using storage::spi::test::makeBucketSpace;
using namespace proton;
typedef std::vector<IFeedView::SP> FeedViewVector;
@@ -142,7 +144,7 @@ struct Fixture
_removed(_builder.getRepo(), _bucketDB, SubDbType::REMOVED),
_notReady(_builder.getRepo(), _bucketDB, SubDbType::NOTREADY),
_calc(new test::BucketStateCalculator()),
- _view(getVector(_ready, _removed, _notReady), _calc)
+ _view(getVector(_ready, _removed, _notReady), makeBucketSpace(), _calc)
{
_builder.createDoc(1, 1);
_builder.createDoc(2, 2);
diff --git a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp
index 412c7abac5b..315fb7e86eb 100644
--- a/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/configurer/configurer_test.cpp
@@ -206,7 +206,7 @@ Fixture::initViewSet(ViewSet &views)
views._hwInfo));
ProtonConfig protonCfg;
SummaryManager::SP summaryMgr(
- new SummaryManager(_summaryExecutor, ProtonConfig::Summary(),
+ new SummaryManager(_summaryExecutor, search::LogDocumentStore::Config(),
GrowStrategy(), BASE_DIR, views._docTypeName,
TuneFileSummary(), views._fileHeaderContext,
views._noTlSyncer, search::IBucketizer::SP()));
diff --git a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
index 9eddef36436..547e400cd76 100644
--- a/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/document_subdbs/document_subdbs_test.cpp
@@ -1,5 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/persistence/spi/test.h>
#include <vespa/searchcore/proton/attribute/imported_attributes_repo.h>
#include <vespa/searchcore/proton/bucketdb/bucketdbhandler.h>
#include <vespa/searchcore/proton/common/hw_info.h>
@@ -23,7 +24,7 @@
#include <vespa/searchcore/proton/test/thread_utils.h>
#include <vespa/searchcorespi/plugin/iindexmanagerfactory.h>
#include <vespa/searchlib/common/idestructorcallback.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/searchlib/index/docbuilder.h>
#include <vespa/searchlib/test/directory_handler.h>
#include <vespa/vespalib/io/fileutil.h>
@@ -51,6 +52,7 @@ using search::test::DirectoryHandler;
using searchcorespi::IFlushTarget;
using searchcorespi::index::IThreadingService;
using storage::spi::Timestamp;
+using storage::spi::test::makeBucketSpace;
using vespa::config::search::core::ProtonConfig;
using vespalib::mkdir;
@@ -78,6 +80,7 @@ struct MySubDBOwner : public IDocumentSubDBOwner
uint32_t _syncCnt;
MySubDBOwner() : _syncCnt(0) {}
void syncFeedView() override { ++_syncCnt; }
+ document::BucketSpace getBucketSpace() const override { return makeBucketSpace(); }
vespalib::string getName() const override { return "owner"; }
uint32_t getDistributionKey() const override { return -1; }
};
@@ -262,25 +265,26 @@ struct MyConfigSnapshot
Schema _schema;
DocBuilder _builder;
DocumentDBConfig::SP _cfg;
+ BootstrapConfig::SP _bootstrap;
MyConfigSnapshot(const Schema &schema,
const vespalib::string &cfgDir)
: _schema(schema),
_builder(_schema),
- _cfg()
+ _cfg(),
+ _bootstrap()
{
DocumentDBConfig::DocumenttypesConfigSP documenttypesConfig
(new DocumenttypesConfig(_builder.getDocumenttypesConfig()));
TuneFileDocumentDB::SP tuneFileDocumentDB(new TuneFileDocumentDB());
- BootstrapConfig::SP bootstrap
- (new BootstrapConfig(1,
+ _bootstrap = std::make_shared<BootstrapConfig>(1,
documenttypesConfig,
_builder.getDocumentTypeRepo(),
std::make_shared<ProtonConfig>(),
std::make_shared<FiledistributorrpcConfig>(),
- tuneFileDocumentDB));
+ tuneFileDocumentDB);
config::DirSpec spec(cfgDir);
DocumentDBConfigHelper mgr(spec, "searchdocument");
- mgr.forwardConfig(bootstrap);
+ mgr.forwardConfig(_bootstrap);
mgr.nextGeneration(1);
_cfg = mgr.getConfig();
}
@@ -292,8 +296,8 @@ struct FixtureBase
ExecutorThreadingService _writeService;
ThreadStackExecutor _summaryExecutor;
typename Traits::Config _cfg;
- std::shared_ptr<BucketDBOwner> _bucketDB;
- BucketDBHandler _bucketDBHandler;
+ std::shared_ptr<BucketDBOwner> _bucketDB;
+ BucketDBHandler _bucketDBHandler;
typename Traits::Context _ctx;
typename Traits::Schema _baseSchema;
MyConfigSnapshot::UP _snapshot;
@@ -329,7 +333,6 @@ struct FixtureBase
DocumentSubDbInitializer::SP task =
_subDb.createInitializer(*_snapshot->_cfg,
Traits::configSerial(),
- ProtonConfig::Summary(),
ProtonConfig::Index());
vespalib::ThreadStackExecutor executor(1, 1024 * 1024);
initializer::TaskRunner taskRunner(executor);
@@ -340,26 +343,18 @@ struct FixtureBase
void basicReconfig(SerialNum serialNum) {
runInMaster([&] () { performReconfig(serialNum, TwoAttrSchema(), ConfigDir2::dir()); });
}
- void reconfig(SerialNum serialNum,
- const Schema &reconfigSchema,
- const vespalib::string &reconfigConfigDir) {
+ void reconfig(SerialNum serialNum, const Schema &reconfigSchema, const vespalib::string &reconfigConfigDir) {
runInMaster([&] () { performReconfig(serialNum, reconfigSchema, reconfigConfigDir); });
- }
- void performReconfig(SerialNum serialNum,
- const Schema &reconfigSchema,
- const vespalib::string &reconfigConfigDir) {
+ }
+ void performReconfig(SerialNum serialNum, const Schema &reconfigSchema, const vespalib::string &reconfigConfigDir) {
MyConfigSnapshot::UP newCfg(new MyConfigSnapshot(reconfigSchema, reconfigConfigDir));
DocumentDBConfig::ComparisonResult cmpResult;
cmpResult.attributesChanged = true;
cmpResult.documenttypesChanged = true;
cmpResult.documentTypeRepoChanged = true;
MyDocumentDBReferenceResolver resolver;
- IReprocessingTask::List tasks =
- _subDb.applyConfig(*newCfg->_cfg,
- *_snapshot->_cfg,
- serialNum,
- ReconfigParams(cmpResult),
- resolver);
+ auto tasks = _subDb.applyConfig(*newCfg->_cfg, *_snapshot->_cfg,
+ serialNum, ReconfigParams(cmpResult), resolver);
_snapshot = std::move(newCfg);
if (!tasks.empty()) {
ReprocessingRunner runner;
diff --git a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp
index 5dbf5d523bc..4419e982abf 100644
--- a/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/documentbucketmover/documentbucketmover_test.cpp
@@ -2,7 +2,7 @@
#include <vespa/searchcore/proton/bucketdb/bucketdbhandler.h>
#include <vespa/searchcore/proton/bucketdb/bucket_create_notifier.h>
-#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/test/bucketfactory.h>
#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
#include <vespa/searchcore/proton/server/bucketmovejob.h>
#include <vespa/searchcore/proton/server/documentbucketmover.h>
@@ -15,6 +15,7 @@
#include <vespa/searchcore/proton/test/disk_mem_usage_notifier.h>
#include <vespa/searchcore/proton/test/test.h>
#include <vespa/searchlib/index/docbuilder.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/vespalib/testkit/testapp.h>
using namespace proton;
@@ -31,6 +32,7 @@ using search::index::DocBuilder;
using search::index::Schema;
using storage::spi::BucketInfo;
using storage::spi::Timestamp;
+using storage::spi::test::makeBucketSpace;
using vespalib::make_string;
using BlockedReason = IBlockableMaintenanceJob::BlockedReason;
@@ -622,7 +624,7 @@ ControllerFixtureBase::ControllerFixtureBase(const BlockableMaintenanceJobConfig
_bmj(_calc, _moveHandler, _modifiedHandler, _ready._subDb,
_notReady._subDb, _fbh, _bucketCreateNotifier, _clusterStateHandler, _bucketHandler,
_diskMemUsageNotifier, blockableConfig,
- "test"),
+ "test", makeBucketSpace()),
_runner(_bmj)
{
}
diff --git a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
index b9a04acb8da..5e3e5cd78be 100644
--- a/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/documentdb_test.cpp
@@ -1,6 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <tests/proton/common/dummydbowner.h>
+#include <vespa/document/datatype/documenttype.h>
+#include <vespa/fastos/file.h>
+#include <vespa/messagebus/emptyreply.h>
+#include <vespa/messagebus/testlib/receptor.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/searchcore/proton/attribute/flushableattribute.h>
#include <vespa/searchcore/proton/common/feedtoken.h>
#include <vespa/searchcore/proton/common/statusreport.h>
@@ -19,20 +24,17 @@
#include <vespa/searchcorespi/index/indexflushtarget.h>
#include <vespa/searchlib/index/dummyfileheadercontext.h>
#include <vespa/searchlib/transactionlog/translogserver.h>
-#include <vespa/messagebus/emptyreply.h>
-#include <vespa/messagebus/testlib/receptor.h>
-#include <vespa/document/datatype/documenttype.h>
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/fastos/file.h>
+using namespace cloud::config::filedistribution;
+using namespace proton;
+using namespace storage::spi::test;
+using namespace vespalib::slime;
using document::DocumentType;
using document::DocumentTypeRepo;
using document::DocumenttypesConfig;
-using namespace cloud::config::filedistribution;
-using namespace proton;
-using namespace vespalib::slime;
using search::TuneFileDocumentDB;
using search::index::DummyFileHeaderContext;
using search::index::Schema;
@@ -107,22 +109,17 @@ Fixture::Fixture()
config::DirSpec spec(TEST_PATH("cfg"));
DocumentDBConfigHelper mgr(spec, "typea");
BootstrapConfig::SP
- b(new BootstrapConfig(1,
- documenttypesConfig,
- repo,
+ b(new BootstrapConfig(1, documenttypesConfig, repo,
std::make_shared<ProtonConfig>(),
std::make_shared<FiledistributorrpcConfig>(),
tuneFileDocumentDB));
mgr.forwardConfig(b);
mgr.nextGeneration(0);
- _db.reset(new DocumentDB(".", mgr.getConfig(), "tcp/localhost:9014",
- _queryLimiter, _clock, DocTypeName("typea"),
- ProtonConfig(),
- _myDBOwner, _summaryExecutor, _summaryExecutor, NULL, _dummy, _fileHeaderContext,
- ConfigStore::UP(new MemoryConfigStore),
- std::make_shared<vespalib::ThreadStackExecutor>
- (16, 128 * 1024),
- _hwInfo));
+ _db.reset(new DocumentDB(".", mgr.getConfig(), "tcp/localhost:9014", _queryLimiter, _clock, DocTypeName("typea"),
+ makeBucketSpace(),
+ *b->getProtonConfigSP(), _myDBOwner, _summaryExecutor, _summaryExecutor, _tls, _dummy,
+ _fileHeaderContext, ConfigStore::UP(new MemoryConfigStore),
+ std::make_shared<vespalib::ThreadStackExecutor>(16, 128 * 1024), _hwInfo));
_db->start();
_db->waitForOnlineState();
}
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/feedhandler/CMakeLists.txt
index 6215929faa0..8f93629b6a6 100644
--- a/searchcore/src/tests/proton/documentdb/feedhandler/CMakeLists.txt
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/CMakeLists.txt
@@ -3,6 +3,7 @@ vespa_add_executable(searchcore_feedhandler_test_app TEST
SOURCES
feedhandler_test.cpp
DEPENDS
+ searchcore_test
searchcore_server
searchcore_bucketdb
searchcore_persistenceengine
diff --git a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
index bb43aa6c63d..cccbbededd1 100644
--- a/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedhandler/feedhandler_test.cpp
@@ -6,7 +6,7 @@
#include <vespa/documentapi/messagebus/messages/updatedocumentreply.h>
#include <vespa/persistence/spi/result.h>
#include <vespa/searchcore/proton/bucketdb/bucketdbhandler.h>
-#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/test/bucketfactory.h>
#include <vespa/searchcore/proton/common/feedtoken.h>
#include <vespa/searchcore/proton/feedoperation/moveoperation.h>
#include <vespa/searchcore/proton/feedoperation/pruneremoveddocumentsoperation.h>
@@ -50,7 +50,7 @@ using search::IDestructorCallback;
using search::SerialNum;
using search::index::schema::CollectionType;
using search::index::schema::DataType;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using search::transactionlog::TransLogServer;
using storage::spi::PartitionId;
using storage::spi::RemoveResult;
@@ -411,12 +411,10 @@ struct MyTlsWriter : TlsWriter {
bool erase_return;
MyTlsWriter() : store_count(0), erase_count(0), erase_return(true) {}
- virtual void storeOperation(const FeedOperation &) override { ++store_count; }
- virtual bool erase(SerialNum) override { ++erase_count; return erase_return; }
+ void storeOperation(const FeedOperation &) override { ++store_count; }
+ bool erase(SerialNum) override { ++erase_count; return erase_return; }
- virtual SerialNum
- sync(SerialNum syncTo) override
- {
+ SerialNum sync(SerialNum syncTo) override {
return syncTo;
}
};
@@ -452,7 +450,7 @@ struct FeedHandlerFixture
_bucketDB(),
_bucketDBHandler(_bucketDB),
handler(writeService, tlsSpec, schema.getDocType(),
- feedMetrics._feed, _state, owner, writeFilter, replayConfig, NULL, &tls_writer)
+ feedMetrics._feed, _state, owner, writeFilter, replayConfig, tls, &tls_writer)
{
_state.enterLoadState();
_state.enterReplayTransactionLogState();
@@ -544,8 +542,7 @@ addLidToRemove(RemoveDocumentsOperation &op)
TEST_F("require that handleMove calls FeedView", FeedHandlerFixture)
{
DocumentContext doc_context("doc:test:foo", *f.schema.builder);
- MoveOperation op(doc_context.bucketId, Timestamp(2), doc_context.doc,
- DbDocumentId(0, 2), 1);
+ MoveOperation op(doc_context.bucketId, Timestamp(2), doc_context.doc, DbDocumentId(0, 2), 1);
op.setDbDocumentId(DbDocumentId(1, 2));
f.runAsMaster([&]() { f.handler.handleMove(op, IDestructorCallback::SP()); });
EXPECT_EQUAL(1, f.feedView.move_count);
diff --git a/searchcore/src/tests/proton/documentdb/feedview/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/feedview/CMakeLists.txt
index 6d2dfb59172..ebf28f3bf16 100644
--- a/searchcore/src/tests/proton/documentdb/feedview/CMakeLists.txt
+++ b/searchcore/src/tests/proton/documentdb/feedview/CMakeLists.txt
@@ -3,6 +3,7 @@ vespa_add_executable(searchcore_feedview_test_app TEST
SOURCES
feedview_test.cpp
DEPENDS
+ searchcore_test
searchcore_server
searchcore_index
searchcore_feedoperation
diff --git a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
index eeec8122703..c820a9f392c 100644
--- a/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/feedview/feedview_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/documentapi/messagebus/messages/removedocumentreply.h>
#include <vespa/documentapi/messagebus/messages/updatedocumentreply.h>
#include <vespa/searchcore/proton/attribute/i_attribute_writer.h>
-#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/test/bucketfactory.h>
#include <vespa/searchcore/proton/common/commit_time_tracker.h>
#include <vespa/searchcore/proton/common/feedtoken.h>
#include <vespa/searchcore/proton/documentmetastore/lidreusedelayer.h>
diff --git a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
index 0b48686bbdd..559dbb240a8 100644
--- a/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
+++ b/searchcore/src/tests/proton/documentdb/maintenancecontroller/maintenancecontroller_test.cpp
@@ -1,5 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/persistence/spi/test.h>
#include <vespa/searchcore/proton/attribute/attribute_usage_filter.h>
#include <vespa/searchcore/proton/attribute/i_attribute_manager.h>
#include <vespa/searchcore/proton/common/doctypename.h>
@@ -53,6 +54,7 @@ using search::IDestructorCallback;
using search::SerialNum;
using storage::spi::BucketInfo;
using storage::spi::Timestamp;
+using storage::spi::test::makeBucketSpace;
using vespalib::Slime;
using vespalib::makeClosure;
using vespalib::makeTask;
@@ -962,7 +964,7 @@ MaintenanceControllerFixture::injectMaintenanceJobs()
{
if (_injectDefaultJobs) {
MaintenanceJobsInjector::injectJobs(_mc, *_mcCfg, _fh, _gsp,
- _lscHandlers, _fh, _mc, _bucketCreateNotifier, _docTypeName.getName(),
+ _lscHandlers, _fh, _mc, _bucketCreateNotifier, _docTypeName.getName(), makeBucketSpace(),
_fh, _fh, _bmc, _clusterStateHandler, _bucketHandler,
_calc,
_diskMemUsageNotifier,
diff --git a/searchcore/src/tests/proton/documentdb/threading_service_config/CMakeLists.txt b/searchcore/src/tests/proton/documentdb/threading_service_config/CMakeLists.txt
new file mode 100644
index 00000000000..214b5c9b86d
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/threading_service_config/CMakeLists.txt
@@ -0,0 +1,8 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_threading_service_config_test_app TEST
+ SOURCES
+ threading_service_config_test.cpp
+ DEPENDS
+ searchcore_server
+)
+vespa_add_test(NAME searchcore_threading_service_config_test_app COMMAND searchcore_threading_service_config_test_app)
diff --git a/searchcore/src/tests/proton/documentdb/threading_service_config/threading_service_config_test.cpp b/searchcore/src/tests/proton/documentdb/threading_service_config/threading_service_config_test.cpp
new file mode 100644
index 00000000000..658ebe818eb
--- /dev/null
+++ b/searchcore/src/tests/proton/documentdb/threading_service_config/threading_service_config_test.cpp
@@ -0,0 +1,66 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#include <vespa/log/log.h>
+LOG_SETUP("threading_service_config_test");
+
+#include <vespa/searchcore/config/config-proton.h>
+#include <vespa/searchcore/proton/common/hw_info.h>
+#include <vespa/searchcore/proton/server/threading_service_config.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace proton;
+using ProtonConfig = vespa::config::search::core::ProtonConfig;
+using ProtonConfigBuilder = vespa::config::search::core::ProtonConfigBuilder;
+
+struct Fixture {
+ ProtonConfig cfg;
+ Fixture(uint32_t baseLineIndexingThreads = 2)
+ : cfg(makeConfig(baseLineIndexingThreads))
+ {
+ }
+ ProtonConfig makeConfig(uint32_t baseLineIndexingThreads) {
+ ProtonConfigBuilder builder;
+ builder.indexing.threads = baseLineIndexingThreads;
+ builder.indexing.tasklimit = 500;
+ builder.indexing.semiunboundtasklimit = 50000;
+ builder.feeding.concurrency = 0.5;
+ return builder;
+ }
+ ThreadingServiceConfig make(uint32_t cpuCores) {
+ return ThreadingServiceConfig::make(cfg, HwInfo::Cpu(cpuCores));
+ }
+ void assertIndexingThreads(uint32_t expIndexingThreads, uint32_t cpuCores) {
+ EXPECT_EQUAL(expIndexingThreads, make(cpuCores).indexingThreads());
+ }
+};
+
+TEST_F("require that indexing threads are set based on cpu cores and feeding concurrency", Fixture)
+{
+ TEST_DO(f.assertIndexingThreads(2, 1));
+ TEST_DO(f.assertIndexingThreads(2, 4));
+ TEST_DO(f.assertIndexingThreads(2, 8));
+ TEST_DO(f.assertIndexingThreads(2, 12));
+ TEST_DO(f.assertIndexingThreads(3, 13));
+ TEST_DO(f.assertIndexingThreads(3, 18));
+ TEST_DO(f.assertIndexingThreads(4, 19));
+ TEST_DO(f.assertIndexingThreads(4, 24));
+}
+
+TEST_F("require that indexing threads is always >= 1", Fixture(0))
+{
+ TEST_DO(f.assertIndexingThreads(1, 0));
+}
+
+TEST_F("require that default task limit is set", Fixture)
+{
+ EXPECT_EQUAL(500u, f.make(24).defaultTaskLimit());
+}
+
+TEST_F("require that semiunbound task limit is scaled based on indexing threads", Fixture)
+{
+ EXPECT_EQUAL(12500u, f.make(24).semiUnboundTaskLimit());
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
diff --git a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp
index 5109d994f61..d56340be2b2 100644
--- a/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp
+++ b/searchcore/src/tests/proton/documentmetastore/lidreusedelayer/lidreusedelayer_test.cpp
@@ -7,9 +7,9 @@ LOG_SETUP("lidreusedelayer_test");
#include <vespa/searchcore/proton/server/executorthreadingservice.h>
#include <vespa/searchcore/proton/test/thread_utils.h>
#include <vespa/searchcore/proton/test/threading_service_observer.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
namespace proton {
diff --git a/searchcore/src/tests/proton/feedtoken/feedtoken.cpp b/searchcore/src/tests/proton/feedtoken/feedtoken.cpp
index 3a01e35fbe5..9df65ae3437 100644
--- a/searchcore/src/tests/proton/feedtoken/feedtoken.cpp
+++ b/searchcore/src/tests/proton/feedtoken/feedtoken.cpp
@@ -42,7 +42,6 @@ private:
void testFail();
void testHandover();
void testIntegrity();
- void testTrace();
public:
int Main() override {
@@ -53,7 +52,6 @@ public:
testFail(); TEST_FLUSH();
testHandover(); TEST_FLUSH();
// testIntegrity(); TEST_FLUSH();
- testTrace(); TEST_FLUSH();
TEST_DONE();
}
@@ -134,19 +132,3 @@ Test::testIntegrity()
}
}
-void
-Test::testTrace()
-{
- LocalTransport transport;
- mbus::Reply::UP reply(new documentapi::RemoveDocumentReply());
-
- FeedToken token(transport, std::move(reply));
- token.trace(0, "foo");
- token.ack();
- reply = transport.getReply();
- ASSERT_TRUE(reply.get() != NULL);
- EXPECT_TRUE(!reply->hasErrors());
- std::string trace = reply->getTrace().toString();
- fprintf(stderr, "%s", trace.c_str());
- EXPECT_TRUE(trace.find("foo") != std::string::npos);
-}
diff --git a/searchcore/src/tests/proton/index/indexmanager_test.cpp b/searchcore/src/tests/proton/index/indexmanager_test.cpp
index 84198fb9b42..e8afd738e84 100644
--- a/searchcore/src/tests/proton/index/indexmanager_test.cpp
+++ b/searchcore/src/tests/proton/index/indexmanager_test.cpp
@@ -43,7 +43,7 @@ using search::index::DocBuilder;
using search::index::DummyFileHeaderContext;
using search::index::Schema;
using search::index::schema::DataType;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using search::memoryindex::CompactDocumentWordsStore;
using search::memoryindex::Dictionary;
using search::queryeval::Source;
diff --git a/searchcore/src/tests/proton/matching/CMakeLists.txt b/searchcore/src/tests/proton/matching/CMakeLists.txt
index f3ed2bd4833..14f3960c43e 100644
--- a/searchcore/src/tests/proton/matching/CMakeLists.txt
+++ b/searchcore/src/tests/proton/matching/CMakeLists.txt
@@ -3,6 +3,7 @@ vespa_add_executable(searchcore_matching_test_app TEST
SOURCES
matching_test.cpp
DEPENDS
+ searchcore_test
searchcore_server
searchcore_fconfig
searchcore_matching
diff --git a/searchcore/src/tests/proton/matching/matching_test.cpp b/searchcore/src/tests/proton/matching/matching_test.cpp
index 261827a1e06..27e677bb1dc 100644
--- a/searchcore/src/tests/proton/matching/matching_test.cpp
+++ b/searchcore/src/tests/proton/matching/matching_test.cpp
@@ -5,7 +5,7 @@
#include <vespa/document/base/globalid.h>
#include <initializer_list>
#include <vespa/searchcommon/attribute/iattributecontext.h>
-#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/test/bucketfactory.h>
#include <vespa/searchcore/proton/documentmetastore/documentmetastore.h>
#include <vespa/searchcore/proton/matching/error_constant_value.h>
#include <vespa/searchcore/proton/matching/fakesearchcontext.h>
diff --git a/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/CMakeLists.txt b/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/CMakeLists.txt
new file mode 100644
index 00000000000..ecfe8af5d98
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/CMakeLists.txt
@@ -0,0 +1,9 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+vespa_add_executable(searchcore_persistence_handler_map_test_app TEST
+ SOURCES
+ persistence_handler_map_test.cpp
+ DEPENDS
+ searchcore_persistenceengine
+ searchcore_pcommon
+)
+vespa_add_test(NAME searchcore_persistence_handler_map_test_app COMMAND searchcore_persistence_handler_map_test_app)
diff --git a/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/persistence_handler_map_test.cpp b/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/persistence_handler_map_test.cpp
new file mode 100644
index 00000000000..34de8a2e6a3
--- /dev/null
+++ b/searchcore/src/tests/proton/persistenceengine/persistence_handler_map/persistence_handler_map_test.cpp
@@ -0,0 +1,144 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/searchcore/proton/persistenceengine/ipersistencehandler.h>
+#include <vespa/searchcore/proton/persistenceengine/persistence_handler_map.h>
+#include <vespa/vespalib/testkit/testapp.h>
+
+using namespace document;
+using namespace proton;
+
+using HandlerSnapshot = PersistenceHandlerMap::HandlerSnapshot;
+
+struct DummyPersistenceHandler : public IPersistenceHandler {
+ using SP = std::shared_ptr<DummyPersistenceHandler>;
+ virtual void initialize() override {}
+ virtual void handlePut(FeedToken,
+ const storage::spi::Bucket &,
+ storage::spi::Timestamp,
+ const document::Document::SP &) override {}
+ virtual void handleUpdate(FeedToken,
+ const storage::spi::Bucket &,
+ storage::spi::Timestamp,
+ const document::DocumentUpdate::SP &) override {}
+ virtual void handleRemove(FeedToken,
+ const storage::spi::Bucket &,
+ storage::spi::Timestamp,
+ const document::DocumentId &) override {}
+ virtual void handleListBuckets(IBucketIdListResultHandler &) override {}
+ virtual void handleSetClusterState(const storage::spi::ClusterState &,
+ IGenericResultHandler &) override {}
+ virtual void handleSetActiveState(const storage::spi::Bucket &,
+ storage::spi::BucketInfo::ActiveState,
+ IGenericResultHandler &) override {}
+ virtual void handleGetBucketInfo(const storage::spi::Bucket &,
+ IBucketInfoResultHandler &) override {}
+ virtual void handleCreateBucket(FeedToken, const storage::spi::Bucket &) override {}
+ virtual void handleDeleteBucket(FeedToken, const storage::spi::Bucket &) override {}
+ virtual void handleGetModifiedBuckets(IBucketIdListResultHandler &) override {}
+ virtual void handleSplit(FeedToken,
+ const storage::spi::Bucket &,
+ const storage::spi::Bucket &,
+ const storage::spi::Bucket &) override {}
+ virtual void handleJoin(FeedToken,
+ const storage::spi::Bucket &,
+ const storage::spi::Bucket &,
+ const storage::spi::Bucket &) override {}
+ virtual RetrieversSP getDocumentRetrievers(storage::spi::ReadConsistency) override { return RetrieversSP(); }
+ virtual BucketGuard::UP lockBucket(const storage::spi::Bucket &) override { return BucketGuard::UP(); }
+ virtual void handleListActiveBuckets(IBucketIdListResultHandler &) override {}
+ virtual void handlePopulateActiveBuckets(document::BucketId::List &,
+ IGenericResultHandler &) override {}
+};
+
+BucketSpace space_1(1);
+BucketSpace space_2(2);
+BucketSpace space_null(3);
+DocTypeName type_a("a");
+DocTypeName type_b("b");
+DocTypeName type_c("c");
+DummyPersistenceHandler::SP handler_a(std::make_shared<DummyPersistenceHandler>());
+DummyPersistenceHandler::SP handler_b(std::make_shared<DummyPersistenceHandler>());
+DummyPersistenceHandler::SP handler_c(std::make_shared<DummyPersistenceHandler>());
+DummyPersistenceHandler::SP handler_a_new(std::make_shared<DummyPersistenceHandler>());
+
+
+void
+assertHandler(const IPersistenceHandler::SP &lhs, const IPersistenceHandler::SP &rhs)
+{
+ EXPECT_EQUAL(lhs.get(), rhs.get());
+}
+
+void
+assertNullHandler(const IPersistenceHandler::SP &handler)
+{
+ EXPECT_TRUE(handler.get() == nullptr);
+}
+
+void
+assertSnapshot(const std::vector<IPersistenceHandler::SP> &exp, const HandlerSnapshot::UP &snapshot)
+{
+ EXPECT_EQUAL(exp.size(), snapshot->size());
+ auto &sequence = snapshot->handlers();
+ for (size_t i = 0; i < exp.size() && sequence.valid(); ++i, sequence.next()) {
+ EXPECT_EQUAL(exp[i].get(), sequence.get());
+ }
+}
+
+struct Fixture {
+ PersistenceHandlerMap map;
+ Fixture() {
+ TEST_DO(assertNullHandler(map.putHandler(space_1, type_a, handler_a)));
+ TEST_DO(assertNullHandler(map.putHandler(space_1, type_b, handler_b)));
+ TEST_DO(assertNullHandler(map.putHandler(space_2, type_c, handler_c)));
+ }
+};
+
+TEST_F("require that handlers can be retrieved", Fixture)
+{
+ TEST_DO(assertHandler(handler_a, f.map.getHandler(space_1, type_a)));
+ TEST_DO(assertHandler(handler_b, f.map.getHandler(space_1, type_b)));
+ TEST_DO(assertHandler(handler_c, f.map.getHandler(space_2, type_c)));
+ TEST_DO(assertNullHandler(f.map.getHandler(space_1, type_c)));
+ TEST_DO(assertNullHandler(f.map.getHandler(space_null, type_a)));
+}
+
+TEST_F("require that old handler is returned if replaced by new handler", Fixture)
+{
+ TEST_DO(assertHandler(handler_a, f.map.putHandler(space_1, type_a, handler_a_new)));
+ TEST_DO(assertHandler(handler_a_new, f.map.getHandler(space_1, type_a)));
+}
+
+TEST_F("require that handler can be removed (and old handler returned)", Fixture)
+{
+ TEST_DO(assertHandler(handler_a, f.map.removeHandler(space_1, type_a)));
+ TEST_DO(assertNullHandler(f.map.getHandler(space_1, type_a)));
+ TEST_DO(assertNullHandler(f.map.removeHandler(space_1, type_c)));
+}
+
+TEST_F("require that handler snapshot can be retrieved for all handlers", Fixture)
+{
+ TEST_DO(assertSnapshot({handler_c, handler_a, handler_b}, f.map.getHandlerSnapshot()));
+}
+
+TEST_F("require that handler snapshot can be retrieved for given bucket space", Fixture)
+{
+ TEST_DO(assertSnapshot({handler_a, handler_b}, f.map.getHandlerSnapshot(space_1)));
+ TEST_DO(assertSnapshot({handler_c}, f.map.getHandlerSnapshot(space_2)));
+ TEST_DO(assertSnapshot({}, f.map.getHandlerSnapshot(space_null)));
+}
+
+TEST_F("require that handler snapshot can be retrieved for given document type (in bucket space)", Fixture)
+{
+ // Note: Document id doesn't contain document type -> all handlers returned
+ TEST_DO(assertSnapshot({handler_a, handler_b},
+ f.map.getHandlerSnapshot(space_1, DocumentId("userdoc:namespace:1234:namespace"))));
+ TEST_DO(assertSnapshot({handler_a},
+ f.map.getHandlerSnapshot(space_1, DocumentId("id:namespace:a::doc1"))));
+ EXPECT_TRUE(f.map.getHandlerSnapshot(space_1, DocumentId("id:namespace:c::doc2")).get() == nullptr);
+}
+
+TEST_MAIN()
+{
+ TEST_RUN_ALL();
+}
+
diff --git a/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
index a7c4b0e5774..f8c0e0ac2f6 100644
--- a/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
+++ b/searchcore/src/tests/proton/persistenceengine/persistenceengine_test.cpp
@@ -1,18 +1,22 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/document/repo/documenttyperepo.h>
+
+#include <vespa/config-stor-distribution.h>
#include <vespa/document/datatype/documenttype.h>
+#include <vespa/document/fieldset/fieldsets.h>
+#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/persistence/spi/documentselection.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/searchcore/proton/persistenceengine/bucket_guard.h>
#include <vespa/searchcore/proton/persistenceengine/ipersistenceengineowner.h>
#include <vespa/searchcore/proton/persistenceengine/persistenceengine.h>
-#include <vespa/vespalib/testkit/testapp.h>
-#include <vespa/document/fieldset/fieldsets.h>
#include <vespa/vdslib/distribution/distribution.h>
#include <vespa/vdslib/state/clusterstate.h>
-#include <vespa/config-stor-distribution.h>
+#include <vespa/vespalib/testkit/testapp.h>
+#include <algorithm>
#include <set>
using document::BucketId;
+using document::BucketSpace;
using document::Document;
using document::DocumentId;
using document::DocumentType;
@@ -22,19 +27,22 @@ using storage::spi::BucketChecksum;
using storage::spi::BucketIdListResult;
using storage::spi::BucketInfo;
using storage::spi::BucketInfoResult;
-using storage::spi::Context;
using storage::spi::ClusterState;
+using storage::spi::Context;
using storage::spi::CreateIteratorResult;
using storage::spi::DocumentSelection;
using storage::spi::GetResult;
-using storage::spi::IteratorId;
using storage::spi::IterateResult;
+using storage::spi::IteratorId;
using storage::spi::PartitionId;
+using storage::spi::PersistenceProvider;
using storage::spi::RemoveResult;
using storage::spi::Result;
using storage::spi::Selection;
using storage::spi::Timestamp;
using storage::spi::UpdateResult;
+using storage::spi::test::makeBucket;
+using storage::spi::test::makeBucketSpace;
using namespace proton;
using namespace vespalib;
@@ -349,6 +357,8 @@ struct HandlerSet {
MyHandler &handler2;
HandlerSet();
~HandlerSet();
+ void prepareListBuckets();
+ void prepareGetModifiedBuckets();
};
HandlerSet::HandlerSet()
@@ -378,8 +388,8 @@ BucketId bckId1(1);
BucketId bckId2(2);
BucketId bckId3(3);
Bucket bucket0;
-Bucket bucket1(bckId1, partId);
-Bucket bucket2(bckId2, partId);
+Bucket bucket1(makeBucket(bckId1, partId));
+Bucket bucket2(makeBucket(bckId2, partId));
BucketChecksum checksum1(1);
BucketChecksum checksum2(2);
BucketChecksum checksum3(1+2);
@@ -392,7 +402,26 @@ Timestamp tstamp2(2);
Timestamp tstamp3(3);
DocumentSelection doc_sel("");
Selection selection(doc_sel);
+BucketSpace altBucketSpace(1);
+
+
+void
+HandlerSet::prepareListBuckets()
+{
+ handler1.bucketList.push_back(bckId1);
+ handler1.bucketList.push_back(bckId2);
+ handler2.bucketList.push_back(bckId2);
+ handler2.bucketList.push_back(bckId3);
+}
+void
+HandlerSet::prepareGetModifiedBuckets()
+{
+ handler1.modBucketList.push_back(bckId1);
+ handler1.modBucketList.push_back(bckId2);
+ handler2.modBucketList.push_back(bckId2);
+ handler2.modBucketList.push_back(bckId3);
+}
class SimplePersistenceEngineOwner : public IPersistenceEngineOwner
{
@@ -424,13 +453,17 @@ struct SimpleFixture {
SimpleResourceWriteFilter _writeFilter;
PersistenceEngine engine;
HandlerSet hset;
- SimpleFixture()
+ SimpleFixture(BucketSpace bucketSpace2)
: _owner(),
engine(_owner, _writeFilter, -1, false),
hset()
{
- engine.putHandler(DocTypeName(doc1->getType()), hset.phandler1);
- engine.putHandler(DocTypeName(doc2->getType()), hset.phandler2);
+ engine.putHandler(makeBucketSpace(), DocTypeName(doc1->getType()), hset.phandler1);
+ engine.putHandler(bucketSpace2, DocTypeName(doc2->getType()), hset.phandler2);
+ }
+ SimpleFixture()
+ : SimpleFixture(makeBucketSpace())
+ {
}
};
@@ -444,6 +477,26 @@ assertHandler(const Bucket &expBucket, Timestamp expTimestamp,
EXPECT_EQUAL(expDocId, handler.lastDocId);
}
+void assertBucketList(const BucketIdListResult &result, const std::vector<BucketId> &expBuckets)
+{
+ const BucketIdListResult::List &bucketList = result.getList();
+ EXPECT_EQUAL(expBuckets.size(), bucketList.size());
+ for (const auto &expBucket : expBuckets) {
+ EXPECT_TRUE(std::find(bucketList.begin(), bucketList.end(), expBucket) != bucketList.end());
+ }
+}
+
+void assertBucketList(PersistenceProvider &spi, BucketSpace bucketSpace, const std::vector<BucketId> &expBuckets)
+{
+ BucketIdListResult result = spi.listBuckets(bucketSpace, partId);
+ TEST_DO(assertBucketList(result, expBuckets));
+}
+
+void assertModifiedBuckets(PersistenceProvider &spi, BucketSpace bucketSpace, const std::vector<BucketId> &expBuckets)
+{
+ BucketIdListResult result = spi.getModifiedBuckets(bucketSpace);
+ TEST_DO(assertBucketList(result, expBuckets));
+}
TEST_F("require that getPartitionStates() prepares all handlers", SimpleFixture)
{
@@ -585,18 +638,9 @@ TEST_F("require that remove is NOT rejected if resource limit is reached", Simpl
TEST_F("require that listBuckets() is routed to handlers and merged", SimpleFixture)
{
- f.hset.handler1.bucketList.push_back(bckId1);
- f.hset.handler1.bucketList.push_back(bckId2);
- f.hset.handler2.bucketList.push_back(bckId2);
- f.hset.handler2.bucketList.push_back(bckId3);
-
- EXPECT_TRUE(f.engine.listBuckets(PartitionId(1)).getList().empty());
- BucketIdListResult result = f.engine.listBuckets(partId);
- const BucketIdListResult::List &bucketList = result.getList();
- EXPECT_EQUAL(3u, bucketList.size());
- EXPECT_EQUAL(bckId1, bucketList[0]);
- EXPECT_EQUAL(bckId2, bucketList[1]);
- EXPECT_EQUAL(bckId3, bucketList[2]);
+ f.hset.prepareListBuckets();
+ EXPECT_TRUE(f.engine.listBuckets(makeBucketSpace(), PartitionId(1)).getList().empty());
+ TEST_DO(assertBucketList(f.engine, makeBucketSpace(), { bckId1, bckId2, bckId3 }));
}
@@ -671,17 +715,8 @@ TEST_F("require that deleteBucket() is routed to handlers and merged", SimpleFix
TEST_F("require that getModifiedBuckets() is routed to handlers and merged", SimpleFixture)
{
- f.hset.handler1.modBucketList.push_back(bckId1);
- f.hset.handler1.modBucketList.push_back(bckId2);
- f.hset.handler2.modBucketList.push_back(bckId2);
- f.hset.handler2.modBucketList.push_back(bckId3);
-
- BucketIdListResult result = f.engine.getModifiedBuckets();
- const BucketIdListResult::List &bucketList = result.getList();
- EXPECT_EQUAL(3u, bucketList.size());
- EXPECT_EQUAL(bckId1, bucketList[0]);
- EXPECT_EQUAL(bckId2, bucketList[1]);
- EXPECT_EQUAL(bckId3, bucketList[2]);
+ f.hset.prepareGetModifiedBuckets();
+ TEST_DO(assertModifiedBuckets(f.engine, makeBucketSpace(), { bckId1, bckId2, bckId3 }));
}
@@ -837,6 +872,15 @@ TEST_F("require that buckets are frozen during iterator life", SimpleFixture) {
EXPECT_FALSE(f.hset.handler2.isFrozen(bucket1));
}
+TEST_F("require that multiple bucket spaces works", SimpleFixture(altBucketSpace)) {
+ f.hset.prepareListBuckets();
+ TEST_DO(assertBucketList(f.engine, makeBucketSpace(), { bckId1, bckId2 }));
+ TEST_DO(assertBucketList(f.engine, altBucketSpace, { bckId2, bckId3 }));
+ f.hset.prepareGetModifiedBuckets();
+ TEST_DO(assertModifiedBuckets(f.engine, makeBucketSpace(), { bckId1, bckId2 }));
+ TEST_DO(assertModifiedBuckets(f.engine, altBucketSpace, { bckId2, bckId3 }));
+}
+
TEST_MAIN()
{
TEST_RUN_ALL();
diff --git a/searchcore/src/tests/proton/proton_configurer/proton_configurer_test.cpp b/searchcore/src/tests/proton/proton_configurer/proton_configurer_test.cpp
index 757a9c19f29..a99eaa0daf6 100644
--- a/searchcore/src/tests/proton/proton_configurer/proton_configurer_test.cpp
+++ b/searchcore/src/tests/proton/proton_configurer/proton_configurer_test.cpp
@@ -86,6 +86,7 @@ struct DBConfigFixture {
std::make_shared<TuneFileDocumentDB>(),
buildSchema(),
std::make_shared<DocumentDBMaintenanceConfig>(),
+ search::LogDocumentStore::Config(),
configId,
docTypeName,
config::ConfigSnapshot());
@@ -207,9 +208,9 @@ struct MyDocumentDBConfigOwner : public IDocumentDBConfigOwner
_owner(owner)
{
}
- virtual ~MyDocumentDBConfigOwner() { }
+ ~MyDocumentDBConfigOwner() { }
- virtual void reconfigure(const DocumentDBConfig::SP & config) override;
+ void reconfigure(const DocumentDBConfig::SP & config) override;
};
struct MyProtonConfigurerOwner : public IProtonConfigurerOwner
@@ -229,11 +230,13 @@ struct MyProtonConfigurerOwner : public IProtonConfigurerOwner
virtual ~MyProtonConfigurerOwner() { }
virtual IDocumentDBConfigOwner *addDocumentDB(const DocTypeName &docTypeName,
+ document::BucketSpace bucketSpace,
const vespalib::string &configId,
const std::shared_ptr<BootstrapConfig> &bootstrapConfig,
const std::shared_ptr<DocumentDBConfig> &documentDBConfig,
InitializeThreads initializeThreads) override
{
+ (void) bucketSpace;
(void) configId;
(void) bootstrapConfig;
(void) initializeThreads;
diff --git a/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp b/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp
index 625e9c1f6a9..1201bc4720b 100644
--- a/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp
+++ b/searchcore/src/tests/proton/reference/gid_to_lid_change_handler/gid_to_lid_change_handler_test.cpp
@@ -4,7 +4,7 @@
#include <vespa/document/base/documentid.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
#include <vespa/searchcore/proton/server/executor_thread_service.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/searchcore/proton/reference/i_gid_to_lid_change_listener.h>
#include <vespa/searchcore/proton/reference/gid_to_lid_change_handler.h>
#include <map>
@@ -13,7 +13,7 @@ LOG_SETUP("gid_to_lid_change_handler_test");
using document::GlobalId;
using document::DocumentId;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using search::SerialNum;
namespace proton {
diff --git a/searchcore/src/tests/proton/server/CMakeLists.txt b/searchcore/src/tests/proton/server/CMakeLists.txt
index 7f97c6dcf92..31f67bdd0ac 100644
--- a/searchcore/src/tests/proton/server/CMakeLists.txt
+++ b/searchcore/src/tests/proton/server/CMakeLists.txt
@@ -33,6 +33,7 @@ vespa_add_executable(searchcore_feedstates_test_app TEST
SOURCES
feedstates_test.cpp
DEPENDS
+ searchcore_test
searchcore_server
searchcore_bucketdb
searchcore_persistenceengine
diff --git a/searchcore/src/tests/proton/server/disk_mem_usage_filter/disk_mem_usage_filter_test.cpp b/searchcore/src/tests/proton/server/disk_mem_usage_filter/disk_mem_usage_filter_test.cpp
index 5d8a647f675..4dd8c43d9b2 100644
--- a/searchcore/src/tests/proton/server/disk_mem_usage_filter/disk_mem_usage_filter_test.cpp
+++ b/searchcore/src/tests/proton/server/disk_mem_usage_filter/disk_mem_usage_filter_test.cpp
@@ -1,9 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/testkit/testapp.h>
+#include <vespa/searchcore/proton/common/hw_info.h>
#include <vespa/searchcore/proton/server/disk_mem_usage_filter.h>
using proton::DiskMemUsageFilter;
+using proton::HwInfo;
namespace fs = std::experimental::filesystem;
@@ -16,9 +18,9 @@ struct Fixture
using Config = DiskMemUsageFilter::Config;
Fixture()
- : _filter(64 * 1024 * 1024)
+ : _filter(HwInfo(HwInfo::Disk(100, false, false), HwInfo::Memory(64 * 1024 * 1024), HwInfo::Cpu(0)))
{
- _filter.setDiskStats({.capacity = 100, .free = 100, .available=100});
+ _filter.setDiskUsedSize(0);
_filter.setMemoryStats(vespalib::ProcessMemoryStats(10000000,
10000001,
10000002,
@@ -41,7 +43,7 @@ struct Fixture
}
void triggerDiskLimit() {
- _filter.setDiskStats({.capacity = 100, .free = 20, .available=10});
+ _filter.setDiskUsedSize(90);
}
void triggerMemoryLimit()
@@ -76,7 +78,7 @@ TEST_F("Check that disk limit can be reached", Fixture)
"action: \"add more content nodes\", "
"reason: \"disk used (0.9) > disk limit (0.8)\", "
"stats: { "
- "capacity: 100, free: 20, available: 10, diskUsed: 0.9, diskLimit: 0.8}}");
+ "capacity: 100, used: 90, diskUsed: 0.9, diskLimit: 0.8}}");
}
TEST_F("Check that memory limit can be reached", Fixture)
@@ -108,7 +110,7 @@ TEST_F("Check that both disk limit and memory limit can be reached", Fixture)
"action: \"add more content nodes\", "
"reason: \"disk used (0.9) > disk limit (0.8)\", "
"stats: { "
- "capacity: 100, free: 20, available: 10, diskUsed: 0.9, diskLimit: 0.8}}");
+ "capacity: 100, used: 90, diskUsed: 0.9, diskLimit: 0.8}}");
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchcore/src/tests/proton/server/documentretriever_test.cpp b/searchcore/src/tests/proton/server/documentretriever_test.cpp
index df444d26f8c..b949046e8b6 100644
--- a/searchcore/src/tests/proton/server/documentretriever_test.cpp
+++ b/searchcore/src/tests/proton/server/documentretriever_test.cpp
@@ -19,6 +19,7 @@
#include <vespa/document/repo/documenttyperepo.h>
#include <vespa/persistence/spi/bucket.h>
#include <vespa/persistence/spi/result.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/searchcommon/common/schema.h>
#include <vespa/searchcore/proton/documentmetastore/documentmetastorecontext.h>
#include <vespa/searchcore/proton/server/documentretriever.h>
@@ -69,6 +70,7 @@ using storage::spi::Bucket;
using storage::spi::GetResult;
using storage::spi::PartitionId;
using storage::spi::Timestamp;
+using storage::spi::test::makeBucket;
using vespalib::make_string;
using vespalib::string;
using namespace document::config_builder;
@@ -324,12 +326,12 @@ TEST_F("require that document retriever can retrieve document meta data",
TEST_F("require that document retriever can retrieve bucket meta data",
Fixture) {
DocumentMetaData::Vector result;
- f._retriever->getBucketMetaData(Bucket(f.bucket_id, PartitionId(0)), result);
+ f._retriever->getBucketMetaData(makeBucket(f.bucket_id, PartitionId(0)), result);
ASSERT_EQUAL(1u, result.size());
EXPECT_EQUAL(f.lid, result[0].lid);
EXPECT_EQUAL(f.timestamp, result[0].timestamp);
result.clear();
- f._retriever->getBucketMetaData(Bucket(BucketId(f.bucket_id.getId() + 1),
+ f._retriever->getBucketMetaData(makeBucket(BucketId(f.bucket_id.getId() + 1),
PartitionId(0)), result);
EXPECT_EQUAL(0u, result.size());
}
diff --git a/searchcore/src/tests/proton/server/feedstates_test.cpp b/searchcore/src/tests/proton/server/feedstates_test.cpp
index 7d409db690e..15c2fbe5a84 100644
--- a/searchcore/src/tests/proton/server/feedstates_test.cpp
+++ b/searchcore/src/tests/proton/server/feedstates_test.cpp
@@ -8,7 +8,7 @@ LOG_SETUP("feedstates_test");
#include <vespa/document/base/testdocrepo.h>
#include <vespa/document/bucket/bucketid.h>
#include <vespa/document/repo/documenttyperepo.h>
-#include <vespa/searchcore/proton/common/bucketfactory.h>
+#include <vespa/searchcore/proton/test/bucketfactory.h>
#include <vespa/searchcore/proton/server/feedstates.h>
#include <vespa/searchcore/proton/server/ireplayconfig.h>
#include <vespa/searchcore/proton/server/memoryconfigstore.h>
diff --git a/searchcore/src/tests/proton/server/visibility_handler/visibility_handler_test.cpp b/searchcore/src/tests/proton/server/visibility_handler/visibility_handler_test.cpp
index 69b8a482476..5b5bea412f7 100644
--- a/searchcore/src/tests/proton/server/visibility_handler/visibility_handler_test.cpp
+++ b/searchcore/src/tests/proton/server/visibility_handler/visibility_handler_test.cpp
@@ -6,7 +6,7 @@ LOG_SETUP("visibility_handler_test");
#include <vespa/searchcore/proton/test/dummy_feed_view.h>
#include <vespa/searchcore/proton/test/threading_service_observer.h>
#include <vespa/searchcore/proton/server/executorthreadingservice.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
using search::SerialNum;
using proton::IGetSerialNum;
@@ -15,7 +15,7 @@ using proton::ExecutorThreadingService;
using proton::test::ThreadingServiceObserver;
using proton::IFeedView;
using proton::VisibilityHandler;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using fastos::TimeStamp;
namespace {
diff --git a/searchcore/src/tests/proton/summaryengine/summaryengine.cpp b/searchcore/src/tests/proton/summaryengine/summaryengine.cpp
index 355151dd88c..db707e4aa97 100644
--- a/searchcore/src/tests/proton/summaryengine/summaryengine.cpp
+++ b/searchcore/src/tests/proton/summaryengine/summaryengine.cpp
@@ -213,12 +213,12 @@ verify(vespalib::stringref exp, const Slime &slime) {
Memory expMemory(exp);
vespalib::Slime expSlime;
size_t used = vespalib::slime::JsonFormat::decode(expMemory, expSlime);
- EXPECT_EQUAL(used, expMemory.size);
+ EXPECT_TRUE(used > 0);
vespalib::SimpleBuffer output;
vespalib::slime::JsonFormat::encode(slime, output, true);
Slime reSlimed;
used = vespalib::slime::JsonFormat::decode(output.get(), reSlimed);
- EXPECT_EQUAL(used, output.get().size);
+ EXPECT_TRUE(used > 0);
EXPECT_EQUAL(expSlime, reSlimed);
}
diff --git a/searchcore/src/vespa/searchcore/config/CMakeLists.txt b/searchcore/src/vespa/searchcore/config/CMakeLists.txt
index c7669efc876..3d62309161c 100644
--- a/searchcore/src/vespa/searchcore/config/CMakeLists.txt
+++ b/searchcore/src/vespa/searchcore/config/CMakeLists.txt
@@ -4,11 +4,11 @@ vespa_add_library(searchcore_fconfig STATIC
DEPENDS
)
vespa_generate_config(searchcore_fconfig partitions.def)
-install(FILES partitions.def RENAME vespa.config.search.core.partitions.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(partitions.def vespa.config.search.core.partitions.def)
vespa_generate_config(searchcore_fconfig fdispatchrc.def)
-install(FILES fdispatchrc.def RENAME vespa.config.search.core.fdispatchrc.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(fdispatchrc.def vespa.config.search.core.fdispatchrc.def)
vespa_generate_config(searchcore_fconfig proton.def)
-install(FILES proton.def RENAME vespa.config.search.core.proton.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(proton.def vespa.config.search.core.proton.def)
vespa_generate_config(searchcore_fconfig ranking-constants.def)
-install(FILES ranking-constants.def RENAME vespa.config.search.core.ranking-constants.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(ranking-constants.def vespa.config.search.core.ranking-constants.def)
vespa_generate_config(searchcore_fconfig hwinfo.def)
diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def
index 5311e9bde07..ddb2711fac8 100644
--- a/searchcore/src/vespa/searchcore/config/proton.def
+++ b/searchcore/src/vespa/searchcore/config/proton.def
@@ -11,12 +11,6 @@ ptport int default=8003 restart
## Port to use for the rpcserver.
rpcport int default=8004 restart
-## Port used for Slime Message Passing (srmp protocol)
-slime_messaging_port int default=8005 restart
-
-## Connect spec for rtc.
-rtcspec string default="tcp/localhost:8004" restart
-
## Port to use for the web server
httpport int default=0 restart
@@ -174,10 +168,10 @@ grouping.sessionmanager.maxentries int default=500 restart
grouping.sessionmanager.pruning.interval double default=1.0
## Redundancy of documents.
-distribution.redundancy long default=1 restart
+distribution.redundancy long default=1
## Searchable copies of the documents.
-distribution.searchablecopies long default=1 restart
+distribution.searchablecopies long default=1
## Minimum initial size for any per document tables.
grow.initial long default=1024 restart
@@ -192,11 +186,11 @@ grow.add int default=1 restart
grow.numdocs int default=10000 restart
## Control cache size in bytes.
-summary.cache.maxbytes long default=0 restart
+summary.cache.maxbytes long default=0
## Include visits in the cache, if the visitoperation allows it.
## This will enable another separate cache of summary.cache.maxbytes size.
-summary.cache.allowvisitcaching bool default=true restart
+summary.cache.allowvisitcaching bool default=true
## Control number of cache entries preallocated.
## Default is no preallocation
@@ -204,69 +198,74 @@ summary.cache.allowvisitcaching bool default=true restart
summary.cache.initialentries long default=0 restart
## Control compression type of the summary while in the cache.
-summary.cache.compression.type enum {NONE, LZ4, ZSTD} default=LZ4 restart
+summary.cache.compression.type enum {NONE, LZ4, ZSTD} default=LZ4
## Control compression level of the summary while in cache.
## LZ4 has normal range 1..9 while ZSTD has range 1..19
## 9 is a reasonable default for both
-summary.cache.compression.level int default=9 restart
+summary.cache.compression.level int default=9
## Control compression type of the summary while in memory during compaction
## NB So far only stragey=LOG honours it.
-summary.log.compact.compression.type enum {NONE, LZ4, ZSTD} default=ZSTD restart
+summary.log.compact.compression.type enum {NONE, LZ4, ZSTD} default=ZSTD
## Control compression level of the summary while in memory during compaction
## LZ4 has normal range 1..9 while ZSTD has range 1..19
## 9 is a reasonable default for both
-summary.log.compact.compression.level int default=9 restart
+summary.log.compact.compression.level int default=9
## Control compression type of the summary
-summary.log.chunk.compression.type enum {NONE, LZ4, ZSTD} default=ZSTD restart
+summary.log.chunk.compression.type enum {NONE, LZ4, ZSTD} default=ZSTD
## Control compression level of the summary
## LZ4 has normal range 1..9 while ZSTD has range 1..19
## 9 is a reasonable default for both. Going above for ZSTD can give an improvement,
## but is better done in conjunction with increasing chunk size.
-summary.log.chunk.compression.level int default=9 restart
+summary.log.chunk.compression.level int default=9
## Max size in bytes per chunk.
-summary.log.chunk.maxbytes int default=65536 restart
+summary.log.chunk.maxbytes int default=65536
## Max number of documents in each chunk.
## TODO Deprecated and ignored. Remove soon.
-summary.log.chunk.maxentries int default=256 restart
+summary.log.chunk.maxentries int default=256
## Skip crc32 check on read.
-summary.log.chunk.skipcrconread bool default=false restart
+summary.log.chunk.skipcrconread bool default=false
## Control how compation is done, write to the front or to new const file.
-summary.log.compact2activefile bool default=false restart
+## TODO: Remove, will always be false
+summary.log.compact2activefile bool default=false
## Max size per summary file.
-summary.log.maxfilesize long default=1000000000 restart
+summary.log.maxfilesize long default=1000000000
## Max number of removes per summary file.
## TODO Deprecated and ignored. Remove soon.
-summary.log.maxentriesperfile long default=20000000 restart
+summary.log.maxentriesperfile long default=20000000
## Max disk bloat factor. This will trigger compacting.
-summary.log.maxdiskbloatfactor double default=0.1 restart
+summary.log.maxdiskbloatfactor double default=0.1
## Max bucket spread within a single summary file. This will trigger bucket order compacting.
## Only used when summary.compact2buckets is true.
-summary.log.maxbucketspread double default=2.5 restart
+summary.log.maxbucketspread double default=2.5
## If a file goes below this ratio compared to allowed max size it will be joined to the front.
## Value in the range [0.0, 1.0]
-summary.log.minfilesizefactor double default=0.2 restart
+summary.log.minfilesizefactor double default=0.2
## Number of threads used for compressing incomming documents/compacting.
+## Deprecated. Use background.threads instead.
+## TODO Remove
summary.log.numthreads int default=8 restart
## Control io options during flush of stored documents.
-summary.write.io enum {NORMAL, OSYNC, DIRECTIO} default=DIRECTIO restart
+summary.write.io enum {NORMAL, OSYNC, DIRECTIO} default=DIRECTIO
## Control io options during read of stored documents.
+## All summary.read options will take effect immediately on new files written.
+## On old files it will take effect either upon compact or on restart.
summary.read.io enum {NORMAL, DIRECTIO, MMAP } default=MMAP restart
## Multiple optional options for use with mmap
@@ -276,16 +275,17 @@ summary.read.mmap.options[] enum {MLOCK, POPULATE, HUGETLB} restart
summary.read.mmap.advise enum {NORMAL, RANDOM, SEQUENTIAL} default=NORMAL restart
## Enable compact for bucket oriented access.
+## TODO: Unused, always bucket order.
summary.compact2buckets bool default=true restart
## The name of the input document type
-documentdb[].inputdoctypename string restart
+documentdb[].inputdoctypename string
## The configid used to subscribe to config for this database.
-documentdb[].configid string restart
+documentdb[].configid string
## How many seconds is allowed from document is received to it is visible in the index.
documentdb[].visibilitydelay double default=0.0
## Whether this document type is globally distributed or not.
-documentdb[].global bool default=false restart
+documentdb[].global bool default=false
## The interval of when periodic tasks should be run
periodic.interval double default=3600.0
@@ -378,6 +378,11 @@ visit.ignoremaxbytes bool default=true
## When set to 0 (default) we use 1 separate thread per document database.
initialize.threads int default = 0
+## Number of worker threads doing background compaction/compression tasks.
+## They all live in a shared thread pool.
+## When set to 0 (default), it will have enough threads to saturate half of the cores.
+background.threads int default=0
+
## Portion of enumstore address space that can be used before put and update
## portion of feed is blocked.
writefilter.attribute.enumstorelimit double default = 0.9
@@ -402,6 +407,11 @@ writefilter.sampleinterval double default = 60.0
## The disk size is used when determining if feed should be blocked in writefilter.
hwinfo.disk.size long default = 0
+## Whether the disk partition is shared among several instances of proton (e.g. when using docker).
+## If shared, disk usage is sampled by doing a recursive directory scan in proton basedir.
+## If not, disk usage is sampled by looking at the filesystem space info.
+hwinfo.disk.shared bool default = false
+
## Override for disk write speed, measured in MiB/s. When zero, the
## actual disk write speed is sampled by writing data to a temporary file.
hwinfo.disk.writespeed double default = 200.0
@@ -419,6 +429,25 @@ hwinfo.disk.slowwritespeedlimit double default = 100.0
## The memory size is used when determining if feed should be blocked in writefilter.
hwinfo.memory.size long default = 0
+## The number of cores on the cpu.
+## If set to 0, this is sampled by using std::thread::hardware_concurrency().
+hwinfo.cpu.cores int default = 0
+
+## A number between 0.0 and 1.0 that specifies the concurrency when handling feed operations.
+## When set to 1.0 all cores on the cpu are utilized.
+##
+## 4 thread pools used for various aspect of feeding are configured based on this setting:
+## 1) Compressing and compacting documents
+## 2) Writing changes to attribute fields
+## 3) Inverting index fields
+## 4) Writing changes to index fields
+##
+## The number of threads in pool 1 is calculated as:
+## max(ceil(hwinfo.cpu.cores * feeding.concurrency), summary.log.numthreads)
+## The number of threads in each of pools 2-4 is calculated as:
+## max(ceil((hwinfo.cpu.cores * feeding.concurrency)/3), indexing.threads)
+feeding.concurrency double default = 0.5 restart
+
## Adjustment to resource limit when determining if maintenance jobs can run.
##
## Currently used by 'lid_space_compaction' and 'move_buckets' jobs.
diff --git a/searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp b/searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp
index 43dc3237685..a658b11263a 100644
--- a/searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp
+++ b/searchcore/src/vespa/searchcore/proton/attribute/flushableattribute.cpp
@@ -92,7 +92,7 @@ FlushableAttribute::Flusher::saveAttribute()
_syncToken);
bool saveSuccess = true;
if (_saver && _saver->hasGenerationGuard() &&
- _fattr._hwInfo.slowDisk()) {
+ _fattr._hwInfo.disk().slow()) {
saveSuccess = _saver->save(_saveTarget);
_saver.reset();
}
diff --git a/searchcore/src/vespa/searchcore/proton/common/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/common/CMakeLists.txt
index dec686d3070..9aa6a87ea6c 100644
--- a/searchcore/src/vespa/searchcore/proton/common/CMakeLists.txt
+++ b/searchcore/src/vespa/searchcore/proton/common/CMakeLists.txt
@@ -3,7 +3,6 @@ vespa_add_library(searchcore_pcommon STATIC
SOURCES
attributefieldvaluenode.cpp
attrupdate.cpp
- bucketfactory.cpp
cachedselect.cpp
commit_time_tracker.cpp
dbdocumentid.cpp
@@ -22,4 +21,5 @@ vespa_add_library(searchcore_pcommon STATIC
DEPENDS
searchcore_proton_metrics
searchcore_fconfig
+ stdc++fs
)
diff --git a/searchcore/src/vespa/searchcore/proton/common/feedtoken.cpp b/searchcore/src/vespa/searchcore/proton/common/feedtoken.cpp
index be9e2e823a0..545a303cb16 100644
--- a/searchcore/src/vespa/searchcore/proton/common/feedtoken.cpp
+++ b/searchcore/src/vespa/searchcore/proton/common/feedtoken.cpp
@@ -19,19 +19,19 @@ FeedToken::State::State(ITransport & transport, mbus::Reply::UP reply, uint32_t
_lock(),
_startTime()
{
- assert(_reply.get() != NULL);
+ assert(_reply);
_startTime.SetNow();
}
FeedToken::State::~State()
{
- assert(_reply.get() == NULL);
+ assert(!_reply);
}
void
FeedToken::State::ack()
{
- assert(_reply.get() != NULL);
+ assert(_reply);
uint32_t prev(_unAckedCount--);
if (prev == 1) {
_transport.send(std::move(_reply), std::move(_result), _documentWasFound, _startTime.MilliSecsToNow());
@@ -44,7 +44,7 @@ void
FeedToken::State::ack(const FeedOperation::Type opType,
PerDocTypeFeedMetrics &metrics)
{
- assert(_reply.get() != NULL);
+ assert(_reply);
uint32_t prev(_unAckedCount--);
if (prev == 1) {
_transport.send(std::move(_reply), std::move(_result), _documentWasFound, _startTime.MilliSecsToNow());
@@ -74,7 +74,7 @@ FeedToken::State::ack(const FeedOperation::Type opType,
void
FeedToken::State::incNeededAcks()
{
- assert(_reply.get() != NULL);
+ assert(_reply);
uint32_t prev(_unAckedCount++);
assert(prev >= 1);
(void) prev;
@@ -84,18 +84,10 @@ FeedToken::State::incNeededAcks()
void
FeedToken::State::fail(uint32_t errNum, const vespalib::string &errMsg)
{
- assert(_reply.get() != NULL);
+ assert(_reply);
vespalib::LockGuard guard(_lock);
_reply->addError(mbus::Error(errNum, errMsg));
_transport.send(std::move(_reply), std::move(_result), _documentWasFound, _startTime.MilliSecsToNow());
}
-void
-FeedToken::State::trace(uint32_t traceLevel, const vespalib::string &traceMsg)
-{
- assert(_reply.get() != NULL);
- vespalib::LockGuard guard(_lock);
- _reply->getTrace().trace(traceLevel, traceMsg);
-}
-
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/common/feedtoken.h b/searchcore/src/vespa/searchcore/proton/common/feedtoken.h
index 6bca156ddba..722827ded87 100644
--- a/searchcore/src/vespa/searchcore/proton/common/feedtoken.h
+++ b/searchcore/src/vespa/searchcore/proton/common/feedtoken.h
@@ -8,9 +8,7 @@
#include <vespa/searchcore/proton/feedoperation/feedoperation.h>
#include <atomic>
-namespace proton
-{
-
+namespace proton {
class PerDocTypeFeedMetrics;
typedef std::unique_ptr<storage::spi::Result> ResultUP;
@@ -38,7 +36,6 @@ private:
State & operator = (const State &) = delete;
State(ITransport & transport, mbus::Reply::UP reply, uint32_t numAcksRequired);
~State();
- void setNumAcksRequired(uint32_t numAcksRequired) { _unAckedCount = numAcksRequired; }
void ack();
void ack(const FeedOperation::Type opType, PerDocTypeFeedMetrics &metrics);
@@ -46,8 +43,6 @@ private:
void incNeededAcks();
void fail(uint32_t errNum, const vespalib::string &errMsg);
- void trace(uint32_t traceLevel, const vespalib::string &traceMsg);
- bool shouldTrace(uint32_t traceLevel) const { return _reply->getTrace().shouldTrace(traceLevel); }
mbus::Reply & getReply() { return *_reply; }
void setResult(ResultUP result, bool documentWasFound) {
_documentWasFound = documentWasFound;
@@ -81,6 +76,12 @@ public:
*/
FeedToken(ITransport &transport, mbus::Reply::UP reply);
+ FeedToken(FeedToken &&) = default;
+ FeedToken & operator =(FeedToken &&) = default;
+ FeedToken(const FeedToken &) = default;
+ FeedToken & operator =(const FeedToken &) = default;
+ ~FeedToken() = default;
+
/**
* Passes a receipt back to the originating FeedEngine, declaring that this
* operation succeeded. If an error occured while processing the operation,
@@ -88,15 +89,11 @@ public:
*/
void ack() const { _state->ack(); }
- void
- ack(const FeedOperation::Type opType, PerDocTypeFeedMetrics &metrics) const
- {
+ void ack(const FeedOperation::Type opType, PerDocTypeFeedMetrics &metrics) const {
_state->ack(opType, metrics);
}
- void
- incNeededAcks() const
- {
+ void incNeededAcks() const {
_state->incNeededAcks();
}
@@ -111,23 +108,6 @@ public:
void fail(uint32_t errNum, const vespalib::string &errMsg) const { _state->fail(errNum, errMsg); }
/**
- * Writes a trace message to the receipt of this operation that will later
- * be passed back to the FeedEngine through ack() or fail().
- *
- * @param traceLevel The level of the message to write.
- * @param traceMsg The message to write.
- */
- void trace(uint32_t traceLevel, const vespalib::string &traceMsg) const { _state->trace(traceLevel, traceMsg); }
-
- /**
- * Tell you if tracing at this level is enabled
- *
- * @param traceLevel The level you want to trace at.
- * @return if you should trace or not.
- */
- bool shouldTrace(uint32_t traceLevel) const { return _state->shouldTrace(traceLevel); }
-
- /**
* Gives you access to the underlying reply message.
*
* @return The reply
@@ -148,14 +128,6 @@ public:
_state->setResult(std::move(result), documentWasFound);
}
- /**
- * This controls how many acks are required before it is acked back to the sender.
- * Default is 1, and so far only adjusted by multioperation handling.
- *
- * @param numAcksRequired How many acks must be received before it is considered acked.
- */
- void setNumAcksRequired(uint32_t numAcksRequired) const { _state->setNumAcksRequired(numAcksRequired); }
-
FastOS_Time getStartTime() const { return _state->getStartTime(); }
};
diff --git a/searchcore/src/vespa/searchcore/proton/common/handlermap.hpp b/searchcore/src/vespa/searchcore/proton/common/handlermap.hpp
index 5b19fa96e4c..6b5d89cbd22 100644
--- a/searchcore/src/vespa/searchcore/proton/common/handlermap.hpp
+++ b/searchcore/src/vespa/searchcore/proton/common/handlermap.hpp
@@ -43,12 +43,13 @@ public:
public:
typedef std::unique_ptr<Snapshot> UP;
- Snapshot(StdMap &map) : _handlers(), _offset(0) {
+ Snapshot(const StdMap &map) : _handlers(), _offset(0) {
_handlers.reserve(map.size());
- for (MapIterator pos = map.begin(); pos != map.end(); ++pos) {
- _handlers.push_back(pos->second);
+ for (auto itr : map) {
+ _handlers.push_back(itr.second);
}
}
+ Snapshot(std::vector<HandlerSP> &&handlers) : _handlers(std::move(handlers)), _offset(0) {}
bool valid() const override { return (_offset < _handlers.size()); }
T *get() const override { return _handlers[_offset].get(); }
HandlerSP getSP() const { return _handlers[_offset]; }
@@ -162,7 +163,7 @@ public:
* @return handler sequence
**/
std::unique_ptr<Snapshot>
- snapshot()
+ snapshot() const
{
return std::unique_ptr<Snapshot>(new Snapshot(_handlers));
}
diff --git a/searchcore/src/vespa/searchcore/proton/common/hw_info.h b/searchcore/src/vespa/searchcore/proton/common/hw_info.h
index 06efaec18f2..cd0ef2817d7 100644
--- a/searchcore/src/vespa/searchcore/proton/common/hw_info.h
+++ b/searchcore/src/vespa/searchcore/proton/common/hw_info.h
@@ -2,6 +2,8 @@
#pragma once
+#include <cstdint>
+
namespace proton {
/*
@@ -9,19 +11,61 @@ namespace proton {
*/
class HwInfo
{
- bool _slowDisk;
+public:
+ class Disk {
+ private:
+ uint64_t _sizeBytes;
+ bool _slow;
+ bool _shared;
+ public:
+ Disk(uint64_t sizeBytes_, bool slow_, bool shared_)
+ : _sizeBytes(sizeBytes_), _slow(slow_), _shared(shared_) {}
+ uint64_t sizeBytes() const { return _sizeBytes; }
+ bool slow() const { return _slow; }
+ bool shared() const { return _shared; }
+ };
+
+ class Memory {
+ private:
+ uint64_t _sizeBytes;
+ public:
+ Memory(uint64_t sizeBytes_) : _sizeBytes(sizeBytes_) {}
+ uint64_t sizeBytes() const { return _sizeBytes; }
+ };
+
+ class Cpu {
+ private:
+ uint32_t _cores;
+ public:
+ Cpu(uint32_t cores_) : _cores(cores_) {}
+ uint32_t cores() const { return _cores; }
+ };
+
+private:
+ Disk _disk;
+ Memory _memory;
+ Cpu _cpu;
+
public:
HwInfo()
- : _slowDisk(false)
+ : _disk(0, false, false),
+ _memory(0),
+ _cpu(0)
{
}
- HwInfo(bool slowDisk_in)
- : _slowDisk(slowDisk_in)
+ HwInfo(const Disk &disk_,
+ const Memory &memory_,
+ const Cpu &cpu_)
+ : _disk(disk_),
+ _memory(memory_),
+ _cpu(cpu_)
{
}
- bool slowDisk() const { return _slowDisk; }
+ const Disk &disk() const { return _disk; }
+ const Memory &memory() const { return _memory; }
+ const Cpu &cpu() const { return _cpu; }
};
}
diff --git a/searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.cpp b/searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.cpp
index 73bb20c712a..c9bf81d1310 100644
--- a/searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.cpp
+++ b/searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.cpp
@@ -1,13 +1,15 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "hw_info_sampler.h"
-#include <vespa/config/config.h>
#include <vespa/config/common/configholder.h>
+#include <vespa/config/config.h>
#include <vespa/config/file/filesource.h>
-#include <vespa/searchcore/config/config-hwinfo.h>
#include <vespa/config/print/fileconfigwriter.h>
-#include <vespa/vespalib/io/fileutil.h>
#include <vespa/fastos/file.h>
+#include <vespa/searchcore/config/config-hwinfo.h>
+#include <vespa/vespalib/io/fileutil.h>
+#include <experimental/filesystem>
+#include <thread>
using config::ConfigHandle;
using config::ConfigSubscriber;
@@ -22,6 +24,35 @@ namespace proton {
namespace {
+uint64_t
+sampleDiskSizeBytes(const std::string &pathStr, const HwInfoSampler::Config &cfg)
+{
+ if (cfg.diskSizeBytes != 0) {
+ return cfg.diskSizeBytes;
+ }
+ std::experimental::filesystem::path path(pathStr);
+ auto space_info = std::experimental::filesystem::space(path);
+ return space_info.capacity;
+}
+
+uint64_t
+sampleMemorySizeBytes(const HwInfoSampler::Config &cfg)
+{
+ if (cfg.memorySizeBytes != 0) {
+ return cfg.memorySizeBytes;
+ }
+ return sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGESIZE);
+}
+
+uint32_t
+sampleCpuCores(const HwInfoSampler::Config &cfg)
+{
+ if (cfg.cpuCores != 0) {
+ return cfg.cpuCores;
+ }
+ return std::thread::hardware_concurrency();
+}
+
std::unique_ptr<HwinfoConfig> readConfig(const vespalib::string &path) {
FileSpec spec(path + "/" + "hwinfo.cfg");
ConfigSubscriber s(spec);
@@ -80,8 +111,29 @@ HwInfoSampler::HwInfoSampler(const vespalib::string &path,
_sampleTime(),
_diskWriteSpeed(0.0)
{
- if (config._diskWriteSpeedOverride != 0) {
- _diskWriteSpeed = config._diskWriteSpeedOverride;
+ setDiskWriteSpeed(path, config);
+ setup(HwInfo::Disk(sampleDiskSizeBytes(path, config),
+ (_diskWriteSpeed < config.slowWriteSpeedLimit),
+ config.diskShared),
+ HwInfo::Memory(sampleMemorySizeBytes(config)),
+ HwInfo::Cpu(sampleCpuCores(config)));
+}
+
+HwInfoSampler::~HwInfoSampler()
+{
+}
+
+void
+HwInfoSampler::setup(const HwInfo::Disk &disk, const HwInfo::Memory &memory, const HwInfo::Cpu &cpu)
+{
+ _hwInfo = HwInfo(disk, memory, cpu);
+}
+
+void
+HwInfoSampler::setDiskWriteSpeed(const vespalib::string &path, const Config &config)
+{
+ if (config.diskWriteSpeedOverride != 0) {
+ _diskWriteSpeed = config.diskWriteSpeedOverride;
_sampleTime = Clock::now();
} else {
auto cfg = readConfig(path);
@@ -89,28 +141,16 @@ HwInfoSampler::HwInfoSampler(const vespalib::string &path,
_sampleTime = std::chrono::time_point<Clock>(std::chrono::seconds(cfg->disk.sampletime));
_diskWriteSpeed = cfg->disk.writespeed;
} else {
- sample(path, config);
+ sampleDiskWriteSpeed(path, config);
}
}
- setup(config);
-}
-
-HwInfoSampler::~HwInfoSampler()
-{
-}
-
-void
-HwInfoSampler::setup(const Config &config)
-{
- bool slowDisk = _diskWriteSpeed < config._slowWriteSpeedLimit;
- _hwInfo = HwInfo(slowDisk);
}
void
-HwInfoSampler::sample(const vespalib::string &path, const Config &config)
+HwInfoSampler::sampleDiskWriteSpeed(const vespalib::string &path, const Config &config)
{
size_t minDiskWriteLen = 1024u * 1024u;
- size_t diskWriteLen = config._diskSampleWriteSize;
+ size_t diskWriteLen = config.diskSampleWriteSize;
diskWriteLen = std::max(diskWriteLen, minDiskWriteLen);
_sampleTime = Clock::now();
_diskWriteSpeed = measureDiskWriteSpeed(path, diskWriteLen);
diff --git a/searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.h b/searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.h
index 22af2d32786..3570ba1b87f 100644
--- a/searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.h
+++ b/searchcore/src/vespa/searchcore/proton/common/hw_info_sampler.h
@@ -16,16 +16,28 @@ class HwInfoSampler
{
public:
struct Config {
- double _diskWriteSpeedOverride;
- double _slowWriteSpeedLimit;
- uint64_t _diskSampleWriteSize;
-
- Config(double diskWriteSpeedOverride,
- double slowWriteSpeedLimit,
- double diskSampleWriteSize)
- : _diskWriteSpeedOverride(diskWriteSpeedOverride),
- _slowWriteSpeedLimit(slowWriteSpeedLimit),
- _diskSampleWriteSize(diskSampleWriteSize)
+ uint64_t diskSizeBytes;
+ double diskWriteSpeedOverride;
+ double slowWriteSpeedLimit;
+ uint64_t diskSampleWriteSize;
+ bool diskShared;
+ uint64_t memorySizeBytes;
+ uint32_t cpuCores;
+
+ Config(uint64_t diskSizeBytes_,
+ double diskWriteSpeedOverride_,
+ double slowWriteSpeedLimit_,
+ double diskSampleWriteSize_,
+ bool diskShared_,
+ uint64_t memorySizeBytes_,
+ uint32_t cpuCores_)
+ : diskSizeBytes(diskSizeBytes_),
+ diskWriteSpeedOverride(diskWriteSpeedOverride_),
+ slowWriteSpeedLimit(slowWriteSpeedLimit_),
+ diskSampleWriteSize(diskSampleWriteSize_),
+ diskShared(diskShared_),
+ memorySizeBytes(memorySizeBytes_),
+ cpuCores(cpuCores_)
{
}
};
@@ -36,8 +48,9 @@ private:
Clock::time_point _sampleTime;
double _diskWriteSpeed;
- void setup(const Config &config);
- void sample(const vespalib::string &path, const Config &config);
+ void setup(const HwInfo::Disk &disk, const HwInfo::Memory &memory, const HwInfo::Cpu &cpu);
+ void setDiskWriteSpeed(const vespalib::string &path, const Config &config);
+ void sampleDiskWriteSpeed(const vespalib::string &path, const Config &config);
public:
HwInfoSampler(const vespalib::string &path, const Config &config);
~HwInfoSampler();
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp
index 0549e57a528..6cc1c1eb5b9 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarycompacttarget.cpp
@@ -1,13 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "summarycompacttarget.h"
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/searchcorespi/index/i_thread_service.h>
#include <future>
using search::IDocumentStore;
using search::SerialNum;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using searchcorespi::FlushStats;
using searchcorespi::IFlushTarget;
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.cpp
index de8eaacf184..2f7681c5909 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.cpp
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/summaryflushtarget.cpp
@@ -1,9 +1,8 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "summaryflushtarget.h"
-#include <future>
#include <vespa/searchcorespi/index/i_thread_service.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
using search::IDocumentStore;
using search::SerialNum;
@@ -91,7 +90,7 @@ SummaryFlushTarget::initFlush(SerialNum currentSerial)
// Called by document db executor
std::promise<Task::UP> promise;
std::future<Task::UP> future = promise.get_future();
- _summaryService.execute(search::makeLambdaTask(
+ _summaryService.execute(vespalib::makeLambdaTask(
[&]() { promise.set_value(
internalInitFlush(currentSerial));
}));
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp
index 79d82108ee8..a8ec474e88c 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.cpp
@@ -8,8 +8,7 @@
#include <vespa/juniper/rpinterface.h>
#include <vespa/searchcorespi/index/i_thread_service.h>
#include <vespa/searchcore/proton/flushengine/shrink_lid_space_flush_target.h>
-#include <vespa/searchlib/docstore/logdocumentstore.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/searchsummary/docsummary/docsumconfig.h>
#include <vespa/vespalib/util/exceptions.h>
#include <sstream>
@@ -20,7 +19,6 @@ LOG_SETUP(".proton.docsummary.summarymanager");
using namespace config;
using namespace document;
using namespace search::docsummary;
-using namespace vespa::config::search::core;
using namespace vespa::config::search::summary;
using namespace vespa::config::search;
using vespalib::make_string;
@@ -32,7 +30,7 @@ using search::IDocumentStore;
using search::LogDocumentStore;
using search::LogDataStore;
using search::WriteableFileChunk;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using search::TuneFileSummary;
using search::common::FileHeaderContext;
@@ -48,24 +46,19 @@ class ShrinkSummaryLidSpaceFlushTarget : public ShrinkLidSpaceFlushTarget
searchcorespi::index::IThreadService & _summaryService;
public:
- ShrinkSummaryLidSpaceFlushTarget(const vespalib::string &name,
- Type type,
- Component component,
- SerialNum flushedSerialNum,
- Time lastFlushTime,
+ ShrinkSummaryLidSpaceFlushTarget(const vespalib::string &name, Type type, Component component,
+ SerialNum flushedSerialNum, Time lastFlushTime,
searchcorespi::index::IThreadService & summaryService,
std::shared_ptr<ICompactableLidSpace> target);
~ShrinkSummaryLidSpaceFlushTarget();
- virtual Task::UP initFlush(SerialNum currentSerial) override;
+ Task::UP initFlush(SerialNum currentSerial) override;
};
-ShrinkSummaryLidSpaceFlushTarget::ShrinkSummaryLidSpaceFlushTarget(const vespalib::string &name,
- Type type,
- Component component,
- SerialNum flushedSerialNum,
- Time lastFlushTime,
- searchcorespi::index::IThreadService & summaryService,
- std::shared_ptr<ICompactableLidSpace> target)
+ShrinkSummaryLidSpaceFlushTarget::
+ShrinkSummaryLidSpaceFlushTarget(const vespalib::string &name, Type type, Component component,
+ SerialNum flushedSerialNum, Time lastFlushTime,
+ searchcorespi::index::IThreadService & summaryService,
+ std::shared_ptr<ICompactableLidSpace> target)
: ShrinkLidSpaceFlushTarget(name, type, component, flushedSerialNum, lastFlushTime, std::move(target)),
_summaryService(summaryService)
{
@@ -85,13 +78,9 @@ ShrinkSummaryLidSpaceFlushTarget::initFlush(SerialNum currentSerial)
}
SummaryManager::SummarySetup::
-SummarySetup(const vespalib::string & baseDir,
- const DocTypeName & docTypeName,
- const SummaryConfig & summaryCfg,
- const SummarymapConfig & summarymapCfg,
- const JuniperrcConfig & juniperCfg,
- const search::IAttributeManager::SP &attributeMgr,
- const search::IDocumentStore::SP & docStore,
+SummarySetup(const vespalib::string & baseDir, const DocTypeName & docTypeName, const SummaryConfig & summaryCfg,
+ const SummarymapConfig & summarymapCfg, const JuniperrcConfig & juniperCfg,
+ const search::IAttributeManager::SP &attributeMgr, const search::IDocumentStore::SP & docStore,
const DocumentTypeRepo::SP &repo)
: _docsumWriter(),
_wordFolder(),
@@ -148,47 +137,18 @@ SummaryManager::SummarySetup::createDocsumStore(const vespalib::string &resultCl
ISummaryManager::ISummarySetup::SP
-SummaryManager::createSummarySetup(const SummaryConfig & summaryCfg,
- const SummarymapConfig & summarymapCfg,
- const JuniperrcConfig & juniperCfg,
- const DocumentTypeRepo::SP &repo,
+SummaryManager::createSummarySetup(const SummaryConfig & summaryCfg, const SummarymapConfig & summarymapCfg,
+ const JuniperrcConfig & juniperCfg, const DocumentTypeRepo::SP &repo,
const search::IAttributeManager::SP &attributeMgr)
{
return std::make_shared<SummarySetup>(_baseDir, _docTypeName, summaryCfg, summarymapCfg,
juniperCfg, attributeMgr, _docStore, repo);
}
-namespace {
-
-template<typename T>
-CompressionConfig
-deriveCompression(const T & config) {
- CompressionConfig compression;
- if (config.type == T::LZ4) {
- compression.type = CompressionConfig::LZ4;
- } else if (config.type == T::ZSTD) {
- compression.type = CompressionConfig::ZSTD;
- }
- compression.compressionLevel = config.level;
- return compression;
-}
-
-DocumentStore::Config
-getStoreConfig(const ProtonConfig::Summary::Cache & cache)
-{
- return DocumentStore::Config(deriveCompression(cache.compression), cache.maxbytes, cache.initialentries).allowVisitCaching(cache.allowvisitcaching);
-}
-
-}
-
-SummaryManager::SummaryManager(vespalib::ThreadExecutor & executor,
- const ProtonConfig::Summary & summary,
- const search::GrowStrategy & growStrategy,
- const vespalib::string &baseDir,
- const DocTypeName &docTypeName,
- const TuneFileSummary &tuneFileSummary,
- const FileHeaderContext &fileHeaderContext,
- search::transactionlog::SyncProxy &tlSyncer,
+SummaryManager::SummaryManager(vespalib::ThreadExecutor & executor, const LogDocumentStore::Config & storeConfig,
+ const search::GrowStrategy & growStrategy, const vespalib::string &baseDir,
+ const DocTypeName &docTypeName, const TuneFileSummary &tuneFileSummary,
+ const FileHeaderContext &fileHeaderContext, search::transactionlog::SyncProxy &tlSyncer,
const search::IBucketizer::SP & bucketizer)
: _baseDir(baseDir),
_docTypeName(docTypeName),
@@ -196,19 +156,8 @@ SummaryManager::SummaryManager(vespalib::ThreadExecutor & executor,
_tuneFileSummary(tuneFileSummary),
_currentSerial(0u)
{
- DocumentStore::Config config(getStoreConfig(summary.cache));
- const ProtonConfig::Summary::Log & log(summary.log);
- const ProtonConfig::Summary::Log::Chunk & chunk(log.chunk);
-
- WriteableFileChunk::Config fileConfig(deriveCompression(chunk.compression), chunk.maxbytes);
- LogDataStore::Config logConfig(log.maxfilesize, log.maxdiskbloatfactor, log.maxbucketspread,
- log.minfilesizefactor, log.numthreads, log.compact2activefile,
- deriveCompression(log.compact.compression), fileConfig);
- logConfig.disableCrcOnRead(chunk.skipcrconread);
- _docStore.reset(new LogDocumentStore(executor, baseDir,
- LogDocumentStore::Config(config, logConfig),
- growStrategy, tuneFileSummary, fileHeaderContext, tlSyncer,
- summary.compact2buckets ? bucketizer : search::IBucketizer::SP()));
+ _docStore = std::make_shared<LogDocumentStore>(executor, baseDir, storeConfig, growStrategy, tuneFileSummary,
+ fileHeaderContext, tlSyncer, bucketizer);
}
SummaryManager::~SummaryManager() {}
@@ -261,4 +210,9 @@ IFlushTarget::List SummaryManager::getFlushTargets(searchcorespi::index::IThread
return ret;
}
+void SummaryManager::reconfigure(const LogDocumentStore::Config & config) {
+ LogDocumentStore & docStore = dynamic_cast<LogDocumentStore &> (*_docStore);
+ docStore.reconfigure(config);
+}
+
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.h b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.h
index af82f25ba51..826c39046a4 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.h
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanager.h
@@ -3,12 +3,11 @@
#include "isummarymanager.h"
#include "fieldcacherepo.h"
-#include <vespa/searchcore/config/config-proton.h>
#include <vespa/searchcore/proton/attribute/attributemanager.h>
#include <vespa/searchcore/proton/common/doctypename.h>
#include <vespa/searchcorespi/flush/iflushtarget.h>
#include <vespa/searchlib/common/tunefileinfo.h>
-#include <vespa/searchlib/docstore/idatastore.h>
+#include <vespa/searchlib/docstore/logdocumentstore.h>
#include <vespa/searchlib/transactionlog/syncproxy.h>
#include <vespa/document/fieldvalue/document.h>
#include <vespa/document/repo/documenttyperepo.h>
@@ -16,13 +15,8 @@
#include <vespa/fastlib/text/normwordfolder.h>
namespace searchcorespi::index { class IThreadService; }
-namespace search {
-
-class IBucketizer;
-
-namespace common { class FileHeaderContext; }
-
-}
+namespace search { class IBucketizer; }
+namespace search::common { class FileHeaderContext; }
namespace proton {
@@ -50,32 +44,27 @@ public:
const search::IDocumentStore::SP & docStore,
const document::DocumentTypeRepo::SP &repo);
- /**
- * Implements ISummarySetup.
- */
search::docsummary::IDocsumWriter & getDocsumWriter() const override { return *_docsumWriter; }
search::docsummary::ResultConfig & getResultConfig() override { return *_docsumWriter->GetResultConfig(); }
- search::docsummary::IDocsumStore::UP createDocsumStore(
- const vespalib::string &resultClassName) override;
+ search::docsummary::IDocsumStore::UP createDocsumStore(const vespalib::string &resultClassName) override;
- // Inherit doc from IDocsumEnvironment
- virtual search::IAttributeManager * getAttributeManager() override { return _attributeMgr.get(); }
- virtual vespalib::string lookupIndex(const vespalib::string & s) const override { (void) s; return ""; }
- virtual juniper::Juniper * getJuniper() override { return _juniperConfig.get(); }
+ search::IAttributeManager * getAttributeManager() override { return _attributeMgr.get(); }
+ vespalib::string lookupIndex(const vespalib::string & s) const override { (void) s; return ""; }
+ juniper::Juniper * getJuniper() override { return _juniperConfig.get(); }
};
private:
vespalib::string _baseDir;
DocTypeName _docTypeName;
- search::IDocumentStore::SP _docStore;
+ std::shared_ptr<search::IDocumentStore> _docStore;
const search::TuneFileSummary _tuneFileSummary;
uint64_t _currentSerial;
public:
typedef std::shared_ptr<SummaryManager> SP;
SummaryManager(vespalib::ThreadExecutor & executor,
- const vespa::config::search::core::ProtonConfig::Summary & summary,
+ const search::LogDocumentStore::Config & summary,
const search::GrowStrategy & growStrategy,
const vespalib::string &baseDir,
const DocTypeName &docTypeName,
@@ -87,22 +76,18 @@ public:
void putDocument(uint64_t syncToken, search::DocumentIdT lid, const document::Document & doc);
void putDocument(uint64_t syncToken, search::DocumentIdT lid, const vespalib::nbostream & doc);
-
void removeDocument(uint64_t syncToken, search::DocumentIdT lid);
searchcorespi::IFlushTarget::List getFlushTargets(searchcorespi::index::IThreadService & summaryService);
- /**
- * Implements ISummaryManager.
- */
- virtual ISummarySetup::SP
+ ISummarySetup::SP
createSummarySetup(const vespa::config::search::SummaryConfig &summaryCfg,
const vespa::config::search::SummarymapConfig &summarymapCfg,
const vespa::config::search::summary::JuniperrcConfig &juniperCfg,
const document::DocumentTypeRepo::SP &repo,
const search::IAttributeManager::SP &attributeMgr) override;
- virtual search::IDocumentStore & getBackingStore() override { return *_docStore; }
-
+ search::IDocumentStore & getBackingStore() override { return *_docStore; }
+ void reconfigure(const search::LogDocumentStore::Config & config);
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.cpp b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.cpp
index 5c0cca83f69..c11326090dc 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.cpp
@@ -11,12 +11,10 @@ SummaryManagerInitializer(const search::GrowStrategy &grow,
const vespalib::string baseDir,
const vespalib::string &subDbName,
const DocTypeName &docTypeName,
- vespalib::ThreadStackExecutorBase &
- summaryExecutor,
- const ProtonConfig::Summary protonSummaryCfg,
+ vespalib::ThreadStackExecutorBase &summaryExecutor,
+ const search::LogDocumentStore::Config & storeCfg,
const search::TuneFileSummary &tuneFile,
- const search::common::FileHeaderContext &
- fileHeaderContext,
+ const search::common::FileHeaderContext &fileHeaderContext,
search::transactionlog::SyncProxy &tlSyncer,
IBucketizerSP bucketizer,
std::shared_ptr<SummaryManager::SP> result)
@@ -26,7 +24,7 @@ SummaryManagerInitializer(const search::GrowStrategy &grow,
_subDbName(subDbName),
_docTypeName(docTypeName),
_summaryExecutor(summaryExecutor),
- _protonSummaryCfg(protonSummaryCfg),
+ _storeCfg(storeCfg),
_tuneFile(tuneFile),
_fileHeaderContext(fileHeaderContext),
_tlSyncer(tlSyncer),
@@ -34,6 +32,7 @@ SummaryManagerInitializer(const search::GrowStrategy &grow,
_result(result)
{ }
+SummaryManagerInitializer::~SummaryManagerInitializer() {}
void
SummaryManagerInitializer::run()
@@ -42,17 +41,11 @@ SummaryManagerInitializer::run()
fastos::TimeStamp startTime = fastos::ClockSystem::now();
EventLogger::loadDocumentStoreStart(_subDbName);
*_result = std::make_shared<SummaryManager>
- (_summaryExecutor,
- _protonSummaryCfg, _grow,
- _baseDir,
- _docTypeName,
- _tuneFile,
- _fileHeaderContext,
- _tlSyncer, _bucketizer);
+ (_summaryExecutor, _storeCfg, _grow, _baseDir, _docTypeName,
+ _tuneFile, _fileHeaderContext, _tlSyncer, _bucketizer);
fastos::TimeStamp endTime = fastos::ClockSystem::now();
int64_t elapsedTimeMs = (endTime - startTime).ms();
EventLogger::loadDocumentStoreComplete(_subDbName, elapsedTimeMs);
}
-
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.h b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.h
index 83a841ee43d..d96e94df5c3 100644
--- a/searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.h
+++ b/searchcore/src/vespa/searchcore/proton/docsummary/summarymanagerinitializer.h
@@ -16,18 +16,17 @@ namespace proton {
class SummaryManagerInitializer : public initializer::InitializerTask
{
using IBucketizerSP = std::shared_ptr<search::IBucketizer>;
- using ProtonConfig = vespa::config::search::core::ProtonConfig;
- const search::GrowStrategy _grow;
- const vespalib::string _baseDir;
- const vespalib::string _subDbName;
- const DocTypeName _docTypeName;
- vespalib::ThreadExecutor &_summaryExecutor;
- const ProtonConfig::Summary _protonSummaryCfg;
- const search::TuneFileSummary _tuneFile;
+ const search::GrowStrategy _grow;
+ const vespalib::string _baseDir;
+ const vespalib::string _subDbName;
+ const DocTypeName _docTypeName;
+ vespalib::ThreadExecutor &_summaryExecutor;
+ const search::LogDocumentStore::Config _storeCfg;
+ const search::TuneFileSummary _tuneFile;
const search::common::FileHeaderContext &_fileHeaderContext;
- search::transactionlog::SyncProxy &_tlSyncer;
- const IBucketizerSP _bucketizer;
- std::shared_ptr<SummaryManager::SP> _result;
+ search::transactionlog::SyncProxy &_tlSyncer;
+ const IBucketizerSP _bucketizer;
+ std::shared_ptr<SummaryManager::SP> _result;
public:
using SP = std::shared_ptr<SummaryManagerInitializer>;
@@ -38,12 +37,13 @@ public:
const vespalib::string &subDbName,
const DocTypeName &docTypeName,
vespalib::ThreadStackExecutorBase & summaryExecutor,
- const ProtonConfig::Summary protonSummaryCfg,
+ const search::LogDocumentStore::Config & storeCfg,
const search::TuneFileSummary &tuneFile,
const search::common::FileHeaderContext & fileHeaderContext,
search::transactionlog::SyncProxy &tlSyncer,
IBucketizerSP bucketizer,
std::shared_ptr<SummaryManager::SP> result);
+ ~SummaryManagerInitializer();
void run() override;
};
diff --git a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreflushtarget.cpp b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreflushtarget.cpp
index 322e71a572c..3821237233e 100644
--- a/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreflushtarget.cpp
+++ b/searchcore/src/vespa/searchcore/proton/documentmetastore/documentmetastoreflushtarget.cpp
@@ -87,7 +87,7 @@ DocumentMetaStoreFlushTarget::Flusher::saveDocumentMetaStore()
SerialNumFileHeaderContext fileHeaderContext(_dmsft._fileHeaderContext,
_syncToken);
bool saveSuccess = false;
- if (_dmsft._hwInfo.slowDisk()) {
+ if (_dmsft._hwInfo.disk().slow()) {
search::AttributeMemorySaveTarget memorySaveTarget;
saveSuccess = _saver->save(memorySaveTarget);
_saver.reset();
diff --git a/searchcore/src/vespa/searchcore/proton/flushengine/threadedflushtarget.cpp b/searchcore/src/vespa/searchcore/proton/flushengine/threadedflushtarget.cpp
index c7a614dafe8..3c1ac1b5361 100644
--- a/searchcore/src/vespa/searchcore/proton/flushengine/threadedflushtarget.cpp
+++ b/searchcore/src/vespa/searchcore/proton/flushengine/threadedflushtarget.cpp
@@ -2,13 +2,13 @@
#include "threadedflushtarget.h"
#include <vespa/searchcore/proton/server/igetserialnum.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <future>
#include <cassert>
using searchcorespi::IFlushTarget;
using searchcorespi::FlushStats;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
namespace proton {
diff --git a/searchcore/src/vespa/searchcore/proton/initializer/task_runner.cpp b/searchcore/src/vespa/searchcore/proton/initializer/task_runner.cpp
index a416a7d18f3..7deb0afa7af 100644
--- a/searchcore/src/vespa/searchcore/proton/initializer/task_runner.cpp
+++ b/searchcore/src/vespa/searchcore/proton/initializer/task_runner.cpp
@@ -1,15 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "task_runner.h"
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
#include <future>
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
-namespace proton {
-
-namespace initializer {
+namespace proton::initializer {
TaskRunner::TaskRunner(vespalib::Executor &executor)
: _executor(executor),
@@ -126,6 +124,4 @@ TaskRunner::runTask(InitializerTask::SP rootTask,
context->execute(makeLambdaTask([=]() { pollTask(context); } ));
}
-} // namespace proton::initializer
-
-} // namespace proton
+}
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/persistenceengine/CMakeLists.txt
index eeda9ccfea6..7b0727d5496 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/CMakeLists.txt
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/CMakeLists.txt
@@ -4,6 +4,7 @@ vespa_add_library(searchcore_persistenceengine STATIC
document_iterator.cpp
i_document_retriever.cpp
persistenceengine.cpp
+ persistence_handler_map.cpp
transport_latch.cpp
DEPENDS
)
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistence_handler_map.cpp b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistence_handler_map.cpp
new file mode 100644
index 00000000000..cf7d027f244
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistence_handler_map.cpp
@@ -0,0 +1,111 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "ipersistencehandler.h"
+#include "persistence_handler_map.h"
+
+namespace proton {
+
+using HandlerSnapshot = PersistenceHandlerMap::HandlerSnapshot;
+
+PersistenceHandlerMap::PersistenceHandlerMap()
+ : _map()
+{
+}
+
+IPersistenceHandler::SP
+PersistenceHandlerMap::putHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType,
+ const IPersistenceHandler::SP &handler)
+{
+ return _map[bucketSpace].putHandler(docType, handler);
+}
+
+IPersistenceHandler::SP
+PersistenceHandlerMap::getHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType) const
+{
+ auto itr = _map.find(bucketSpace);
+ if (itr != _map.end()) {
+ return itr->second.getHandler(docType);
+ }
+ return IPersistenceHandler::SP();
+}
+
+IPersistenceHandler::SP
+PersistenceHandlerMap::removeHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType)
+{
+ auto itr = _map.find(bucketSpace);
+ if (itr != _map.end()) {
+ return itr->second.removeHandler(docType);
+ }
+ return IPersistenceHandler::SP();
+}
+
+HandlerSnapshot::UP
+PersistenceHandlerMap::getHandlerSnapshot() const
+{
+ std::vector<IPersistenceHandler::SP> handlers;
+ for (auto spaceItr : _map) {
+ for (auto handlerItr : spaceItr.second) {
+ handlers.push_back(handlerItr.second);
+ }
+ }
+ return std::make_unique<HandlerSnapshot>
+ (std::make_unique<DocTypeToHandlerMap::Snapshot>(std::move(handlers)),
+ handlers.size());
+}
+
+namespace {
+
+struct EmptySequence : public vespalib::Sequence<IPersistenceHandler *> {
+ virtual bool valid() const override { return false; }
+ virtual IPersistenceHandler *get() const override { return nullptr; }
+ virtual void next() override { }
+ static EmptySequence::UP make() { return std::make_unique<EmptySequence>(); }
+};
+
+}
+
+HandlerSnapshot::UP
+PersistenceHandlerMap::getHandlerSnapshot(document::BucketSpace bucketSpace) const
+{
+ auto itr = _map.find(bucketSpace);
+ if (itr != _map.end()) {
+ return std::make_unique<HandlerSnapshot>(itr->second.snapshot(), itr->second.size());
+ }
+ return std::make_unique<HandlerSnapshot>(EmptySequence::make(), 0);
+}
+
+namespace {
+
+class SequenceOfOne : public vespalib::Sequence<IPersistenceHandler *> {
+private:
+ bool _done;
+ IPersistenceHandler *_value;
+public:
+ SequenceOfOne(IPersistenceHandler *value) : _done(false), _value(value) {}
+
+ virtual bool valid() const override { return !_done; }
+ virtual IPersistenceHandler *get() const override { return _value; }
+ virtual void next() override { _done = true; }
+ static SequenceOfOne::UP make(IPersistenceHandler *value) { return std::make_unique<SequenceOfOne>(value); }
+};
+
+}
+
+HandlerSnapshot::UP
+PersistenceHandlerMap::getHandlerSnapshot(document::BucketSpace bucketSpace,
+ const document::DocumentId &id) const
+{
+ if (!id.hasDocType()) {
+ return getHandlerSnapshot(bucketSpace);
+ }
+ IPersistenceHandler::SP handler = getHandler(bucketSpace, DocTypeName(id.getDocType()));
+ if (!handler.get()) {
+ return HandlerSnapshot::UP();
+ }
+ return std::make_unique<HandlerSnapshot>(SequenceOfOne::make(handler.get()), 1);
+}
+
+}
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistence_handler_map.h b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistence_handler_map.h
new file mode 100644
index 00000000000..8a852066284
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistence_handler_map.h
@@ -0,0 +1,69 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/document/bucket/bucketspace.h>
+#include <vespa/searchcore/proton/common/handlermap.hpp>
+#include <vespa/vespalib/util/sequence.h>
+#include <memory>
+#include <unordered_map>
+
+namespace document { class DocumentId; }
+
+namespace proton {
+
+class DocTypeName;
+class IPersistenceHandler;
+
+/**
+ * Class that maintains a set of PersistenceHandler instances
+ * and provides mapping from bucket space to the set of handlers in that space.
+ */
+class PersistenceHandlerMap {
+public:
+ using PersistenceHandlerSequence = vespalib::Sequence<IPersistenceHandler *>;
+ using PersistenceHandlerSP = std::shared_ptr<IPersistenceHandler>;
+
+ class HandlerSnapshot {
+ private:
+ PersistenceHandlerSequence::UP _handlers;
+ size_t _size;
+ public:
+ using UP = std::unique_ptr<HandlerSnapshot>;
+ HandlerSnapshot(PersistenceHandlerSequence::UP handlers_, size_t size_)
+ : _handlers(std::move(handlers_)),
+ _size(size_)
+ {}
+ HandlerSnapshot(const HandlerSnapshot &) = delete;
+ HandlerSnapshot & operator = (const HandlerSnapshot &) = delete;
+
+ size_t size() const { return _size; }
+ PersistenceHandlerSequence &handlers() { return *_handlers; }
+ static PersistenceHandlerSequence::UP release(HandlerSnapshot &&rhs) { return std::move(rhs._handlers); }
+ };
+
+private:
+ using DocTypeToHandlerMap = HandlerMap<IPersistenceHandler>;
+
+ struct BucketSpaceHash {
+ std::size_t operator() (const document::BucketSpace &bucketSpace) const { return bucketSpace.getId(); }
+ };
+
+ std::unordered_map<document::BucketSpace, DocTypeToHandlerMap, BucketSpaceHash> _map;
+
+public:
+ PersistenceHandlerMap();
+
+ PersistenceHandlerSP putHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType,
+ const PersistenceHandlerSP &handler);
+ PersistenceHandlerSP removeHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType);
+ PersistenceHandlerSP getHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType) const;
+ HandlerSnapshot::UP getHandlerSnapshot() const;
+ HandlerSnapshot::UP getHandlerSnapshot(document::BucketSpace bucketSpace) const;
+ HandlerSnapshot::UP getHandlerSnapshot(document::BucketSpace bucketSpace,
+ const document::DocumentId &id) const;
+};
+
+}
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp
index 399b70f6c56..69a3a902af8 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.cpp
@@ -171,39 +171,22 @@ PersistenceEngine::HandlerSnapshot::UP
PersistenceEngine::getHandlerSnapshot() const
{
LockGuard guard(_lock);
- return std::make_unique<HandlerSnapshot>(_handlers.snapshot(), _handlers.size());
+ return _handlers.getHandlerSnapshot();
}
-namespace {
-template <typename T>
-class SequenceOfOne : public Sequence<T> {
- bool _done;
- T _value;
-public:
- SequenceOfOne(const T &value) : _done(false), _value(value) {}
-
- virtual bool valid() const override { return !_done; }
- virtual T get() const override { return _value; }
- virtual void next() override { _done = true; }
-};
-
-template <typename T>
-typename Sequence<T>::UP make_sequence(const T &value) {
- return typename Sequence<T>::UP(new SequenceOfOne<T>(value));
+PersistenceEngine::HandlerSnapshot::UP
+PersistenceEngine::getHandlerSnapshot(document::BucketSpace bucketSpace) const
+{
+ LockGuard guard(_lock);
+ return _handlers.getHandlerSnapshot(bucketSpace);
}
-} // namespace
PersistenceEngine::HandlerSnapshot::UP
-PersistenceEngine::getHandlerSnapshot(const DocumentId &id) const {
- if (!id.hasDocType()) {
- return getHandlerSnapshot();
- }
- IPersistenceHandler::SP handler = getHandler(DocTypeName(id.getDocType()));
- if (!handler.get()) {
- return HandlerSnapshot::UP();
- }
- return HandlerSnapshot::UP(
- new HandlerSnapshot(make_sequence(handler.get()), 1));
+PersistenceEngine::getHandlerSnapshot(document::BucketSpace bucketSpace,
+ const DocumentId &id) const
+{
+ LockGuard guard(_lock);
+ return _handlers.getHandlerSnapshot(bucketSpace, id);
}
PersistenceEngine::PersistenceEngine(IPersistenceEngineOwner &owner,
@@ -233,28 +216,31 @@ PersistenceEngine::~PersistenceEngine()
IPersistenceHandler::SP
-PersistenceEngine::putHandler(const DocTypeName &docType,
+PersistenceEngine::putHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType,
const IPersistenceHandler::SP &handler)
{
LockGuard guard(_lock);
- return _handlers.putHandler(docType, handler);
+ return _handlers.putHandler(bucketSpace, docType, handler);
}
IPersistenceHandler::SP
-PersistenceEngine::getHandler(const DocTypeName &docType) const
+PersistenceEngine::getHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType) const
{
LockGuard guard(_lock);
- return _handlers.getHandler(docType);
+ return _handlers.getHandler(bucketSpace, docType);
}
IPersistenceHandler::SP
-PersistenceEngine::removeHandler(const DocTypeName &docType)
+PersistenceEngine::removeHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType)
{
// TODO: Grab bucket list and treat them as modified
LockGuard guard(_lock);
- return _handlers.removeHandler(docType);
+ return _handlers.removeHandler(bucketSpace, docType);
}
@@ -282,7 +268,7 @@ PersistenceEngine::getPartitionStates() const
BucketIdListResult
-PersistenceEngine::listBuckets(PartitionId id) const
+PersistenceEngine::listBuckets(BucketSpace bucketSpace, PartitionId id) const
{
// Runs in SPI thread.
// No handover to write threads in persistence handlers.
@@ -291,7 +277,7 @@ PersistenceEngine::listBuckets(PartitionId id) const
BucketIdListResult::List emptyList;
return BucketIdListResult(emptyList);
}
- HandlerSnapshot::UP snap = getHandlerSnapshot();
+ HandlerSnapshot::UP snap = getHandlerSnapshot(bucketSpace);
BucketIdListResultHandler resultHandler;
for (; snap->handlers().valid(); snap->handlers().next()) {
IPersistenceHandler *handler = snap->handlers().get();
@@ -323,7 +309,7 @@ PersistenceEngine::setActiveState(const Bucket& bucket,
storage::spi::BucketInfo::ActiveState newState)
{
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
- HandlerSnapshot::UP snap = getHandlerSnapshot();
+ HandlerSnapshot::UP snap = getHandlerSnapshot(bucket.getBucketSpace());
GenericResultHandler resultHandler(snap->size());
for (; snap->handlers().valid(); snap->handlers().next()) {
IPersistenceHandler *handler = snap->handlers().get();
@@ -340,7 +326,7 @@ PersistenceEngine::getBucketInfo(const Bucket& b) const
// Runs in SPI thread.
// No handover to write threads in persistence handlers.
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
- HandlerSnapshot::UP snap = getHandlerSnapshot();
+ HandlerSnapshot::UP snap = getHandlerSnapshot(b.getBucketSpace());
BucketInfoResultHandler resultHandler;
for (; snap->handlers().valid(); snap->handlers().next()) {
IPersistenceHandler *handler = snap->handlers().get();
@@ -374,7 +360,7 @@ PersistenceEngine::put(const Bucket& b, Timestamp t, const document::Document::S
"Old id scheme not supported in elastic mode (%s)",
doc->getId().toString().c_str()));
}
- IPersistenceHandler::SP handler = getHandler(docType);
+ IPersistenceHandler::SP handler = getHandler(b.getBucketSpace(), docType);
if (handler.get() == NULL) {
return Result(Result::PERMANENT_ERROR,
make_string("No handler for document type '%s'",
@@ -397,7 +383,7 @@ PersistenceEngine::remove(const Bucket& b, Timestamp t, const DocumentId& did, C
b.toString().c_str(),
static_cast<uint64_t>(t.getValue()),
did.toString().c_str());
- HandlerSnapshot::UP snap = getHandlerSnapshot(did);
+ HandlerSnapshot::UP snap = getHandlerSnapshot(b.getBucketSpace(), did);
if (!snap.get()) {
return RemoveResult(false);
}
@@ -432,7 +418,7 @@ PersistenceEngine::update(const Bucket& b, Timestamp t, const DocumentUpdate::SP
docType.toString().c_str(),
upd->getId().toString().c_str(),
(upd->getCreateIfNonExistent() ? "true" : "false"));
- IPersistenceHandler::SP handler = getHandler(docType);
+ IPersistenceHandler::SP handler = getHandler(b.getBucketSpace(), docType);
TransportLatch latch(1);
if (handler.get() != NULL) {
FeedToken token(latch, mbus::Reply::UP(new documentapi::UpdateDocumentReply()));
@@ -453,7 +439,7 @@ PersistenceEngine::get(const Bucket& b,
Context& context) const
{
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
- HandlerSnapshot::UP snapshot = getHandlerSnapshot();
+ HandlerSnapshot::UP snapshot = getHandlerSnapshot(b.getBucketSpace());
for (PersistenceHandlerSequence & handlers = snapshot->handlers(); handlers.valid(); handlers.next()) {
BucketGuard::UP bucket_guard = handlers.get()->lockBucket(b);
@@ -486,7 +472,7 @@ PersistenceEngine::createIterator(const Bucket &bucket,
Context & context)
{
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
- HandlerSnapshot::UP snapshot = getHandlerSnapshot();
+ HandlerSnapshot::UP snapshot = getHandlerSnapshot(bucket.getBucketSpace());
auto entry = std::make_unique<IteratorEntry>(context.getReadConsistency(), bucket, fields, selection,
versions, _defaultSerializedSize, _ignoreMaxBytes);
@@ -562,7 +548,7 @@ PersistenceEngine::createBucket(const Bucket &b, Context &)
{
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
LOG(spam, "createBucket(%s)", b.toString().c_str());
- HandlerSnapshot::UP snap = getHandlerSnapshot();
+ HandlerSnapshot::UP snap = getHandlerSnapshot(b.getBucketSpace());
TransportLatch latch(snap->size());
for (; snap->handlers().valid(); snap->handlers().next()) {
IPersistenceHandler *handler = snap->handlers().get();
@@ -579,7 +565,7 @@ PersistenceEngine::deleteBucket(const Bucket& b, Context&)
{
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
LOG(spam, "deleteBucket(%s)", b.toString().c_str());
- HandlerSnapshot::UP snap = getHandlerSnapshot();
+ HandlerSnapshot::UP snap = getHandlerSnapshot(b.getBucketSpace());
TransportLatch latch(snap->size());
for (; snap->handlers().valid(); snap->handlers().next()) {
IPersistenceHandler *handler = snap->handlers().get();
@@ -592,16 +578,16 @@ PersistenceEngine::deleteBucket(const Bucket& b, Context&)
BucketIdListResult
-PersistenceEngine::getModifiedBuckets() const
+PersistenceEngine::getModifiedBuckets(BucketSpace bucketSpace) const
{
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
typedef BucketIdListResultV MBV;
MBV extraModifiedBuckets;
{
LockGuard guard(_lock);
- extraModifiedBuckets.swap(_extraModifiedBuckets);
+ extraModifiedBuckets.swap(_extraModifiedBuckets[bucketSpace]);
}
- HandlerSnapshot::UP snap = getHandlerSnapshot();
+ HandlerSnapshot::UP snap = getHandlerSnapshot(bucketSpace);
SynchronizedBucketIdListResultHandler resultHandler(snap->size() + extraModifiedBuckets.size());
for (; snap->handlers().valid(); snap->handlers().next()) {
IPersistenceHandler *handler = snap->handlers().get();
@@ -620,7 +606,9 @@ PersistenceEngine::split(const Bucket& source, const Bucket& target1, const Buck
{
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
LOG(spam, "split(%s, %s, %s)", source.toString().c_str(), target1.toString().c_str(), target2.toString().c_str());
- HandlerSnapshot::UP snap = getHandlerSnapshot();
+ assert(source.getBucketSpace() == target1.getBucketSpace());
+ assert(source.getBucketSpace() == target2.getBucketSpace());
+ HandlerSnapshot::UP snap = getHandlerSnapshot(source.getBucketSpace());
TransportLatch latch(snap->size());
for (; snap->handlers().valid(); snap->handlers().next()) {
IPersistenceHandler *handler = snap->handlers().get();
@@ -637,7 +625,9 @@ PersistenceEngine::join(const Bucket& source1, const Bucket& source2, const Buck
{
std::shared_lock<std::shared_timed_mutex> rguard(_rwMutex);
LOG(spam, "join(%s, %s, %s)", source1.toString().c_str(), source2.toString().c_str(), target.toString().c_str());
- HandlerSnapshot::UP snap = getHandlerSnapshot();
+ assert(source1.getBucketSpace() == target.getBucketSpace());
+ assert(source2.getBucketSpace() == target.getBucketSpace());
+ HandlerSnapshot::UP snap = getHandlerSnapshot(target.getBucketSpace());
TransportLatch latch(snap->size());
for (; snap->handlers().valid(); snap->handlers().next()) {
IPersistenceHandler *handler = snap->handlers().get();
@@ -709,13 +699,13 @@ PersistenceEngine::propagateSavedClusterState(IPersistenceHandler &handler)
}
void
-PersistenceEngine::grabExtraModifiedBuckets(IPersistenceHandler &handler)
+PersistenceEngine::grabExtraModifiedBuckets(BucketSpace bucketSpace, IPersistenceHandler &handler)
{
BucketIdListResultHandler resultHandler;
handler.handleListBuckets(resultHandler);
auto result = std::make_shared<BucketIdListResult>(resultHandler.getResult());
LockGuard guard(_lock);
- _extraModifiedBuckets.push_back(result);
+ _extraModifiedBuckets[bucketSpace].push_back(result);
}
@@ -742,9 +732,10 @@ public:
};
void
-PersistenceEngine::populateInitialBucketDB(IPersistenceHandler &targetHandler)
+PersistenceEngine::populateInitialBucketDB(BucketSpace bucketSpace,
+ IPersistenceHandler &targetHandler)
{
- HandlerSnapshot::UP snap = getHandlerSnapshot();
+ HandlerSnapshot::UP snap = getHandlerSnapshot(bucketSpace);
size_t snapSize(snap->size());
size_t flawed = 0;
diff --git a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h
index d8bbe0b3c0d..b2abc7911d7 100644
--- a/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h
+++ b/searchcore/src/vespa/searchcore/proton/persistenceengine/persistenceengine.h
@@ -3,6 +3,8 @@
#include "document_iterator.h"
#include "i_resource_write_filter.h"
+#include "persistence_handler_map.h"
+#include <vespa/document/bucket/bucketspace.h>
#include <vespa/persistence/spi/abstractpersistenceprovider.h>
#include <vespa/searchcore/proton/common/handlermap.hpp>
#include <vespa/searchcore/proton/persistenceengine/ipersistencehandler.h>
@@ -16,7 +18,8 @@ class IPersistenceEngineOwner;
class PersistenceEngine : public storage::spi::AbstractPersistenceProvider {
private:
- typedef vespalib::Sequence<IPersistenceHandler *> PersistenceHandlerSequence;
+ using PersistenceHandlerSequence = vespalib::Sequence<IPersistenceHandler *>;
+ using HandlerSnapshot = PersistenceHandlerMap::HandlerSnapshot;
using DocumentUpdate = document::DocumentUpdate;
using Bucket = storage::spi::Bucket;
using BucketIdListResult = storage::spi::BucketIdListResult;
@@ -38,23 +41,6 @@ private:
using Timestamp = storage::spi::Timestamp;
using TimestampList = storage::spi::TimestampList;
using UpdateResult = storage::spi::UpdateResult;
- class HandlerSnapshot {
- private:
- PersistenceHandlerSequence::UP _handlers;
- size_t _size;
- public:
- typedef std::unique_ptr<HandlerSnapshot> UP;
- HandlerSnapshot(PersistenceHandlerSequence::UP handlers_, size_t size_) :
- _handlers(handlers_.release()),
- _size(size_)
- {}
- HandlerSnapshot(const HandlerSnapshot &) = delete;
- HandlerSnapshot & operator = (const HandlerSnapshot &) = delete;
-
- size_t size() const { return _size; }
- PersistenceHandlerSequence &handlers() { return *_handlers; }
- static PersistenceHandlerSequence::UP release(HandlerSnapshot && rhs) { return std::move(rhs._handlers); }
- };
struct IteratorEntry {
PersistenceHandlerSequence::UP handler_sequence;
@@ -73,23 +59,33 @@ private:
in_use(false),
bucket_guards() {}
};
+ struct BucketSpaceHash {
+ std::size_t operator() (const document::BucketSpace &bucketSpace) const { return bucketSpace.getId(); }
+ };
+
typedef std::map<IteratorId, IteratorEntry *> Iterators;
typedef std::vector<std::shared_ptr<BucketIdListResult> > BucketIdListResultV;
+ using ExtraModifiedBuckets = std::unordered_map<BucketSpace, BucketIdListResultV, BucketSpaceHash>;
+
const ssize_t _defaultSerializedSize;
const bool _ignoreMaxBytes;
- mutable HandlerMap<IPersistenceHandler> _handlers;
+ PersistenceHandlerMap _handlers;
vespalib::Lock _lock;
Iterators _iterators;
vespalib::Lock _iterators_lock;
IPersistenceEngineOwner &_owner;
const IResourceWriteFilter &_writeFilter;
ClusterState::SP _clusterState;
- mutable BucketIdListResultV _extraModifiedBuckets;
+ mutable ExtraModifiedBuckets _extraModifiedBuckets;
mutable std::shared_timed_mutex _rwMutex;
+ IPersistenceHandler::SP getHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType) const;
HandlerSnapshot::UP getHandlerSnapshot() const;
- HandlerSnapshot::UP getHandlerSnapshot(const document::DocumentId &) const;
+ HandlerSnapshot::UP getHandlerSnapshot(document::BucketSpace bucketSpace) const;
+ HandlerSnapshot::UP getHandlerSnapshot(document::BucketSpace bucketSpace,
+ const document::DocumentId &docId) const;
void saveClusterState(const ClusterState &calc);
ClusterState::SP savedClusterState() const;
@@ -102,14 +98,16 @@ public:
ssize_t defaultSerializedSize, bool ignoreMaxBytes);
~PersistenceEngine();
- IPersistenceHandler::SP putHandler(const DocTypeName &docType, const IPersistenceHandler::SP &handler);
- IPersistenceHandler::SP getHandler(const DocTypeName &docType) const;
- IPersistenceHandler::SP removeHandler(const DocTypeName &docType);
+ IPersistenceHandler::SP putHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType,
+ const IPersistenceHandler::SP &handler);
+ IPersistenceHandler::SP removeHandler(document::BucketSpace bucketSpace,
+ const DocTypeName &docType);
// Implements PersistenceProvider
virtual Result initialize() override;
virtual PartitionStateListResult getPartitionStates() const override;
- virtual BucketIdListResult listBuckets(PartitionId) const override;
+ virtual BucketIdListResult listBuckets(BucketSpace bucketSpace, PartitionId) const override;
virtual Result setClusterState(const ClusterState& calc) override;
virtual Result setActiveState(const Bucket& bucket, BucketInfo::ActiveState newState) override;
virtual BucketInfoResult getBucketInfo(const Bucket&) const override;
@@ -124,7 +122,7 @@ public:
virtual Result createBucket(const Bucket &bucketId, Context &) override ;
virtual Result deleteBucket(const Bucket&, Context&) override;
- virtual BucketIdListResult getModifiedBuckets() const override;
+ virtual BucketIdListResult getModifiedBuckets(BucketSpace bucketSpace) const override;
virtual Result split(const Bucket& source, const Bucket& target1, const Bucket& target2, Context&) override;
virtual Result join(const Bucket& source1, const Bucket& source2, const Bucket& target, Context&) override;
@@ -132,8 +130,8 @@ public:
void destroyIterators();
void propagateSavedClusterState(IPersistenceHandler &handler);
- void grabExtraModifiedBuckets(IPersistenceHandler &handler);
- void populateInitialBucketDB(IPersistenceHandler &targetHandler);
+ void grabExtraModifiedBuckets(BucketSpace bucketSpace, IPersistenceHandler &handler);
+ void populateInitialBucketDB(BucketSpace bucketSpace, IPersistenceHandler &targetHandler);
std::unique_lock<std::shared_timed_mutex> getWLock() const;
};
diff --git a/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_handler.cpp b/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_handler.cpp
index bd3574327bc..abaf37f5084 100644
--- a/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_handler.cpp
+++ b/searchcore/src/vespa/searchcore/proton/reference/gid_to_lid_change_handler.cpp
@@ -2,14 +2,12 @@
#include "gid_to_lid_change_handler.h"
#include "i_gid_to_lid_change_listener.h"
-#include <vespa/searchlib/common/lambdatask.h>
#include <vespa/searchcorespi/index/i_thread_service.h>
-#include <vespa/document/base/globalid.h>
-#include <cassert>
#include <vespa/vespalib/stllike/hash_map.hpp>
+#include <vespa/vespalib/util/lambdatask.h>
+#include <cassert>
-using search::makeLambdaTask;
-
+using vespalib::makeLambdaTask;
namespace proton {
@@ -22,7 +20,6 @@ GidToLidChangeHandler::GidToLidChangeHandler()
{
}
-
GidToLidChangeHandler::~GidToLidChangeHandler()
{
assert(_closed);
diff --git a/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt
index c5b0cd70527..5bb512c12ce 100644
--- a/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt
+++ b/searchcore/src/vespa/searchcore/proton/server/CMakeLists.txt
@@ -68,7 +68,6 @@ vespa_add_library(searchcore_server STATIC
operationdonecontext.cpp
pendinglidtracker.cpp
persistencehandlerproxy.cpp
- persistenceproviderproxy.cpp
proton.cpp
proton_config_fetcher.cpp
proton_config_snapshot.cpp
@@ -94,6 +93,7 @@ vespa_add_library(searchcore_server STATIC
storeonlydocsubdb.cpp
storeonlyfeedview.cpp
summaryadapter.cpp
+ threading_service_config.cpp
tlcproxy.cpp
tlssyncer.cpp
transactionlogmanager.cpp
diff --git a/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.h b/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.h
index b4fcf71129c..f0252bb694f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.h
+++ b/searchcore/src/vespa/searchcore/proton/server/bootstrapconfig.h
@@ -56,49 +56,24 @@ public:
getDocumenttypesConfigSP() const { return _documenttypes; }
const document::DocumentTypeRepo::SP &
- getDocumentTypeRepoSP() const
- {
- return _repo;
- }
+ getDocumentTypeRepoSP() const { return _repo; }
const vespa::config::search::core::ProtonConfig &
- getProtonConfig() const
- {
- return *_proton;
- }
+ getProtonConfig() const { return *_proton; }
const ProtonConfigSP &
- getProtonConfigSP() const
- {
- return _proton;
- }
+ getProtonConfigSP() const { return _proton; }
const search::TuneFileDocumentDB::SP &
- getTuneFileDocumentDBSP() const
- {
- return _tuneFileDocumentDB;
- }
-
- int64_t
- getGeneration() const
- {
- return _generation;
- }
-
- void
- setGeneration(int64_t generation)
- {
- _generation = generation;
- }
+ getTuneFileDocumentDBSP() const { return _tuneFileDocumentDB; }
+
+ int64_t getGeneration() const { return _generation; }
/**
* Shared pointers are checked for identity, not equality.
*/
- bool
- operator==(const BootstrapConfig &rhs) const;
-
- bool
- valid() const;
+ bool operator==(const BootstrapConfig &rhs) const;
+ bool valid() const;
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp
index f36c2add23a..cb929e0a6c7 100644
--- a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.cpp
@@ -67,7 +67,7 @@ BucketMoveJob::checkBucket(const BucketId &bucket,
if (_calc->nodeRetired() && !isActive) {
return;
}
- const bool shouldBeReady = _calc->shouldBeReady(bucket);
+ const bool shouldBeReady = _calc->shouldBeReady(document::Bucket(_bucketSpace, bucket));
const bool wantReady = shouldBeReady || isActive;
LOG(spam, "checkBucket(): bucket(%s), shouldBeReady(%s), active(%s)",
bucket.toString().c_str(), bool2str(shouldBeReady), bool2str(isActive));
@@ -160,7 +160,8 @@ BucketMoveJob(const IBucketStateCalculator::SP &calc,
IBucketStateChangedNotifier &bucketStateChangedNotifier,
IDiskMemUsageNotifier &diskMemUsageNotifier,
const BlockableMaintenanceJobConfig &blockableConfig,
- const vespalib::string &docTypeName)
+ const vespalib::string &docTypeName,
+ document::BucketSpace bucketSpace)
: BlockableMaintenanceJob("move_buckets." + docTypeName, 0.0, 0.0, blockableConfig),
IClusterStateChangedHandler(),
IBucketFreezeListener(),
@@ -177,6 +178,7 @@ BucketMoveJob(const IBucketStateCalculator::SP &calc,
_scanPos(),
_scanPass(FIRST_SCAN_PASS),
_endPos(),
+ _bucketSpace(bucketSpace),
_delayedBuckets(),
_delayedBucketsFrozen(),
_frozenBuckets(frozenBuckets),
@@ -213,7 +215,7 @@ BucketMoveJob::maybeCancelMover(DocumentBucketMover &mover)
if (!mover.bucketDone()) {
bool ready = mover.getSource() == &_ready;
if (isBlocked() ||
- _calc->shouldBeReady(mover.getBucket()) == ready) {
+ _calc->shouldBeReady(document::Bucket(_bucketSpace, mover.getBucket())) == ready) {
mover.cancel();
}
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h
index 6b43af9d4c0..7147613caee 100644
--- a/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h
+++ b/searchcore/src/vespa/searchcore/proton/server/bucketmovejob.h
@@ -91,6 +91,7 @@ private:
ScanPosition _scanPos;
uint32_t _scanPass;
ScanPosition _endPos;
+ document::BucketSpace _bucketSpace;
typedef std::set<document::BucketId> DelayedBucketSet;
@@ -147,7 +148,8 @@ public:
IBucketStateChangedNotifier &bucketStateChangedNotifier,
IDiskMemUsageNotifier &diskMemUsageNotifier,
const BlockableMaintenanceJobConfig &blockableConfig,
- const vespalib::string &docTypeName);
+ const vespalib::string &docTypeName,
+ document::BucketSpace bucketSpace);
virtual ~BucketMoveJob();
diff --git a/searchcore/src/vespa/searchcore/proton/server/clusterstatehandler.cpp b/searchcore/src/vespa/searchcore/proton/server/clusterstatehandler.cpp
index 3b3437b4346..fa0e0f33469 100644
--- a/searchcore/src/vespa/searchcore/proton/server/clusterstatehandler.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/clusterstatehandler.cpp
@@ -39,7 +39,7 @@ public:
_nodeRetired(_calc.nodeRetired())
{
}
- bool shouldBeReady(const document::BucketId &bucket) const override {
+ bool shouldBeReady(const document::Bucket &bucket) const override {
return _calc.shouldBeReady(Bucket(bucket, PartitionId(0)));
}
bool clusterUp() const override { return _clusterUp; }
diff --git a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp
index 7439081f4e6..30ee6e1ba75 100644
--- a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.cpp
@@ -34,13 +34,15 @@ getRepo(const std::vector<IFeedView::SP> &views)
};
CombiningFeedView::CombiningFeedView(const std::vector<IFeedView::SP> &views,
+ document::BucketSpace bucketSpace,
const IBucketStateCalculator::SP &calc)
: _repo(getRepo(views)),
_views(views),
_metaStores(),
_calc(calc),
_clusterUp(calc.get() != NULL && calc->clusterUp()),
- _forceReady(!_clusterUp || !hasNotReadyFeedView())
+ _forceReady(!_clusterUp || !hasNotReadyFeedView()),
+ _bucketSpace(bucketSpace)
{
_metaStores.reserve(views.size());
for (const auto &view : views) {
@@ -273,17 +275,18 @@ CombiningFeedView::setCalculator(const IBucketStateCalculator::SP &newCalc)
bool
CombiningFeedView::shouldBeReady(const document::BucketId &bucket) const
{
+ document::Bucket dbucket(_bucketSpace, bucket);
LOG(debug,
"shouldBeReady(%s): forceReady(%s), clusterUp(%s), calcReady(%s)",
bucket.toString().c_str(),
(_forceReady ? "true" : "false"),
(_clusterUp ? "true" : "false"),
(_calc.get() != NULL ?
- (_calc->shouldBeReady(bucket) ? "true" : "false") : "null"));
+ (_calc->shouldBeReady(dbucket) ? "true" : "false") : "null"));
const documentmetastore::IBucketHandler *readyMetaStore =
_metaStores[getReadyFeedViewId()];
bool isActive = readyMetaStore->getBucketDB().takeGuard()->isActiveBucket(bucket);
- return _forceReady || isActive || _calc->shouldBeReady(bucket);
+ return _forceReady || isActive || _calc->shouldBeReady(dbucket);
}
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h
index ef8616a25eb..ea4ac64176a 100644
--- a/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h
+++ b/searchcore/src/vespa/searchcore/proton/server/combiningfeedview.h
@@ -29,6 +29,7 @@ private:
IBucketStateCalculator::SP _calc;
bool _clusterUp;
bool _forceReady;
+ document::BucketSpace _bucketSpace;
const ISimpleDocumentMetaStore * getDocumentMetaStorePtr() const override;
@@ -59,6 +60,7 @@ public:
typedef std::shared_ptr<CombiningFeedView> SP;
CombiningFeedView(const std::vector<IFeedView::SP> &views,
+ document::BucketSpace bucketSpace,
const IBucketStateCalculator::SP &calc);
virtual ~CombiningFeedView();
diff --git a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp
index d489f477df2..32b2b6aaba6 100644
--- a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.cpp
@@ -3,6 +3,7 @@
#include "disk_mem_usage_filter.h"
#include "i_disk_mem_usage_listener.h"
#include <vespa/log/log.h>
+#include <vespa/searchcore/proton/common/hw_info.h>
LOG_SETUP(".proton.server.disk_mem_usage_filter");
@@ -43,12 +44,12 @@ void
makeDiskStatsMessage(std::ostream &os,
double diskUsed,
double diskLimit,
- const DiskMemUsageFilter::space_info &diskStats)
+ const HwInfo &hwInfo,
+ uint64_t usedDiskSizeBytes)
{
os << "stats: { ";
- os << "capacity: " << diskStats.capacity << ", ";
- os << "free: " << diskStats.free << ", ";
- os << "available: " << diskStats.available << ", ";
+ os << "capacity: " << hwInfo.disk().sizeBytes() << ", ";
+ os << "used: " << usedDiskSizeBytes << ", ";
os << "diskUsed: " << diskUsed << ", ";
os << "diskLimit: " << diskLimit << "}";
}
@@ -57,31 +58,31 @@ void
makeDiskLimitMessage(std::ostream &os,
double diskUsed,
double diskLimit,
- const DiskMemUsageFilter::space_info &diskStats)
+ const HwInfo &hwInfo,
+ uint64_t usedDiskSizeBytes)
{
os << "diskLimitReached: { ";
os << "action: \"add more content nodes\", ";
os << "reason: \"disk used (" << diskUsed << ") > disk limit (" << diskLimit << ")\", ";
- makeDiskStatsMessage(os, diskUsed, diskLimit, diskStats);
+ makeDiskStatsMessage(os, diskUsed, diskLimit, hwInfo, usedDiskSizeBytes);
os << "}";
}
-
vespalib::string
makeUnblockingMessage(double memoryUsed,
double memoryLimit,
const vespalib::ProcessMemoryStats &memoryStats,
- uint64_t physicalMemory,
+ const HwInfo &hwInfo,
double diskUsed,
double diskLimit,
- const DiskMemUsageFilter::space_info &diskStats)
+ uint64_t usedDiskSizeBytes)
{
std::ostringstream os;
os << "memoryLimitOK: { ";
- makeMemoryStatsMessage(os, memoryUsed, memoryLimit, memoryStats, physicalMemory);
+ makeMemoryStatsMessage(os, memoryUsed, memoryLimit, memoryStats, hwInfo.memory().sizeBytes());
os << "}, ";
os << "diskLimitOK: { ";
- makeDiskStatsMessage(os, diskUsed, diskLimit, diskStats);
+ makeDiskStatsMessage(os, diskUsed, diskLimit, hwInfo, usedDiskSizeBytes);
os << "}";
return os.str();
}
@@ -97,7 +98,7 @@ DiskMemUsageFilter::recalcState(const Guard &guard)
if (memoryUsed > _config._memoryLimit) {
hasMessage = true;
makeMemoryLimitMessage(message, memoryUsed,
- _config._memoryLimit, _memoryStats, _physicalMemory);
+ _config._memoryLimit, _memoryStats, _hwInfo.memory().sizeBytes());
}
double diskUsed = getDiskUsedRatio(guard);
if (diskUsed > _config._diskLimit) {
@@ -105,7 +106,7 @@ DiskMemUsageFilter::recalcState(const Guard &guard)
message << ", ";
}
hasMessage = true;
- makeDiskLimitMessage(message, diskUsed, _config._diskLimit, _diskStats);
+ makeDiskLimitMessage(message, diskUsed, _config._diskLimit, _hwInfo, _diskUsedSizeBytes);
}
if (hasMessage) {
if (_acceptWrite) {
@@ -118,10 +119,10 @@ DiskMemUsageFilter::recalcState(const Guard &guard)
vespalib::string unblockMsg = makeUnblockingMessage(memoryUsed,
_config._memoryLimit,
_memoryStats,
- _physicalMemory,
+ _hwInfo,
diskUsed,
_config._diskLimit,
- _diskStats);
+ _diskUsedSizeBytes);
LOG(info, "Write operations are now un-blocked: '%s'", unblockMsg.c_str());
}
_state = State();
@@ -137,23 +138,23 @@ DiskMemUsageFilter::getMemoryUsedRatio(const Guard &guard) const
{
(void) guard;
uint64_t unscaledMemoryUsed = _memoryStats.getAnonymousRss();
- return static_cast<double>(unscaledMemoryUsed) / _physicalMemory;
+ return static_cast<double>(unscaledMemoryUsed) / _hwInfo.memory().sizeBytes();
}
double
DiskMemUsageFilter::getDiskUsedRatio(const Guard &guard) const
{
(void) guard;
- double availableDiskSpaceRatio = static_cast<double>(_diskStats.available) /
- static_cast<double>(_diskStats.capacity);
- return 1.0 - availableDiskSpaceRatio;
+ double usedDiskSpaceRatio = static_cast<double>(_diskUsedSizeBytes) /
+ static_cast<double>(_hwInfo.disk().sizeBytes());
+ return usedDiskSpaceRatio;
}
-DiskMemUsageFilter::DiskMemUsageFilter(uint64_t physicalMemory_in)
+DiskMemUsageFilter::DiskMemUsageFilter(const HwInfo &hwInfo)
: _lock(),
+ _hwInfo(hwInfo),
_memoryStats(),
- _physicalMemory(physicalMemory_in),
- _diskStats(),
+ _diskUsedSizeBytes(),
_config(),
_state(),
_acceptWrite(true),
@@ -172,10 +173,10 @@ DiskMemUsageFilter::setMemoryStats(vespalib::ProcessMemoryStats memoryStats_in)
}
void
-DiskMemUsageFilter::setDiskStats(space_info diskStats_in)
+DiskMemUsageFilter::setDiskUsedSize(uint64_t diskUsedSizeBytes)
{
Guard guard(_lock);
- _diskStats = diskStats_in;
+ _diskUsedSizeBytes = diskUsedSizeBytes;
recalcState(guard);
}
@@ -194,11 +195,11 @@ DiskMemUsageFilter::getMemoryStats() const
return _memoryStats;
}
-DiskMemUsageFilter::space_info
-DiskMemUsageFilter::getDiskStats() const
+uint64_t
+DiskMemUsageFilter::getDiskUsedSize() const
{
Guard guard(_lock);
- return _diskStats;
+ return _diskUsedSizeBytes;
}
DiskMemUsageFilter::Config
diff --git a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.h b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.h
index c06d2e2ab0a..4906348a54d 100644
--- a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.h
+++ b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_filter.h
@@ -4,12 +4,12 @@
#include "i_disk_mem_usage_notifier.h"
#include "disk_mem_usage_state.h"
+#include <vespa/searchcore/proton/common/hw_info.h>
#include <vespa/searchcore/proton/persistenceengine/i_resource_write_filter.h>
#include <vespa/vespalib/util/process_memory_stats.h>
-#include <mutex>
#include <atomic>
#include <experimental/filesystem>
-
+#include <mutex>
namespace proton {
@@ -39,10 +39,10 @@ public:
};
private:
- mutable Mutex _lock; // protect _memoryStats, _diskStats, _config, _state
+ mutable Mutex _lock; // protect _memoryStats, _usedDiskSizeBytes, _config, _state
+ HwInfo _hwInfo;
vespalib::ProcessMemoryStats _memoryStats;
- uint64_t _physicalMemory;
- space_info _diskStats;
+ uint64_t _diskUsedSizeBytes;
Config _config;
State _state;
std::atomic<bool> _acceptWrite;
@@ -55,15 +55,15 @@ private:
void notifyDiskMemUsage(const Guard &guard, DiskMemUsageState state);
public:
- DiskMemUsageFilter(uint64_t physicalMememory_in);
+ DiskMemUsageFilter(const HwInfo &hwInfo);
~DiskMemUsageFilter();
void setMemoryStats(vespalib::ProcessMemoryStats memoryStats_in);
- void setDiskStats(space_info diskStats_in);
+ void setDiskUsedSize(uint64_t diskUsedSizeBytes);
void setConfig(Config config);
vespalib::ProcessMemoryStats getMemoryStats() const;
- space_info getDiskStats() const;
+ uint64_t getDiskUsedSize() const;
Config getConfig() const;
- uint64_t getPhysicalMemory() const { return _physicalMemory; }
+ const HwInfo &getHwInfo() const { return _hwInfo; }
double getMemoryUsedRatio() const;
double getDiskUsedRatio() const;
bool acceptWriteOperation() const override;
diff --git a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp
index 7e8f16ac9a1..22af1dc1692 100644
--- a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_forwarder.cpp
@@ -1,10 +1,10 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "disk_mem_usage_forwarder.h"
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <cassert>
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
namespace proton {
diff --git a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp
index fb7d712db9d..ddbede13880 100644
--- a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.cpp
@@ -2,31 +2,17 @@
#include "disk_mem_usage_sampler.h"
#include <vespa/vespalib/util/timer.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
+#include <experimental/filesystem>
#include <unistd.h>
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
namespace proton {
-namespace {
-
-uint64_t getPhysicalMemoryBytes()
-{
- // TODO: Temporal workaround for Docker nodes. Remove when this is part of proton.cfg instead.
- if (const char *memoryEnv = std::getenv("VESPA_TOTAL_MEMORY_MB")) {
- uint64_t physicalMemoryMB = atoll(memoryEnv);
- return physicalMemoryMB * 1024u * 1024u;
- } else {
- return sysconf(_SC_PHYS_PAGES) * sysconf(_SC_PAGESIZE);
- }
-}
-
-} // namespace proton:<anonymous>
-
DiskMemUsageSampler::DiskMemUsageSampler(const std::string &path_in,
const Config &config)
- : _filter(getPhysicalMemoryBytes()),
+ : _filter(config.hwInfo),
_path(path_in),
_sampleInterval(60.0),
_periodicTimer()
@@ -43,8 +29,8 @@ void
DiskMemUsageSampler::setConfig(const Config &config)
{
_periodicTimer.reset();
- _filter.setConfig(config._filterConfig);
- _sampleInterval = config._sampleInterval;
+ _filter.setConfig(config.filterConfig);
+ _sampleInterval = config.sampleInterval;
sampleUsage();
_periodicTimer = std::make_unique<vespalib::Timer>();
_periodicTimer->scheduleAtFixedRate(makeLambdaTask([this]()
@@ -59,10 +45,43 @@ DiskMemUsageSampler::sampleUsage()
sampleDiskUsage();
}
+namespace {
+
+namespace fs = std::experimental::filesystem;
+
+uint64_t
+sampleDiskUsageOnFileSystem(const fs::path &path, const HwInfo::Disk &disk)
+{
+ auto space_info = fs::space(path);
+ uint64_t result = (space_info.capacity - space_info.available);
+ if (result > disk.sizeBytes()) {
+ return disk.sizeBytes();
+ }
+ return result;
+}
+
+uint64_t
+sampleDiskUsageInDirectory(const fs::path &path)
+{
+ uint64_t result = 0;
+ for (const auto &elem : fs::recursive_directory_iterator(path,
+ fs::directory_options::skip_permission_denied)) {
+ if (fs::is_regular_file(elem.path()) && !fs::is_symlink(elem.path())) {
+ result += fs::file_size(elem.path());
+ }
+ }
+ return result;
+}
+
+}
+
void
DiskMemUsageSampler::sampleDiskUsage()
{
- _filter.setDiskStats(std::experimental::filesystem::space(_path));
+ const auto &disk = _filter.getHwInfo().disk();
+ _filter.setDiskUsedSize(disk.shared() ?
+ sampleDiskUsageInDirectory(_path) :
+ sampleDiskUsageOnFileSystem(_path, disk));
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.h b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.h
index 198f111f052..4ed48613f6a 100644
--- a/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.h
+++ b/searchcore/src/vespa/searchcore/proton/server/disk_mem_usage_sampler.h
@@ -22,19 +22,24 @@ class DiskMemUsageSampler {
void sampleMemoryUsage();
public:
struct Config {
- DiskMemUsageFilter::Config _filterConfig;
- double _sampleInterval;
- public:
+ DiskMemUsageFilter::Config filterConfig;
+ double sampleInterval;
+ HwInfo hwInfo;
+
Config()
- : _filterConfig(),
- _sampleInterval(60.0)
+ : filterConfig(),
+ sampleInterval(60.0),
+ hwInfo()
{
}
- Config(double memoryLimit_in, double diskLimit_in,
- double sampleInterval_in)
- : _filterConfig(memoryLimit_in, diskLimit_in),
- _sampleInterval(sampleInterval_in)
+ Config(double memoryLimit_in,
+ double diskLimit_in,
+ double sampleInterval_in,
+ const HwInfo &hwInfo_in)
+ : filterConfig(memoryLimit_in, diskLimit_in),
+ sampleInterval(sampleInterval_in),
+ hwInfo(hwInfo_in)
{
}
};
diff --git a/searchcore/src/vespa/searchcore/proton/server/document_subdb_initializer.cpp b/searchcore/src/vespa/searchcore/proton/server/document_subdb_initializer.cpp
index f48a9d37e8f..a9609bf0cc5 100644
--- a/searchcore/src/vespa/searchcore/proton/server/document_subdb_initializer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/document_subdb_initializer.cpp
@@ -2,11 +2,10 @@
#include "document_subdb_initializer.h"
#include "idocumentsubdb.h"
-#include <future>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/searchcorespi/index/i_thread_service.h>
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
namespace proton {
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
index 8c89f2a36af..cffa014534e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.cpp
@@ -10,6 +10,7 @@
#include "lid_space_compaction_handler.h"
#include "maintenance_jobs_injector.h"
#include "reconfig_params.h"
+#include "bootstrapconfig.h"
#include <vespa/searchcore/proton/attribute/attribute_writer.h>
#include <vespa/searchcore/proton/attribute/imported_attributes_repo.h>
#include <vespa/searchcore/proton/common/eventlogger.h>
@@ -42,8 +43,6 @@ using vespalib::Executor;
using vespalib::IllegalStateException;
using vespalib::StateExplorer;
using vespalib::make_string;
-using vespalib::makeTask;
-using vespalib::makeClosure;
using namespace proton::matching;
using namespace search;
using namespace search::engine;
@@ -56,22 +55,13 @@ using storage::spi::Timestamp;
using search::common::FileHeaderContext;
using proton::initializer::InitializerTask;
using proton::initializer::TaskRunner;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using searchcorespi::IFlushTarget;
namespace proton {
namespace {
-
constexpr uint32_t indexing_thread_stack_size = 128 * 1024;
-
-uint32_t semiUnboundTaskLimit(uint32_t semiUnboundExecutorTaskLimit,
- uint32_t indexingThreads)
-{
- uint32_t taskLimit = semiUnboundExecutorTaskLimit / indexingThreads;
- return taskLimit;
-}
-
}
template <typename FunctionType>
@@ -81,16 +71,17 @@ DocumentDB::masterExecute(FunctionType &&function) {
}
DocumentDB::DocumentDB(const vespalib::string &baseDir,
- const DocumentDBConfig::SP & configSnapshot,
+ const DocumentDBConfig::SP &configSnapshot,
const vespalib::string &tlsSpec,
- matching::QueryLimiter & queryLimiter,
+ matching::QueryLimiter &queryLimiter,
const vespalib::Clock &clock,
const DocTypeName &docTypeName,
+ document::BucketSpace bucketSpace,
const ProtonConfig &protonCfg,
- IDocumentDBOwner & owner,
- vespalib::ThreadExecutor & warmupExecutor,
- vespalib::ThreadStackExecutorBase & summaryExecutor,
- search::transactionlog::Writer * tlsDirectWriter,
+ IDocumentDBOwner &owner,
+ vespalib::ThreadExecutor &warmupExecutor,
+ vespalib::ThreadStackExecutorBase &summaryExecutor,
+ search::transactionlog::Writer &tlsDirectWriter,
MetricsWireService &metricsWireService,
const FileHeaderContext &fileHeaderContext,
ConfigStore::UP config_store,
@@ -103,14 +94,13 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir,
IClusterStateChangedHandler(),
search::transactionlog::SyncProxy(),
_docTypeName(docTypeName),
+ _bucketSpace(bucketSpace),
_baseDir(baseDir + "/" + _docTypeName.toString()),
// Only one thread per executor, or performDropFeedView() will fail.
- _defaultExecutorTaskLimit(protonCfg.indexing.tasklimit),
- _semiUnboundExecutorTaskLimit(protonCfg.indexing.semiunboundtasklimit),
- _indexingThreads(protonCfg.indexing.threads),
- _writeService(std::max(1, protonCfg.indexing.threads),
+ _writeServiceConfig(ThreadingServiceConfig::make(protonCfg, hwInfo.cpu())),
+ _writeService(_writeServiceConfig.indexingThreads(),
indexing_thread_stack_size,
- _defaultExecutorTaskLimit),
+ _writeServiceConfig.defaultTaskLimit()),
_initializeThreads(initializeThreads),
_initConfigSnapshot(),
_initConfigSerialNum(0u),
@@ -123,7 +113,6 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir,
_initGate(),
_clusterStateHandler(_writeService.master()),
_bucketHandler(_writeService.master()),
- _protonSummaryCfg(protonCfg.summary),
_protonIndexCfg(protonCfg.index),
_config_store(std::move(config_store)),
_sessionManager(new matching::SessionManager(protonCfg.grouping.sessionmanager.maxentries)),
@@ -136,31 +125,12 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir,
_state(),
_dmUsageForwarder(_writeService.master()),
_writeFilter(),
- _feedHandler(_writeService,
- tlsSpec,
- docTypeName,
+ _feedHandler(_writeService, tlsSpec, docTypeName,
getMetricsCollection().getLegacyMetrics().feed,
- _state,
- *this,
- _writeFilter,
- *this,
- tlsDirectWriter),
- _subDBs(*this,
- *this,
- _feedHandler,
- _docTypeName,
- _writeService,
- warmupExecutor,
- summaryExecutor,
- fileHeaderContext,
- metricsWireService,
- getMetricsCollection(),
- queryLimiter,
- clock,
- _configMutex,
- _baseDir,
- protonCfg,
- hwInfo),
+ _state, *this, _writeFilter, *this, tlsDirectWriter),
+ _subDBs(*this, *this, _feedHandler, _docTypeName, _writeService, warmupExecutor,
+ summaryExecutor, fileHeaderContext, metricsWireService, getMetricsCollection(),
+ queryLimiter, clock, _configMutex, _baseDir, protonCfg, hwInfo),
_maintenanceController(_writeService.master(), summaryExecutor, _docTypeName),
_visibility(_feedHandler, _writeService, _feedView),
_lidSpaceCompactionHandlers(),
@@ -200,7 +170,7 @@ DocumentDB::DocumentDB(const vespalib::string &baseDir,
fastos::TimeStamp visibilityDelay = loaded_config->getMaintenanceConfigSP()->getVisibilityDelay();
_visibility.setVisibilityDelay(visibilityDelay);
if (_visibility.getVisibilityDelay() > 0) {
- _writeService.setTaskLimit(semiUnboundTaskLimit(_semiUnboundExecutorTaskLimit, _indexingThreads));
+ _writeService.setTaskLimit(_writeServiceConfig.semiUnboundTaskLimit());
}
}
@@ -239,8 +209,7 @@ void
DocumentDB::internalInit()
{
(void) _state.enterLoadState();
- _writeService.master().execute(makeTask(makeClosure(this,
- &DocumentDB::initManagers)));
+ masterExecute([this]() { initManagers(); });
}
class InitDoneTask : public vespalib::Executor::Task {
@@ -277,8 +246,7 @@ DocumentDB::initManagers()
DocumentDBConfig::SP configSnapshot(_initConfigSnapshot);
_initConfigSnapshot.reset();
InitializerTask::SP rootTask =
- _subDBs.createInitializer(*configSnapshot, _initConfigSerialNum,
- _protonSummaryCfg, _protonIndexCfg);
+ _subDBs.createInitializer(*configSnapshot, _initConfigSerialNum, _protonIndexCfg);
InitializeThreads initializeThreads = _initializeThreads;
_initializeThreads.reset();
std::shared_ptr<TaskRunner> taskRunner(std::make_shared<TaskRunner>(*initializeThreads));
@@ -325,9 +293,7 @@ DocumentDB::newConfigSnapshot(DocumentDBConfig::SP snapshot)
return;
}
}
- _writeService.master().execute(makeTask(makeClosure(this,
- &DocumentDB::performReconfig,
- _pendingConfigSnapshot.get())));
+ masterExecute([this] () { performReconfig(_pendingConfigSnapshot.get()); } );
}
@@ -370,7 +336,7 @@ void
DocumentDB::performReconfig(DocumentDBConfig::SP configSnapshot)
{
// Called by executor thread
- applyConfig(configSnapshot, getCurrentSerialNumber());
+ applyConfig(std::move(configSnapshot), getCurrentSerialNumber());
if (_state.getState() == DDBState::State::APPLY_LIVE_CONFIG) {
enterReprocessState();
}
@@ -378,7 +344,8 @@ DocumentDB::performReconfig(DocumentDBConfig::SP configSnapshot)
void
-DocumentDB::applySubDBConfig(const DocumentDBConfig &newConfigSnapshot, SerialNum serialNum, const ReconfigParams &params)
+DocumentDB::applySubDBConfig(const DocumentDBConfig &newConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params)
{
auto registry = _owner.getDocumentDBReferenceRegistry();
auto oldRepo = _activeConfigSnapshot->getDocumentTypeRepoSP();
@@ -387,18 +354,13 @@ DocumentDB::applySubDBConfig(const DocumentDBConfig &newConfigSnapshot, SerialNu
auto newRepo = newConfigSnapshot.getDocumentTypeRepoSP();
auto newDocType = newRepo->getDocumentType(_docTypeName.getName());
assert(newDocType != nullptr);
- DocumentDBReferenceResolver resolver(*registry,
- *newDocType,
- newConfigSnapshot.getImportedFieldsConfig(),
- *oldDocType,
- _refCount,
- _writeService.attributeFieldWriter());
+ DocumentDBReferenceResolver resolver(*registry, *newDocType, newConfigSnapshot.getImportedFieldsConfig(),
+ *oldDocType, _refCount, _writeService.attributeFieldWriter());
_subDBs.applyConfig(newConfigSnapshot, *_activeConfigSnapshot, serialNum, params, resolver);
}
void
-DocumentDB::applyConfig(DocumentDBConfig::SP configSnapshot,
- SerialNum serialNum)
+DocumentDB::applyConfig(DocumentDBConfig::SP configSnapshot, SerialNum serialNum)
{
// Always called by executor thread:
// Called by performReconfig() by executor thread during normal
@@ -447,15 +409,14 @@ DocumentDB::applyConfig(DocumentDBConfig::SP configSnapshot,
_feedView.get()->forceCommit(elidedConfigSave ? serialNum :
serialNum - 1);
_writeService.sync();
- fastos::TimeStamp visibilityDelay =
- configSnapshot->getMaintenanceConfigSP()->getVisibilityDelay();
+ fastos::TimeStamp visibilityDelay = configSnapshot->getMaintenanceConfigSP()->getVisibilityDelay();
hasVisibilityDelayChanged = (visibilityDelay != _visibility.getVisibilityDelay());
_visibility.setVisibilityDelay(visibilityDelay);
}
if (_visibility.getVisibilityDelay() > 0) {
- _writeService.setTaskLimit(semiUnboundTaskLimit(_semiUnboundExecutorTaskLimit, _indexingThreads));
+ _writeService.setTaskLimit(_writeServiceConfig.semiUnboundTaskLimit());
} else {
- _writeService.setTaskLimit(_defaultExecutorTaskLimit);
+ _writeService.setTaskLimit(_writeServiceConfig.defaultTaskLimit());
}
if (params.shouldSubDbsChange() || hasVisibilityDelayChanged) {
applySubDBConfig(*configSnapshot, serialNum, params);
@@ -482,8 +443,7 @@ DocumentDB::applyConfig(DocumentDBConfig::SP configSnapshot,
if (params.shouldMaintenanceControllerChange()) {
forwardMaintenanceConfig();
}
- _writeFilter.setConfig(configSnapshot->getMaintenanceConfigSP()->
- getAttributeUsageFilterConfig());
+ _writeFilter.setConfig(configSnapshot->getMaintenanceConfigSP()->getAttributeUsageFilterConfig());
if (_subDBs.getReprocessingRunner().empty()) {
_subDBs.pruneRemovedFields(serialNum);
}
@@ -508,10 +468,7 @@ DocumentDB::performDropFeedView(IFeedView::SP feedView)
_writeService.summary().sync();
// Feed view is kept alive in the closure's shared ptr.
- _writeService.index().execute(makeTask(makeClosure(this,
- &proton::DocumentDB::
- performDropFeedView2,
- feedView)));
+ _writeService.index().execute(makeLambdaTask([this, feedView] () { performDropFeedView2(feedView); }));
}
@@ -524,7 +481,7 @@ DocumentDB::performDropFeedView2(IFeedView::SP feedView)
_writeService.indexFieldWriter().sync();
// Feed view is kept alive in the closure's shared ptr.
- _writeService.master().execute(makeTask(makeClosure(&doNothing, feedView)));
+ masterExecute([feedView] () { doNothing(feedView); });
}
@@ -572,7 +529,7 @@ DocumentDB::close()
_metricsWireService.cleanAttributes(ready, &legacyMetrics.attributes);
_metricsWireService.cleanAttributes(notReady, NULL);
_writeService.sync();
- _writeService.master().execute(makeTask(makeClosure(this, &DocumentDB::closeSubDBs)));
+ masterExecute([this] () { closeSubDBs(); } );
_writeService.sync();
// What about queued tasks ?
_writeService.shutdown();
@@ -838,9 +795,7 @@ DocumentDB::enterApplyLiveConfigState()
lock_guard guard(_configMutex);
(void) _state.enterApplyLiveConfigState();
}
- _writeService.master().execute(makeTask(makeClosure(this,
- &DocumentDB::performReconfig,
- _pendingConfigSnapshot.get())));
+ masterExecute([this]() { performReconfig(_pendingConfigSnapshot.get()); });
}
@@ -893,11 +848,9 @@ DocumentDB::replayConfig(search::SerialNum serialNum)
return;
}
// Load config to replay
- _config_store->loadConfig(*configSnapshot, serialNum,
- configSnapshot);
+ _config_store->loadConfig(*configSnapshot, serialNum, configSnapshot);
// Grab relevant parts from pending config
- configSnapshot = DocumentDBConfigScout::scout(configSnapshot,
- *_pendingConfigSnapshot.get());
+ configSnapshot = DocumentDBConfigScout::scout(configSnapshot, *_pendingConfigSnapshot.get());
// Ignore configs that are not relevant during replay of transaction log
configSnapshot = DocumentDBConfig::makeReplayConfig(configSnapshot);
applyConfig(configSnapshot, serialNum);
@@ -950,6 +903,7 @@ DocumentDB::injectMaintenanceJobs(const DocumentDBMaintenanceConfig &config)
_maintenanceController, // IFrozenBucketHandler
_subDBs.getBucketCreateNotifier(),
_docTypeName.getName(),
+ _bucketSpace,
_feedHandler, // IPruneRemovedDocumentsHandler
_feedHandler, // IDocumentMoveHandler
_clusterStateHandler, // IBucketModifiedHandler
@@ -1346,6 +1300,18 @@ DocumentDB::waitForOnlineState()
_state.waitForOnlineState();
}
+vespalib::string
+DocumentDB::getName() const
+{
+ return _docTypeName.getName();
+}
+
+document::BucketSpace
+DocumentDB::getBucketSpace() const
+{
+ return _bucketSpace;
+}
+
uint32_t
DocumentDB::getDistributionKey() const
{
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdb.h b/searchcore/src/vespa/searchcore/proton/server/documentdb.h
index d06ce9050b7..e23cd78b3ad 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdb.h
@@ -5,20 +5,23 @@
#include "clusterstatehandler.h"
#include "configstore.h"
#include "ddbstate.h"
+#include "disk_mem_usage_forwarder.h"
#include "documentdbconfig.h"
#include "documentsubdbcollection.h"
+#include "executorthreadingservice.h"
#include "feedhandler.h"
+#include "i_document_db_config_owner.h"
+#include "i_document_subdb_owner.h"
#include "i_feed_handler_owner.h"
#include "i_lid_space_compaction_handler.h"
#include "ifeedview.h"
#include "ireplayconfig.h"
#include "maintenancecontroller.h"
-#include "i_document_db_config_owner.h"
-#include "executorthreadingservice.h"
+#include "threading_service_config.h"
#include "visibilityhandler.h"
-#include "i_document_subdb_owner.h"
-#include "disk_mem_usage_forwarder.h"
+#include <vespa/metrics/updatehook.h>
+#include <vespa/searchcore/proton/attribute/attribute_usage_filter.h>
#include <vespa/searchcore/proton/common/doctypename.h>
#include <vespa/searchcore/proton/common/monitored_refcount.h>
#include <vespa/searchcore/proton/metrics/documentdb_job_trackers.h>
@@ -28,8 +31,6 @@
#include <vespa/searchlib/docstore/cachestats.h>
#include <vespa/searchlib/transactionlog/syncproxy.h>
#include <vespa/vespalib/util/varholder.h>
-#include <vespa/searchcore/proton/attribute/attribute_usage_filter.h>
-#include <vespa/metrics/updatehook.h>
#include <mutex>
#include <condition_variable>
@@ -78,10 +79,9 @@ private:
using ProtonConfig = vespa::config::search::core::ProtonConfig;
DocTypeName _docTypeName;
+ document::BucketSpace _bucketSpace;
vespalib::string _baseDir;
- uint32_t _defaultExecutorTaskLimit;
- uint32_t _semiUnboundExecutorTaskLimit;
- uint32_t _indexingThreads;
+ ThreadingServiceConfig _writeServiceConfig;
// Only one thread per executor, or dropFeedView() will fail.
ExecutorThreadingService _writeService;
// threads for initializer tasks during proton startup
@@ -108,13 +108,12 @@ private:
ClusterStateHandler _clusterStateHandler;
BucketHandler _bucketHandler;
- ProtonConfig::Summary _protonSummaryCfg;
ProtonConfig::Index _protonIndexCfg;
ConfigStore::UP _config_store;
std::shared_ptr<matching::SessionManager> _sessionManager; // TODO: This should not have to be a shared pointer.
MetricsWireService &_metricsWireService;
MetricsUpdateHook _metricsHook;
- vespalib::VarHolder<IFeedView::SP> _feedView;
+ vespalib::VarHolder<IFeedView::SP> _feedView;
MonitoredRefCount _refCount;
bool _syncFeedViewEnabled;
IDocumentDBOwner &_owner;
@@ -142,7 +141,8 @@ private:
void performReconfig(DocumentDBConfig::SP configSnapshot);
void closeSubDBs();
- void applySubDBConfig(const DocumentDBConfig &newConfigSnapshot, SerialNum serialNum, const ReconfigParams &params);
+ void applySubDBConfig(const DocumentDBConfig &newConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params);
void applyConfig(DocumentDBConfig::SP configSnapshot, SerialNum serialNum);
/**
@@ -163,7 +163,7 @@ private:
* Redo interrupted reprocessing if last entry in transaction log
* is a config change.
*/
- virtual void enterRedoReprocessState() override;
+ void enterRedoReprocessState() override;
void enterApplyLiveConfigState();
/**
@@ -233,16 +233,17 @@ public:
* @param config_store Access to read and write configs.
*/
DocumentDB(const vespalib::string &baseDir,
- const DocumentDBConfig::SP & currentSnapshot,
+ const DocumentDBConfig::SP &currentSnapshot,
const vespalib::string &tlsSpec,
- matching::QueryLimiter & queryLimiter,
+ matching::QueryLimiter &queryLimiter,
const vespalib::Clock &clock,
const DocTypeName &docTypeName,
+ document::BucketSpace bucketSpace,
const ProtonConfig &protonCfg,
- IDocumentDBOwner & owner,
- vespalib::ThreadExecutor & warmupExecutor,
- vespalib::ThreadStackExecutorBase & summaryExecutor,
- search::transactionlog::Writer * tlsDirectWriter,
+ IDocumentDBOwner &owner,
+ vespalib::ThreadExecutor &warmupExecutor,
+ vespalib::ThreadStackExecutorBase &summaryExecutor,
+ search::transactionlog::Writer &tlsDirectWriter,
MetricsWireService &metricsWireService,
const search::common::FileHeaderContext &fileHeaderContext,
ConfigStore::UP config_store,
@@ -390,25 +391,17 @@ public:
void release() { _refCount.release(); }
bool getDelayedConfig() const { return _state.getDelayedConfig(); }
-
- /**
- * Implements IReplayConfig API.
- */
- virtual void replayConfig(SerialNum serialNum) override;
-
+ void replayConfig(SerialNum serialNum) override;
const DocTypeName & getDocTypeName() const { return _docTypeName; }
-
void newConfigSnapshot(DocumentDBConfig::SP snapshot);
-
- // Implements DocumentDBConfigOwner
void reconfigure(const DocumentDBConfig::SP & snapshot) override;
-
int64_t getActiveGeneration() const;
-
- // Implements IDocumentSubDBOwner
+ /*
+ * Implements IDocumentSubDBOwner
+ */
void syncFeedView() override;
-
- vespalib::string getName() const override { return _docTypeName.getName(); }
+ document::BucketSpace getBucketSpace() const override;
+ vespalib::string getName() const override;
uint32_t getDistributionKey() const override;
/**
@@ -432,7 +425,7 @@ public:
*
* Sync transaction log to syncTo.
*/
- virtual void sync(SerialNum syncTo) override;
+ void sync(SerialNum syncTo) override;
void enterReprocessState();
void enterOnlineState();
void waitForOnlineState();
@@ -440,4 +433,3 @@ public:
};
} // namespace proton
-
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.cpp
index 86d43517d00..44f4260f7f8 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.cpp
@@ -41,6 +41,7 @@ DocumentDBConfig::ComparisonResult::ComparisonResult()
tuneFileDocumentDBChanged(false),
schemaChanged(false),
maintenanceChanged(false),
+ storeChanged(false),
visibilityDelayChanged(false)
{ }
@@ -59,6 +60,7 @@ DocumentDBConfig::DocumentDBConfig(
const search::TuneFileDocumentDB::SP &tuneFileDocumentDB,
const Schema::SP &schema,
const DocumentDBMaintenanceConfig::SP &maintenance,
+ const search::LogDocumentStore::Config & storeConfig,
const vespalib::string &configId,
const vespalib::string &docTypeName,
const config::ConfigSnapshot & extraConfigs)
@@ -78,6 +80,7 @@ DocumentDBConfig::DocumentDBConfig(
_tuneFileDocumentDB(tuneFileDocumentDB),
_schema(schema),
_maintenance(maintenance),
+ _storeConfig(storeConfig),
_extraConfigs(extraConfigs),
_orig(),
_delayedAttributeAspects(false)
@@ -102,6 +105,7 @@ DocumentDBConfig(const DocumentDBConfig &cfg)
_tuneFileDocumentDB(cfg._tuneFileDocumentDB),
_schema(cfg._schema),
_maintenance(cfg._maintenance),
+ _storeConfig(cfg._storeConfig),
_extraConfigs(cfg._extraConfigs),
_orig(cfg._orig),
_delayedAttributeAspects(false)
@@ -112,31 +116,20 @@ DocumentDBConfig::~DocumentDBConfig() { }
bool
DocumentDBConfig::operator==(const DocumentDBConfig & rhs) const
{
- return equals<RankProfilesConfig>(_rankProfiles.get(),
- rhs._rankProfiles.get()) &&
- equals<RankingConstants>(_rankingConstants.get(),
- rhs._rankingConstants.get()) &&
- equals<IndexschemaConfig>(_indexschema.get(),
- rhs._indexschema.get()) &&
- equals<AttributesConfig>(_attributes.get(),
- rhs._attributes.get()) &&
- equals<SummaryConfig>(_summary.get(),
- rhs._summary.get()) &&
- equals<SummarymapConfig>(_summarymap.get(),
- rhs._summarymap.get()) &&
- equals<JuniperrcConfig>(_juniperrc.get(),
- rhs._juniperrc.get()) &&
- equals<DocumenttypesConfig>(_documenttypes.get(),
- rhs._documenttypes.get()) &&
+ return equals<RankProfilesConfig>(_rankProfiles.get(), rhs._rankProfiles.get()) &&
+ equals<RankingConstants>(_rankingConstants.get(), rhs._rankingConstants.get()) &&
+ equals<IndexschemaConfig>(_indexschema.get(), rhs._indexschema.get()) &&
+ equals<AttributesConfig>(_attributes.get(), rhs._attributes.get()) &&
+ equals<SummaryConfig>(_summary.get(), rhs._summary.get()) &&
+ equals<SummarymapConfig>(_summarymap.get(), rhs._summarymap.get()) &&
+ equals<JuniperrcConfig>(_juniperrc.get(), rhs._juniperrc.get()) &&
+ equals<DocumenttypesConfig>(_documenttypes.get(), rhs._documenttypes.get()) &&
_repo.get() == rhs._repo.get() &&
- equals<ImportedFieldsConfig >(_importedFields.get(),
- rhs._importedFields.get()) &&
- equals<TuneFileDocumentDB>(_tuneFileDocumentDB.get(),
- rhs._tuneFileDocumentDB.get()) &&
- equals<Schema>(_schema.get(),
- rhs._schema.get()) &&
- equals<DocumentDBMaintenanceConfig>(_maintenance.get(),
- rhs._maintenance.get());
+ equals<ImportedFieldsConfig >(_importedFields.get(), rhs._importedFields.get()) &&
+ equals<TuneFileDocumentDB>(_tuneFileDocumentDB.get(), rhs._tuneFileDocumentDB.get()) &&
+ equals<Schema>(_schema.get(), rhs._schema.get()) &&
+ equals<DocumentDBMaintenanceConfig>(_maintenance.get(), rhs._maintenance.get()) &&
+ _storeConfig == rhs._storeConfig;
}
@@ -144,36 +137,21 @@ DocumentDBConfig::ComparisonResult
DocumentDBConfig::compare(const DocumentDBConfig &rhs) const
{
ComparisonResult retval;
- retval.rankProfilesChanged =
- !equals<RankProfilesConfig>(_rankProfiles.get(), rhs._rankProfiles.get());
- retval.rankingConstantsChanged =
- !equals<RankingConstants>(_rankingConstants.get(), rhs._rankingConstants.get());
- retval.indexschemaChanged =
- !equals<IndexschemaConfig>(_indexschema.get(), rhs._indexschema.get());
- retval.attributesChanged =
- !equals<AttributesConfig>(_attributes.get(), rhs._attributes.get());
- retval.summaryChanged =
- !equals<SummaryConfig>(_summary.get(), rhs._summary.get());
- retval.summarymapChanged =
- !equals<SummarymapConfig>(_summarymap.get(), rhs._summarymap.get());
- retval.juniperrcChanged =
- !equals<JuniperrcConfig>(_juniperrc.get(), rhs._juniperrc.get());
- retval.documenttypesChanged =
- !equals<DocumenttypesConfig>(_documenttypes.get(),
- rhs._documenttypes.get());
+ retval.rankProfilesChanged = !equals<RankProfilesConfig>(_rankProfiles.get(), rhs._rankProfiles.get());
+ retval.rankingConstantsChanged = !equals<RankingConstants>(_rankingConstants.get(), rhs._rankingConstants.get());
+ retval.indexschemaChanged = !equals<IndexschemaConfig>(_indexschema.get(), rhs._indexschema.get());
+ retval.attributesChanged = !equals<AttributesConfig>(_attributes.get(), rhs._attributes.get());
+ retval.summaryChanged = !equals<SummaryConfig>(_summary.get(), rhs._summary.get());
+ retval.summarymapChanged = !equals<SummarymapConfig>(_summarymap.get(), rhs._summarymap.get());
+ retval.juniperrcChanged = !equals<JuniperrcConfig>(_juniperrc.get(), rhs._juniperrc.get());
+ retval.documenttypesChanged = !equals<DocumenttypesConfig>(_documenttypes.get(), rhs._documenttypes.get());
retval.documentTypeRepoChanged = _repo.get() != rhs._repo.get();
- retval.importedFieldsChanged =
- !equals<ImportedFieldsConfig >(_importedFields.get(), rhs._importedFields.get());
- retval.tuneFileDocumentDBChanged =
- !equals<TuneFileDocumentDB>(_tuneFileDocumentDB.get(),
- rhs._tuneFileDocumentDB.get());
- retval.schemaChanged =
- !equals<Schema>(_schema.get(), rhs._schema.get());
- retval.maintenanceChanged =
- !equals<DocumentDBMaintenanceConfig>(_maintenance.get(),
- rhs._maintenance.get());
- retval.visibilityDelayChanged =
- (_maintenance->getVisibilityDelay() != rhs._maintenance->getVisibilityDelay());
+ retval.importedFieldsChanged = !equals<ImportedFieldsConfig >(_importedFields.get(), rhs._importedFields.get());
+ retval.tuneFileDocumentDBChanged = !equals<TuneFileDocumentDB>(_tuneFileDocumentDB.get(), rhs._tuneFileDocumentDB.get());
+ retval.schemaChanged = !equals<Schema>(_schema.get(), rhs._schema.get());
+ retval.maintenanceChanged = !equals<DocumentDBMaintenanceConfig>(_maintenance.get(), rhs._maintenance.get());
+ retval.storeChanged = (_storeConfig != rhs._storeConfig);
+ retval.visibilityDelayChanged = (_maintenance->getVisibilityDelay() != rhs._maintenance->getVisibilityDelay());
return retval;
}
@@ -181,19 +159,19 @@ DocumentDBConfig::compare(const DocumentDBConfig &rhs) const
bool
DocumentDBConfig::valid() const
{
- return (_rankProfiles.get() != NULL) &&
- (_rankingConstants.get() != NULL) &&
- (_indexschema.get() != NULL) &&
- (_attributes.get() != NULL) &&
- (_summary.get() != NULL) &&
- (_summarymap.get() != NULL) &&
- (_juniperrc.get() != NULL) &&
- (_documenttypes.get() != NULL) &&
- (_repo.get() != NULL) &&
- (_importedFields.get() != NULL) &&
- (_tuneFileDocumentDB.get() != NULL) &&
- (_schema.get() != NULL) &&
- (_maintenance.get() != NULL);
+ return _rankProfiles &&
+ _rankingConstants &&
+ _indexschema &&
+ _attributes &&
+ _summary &&
+ _summarymap &&
+ _juniperrc &&
+ _documenttypes &&
+ _repo &&
+ _importedFields &&
+ _tuneFileDocumentDB &&
+ _schema &&
+ _maintenance;
}
namespace
@@ -234,6 +212,7 @@ DocumentDBConfig::makeReplayConfig(const SP & orig)
o._tuneFileDocumentDB,
o._schema,
o._maintenance,
+ o._storeConfig,
o._configId,
o._docTypeName,
o._extraConfigs);
@@ -274,6 +253,7 @@ DocumentDBConfig::newFromAttributesConfig(const AttributesConfigSP &attributes)
_tuneFileDocumentDB,
_schema,
_maintenance,
+ _storeConfig,
_configId,
_docTypeName,
_extraConfigs);
@@ -309,6 +289,7 @@ DocumentDBConfig::makeDelayedAttributeAspectConfig(const SP &newCfg, const Docum
n._tuneFileDocumentDB,
n._schema,
n._maintenance,
+ n._storeConfig,
n._configId,
n._docTypeName,
n._extraConfigs);
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.h b/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.h
index 773450af620..4250ce61175 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.h
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfig.h
@@ -9,22 +9,17 @@
#include <vespa/searchcore/proton/matching/ranking_constants.h>
#include <vespa/config/retriever/configkeyset.h>
#include <vespa/config/retriever/configsnapshot.h>
-
-namespace vespa {
- namespace config {
- namespace search {
- namespace internal {
- class InternalSummaryType;
- class InternalSummarymapType;
- class InternalRankProfilesType;
- class InternalAttributesType;
- class InternalIndexschemaType;
- class InternalImportedFieldsType;
- }
- namespace summary { namespace internal { class InternalJuniperrcType; } }
- }
- }
+#include <vespa/searchlib/docstore/logdocumentstore.h>
+
+namespace vespa::config::search::internal {
+ class InternalSummaryType;
+ class InternalSummarymapType;
+ class InternalRankProfilesType;
+ class InternalAttributesType;
+ class InternalIndexschemaType;
+ class InternalImportedFieldsType;
}
+namespace vespa::config::search::summary { namespace internal { class InternalJuniperrcType; } }
namespace document { namespace internal { class InternalDocumenttypesType; } }
@@ -49,6 +44,7 @@ public:
bool tuneFileDocumentDBChanged;
bool schemaChanged;
bool maintenanceChanged;
+ bool storeChanged;
bool visibilityDelayChanged;
ComparisonResult();
@@ -65,6 +61,8 @@ public:
ComparisonResult &setTuneFileDocumentDBChanged(bool val) { tuneFileDocumentDBChanged = val; return *this; }
ComparisonResult &setSchemaChanged(bool val) { schemaChanged = val; return *this; }
ComparisonResult &setMaintenanceChanged(bool val) { maintenanceChanged = val; return *this; }
+ ComparisonResult &setStoreChanged(bool val) { storeChanged = val; return *this; }
+
ComparisonResult &setVisibilityDelayChanged(bool val) { visibilityDelayChanged = val; return *this; }
};
@@ -89,25 +87,26 @@ public:
using ImportedFieldsConfigSP = std::shared_ptr<ImportedFieldsConfig>;
private:
- vespalib::string _configId;
- vespalib::string _docTypeName;
- int64_t _generation;
- RankProfilesConfigSP _rankProfiles;
- RankingConstants::SP _rankingConstants;
- IndexschemaConfigSP _indexschema;
- AttributesConfigSP _attributes;
- SummaryConfigSP _summary;
- SummarymapConfigSP _summarymap;
- JuniperrcConfigSP _juniperrc;
- DocumenttypesConfigSP _documenttypes;
- document::DocumentTypeRepo::SP _repo;
- ImportedFieldsConfigSP _importedFields;
- search::TuneFileDocumentDB::SP _tuneFileDocumentDB;
- search::index::Schema::SP _schema;
- MaintenanceConfigSP _maintenance;
- config::ConfigSnapshot _extraConfigs;
- SP _orig;
- bool _delayedAttributeAspects;
+ vespalib::string _configId;
+ vespalib::string _docTypeName;
+ int64_t _generation;
+ RankProfilesConfigSP _rankProfiles;
+ RankingConstants::SP _rankingConstants;
+ IndexschemaConfigSP _indexschema;
+ AttributesConfigSP _attributes;
+ SummaryConfigSP _summary;
+ SummarymapConfigSP _summarymap;
+ JuniperrcConfigSP _juniperrc;
+ DocumenttypesConfigSP _documenttypes;
+ document::DocumentTypeRepo::SP _repo;
+ ImportedFieldsConfigSP _importedFields;
+ search::TuneFileDocumentDB::SP _tuneFileDocumentDB;
+ search::index::Schema::SP _schema;
+ MaintenanceConfigSP _maintenance;
+ search::LogDocumentStore::Config _storeConfig;
+ config::ConfigSnapshot _extraConfigs;
+ SP _orig;
+ bool _delayedAttributeAspects;
template <typename T>
@@ -133,6 +132,7 @@ public:
const search::TuneFileDocumentDB::SP &tuneFileDocumentDB,
const search::index::Schema::SP &schema,
const DocumentDBMaintenanceConfig::SP &maintenance,
+ const search::LogDocumentStore::Config & storeConfig,
const vespalib::string &configId,
const vespalib::string &docTypeName,
const config::ConfigSnapshot &extraConfig = config::ConfigSnapshot());
@@ -206,6 +206,8 @@ public:
*/
SP newFromAttributesConfig(const AttributesConfigSP &attributes) const;
+ const search::LogDocumentStore::Config & getStoreConfig() const { return _storeConfig; }
+
/**
* Create config with delayed attribute aspect changes if they require
* reprocessing.
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
index cae45d50d6d..6aea5234fbe 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.cpp
@@ -7,14 +7,13 @@
#include <vespa/config-summarymap.h>
#include <vespa/config/file_acquirer/file_acquirer.h>
#include <vespa/config/helper/legacy.h>
-#include <vespa/log/log.h>
#include <vespa/searchcommon/common/schemaconfigurer.h>
#include <vespa/searchlib/index/schemautil.h>
#include <vespa/searchsummary/config/config-juniperrc.h>
#include <vespa/searchcore/config/config-ranking-constants.h>
#include <vespa/vespalib/time/time_box.h>
-#include <thread>
+#include <vespa/log/log.h>
LOG_SETUP(".proton.server.documentdbconfigmanager");
using namespace config;
@@ -28,6 +27,11 @@ using search::TuneFileDocumentDB;
using search::index::Schema;
using search::index::SchemaBuilder;
using proton::matching::RankingConstants;
+using vespalib::compression::CompressionConfig;
+using search::LogDocumentStore;
+using search::LogDataStore;
+using search::DocumentStore;
+using search::WriteableFileChunk;
namespace proton {
@@ -87,7 +91,9 @@ DocumentDBConfigManager::buildSchema(const AttributesConfig &newAttributesConfig
return oldSchema;
}
-static DocumentDBMaintenanceConfig::SP
+namespace {
+
+DocumentDBMaintenanceConfig::SP
buildMaintenanceConfig(const BootstrapConfig::SP &bootstrapConfig,
const vespalib::string &docTypeName)
{
@@ -133,7 +139,45 @@ buildMaintenanceConfig(const BootstrapConfig::SP &bootstrapConfig,
proton.maintenancejobs.maxoutstandingmoveops));
}
-namespace {
+template<typename T>
+CompressionConfig
+deriveCompression(const T & config) {
+ CompressionConfig compression;
+ if (config.type == T::LZ4) {
+ compression.type = CompressionConfig::LZ4;
+ } else if (config.type == T::ZSTD) {
+ compression.type = CompressionConfig::ZSTD;
+ }
+ compression.compressionLevel = config.level;
+ return compression;
+}
+
+DocumentStore::Config
+getStoreConfig(const ProtonConfig::Summary::Cache & cache)
+{
+ return DocumentStore::Config(deriveCompression(cache.compression), cache.maxbytes, cache.initialentries).allowVisitCaching(cache.allowvisitcaching);
+}
+
+LogDocumentStore::Config
+deriveConfig(const ProtonConfig::Summary & summary, const ProtonConfig::Flush::Memory & flush) {
+ DocumentStore::Config config(getStoreConfig(summary.cache));
+ const ProtonConfig::Summary::Log & log(summary.log);
+ const ProtonConfig::Summary::Log::Chunk & chunk(log.chunk);
+ const
+
+ WriteableFileChunk::Config fileConfig(deriveCompression(chunk.compression), chunk.maxbytes);
+ LogDataStore::Config logConfig;
+ logConfig.setMaxFileSize(log.maxfilesize)
+ .setMaxDiskBloatFactor(std::min(flush.diskbloatfactor, flush.each.diskbloatfactor))
+ .setMaxBucketSpread(log.maxbucketspread).setMinFileSizeFactor(log.minfilesizefactor)
+ .compact2ActiveFile(log.compact2activefile).compactCompression(deriveCompression(log.compact.compression))
+ .setFileConfig(fileConfig).disableCrcOnRead(chunk.skipcrconread);
+ return LogDocumentStore::Config(config, logConfig);
+}
+
+search::LogDocumentStore::Config buildStoreConfig(const ProtonConfig & proton) {
+ return deriveConfig(proton.summary, proton.flush.memory);
+}
using AttributesConfigSP = DocumentDBConfig::AttributesConfigSP;
using AttributesConfigBuilder = vespa::config::search::AttributesConfigBuilder;
@@ -180,27 +224,22 @@ DocumentDBConfigManager::update(const ConfigSnapshot &snapshot)
MaintenanceConfigSP newMaintenanceConfig;
if (!_ignoreForwardedConfig) {
- if (_bootstrapConfig->getDocumenttypesConfigSP().get() == NULL ||
- _bootstrapConfig->getDocumentTypeRepoSP().get() == NULL ||
- _bootstrapConfig->getProtonConfigSP().get() == NULL ||
- _bootstrapConfig->getTuneFileDocumentDBSP().get() == NULL) {
+ if (!(_bootstrapConfig->getDocumenttypesConfigSP() &&
+ _bootstrapConfig->getDocumentTypeRepoSP() &&
+ _bootstrapConfig->getProtonConfigSP() &&
+ _bootstrapConfig->getTuneFileDocumentDBSP())) {
return;
}
}
int64_t generation = snapshot.getGeneration();
- LOG(debug,
- "Forwarded generation %"
- PRId64
- ", generation %"
- PRId64,
- _bootstrapConfig->getGeneration(), generation);
+ LOG(debug, "Forwarded generation %" PRId64 ", generation %" PRId64, _bootstrapConfig->getGeneration(), generation);
if (!_ignoreForwardedConfig && _bootstrapConfig->getGeneration() != generation) {
return;
}
int64_t currentGeneration = -1;
- if (current.get() != NULL) {
+ if (current) {
newRankProfilesConfig = current->getRankProfilesConfigSP();
newRankingConstants = current->getRankingConstantsSP();
newIndexschemaConfig = current->getIndexschemaConfigSP();
@@ -245,13 +284,11 @@ DocumentDBConfigManager::update(const ConfigSnapshot &snapshot)
newRankingConstants = std::make_shared<RankingConstants>(constants);
}
if (snapshot.isChanged<IndexschemaConfig>(_configId, currentGeneration)) {
- std::unique_ptr<IndexschemaConfig> indexschemaConfig =
- snapshot.getConfig<IndexschemaConfig>(_configId);
+ std::unique_ptr<IndexschemaConfig> indexschemaConfig = snapshot.getConfig<IndexschemaConfig>(_configId);
search::index::Schema schema;
search::index::SchemaBuilder::build(*indexschemaConfig, schema);
if (!search::index::SchemaUtil::validateSchema(schema)) {
- LOG(error,
- "Cannot use bad index schema, validation failed");
+ LOG(error, "Cannot use bad index schema, validation failed");
abort();
}
newIndexschemaConfig = IndexschemaConfigSP(indexschemaConfig.release());
@@ -272,14 +309,10 @@ DocumentDBConfigManager::update(const ConfigSnapshot &snapshot)
newImportedFieldsConfig = ImportedFieldsConfigSP(snapshot.getConfig<ImportedFieldsConfig>(_configId).release());
}
- Schema::SP schema(buildSchema(*newAttributesConfig,
- *newSummaryConfig,
- *newIndexschemaConfig));
- newMaintenanceConfig = buildMaintenanceConfig(_bootstrapConfig,
- _docTypeName);
- if (newMaintenanceConfig.get() != NULL &&
- oldMaintenanceConfig.get() != NULL &&
- *newMaintenanceConfig == *oldMaintenanceConfig) {
+ Schema::SP schema(buildSchema(*newAttributesConfig, *newSummaryConfig, *newIndexschemaConfig));
+ newMaintenanceConfig = buildMaintenanceConfig(_bootstrapConfig, _docTypeName);
+ search::LogDocumentStore::Config storeConfig = buildStoreConfig(_bootstrapConfig->getProtonConfig());
+ if (newMaintenanceConfig && oldMaintenanceConfig && *newMaintenanceConfig == *oldMaintenanceConfig) {
newMaintenanceConfig = oldMaintenanceConfig;
}
ConfigSnapshot extraConfigs(snapshot.subset(_extraConfigKeys));
@@ -298,6 +331,7 @@ DocumentDBConfigManager::update(const ConfigSnapshot &snapshot)
_bootstrapConfig->getTuneFileDocumentDBSP(),
schema,
newMaintenanceConfig,
+ storeConfig,
_configId,
_docTypeName,
extraConfigs));
@@ -310,8 +344,7 @@ DocumentDBConfigManager::update(const ConfigSnapshot &snapshot)
DocumentDBConfigManager::
-DocumentDBConfigManager(const vespalib::string &configId,
- const vespalib::string &docTypeName)
+DocumentDBConfigManager(const vespalib::string &configId, const vespalib::string &docTypeName)
: _configId(configId),
_docTypeName(docTypeName),
_bootstrapConfig(),
@@ -342,9 +375,11 @@ forwardConfig(const BootstrapConfig::SP & config)
}
}
+DocumentDBConfigHelper::DocumentDBConfigHelper(const config::DirSpec &spec, const vespalib::string &docTypeName)
+ : DocumentDBConfigHelper(spec, docTypeName, config::ConfigKeySet())
+{ }
-DocumentDBConfigHelper::DocumentDBConfigHelper(const config::DirSpec &spec,
- const vespalib::string &docTypeName,
+DocumentDBConfigHelper::DocumentDBConfigHelper(const config::DirSpec &spec, const vespalib::string &docTypeName,
const config::ConfigKeySet &extraConfigKeys)
: _mgr("", docTypeName),
_retriever()
@@ -359,8 +394,7 @@ DocumentDBConfigHelper::~DocumentDBConfigHelper() { }
bool
DocumentDBConfigHelper::nextGeneration(int timeoutInMillis)
{
- config::ConfigSnapshot
- snapshot(_retriever->getBootstrapConfigs(timeoutInMillis));
+ config::ConfigSnapshot snapshot(_retriever->getBootstrapConfigs(timeoutInMillis));
if (snapshot.empty())
return false;
_mgr.update(snapshot);
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.h b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.h
index 45259fe32a3..ebab75da1b0 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.h
+++ b/searchcore/src/vespa/searchcore/proton/server/documentdbconfigmanager.h
@@ -34,8 +34,7 @@ private:
const DocumentDBConfig::IndexschemaConfig & newIndexschemaConfig);
public:
- DocumentDBConfigManager(const vespalib::string &configId,
- const vespalib::string &docTypeName);
+ DocumentDBConfigManager(const vespalib::string &configId, const vespalib::string &docTypeName);
~DocumentDBConfigManager();
void update(const config::ConfigSnapshot & snapshot);
@@ -54,9 +53,9 @@ public:
class DocumentDBConfigHelper
{
public:
- DocumentDBConfigHelper(const config::DirSpec &spec,
- const vespalib::string &docTypeName,
- const config::ConfigKeySet &extraConfigKeys = config::ConfigKeySet());
+ DocumentDBConfigHelper(const config::DirSpec &spec, const vespalib::string &docTypeName);
+ DocumentDBConfigHelper(const config::DirSpec &spec, const vespalib::string &docTypeName,
+ const config::ConfigKeySet &extraConfigKeys);
~DocumentDBConfigHelper();
bool nextGeneration(int timeoutInMillis);
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp
index 9caeb49d370..c766cc89bb3 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp
@@ -4,6 +4,7 @@
#include "commit_and_wait_document_retriever.h"
#include "document_subdb_collection_initializer.h"
#include "documentsubdbcollection.h"
+#include "i_document_subdb_owner.h"
#include "maintenancecontroller.h"
#include "searchabledocsubdb.h"
@@ -12,7 +13,6 @@
using proton::matching::SessionManager;
using search::index::Schema;
using search::SerialNum;
-using vespa::config::search::core::ProtonConfig;
using searchcorespi::IFlushTarget;
namespace proton {
@@ -35,6 +35,7 @@ DocumentSubDBCollection::DocumentSubDBCollection(
const ProtonConfig &protonCfg,
const HwInfo &hwInfo)
: _subDBs(),
+ _owner(owner),
_calc(),
_readySubDbId(0),
_remSubDbId(1),
@@ -169,17 +170,11 @@ void DocumentSubDBCollection::maintenanceSync(MaintenanceController &mc,
initializer::InitializerTask::SP
DocumentSubDBCollection::createInitializer(const DocumentDBConfig &configSnapshot,
SerialNum configSerialNum,
- const ProtonConfig::Summary & protonSummaryCfg,
const ProtonConfig::Index & indexCfg)
{
- DocumentSubDbCollectionInitializer::SP task =
- std::make_shared<DocumentSubDbCollectionInitializer>();
+ DocumentSubDbCollectionInitializer::SP task = std::make_shared<DocumentSubDbCollectionInitializer>();
for (auto subDb : _subDBs) {
- DocumentSubDbInitializer::SP
- subTask(subDb->createInitializer(configSnapshot,
- configSerialNum,
- protonSummaryCfg,
- indexCfg));
+ DocumentSubDbInitializer::SP subTask(subDb->createInitializer(configSnapshot, configSerialNum, indexCfg));
task->add(subTask);
}
return task;
@@ -264,8 +259,7 @@ DocumentSubDBCollection::applyConfig(const DocumentDBConfig &newConfigSnapshot,
_reprocessingRunner.reset();
for (auto subDb : _subDBs) {
IReprocessingTask::List tasks;
- tasks = subDb->applyConfig(newConfigSnapshot, oldConfigSnapshot,
- serialNum, params, resolver);
+ tasks = subDb->applyConfig(newConfigSnapshot, oldConfigSnapshot, serialNum, params, resolver);
_reprocessingRunner.addTasks(tasks);
}
}
@@ -282,7 +276,7 @@ DocumentSubDBCollection::getFeedView()
IFeedView::SP newFeedView;
assert(views.size() >= 1);
if (views.size() > 1) {
- return IFeedView::SP(new CombiningFeedView(views, _calc));
+ return IFeedView::SP(new CombiningFeedView(views, _owner.getBucketSpace(), _calc));
} else {
assert(views.front() != NULL);
return views.front();
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.h b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.h
index a3acd68d377..7290250c59e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.h
+++ b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.h
@@ -51,9 +51,10 @@ namespace initializer { class InitializerTask; }
class DocumentSubDBCollection {
public:
- typedef std::vector<IDocumentSubDB *> SubDBVector;
- typedef SubDBVector::const_iterator const_iterator;
- typedef search::SerialNum SerialNum;
+ using SubDBVector = std::vector<IDocumentSubDB *>;
+ using const_iterator = SubDBVector::const_iterator;
+ using SerialNum = search::SerialNum;
+ using ProtonConfig = vespa::config::search::core::ProtonConfig;
private:
using IFeedViewSP = std::shared_ptr<IFeedView>;
@@ -61,13 +62,14 @@ private:
using SessionManagerSP = std::shared_ptr<matching::SessionManager>;
using IFlushTargetList = std::vector<std::shared_ptr<searchcorespi::IFlushTarget>>;
SubDBVector _subDBs;
+ IDocumentSubDBOwner &_owner;
IBucketStateCalculatorSP _calc;
const uint32_t _readySubDbId;
const uint32_t _remSubDbId;
const uint32_t _notReadySubDbId;
- typedef std::shared_ptr<std::vector<std::shared_ptr<IDocumentRetriever>> > RetrieversSP;
+ using RetrieversSP = std::shared_ptr<std::vector<std::shared_ptr<IDocumentRetriever>> >;
vespalib::VarHolder<RetrieversSP> _retrievers;
- typedef std::vector<std::shared_ptr<IReprocessingTask>> ReprocessingTasks;
+ using ReprocessingTasks = std::vector<std::shared_ptr<IReprocessingTask>>;
ReprocessingRunner _reprocessingRunner;
std::shared_ptr<BucketDBOwner> _bucketDB;
std::unique_ptr<bucketdb::BucketDBHandler> _bucketDBHandler;
@@ -124,15 +126,10 @@ public:
}
std::shared_ptr<initializer::InitializerTask>
- createInitializer(const DocumentDBConfig &configSnapshot,
- SerialNum configSerialNum,
- const vespa::config::search::core::ProtonConfig::Summary &protonSummaryCfg,
+ createInitializer(const DocumentDBConfig &configSnapshot, SerialNum configSerialNum,
const vespa::config::search::core::ProtonConfig::Index & indexCfg);
- void
- initViews(const DocumentDBConfig &configSnapshot,
- const SessionManagerSP &sessionManager);
-
+ void initViews(const DocumentDBConfig &configSnapshot, const SessionManagerSP &sessionManager);
void clearViews();
void onReplayDone();
void onReprocessDone(SerialNum serialNum);
@@ -141,12 +138,8 @@ public:
void pruneRemovedFields(SerialNum serialNum);
- void
- applyConfig(const DocumentDBConfig &newConfigSnapshot,
- const DocumentDBConfig &oldConfigSnapshot,
- SerialNum serialNum,
- const ReconfigParams &params,
- IDocumentDBReferenceResolver &resolver);
+ void applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params, IDocumentDBReferenceResolver &resolver);
IFeedViewSP getFeedView();
IFlushTargetList getFlushTargets();
@@ -156,6 +149,4 @@ public:
void tearDownReferences(IDocumentDBReferenceResolver &resolver);
};
-
} // namespace proton
-
diff --git a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp
index 692bb12213e..2a3bc600a8f 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.cpp
@@ -11,7 +11,6 @@
#include <vespa/searchcore/proton/attribute/attribute_factory.h>
#include <vespa/searchcore/proton/attribute/attribute_manager_initializer.h>
#include <vespa/searchcore/proton/attribute/attribute_populator.h>
-#include <vespa/searchcore/proton/attribute/attribute_writer.h>
#include <vespa/searchcore/proton/attribute/filter_attribute_manager.h>
#include <vespa/searchcore/proton/attribute/sequential_attributes_initializer.h>
#include <vespa/searchcore/proton/metrics/legacy_documentdb_metrics.h>
@@ -215,14 +214,11 @@ FastAccessDocSubDB::FastAccessDocSubDB(const Config &cfg, const Context &ctx)
FastAccessDocSubDB::~FastAccessDocSubDB() { }
DocumentSubDbInitializer::UP
-FastAccessDocSubDB::createInitializer(const DocumentDBConfig &configSnapshot,
- SerialNum configSerialNum,
- const vespa::config::search::core::ProtonConfig::Summary &protonSummaryCfg,
+FastAccessDocSubDB::createInitializer(const DocumentDBConfig &configSnapshot, SerialNum configSerialNum,
const vespa::config::search::core::ProtonConfig::Index &indexCfg) const
{
- auto result = Parent::createInitializer(configSnapshot, configSerialNum, protonSummaryCfg, indexCfg);
- auto attrMgrInitTask = createAttributeManagerInitializer(configSnapshot,
- configSerialNum,
+ auto result = Parent::createInitializer(configSnapshot, configSerialNum, indexCfg);
+ auto attrMgrInitTask = createAttributeManagerInitializer(configSnapshot, configSerialNum,
result->getDocumentMetaStoreInitTask(),
result->result().documentMetaStore()->documentMetaStore(),
result->writableResult().writableAttributeManager());
@@ -253,13 +249,12 @@ FastAccessDocSubDB::initViews(const DocumentDBConfig &configSnapshot,
}
IReprocessingTask::List
-FastAccessDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot,
- const DocumentDBConfig &oldConfigSnapshot,
- SerialNum serialNum,
- const ReconfigParams &params,
- IDocumentDBReferenceResolver &resolver)
+FastAccessDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params, IDocumentDBReferenceResolver &resolver)
{
(void) resolver;
+
+ reconfigure(newConfigSnapshot.getStoreConfig());
IReprocessingTask::List tasks;
updateLidReuseDelayer(&newConfigSnapshot);
/*
diff --git a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.h b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.h
index c89a63b95c0..32c006fd8b7 100644
--- a/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/fast_access_doc_subdb.h
@@ -93,31 +93,23 @@ protected:
virtual IFlushTargetList getFlushTargetsInternal() override;
void reconfigureAttributeMetrics(const IAttributeManager &newMgr, const IAttributeManager &oldMgr);
- IReprocessingTask::UP
- createReprocessingTask(IReprocessingInitializer &initializer,
- const document::DocumentTypeRepo::SP &docTypeRepo) const;
+ IReprocessingTask::UP createReprocessingTask(IReprocessingInitializer &initializer,
+ const document::DocumentTypeRepo::SP &docTypeRepo) const;
public:
FastAccessDocSubDB(const Config &cfg, const Context &ctx);
-
~FastAccessDocSubDB();
- virtual std::unique_ptr<DocumentSubDbInitializer>
- createInitializer(const DocumentDBConfig &configSnapshot,
- SerialNum configSerialNum,
- const ProtonConfig::Summary &protonSummaryCfg,
+ std::unique_ptr<DocumentSubDbInitializer>
+ createInitializer(const DocumentDBConfig &configSnapshot, SerialNum configSerialNum,
const ProtonConfig::Index &indexCfg) const override;
void setup(const DocumentSubDbInitializerResult &initResult) override;
+ void initViews(const DocumentDBConfig &configSnapshot, const SessionManagerSP &sessionManager) override;
- void initViews(const DocumentDBConfig &configSnapshot,
- const SessionManagerSP &sessionManager) override;
-
- IReprocessingTask::List applyConfig(const DocumentDBConfig &newConfigSnapshot,
- const DocumentDBConfig &oldConfigSnapshot,
- SerialNum serialNum,
- const ReconfigParams &params,
- IDocumentDBReferenceResolver &resolver) override;
+ IReprocessingTask::List
+ applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params, IDocumentDBReferenceResolver &resolver) override;
proton::IAttributeManager::SP getAttributeManager() const override;
IDocumentRetriever::UP getDocumentRetriever() override;
diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
index cbfdb4895cc..bb5f058429a 100644
--- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.cpp
@@ -6,6 +6,7 @@
#include "i_feed_handler_owner.h"
#include "ifeedview.h"
#include "tlcproxy.h"
+#include "configstore.h"
#include <vespa/document/datatype/documenttype.h>
#include <vespa/documentapi/messagebus/documentprotocol.h>
#include <vespa/documentapi/messagebus/messages/documentreply.h>
@@ -17,7 +18,6 @@
#include <vespa/searchcore/proton/persistenceengine/transport_latch.h>
#include <vespa/searchcorespi/index/ithreadingservice.h>
#include <vespa/searchlib/common/idestructorcallback.h>
-#include <vespa/vespalib/util/closuretask.h>
#include <vespa/vespalib/util/exceptions.h>
#include <unistd.h>
@@ -40,49 +40,42 @@ using storage::spi::Timestamp;
using storage::spi::UpdateResult;
using vespalib::Executor;
using vespalib::IllegalStateException;
-using vespalib::makeClosure;
-using vespalib::makeTask;
+using vespalib::makeLambdaTask;
using vespalib::make_string;
using vespalib::MonitorGuard;
using vespalib::LockGuard;
namespace proton {
-
namespace {
+
void
setUpdateWasFound(mbus::Reply &reply, bool was_found)
{
- assert(static_cast<DocumentReply&>(reply).getType() ==
- DocumentProtocol::REPLY_UPDATEDOCUMENT);
+ assert(static_cast<DocumentReply&>(reply).getType() == DocumentProtocol::REPLY_UPDATEDOCUMENT);
UpdateDocumentReply &update_rep = static_cast<UpdateDocumentReply&>(reply);
update_rep.setWasFound(was_found);
}
-
void
setRemoveWasFound(mbus::Reply &reply, bool was_found)
{
- assert(static_cast<DocumentReply&>(reply).getType() ==
- DocumentProtocol::REPLY_REMOVEDOCUMENT);
+ assert(static_cast<DocumentReply&>(reply).getType() == DocumentProtocol::REPLY_REMOVEDOCUMENT);
RemoveDocumentReply &remove_rep = static_cast<RemoveDocumentReply&>(reply);
remove_rep.setWasFound(was_found);
}
-
bool
ignoreOperation(const DocumentOperation &op)
{
- return op.getPrevTimestamp() != 0 &&
- op.getTimestamp() < op.getPrevTimestamp();
+ return (op.getPrevTimestamp() != 0)
+ && (op.getTimestamp() < op.getPrevTimestamp());
}
-
} // namespace
-
void FeedHandler::TlsMgrWriter::storeOperation(const FeedOperation &op) {
- TlcProxy(*_tls_mgr.getSession(), _tlsDirectWriter).storeOperation(op);
+ TlcProxy(_tls_mgr.getDomainName(), *_tlsDirectWriter).storeOperation(op);
}
bool FeedHandler::TlsMgrWriter::erase(SerialNum oldest_to_keep) {
return _tls_mgr.getSession()->erase(oldest_to_keep);
@@ -92,7 +85,6 @@ search::SerialNum
FeedHandler::TlsMgrWriter::sync(SerialNum syncTo)
{
for (int retryCount = 0; retryCount < 10; ++retryCount) {
-
SerialNum syncedTo(0);
LOG(spam, "Trying tls sync(%" PRIu64 ")", syncTo);
bool res = _tls_mgr.getSession()->sync(syncTo, syncedTo);
@@ -102,19 +94,12 @@ FeedHandler::TlsMgrWriter::sync(SerialNum syncTo)
continue;
}
if (syncedTo >= syncTo) {
- LOG(spam,
- "Tls sync complete, reached %" PRIu64", returning",
- syncedTo);
+ LOG(spam, "Tls sync complete, reached %" PRIu64", returning", syncedTo);
return syncedTo;
}
- LOG(spam,
- "Tls sync incomplete, reached %" PRIu64 ", retrying",
- syncedTo);
+ LOG(spam, "Tls sync incomplete, reached %" PRIu64 ", retrying", syncedTo);
}
- throw vespalib::IllegalStateException(
- vespalib::make_string(
- "Failed to sync TLS to token %" PRIu64 ".",
- syncTo));
+ throw IllegalStateException(make_string("Failed to sync TLS to token %" PRIu64 ".", syncTo));
return 0;
}
@@ -122,7 +107,7 @@ void
FeedHandler::doHandleOperation(FeedToken token, FeedOperation::UP op)
{
assert(_writeService.master().isCurrentThread());
- vespalib::LockGuard guard(_feedLock);
+ LockGuard guard(_feedLock);
_feedState->handleOperation(token, std::move(op));
}
@@ -130,34 +115,17 @@ void FeedHandler::performPut(FeedToken::UP token, PutOperation &op) {
op.assertValid();
_activeFeedView->preparePut(op);
if (ignoreOperation(op)) {
- LOG(debug, "performPut(): ignoreOperation: docId(%s), "
- "timestamp(%" PRIu64 "), prevTimestamp(%" PRIu64 ")",
- op.getDocument()->getId().toString().c_str(),
- (uint64_t)op.getTimestamp(),
- (uint64_t)op.getPrevTimestamp());
- if (token.get() != NULL) {
+ LOG(debug, "performPut(): ignoreOperation: docId(%s), timestamp(%" PRIu64 "), prevTimestamp(%" PRIu64 ")",
+ op.getDocument()->getId().toString().c_str(), (uint64_t)op.getTimestamp(), (uint64_t)op.getPrevTimestamp());
+ if (token) {
token->setResult(ResultUP(new Result), false);
token->ack(op.getType(), _metrics);
}
return;
}
storeOperation(op);
- if (token.get() != NULL) {
+ if (token) {
token->setResult(ResultUP(new Result), false);
- if (token->shouldTrace(1)) {
- const document::DocumentId &docId = op.getDocument()->getId();
- const document::GlobalId &gid = docId.getGlobalId();
- token->trace(1,
- make_string(
- "Indexing document '%s' (gid = '%s',"
- " lid = '%u,%u' prevlid = '%u,%u').",
- docId.toString().c_str(),
- gid.toString().c_str(),
- op.getSubDbId(),
- op.getLid(),
- op.getPrevSubDbId(),
- op.getPrevLid()));
- }
}
_activeFeedView->handlePut(token.get(), op);
}
@@ -172,16 +140,8 @@ FeedHandler::performUpdate(FeedToken::UP token, UpdateOperation &op)
} else if (op.getUpdate()->getCreateIfNonExistent()) {
createNonExistingDocument(std::move(token), op);
} else {
- if (token.get() != NULL) {
+ if (token) {
token->setResult(ResultUP(new UpdateResult(Timestamp(0))), false);
- if (token->shouldTrace(1)) {
- const document::DocumentId &docId = op.getUpdate()->getId();
- token->trace(1,
- make_string(
- "Document '%s' not found."
- " Update operation ignored",
- docId.toString().c_str()));
- }
setUpdateWasFound(token->getReply(), false);
token->ack(op.getType(), _metrics);
}
@@ -193,23 +153,8 @@ void
FeedHandler::performInternalUpdate(FeedToken::UP token, UpdateOperation &op)
{
storeOperation(op);
- if (token.get() != NULL) {
- token->setResult(ResultUP(new UpdateResult(op.getPrevTimestamp())),
- true);
- if (token->shouldTrace(1)) {
- const document::DocumentId &docId = op.getUpdate()->getId();
- const document::GlobalId &gid = docId.getGlobalId();
- token->trace(1,
- make_string(
- "Updating document '%s' (gid = '%s',"
- " lid = '%u,%u' prevlid = '%u,%u').",
- docId.toString().c_str(),
- gid.toString().c_str(),
- op.getSubDbId(),
- op.getLid(),
- op.getPrevSubDbId(),
- op.getPrevLid()));
- }
+ if (token) {
+ token->setResult(ResultUP(new UpdateResult(op.getPrevTimestamp())), true);
setUpdateWasFound(token->getReply(), true);
}
_activeFeedView->handleUpdate(token.get(), op);
@@ -225,27 +170,15 @@ FeedHandler::createNonExistingDocument(FeedToken::UP token, const UpdateOperatio
PutOperation putOp(op.getBucketId(), op.getTimestamp(), doc);
_activeFeedView->preparePut(putOp);
storeOperation(putOp);
- if (token.get() != NULL) {
+ if (token) {
token->setResult(ResultUP(new UpdateResult(putOp.getTimestamp())), true);
- if (token->shouldTrace(1)) {
- const document::DocumentId &docId = putOp.getDocument()->getId();
- const document::GlobalId &gid = docId.getGlobalId();
- token->trace(1, make_string("Creating non-existing document '%s' for update (gid='%s',"
- " lid= %u,%u' prevlid='%u,%u').",
- docId.toString().c_str(),
- gid.toString().c_str(),
- putOp.getSubDbId(),
- putOp.getLid(),
- putOp.getPrevSubDbId(),
- putOp.getPrevLid()));
- }
setUpdateWasFound(token->getReply(), true);
}
TransportLatch latch(1);
FeedToken putToken(latch, mbus::Reply::UP(new FeedReply(DocumentProtocol::REPLY_PUTDOCUMENT)));
_activeFeedView->handlePut(&putToken, putOp);
latch.await();
- if (token.get() != NULL) {
+ if (token) {
token->ack();
}
}
@@ -254,12 +187,9 @@ FeedHandler::createNonExistingDocument(FeedToken::UP token, const UpdateOperatio
void FeedHandler::performRemove(FeedToken::UP token, RemoveOperation &op) {
_activeFeedView->prepareRemove(op);
if (ignoreOperation(op)) {
- LOG(debug, "performRemove(): ignoreOperation: docId(%s), "
- "timestamp(%" PRIu64 "), prevTimestamp(%" PRIu64 ")",
- op.getDocumentId().toString().c_str(),
- (uint64_t)op.getTimestamp(),
- (uint64_t)op.getPrevTimestamp());
- if (token.get() != NULL) {
+ LOG(debug, "performRemove(): ignoreOperation: docId(%s), timestamp(%" PRIu64 "), prevTimestamp(%" PRIu64 ")",
+ op.getDocumentId().toString().c_str(), (uint64_t)op.getTimestamp(), (uint64_t)op.getPrevTimestamp());
+ if (token) {
token->setResult(ResultUP(new RemoveResult(false)), false);
token->ack(op.getType(), _metrics);
}
@@ -269,52 +199,23 @@ void FeedHandler::performRemove(FeedToken::UP token, RemoveOperation &op) {
assert(op.getValidNewOrPrevDbdId());
assert(op.notMovingLidInSameSubDb());
storeOperation(op);
- if (token.get() != NULL) {
+ if (token) {
bool documentWasFound = !op.getPrevMarkedAsRemoved();
- token->setResult(ResultUP(new RemoveResult(documentWasFound)),
- documentWasFound);
- if (token->shouldTrace(1)) {
- const document::DocumentId &docId = op.getDocumentId();
- const document::GlobalId &gid = docId.getGlobalId();
- token->trace(1,
- make_string(
- "Removing document '%s' (gid = '%s',"
- " lid = '%u,%u' prevlid = '%u,%u').",
- docId.toString().c_str(),
- gid.toString().c_str(),
- op.getSubDbId(),
- op.getLid(),
- op.getPrevSubDbId(),
- op.getPrevLid()));
- }
+ token->setResult(ResultUP(new RemoveResult(documentWasFound)), documentWasFound);
setRemoveWasFound(token->getReply(), documentWasFound);
}
_activeFeedView->handleRemove(token.get(), op);
} else if (op.hasDocType()) {
assert(op.getDocType() == _docTypeName.getName());
storeOperation(op);
- if (token.get() != NULL) {
+ if (token) {
token->setResult(ResultUP(new RemoveResult(false)), false);
- if (token->shouldTrace(1)) {
- token->trace(1,
- make_string(
- "Document '%s' not found."
- " Remove operation stored.",
- op.getDocumentId().toString().c_str()));
- }
setRemoveWasFound(token->getReply(), false);
}
_activeFeedView->handleRemove(token.get(), op);
} else {
- if (token.get() != NULL) {
+ if (token) {
token->setResult(ResultUP(new RemoveResult(false)), false);
- if (token->shouldTrace(1)) {
- token->trace(1,
- make_string(
- "Document '%s' not found."
- " Remove operation ignored",
- op.getDocumentId().toString().c_str()));
- }
setRemoveWasFound(token->getReply(), false);
token->ack(op.getType(), _metrics);
}
@@ -324,15 +225,14 @@ void FeedHandler::performRemove(FeedToken::UP token, RemoveOperation &op) {
void
FeedHandler::performGarbageCollect(FeedToken::UP token)
{
- if (token.get() != NULL) {
+ if (token) {
token->ack();
}
}
void
-FeedHandler::performCreateBucket(FeedToken::UP token,
- CreateBucketOperation &op)
+FeedHandler::performCreateBucket(FeedToken::UP token, CreateBucketOperation &op)
{
storeOperation(op);
_bucketDBHandler->handleCreateBucket(op.getBucketId());
@@ -342,8 +242,7 @@ FeedHandler::performCreateBucket(FeedToken::UP token,
}
-void FeedHandler::performDeleteBucket(FeedToken::UP token,
- DeleteBucketOperation &op) {
+void FeedHandler::performDeleteBucket(FeedToken::UP token, DeleteBucketOperation &op) {
_activeFeedView->prepareDeleteBucket(op);
storeOperation(op);
// Delete documents in bucket
@@ -358,10 +257,7 @@ void FeedHandler::performDeleteBucket(FeedToken::UP token,
void FeedHandler::performSplit(FeedToken::UP token, SplitBucketOperation &op) {
storeOperation(op);
- _bucketDBHandler->handleSplit(op.getSerialNum(),
- op.getSource(),
- op.getTarget1(),
- op.getTarget2());
+ _bucketDBHandler->handleSplit(op.getSerialNum(), op.getSource(), op.getTarget1(), op.getTarget2());
if (token) {
token->ack();
}
@@ -370,10 +266,7 @@ void FeedHandler::performSplit(FeedToken::UP token, SplitBucketOperation &op) {
void FeedHandler::performJoin(FeedToken::UP token, JoinBucketsOperation &op) {
storeOperation(op);
- _bucketDBHandler->handleJoin(op.getSerialNum(),
- op.getSource1(),
- op.getSource2(),
- op.getTarget());
+ _bucketDBHandler->handleJoin(op.getSerialNum(), op.getSource1(), op.getSource2(), op.getTarget());
if (token) {
token->ack();
}
@@ -392,9 +285,7 @@ FeedHandler::performEof()
{
assert(_writeService.master().isCurrentThread());
_writeService.sync();
- LOG(debug,
- "Visiting done for transaction log domain '%s', eof received",
- _tlsMgr.getDomainName().c_str());
+ LOG(debug, "Visiting done for transaction log domain '%s', eof received", _tlsMgr.getDomainName().c_str());
_owner.onTransactionLogReplayDone();
_tlsMgr.replayDone();
changeToNormalFeedState();
@@ -429,7 +320,7 @@ FeedHandler::performPrune(SerialNum flushedSerial)
tlsPrune(flushedSerial); // throws on error
LOG(debug, "Pruned TLS to token %" PRIu64 ".", flushedSerial);
_owner.onPerformPrune(flushedSerial);
- } catch (const vespalib::IllegalStateException & e) {
+ } catch (const IllegalStateException & e) {
LOG(warning, "FeedHandler::performPrune failed due to '%s'.", e.what());
}
}
@@ -450,7 +341,7 @@ FeedHandler::getFeedState() const
{
FeedState::SP state;
{
- vespalib::LockGuard guard(_feedLock);
+ LockGuard guard(_feedLock);
state = _feedState;
}
return state;
@@ -460,18 +351,15 @@ FeedHandler::getFeedState() const
void
FeedHandler::changeFeedState(FeedState::SP newState)
{
- vespalib::LockGuard guard(_feedLock);
+ LockGuard guard(_feedLock);
changeFeedState(newState, guard);
}
void
-FeedHandler::changeFeedState(FeedState::SP newState,
- const vespalib::LockGuard &)
+FeedHandler::changeFeedState(FeedState::SP newState, const LockGuard &)
{
- LOG(debug,
- "Change feed state from '%s' -> '%s'",
- _feedState->getName().c_str(), newState->getName().c_str());
+ LOG(debug, "Change feed state from '%s' -> '%s'", _feedState->getName().c_str(), newState->getName().c_str());
_feedState = newState;
}
@@ -484,8 +372,8 @@ FeedHandler::FeedHandler(IThreadingService &writeService,
IFeedHandlerOwner &owner,
const IResourceWriteFilter &writeFilter,
IReplayConfig &replayConfig,
- search::transactionlog::Writer *tlsDirectWriter,
- TlsWriter *tls_writer)
+ search::transactionlog::Writer & tlsDirectWriter,
+ TlsWriter * tlsWriter)
: search::transactionlog::TransLogClient::Session::Callback(),
IDocumentMoveHandler(),
IPruneRemovedDocumentsHandler(),
@@ -499,8 +387,8 @@ FeedHandler::FeedHandler(IThreadingService &writeService,
_writeFilter(writeFilter),
_replayConfig(replayConfig),
_tlsMgr(tlsSpec, docTypeName.getName()),
- _tlsMgrWriter(_tlsMgr, tlsDirectWriter),
- _tlsWriter(tls_writer ? *tls_writer : _tlsMgrWriter),
+ _tlsMgrWriter(_tlsMgr, &tlsDirectWriter),
+ _tlsWriter(tlsWriter ? *tlsWriter : _tlsMgrWriter),
_tlsReplayProgress(),
_serialNum(0),
_prunedSerialNum(0),
@@ -513,14 +401,10 @@ FeedHandler::FeedHandler(IThreadingService &writeService,
_syncLock(),
_syncedSerialNum(0),
_allowSync(false)
-{
-}
-
+{ }
-FeedHandler::~FeedHandler()
-{
-}
+FeedHandler::~FeedHandler() = default;
// Called on DocumentDB creatio
void
@@ -542,70 +426,40 @@ FeedHandler::close()
_tlsMgr.close();
}
-
void
-FeedHandler::replayTransactionLog(SerialNum flushedIndexMgrSerial,
- SerialNum flushedSummaryMgrSerial,
- SerialNum oldestFlushedSerial,
- SerialNum newestFlushedSerial,
+FeedHandler::replayTransactionLog(SerialNum flushedIndexMgrSerial, SerialNum flushedSummaryMgrSerial,
+ SerialNum oldestFlushedSerial, SerialNum newestFlushedSerial,
ConfigStore &config_store)
{
(void) newestFlushedSerial;
assert(_activeFeedView);
assert(_bucketDBHandler);
FeedState::SP state = std::make_shared<ReplayTransactionLogState>
- (getDocTypeName(),
- _activeFeedView,
- *_bucketDBHandler,
- _replayConfig,
- config_store);
+ (getDocTypeName(), _activeFeedView, *_bucketDBHandler, _replayConfig, config_store);
changeFeedState(state);
// Resurrected attribute vector might cause oldestFlushedSerial to
// be lower than _prunedSerialNum, so don't warn for now.
(void) oldestFlushedSerial;
assert(_serialNum >= newestFlushedSerial);
- TransactionLogManager::prepareReplay(
- _tlsMgr.getClient(),
- _docTypeName.getName(),
- flushedIndexMgrSerial,
- flushedSummaryMgrSerial,
- config_store);
+ TransactionLogManager::prepareReplay(_tlsMgr.getClient(), _docTypeName.getName(),
+ flushedIndexMgrSerial, flushedSummaryMgrSerial, config_store);
_tlsReplayProgress = _tlsMgr.startReplay(_prunedSerialNum, _serialNum, *this);
}
-
void
FeedHandler::flushDone(SerialNum flushedSerial)
{
// Called by flush worker thread after performing a flush task
- _writeService.master().execute(
- makeTask(
- makeClosure(
- this,
- &FeedHandler::performFlushDone,
- flushedSerial)));
+ _writeService.master().execute(makeLambdaTask([this, flushedSerial]() { performFlushDone(flushedSerial); }));
+
}
void FeedHandler::changeToNormalFeedState() {
changeFeedState(FeedState::SP(new NormalState(*this)));
}
-void
-FeedHandler::waitForReplayDone()
-{
- _tlsMgr.waitForReplayDone();
-}
-
-void FeedHandler::setReplayDone() {
- _tlsMgr.changeReplayDone();
-}
-
-bool FeedHandler::getReplayDone() const {
- return _tlsMgr.getReplayDone();
-}
-
bool
FeedHandler::isDoingReplay() const {
return _tlsMgr.isDoingReplay();
@@ -624,9 +478,7 @@ void FeedHandler::storeOperation(FeedOperation &op) {
void FeedHandler::tlsPrune(SerialNum oldest_to_keep) {
if (!_tlsWriter.erase(oldest_to_keep)) {
- throw vespalib::IllegalStateException(vespalib::make_string(
- "Failed to prune TLS to token %" PRIu64 ".",
- oldest_to_keep));
+ throw IllegalStateException(make_string("Failed to prune TLS to token %" PRIu64 ".", oldest_to_keep));
}
_prunedSerialNum = oldest_to_keep;
}
@@ -644,8 +496,8 @@ void feedOperationRejected(FeedToken *token, const vespalib::string &opType, con
DocTypeName docTypeName, const vespalib::string &rejectMessage)
{
if (token) {
- vespalib::string message = make_string("%s operation rejected for document '%s' of type '%s': '%s'",
- opType.c_str(), docId.c_str(), docTypeName.toString().c_str(), rejectMessage.c_str());
+ auto message = make_string("%s operation rejected for document '%s' of type '%s': '%s'",
+ opType.c_str(), docId.c_str(), docTypeName.toString().c_str(), rejectMessage.c_str());
token->setResult(ResultUP(new ResultType(Result::RESOURCE_EXHAUSTED, message)), false);
token->fail(documentapi::DocumentProtocol::ERROR_REJECTED, message);
}
@@ -721,9 +573,9 @@ FeedHandler::performOperation(FeedToken::UP token, FeedOperation::UP op)
void
FeedHandler::handleOperation(FeedToken token, FeedOperation::UP op)
{
- _writeService.master().execute(
- makeTask(makeClosure(this,
- &FeedHandler::doHandleOperation, token, std::move(op))));
+ _writeService.master().execute(makeLambdaTask([this, token = std::move(token), op = std::move(op)]() mutable {
+ doHandleOperation(std::move(token), std::move(op));
+ }));
}
void
@@ -738,7 +590,6 @@ FeedHandler::handleMove(MoveOperation &op, std::shared_ptr<search::IDestructorCa
_activeFeedView->handleMove(op, std::move(moveDoneCtx));
}
-
void
FeedHandler::heartBeat()
{
@@ -746,15 +597,13 @@ FeedHandler::heartBeat()
_activeFeedView->heartBeat(_serialNum);
}
-
void
FeedHandler::sync()
{
- _writeService.master().execute(makeTask(makeClosure(this, &FeedHandler::performSync)));
+ _writeService.master().execute(makeLambdaTask([this]() { performSync(); }));
_writeService.sync();
}
-
FeedHandler::RPC::Result
FeedHandler::receive(const Packet &packet)
{
@@ -768,34 +617,24 @@ FeedHandler::receive(const Packet &packet)
return wrap->result;
}
-
void
FeedHandler::eof()
{
// Only called by visit, subscription gets one or more inSync() callbacks.
- _writeService.master().execute(makeTask(makeClosure(this, &FeedHandler::performEof)));
-}
-
-
-void
-FeedHandler::inSync()
-{
- // Called by visit callback thread, when in sync
+ _writeService.master().execute(makeLambdaTask([this]() { performEof(); }));
}
-
void
FeedHandler::
performPruneRemovedDocuments(PruneRemovedDocumentsOperation &pruneOp)
{
const LidVectorContext::SP lids_to_remove = pruneOp.getLidsToRemove();
- if (lids_to_remove.get() && lids_to_remove->getNumLids() != 0) {
+ if (lids_to_remove && lids_to_remove->getNumLids() != 0) {
storeOperation(pruneOp);
_activeFeedView->handlePruneRemovedDocuments(pruneOp);
}
}
-
void
FeedHandler::syncTls(SerialNum syncTo)
{
@@ -805,11 +644,7 @@ FeedHandler::syncTls(SerialNum syncTo)
return;
}
if (!_allowSync) {
- throw vespalib::IllegalStateException(
- vespalib::make_string(
- "Attempted to sync TLS to token %" PRIu64
- " at wrong time.",
- syncTo));
+ throw IllegalStateException(make_string("Attempted to sync TLS to token %" PRIu64 " at wrong time.", syncTo));
}
SerialNum syncedTo(_tlsWriter.sync(syncTo));
{
@@ -819,16 +654,4 @@ FeedHandler::syncTls(SerialNum syncTo)
}
}
-void
-FeedHandler::storeRemoteOperation(const FeedOperation &op)
-{
- SerialNum serialNum(op.getSerialNum());
- assert(serialNum != 0);
- if (serialNum > _serialNum) {
- _tlsWriter.storeOperation(op);
- _serialNum = serialNum;
- }
-}
-
-
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/feedhandler.h b/searchcore/src/vespa/searchcore/proton/server/feedhandler.h
index 71f1a47050f..335a86e0279 100644
--- a/searchcore/src/vespa/searchcore/proton/server/feedhandler.h
+++ b/searchcore/src/vespa/searchcore/proton/server/feedhandler.h
@@ -35,12 +35,7 @@ class RemoveOperation;
class SplitBucketOperation;
class UpdateOperation;
-namespace bucketdb
-{
-
-class IBucketDBHandler;
-
-}
+namespace bucketdb { class IBucketDBHandler; }
/**
* Class handling all aspects of feeding for a document database.
@@ -72,11 +67,9 @@ private:
_tls_mgr(tls_mgr),
_tlsDirectWriter(tlsDirectWriter)
{ }
- virtual void storeOperation(const FeedOperation &op) override;
- virtual bool erase(SerialNum oldest_to_keep) override;
-
- virtual SerialNum
- sync(SerialNum syncTo) override;
+ void storeOperation(const FeedOperation &op) override;
+ bool erase(SerialNum oldest_to_keep) override;
+ SerialNum sync(SerialNum syncTo) override;
};
typedef searchcorespi::index::IThreadingService IThreadingService;
@@ -124,21 +117,12 @@ private:
void createNonExistingDocument(FeedTokenUP, const UpdateOperation &op);
void performRemove(FeedTokenUP token, RemoveOperation &op);
-private:
void performGarbageCollect(FeedTokenUP token);
-
- void
- performCreateBucket(FeedTokenUP token, CreateBucketOperation &op);
-
+ void performCreateBucket(FeedTokenUP token, CreateBucketOperation &op);
void performDeleteBucket(FeedTokenUP token, DeleteBucketOperation &op);
void performSplit(FeedTokenUP token, SplitBucketOperation &op);
void performJoin(FeedTokenUP token, JoinBucketsOperation &op);
void performSync();
-
- /**
- * Used during callback from transaction log.
- */
- void handleTransactionLogEntry(const Packet::Entry &entry);
void performEof();
/**
@@ -147,22 +131,9 @@ private:
void performFlushDone(SerialNum flushedSerial);
void performPrune(SerialNum flushedSerial);
-public:
- void considerDelayedPrune();
-
-private:
- /**
- * Returns the current feed state of this feed handler.
- */
FeedStateSP getFeedState() const;
-
- /**
- * Used to handle feed state transitions.
- */
void changeFeedState(FeedStateSP newState);
-
void changeFeedState(FeedStateSP newState, const vespalib::LockGuard &feedGuard);
-
public:
FeedHandler(const FeedHandler &) = delete;
FeedHandler & operator = (const FeedHandler &) = delete;
@@ -186,25 +157,22 @@ public:
IFeedHandlerOwner &owner,
const IResourceWriteFilter &writerFilter,
IReplayConfig &replayConfig,
- search::transactionlog::Writer *writer,
- TlsWriter *tlsWriter = NULL);
+ search::transactionlog::Writer & writer,
+ TlsWriter * tlsWriter = nullptr);
- virtual
- ~FeedHandler();
+ virtual ~FeedHandler();
/**
* Init this feed handler.
*
* @param oldestConfigSerial The serial number of the oldest config snapshot.
*/
- void
- init(SerialNum oldestConfigSerial);
+ void init(SerialNum oldestConfigSerial);
/**
* Close this feed handler and its components.
*/
- void
- close();
+ void close();
/**
* Start replay of the transaction log.
@@ -228,8 +196,7 @@ public:
*
* @param flushedSerial serial number flushed for all relevant flush targets.
*/
- void
- flushDone(SerialNum flushedSerial);
+ void flushDone(SerialNum flushedSerial);
/**
* Used to flip between normal and recovery feed states.
@@ -240,33 +207,22 @@ public:
* Update the active feed view.
* Always called by the master write thread so locking is not needed.
*/
- void
- setActiveFeedView(IFeedView *feedView)
- {
+ void setActiveFeedView(IFeedView *feedView) {
_activeFeedView = feedView;
}
- void
- setBucketDBHandler(bucketdb::IBucketDBHandler *bucketDBHandler)
- {
+ void setBucketDBHandler(bucketdb::IBucketDBHandler *bucketDBHandler) {
_bucketDBHandler = bucketDBHandler;
}
- /**
- * Wait until transaction log is replayed.
- */
- void waitForReplayDone();
-
void setSerialNum(SerialNum serialNum) { _serialNum = serialNum; }
SerialNum incSerialNum() { return ++_serialNum; }
SerialNum getSerialNum() const override { return _serialNum; }
SerialNum getPrunedSerialNum() const { return _prunedSerialNum; }
- void setReplayDone();
- bool getReplayDone() const;
bool isDoingReplay() const;
float getReplayProgress() const {
- return _tlsReplayProgress.get() != nullptr ? _tlsReplayProgress->getProgress() : 0;
+ return _tlsReplayProgress ? _tlsReplayProgress->getProgress() : 0;
}
bool getTransactionLogReplayDone() const;
vespalib::string getDocTypeName() const { return _docTypeName.getName(); }
@@ -275,46 +231,17 @@ public:
void performOperation(FeedTokenUP token, FeedOperationUP op);
void handleOperation(FeedToken token, FeedOperationUP op);
- /**
- * Implements IDocumentMoveHandler
- */
- virtual void handleMove(MoveOperation &op, std::shared_ptr<search::IDestructorCallback> moveDoneCtx) override;
-
- /**
- * Implements IHeartBeatHandler
- */
- virtual void
- heartBeat() override;
-
- virtual void
- sync();
-
- /**
- * Implements TransLogClient::Session::Callback.
- */
- virtual RPC::Result
- receive(const Packet &packet) override;
-
- virtual void
- eof() override;
-
- virtual void
- inSync() override;
-
- /**
- * Implements IPruneRemovedDocumentsHandler
- */
- void
- performPruneRemovedDocuments(PruneRemovedDocumentsOperation &pruneOp) override;
-
- void
- syncTls(SerialNum syncTo);
+ void handleMove(MoveOperation &op, std::shared_ptr<search::IDestructorCallback> moveDoneCtx) override;
+ void heartBeat() override;
- void
- storeRemoteOperation(const FeedOperation &op);
+ virtual void sync();
+ RPC::Result receive(const Packet &packet) override;
- // Implements IOperationStorer
- virtual void storeOperation(FeedOperation &op) override;
+ void eof() override;
+ void performPruneRemovedDocuments(PruneRemovedDocumentsOperation &pruneOp) override;
+ void syncTls(SerialNum syncTo);
+ void storeOperation(FeedOperation &op) override;
+ void considerDelayedPrune();
};
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/i_document_subdb_owner.h b/searchcore/src/vespa/searchcore/proton/server/i_document_subdb_owner.h
index 3fa205bfbfe..fa19e5c77c4 100644
--- a/searchcore/src/vespa/searchcore/proton/server/i_document_subdb_owner.h
+++ b/searchcore/src/vespa/searchcore/proton/server/i_document_subdb_owner.h
@@ -2,6 +2,7 @@
#pragma once
#include <vespa/vespalib/stllike/string.h>
+#include <vespa/document/bucket/bucketspace.h>
#include <memory>
namespace proton {
@@ -15,6 +16,7 @@ class IDocumentSubDBOwner
public:
virtual ~IDocumentSubDBOwner() {}
virtual void syncFeedView() = 0;
+ virtual document::BucketSpace getBucketSpace() const = 0;
virtual vespalib::string getName() const = 0;
virtual uint32_t getDistributionKey() const = 0;
};
diff --git a/searchcore/src/vespa/searchcore/proton/server/i_proton_configurer_owner.h b/searchcore/src/vespa/searchcore/proton/server/i_proton_configurer_owner.h
index e5c018958fe..242e99fffbf 100644
--- a/searchcore/src/vespa/searchcore/proton/server/i_proton_configurer_owner.h
+++ b/searchcore/src/vespa/searchcore/proton/server/i_proton_configurer_owner.h
@@ -2,6 +2,7 @@
#pragma once
+#include <vespa/document/bucket/bucketspace.h>
#include <vespa/vespalib/stllike/string.h>
#include <memory>
@@ -21,6 +22,7 @@ class IProtonConfigurerOwner
public:
virtual ~IProtonConfigurerOwner() { }
virtual IDocumentDBConfigOwner *addDocumentDB(const DocTypeName &docTypeName,
+ document::BucketSpace bucketSpace,
const vespalib::string &configId,
const std::shared_ptr<BootstrapConfig> &bootstrapConfig,
const std::shared_ptr<DocumentDBConfig> &documentDBConfig,
diff --git a/searchcore/src/vespa/searchcore/proton/server/ibucketstatecalculator.h b/searchcore/src/vespa/searchcore/proton/server/ibucketstatecalculator.h
index 70e352b4d41..10c0b194aac 100644
--- a/searchcore/src/vespa/searchcore/proton/server/ibucketstatecalculator.h
+++ b/searchcore/src/vespa/searchcore/proton/server/ibucketstatecalculator.h
@@ -4,14 +4,14 @@
#include <memory>
-namespace document { class BucketId; }
+namespace document { class Bucket; }
namespace proton {
struct IBucketStateCalculator
{
typedef std::shared_ptr<IBucketStateCalculator> SP;
- virtual bool shouldBeReady(const document::BucketId &bucket) const = 0;
+ virtual bool shouldBeReady(const document::Bucket &bucket) const = 0;
virtual bool clusterUp() const = 0;
virtual bool nodeUp() const = 0;
virtual bool nodeInitializing() const = 0;
diff --git a/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h b/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h
index 586c34d8dcc..2251a24c58a 100644
--- a/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/idocumentsubdb.h
@@ -7,10 +7,7 @@
#include <vespa/searchlib/common/serialnum.h>
#include <vespa/searchlib/util/searchable_stats.h>
-
-namespace search::index {
- class Schema;
-}
+namespace search::index { class Schema; }
namespace document { class DocumentId; }
@@ -66,18 +63,15 @@ public:
virtual std::unique_ptr<DocumentSubDbInitializer>
createInitializer(const DocumentDBConfig &configSnapshot, SerialNum configSerialNum,
- const ProtonConfig::Summary &protonSummaryCfg, const ProtonConfig::Index &indexCfg) const = 0;
+ const ProtonConfig::Index &indexCfg) const = 0;
// Called by master thread
virtual void setup(const DocumentSubDbInitializerResult &initResult) = 0;
virtual void initViews(const DocumentDBConfig &configSnapshot, const std::shared_ptr<matching::SessionManager> &sessionManager) = 0;
virtual IReprocessingTask::List
- applyConfig(const DocumentDBConfig &newConfigSnapshot,
- const DocumentDBConfig &oldConfigSnapshot,
- SerialNum serialNum,
- const ReconfigParams &params,
- IDocumentDBReferenceResolver &resolver) = 0;
+ applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params, IDocumentDBReferenceResolver &resolver) = 0;
virtual std::shared_ptr<ISearchHandler> getSearchView() const = 0;
virtual std::shared_ptr<IFeedView> getFeedView() const = 0;
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp
index d19e702c574..c68d794a5e0 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.cpp
@@ -53,6 +53,7 @@ injectBucketMoveJob(MaintenanceController &controller,
IFrozenBucketHandler &fbHandler,
bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
const vespalib::string &docTypeName,
+ document::BucketSpace bucketSpace,
IDocumentMoveHandler &moveHandler,
IBucketModifiedHandler &bucketModifiedHandler,
IClusterStateChangedNotifier &clusterStateChangedNotifier,
@@ -74,7 +75,7 @@ injectBucketMoveJob(MaintenanceController &controller,
bucketStateChangedNotifier,
diskMemUsageNotifier,
blockableConfig,
- docTypeName));
+ docTypeName, bucketSpace));
controller.registerJobInMasterThread(std::move(trackJob(jobTrackers.getBucketMove(),
std::move(bmj))));
}
@@ -91,6 +92,7 @@ MaintenanceJobsInjector::injectJobs(MaintenanceController &controller,
IFrozenBucketHandler &fbHandler,
bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
const vespalib::string &docTypeName,
+ document::BucketSpace bucketSpace,
IPruneRemovedDocumentsHandler &prdHandler,
IDocumentMoveHandler &moveHandler,
IBucketModifiedHandler &bucketModifiedHandler,
@@ -119,7 +121,7 @@ MaintenanceJobsInjector::injectJobs(MaintenanceController &controller,
fbHandler, jobTrackers.getLidSpaceCompact(),
diskMemUsageNotifier, clusterStateChangedNotifier, calc);
}
- injectBucketMoveJob(controller, fbHandler, bucketCreateNotifier, docTypeName, moveHandler, bucketModifiedHandler,
+ injectBucketMoveJob(controller, fbHandler, bucketCreateNotifier, docTypeName, bucketSpace, moveHandler, bucketModifiedHandler,
clusterStateChangedNotifier, bucketStateChangedNotifier, calc, jobTrackers,
diskMemUsageNotifier, config.getBlockableJobConfig());
controller.registerJobInMasterThread(std::make_unique<SampleAttributeUsageJob>
diff --git a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h
index 85fed392ab6..55c218cca6d 100644
--- a/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h
+++ b/searchcore/src/vespa/searchcore/proton/server/maintenance_jobs_injector.h
@@ -39,6 +39,7 @@ struct MaintenanceJobsInjector
IFrozenBucketHandler &fbHandler,
bucketdb::IBucketCreateNotifier &bucketCreateNotifier,
const vespalib::string &docTypeName,
+ document::BucketSpace bucketSpace,
IPruneRemovedDocumentsHandler &prdHandler,
IDocumentMoveHandler &moveHandler,
IBucketModifiedHandler &bucketModifiedHandler,
diff --git a/searchcore/src/vespa/searchcore/proton/server/memoryflush.cpp b/searchcore/src/vespa/searchcore/proton/server/memoryflush.cpp
index 2d56c7e37e9..e80f2645fcf 100644
--- a/searchcore/src/vespa/searchcore/proton/server/memoryflush.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/memoryflush.cpp
@@ -32,8 +32,7 @@ getName(const IFlushHandler & handler, const IFlushTarget & target)
static constexpr uint64_t gibi = UINT64_C(1024) * UINT64_C(1024) * UINT64_C(1024);
uint64_t
-estimateNeededTlsSizeForFlushTarget(const TlsStats &tlsStats,
- SerialNum flushedSerialNum)
+estimateNeededTlsSizeForFlushTarget(const TlsStats &tlsStats, SerialNum flushedSerialNum)
{
if (flushedSerialNum < tlsStats.getFirstSerial()) {
return tlsStats.getNumBytes();
@@ -45,8 +44,7 @@ estimateNeededTlsSizeForFlushTarget(const TlsStats &tlsStats,
if (flushedSerialNum >= tlsStats.getLastSerial()) {
return 0u;
}
- double bytesPerEntry = static_cast<double>(tlsStats.getNumBytes()) /
- numEntries;
+ double bytesPerEntry = static_cast<double>(tlsStats.getNumBytes()) / numEntries;
return bytesPerEntry * (tlsStats.getLastSerial() - flushedSerialNum);
}
@@ -89,13 +87,15 @@ MemoryFlush::MemoryFlush()
MemoryFlush::~MemoryFlush() { }
-MemoryFlush::Config MemoryFlush::getConfig() const
+MemoryFlush::Config
+MemoryFlush::getConfig() const
{
vespalib::LockGuard guard(_lock);
return _config;
}
-void MemoryFlush::setConfig(const Config &config)
+void
+MemoryFlush::setConfig(const Config &config)
{
vespalib::LockGuard guard(_lock);
_config = config;
@@ -116,7 +116,8 @@ getOrderName(MemoryFlush::OrderType &orderType)
return "DEFAULT";
}
-size_t computeGain(const IFlushTarget::DiskGain & gain) {
+size_t
+computeGain(const IFlushTarget::DiskGain & gain) {
return std::max(100000000l, std::max(gain.getBefore(), gain.getAfter()));
}
bool isDiskBloatToHigh(const IFlushTarget::DiskGain & totalDisk,
@@ -131,8 +132,7 @@ bool isDiskBloatToHigh(const IFlushTarget::DiskGain & totalDisk,
FlushContext::List
MemoryFlush::getFlushTargets(const FlushContext::List &targetList,
- const flushengine::TlsStatsMap &
- tlsStatsMap) const
+ const flushengine::TlsStatsMap & tlsStatsMap) const
{
OrderType order(DEFAULT);
uint64_t totalMemory(0);
@@ -219,8 +219,7 @@ MemoryFlush::getFlushTargets(const FlushContext::List &targetList,
bool
-MemoryFlush::CompareTarget::operator()(const FlushContext::SP &lfc,
- const FlushContext::SP &rfc) const
+MemoryFlush::CompareTarget::operator()(const FlushContext::SP &lfc, const FlushContext::SP &rfc) const
{
const IFlushTarget &lhs = *lfc->getTarget();
const IFlushTarget &rhs = *rfc->getTarget();
diff --git a/searchcore/src/vespa/searchcore/proton/server/memoryflush.h b/searchcore/src/vespa/searchcore/proton/server/memoryflush.h
index ae52a497be8..552a406aad9 100644
--- a/searchcore/src/vespa/searchcore/proton/server/memoryflush.h
+++ b/searchcore/src/vespa/searchcore/proton/server/memoryflush.h
@@ -47,17 +47,13 @@ private:
class CompareTarget
{
public:
- CompareTarget(OrderType order,
- const flushengine::TlsStatsMap &tlsStatsMap)
+ CompareTarget(OrderType order, const flushengine::TlsStatsMap &tlsStatsMap)
: _order(order),
_tlsStatsMap(tlsStatsMap)
{ }
- bool
- operator ()(const FlushContext::SP &lfc,
- const FlushContext::SP &rfc) const;
+ bool operator ()(const FlushContext::SP &lfc, const FlushContext::SP &rfc) const;
private:
-
OrderType _order;
const flushengine::TlsStatsMap &_tlsStatsMap;
};
@@ -66,20 +62,16 @@ public:
using SP = std::shared_ptr<MemoryFlush>;
MemoryFlush();
-
- MemoryFlush(const Config &config,
- fastos::TimeStamp startTime = fastos::TimeStamp(fastos::ClockSystem::now()));
+ explicit MemoryFlush(const Config &config) : MemoryFlush(config, fastos::ClockSystem::now()) { }
+ MemoryFlush(const Config &config, fastos::TimeStamp startTime);
~MemoryFlush();
- // Implements IFlushStrategy
- virtual FlushContext::List
+ FlushContext::List
getFlushTargets(const FlushContext::List &targetList,
- const flushengine::TlsStatsMap &
- tlsStatsMap) const override;
+ const flushengine::TlsStatsMap &tlsStatsMap) const override;
void setConfig(const Config &config);
Config getConfig() const;
};
} // namespace proton
-
diff --git a/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp b/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp
index 3e2ebcbad38..19235bb1e23 100644
--- a/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.cpp
@@ -28,10 +28,4 @@ OperationDoneContext::ack()
}
}
-bool
-OperationDoneContext::shouldTrace(uint32_t traceLevel)
-{
- return _token ? _token->shouldTrace(traceLevel) : false;
-}
-
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h b/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h
index a63de0e2fbb..3f7a6436604 100644
--- a/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h
+++ b/searchcore/src/vespa/searchcore/proton/server/operationdonecontext.h
@@ -5,8 +5,7 @@
#include <vespa/searchlib/common/idestructorcallback.h>
#include <vespa/searchcore/proton/feedoperation/feedoperation.h>
-namespace proton
-{
+namespace proton {
class PerDocTypeFeedMetrics;
class FeedToken;
@@ -33,10 +32,7 @@ public:
PerDocTypeFeedMetrics &metrics);
virtual ~OperationDoneContext();
-
FeedToken *getToken() { return _token.get(); }
-
- bool shouldTrace(uint32_t traceLevel);
};
diff --git a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp
index 556ae756f13..541de44d44e 100644
--- a/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/persistencehandlerproxy.cpp
@@ -10,7 +10,6 @@
#include <vespa/searchcore/proton/feedoperation/removeoperation.h>
#include <vespa/searchcore/proton/feedoperation/splitbucketoperation.h>
#include <vespa/searchcore/proton/feedoperation/updateoperation.h>
-#include <vespa/persistence/spi/result.h>
using storage::spi::Bucket;
using storage::spi::Timestamp;
diff --git a/searchcore/src/vespa/searchcore/proton/server/persistenceproviderproxy.cpp b/searchcore/src/vespa/searchcore/proton/server/persistenceproviderproxy.cpp
deleted file mode 100644
index 997dcc8f7a0..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/persistenceproviderproxy.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "persistenceproviderproxy.h"
-
-using storage::spi::PersistenceProvider;
-
-namespace proton {
-
-PersistenceProviderProxy::PersistenceProviderProxy(PersistenceProvider &pp)
- : _pp(pp)
-{
-}
-
-} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/persistenceproviderproxy.h b/searchcore/src/vespa/searchcore/proton/server/persistenceproviderproxy.h
deleted file mode 100644
index 41319111277..00000000000
--- a/searchcore/src/vespa/searchcore/proton/server/persistenceproviderproxy.h
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#pragma once
-
-#include <vespa/persistence/spi/persistenceprovider.h>
-
-namespace proton {
-
-class PersistenceProviderProxy : public storage::spi::PersistenceProvider
-{
-private:
- using Bucket = storage::spi::Bucket;
- using BucketIdListResult = storage::spi::BucketIdListResult;
- using BucketInfoResult = storage::spi::BucketInfoResult;
- using ClusterState = storage::spi::ClusterState;
- using Context = storage::spi::Context;
- using CreateIteratorResult = storage::spi::CreateIteratorResult;
- using GetResult = storage::spi::GetResult;
- using IncludedVersions = storage::spi::IncludedVersions;
- using IterateResult = storage::spi::IterateResult;
- using IteratorId = storage::spi::IteratorId;
- using PartitionId = storage::spi::PartitionId;
- using PartitionStateListResult = storage::spi::PartitionStateListResult;
- using RemoveResult = storage::spi::RemoveResult;
- using Result = storage::spi::Result;
- using Selection = storage::spi::Selection;
- using Timestamp = storage::spi::Timestamp;
- using UpdateResult = storage::spi::UpdateResult;
-
- storage::spi::PersistenceProvider &_pp;
-
-public:
- PersistenceProviderProxy(storage::spi::PersistenceProvider &pp);
-
- virtual ~PersistenceProviderProxy() {}
-
- virtual Result initialize() override {
- return _pp.initialize();
- }
-
- // Implements PersistenceProvider
- virtual PartitionStateListResult getPartitionStates() const override {
- return _pp.getPartitionStates();
- }
-
- virtual BucketIdListResult listBuckets(PartitionId partId) const override {
- return _pp.listBuckets(partId);
- }
-
- virtual Result setClusterState(const ClusterState &state) override {
- return _pp.setClusterState(state);
- }
-
- virtual Result setActiveState(const Bucket &bucket,
- storage::spi::BucketInfo::ActiveState newState) override {
- return _pp.setActiveState(bucket, newState);
- }
-
- virtual BucketInfoResult getBucketInfo(const Bucket &bucket) const override {
- return _pp.getBucketInfo(bucket);
- }
-
- virtual Result put(const Bucket &bucket,
- Timestamp timestamp,
- const storage::spi::DocumentSP& doc,
- Context& context) override {
- return _pp.put(bucket, timestamp, doc, context);
- }
-
- virtual RemoveResult remove(const Bucket &bucket,
- Timestamp timestamp,
- const document::DocumentId &docId,
- Context& context) override {
- return _pp.remove(bucket, timestamp, docId, context);
- }
-
- virtual RemoveResult removeIfFound(const Bucket &bucket,
- Timestamp timestamp,
- const document::DocumentId &docId,
- Context& context) override {
- return _pp.removeIfFound(bucket, timestamp, docId, context);
- }
-
- virtual UpdateResult update(const Bucket &bucket,
- Timestamp timestamp,
- const storage::spi::DocumentUpdateSP& docUpd,
- Context& context) override {
- return _pp.update(bucket, timestamp, docUpd, context);
- }
-
- virtual Result flush(const Bucket &bucket, Context& context) override {
- return _pp.flush(bucket, context);
- }
-
- virtual GetResult get(const Bucket &bucket,
- const document::FieldSet& fieldSet,
- const document::DocumentId &docId,
- Context& context) const override {
- return _pp.get(bucket, fieldSet, docId, context);
- }
-
- virtual CreateIteratorResult createIterator(const Bucket &bucket,
- const document::FieldSet& fieldSet,
- const Selection &selection,
- IncludedVersions versions,
- Context& context) override {
- return _pp.createIterator(bucket, fieldSet, selection, versions,
- context);
- }
-
- virtual IterateResult iterate(IteratorId itrId,
- uint64_t maxByteSize,
- Context& context) const override {
- return _pp.iterate(itrId, maxByteSize, context);
- }
-
- virtual Result destroyIterator(IteratorId itrId, Context& context) override {
- return _pp.destroyIterator(itrId, context);
- }
-
- virtual Result createBucket(const Bucket &bucket, Context& context) override {
- return _pp.createBucket(bucket, context);
- }
-
- virtual Result deleteBucket(const Bucket &bucket, Context& context) override {
- return _pp.deleteBucket(bucket, context);
- }
-
- virtual BucketIdListResult getModifiedBuckets() const override {
- return _pp.getModifiedBuckets();
- }
-
- virtual Result maintain(const Bucket &bucket,
- storage::spi::MaintenanceLevel level) override {
- return _pp.maintain(bucket, level);
- }
-
- virtual Result split(const Bucket &source,
- const Bucket &target1,
- const Bucket &target2,
- Context& context) override {
- return _pp.split(source, target1, target2, context);
- }
-
- virtual Result join(const Bucket &source1,
- const Bucket &source2,
- const Bucket &target,
- Context& context) override {
- return _pp.join(source1, source2, target, context);
- }
-
- virtual Result move(const Bucket &source,
- storage::spi::PartitionId target,
- Context& context) override {
- return _pp.move(source, target, context);
- }
-
- virtual Result removeEntry(const Bucket &bucket,
- Timestamp timestamp,
- Context& context) override {
- return _pp.removeEntry(bucket, timestamp, context);
- }
-};
-
-} // namespace proton
-
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.cpp b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
index d95b0fd44d1..36f19c385f7 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/proton.cpp
@@ -75,20 +75,31 @@ setFS4Compression(const ProtonConfig & proton)
}
DiskMemUsageSampler::Config
-diskMemUsageSamplerConfig(const ProtonConfig &proton)
+diskMemUsageSamplerConfig(const ProtonConfig &proton, const HwInfo &hwInfo)
{
return DiskMemUsageSampler::Config(
proton.writefilter.memorylimit,
proton.writefilter.disklimit,
- proton.writefilter.sampleinterval);
+ proton.writefilter.sampleinterval,
+ hwInfo);
}
+size_t
+deriveCompactionCompressionThreads(const ProtonConfig &proton,
+ const HwInfo::Cpu &cpuInfo) {
+ size_t scaledCores = (size_t)std::ceil(cpuInfo.cores() * proton.feeding.concurrency);
+ size_t threads = std::max(scaledCores, size_t(proton.summary.log.numthreads));
+
+    // We need at least 1 guaranteed free worker in order to ensure progress so #documentdbs + 1 should suffice,
+    // but we will not be cheap and give #documentdbs * 2
+    return std::max(threads, proton.documentdb.size() * 2);
}
-static const vespalib::string CUSTOM_COMPONENT_API_PATH = "/state/v1/custom/component";
+const vespalib::string CUSTOM_COMPONENT_API_PATH = "/state/v1/custom/component";
+
+}
-Proton::ProtonFileHeaderContext::ProtonFileHeaderContext(const Proton &proton_,
- const vespalib::string &creator)
+Proton::ProtonFileHeaderContext::ProtonFileHeaderContext(const Proton &proton_, const vespalib::string &creator)
: _proton(proton_),
_hostName(),
_creator(creator),
@@ -206,7 +217,7 @@ Proton::init()
{
assert( ! _initStarted && ! _initComplete );
_initStarted = true;
- if (_threadPool.NewThread(&_clock, NULL) == NULL) {
+ if (_threadPool.NewThread(&_clock, nullptr) == nullptr) {
throw IllegalStateException("Failed starting thread for the cheap clock");
}
_protonConfigFetcher.start();
@@ -223,22 +234,27 @@ Proton::init(const BootstrapConfig::SP & configSnapshot)
{
assert( _initStarted && ! _initComplete );
const ProtonConfig &protonConfig = configSnapshot->getProtonConfig();
- const auto &samplerCfgArgs = protonConfig.hwinfo.disk;
- HwInfoSampler::Config samplerCfg(samplerCfgArgs.writespeed,
- samplerCfgArgs.slowwritespeedlimit,
- samplerCfgArgs.samplewritesize);
- _hwInfoSampler = std::make_unique<HwInfoSampler>(protonConfig.basedir,
- samplerCfg);
+ const auto &hwDiskCfg = protonConfig.hwinfo.disk;
+ const auto &hwMemoryCfg = protonConfig.hwinfo.memory;
+ const auto &hwCpuCfg = protonConfig.hwinfo.cpu;
+ // TODO: Forward disk size when performance impact of disk usage sampling is verified
+ HwInfoSampler::Config samplerCfg(0,
+ hwDiskCfg.writespeed,
+ hwDiskCfg.slowwritespeedlimit,
+ hwDiskCfg.samplewritesize,
+ hwDiskCfg.shared,
+ hwMemoryCfg.size,
+ hwCpuCfg.cores);
+ _hwInfoSampler = std::make_unique<HwInfoSampler>(protonConfig.basedir, samplerCfg);
_hwInfo = _hwInfoSampler->hwInfo();
setFS4Compression(protonConfig);
_diskMemUsageSampler = std::make_unique<DiskMemUsageSampler>
(protonConfig.basedir,
- diskMemUsageSamplerConfig(protonConfig));
+ diskMemUsageSamplerConfig(protonConfig, _hwInfo));
_metricsEngine.reset(new MetricsEngine());
_metricsEngine->addMetricsHook(_metricsHook);
- _fileHeaderContext.setClusterName(protonConfig.clustername,
- protonConfig.basedir);
+ _fileHeaderContext.setClusterName(protonConfig.clustername, protonConfig.basedir);
_tls.reset(new TLS(_configUri.createWithNewId(protonConfig.tlsconfigid), _fileHeaderContext));
_matchEngine.reset(new MatchEngine(protonConfig.numsearcherthreads,
protonConfig.numthreadspersearch,
@@ -250,8 +266,8 @@ Proton::init(const BootstrapConfig::SP & configSnapshot)
const ProtonConfig::Flush & flush(protonConfig.flush);
switch (flush.strategy) {
case ProtonConfig::Flush::MEMORY: {
- MemoryFlush::SP memoryFlush = std::make_shared<MemoryFlush>(
- MemoryFlushConfigUpdater::convertConfig(flush.memory));
+ auto memoryFlush = std::make_shared<MemoryFlush>(
+ MemoryFlushConfigUpdater::convertConfig(flush.memory), fastos::ClockSystem::now());
_memoryFlushConfigUpdater = std::make_unique<MemoryFlushConfigUpdater>(memoryFlush, flush.memory);
_diskMemUsageSampler->notifier().addDiskMemUsageListener(_memoryFlushConfigUpdater.get());
strategy = memoryFlush;
@@ -283,14 +299,12 @@ Proton::init(const BootstrapConfig::SP & configSnapshot)
vespalib::string fileConfigId;
_warmupExecutor.reset(new vespalib::ThreadStackExecutor(4, 128*1024));
- // We need at least 1 guaranteed free worker in order to ensure progress so #documentsdbs + 1 should suffice,
- // but we will not be cheap and give #documentsdbs * 2
- const size_t summaryThreads = std::max(size_t(protonConfig.summary.log.numthreads), protonConfig.documentdb.size() * 2);
+
+ const size_t summaryThreads = deriveCompactionCompressionThreads(protonConfig, _hwInfo.cpu());
_summaryExecutor.reset(new vespalib::BlockingThreadStackExecutor(summaryThreads, 128*1024, summaryThreads*16));
InitializeThreads initializeThreads;
if (protonConfig.initialize.threads > 0) {
- initializeThreads = std::make_shared<vespalib::ThreadStackExecutor>
- (protonConfig.initialize.threads, 128 * 1024);
+ initializeThreads = std::make_shared<vespalib::ThreadStackExecutor>(protonConfig.initialize.threads, 128 * 1024);
_initDocumentDbsInSequence = (protonConfig.initialize.threads == 1);
}
_protonConfigurer.applyInitialConfig(initializeThreads);
@@ -341,7 +355,7 @@ Proton::applyConfig(const BootstrapConfig::SP & configSnapshot)
protonConfig.search.memory.limiter.minhits);
const DocumentTypeRepo::SP repo = configSnapshot->getDocumentTypeRepoSP();
- _diskMemUsageSampler->setConfig(diskMemUsageSamplerConfig(protonConfig));
+ _diskMemUsageSampler->setConfig(diskMemUsageSamplerConfig(protonConfig, _hwInfo));
if (_memoryFlushConfigUpdater) {
_memoryFlushConfigUpdater->setConfig(protonConfig.flush.memory);
_flushEngine->kick();
@@ -349,9 +363,10 @@ Proton::applyConfig(const BootstrapConfig::SP & configSnapshot)
}
IDocumentDBConfigOwner *
-Proton::addDocumentDB(const DocTypeName & docTypeName,
- const vespalib::string & configId,
- const BootstrapConfig::SP & bootstrapConfig,
+Proton::addDocumentDB(const DocTypeName &docTypeName,
+ document::BucketSpace bucketSpace,
+ const vespalib::string &configId,
+ const BootstrapConfig::SP &bootstrapConfig,
const DocumentDBConfig::SP &documentDBConfig,
InitializeThreads initializeThreads)
{
@@ -361,7 +376,7 @@ Proton::addDocumentDB(const DocTypeName & docTypeName,
if (docType != NULL) {
LOG(info, "Add document database: doctypename(%s), configid(%s)",
docTypeName.toString().c_str(), configId.c_str());
- return addDocumentDB(*docType, bootstrapConfig, documentDBConfig, initializeThreads).get();
+ return addDocumentDB(*docType, bucketSpace, bootstrapConfig, documentDBConfig, initializeThreads).get();
} else {
LOG(warning,
@@ -507,11 +522,12 @@ Proton::getDocumentDB(const document::DocumentType &docType)
DocumentDB::SP
Proton::addDocumentDB(const document::DocumentType &docType,
+ document::BucketSpace bucketSpace,
const BootstrapConfig::SP &bootstrapConfig,
const DocumentDBConfig::SP &documentDBConfig,
InitializeThreads initializeThreads)
{
- const ProtonConfig &config(*bootstrapConfig->getProtonConfigSP());
+ const ProtonConfig &config(bootstrapConfig->getProtonConfig());
std::lock_guard<std::shared_timed_mutex> guard(_mutex);
DocTypeName docTypeName(docType.getName());
@@ -540,11 +556,12 @@ Proton::addDocumentDB(const document::DocumentType &docType,
_queryLimiter,
_clock,
docTypeName,
+ bucketSpace,
config,
*this,
*_warmupExecutor,
*_summaryExecutor,
- _tls->getTransLogServer().get(),
+ *_tls->getTransLogServer(),
*_metricsEngine,
_fileHeaderContext,
std::move(config_store),
@@ -553,10 +570,8 @@ Proton::addDocumentDB(const document::DocumentType &docType,
try {
ret->start();
} catch (vespalib::Exception &e) {
- LOG(warning,
- "Failed to start database for document type '%s'; %s",
- docTypeName.toString().c_str(),
- e.what());
+ LOG(warning, "Failed to start database for document type '%s'; %s",
+ docTypeName.toString().c_str(), e.what());
return DocumentDB::SP();
}
// Wait for replay done on document dbs added due to reconfigs, since engines are already up and running.
@@ -567,18 +582,16 @@ Proton::addDocumentDB(const document::DocumentType &docType,
_metricsEngine->addDocumentDBMetrics(ret->getMetricsCollection());
_metricsEngine->addMetricsHook(ret->getMetricsUpdateHook());
_documentDBMap[docTypeName] = ret;
- if (_persistenceEngine.get() != NULL) {
+ if (_persistenceEngine) {
// Not allowed to get to service layer to call pause().
std::unique_lock<std::shared_timed_mutex> persistenceWGuard(_persistenceEngine->getWLock());
- PersistenceHandlerProxy::SP
- persistenceHandler(new PersistenceHandlerProxy(ret));
+ auto persistenceHandler = std::make_shared<PersistenceHandlerProxy>(ret);
if (!_isInitializing) {
- _persistenceEngine->
- propagateSavedClusterState(*persistenceHandler);
- _persistenceEngine->populateInitialBucketDB(*persistenceHandler);
+ _persistenceEngine->propagateSavedClusterState(*persistenceHandler);
+ _persistenceEngine->populateInitialBucketDB(bucketSpace, *persistenceHandler);
}
// TODO: Fix race with new cluster state setting.
- _persistenceEngine->putHandler(docTypeName, persistenceHandler);
+ _persistenceEngine->putHandler(bucketSpace, docTypeName, persistenceHandler);
}
SearchHandlerProxy::SP searchHandler(new SearchHandlerProxy(ret));
_summaryEngine->putSearchHandler(docTypeName, searchHandler);
@@ -597,8 +610,9 @@ Proton::removeDocumentDB(const DocTypeName &docTypeName)
{
std::lock_guard<std::shared_timed_mutex> guard(_mutex);
DocumentDBMap::iterator it = _documentDBMap.find(docTypeName);
- if (it == _documentDBMap.end())
+ if (it == _documentDBMap.end()) {
return;
+ }
old = it->second;
_documentDBMap.erase(it);
}
@@ -609,10 +623,10 @@ Proton::removeDocumentDB(const DocTypeName &docTypeName)
// Not allowed to get to service layer to call pause().
std::unique_lock<std::shared_timed_mutex> persistenceWguard(_persistenceEngine->getWLock());
IPersistenceHandler::SP oldHandler;
- oldHandler = _persistenceEngine->removeHandler(docTypeName);
+ oldHandler = _persistenceEngine->removeHandler(old->getBucketSpace(), docTypeName);
if (_initComplete && oldHandler) {
// TODO: Fix race with bucket db modifying ops.
- _persistenceEngine->grabExtraModifiedBuckets(*oldHandler);
+ _persistenceEngine->grabExtraModifiedBuckets(old->getBucketSpace(), *oldHandler);
}
}
_persistenceEngine->destroyIterators();
@@ -638,10 +652,7 @@ Proton::ping(MonitorRequest::UP request, MonitorClient & client)
BootstrapConfig::SP configSnapshot = getActiveConfigSnapshot();
const ProtonConfig &protonConfig = configSnapshot->getProtonConfig();
ret.partid = protonConfig.partition;
- if (_matchEngine->isOnline())
- ret.timestamp = 42; // change to flush caches on tld/qrs
- else
- ret.timestamp = 0;
+ ret.timestamp = (_matchEngine->isOnline()) ? 42 : 0;
ret.activeDocs = getNumActiveDocs();
ret.activeDocsRequested = request->reportActiveDocs;
return reply;
@@ -650,7 +661,7 @@ Proton::ping(MonitorRequest::UP request, MonitorClient & client)
bool
Proton::triggerFlush()
{
- if ((_flushEngine.get() == NULL) || ! _flushEngine->HasThread()) {
+ if (!_flushEngine || ! _flushEngine->HasThread()) {
return false;
}
_flushEngine->triggerFlush();
@@ -671,13 +682,12 @@ createPrepareRestartConfig(const ProtonConfig &protonConfig)
bool
Proton::prepareRestart()
{
- if ((_flushEngine.get() == NULL) || ! _flushEngine->HasThread()) {
+ if (!_flushEngine || ! _flushEngine->HasThread()) {
return false;
}
BootstrapConfig::SP configSnapshot = getActiveConfigSnapshot();
- IFlushStrategy::SP strategy =
- std::make_shared<PrepareRestartFlushStrategy>(
- createPrepareRestartConfig(configSnapshot->getProtonConfig()));
+ auto strategy = std::make_shared<PrepareRestartFlushStrategy>(
+ createPrepareRestartConfig(configSnapshot->getProtonConfig()));
_flushEngine->setStrategy(strategy);
return true;
}
@@ -689,7 +699,7 @@ int countOpenFiles()
static const char * const fd_dir_name = "/proc/self/fd";
int count = 0;
DIR *dp = opendir(fd_dir_name);
- if (dp != NULL) {
+ if (dp != nullptr) {
struct dirent *ptr;
while ((ptr = readdir(dp)) != nullptr) {
if (strcmp(".", ptr->d_name) == 0) continue;
@@ -727,10 +737,6 @@ Proton::updateMetrics(const vespalib::MonitorGuard &)
}
}
-namespace {
-const std::string config_id_tag = "CONFIG ID";
-} // namespace
-
void
Proton::waitForInitDone()
{
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton.h b/searchcore/src/vespa/searchcore/proton/server/proton.h
index 0d6c136b244..67f3662549d 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton.h
+++ b/searchcore/src/vespa/searchcore/proton/server/proton.h
@@ -138,6 +138,7 @@ private:
std::shared_ptr<IDocumentDBReferenceRegistry> _documentDBReferenceRegistry;
virtual IDocumentDBConfigOwner *addDocumentDB(const DocTypeName & docTypeName,
+ document::BucketSpace bucketSpace,
const vespalib::string & configid,
const BootstrapConfig::SP & bootstrapConfig,
const std::shared_ptr<DocumentDBConfig> &documentDBConfig,
@@ -190,6 +191,7 @@ public:
DocumentDB::SP
addDocumentDB(const document::DocumentType &docType,
+ document::BucketSpace bucketSpace,
const BootstrapConfig::SP &configSnapshot,
const std::shared_ptr<DocumentDBConfig> &documentDBConfig,
InitializeThreads initializeThreads);
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton_configurer.cpp b/searchcore/src/vespa/searchcore/proton/server/proton_configurer.cpp
index 3e18c05ba0f..436cf3865e5 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton_configurer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/proton_configurer.cpp
@@ -5,11 +5,11 @@
#include "bootstrapconfig.h"
#include "i_proton_configurer_owner.h"
#include "i_document_db_config_owner.h"
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/util/threadstackexecutorbase.h>
#include <future>
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using vespa::config::search::core::ProtonConfig;
namespace proton {
@@ -114,7 +114,9 @@ ProtonConfigurer::applyConfig(std::shared_ptr<ProtonConfigSnapshot> configSnapsh
_owner.applyConfig(bootstrapConfig);
for (const auto &ddbConfig : protonConfig.documentdb) {
DocTypeName docTypeName(ddbConfig.inputdoctypename);
- configureDocumentDB(*configSnapshot, docTypeName, ddbConfig.configid, initializeThreads);
+ // TODO: set bucket space based on config when available
+ document::BucketSpace bucketSpace = document::BucketSpace::placeHolder();
+ configureDocumentDB(*configSnapshot, docTypeName, bucketSpace, ddbConfig.configid, initializeThreads);
}
pruneDocumentDBs(*configSnapshot);
size_t gen = bootstrapConfig->getGeneration();
@@ -124,7 +126,11 @@ ProtonConfigurer::applyConfig(std::shared_ptr<ProtonConfigSnapshot> configSnapsh
}
void
-ProtonConfigurer::configureDocumentDB(const ProtonConfigSnapshot &configSnapshot, const DocTypeName &docTypeName, const vespalib::string &configId, const InitializeThreads &initializeThreads)
+ProtonConfigurer::configureDocumentDB(const ProtonConfigSnapshot &configSnapshot,
+ const DocTypeName &docTypeName,
+ document::BucketSpace bucketSpace,
+ const vespalib::string &configId,
+ const InitializeThreads &initializeThreads)
{
// called by proton executor thread
const auto &bootstrapConfig = configSnapshot.getBootstrapConfig();
@@ -134,7 +140,7 @@ ProtonConfigurer::configureDocumentDB(const ProtonConfigSnapshot &configSnapshot
const auto &documentDBConfig = cfgitr->second;
auto dbitr(_documentDBs.find(docTypeName));
if (dbitr == _documentDBs.end()) {
- auto *newdb = _owner.addDocumentDB(docTypeName, configId, bootstrapConfig, documentDBConfig, initializeThreads);
+ auto *newdb = _owner.addDocumentDB(docTypeName, bucketSpace, configId, bootstrapConfig, documentDBConfig, initializeThreads);
if (newdb != nullptr) {
auto insres = _documentDBs.insert(std::make_pair(docTypeName, newdb));
assert(insres.second);
diff --git a/searchcore/src/vespa/searchcore/proton/server/proton_configurer.h b/searchcore/src/vespa/searchcore/proton/server/proton_configurer.h
index a7f3fd61d0b..149be3a9e62 100644
--- a/searchcore/src/vespa/searchcore/proton/server/proton_configurer.h
+++ b/searchcore/src/vespa/searchcore/proton/server/proton_configurer.h
@@ -2,12 +2,13 @@
#pragma once
+#include "executor_thread_service.h"
#include "i_proton_configurer.h"
+#include <vespa/document/bucket/bucketspace.h>
#include <vespa/searchcore/proton/common/doctypename.h>
#include <vespa/vespalib/net/simple_component_config_producer.h>
#include <map>
#include <mutex>
-#include "executor_thread_service.h"
namespace proton {
@@ -37,8 +38,11 @@ class ProtonConfigurer : public IProtonConfigurer
bool skipConfig(const ProtonConfigSnapshot *configSnapshot, bool initialConfig);
void applyConfig(std::shared_ptr<ProtonConfigSnapshot> configSnapshot,
InitializeThreads initializeThreads, bool initialConfig);
- void configureDocumentDB(const ProtonConfigSnapshot &configSnapshot, const DocTypeName &docTypeName, const vespalib::string &configId, const InitializeThreads &initializeThreads);
+ void configureDocumentDB(const ProtonConfigSnapshot &configSnapshot,
+ const DocTypeName &docTypeName, document::BucketSpace bucketSpace,
+ const vespalib::string &configId, const InitializeThreads &initializeThreads);
void pruneDocumentDBs(const ProtonConfigSnapshot &configSnapshot);
+
public:
ProtonConfigurer(vespalib::ThreadStackExecutorBase &executor,
IProtonConfigurerOwner &owner);
diff --git a/searchcore/src/vespa/searchcore/proton/server/reconfig_params.cpp b/searchcore/src/vespa/searchcore/proton/server/reconfig_params.cpp
index c1d139c7486..377b012de99 100644
--- a/searchcore/src/vespa/searchcore/proton/server/reconfig_params.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/reconfig_params.cpp
@@ -25,7 +25,8 @@ ReconfigParams::configHasChanged() const
_res.importedFieldsChanged ||
_res.tuneFileDocumentDBChanged ||
_res.schemaChanged ||
- _res.maintenanceChanged;
+ _res.maintenanceChanged ||
+ _res.storeChanged;
}
bool
@@ -56,14 +57,14 @@ bool
ReconfigParams::shouldSummaryManagerChange() const
{
return _res.summaryChanged || _res.summarymapChanged || _res.juniperrcChanged
- || _res.documentTypeRepoChanged || _res.documenttypesChanged;
+ || _res.documentTypeRepoChanged || _res.documenttypesChanged || _res.storeChanged;
}
bool
ReconfigParams::shouldSubDbsChange() const
{
return shouldMatchersChange() || shouldAttributeManagerChange() || shouldSummaryManagerChange()
- || _res.documentTypeRepoChanged || _res.documenttypesChanged;
+ || _res.documentTypeRepoChanged || _res.documenttypesChanged || _res.storeChanged;
}
bool
diff --git a/searchcore/src/vespa/searchcore/proton/server/resource_usage_explorer.cpp b/searchcore/src/vespa/searchcore/proton/server/resource_usage_explorer.cpp
index afe9f6b85d3..33b37649ed2 100644
--- a/searchcore/src/vespa/searchcore/proton/server/resource_usage_explorer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/resource_usage_explorer.cpp
@@ -9,11 +9,10 @@ using namespace vespalib::slime;
namespace proton {
void
-convertDiskStatsToSlime(const DiskMemUsageFilter::space_info &stats, Cursor &object)
+convertDiskStatsToSlime(const HwInfo &hwInfo, uint64_t diskUsedSizeBytes, Cursor &object)
{
- object.setLong("capacity", stats.capacity);
- object.setLong("free", stats.free);
- object.setLong("available", stats.available);
+ object.setLong("capacity", hwInfo.disk().sizeBytes());
+ object.setLong("used", diskUsedSizeBytes);
}
void
@@ -39,12 +38,12 @@ ResourceUsageExplorer::get_state(const vespalib::slime::Inserter &inserter, bool
Cursor &disk = object.setObject("disk");
disk.setDouble("usedRatio", _usageFilter.getDiskUsedRatio());
disk.setDouble("usedLimit", config._diskLimit);
- convertDiskStatsToSlime(_usageFilter.getDiskStats(), disk.setObject("stats"));
+ convertDiskStatsToSlime(_usageFilter.getHwInfo(), _usageFilter.getDiskUsedSize(), disk.setObject("stats"));
Cursor &memory = object.setObject("memory");
memory.setDouble("usedRatio", _usageFilter.getMemoryUsedRatio());
memory.setDouble("usedLimit", config._memoryLimit);
- memory.setLong("physicalMemory", _usageFilter.getPhysicalMemory());
+ memory.setLong("physicalMemory", _usageFilter.getHwInfo().memory().sizeBytes());
convertMemoryStatsToSlime(_usageFilter.getMemoryStats(), memory.setObject("stats"));
} else {
object.setDouble("disk", _usageFilter.getDiskUsedRatio());
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp
index 14556c86c18..d3d73b42fbe 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/searchable_feed_view.cpp
@@ -24,24 +24,16 @@ using storage::spi::BucketInfoResult;
using storage::spi::Timestamp;
using vespalib::IllegalStateException;
using vespalib::make_string;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
namespace proton {
-namespace {
-
-bool shouldTrace(StoreOnlyFeedView::OnOperationDoneType onWriteDone, uint32_t traceLevel) {
- return onWriteDone && onWriteDone->shouldTrace(traceLevel);
-}
-
-}
-
SearchableFeedView::Context::Context(const IIndexWriter::SP &indexWriter)
: _indexWriter(indexWriter)
{}
-SearchableFeedView::Context::~Context() {}
+SearchableFeedView::Context::~Context() = default;
SearchableFeedView::SearchableFeedView(const StoreOnlyFeedView::Context &storeOnlyCtx,
const PersistentParams &params,
@@ -52,7 +44,7 @@ SearchableFeedView::SearchableFeedView(const StoreOnlyFeedView::Context &storeOn
_hasIndexedFields(_schema->getNumIndexFields() > 0)
{ }
-SearchableFeedView::~SearchableFeedView() {}
+SearchableFeedView::~SearchableFeedView() = default;
void
SearchableFeedView::performSync()
@@ -98,10 +90,6 @@ SearchableFeedView::performIndexPut(SerialNum serialNum, search::DocumentIdT lid
if (immediateCommit) {
_indexWriter->commit(serialNum, onWriteDone);
}
- if (shouldTrace(onWriteDone, 1)) {
- FeedToken *token = onWriteDone->getToken();
- token->trace(1, "Document indexed = . New Value : " + doc.toString(token->shouldTrace(2)));
- }
}
void
@@ -160,9 +148,6 @@ void
SearchableFeedView::updateIndexedFields(SerialNum serialNum, search::DocumentIdT lid, FutureDoc futureDoc,
bool immediateCommit, OnOperationDoneType onWriteDone)
{
- if (shouldTrace(onWriteDone, 1)) {
- onWriteDone->getToken()->trace(1, "Then we can update the index.");
- }
_writeService.index().execute(
makeLambdaTask([serialNum, lid, futureDoc = std::move(futureDoc),
immediateCommit, onWriteDone = std::move(onWriteDone), this]() mutable {
@@ -189,7 +174,7 @@ SearchableFeedView::performIndexRemove(SerialNum serialNum, search::DocumentIdT
bool immediateCommit, OnRemoveDoneType onWriteDone)
{
assert(_writeService.index().isCurrentThread());
- VLOG(getDebugLevel(lid, NULL),
+ VLOG(getDebugLevel(lid, nullptr),
"database(%s): performIndexRemove: serialNum(%" PRIu64 "), lid(%d)",
_params._docTypeName.toString().c_str(), serialNum, lid);
@@ -197,10 +182,6 @@ SearchableFeedView::performIndexRemove(SerialNum serialNum, search::DocumentIdT
if (immediateCommit) {
_indexWriter->commit(serialNum, onWriteDone);
}
- FeedToken *token = onWriteDone ? onWriteDone->getToken() : nullptr;
- if (token != nullptr && token->shouldTrace(1)) {
- token->trace(1, make_string("Document with lid %d removed.", lid));
- }
}
void
@@ -209,7 +190,7 @@ SearchableFeedView::performIndexRemove(SerialNum serialNum, const LidVector &lid
{
assert(_writeService.index().isCurrentThread());
for (const auto lid : lidsToRemove) {
- VLOG(getDebugLevel(lid, NULL),
+ VLOG(getDebugLevel(lid, nullptr),
"database(%s): performIndexRemove: serialNum(%" PRIu64 "), "
"lid(%d)",
_params._docTypeName.toString().c_str(),
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp
index df1417f0d73..0b67698524b 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.cpp
@@ -112,8 +112,7 @@ createIndexManagerInitializer(const DocumentDBConfig &configSnapshot,
}
void
-SearchableDocSubDB::setupIndexManager(searchcorespi::IIndexManager::SP
- indexManager)
+SearchableDocSubDB::setupIndexManager(searchcorespi::IIndexManager::SP indexManager)
{
_indexMgr = indexManager;
_indexWriter.reset(new IndexWriter(_indexMgr));
@@ -121,20 +120,12 @@ SearchableDocSubDB::setupIndexManager(searchcorespi::IIndexManager::SP
DocumentSubDbInitializer::UP
SearchableDocSubDB::
-createInitializer(const DocumentDBConfig &configSnapshot,
- SerialNum configSerialNum,
- const ProtonConfig::Summary &protonSummaryCfg,
+createInitializer(const DocumentDBConfig &configSnapshot, SerialNum configSerialNum,
const ProtonConfig::Index &indexCfg) const
{
- auto result = Parent::createInitializer(configSnapshot,
- configSerialNum,
- protonSummaryCfg,
- indexCfg);
- auto indexTask = createIndexManagerInitializer(configSnapshot,
- configSerialNum,
- indexCfg,
- result->writableResult().
- writableIndexManager());
+ auto result = Parent::createInitializer(configSnapshot, configSerialNum, indexCfg);
+ auto indexTask = createIndexManagerInitializer(configSnapshot, configSerialNum, indexCfg,
+ result->writableResult().writableIndexManager());
result->addDependency(indexTask);
return result;
}
@@ -155,23 +146,18 @@ reconfigureMatchingMetrics(const RankProfilesConfig &cfg)
for (const auto &profile : cfg.rankprofile) {
search::fef::Properties properties;
for (const auto &property : profile.fef.property) {
- properties.add(property.name,
- property.value);
+ properties.add(property.name, property.value);
}
size_t numDocIdPartitions = search::fef::indexproperties::matching::NumThreadsPerSearch::lookup(properties);
- _metricsWireService.addRankProfile(_metrics,
- profile.name,
- numDocIdPartitions);
+ _metricsWireService.addRankProfile(_metrics, profile.name, numDocIdPartitions);
}
}
IReprocessingTask::List
-SearchableDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot,
- const DocumentDBConfig &oldConfigSnapshot,
- SerialNum serialNum,
- const ReconfigParams &params,
- IDocumentDBReferenceResolver &resolver)
+SearchableDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params, IDocumentDBReferenceResolver &resolver)
{
+ StoreOnlyDocSubDB::reconfigure(newConfigSnapshot.getStoreConfig());
IReprocessingTask::List tasks;
updateLidReuseDelayer(&newConfigSnapshot);
if (params.shouldMatchersChange() && _addMetrics) {
@@ -199,8 +185,7 @@ SearchableDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot,
}
void
-SearchableDocSubDB::initViews(const DocumentDBConfig &configSnapshot,
- const SessionManager::SP &sessionManager)
+SearchableDocSubDB::initViews(const DocumentDBConfig &configSnapshot, const SessionManager::SP &sessionManager)
{
assert(_writeService.master().isCurrentThread());
@@ -208,16 +193,9 @@ SearchableDocSubDB::initViews(const DocumentDBConfig &configSnapshot,
const Schema::SP &schema = configSnapshot.getSchemaSP();
const IIndexManager::SP &indexMgr = getIndexManager();
_constantValueRepo.reconfigure(configSnapshot.getRankingConstants());
- Matchers::SP matchers(_configurer.
- createMatchers(schema,
- configSnapshot.getRankProfilesConfig()).
- release());
- MatchView::SP matchView(new MatchView(matchers,
- indexMgr->getSearchable(),
- attrMgr,
- sessionManager,
- _metaStoreCtx,
- _docIdLimit));
+ Matchers::SP matchers(_configurer.createMatchers(schema, configSnapshot.getRankProfilesConfig()).release());
+ MatchView::SP matchView(new MatchView(matchers, indexMgr->getSearchable(), attrMgr,
+ sessionManager, _metaStoreCtx, _docIdLimit));
_rSearchView.set(SearchView::SP(
new SearchView(
getSummaryManager()->createSummarySetup(
@@ -370,4 +348,11 @@ SearchableDocSubDB::tearDownReferences(IDocumentDBReferenceResolver &resolver)
resolver.teardown(*attrMgr);
}
+void
+SearchableDocSubDB::clearViews() {
+ _rFeedView.clear();
+ _rSearchView.clear();
+ Parent::clearViews();
+}
+
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h
index 91ce868937b..6da1a337cda 100644
--- a/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/searchabledocsubdb.h
@@ -37,8 +37,9 @@ class GidToLidChangeHandler;
*
* This class is used directly by the "0.ready" sub database for handling all ready documents.
*/
-class SearchableDocSubDB : public FastAccessDocSubDB,
- public searchcorespi::IIndexManager::Reconfigurer
+class
+SearchableDocSubDB : public FastAccessDocSubDB,
+ public searchcorespi::IIndexManager::Reconfigurer
{
public:
@@ -96,9 +97,6 @@ private:
void initFeedView(const IAttributeWriter::SP &attrWriter, const DocumentDBConfig &configSnapshot);
void reconfigureMatchingMetrics(const vespa::config::search::RankProfilesConfig &config);
- /**
- * Implements IndexManagerReconfigurer API.
- */
bool reconfigure(vespalib::Closure0<bool>::UP closure) override;
void reconfigureIndexSearchable();
void syncViews();
@@ -113,32 +111,17 @@ public:
~SearchableDocSubDB();
std::unique_ptr<DocumentSubDbInitializer>
- createInitializer(const DocumentDBConfig &configSnapshot,
- SerialNum configSerialNum,
- const vespa::config::search::core::
- ProtonConfig::Summary &protonSummaryCfg,
- const vespa::config::search::core::
- ProtonConfig::Index &indexCfg) const override;
+ createInitializer(const DocumentDBConfig &configSnapshot, SerialNum configSerialNum,
+ const vespa::config::search::core::ProtonConfig::Index &indexCfg) const override;
void setup(const DocumentSubDbInitializerResult &initResult) override;
-
- void
- initViews(const DocumentDBConfig &configSnapshot,
- const SessionManagerSP &sessionManager) override;
+ void initViews(const DocumentDBConfig &configSnapshot, const SessionManagerSP &sessionManager) override;
IReprocessingTask::List
- applyConfig(const DocumentDBConfig &newConfigSnapshot,
- const DocumentDBConfig &oldConfigSnapshot,
- SerialNum serialNum,
- const ReconfigParams &params,
- IDocumentDBReferenceResolver &resolver) override;
-
- void clearViews() override
- {
- _rFeedView.clear();
- _rSearchView.clear();
- Parent::clearViews();
- }
+ applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params, IDocumentDBReferenceResolver &resolver) override;
+
+ void clearViews() override;
proton::IAttributeManager::SP getAttributeManager() const override {
return _rSearchView.get()->getAttributeManager();
@@ -159,10 +142,9 @@ public:
search::SearchableStats getSearchableStats() const override ;
IDocumentRetriever::UP getDocumentRetriever() override;
matching::MatchingStats getMatcherStats(const vespalib::string &rankProfile) const override;
- virtual void close() override;
- virtual std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override;
- virtual void tearDownReferences(IDocumentDBReferenceResolver &resolver) override;
+ void close() override;
+ std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override;
+ void tearDownReferences(IDocumentDBReferenceResolver &resolver) override;
};
} // namespace proton
-
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp
index 5fbae951536..e931a28c6a5 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.cpp
@@ -36,7 +36,7 @@ using search::GrowStrategy;
using search::AttributeGuard;
using search::AttributeVector;
using search::IndexMetaInfo;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using search::TuneFileDocumentDB;
using search::index::Schema;
using search::SerialNum;
@@ -62,7 +62,6 @@ IIndexWriter::SP nullIndexWriter;
}
-
StoreOnlyDocSubDB::Config::Config(const DocTypeName &docTypeName,
const vespalib::string &subName,
const vespalib::string &baseDir,
@@ -152,6 +151,12 @@ StoreOnlyDocSubDB::~StoreOnlyDocSubDB()
_rSummaryMgr.reset();
}
+void
+StoreOnlyDocSubDB::clearViews() {
+ _iFeedView.clear();
+ _iSearchView.clear();
+}
+
size_t
StoreOnlyDocSubDB::getNumDocs() const
{
@@ -232,7 +237,7 @@ StoreOnlyDocSubDB::getNewestFlushedSerial()
initializer::InitializerTask::SP
StoreOnlyDocSubDB::
-createSummaryManagerInitializer(const ProtonConfig::Summary protonSummaryCfg,
+createSummaryManagerInitializer(const search::LogDocumentStore::Config & storeCfg,
const search::TuneFileSummary &tuneFile,
search::IBucketizer::SP bucketizer,
std::shared_ptr<SummaryManager::SP> result) const
@@ -241,9 +246,7 @@ createSummaryManagerInitializer(const ProtonConfig::Summary protonSummaryCfg,
vespalib::string baseDir(_baseDir + "/summary");
return std::make_shared<SummaryManagerInitializer>
(grow, baseDir, getSubDbName(), _docTypeName, _summaryExecutor,
- protonSummaryCfg,
- tuneFile,
- _fileHeaderContext, _tlSyncer, bucketizer, result);
+ storeCfg, tuneFile, _fileHeaderContext, _tlSyncer, bucketizer, result);
}
void
@@ -251,8 +254,7 @@ StoreOnlyDocSubDB::setupSummaryManager(SummaryManager::SP summaryManager)
{
_rSummaryMgr = summaryManager;
_iSummaryMgr = _rSummaryMgr; // Upcast allowed with std::shared_ptr
- _flushedDocumentStoreSerialNum =
- _iSummaryMgr->getBackingStore().lastSyncToken();
+ _flushedDocumentStoreSerialNum = _iSummaryMgr->getBackingStore().lastSyncToken();
_summaryAdapter.reset(new SummaryAdapter(_rSummaryMgr));
}
@@ -274,15 +276,9 @@ createDocumentMetaStoreInitializer(const search::TuneFileAttributes &tuneFile,
// initializers to get hold of document meta store instance in
// their constructors.
*result = std::make_shared<DocumentMetaStoreInitializerResult>
- (std::make_shared<DocumentMetaStore>(_bucketDB, attrFileName,
- grow,
- gidCompare, _subDbType),
- tuneFile);
+ (std::make_shared<DocumentMetaStore>(_bucketDB, attrFileName, grow, gidCompare, _subDbType), tuneFile);
return std::make_shared<documentmetastore::DocumentMetaStoreInitializer>
- (baseDir,
- getSubDbName(),
- _docTypeName.toString(),
- (*result)->documentMetaStore());
+ (baseDir, getSubDbName(), _docTypeName.toString(), (*result)->documentMetaStore());
}
@@ -293,36 +289,25 @@ StoreOnlyDocSubDB::setupDocumentMetaStore(DocumentMetaStoreInitializerResult::SP
vespalib::string name = DocumentMetaStore::getFixedName();
DocumentMetaStore::SP dms(dmsResult->documentMetaStore());
if (dms->isLoaded()) {
- _flushedDocumentMetaStoreSerialNum =
- dms->getStatus().getLastSyncToken();
+ _flushedDocumentMetaStoreSerialNum = dms->getStatus().getLastSyncToken();
}
_bucketDBHandlerInitializer.
- addDocumentMetaStore(dms.get(),
- _flushedDocumentMetaStoreSerialNum);
+ addDocumentMetaStore(dms.get(), _flushedDocumentMetaStoreSerialNum);
_metaStoreCtx.reset(new DocumentMetaStoreContext(dms));
LOG(debug, "Added document meta store '%s' with flushed serial num %lu",
name.c_str(), _flushedDocumentMetaStoreSerialNum);
_dms = dms;
- _dmsFlushTarget.reset(new DocumentMetaStoreFlushTarget(dms,
- _tlsSyncer,
- baseDir,
- dmsResult->tuneFile(),
- _fileHeaderContext, _hwInfo));
+ _dmsFlushTarget = std::make_shared<DocumentMetaStoreFlushTarget>(dms, _tlsSyncer, baseDir, dmsResult->tuneFile(),
+ _fileHeaderContext, _hwInfo);
using Type = IFlushTarget::Type;
using Component = IFlushTarget::Component;
- _dmsShrinkTarget = std::make_shared<ShrinkLidSpaceFlushTarget>
- ("documentmetastore.shrink",
- Type::GC, Component::ATTRIBUTE,
- _flushedDocumentMetaStoreSerialNum,
- _dmsFlushTarget->getLastFlushTime(),
- dms);
+ _dmsShrinkTarget = std::make_shared<ShrinkLidSpaceFlushTarget>("documentmetastore.shrink", Type::GC,
+ Component::ATTRIBUTE, _flushedDocumentMetaStoreSerialNum,
+ _dmsFlushTarget->getLastFlushTime(), dms);
}
DocumentSubDbInitializer::UP
-StoreOnlyDocSubDB::createInitializer(const DocumentDBConfig &configSnapshot,
- SerialNum configSerialNum,
- const ProtonConfig::Summary &
- protonSummaryCfg,
+StoreOnlyDocSubDB::createInitializer(const DocumentDBConfig &configSnapshot, SerialNum configSerialNum,
const ProtonConfig::Index &indexCfg) const
{
(void) configSerialNum;
@@ -331,19 +316,14 @@ StoreOnlyDocSubDB::createInitializer(const DocumentDBConfig &configSnapshot,
(const_cast<StoreOnlyDocSubDB &>(*this),
_writeService.master());
auto dmsInitTask =
- createDocumentMetaStoreInitializer(configSnapshot.
- getTuneFileDocumentDBSP()->_attr,
- result->writableResult().
- writableDocumentMetaStore());
+ createDocumentMetaStoreInitializer(configSnapshot.getTuneFileDocumentDBSP()->_attr,
+ result->writableResult().writableDocumentMetaStore());
result->addDocumentMetaStoreInitTask(dmsInitTask);
auto summaryTask =
- createSummaryManagerInitializer(protonSummaryCfg,
- configSnapshot.
- getTuneFileDocumentDBSP()->_summary,
- result->result().documentMetaStore()->
- documentMetaStore(),
- result->writableResult().
- writableSummaryManager());
+ createSummaryManagerInitializer(configSnapshot.getStoreConfig(),
+ configSnapshot.getTuneFileDocumentDBSP()->_summary,
+ result->result().documentMetaStore()->documentMetaStore(),
+ result->writableResult().writableSummaryManager());
result->addDependency(summaryTask);
summaryTask->addDependency(dmsInitTask);
@@ -367,9 +347,7 @@ StoreOnlyDocSubDB::getFlushTargets()
IFlushTarget::List ret;
for (const auto &target : getFlushTargetsInternal()) {
ret.push_back(IFlushTarget::SP
- (new ThreadedFlushTarget(_writeService.master(),
- _getSerialNum,
- target, _subName)));
+ (new ThreadedFlushTarget(_writeService.master(), _getSerialNum, target, _subName)));
}
return ret;
}
@@ -400,12 +378,8 @@ StoreOnlyDocSubDB::getFeedViewPersistentParams()
{
SerialNum flushedDMSSN(_flushedDocumentMetaStoreSerialNum);
SerialNum flushedDSSN(_flushedDocumentStoreSerialNum);
- return StoreOnlyFeedView::PersistentParams(flushedDMSSN,
- flushedDSSN,
- _docTypeName,
- _metrics.feed,
- _subDbId,
- _subDbType);
+ return StoreOnlyFeedView::PersistentParams(flushedDMSSN, flushedDSSN, _docTypeName,
+ _metrics.feed, _subDbId, _subDbType);
}
void
@@ -460,23 +434,27 @@ StoreOnlyDocSubDB::updateLidReuseDelayer(const LidReuseDelayerConfig &config)
}
IReprocessingTask::List
-StoreOnlyDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot,
- const DocumentDBConfig &oldConfigSnapshot,
- SerialNum serialNum,
- const ReconfigParams &params,
- IDocumentDBReferenceResolver &resolver)
+StoreOnlyDocSubDB::applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params, IDocumentDBReferenceResolver &resolver)
{
(void) oldConfigSnapshot;
(void) serialNum;
(void) params;
(void) resolver;
assert(_writeService.master().isCurrentThread());
+ reconfigure(newConfigSnapshot.getStoreConfig());
initFeedView(newConfigSnapshot);
updateLidReuseDelayer(&newConfigSnapshot);
_owner.syncFeedView();
return IReprocessingTask::List();
}
+void
+StoreOnlyDocSubDB::reconfigure(const search::LogDocumentStore::Config & config)
+{
+ _rSummaryMgr->reconfigure(config);
+}
+
proton::IAttributeManager::SP
StoreOnlyDocSubDB::getAttributeManager() const
{
@@ -579,8 +557,7 @@ StoreOnlyDocSubDB::tearDownReferences(IDocumentDBReferenceResolver &resolver)
void
StoreOnlySubDBFileHeaderContext::
-addTags(vespalib::GenericHeader &header,
- const vespalib::string &name) const
+addTags(vespalib::GenericHeader &header, const vespalib::string &name) const
{
_parentFileHeaderContext.addTags(header, name);
typedef GenericHeader::Tag Tag;
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
index d62a92d33a8..f5038c252a7 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlydocsubdb.h
@@ -98,13 +98,9 @@ public:
const uint32_t _subDbId;
const SubDbType _subDbType;
- Config(const DocTypeName &docTypeName,
- const vespalib::string &subName,
- const vespalib::string &baseDir,
- const search::GrowStrategy &attributeGrow,
- size_t attributeGrowNumDocs,
- uint32_t subDbId,
- SubDbType subDbType);
+ Config(const DocTypeName &docTypeName, const vespalib::string &subName,
+ const vespalib::string &baseDir, const search::GrowStrategy &attributeGrow,
+ size_t attributeGrowNumDocs, uint32_t subDbId, SubDbType subDbType);
~Config();
};
@@ -179,7 +175,7 @@ protected:
std::shared_ptr<IGidToLidChangeHandler> _gidToLidChangeHandler;
std::shared_ptr<initializer::InitializerTask>
- createSummaryManagerInitializer(const ProtonConfig::Summary protonSummaryCfg,
+ createSummaryManagerInitializer(const search::LogDocumentStore::Config & protonSummaryCfg,
const search::TuneFileSummary &tuneFile,
search::IBucketizer::SP bucketizer,
std::shared_ptr<SummaryManager::SP> result) const;
@@ -201,7 +197,7 @@ protected:
using LidReuseDelayerConfig = documentmetastore::LidReuseDelayerConfig;
virtual void updateLidReuseDelayer(const LidReuseDelayerConfig &config);
-
+ void reconfigure(const search::LogDocumentStore::Config & protonConfig);
public:
StoreOnlyDocSubDB(const Config &cfg, const Context &ctx);
~StoreOnlyDocSubDB();
@@ -210,35 +206,21 @@ public:
vespalib::string getName() const override { return _subName; }
std::unique_ptr<DocumentSubDbInitializer>
- createInitializer(const DocumentDBConfig &configSnapshot,
- SerialNum configSerialNum,
- const ProtonConfig::Summary &protonSummaryCfg,
+ createInitializer(const DocumentDBConfig &configSnapshot, SerialNum configSerialNum,
const ProtonConfig::Index &indexCfg) const override;
void setup(const DocumentSubDbInitializerResult &initResult) override;
void initViews(const DocumentDBConfig &configSnapshot, const std::shared_ptr<matching::SessionManager> &sessionManager) override;
IReprocessingTask::List
- applyConfig(const DocumentDBConfig &newConfigSnapshot,
- const DocumentDBConfig &oldConfigSnapshot,
- SerialNum serialNum,
- const ReconfigParams &params,
- IDocumentDBReferenceResolver &resolver) override;
+ applyConfig(const DocumentDBConfig &newConfigSnapshot, const DocumentDBConfig &oldConfigSnapshot,
+ SerialNum serialNum, const ReconfigParams &params, IDocumentDBReferenceResolver &resolver) override;
ISearchHandler::SP getSearchView() const override { return _iSearchView.get(); }
IFeedView::SP getFeedView() const override { return _iFeedView.get(); }
- void clearViews() override {
- _iFeedView.clear();
- _iSearchView.clear();
- }
-
- /**
- * Returns the summary manager that this database uses to manage
- * document summaries of the corresponding document type.
- *
- * @return The summary manager.
- */
+ void clearViews() override;
+
const ISummaryManager::SP &getSummaryManager() const override { return _iSummaryMgr; }
IAttributeManager::SP getAttributeManager() const override;
const std::shared_ptr<searchcorespi::IIndexManager> & getIndexManager() const override;
@@ -259,8 +241,8 @@ public:
IDocumentRetriever::UP getDocumentRetriever() override;
matching::MatchingStats getMatcherStats(const vespalib::string &rankProfile) const override;
void close() override;
- virtual std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override;
- virtual void tearDownReferences(IDocumentDBReferenceResolver &resolver) override;
+ std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override;
+ void tearDownReferences(IDocumentDBReferenceResolver &resolver) override;
};
-} // namespace proton
+}
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp
index c35f942ca35..de3e1648085 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.cpp
@@ -8,13 +8,13 @@
#include "storeonlyfeedview.h"
#include "updatedonecontext.h"
#include "remove_batch_done_context.h"
-#include <vespa/document/datatype/documenttype.h>
#include <vespa/searchcore/proton/common/commit_time_tracker.h>
#include <vespa/searchcore/proton/common/feedtoken.h>
#include <vespa/searchcore/proton/documentmetastore/ilidreusedelayer.h>
#include <vespa/searchcore/proton/metrics/feed_metrics.h>
#include <vespa/searchcore/proton/reference/i_gid_to_lid_change_handler.h>
-#include <vespa/searchlib/common/scheduletaskcallback.h>
+#include <vespa/document/datatype/documenttype.h>
+#include <vespa/document/fieldvalue/document.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/log/log.h>
@@ -26,7 +26,7 @@ using document::DocumentId;
using document::DocumentTypeRepo;
using document::DocumentUpdate;
using search::index::Schema;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using search::IDestructorCallback;
using search::SerialNum;
using storage::spi::BucketInfoResult;
@@ -38,15 +38,11 @@ namespace proton {
namespace {
-bool shouldTrace(StoreOnlyFeedView::OnOperationDoneType onWriteDone, uint32_t traceLevel) {
- return onWriteDone && onWriteDone->shouldTrace(traceLevel);
-}
-
FeedToken::UP dupFeedToken(FeedToken *token)
{
- // If token is not NULL then a new feed token is created, referencing
+ // If token is not nullptr then a new feed token is created, referencing
// same shared state as old token.
- if (token != NULL) {
+ if (token != nullptr) {
return std::make_unique<FeedToken>(*token);
} else {
return FeedToken::UP();
@@ -207,7 +203,7 @@ StoreOnlyFeedView::StoreOnlyFeedView(const Context &ctx, const PersistentParams
_summaryAdapter(ctx._summaryAdapter),
_documentMetaStoreContext(ctx._documentMetaStoreContext),
_repo(ctx._repo),
- _docType(NULL),
+ _docType(nullptr),
_lidReuseDelayer(ctx._lidReuseDelayer),
_commitTimeTracker(ctx._commitTimeTracker),
_pendingLidTracker(),
@@ -311,7 +307,7 @@ StoreOnlyFeedView::internalPut(FeedToken::UP token, const PutOperation &putOp)
assert(!putOp.getValidDbdId(_params._subDbId));
internalRemove(std::move(token), serialNum, std::move(pendingNotifyRemoveDone), putOp.getPrevLid(), putOp.getType(), IDestructorCallback::SP());
}
- if (token.get() != NULL) {
+ if (token) {
token->ack(putOp.getType(), _params._metrics);
}
}
@@ -410,7 +406,7 @@ void StoreOnlyFeedView::heartBeatSummary(SerialNum serialNum) {
void
StoreOnlyFeedView::internalUpdate(FeedToken::UP token, const UpdateOperation &updOp) {
- if (updOp.getUpdate().get() == NULL) {
+ if ( ! updOp.getUpdate()) {
LOG(warning, "database(%s): ignoring invalid update operation",
_params._docTypeName.toString().c_str());
return;
@@ -469,11 +465,6 @@ StoreOnlyFeedView::internalUpdate(FeedToken::UP token, const UpdateOperation &up
});
#pragma GCC diagnostic pop
}
- if (!updateScope._indexedFields && onWriteDone) {
- if (onWriteDone->shouldTrace(1)) {
- token->trace(1, "Partial update applied.");
- }
- }
}
void
@@ -485,38 +476,28 @@ StoreOnlyFeedView::makeUpdatedDocument(SerialNum serialNum, Lid lid, DocumentUpd
const DocumentUpdate & upd = *update;
Document::UP newDoc;
vespalib::nbostream newStream(12345);
- assert(onWriteDone->getToken() == NULL || useDocumentStore(serialNum));
+ assert(onWriteDone->getToken() == nullptr || useDocumentStore(serialNum));
if (useDocumentStore(serialNum)) {
- assert(prevDoc.get() != NULL);
+ assert(prevDoc);
}
- if (prevDoc.get() == NULL) {
+ if (!prevDoc) {
// Replaying, document removed later before summary was flushed.
- assert(onWriteDone->getToken() == NULL);
+ assert(onWriteDone->getToken() == nullptr);
// If we've passed serial number for flushed index then we could
// also check that this operation is marked for ignore by index
// proxy.
} else {
if (upd.getId() == prevDoc->getId()) {
- if (shouldTrace(onWriteDone, 1)) {
- FeedToken *token = onWriteDone->getToken();
- token->trace(1, "The update looks like : " + upd.toString(token->shouldTrace(2)));
- }
+
newDoc = std::move(prevDoc);
if (useDocumentStore(serialNum)) {
- LOG(spam, "Original document :\n%s", newDoc->toXml(" ").c_str());
- LOG(spam, "Update\n%s", upd.toXml().c_str());
upd.applyTo(*newDoc);
- LOG(spam, "Updated document :\n%s", newDoc->toXml(" ").c_str());
newDoc->serialize(newStream);
- LOG(spam, "Serialized new document to a buffer of %zd bytes", newStream.size());
- if (shouldTrace(onWriteDone, 1)) {
- onWriteDone->getToken()->trace(1, "Then we update summary.");
- }
}
} else {
// Replaying, document removed and lid reused before summary
// was flushed.
- assert(onWriteDone->getToken() == NULL && !useDocumentStore(serialNum));
+ assert(onWriteDone->getToken() == nullptr && !useDocumentStore(serialNum));
}
}
promisedDoc.set_value(std::move(newDoc));
@@ -588,7 +569,7 @@ StoreOnlyFeedView::internalRemove(FeedToken::UP token, const RemoveOperation &rm
internalRemove(std::move(token), serialNum, std::move(pendingNotifyRemoveDone), rmOp.getPrevLid(), rmOp.getType(), IDestructorCallback::SP());
}
}
- if (token.get() != NULL) {
+ if (token) {
token->ack(rmOp.getType(), _params._metrics);
}
}
@@ -644,7 +625,7 @@ StoreOnlyFeedView::removeDocuments(const RemoveDocumentsOperation &op, bool remo
{
const SerialNum serialNum = op.getSerialNum();
const LidVectorContext::SP &ctx = op.getLidsToRemove(_params._subDbId);
- if (!ctx.get()) {
+ if (!ctx) {
if (useDocumentMetaStore(serialNum)) {
_metaStore.commit(serialNum, serialNum);
}
diff --git a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h
index fbc8888ac79..021c2b2f8f7 100644
--- a/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h
+++ b/searchcore/src/vespa/searchcore/proton/server/storeonlyfeedview.h
@@ -7,7 +7,6 @@
#include "isummaryadapter.h"
#include "replaypacketdispatcher.h"
#include "searchcontext.h"
-#include "tlcproxy.h"
#include "pendinglidtracker.h"
#include <vespa/searchcore/proton/common/doctypename.h>
#include <vespa/searchcore/proton/common/feeddebugger.h>
@@ -61,10 +60,13 @@ public:
using OnPutDoneType = const std::shared_ptr<PutDoneContext> &;
using OnRemoveDoneType = const std::shared_ptr<RemoveDoneContext> &;
using FeedTokenUP = std::unique_ptr<FeedToken>;
- using FutureDoc = std::future<Document::UP>;
- using PromisedDoc = std::promise<Document::UP>;
+ using FutureDoc = std::future<std::unique_ptr<Document>>;
+ using PromisedDoc = std::promise<std::unique_ptr<Document>>;
using FutureStream = std::future<vespalib::nbostream>;
using PromisedStream = std::promise<vespalib::nbostream>;
+ using DocumentSP = std::shared_ptr<Document>;
+ using DocumentUpdateSP = std::shared_ptr<DocumentUpdate>;
+
using Lid = search::DocumentIdT;
struct Context
@@ -157,7 +159,7 @@ private:
return _writeService.summary();
}
void putSummary(SerialNum serialNum, Lid lid, FutureStream doc, OnOperationDoneType onDone);
- void putSummary(SerialNum serialNum, Lid lid, Document::SP doc, OnOperationDoneType onDone);
+ void putSummary(SerialNum serialNum, Lid lid, DocumentSP doc, OnOperationDoneType onDone);
void removeSummary(SerialNum serialNum, Lid lid, OnWriteDoneType onDone);
void heartBeatSummary(SerialNum serialNum);
@@ -187,7 +189,7 @@ private:
// Ack token early if visibility delay is nonzero
void considerEarlyAck(FeedTokenUP &token, FeedOperation::Type opType);
- void makeUpdatedDocument(SerialNum serialNum, Lid lid, DocumentUpdate::SP upd,
+ void makeUpdatedDocument(SerialNum serialNum, Lid lid, DocumentUpdateSP upd,
OnOperationDoneType onWriteDone,PromisedDoc promisedDoc, PromisedStream promisedStream);
protected:
@@ -199,7 +201,7 @@ private:
virtual void putAttributes(SerialNum serialNum, Lid lid, const Document &doc,
bool immediateCommit, OnPutDoneType onWriteDone);
- virtual void putIndexedFields(SerialNum serialNum, Lid lid, const Document::SP &newDoc,
+ virtual void putIndexedFields(SerialNum serialNum, Lid lid, const DocumentSP &newDoc,
bool immediateCommit, OnOperationDoneType onWriteDone);
virtual UpdateScope getUpdateScope(const DocumentUpdate &upd);
diff --git a/searchcore/src/vespa/searchcore/proton/server/threading_service_config.cpp b/searchcore/src/vespa/searchcore/proton/server/threading_service_config.cpp
new file mode 100644
index 00000000000..6422df9cbd2
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/proton/server/threading_service_config.cpp
@@ -0,0 +1,43 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "threading_service_config.h"
+#include <vespa/searchcore/proton/common/hw_info.h>
+#include <cmath>
+
+namespace proton {
+
+using ProtonConfig = ThreadingServiceConfig::ProtonConfig;
+
+ThreadingServiceConfig::ThreadingServiceConfig(uint32_t indexingThreads_,
+ uint32_t defaultTaskLimit_,
+ uint32_t semiUnboundTaskLimit_)
+ : _indexingThreads(indexingThreads_),
+ _defaultTaskLimit(defaultTaskLimit_),
+ _semiUnboundTaskLimit(semiUnboundTaskLimit_)
+{
+}
+
+namespace {
+
+uint32_t
+calculateIndexingThreads(const ProtonConfig &cfg,
+ const HwInfo::Cpu &cpuInfo)
+{
+ double scaledCores = cpuInfo.cores() * cfg.feeding.concurrency;
+ uint32_t indexingThreads = std::max((uint32_t)std::ceil(scaledCores / 3), (uint32_t)cfg.indexing.threads);
+ return std::max(indexingThreads, 1u);
+}
+
+}
+
+ThreadingServiceConfig
+ThreadingServiceConfig::make(const ProtonConfig &cfg,
+ const HwInfo::Cpu &cpuInfo)
+{
+ uint32_t indexingThreads = calculateIndexingThreads(cfg, cpuInfo);
+ return ThreadingServiceConfig(indexingThreads,
+ cfg.indexing.tasklimit,
+ (cfg.indexing.semiunboundtasklimit / indexingThreads));
+}
+
+}
diff --git a/searchcore/src/vespa/searchcore/proton/server/threading_service_config.h b/searchcore/src/vespa/searchcore/proton/server/threading_service_config.h
new file mode 100644
index 00000000000..67ab4171e80
--- /dev/null
+++ b/searchcore/src/vespa/searchcore/proton/server/threading_service_config.h
@@ -0,0 +1,36 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+#pragma once
+
+#include <vespa/searchcore/config/config-proton.h>
+#include <vespa/searchcore/proton/common/hw_info.h>
+#include <cstdint>
+
+namespace proton {
+
+/**
+ * Config for the threading service used by a documentdb.
+ */
+class ThreadingServiceConfig {
+public:
+ using ProtonConfig = vespa::config::search::core::ProtonConfig;
+
+private:
+ uint32_t _indexingThreads;
+ uint32_t _defaultTaskLimit;
+ uint32_t _semiUnboundTaskLimit;
+
+private:
+ ThreadingServiceConfig(uint32_t indexingThreads_,
+ uint32_t defaultTaskLimit_,
+ uint32_t semiUnboundTaskLimit_);
+
+public:
+ static ThreadingServiceConfig make(const ProtonConfig &cfg,
+ const HwInfo::Cpu &cpuInfo);
+
+ uint32_t indexingThreads() const { return _indexingThreads; }
+ uint32_t defaultTaskLimit() const { return _defaultTaskLimit; }
+ uint32_t semiUnboundTaskLimit() const { return _semiUnboundTaskLimit; }
+};
+
+}
diff --git a/searchcore/src/vespa/searchcore/proton/server/tlcproxy.cpp b/searchcore/src/vespa/searchcore/proton/server/tlcproxy.cpp
index c377be9f73d..215650b6664 100644
--- a/searchcore/src/vespa/searchcore/proton/server/tlcproxy.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/tlcproxy.cpp
@@ -1,7 +1,7 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "tlcproxy.h"
-#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/searchcore/proton/feedoperation/feedoperation.h>
#include <vespa/log/log.h>
LOG_SETUP(".proton.server.tlcproxy");
@@ -17,16 +17,8 @@ void TlcProxy::commit(search::SerialNum serialNum, search::transactionlog::Type
Packet packet;
packet.add(entry);
packet.close();
- if (_tlsDirectWriter != NULL) {
- _tlsDirectWriter->commit(_session.getDomain(), packet);
- } else {
- if (!_session.commit(vespalib::ConstBufferRef(packet.getHandle().c_str(), packet.getHandle().size()))) {
- throw vespalib::IllegalStateException(vespalib::make_string(
- "Failed to commit packet %" PRId64
- " to TLS (type = %d, size = %d).",
- entry.serial(), type, (uint32_t)buf.size()));
- }
- }
+ _tlsDirectWriter.commit(_domain, packet);
+
}
void
diff --git a/searchcore/src/vespa/searchcore/proton/server/tlcproxy.h b/searchcore/src/vespa/searchcore/proton/server/tlcproxy.h
index ccd870e18a1..8e4feb2f354 100644
--- a/searchcore/src/vespa/searchcore/proton/server/tlcproxy.h
+++ b/searchcore/src/vespa/searchcore/proton/server/tlcproxy.h
@@ -1,27 +1,22 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespa/document/fieldvalue/document.h>
-#include <vespa/document/update/documentupdate.h>
-#include <vespa/searchcore/proton/feedoperation/feedoperation.h>
-#include <vespa/searchlib/query/base.h>
-#include <vespa/searchlib/common/serialnum.h>
-#include <vespa/searchlib/transactionlog/translogclient.h>
-#include "fileconfigmanager.h"
-#include <persistence/spi/types.h>
+#include <vespa/searchlib/transactionlog/common.h>
namespace proton {
+class FeedOperation;
+
class TlcProxy {
- search::transactionlog::TransLogClient::Session & _session;
- search::transactionlog::Writer * _tlsDirectWriter;
+ vespalib::string _domain;
+ search::transactionlog::Writer & _tlsDirectWriter;
- void commit( search::SerialNum serialNum, search::transactionlog::Type type, const vespalib::nbostream &buf);
+ void commit(search::SerialNum serialNum, search::transactionlog::Type type, const vespalib::nbostream &buf);
public:
typedef std::unique_ptr<TlcProxy> UP;
- TlcProxy(search::transactionlog::TransLogClient::Session &session, search::transactionlog::Writer * writer = NULL)
- : _session(session), _tlsDirectWriter(writer) {}
+ TlcProxy(const vespalib::string & domain, search::transactionlog::Writer & writer)
+ : _domain(domain), _tlsDirectWriter(writer) {}
void storeOperation(const FeedOperation &op);
};
diff --git a/searchcore/src/vespa/searchcore/proton/server/tlssyncer.cpp b/searchcore/src/vespa/searchcore/proton/server/tlssyncer.cpp
index 9afc818594f..4d104256cbd 100644
--- a/searchcore/src/vespa/searchcore/proton/server/tlssyncer.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/tlssyncer.cpp
@@ -3,11 +3,11 @@
#include "tlssyncer.h"
#include "igetserialnum.h"
#include <vespa/vespalib/util/threadexecutor.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/searchlib/transactionlog/syncproxy.h>
#include <future>
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using search::SerialNum;
namespace proton {
diff --git a/searchcore/src/vespa/searchcore/proton/server/tlswriter.h b/searchcore/src/vespa/searchcore/proton/server/tlswriter.h
index da55e3d8590..0956c0ae011 100644
--- a/searchcore/src/vespa/searchcore/proton/server/tlswriter.h
+++ b/searchcore/src/vespa/searchcore/proton/server/tlswriter.h
@@ -5,6 +5,7 @@
#include <vespa/searchlib/common/serialnum.h>
namespace proton {
+
class FeedOperation;
/**
@@ -15,10 +16,7 @@ struct TlsWriter {
virtual void storeOperation(const FeedOperation &op) = 0;
virtual bool erase(search::SerialNum oldest_to_keep) = 0;
-
- virtual search::SerialNum
- sync(search::SerialNum syncTo) = 0;
+ virtual search::SerialNum sync(search::SerialNum syncTo) = 0;
};
-} // namespace proton
-
+}
diff --git a/searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.cpp b/searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.cpp
index 62ea321efbb..95f31f141d7 100644
--- a/searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.cpp
@@ -23,10 +23,7 @@ TransactionLogManagerBase::TransactionLogManagerBase(
{
}
-TransactionLogManagerBase::~TransactionLogManagerBase()
-{
-}
-
+TransactionLogManagerBase::~TransactionLogManagerBase() = default;
TransactionLogManagerBase::StatusResult
TransactionLogManagerBase::init()
@@ -65,7 +62,6 @@ TransactionLogManagerBase::init()
return res;
}
-
void
TransactionLogManagerBase::internalStartReplay()
{
@@ -77,7 +73,6 @@ TransactionLogManagerBase::internalStartReplay()
_replayStartTime = timer.MilliSecs();
}
-
void
TransactionLogManagerBase::markReplayStarted()
{
@@ -85,7 +80,6 @@ TransactionLogManagerBase::markReplayStarted()
_replayStarted = true;
}
-
void TransactionLogManagerBase::changeReplayDone()
{
vespalib::MonitorGuard guard(_replayMonitor);
@@ -93,7 +87,6 @@ void TransactionLogManagerBase::changeReplayDone()
guard.broadcast();
}
-
void
TransactionLogManagerBase::waitForReplayDone() const
{
@@ -103,7 +96,6 @@ TransactionLogManagerBase::waitForReplayDone() const
}
}
-
void
TransactionLogManagerBase::close()
{
@@ -117,11 +109,6 @@ TransactionLogManagerBase::close()
}
}
-TransLogClient::Subscriber::UP TransactionLogManagerBase::createTlcSubscriber(
- TransLogClient::Session::Callback &callback) {
- return _tlc.createSubscriber(_domainName, callback);
-}
-
TransLogClient::Visitor::UP TransactionLogManagerBase::createTlcVisitor(
TransLogClient::Session::Callback &callback) {
return _tlc.createVisitor(_domainName, callback);
diff --git a/searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.h b/searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.h
index ae2a8356016..1b109d8d9e1 100644
--- a/searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.h
+++ b/searchcore/src/vespa/searchcore/proton/server/transactionlogmanagerbase.h
@@ -51,10 +51,7 @@ public:
void changeReplayDone();
void close();
- TransLogClient::Subscriber::UP createTlcSubscriber(
- TransLogClient::Session::Callback &callback);
- TransLogClient::Visitor::UP createTlcVisitor(
- TransLogClient::Session::Callback &callback);
+ TransLogClient::Visitor::UP createTlcVisitor(TransLogClient::Session::Callback &callback);
void waitForReplayDone() const;
@@ -64,8 +61,7 @@ public:
bool getReplayDone() const;
bool isDoingReplay() const;
void logReplayComplete() const;
- const vespalib::string &getRpcTarget() const
- { return _tlc.getRPCTarget(); }
+ const vespalib::string &getRpcTarget() const { return _tlc.getRPCTarget(); }
void
markReplayStarted();
diff --git a/searchcore/src/vespa/searchcore/proton/test/CMakeLists.txt b/searchcore/src/vespa/searchcore/proton/test/CMakeLists.txt
index e1fb848b1d4..10fd4b9c518 100644
--- a/searchcore/src/vespa/searchcore/proton/test/CMakeLists.txt
+++ b/searchcore/src/vespa/searchcore/proton/test/CMakeLists.txt
@@ -1,6 +1,7 @@
# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(searchcore_test STATIC
SOURCES
+ bucketfactory.cpp
buckethandler.cpp
clusterstatehandler.cpp
documentdb_config_builder.cpp
diff --git a/searchcore/src/vespa/searchcore/proton/common/bucketfactory.cpp b/searchcore/src/vespa/searchcore/proton/test/bucketfactory.cpp
index b80332d97a9..37c0a965f48 100644
--- a/searchcore/src/vespa/searchcore/proton/common/bucketfactory.cpp
+++ b/searchcore/src/vespa/searchcore/proton/test/bucketfactory.cpp
@@ -1,11 +1,13 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "bucketfactory.h"
+#include <vespa/persistence/spi/test.h>
using document::BucketId;
using document::DocumentId;
using storage::spi::Bucket;
using storage::spi::PartitionId;
+using storage::spi::test::makeBucket;
namespace proton {
@@ -21,7 +23,7 @@ BucketFactory::getBucketId(const DocumentId &docId)
Bucket
BucketFactory::getBucket(const DocumentId &docId)
{
- return Bucket(getBucketId(docId), PartitionId(0));
+ return makeBucket(getBucketId(docId));
}
} // namespace proton
diff --git a/searchcore/src/vespa/searchcore/proton/common/bucketfactory.h b/searchcore/src/vespa/searchcore/proton/test/bucketfactory.h
index 0d8f33b436e..0d8f33b436e 100644
--- a/searchcore/src/vespa/searchcore/proton/common/bucketfactory.h
+++ b/searchcore/src/vespa/searchcore/proton/test/bucketfactory.h
diff --git a/searchcore/src/vespa/searchcore/proton/test/bucketstatecalculator.h b/searchcore/src/vespa/searchcore/proton/test/bucketstatecalculator.h
index 314c375ec2c..8a5a46946d8 100644
--- a/searchcore/src/vespa/searchcore/proton/test/bucketstatecalculator.h
+++ b/searchcore/src/vespa/searchcore/proton/test/bucketstatecalculator.h
@@ -3,6 +3,7 @@
#include <vespa/searchcore/proton/server/ibucketstatecalculator.h>
#include <vespa/document/bucket/bucketidlist.h>
+#include <vespa/document/bucket/bucket.h>
namespace proton::test {
@@ -56,9 +57,9 @@ public:
void resetAsked() { _asked.clear(); }
// Implements IBucketStateCalculator
- bool shouldBeReady(const document::BucketId &bucket) const override {
- _asked.push_back(bucket);
- return _ready.count(bucket) == 1;
+ bool shouldBeReady(const document::Bucket &bucket) const override {
+ _asked.push_back(bucket.getBucketId());
+ return _ready.count(bucket.getBucketId()) == 1;
}
bool clusterUp() const override { return _clusterUp; }
diff --git a/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.cpp b/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.cpp
index 22b9d828cac..e085d1c52f8 100644
--- a/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.cpp
+++ b/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.cpp
@@ -21,8 +21,7 @@ using vespa::config::search::SummarymapConfig;
using vespa::config::search::summary::JuniperrcConfig;
using vespa::config::search::ImportedFieldsConfig;
-namespace proton {
-namespace test {
+namespace proton::test {
DocumentDBConfigBuilder::DocumentDBConfigBuilder(int64_t generation,
const search::index::Schema::SP &schema,
@@ -42,6 +41,7 @@ DocumentDBConfigBuilder::DocumentDBConfigBuilder(int64_t generation,
_tuneFileDocumentDB(std::make_shared<TuneFileDocumentDB>()),
_schema(schema),
_maintenance(std::make_shared<DocumentDBMaintenanceConfig>()),
+ _store(),
_configId(configId),
_docTypeName(docTypeName),
_extraConfig()
@@ -64,6 +64,7 @@ DocumentDBConfigBuilder::DocumentDBConfigBuilder(const DocumentDBConfig &cfg)
_tuneFileDocumentDB(cfg.getTuneFileDocumentDBSP()),
_schema(cfg.getSchemaSP()),
_maintenance(cfg.getMaintenanceConfigSP()),
+ _store(cfg.getStoreConfig()),
_configId(cfg.getConfigId()),
_docTypeName(cfg.getDocTypeName()),
_extraConfig(cfg.getExtraConfigs())
@@ -90,10 +91,10 @@ DocumentDBConfigBuilder::build()
_tuneFileDocumentDB,
_schema,
_maintenance,
+ _store,
_configId,
_docTypeName,
_extraConfig);
}
}
-}
diff --git a/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.h b/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.h
index cf20cf08481..87369dab123 100644
--- a/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.h
+++ b/searchcore/src/vespa/searchcore/proton/test/documentdb_config_builder.h
@@ -4,8 +4,7 @@
#include <vespa/searchcore/proton/server/documentdbconfig.h>
-namespace proton {
-namespace test {
+namespace proton::test {
/**
* Builder for instances of DocumentDBConfig used in unit tests.
@@ -26,6 +25,7 @@ private:
search::TuneFileDocumentDB::SP _tuneFileDocumentDB;
search::index::Schema::SP _schema;
DocumentDBConfig::MaintenanceConfigSP _maintenance;
+ search::LogDocumentStore::Config _store;
vespalib::string _configId;
vespalib::string _docTypeName;
config::ConfigSnapshot _extraConfig;
@@ -67,4 +67,3 @@ public:
};
}
-}
diff --git a/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h b/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h
index 4049b9c8c0a..6620795483d 100644
--- a/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h
+++ b/searchcore/src/vespa/searchcore/proton/test/dummy_document_sub_db.h
@@ -15,9 +15,7 @@
#include <vespa/searchcore/proton/persistenceengine/i_document_retriever.h>
#include <vespa/searchcore/proton/server/reconfig_params.h>
-namespace proton {
-
-namespace test {
+namespace proton::test {
struct DummyDocumentSubDb : public IDocumentSubDB
{
@@ -45,19 +43,13 @@ struct DummyDocumentSubDb : public IDocumentSubDB
uint32_t getSubDbId() const override { return _subDbId; }
vespalib::string getName() const override { return "dummysubdb"; }
DocumentSubDbInitializer::UP
- createInitializer(const DocumentDBConfig &,
- SerialNum,
- const vespa::config::search::core::ProtonConfig::
- Summary &,
- const vespa::config::search::core::
- ProtonConfig::Index &) const override {
+ createInitializer(const DocumentDBConfig &, SerialNum,
+ const vespa::config::search::core::ProtonConfig::Index &) const override {
return std::make_unique<DocumentSubDbInitializer>
- (const_cast<DummyDocumentSubDb &>(*this),
- _writeService->master());
+ (const_cast<DummyDocumentSubDb &>(*this), _writeService->master());
}
void setup(const DocumentSubDbInitializerResult &) override {}
- void initViews(const DocumentDBConfig &,
- const proton::matching::SessionManager::SP &) override {}
+ void initViews(const DocumentDBConfig &, const proton::matching::SessionManager::SP &) override {}
IReprocessingTask::List applyConfig(const DocumentDBConfig &, const DocumentDBConfig &,
SerialNum, const ReconfigParams &, IDocumentDBReferenceResolver &) override
{
@@ -93,13 +85,10 @@ struct DummyDocumentSubDb : public IDocumentSubDB
matching::MatchingStats getMatcherStats(const vespalib::string &) const override {
return matching::MatchingStats();
}
- virtual std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override {
+ std::shared_ptr<IDocumentDBReference> getDocumentDBReference() override {
return std::shared_ptr<IDocumentDBReference>();
}
- virtual void tearDownReferences(IDocumentDBReferenceResolver &) override { }
+ void tearDownReferences(IDocumentDBReferenceResolver &) override { }
};
-} // namespace test
-
-} // namespace proton
-
+}
diff --git a/searchcorespi/src/vespa/searchcorespi/index/i_thread_service.h b/searchcorespi/src/vespa/searchcorespi/index/i_thread_service.h
index eefd14f7b01..40d92010c9b 100644
--- a/searchcorespi/src/vespa/searchcorespi/index/i_thread_service.h
+++ b/searchcorespi/src/vespa/searchcorespi/index/i_thread_service.h
@@ -4,8 +4,7 @@
#include <vespa/vespalib/util/runnable.h>
#include <vespa/vespalib/util/threadexecutor.h>
-namespace searchcorespi {
-namespace index {
+namespace searchcorespi::index {
/**
* Interface for a single thread used for write tasks.
@@ -29,7 +28,4 @@ struct IThreadService : public vespalib::ThreadExecutor
};
-} // namespace index
-} // namespace searchcorespi
-
-
+}
diff --git a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp
index a2786c90e95..d18ff417074 100644
--- a/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp
+++ b/searchcorespi/src/vespa/searchcorespi/index/indexmaintainer.cpp
@@ -13,7 +13,7 @@
#include <vespa/searchlib/util/filekit.h>
#include <vespa/vespalib/util/autoclosurecaller.h>
#include <vespa/vespalib/util/closuretask.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <sstream>
#include <vespa/searchcorespi/flush/closureflushtask.h>
#include <vespa/vespalib/util/exceptions.h>
@@ -31,7 +31,7 @@ using search::common::FileHeaderContext;
using search::queryeval::ISourceSelector;
using search::queryeval::Source;
using search::SerialNum;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using std::ostringstream;
using vespalib::makeClosure;
using vespalib::makeTask;
diff --git a/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h b/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h
index 65dd8cc1f3f..bded09143ab 100644
--- a/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h
+++ b/searchcorespi/src/vespa/searchcorespi/index/ithreadingservice.h
@@ -7,8 +7,7 @@
#include <vespa/vespalib/util/syncable.h>
#include <vespa/searchlib/common/isequencedtaskexecutor.h>
-namespace searchcorespi {
-namespace index {
+namespace searchcorespi::index {
/**
* Interface for the thread model used for write tasks.
@@ -62,5 +61,4 @@ struct IThreadingService : public vespalib::Syncable
virtual search::ISequencedTaskExecutor &attributeFieldWriter() = 0;
};
-} // namespace index
-} // namespace searchcorespi
+}
diff --git a/searchlib/CMakeLists.txt b/searchlib/CMakeLists.txt
index 787ca6ed008..d77ec346cef 100644
--- a/searchlib/CMakeLists.txt
+++ b/searchlib/CMakeLists.txt
@@ -225,3 +225,10 @@ vespa_define_module(
src/tests/util/statefile
src/tests/vespa-fileheader-inspect
)
+
+install_java_artifact(searchlib)
+install_fat_java_artifact(searchlib)
+
+vespa_install_script(src/main/sh/vespa-gbdt-converter bin)
+vespa_install_script(src/main/sh/vespa-treenet-converter bin)
+
diff --git a/searchlib/src/apps/docstore/benchmarkdatastore.cpp b/searchlib/src/apps/docstore/benchmarkdatastore.cpp
index 3dcfbbb31cc..620a139d451 100644
--- a/searchlib/src/apps/docstore/benchmarkdatastore.cpp
+++ b/searchlib/src/apps/docstore/benchmarkdatastore.cpp
@@ -99,7 +99,7 @@ BenchmarkDataStoreApp::benchmark(const vespalib::string & dir, size_t numReads,
tuning._randRead.setWantMemoryMap();
}
search::index::DummyFileHeaderContext fileHeaderContext;
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ vespalib::ThreadStackExecutor executor(1, 128*1024);
transactionlog::NoSyncProxy noTlSyncer;
LogDataStore store(executor, dir, config, growStrategy, tuning,
fileHeaderContext,
diff --git a/searchlib/src/apps/docstore/documentstoreinspect.cpp b/searchlib/src/apps/docstore/documentstoreinspect.cpp
index b7f202f1209..40f603c3da1 100644
--- a/searchlib/src/apps/docstore/documentstoreinspect.cpp
+++ b/searchlib/src/apps/docstore/documentstoreinspect.cpp
@@ -105,7 +105,7 @@ DocumentStoreInspectApp::verify(const vespalib::string & dir)
GrowStrategy growStrategy;
TuneFileSummary tuning;
search::index::DummyFileHeaderContext fileHeaderContext;
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ vespalib::ThreadStackExecutor executor(1, 128*1024);
transactionlog::NoSyncProxy noTlSyncer;
LogDataStore store(executor, dir, config, growStrategy, tuning,
diff --git a/searchlib/src/apps/docstore/verifylogdatastore.cpp b/searchlib/src/apps/docstore/verifylogdatastore.cpp
index 1fb83248b5b..498516882c1 100644
--- a/searchlib/src/apps/docstore/verifylogdatastore.cpp
+++ b/searchlib/src/apps/docstore/verifylogdatastore.cpp
@@ -49,7 +49,7 @@ VerifyLogDataStoreApp::verify(const vespalib::string & dir)
GrowStrategy growStrategy;
TuneFileSummary tuning;
search::index::DummyFileHeaderContext fileHeaderContext;
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ vespalib::ThreadStackExecutor executor(1, 128*1024);
transactionlog::NoSyncProxy noTlSyncer;
try {
diff --git a/searchlib/src/apps/tests/biglogtest.cpp b/searchlib/src/apps/tests/biglogtest.cpp
index b5eb76d40a7..788fb855c7f 100644
--- a/searchlib/src/apps/tests/biglogtest.cpp
+++ b/searchlib/src/apps/tests/biglogtest.cpp
@@ -134,7 +134,7 @@ factory<LogDataStore>::factory(std::string dir)
: DioTune(),
_fileHeaderContext(),
_config(),
- _executor(_config.getNumThreads(), 128*1024),
+ _executor(1, 128*1024),
_noTlSyncer(),
_datastore(_executor, dir, _config, GrowStrategy(), tuning, _fileHeaderContext, _noTlSyncer, NULL)
{}
diff --git a/searchlib/src/apps/tests/btreestress_test.cpp b/searchlib/src/apps/tests/btreestress_test.cpp
index 94f2db165d3..ca92ad4865b 100644
--- a/searchlib/src/apps/tests/btreestress_test.cpp
+++ b/searchlib/src/apps/tests/btreestress_test.cpp
@@ -1,10 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/log/log.h>
-LOG_SETUP("btreestress_test");
+
#include <vespa/vespalib/testkit/test_kit.h>
-#include <string>
-#include <set>
-#include <iostream>
#include <vespa/searchlib/btree/btreeroot.h>
#include <vespa/searchlib/btree/btreebuilder.h>
#include <vespa/searchlib/btree/btreenodeallocator.h>
@@ -22,16 +18,17 @@ LOG_SETUP("btreestress_test");
#include <vespa/searchlib/btree/btreestore.hpp>
#include <vespa/searchlib/btree/btreeaggregator.hpp>
-
#include <vespa/vespalib/util/threadstackexecutor.h>
-#include <vespa/searchlib/common/lambdatask.h>
-#include <vespa/searchlib/util/rand48.h>
+#include <vespa/vespalib/util/lambdatask.h>
+
+#include <vespa/log/log.h>
+LOG_SETUP("btreestress_test");
using MyTree = search::btree::BTree<uint32_t, uint32_t>;
using MyTreeIterator = typename MyTree::Iterator;
using MyTreeConstIterator = typename MyTree::ConstIterator;
using GenerationHandler = vespalib::GenerationHandler;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
struct Fixture
{
diff --git a/searchlib/src/apps/tests/memoryindexstress_test.cpp b/searchlib/src/apps/tests/memoryindexstress_test.cpp
index 837952061c8..edb9160a1fb 100644
--- a/searchlib/src/apps/tests/memoryindexstress_test.cpp
+++ b/searchlib/src/apps/tests/memoryindexstress_test.cpp
@@ -16,9 +16,6 @@
#include <vespa/searchlib/common/scheduletaskcallback.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
#include <vespa/document/repo/configbuilder.h>
-#include <vespa/document/datatype/annotationtype.h>
-#include <vespa/document/annotation/annotation.h>
-#include <vespa/document/annotation/span.h>
#include <vespa/document/annotation/spanlist.h>
#include <vespa/document/annotation/spantree.h>
#include <vespa/searchlib/util/rand48.h>
@@ -37,7 +34,7 @@ using document::SpanList;
using document::StringFieldValue;
using search::ScheduleTaskCallback;
using search::index::schema::DataType;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using search::query::Node;
using search::query::SimplePhrase;
using search::query::SimpleStringTerm;
diff --git a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java
index 188ff29d629..6e79877a657 100755
--- a/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java
+++ b/searchlib/src/main/java/com/yahoo/searchlib/rankingexpression/RankingExpression.java
@@ -257,4 +257,18 @@ public class RankingExpression implements Serializable {
return root.evaluate(context);
}
+ /**
+ * Creates a ranking expression from a string
+ *
+ * @throws IllegalStateException if the string is not a valid ranking expression
+ */
+ public static RankingExpression from(String expression) {
+ try {
+ return new RankingExpression(expression);
+ }
+ catch (ParseException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
}
diff --git a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java
index c0157b3c8b6..fe6ac76f32f 100755
--- a/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java
+++ b/searchlib/src/test/java/com/yahoo/searchlib/rankingexpression/RankingExpressionTestCase.java
@@ -1,11 +1,20 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.searchlib.rankingexpression;
+import com.yahoo.searchlib.rankingexpression.evaluation.DoubleValue;
import com.yahoo.searchlib.rankingexpression.parser.ParseException;
+import com.yahoo.searchlib.rankingexpression.rule.ArithmeticNode;
+import com.yahoo.searchlib.rankingexpression.rule.ArithmeticOperator;
import com.yahoo.searchlib.rankingexpression.rule.CompositeNode;
+import com.yahoo.searchlib.rankingexpression.rule.ConstantNode;
+import com.yahoo.searchlib.rankingexpression.rule.Function;
import com.yahoo.searchlib.rankingexpression.rule.IfNode;
import com.yahoo.searchlib.rankingexpression.rule.ExpressionNode;
import com.yahoo.searchlib.rankingexpression.rule.FunctionNode;
+import com.yahoo.searchlib.rankingexpression.rule.ReferenceNode;
+import com.yahoo.searchlib.rankingexpression.rule.TensorFunctionNode;
+import com.yahoo.tensor.functions.Reduce;
+import com.yahoo.tensor.functions.TensorFunction;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -50,6 +59,18 @@ public class RankingExpressionTestCase {
assertParse("query(var1) + query(var2) - query(var3) * (query(var4) / query(var5))", " $var1 + $var2 - $var3 *($var4 / $var5)");
assertParse("if (if (f1.out < query(p1), 0, 1) < if (f2.out < query(p2), 0, 1), f3.out, query(p3))", "if(if(f1.out<$p1,0,1)<if(f2.out<$p2,0,1),f3.out,$p3)");
}
+
+ @Test
+ public void testProgrammaticBuilding() throws ParseException {
+ ReferenceNode input = new ReferenceNode("input");
+ ReferenceNode constant = new ReferenceNode("constant");
+ ArithmeticNode product = new ArithmeticNode(input, ArithmeticOperator.MULTIPLY, constant);
+ Reduce sum = new Reduce(new TensorFunctionNode.TensorFunctionExpressionNode(product), Reduce.Aggregator.sum);
+ RankingExpression expression = new RankingExpression(new TensorFunctionNode(sum));
+
+ RankingExpression expected = new RankingExpression("sum(input * constant)");
+ assertEquals(expected.toString(), expression.toString());
+ }
@Test
public void testLookaheadIndefinitely() throws Exception {
diff --git a/searchlib/src/tests/diskindex/bitvector/bitvector_test.cpp b/searchlib/src/tests/diskindex/bitvector/bitvector_test.cpp
index 223f224aba5..dc352f70706 100644
--- a/searchlib/src/tests/diskindex/bitvector/bitvector_test.cpp
+++ b/searchlib/src/tests/diskindex/bitvector/bitvector_test.cpp
@@ -47,8 +47,7 @@ FieldWriterWrapper::open(const std::string &path,
const common::FileHeaderContext &fileHeaderContext)
{
vespalib::mkdir(path, false);
- _writer.earlyOpen(path, 64, 10000, false, schema, indexId, tuneFileWrite);
- return _writer.lateOpen(tuneFileWrite, fileHeaderContext);
+ return _writer.open(path, 64, 10000, false, schema, indexId, tuneFileWrite, fileHeaderContext);
}
FieldWriterWrapper &
diff --git a/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp b/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp
index 72a50d87821..71467519dbd 100644
--- a/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp
+++ b/searchlib/src/tests/diskindex/fieldwriter/fieldwriter_test.cpp
@@ -9,10 +9,8 @@
#include <vespa/searchlib/index/postinglisthandle.h>
#include <vespa/searchlib/diskindex/zcposocc.h>
#include <vespa/searchlib/diskindex/zcposoccrandread.h>
-#include <vespa/searchlib/diskindex/checkpointfile.h>
#include <vespa/searchlib/index/dummyfileheadercontext.h>
#include <vespa/searchlib/index/schemautil.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/searchlib/diskindex/fieldwriter.h>
#include <vespa/searchlib/diskindex/fieldreader.h>
#include <vespa/vespalib/io/fileutil.h>
@@ -29,7 +27,6 @@ using search::TuneFileRandRead;
using search::TuneFileSeqRead;
using search::TuneFileSeqWrite;
using search::common::FileHeaderContext;
-using search::diskindex::CheckPointFile;
using search::diskindex::DocIdMapping;
using search::diskindex::FieldReader;
using search::diskindex::FieldWriter;
@@ -48,7 +45,6 @@ using search::index::SchemaUtil;
using search::index::schema::CollectionType;
using search::index::schema::DataType;
using search::queryeval::SearchIterator;
-using vespalib::nbostream;
using namespace search::index;
@@ -91,9 +87,6 @@ makeWordString(uint64_t wordNum)
}
-typedef std::shared_ptr<FieldReader> FieldReaderSP;
-typedef std::shared_ptr<FieldWriter> FieldWriterSP;
-
class FieldWriterTest : public FastOS_Application
{
private:
@@ -144,10 +137,10 @@ FieldWriterTest::~FieldWriterTest()
}
-class WrappedFieldWriter : public search::fakedata::CheckPointCallback
+class WrappedFieldWriter
{
public:
- FieldWriterSP _fieldWriter;
+ std::unique_ptr<FieldWriter> _fieldWriter;
private:
bool _dynamicK;
uint32_t _numWordIds;
@@ -164,13 +157,8 @@ public:
uint32_t docIdLimit);
~WrappedFieldWriter();
- void checkPoint() override;
- void earlyOpen();
- void lateOpen();
void open();
void close();
- void writeCheckPoint();
- void readCheckPoint(bool first);
};
WrappedFieldWriter::~WrappedFieldWriter() {}
@@ -194,32 +182,16 @@ WrappedFieldWriter::WrappedFieldWriter(const vespalib::string &namepref,
void
-WrappedFieldWriter::earlyOpen()
-{
- TuneFileSeqWrite tuneFileWrite;
- _fieldWriter.reset(new FieldWriter(_docIdLimit, _numWordIds));
- _fieldWriter->earlyOpen(_namepref,
- minSkipDocs, minChunkDocs, _dynamicK, _schema,
- _indexId,
- tuneFileWrite);
-}
-
-
-void
-WrappedFieldWriter::lateOpen()
+WrappedFieldWriter::open()
{
TuneFileSeqWrite tuneFileWrite;
DummyFileHeaderContext fileHeaderContext;
fileHeaderContext.disableFileName();
- _fieldWriter->lateOpen(tuneFileWrite, fileHeaderContext);
-}
-
-
-void
-WrappedFieldWriter::open()
-{
- earlyOpen();
- lateOpen();
+ _fieldWriter = std::make_unique<FieldWriter>(_docIdLimit, _numWordIds);
+ _fieldWriter->open(_namepref,
+ minSkipDocs, minChunkDocs, _dynamicK, _schema,
+ _indexId,
+ tuneFileWrite, fileHeaderContext);
}
@@ -231,46 +203,10 @@ WrappedFieldWriter::close()
}
-void
-WrappedFieldWriter::writeCheckPoint()
-{
- CheckPointFile chkptfile("chkpt");
- nbostream out;
- _fieldWriter->checkPointWrite(out);
- chkptfile.write(out, DummyFileHeaderContext());
-}
-
-
-void
-WrappedFieldWriter::readCheckPoint(bool first)
-{
- CheckPointFile chkptfile("chkpt");
- nbostream in;
- bool openRes = chkptfile.read(in);
- assert(first || openRes);
- (void) first;
- if (!openRes)
- return;
- _fieldWriter->checkPointRead(in);
- assert(in.empty());
-}
-
-
-void
-WrappedFieldWriter::checkPoint()
-{
- writeCheckPoint();
- _fieldWriter.reset();
- earlyOpen();
- readCheckPoint(false);
- lateOpen();
-}
-
-
-class WrappedFieldReader : public search::fakedata::CheckPointCallback
+class WrappedFieldReader
{
public:
- FieldReaderSP _fieldReader;
+ std::unique_ptr<FieldReader> _fieldReader;
private:
std::string _namepref;
uint32_t _numWordIds;
@@ -286,21 +222,15 @@ public:
uint32_t docIdLimit);
~WrappedFieldReader();
- void earlyOpen();
- void lateOpen();
void open();
void close();
- void writeCheckPoint();
- void readCheckPoint(bool first);
- virtual void checkPoint() override;
};
WrappedFieldReader::WrappedFieldReader(const vespalib::string &namepref,
uint32_t numWordIds,
uint32_t docIdLimit)
- : search::fakedata::CheckPointCallback(),
- _fieldReader(),
+ : _fieldReader(),
_namepref(dirprefix + namepref),
_numWordIds(numWordIds),
_docIdLimit(docIdLimit),
@@ -323,35 +253,17 @@ WrappedFieldReader::~WrappedFieldReader()
{
}
-
-void
-WrappedFieldReader::earlyOpen()
-{
- TuneFileSeqRead tuneFileRead;
- _fieldReader.reset(new FieldReader());
- _fieldReader->earlyOpen(_namepref, tuneFileRead);
-}
-
-
void
-WrappedFieldReader::lateOpen()
+WrappedFieldReader::open()
{
TuneFileSeqRead tuneFileRead;
_wmap.setup(_numWordIds);
_dmap.setup(_docIdLimit);
+ _fieldReader = std::make_unique<FieldReader>();
_fieldReader->setup(_wmap, _dmap);
- _fieldReader->lateOpen(_namepref, tuneFileRead);
-}
-
-
-void
-WrappedFieldReader::open()
-{
- earlyOpen();
- lateOpen();
+ _fieldReader->open(_namepref, tuneFileRead);
}
-
void
WrappedFieldReader::close()
{
@@ -361,42 +273,6 @@ WrappedFieldReader::close()
void
-WrappedFieldReader::writeCheckPoint()
-{
- CheckPointFile chkptfile("chkpt");
- nbostream out;
- _fieldReader->checkPointWrite(out);
- chkptfile.write(out, DummyFileHeaderContext());
-}
-
-
-void
-WrappedFieldReader::readCheckPoint(bool first)
-{
- CheckPointFile chkptfile("chkpt");
- nbostream in;
- bool openRes = chkptfile.read(in);
- assert(first || openRes);
- (void) first;
- if (!openRes)
- return;
- _fieldReader->checkPointRead(in);
- assert(in.empty());
-}
-
-
-void
-WrappedFieldReader::checkPoint()
-{
- writeCheckPoint();
- _fieldReader.reset();
- earlyOpen();
- readCheckPoint(false);
- lateOpen();
-}
-
-
-void
writeField(FakeWordSet &wordSet,
uint32_t docIdLimit,
const std::string &namepref,
@@ -422,16 +298,11 @@ writeField(FakeWordSet &wordSet,
ostate.open();
unsigned int wordNum = 1;
- uint32_t checkPointCheck = 0;
- uint32_t checkPointInterval = 12227;
for (unsigned int wc = 0; wc < wordSet._words.size(); ++wc) {
for (unsigned int wi = 0; wi < wordSet._words[wc].size(); ++wi) {
FakeWord &fw = *wordSet._words[wc][wi];
ostate._fieldWriter->newWord(makeWordString(wordNum));
- fw.dump(ostate._fieldWriter, false,
- checkPointCheck,
- checkPointInterval,
- NULL);
+ fw.dump(*ostate._fieldWriter, false);
++wordNum;
}
}
@@ -450,74 +321,6 @@ writeField(FakeWordSet &wordSet,
void
-writeFieldCheckPointed(FakeWordSet &wordSet,
- uint32_t docIdLimit,
- const std::string &namepref,
- bool dynamicK)
-{
- const char *dynamicKStr = dynamicK ? "true" : "false";
-
- FastOS_Time tv;
- double before;
- double after;
- bool first = true;
-
- LOG(info,
- "enter writeFieldCheckPointed, "
- "namepref=%s, dynamicK=%s",
- namepref.c_str(),
- dynamicKStr);
- tv.SetNow();
- before = tv.Secs();
-
- unsigned int wordNum = 1;
- uint32_t checkPointCheck = 0;
- uint32_t checkPointInterval = 12227;
- for (unsigned int wc = 0; wc < wordSet._words.size(); ++wc) {
- for (unsigned int wi = 0; wi < wordSet._words[wc].size(); ++wi) {
- FakeWord &fw = *wordSet._words[wc][wi];
-
- WrappedFieldWriter ostate(namepref,
- dynamicK,
- wordSet.getNumWords(), docIdLimit);
- ostate.earlyOpen();
- ostate.readCheckPoint(first);
- first = false;
- ostate.lateOpen();
- ostate._fieldWriter->newWord(makeWordString(wordNum));
- fw.dump(ostate._fieldWriter, false,
- checkPointCheck,
- checkPointInterval,
- &ostate);
- ostate.writeCheckPoint();
- ++wordNum;
- }
- }
- do {
- WrappedFieldWriter ostate(namepref,
- dynamicK,
- wordSet.getNumWords(), docIdLimit);
- ostate.earlyOpen();
- ostate.readCheckPoint(first);
- ostate.lateOpen();
- ostate.close();
- } while (0);
- CheckPointFile dropper("chkpt");
- dropper.remove();
-
- tv.SetNow();
- after = tv.Secs();
- LOG(info,
- "leave writeFieldCheckPointed, "
- "namepref=%s, dynamicK=%s"
- " elapsed=%10.6f",
- namepref.c_str(),
- dynamicKStr,
- after - before);
-}
-
-
-void
readField(FakeWordSet &wordSet,
uint32_t docIdLimit,
const std::string &namepref,
@@ -545,8 +348,6 @@ readField(FakeWordSet &wordSet,
TermFieldMatchData mdfield1;
unsigned int wordNum = 1;
- uint32_t checkPointCheck = 0;
- uint32_t checkPointInterval = 12227;
for (unsigned int wc = 0; wc < wordSet._words.size(); ++wc) {
for (unsigned int wi = 0; wi < wordSet._words[wc].size(); ++wi) {
FakeWord &fw = *wordSet._words[wc][wi];
@@ -554,9 +355,8 @@ readField(FakeWordSet &wordSet,
TermFieldMatchDataArray tfmda;
tfmda.add(&mdfield1);
- fw.validate(istate._fieldReader, wordNum,
- tfmda, verbose,
- checkPointCheck, checkPointInterval, &istate);
+ fw.validate(*istate._fieldReader, wordNum,
+ tfmda, verbose);
++wordNum;
}
}
@@ -564,8 +364,6 @@ readField(FakeWordSet &wordSet,
istate.close();
tv.SetNow();
after = tv.Secs();
- CheckPointFile dropper("chkpt");
- dropper.remove();
LOG(info,
"leave readField, "
"namepref=%s, dynamicK=%s"
@@ -762,35 +560,23 @@ void
testFieldWriterVariants(FakeWordSet &wordSet,
uint32_t docIdLimit, bool verbose)
{
- CheckPointFile dropper("chkpt");
- dropper.remove();
disableSkip();
writeField(wordSet, docIdLimit, "new4", true);
readField(wordSet, docIdLimit, "new4", true, verbose);
readField(wordSet, docIdLimit, "new4", true, verbose);
- writeFieldCheckPointed(wordSet, docIdLimit, "new6", true);
writeField(wordSet, docIdLimit, "new5", false);
readField(wordSet, docIdLimit, "new5", false, verbose);
- writeFieldCheckPointed(wordSet, docIdLimit, "new7", false);
enableSkip();
writeField(wordSet, docIdLimit, "newskip4", true);
readField(wordSet, docIdLimit, "newskip4", true, verbose);
- writeFieldCheckPointed(wordSet, docIdLimit, "newskip6",
- true);
writeField(wordSet, docIdLimit, "newskip5", false);
readField(wordSet, docIdLimit, "newskip5", false, verbose);
- writeFieldCheckPointed(wordSet, docIdLimit, "newskip7",
- false);
enableSkipChunks();
writeField(wordSet, docIdLimit, "newchunk4", true);
readField(wordSet, docIdLimit, "newchunk4", true, verbose);
- writeFieldCheckPointed(wordSet, docIdLimit, "newchunk6",
- true);
writeField(wordSet, docIdLimit, "newchunk5", false);
readField(wordSet, docIdLimit,
"newchunk5",false, verbose);
- writeFieldCheckPointed(wordSet, docIdLimit, "newchunk7",
- false);
disableSkip();
fusionField(wordSet.getNumWords(),
docIdLimit,
@@ -855,8 +641,6 @@ void
testFieldWriterVariantsWithHighLids(FakeWordSet &wordSet, uint32_t docIdLimit,
bool verbose)
{
- CheckPointFile dropper("chkpt");
- dropper.remove();
disableSkip();
writeField(wordSet, docIdLimit, "hlid4", true);
readField(wordSet, docIdLimit, "hlid4", true, verbose);
diff --git a/searchlib/src/tests/docstore/document_store/document_store_test.cpp b/searchlib/src/tests/docstore/document_store/document_store_test.cpp
index e8c2173a87f..2ab24a00557 100644
--- a/searchlib/src/tests/docstore/document_store/document_store_test.cpp
+++ b/searchlib/src/tests/docstore/document_store/document_store_test.cpp
@@ -1,6 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/searchlib/docstore/documentstore.h>
+#include <vespa/searchlib/docstore/logdocumentstore.h>
#include <vespa/searchlib/docstore/cachestats.h>
#include <vespa/document/repo/documenttyperepo.h>
@@ -28,19 +28,19 @@ struct NullDataStore : IDataStore {
fastos::TimeStamp getLastFlushTime() const override { return fastos::TimeStamp(); }
void accept(IDataStoreVisitor &, IDataStoreVisitorProgress &, bool) override { }
double getVisitCost() const override { return 1.0; }
- virtual DataStoreStorageStats getStorageStats() const override {
+ DataStoreStorageStats getStorageStats() const override {
return DataStoreStorageStats(0, 0, 0.0, 0, 0, 0);
}
- virtual MemoryUsage getMemoryUsage() const override { return MemoryUsage(); }
- virtual std::vector<DataStoreFileChunkStats>
+ MemoryUsage getMemoryUsage() const override { return MemoryUsage(); }
+ std::vector<DataStoreFileChunkStats>
getFileChunkStats() const override {
std::vector<DataStoreFileChunkStats> result;
return result;
}
- virtual void compactLidSpace(uint32_t wantedDocLidLimit) override { (void) wantedDocLidLimit; }
- virtual bool canShrinkLidSpace() const override { return false; }
- virtual size_t getEstimatedShrinkLidSpaceGain() const override { return 0; }
- virtual void shrinkLidSpace() override {}
+ void compactLidSpace(uint32_t wantedDocLidLimit) override { (void) wantedDocLidLimit; }
+ bool canShrinkLidSpace() const override { return false; }
+ size_t getEstimatedShrinkLidSpaceGain() const override { return 0; }
+ void shrinkLidSpace() override {}
};
TEST_FFF("require that uncache docstore lookups are counted",
@@ -61,4 +61,23 @@ TEST_FFF("require that cached docstore lookups are counted",
EXPECT_EQUAL(1u, f3.getCacheStats().misses);
}
+TEST("require that DocumentStore::Config equality operator detects inequality") {
+ using C = DocumentStore::Config;
+ EXPECT_TRUE(C() == C());
+ EXPECT_TRUE(C(CompressionConfig::NONE, 100000, 100) == C(CompressionConfig::NONE, 100000, 100));
+ EXPECT_FALSE(C(CompressionConfig::NONE, 100000, 100) == C(CompressionConfig::NONE, 100000, 99));
+ EXPECT_FALSE(C(CompressionConfig::NONE, 100000, 100) == C(CompressionConfig::NONE, 100001, 100));
+ EXPECT_FALSE(C(CompressionConfig::NONE, 100000, 100) == C(CompressionConfig::LZ4, 100000, 100));
+}
+
+TEST("require that LogDocumentStore::Config equality operator detects inequality") {
+ using C = LogDocumentStore::Config;
+ using LC = LogDataStore::Config;
+ using DC = DocumentStore::Config;
+ EXPECT_TRUE(C() == C());
+ EXPECT_FALSE(C() != C());
+ EXPECT_FALSE(C(DC(CompressionConfig::NONE, 100000, 100), LC()) == C());
+ EXPECT_FALSE(C(DC(), LC().setMaxBucketSpread(7)) == C());
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchlib/src/tests/docstore/document_store_visitor/document_store_visitor_test.cpp b/searchlib/src/tests/docstore/document_store_visitor/document_store_visitor_test.cpp
index 1c7053500c7..247ee134854 100644
--- a/searchlib/src/tests/docstore/document_store_visitor/document_store_visitor_test.cpp
+++ b/searchlib/src/tests/docstore/document_store_visitor/document_store_visitor_test.cpp
@@ -28,8 +28,7 @@ using vespalib::compression::CompressionConfig;
using vespalib::asciistream;
using index::DummyFileHeaderContext;
-namespace
-{
+namespace {
const string doc_type_name = "test";
const string header_name = doc_type_name + ".header";
@@ -80,16 +79,8 @@ class MyTlSyncer : public transactionlog::SyncProxy
SerialNum _syncedTo;
public:
- MyTlSyncer()
- : _syncedTo(0)
- {
- }
-
- void
- sync(SerialNum syncTo) override
- {
- _syncedTo = syncTo;
- }
+ MyTlSyncer() : _syncedTo(0) {}
+ void sync(SerialNum syncTo) override { _syncedTo = syncTo; }
};
@@ -106,9 +97,7 @@ public:
MyVisitorBase(DocumentTypeRepo &repo, uint32_t docIdLimit, bool before);
};
-MyVisitorBase::MyVisitorBase(DocumentTypeRepo &repo,
- uint32_t docIdLimit,
- bool before)
+MyVisitorBase::MyVisitorBase(DocumentTypeRepo &repo, uint32_t docIdLimit, bool before)
: _repo(repo),
_visitCount(0u),
_visitRmCount(0u),
@@ -125,11 +114,8 @@ class MyVisitor : public MyVisitorBase,
public:
using MyVisitorBase::MyVisitorBase;
- virtual void
- visit(uint32_t lid, const Document &doc) override;
-
- virtual void
- visit(uint32_t lid) override;
+ void visit(uint32_t lid, const Document &doc) override;
+ void visit(uint32_t lid) override;
};
@@ -184,11 +170,8 @@ public:
MyVisitorProgress();
- virtual void
- updateProgress(double progress) override;
-
- virtual double
- getProgress() const;
+ void updateProgress(double progress) override;
+ double getProgress() const;
};
@@ -232,53 +215,28 @@ struct Fixture
BitVector::UP _valid;
Fixture();
-
~Fixture();
- Document::UP
- makeDoc(uint32_t i);
-
- void
- resetDocStore();
-
- void
- mkdir();
-
- void
- rmdir();
-
- void
- setDocIdLimit(uint32_t docIdLimit);
-
- void
- put(const Document &doc, uint32_t lid);
-
- void
- remove(uint32_t lid);
-
- void
- flush();
-
- void
- populate(uint32_t low, uint32_t high, uint32_t docIdLimit);
-
- void
- applyRemoves(uint32_t rmDocs);
-
- void
- checkRemovePostCond(uint32_t numDocs,
- uint32_t docIdLimit,
- uint32_t rmDocs,
- bool before);
+ Document::UP makeDoc(uint32_t i);
+ void resetDocStore();
+ void mkdir();
+ void rmdir();
+ void setDocIdLimit(uint32_t docIdLimit);
+ void put(const Document &doc, uint32_t lid);
+ void remove(uint32_t lid);
+ void flush();
+ void populate(uint32_t low, uint32_t high, uint32_t docIdLimit);
+ void applyRemoves(uint32_t rmDocs);
+ void checkRemovePostCond(uint32_t numDocs, uint32_t docIdLimit, uint32_t rmDocs, bool before);
};
Fixture::Fixture()
: _baseDir("visitor"),
_repo(makeDocTypeRepoConfig()),
_storeConfig(DocumentStore::Config(CompressionConfig::NONE, 0, 0),
- LogDataStore::Config(50000, 0.2, 3.0, 0.2, 1, true, CompressionConfig::LZ4,
- WriteableFileChunk::Config(CompressionConfig(), 16384))),
- _executor(_storeConfig.getLogConfig().getNumThreads(), 128 * 1024),
+ LogDataStore::Config().setMaxFileSize(50000).setMaxBucketSpread(3.0)
+ .setFileConfig(WriteableFileChunk::Config(CompressionConfig(), 16384))),
+ _executor(1, 128 * 1024),
_fileHeaderContext(),
_tlSyncer(),
_store(),
@@ -307,14 +265,8 @@ Fixture::makeDoc(uint32_t i)
void
Fixture::resetDocStore()
{
- _store.reset(new LogDocumentStore(_executor,
- _baseDir,
- _storeConfig,
- GrowStrategy(),
- TuneFileSummary(),
- _fileHeaderContext,
- _tlSyncer,
- NULL));
+ _store.reset(new LogDocumentStore(_executor, _baseDir, _storeConfig, GrowStrategy(),
+ TuneFileSummary(), _fileHeaderContext, _tlSyncer, nullptr));
}
diff --git a/searchlib/src/tests/docstore/file_chunk/file_chunk_test.cpp b/searchlib/src/tests/docstore/file_chunk/file_chunk_test.cpp
index 598913a3222..31c21723cd0 100644
--- a/searchlib/src/tests/docstore/file_chunk/file_chunk_test.cpp
+++ b/searchlib/src/tests/docstore/file_chunk/file_chunk_test.cpp
@@ -11,6 +11,8 @@
#include <iostream>
#include <vespa/log/log.h>
+#include <vespa/vespalib/util/compressionconfig.h>
+
LOG_SETUP("file_chunk_test");
using namespace search;
@@ -210,5 +212,16 @@ TEST("require that entries with lid >= docIdLimit are skipped in updateLidMap()"
}
}
+using vespalib::compression::CompressionConfig;
+
+TEST("require that operator == detects inequality") {
+ using C = WriteableFileChunk::Config;
+ EXPECT_TRUE(C() == C());
+ EXPECT_TRUE(C({}, 1) == C({}, 1));
+ EXPECT_FALSE(C({}, 2) == C({}, 1));
+ EXPECT_FALSE(C({}, 1) == C({}, 2));
+ EXPECT_FALSE(C({CompressionConfig::LZ4, 9, 60}, 2) == C({}, 2));
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
index ed6afb06681..ed99003c2f7 100644
--- a/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
+++ b/searchlib/src/tests/docstore/logdatastore/logdatastore_test.cpp
@@ -202,20 +202,17 @@ TEST("test that DirectIOPadding works accordng to spec") {
TEST("testGrowing") {
FastOS_File::EmptyAndRemoveDirectory("growing");
EXPECT_TRUE(FastOS_File::MakeDirectory("growing"));
- LogDataStore::Config config(100000, 0.1, 3.0, 0.2, 8, true, CompressionConfig::LZ4,
- WriteableFileChunk::Config(CompressionConfig(CompressionConfig::LZ4, 9, 60), 1000));
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ LogDataStore::Config config; //(100000, 0.1, 3.0, 0.2, 8, true, CompressionConfig::LZ4,
+ // WriteableFileChunk::Config(CompressionConfig(CompressionConfig::LZ4, 9, 60), 1000));
+ config.setMaxFileSize(100000).setMaxDiskBloatFactor(0.1).setMaxBucketSpread(3.0).setMinFileSizeFactor(0.2)
+ .compact2ActiveFile(true).compactCompression({CompressionConfig::LZ4})
+ .setFileConfig({{CompressionConfig::LZ4, 9, 60}, 1000});
+ vespalib::ThreadStackExecutor executor(8, 128*1024);
DummyFileHeaderContext fileHeaderContext;
MyTlSyncer tlSyncer;
{
- LogDataStore datastore(executor,
- "growing",
- config,
- GrowStrategy(),
- TuneFileSummary(),
- fileHeaderContext,
- tlSyncer,
- NULL);
+ LogDataStore datastore(executor, "growing", config, GrowStrategy(),
+ TuneFileSummary(), fileHeaderContext, tlSyncer, nullptr);
srand(7);
char buffer[12000];
SerialNum lastSyncToken(0);
@@ -245,14 +242,8 @@ TEST("testGrowing") {
checkStats(datastore, 31000, 30000);
}
{
- LogDataStore datastore(executor,
- "growing",
- config,
- GrowStrategy(),
- TuneFileSummary(),
- fileHeaderContext,
- tlSyncer,
- NULL);
+ LogDataStore datastore(executor, "growing", config, GrowStrategy(),
+ TuneFileSummary(), fileHeaderContext, tlSyncer, nullptr);
checkStats(datastore, 30000, 30000);
}
@@ -285,30 +276,26 @@ void fetchAndTest(IDataStore & datastore, uint32_t lid, const void *a, size_t sz
TEST("testTruncatedIdxFile"){
LogDataStore::Config config;
DummyFileHeaderContext fileHeaderContext;
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ vespalib::ThreadStackExecutor executor(1, 128*1024);
MyTlSyncer tlSyncer;
{
// Files comes from the 'growing test'.
- LogDataStore datastore(executor,
- TEST_PATH("bug-7257706"), config,
- GrowStrategy(), TuneFileSummary(),
- fileHeaderContext, tlSyncer, NULL);
+ LogDataStore datastore(executor, TEST_PATH("bug-7257706"), config, GrowStrategy(),
+ TuneFileSummary(), fileHeaderContext, tlSyncer, NULL);
EXPECT_EQUAL(354ul, datastore.lastSyncToken());
}
const char * magic = "mumbo jumbo";
{
- LogDataStore datastore(executor, "bug-7257706-truncated", config,
- GrowStrategy(), TuneFileSummary(),
- fileHeaderContext, tlSyncer, NULL);
+ LogDataStore datastore(executor, "bug-7257706-truncated", config, GrowStrategy(),
+ TuneFileSummary(), fileHeaderContext, tlSyncer, NULL);
EXPECT_EQUAL(331ul, datastore.lastSyncToken());
datastore.write(332, 7, magic, strlen(magic));
datastore.write(333, 8, magic, strlen(magic));
datastore.flush(datastore.initFlush(334));
}
{
- LogDataStore datastore(executor, "bug-7257706-truncated", config,
- GrowStrategy(), TuneFileSummary(),
- fileHeaderContext, tlSyncer, NULL);
+ LogDataStore datastore(executor, "bug-7257706-truncated", config, GrowStrategy(),
+ TuneFileSummary(), fileHeaderContext, tlSyncer, NULL);
EXPECT_EQUAL(334ul, datastore.lastSyncToken());
}
}
@@ -316,7 +303,7 @@ TEST("testTruncatedIdxFile"){
TEST("testThatEmptyIdxFilesAndDanglingDatFilesAreRemoved") {
LogDataStore::Config config;
DummyFileHeaderContext fileHeaderContext;
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ vespalib::ThreadStackExecutor executor(1, 128*1024);
MyTlSyncer tlSyncer;
LogDataStore datastore(executor, "dangling-test", config,
GrowStrategy(), TuneFileSummary(),
@@ -329,7 +316,7 @@ TEST("testThatEmptyIdxFilesAndDanglingDatFilesAreRemoved") {
TEST("testThatIncompleteCompactedFilesAreRemoved") {
LogDataStore::Config config;
DummyFileHeaderContext fileHeaderContext;
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ vespalib::ThreadStackExecutor executor(1, 128*1024);
MyTlSyncer tlSyncer;
LogDataStore datastore(executor, "incompletecompact-test", config,
GrowStrategy(), TuneFileSummary(),
@@ -349,11 +336,10 @@ public:
_myDir("visitcache"),
_config(),
_fileHeaderContext(),
- _executor(_config.getNumThreads(), 128*1024),
+ _executor(1, 128*1024),
_tlSyncer(),
- _datastore(_executor, _myDir.getDir(), _config,
- GrowStrategy(), TuneFileSummary(),
- _fileHeaderContext, _tlSyncer, NULL)
+ _datastore(_executor, _myDir.getDir(), _config, GrowStrategy(),
+ TuneFileSummary(), _fileHeaderContext, _tlSyncer, NULL)
{ }
~VisitStore();
IDataStore & getStore() { return _datastore; }
@@ -527,14 +513,13 @@ VisitCacheStore::VisitCacheStore() :
_myDir("visitcache"),
_repo(makeDocTypeRepoConfig()),
_config(DocumentStore::Config(CompressionConfig::LZ4, 1000000, 0).allowVisitCaching(true),
- LogDataStore::Config(50000, 0.2, 3.0, 0.2, 1, true,CompressionConfig::LZ4,
- WriteableFileChunk::Config(CompressionConfig(), 16384))),
+ LogDataStore::Config().setMaxFileSize(50000).setMaxBucketSpread(3.0)
+ .setFileConfig(WriteableFileChunk::Config(CompressionConfig(), 16384))),
_fileHeaderContext(),
- _executor(_config.getLogConfig().getNumThreads(), 128*1024),
+ _executor(1, 128*1024),
_tlSyncer(),
- _datastore(_executor, _myDir.getDir(), _config,
- GrowStrategy(), TuneFileSummary(),
- _fileHeaderContext, _tlSyncer, NULL),
+ _datastore(_executor, _myDir.getDir(), _config, GrowStrategy(),
+ TuneFileSummary(), _fileHeaderContext, _tlSyncer, nullptr),
_inserted(),
_serial(1)
{ }
@@ -606,11 +591,10 @@ TEST("testWriteRead") {
{
EXPECT_TRUE(FastOS_File::MakeDirectory("empty"));
DummyFileHeaderContext fileHeaderContext;
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ vespalib::ThreadStackExecutor executor(1, 128*1024);
MyTlSyncer tlSyncer;
- LogDataStore datastore(executor, "empty", config,
- GrowStrategy(), TuneFileSummary(),
- fileHeaderContext, tlSyncer, NULL);
+ LogDataStore datastore(executor, "empty", config, GrowStrategy(),
+ TuneFileSummary(), fileHeaderContext, tlSyncer, NULL);
ASSERT_TRUE(datastore.lastSyncToken() == 0);
size_t headerFootprint = datastore.getDiskHeaderFootprint();
EXPECT_LESS(0u, headerFootprint);
@@ -643,7 +627,7 @@ TEST("testWriteRead") {
}
{
DummyFileHeaderContext fileHeaderContext;
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ vespalib::ThreadStackExecutor executor(1, 128*1024);
MyTlSyncer tlSyncer;
LogDataStore datastore(executor, "empty", config,
GrowStrategy(), TuneFileSummary(),
@@ -694,16 +678,10 @@ TEST("requireThatFlushTimeIsAvailableAfterFlush") {
fastos::TimeStamp before(fastos::ClockSystem::now());
DummyFileHeaderContext fileHeaderContext;
LogDataStore::Config config;
- vespalib::ThreadStackExecutor executor(config.getNumThreads(), 128*1024);
+ vespalib::ThreadStackExecutor executor(1, 128*1024);
MyTlSyncer tlSyncer;
- LogDataStore store(executor,
- testDir.getDir(),
- config,
- GrowStrategy(),
- TuneFileSummary(),
- fileHeaderContext,
- tlSyncer,
- NULL);
+ LogDataStore store(executor, testDir.getDir(), config, GrowStrategy(),
+ TuneFileSummary(), fileHeaderContext, tlSyncer, nullptr);
EXPECT_EQUAL(0, store.getLastFlushTime().time());
uint64_t flushToken = store.initFlush(5);
EXPECT_EQUAL(5u, flushToken);
@@ -757,9 +735,7 @@ TEST("testBucketDensityComputer") {
LogDataStore::Config
getBasicConfig(size_t maxFileSize)
{
- CompressionConfig compCfg;
- WriteableFileChunk::Config fileCfg;
- return LogDataStore::Config(maxFileSize, 0.2, 2.5, 0.2, 1, true, compCfg, fileCfg);
+ return LogDataStore::Config().setMaxFileSize(maxFileSize);
}
vespalib::string
@@ -794,14 +770,8 @@ struct Fixture {
serialNum(0),
fileHeaderCtx(),
tlSyncer(),
- store(executor,
- dirName,
- getBasicConfig(maxFileSize),
- GrowStrategy(),
- TuneFileSummary(),
- fileHeaderCtx,
- tlSyncer,
- nullptr)
+ store(executor, dirName, getBasicConfig(maxFileSize), GrowStrategy(),
+ TuneFileSummary(), fileHeaderCtx, tlSyncer, nullptr)
{
dir.cleanup(dirCleanup);
}
@@ -997,6 +967,19 @@ TEST("require that findIncompleteCompactedFiles does expected filtering") {
}
+TEST("require that config equality operator detects inequality") {
+ using C = LogDataStore::Config;
+ EXPECT_TRUE(C() == C());
+ EXPECT_FALSE(C() == C().setMaxFileSize(1));
+ EXPECT_FALSE(C() == C().setMaxDiskBloatFactor(0.3));
+ EXPECT_FALSE(C() == C().setMaxBucketSpread(0.3));
+ EXPECT_FALSE(C() == C().setMinFileSizeFactor(0.3));
+ EXPECT_FALSE(C() == C().setFileConfig(WriteableFileChunk::Config({}, 70)));
+ EXPECT_FALSE(C() == C().disableCrcOnRead(true));
+ EXPECT_FALSE(C() == C().compact2ActiveFile(false));
+ EXPECT_FALSE(C() == C().compactCompression({CompressionConfig::ZSTD}));
+}
+
TEST_MAIN() {
DummyFileHeaderContext::setCreator("logdatastore_test");
TEST_RUN_ALL();
diff --git a/searchlib/src/tests/features/prod_features_attributematch.cpp b/searchlib/src/tests/features/prod_features_attributematch.cpp
index 8dcceec9a22..4ddb3170efe 100644
--- a/searchlib/src/tests/features/prod_features_attributematch.cpp
+++ b/searchlib/src/tests/features/prod_features_attributematch.cpp
@@ -18,6 +18,7 @@ using AVC = search::attribute::Config;
using AVBT = search::attribute::BasicType;
using AVCT = search::attribute::CollectionType;
using CollectionType = FieldInfo::CollectionType;
+using DataType = FieldInfo::DataType;
void
Test::testAttributeMatch()
@@ -303,4 +304,10 @@ Test::testAttributeMatch()
addScore("attributeMatch(wint).fieldCompleteness", 0);
ASSERT_TRUE(ft.execute(exp));
}
+ { // tensor attribute is not allowed
+ FtFeatureTest ft(_factory, "attributeMatch(tensor)");
+ ft.getIndexEnv().getBuilder().addField(FieldType::ATTRIBUTE, CollectionType::SINGLE, DataType::TENSOR, "tensor");
+ ASSERT_TRUE(ft.getQueryEnv().getBuilder().addAttributeNode("tensor") != nullptr);
+ ASSERT_TRUE(!ft.setup());
+ }
}
diff --git a/searchlib/src/tests/fef/parameter/parameter_test.cpp b/searchlib/src/tests/fef/parameter/parameter_test.cpp
index 5fa0633f56e..2cff534d289 100644
--- a/searchlib/src/tests/fef/parameter/parameter_test.cpp
+++ b/searchlib/src/tests/fef/parameter/parameter_test.cpp
@@ -9,6 +9,7 @@ LOG_SETUP("parameter_test");
using namespace search::fef::test;
using CollectionType = search::fef::FieldInfo::CollectionType;
+using DataType = search::fef::FieldInfo::DataType;
namespace search {
namespace fef {
@@ -135,6 +136,7 @@ ParameterTest::testValidator()
IndexEnvironmentBuilder builder(env);
builder.addField(FieldType::INDEX, CollectionType::SINGLE, "foo")
.addField(FieldType::ATTRIBUTE, CollectionType::SINGLE, "bar")
+ .addField(FieldType::ATTRIBUTE, CollectionType::SINGLE, DataType::TENSOR, "tbar")
.addField(FieldType::INDEX, CollectionType::ARRAY, "afoo")
.addField(FieldType::INDEX, CollectionType::WEIGHTEDSET, "wfoo")
.addField(FieldType::INDEX, CollectionType::SINGLE, "hybrid");
@@ -156,6 +158,8 @@ ParameterTest::testValidator()
EXPECT_TRUE(validate(env, SL().add("baz"), PDS().desc().feature()));
EXPECT_TRUE(validate(env, SL().add("123"), PDS().desc().number()));
EXPECT_TRUE(validate(env, SL().add("baz"), PDS().desc().string()));
+ EXPECT_TRUE(validate(env, SL().add("tbar"), PDS().desc().attributeField(ParameterCollection::ANY)));
+ EXPECT_TRUE(validate(env, SL().add("tbar"), PDS().desc().attribute(ParameterCollection::ANY)));
// first fail but second pass
EXPECT_TRUE(validate(env, SL().add("baz"), PDS().desc().field().desc().string()));
@@ -180,6 +184,8 @@ ParameterTest::testValidator()
EXPECT_FALSE(validate(env, SL().add("hybrid"), PDS().desc().attributeField(ParameterCollection::ANY)));
EXPECT_FALSE(validate(env, SL().add("12a"), PDS().desc().number()));
EXPECT_FALSE(validate(env, SL().add("a12"), PDS().desc().number()));
+ EXPECT_FALSE(validate(env, SL().add("tbar"), PDS().desc().attributeField(ParameterDataTypeSet::normalTypeSet(), ParameterCollection::ANY)));
+ EXPECT_FALSE(validate(env, SL().add("tbar"), PDS().desc().attribute(ParameterDataTypeSet::normalTypeSet(), ParameterCollection::ANY)));
// test repeat
PDS d1 = PDS().desc().field().repeat();
diff --git a/searchlib/src/tests/memoryindex/documentinverter/documentinverter_test.cpp b/searchlib/src/tests/memoryindex/documentinverter/documentinverter_test.cpp
index 8ef4ad30ea2..36cd15c8ada 100644
--- a/searchlib/src/tests/memoryindex/documentinverter/documentinverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/documentinverter/documentinverter_test.cpp
@@ -5,7 +5,6 @@
#include <vespa/searchlib/index/docbuilder.h>
#include <vespa/searchlib/memoryindex/documentinverter.h>
#include <vespa/searchlib/memoryindex/fieldinverter.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/searchlib/test/memoryindex/ordereddocumentinserter.h>
#include <vespa/searchlib/common/sequencedtaskexecutor.h>
#include <vespa/vespalib/testkit/testapp.h>
diff --git a/searchlib/src/tests/memoryindex/fieldinverter/fieldinverter_test.cpp b/searchlib/src/tests/memoryindex/fieldinverter/fieldinverter_test.cpp
index abab62dfda3..1d066747ef8 100644
--- a/searchlib/src/tests/memoryindex/fieldinverter/fieldinverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/fieldinverter/fieldinverter_test.cpp
@@ -2,7 +2,6 @@
#include <vespa/searchlib/index/docbuilder.h>
#include <vespa/searchlib/memoryindex/fieldinverter.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/searchlib/test/memoryindex/ordereddocumentinserter.h>
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/document/repo/fixedtyperepo.h>
diff --git a/searchlib/src/tests/memoryindex/memoryindex/memoryindex_test.cpp b/searchlib/src/tests/memoryindex/memoryindex/memoryindex_test.cpp
index cfbb0847cd5..77a687796b3 100644
--- a/searchlib/src/tests/memoryindex/memoryindex/memoryindex_test.cpp
+++ b/searchlib/src/tests/memoryindex/memoryindex/memoryindex_test.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/log/log.h>
-LOG_SETUP("memoryindex_test");
+
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/searchlib/memoryindex/memoryindex.h>
@@ -19,11 +18,14 @@ LOG_SETUP("memoryindex_test");
#include <vespa/searchlib/common/scheduletaskcallback.h>
#include <vespa/vespalib/util/threadstackexecutor.h>
+#include <vespa/log/log.h>
+LOG_SETUP("memoryindex_test");
+
using document::Document;
using document::FieldValue;
using search::ScheduleTaskCallback;
using search::index::schema::DataType;
-using search::makeLambdaTask;
+using vespalib::makeLambdaTask;
using search::query::Node;
using search::query::SimplePhrase;
using search::query::SimpleStringTerm;
diff --git a/searchlib/src/tests/memoryindex/urlfieldinverter/urlfieldinverter_test.cpp b/searchlib/src/tests/memoryindex/urlfieldinverter/urlfieldinverter_test.cpp
index 352b30a6088..16957abe915 100644
--- a/searchlib/src/tests/memoryindex/urlfieldinverter/urlfieldinverter_test.cpp
+++ b/searchlib/src/tests/memoryindex/urlfieldinverter/urlfieldinverter_test.cpp
@@ -5,7 +5,6 @@
#include <vespa/searchlib/index/docbuilder.h>
#include <vespa/searchlib/memoryindex/fieldinverter.h>
#include <vespa/searchlib/memoryindex/urlfieldinverter.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/searchlib/test/memoryindex/ordereddocumentinserter.h>
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/document/repo/fixedtyperepo.h>
diff --git a/searchlib/src/tests/queryeval/sparse_vector_benchmark/sparse_vector_benchmark_test.cpp b/searchlib/src/tests/queryeval/sparse_vector_benchmark/sparse_vector_benchmark_test.cpp
index 2a9885c9648..38d6483f21a 100644
--- a/searchlib/src/tests/queryeval/sparse_vector_benchmark/sparse_vector_benchmark_test.cpp
+++ b/searchlib/src/tests/queryeval/sparse_vector_benchmark/sparse_vector_benchmark_test.cpp
@@ -234,7 +234,7 @@ struct WeightedSetFactory : SparseVectorFactory {
terms.push_back(childFactory.createChild(i, limit));
weights.push_back(default_weight);
}
- return WeightedSetTermSearch::create(terms, tfmd, weights);
+ return WeightedSetTermSearch::create(terms, tfmd, weights, MatchData::UP(nullptr));
}
};
diff --git a/searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp b/searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp
index 003c9935716..78195f19427 100644
--- a/searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp
+++ b/searchlib/src/tests/queryeval/weighted_set_term/weighted_set_term_test.cpp
@@ -7,7 +7,9 @@
#include <vespa/searchlib/query/tree/simplequery.h>
#include <vespa/searchlib/queryeval/field_spec.h>
#include <vespa/searchlib/queryeval/blueprint.h>
+#include <vespa/searchlib/queryeval/weighted_set_term_blueprint.h>
#include <vespa/searchlib/queryeval/fake_result.h>
+#include <vespa/searchlib/queryeval/emptysearch.h>
#include <vespa/searchlib/queryeval/fake_searchable.h>
#include <vespa/searchlib/queryeval/fake_requestcontext.h>
#include <vespa/searchlib/test/weightedchildrenverifiers.h>
@@ -121,7 +123,7 @@ struct MockFixture {
mock = new MockSearch(initial);
children.push_back(mock);
weights.push_back(1);
- search.reset(WeightedSetTermSearch::create(children, tfmd, weights));
+ search.reset(WeightedSetTermSearch::create(children, tfmd, weights, MatchData::UP(nullptr)));
}
};
@@ -192,7 +194,7 @@ TEST_F("test Eager Matching Child", MockFixture(5)) {
class IteratorChildrenVerifier : public search::test::IteratorChildrenVerifier {
private:
SearchIterator::UP create(const std::vector<SearchIterator*> &children) const override {
- return SearchIterator::UP(WeightedSetTermSearch::create(children, _tfmd, _weights));
+ return SearchIterator::UP(WeightedSetTermSearch::create(children, _tfmd, _weights, MatchData::UP(nullptr)));
}
};
@@ -213,4 +215,45 @@ TEST("verify search iterator conformance with document weight iterator children"
verifier.verify();
}
+struct VerifyMatchData {
+ struct MyBlueprint : search::queryeval::SimpleLeafBlueprint {
+ VerifyMatchData &vmd;
+ MyBlueprint(VerifyMatchData &vmd_in, FieldSpec spec_in)
+ : SimpleLeafBlueprint(spec_in), vmd(vmd_in) {}
+ SearchIterator::UP createLeafSearch(const fef::TermFieldMatchDataArray &tfmda, bool) const override {
+ EXPECT_EQUAL(tfmda.size(), 1u);
+ EXPECT_TRUE(tfmda[0] != nullptr);
+ if (vmd.child_tfmd == nullptr) {
+ vmd.child_tfmd = tfmda[0];
+ } else {
+ EXPECT_EQUAL(vmd.child_tfmd, tfmda[0]);
+ }
+ ++vmd.child_cnt;
+ return std::make_unique<EmptySearch>();
+ }
+ };
+ size_t child_cnt = 0;
+ TermFieldMatchData *child_tfmd = nullptr;
+ search::queryeval::Blueprint::UP create(const FieldSpec &spec) {
+ return std::make_unique<MyBlueprint>(*this, spec);
+ }
+};
+
+TEST("require that children get a common (yet separate) term field match data") {
+ VerifyMatchData vmd;
+ MatchDataLayout layout;
+ auto top_handle = layout.allocTermField(42);
+ FieldSpec top_spec("foo", 42, top_handle);
+ WeightedSetTermBlueprint blueprint(top_spec);
+ for (size_t i = 0; i < 5; ++i) {
+ blueprint.addTerm(vmd.create(blueprint.getNextChildField(top_spec)), 1);
+ }
+ auto match_data = layout.createMatchData();
+ auto search = blueprint.createSearch(*match_data, true);
+ auto top_tfmd = match_data->resolveTermField(top_handle);
+ EXPECT_EQUAL(vmd.child_cnt, 5u);
+ EXPECT_TRUE(vmd.child_tfmd != nullptr);
+ EXPECT_NOT_EQUAL(top_tfmd, vmd.child_tfmd);
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/searchlib/src/tests/transactionlog/translogclient_test.cpp b/searchlib/src/tests/transactionlog/translogclient_test.cpp
index 74b0761320f..9f83db9b23a 100644
--- a/searchlib/src/tests/transactionlog/translogclient_test.cpp
+++ b/searchlib/src/tests/transactionlog/translogclient_test.cpp
@@ -42,7 +42,6 @@ private:
uint32_t countFiles(const vespalib::string &dir);
void checkFilledDomainTest(const TransLogClient::Session::UP &s1, size_t numEntries);
bool visitDomainTest(TransLogClient & tls, TransLogClient::Session * s1, const vespalib::string & name);
- bool subscribeDomainTest(TransLogClient & tls, const vespalib::string & name);
bool partialUpdateTest();
bool test1();
bool testRemove();
@@ -59,22 +58,20 @@ private:
TEST_APPHOOK(Test);
-class CallBackTest : public TransLogClient::Subscriber::Callback
+class CallBackTest : public TransLogClient::Visitor::Callback
{
private:
virtual RPC::Result receive(const Packet & packet) override;
- virtual void inSync() override { _inSync = true; }
virtual void eof() override { _eof = true; }
typedef std::map<SerialNum, ByteBuffer> PacketMap;
PacketMap _packetMap;
public:
- CallBackTest() : _inSync(false), _eof(false) { }
+ CallBackTest() : _eof(false) { }
size_t size() const { return _packetMap.size(); }
bool hasSerial(SerialNum n) const { return (_packetMap.find(n) != _packetMap.end()); }
- void clear() { _inSync = false; _eof = false; _packetMap.clear(); }
+ void clear() { _eof = false; _packetMap.clear(); }
const ByteBuffer & packet(SerialNum n) { return (_packetMap.find(n)->second); }
- bool _inSync;
bool _eof;
};
@@ -91,16 +88,14 @@ RPC::Result CallBackTest::receive(const Packet & p)
return RPC::OK;
}
-class CallBackManyTest : public TransLogClient::Subscriber::Callback
+class CallBackManyTest : public TransLogClient::Visitor::Callback
{
private:
virtual RPC::Result receive(const Packet & packet) override;
- virtual void inSync() override { _inSync = true; }
virtual void eof() override { _eof = true; }
public:
- CallBackManyTest(size_t start) : _inSync(false), _eof(false), _count(start), _value(start) { }
- void clear() { _inSync = false; _eof = false; _count = 0; _value = 0; }
- bool _inSync;
+ CallBackManyTest(size_t start) : _eof(false), _count(start), _value(start) { }
+ void clear() { _eof = false; _count = 0; _value = 0; }
bool _eof;
size_t _count;
size_t _value;
@@ -121,21 +116,19 @@ RPC::Result CallBackManyTest::receive(const Packet & p)
return RPC::OK;
}
-class CallBackUpdate : public TransLogClient::Subscriber::Callback
+class CallBackUpdate : public TransLogClient::Visitor::Callback
{
public:
typedef std::map<SerialNum, Identifiable *> PacketMap;
private:
virtual RPC::Result receive(const Packet & packet) override;
- virtual void inSync() override { _inSync = true; }
virtual void eof() override { _eof = true; }
PacketMap _packetMap;
public:
- CallBackUpdate() : _inSync(false), _eof(false) { }
+ CallBackUpdate() : _eof(false) { }
virtual ~CallBackUpdate() { while (_packetMap.begin() != _packetMap.end()) { delete _packetMap.begin()->second; _packetMap.erase(_packetMap.begin()); } }
bool hasSerial(SerialNum n) const { return (_packetMap.find(n) != _packetMap.end()); }
const PacketMap & map() const { return _packetMap; }
- bool _inSync;
bool _eof;
};
@@ -176,16 +169,14 @@ class CallBackStatsTest : public TransLogClient::Session::Callback
{
private:
virtual RPC::Result receive(const Packet & packet) override;
- virtual void inSync() override { _inSync = true; }
virtual void eof() override { _eof = true; }
public:
- CallBackStatsTest() : _inSync(false), _eof(false),
+ CallBackStatsTest() : _eof(false),
_count(0), _inOrder(0),
_firstSerial(0), _lastSerial(0),
_prevSerial(0) { }
- void clear() { _inSync = false; _eof = false; _count = 0; _inOrder = 0;
+ void clear() { _eof = false; _count = 0; _inOrder = 0;
_firstSerial = 0; _lastSerial = 0; _inOrder = 0; }
- bool _inSync;
bool _eof;
uint64_t _count;
uint64_t _inOrder; // increase when next entry is one above previous
@@ -258,7 +249,6 @@ bool Test::partialUpdateTest()
ASSERT_TRUE(visitor.get());
ASSERT_TRUE( visitor->visit(5, 7) );
for (size_t i(0); ! ca._eof && (i < 1000); i++ ) { FastOS_Thread::Sleep(10); }
- ASSERT_TRUE( ! ca._inSync );
ASSERT_TRUE( ca._eof );
ASSERT_TRUE( ca.map().size() == 1);
ASSERT_TRUE( ca.hasSerial(7) );
@@ -268,7 +258,6 @@ bool Test::partialUpdateTest()
ASSERT_TRUE(visitor1.get());
ASSERT_TRUE( visitor1->visit(4, 5) );
for (size_t i(0); ! ca1._eof && (i < 1000); i++ ) { FastOS_Thread::Sleep(10); }
- ASSERT_TRUE( ! ca1._inSync );
ASSERT_TRUE( ca1._eof );
ASSERT_TRUE( ca1.map().size() == 0);
@@ -277,7 +266,6 @@ bool Test::partialUpdateTest()
ASSERT_TRUE(visitor2.get());
ASSERT_TRUE( visitor2->visit(5, 6) );
for (size_t i(0); ! ca2._eof && (i < 1000); i++ ) { FastOS_Thread::Sleep(10); }
- ASSERT_TRUE( ! ca2._inSync );
ASSERT_TRUE( ca2._eof );
ASSERT_TRUE( ca2.map().size() == 0);
@@ -286,7 +274,6 @@ bool Test::partialUpdateTest()
ASSERT_TRUE(visitor3.get());
ASSERT_TRUE( visitor3->visit(5, 1000) );
for (size_t i(0); ! ca3._eof && (i < 1000); i++ ) { FastOS_Thread::Sleep(10); }
- ASSERT_TRUE( ! ca3._inSync );
ASSERT_TRUE( ca3._eof );
ASSERT_TRUE( ca3.map().size() == 1);
ASSERT_TRUE( ca3.hasSerial(7) );
@@ -451,7 +438,6 @@ bool Test::visitDomainTest(TransLogClient & tls, TransLogClient::Session * s1, c
ASSERT_TRUE(visitor.get());
EXPECT_TRUE( visitor->visit(0, 1) );
for (size_t i(0); ! ca._eof && (i < 60000); i++ ) { FastOS_Thread::Sleep(10); }
- EXPECT_TRUE( ! ca._inSync );
EXPECT_TRUE( ca._eof );
EXPECT_TRUE( ! ca.hasSerial(0) );
EXPECT_TRUE( ca.hasSerial(1) );
@@ -462,7 +448,6 @@ bool Test::visitDomainTest(TransLogClient & tls, TransLogClient::Session * s1, c
ASSERT_TRUE(visitor.get());
EXPECT_TRUE( visitor->visit(1, 2) );
for (size_t i(0); ! ca._eof && (i < 60000); i++ ) { FastOS_Thread::Sleep(10); }
- EXPECT_TRUE( ! ca._inSync );
EXPECT_TRUE( ca._eof );
EXPECT_TRUE( ! ca.hasSerial(0) );
EXPECT_TRUE( ! ca.hasSerial(1) );
@@ -474,7 +459,6 @@ bool Test::visitDomainTest(TransLogClient & tls, TransLogClient::Session * s1, c
EXPECT_TRUE(visitor.get());
EXPECT_TRUE( visitor->visit(0, 3) );
for (size_t i(0); ! ca._eof && (i < 60000); i++ ) { FastOS_Thread::Sleep(10); }
- EXPECT_TRUE( ! ca._inSync );
EXPECT_TRUE( ca._eof );
EXPECT_TRUE( ! ca.hasSerial(0) );
EXPECT_TRUE( ca.hasSerial(1) );
@@ -486,7 +470,6 @@ bool Test::visitDomainTest(TransLogClient & tls, TransLogClient::Session * s1, c
ASSERT_TRUE(visitor.get());
EXPECT_TRUE( visitor->visit(2, 3) );
for (size_t i(0); ! ca._eof && (i < 60000); i++ ) { FastOS_Thread::Sleep(10); }
- EXPECT_TRUE( ! ca._inSync );
EXPECT_TRUE( ca._eof );
EXPECT_TRUE( ! ca.hasSerial(0) );
EXPECT_TRUE( !ca.hasSerial(1) );
@@ -497,23 +480,6 @@ bool Test::visitDomainTest(TransLogClient & tls, TransLogClient::Session * s1, c
return retval;
}
-bool Test::subscribeDomainTest(TransLogClient & tls, const vespalib::string & name)
-{
- bool retval(true);
- CallBackTest ca;
- TransLogClient::Subscriber::UP subscriber = tls.createSubscriber(name, ca);
- ASSERT_TRUE(subscriber.get());
- ASSERT_TRUE( subscriber->subscribe(0) );
- for (size_t i(0); ! ca._inSync && (i < 60000); i++ ) { FastOS_Thread::Sleep(10); }
- ASSERT_TRUE( ca._inSync );
- ASSERT_TRUE( ! ca.hasSerial(0) );
- ASSERT_TRUE( ! ca._eof );
- ASSERT_TRUE( ca.hasSerial(1) );
- ASSERT_TRUE( ca.hasSerial(2) );
- ASSERT_TRUE( ca.hasSerial(3) );
- return retval;
-}
-
bool Test::test1()
{
DummyFileHeaderContext fileHeaderContext;
@@ -525,14 +491,13 @@ bool Test::test1()
TransLogClient::Session::UP s1 = openDomainTest(tls, name);
fillDomainTest(s1.get(), name);
visitDomainTest(tls, s1.get(), name);
- subscribeDomainTest(tls, name);
return true;
}
void Test::createAndFillDomain(const vespalib::string & name, DomainPart::Crc crcMethod, size_t preExistingDomains)
{
DummyFileHeaderContext fileHeaderContext;
- TransLogServer tlss("test13", 18377, ".", fileHeaderContext, 0x10000, false, 4, crcMethod);
+ TransLogServer tlss("test13", 18377, ".", fileHeaderContext, 0x10000, 4, crcMethod);
TransLogClient tls("tcp/localhost:18377");
createDomainTest(tls, name, preExistingDomains);
@@ -569,7 +534,6 @@ bool Test::testRemove()
TransLogClient::Session::UP s1 = openDomainTest(tls, name);
fillDomainTest(s1.get(), name);
visitDomainTest(tls, s1.get(), name);
- subscribeDomainTest(tls, name);
ASSERT_TRUE(tls.remove(name));
return true;
@@ -584,7 +548,6 @@ bool Test::test2()
vespalib::string name("test1");
TransLogClient::Session::UP s1 = openDomainTest(tls, name);
visitDomainTest(tls, s1.get(), name);
- subscribeDomainTest(tls, name);
return true;
}
@@ -603,7 +566,6 @@ assertVisitStats(TransLogClient &tls, const vespalib::string &domain,
for (size_t i(0); ! ca._eof && (i < 60000); i++ ) {
FastOS_Thread::Sleep(10);
}
- ASSERT_TRUE(!ca._inSync);
ASSERT_TRUE(ca._eof);
EXPECT_EQUAL(expFirstSerial, ca._firstSerial);
EXPECT_EQUAL(expLastSerial, ca._lastSerial);
@@ -651,7 +613,6 @@ void Test::testMany()
ASSERT_TRUE(visitor.get());
ASSERT_TRUE( visitor->visit(2, TOTAL_NUM_ENTRIES) );
for (size_t i(0); ! ca._eof && (i < 60000); i++ ) { FastOS_Thread::Sleep(10); }
- ASSERT_TRUE( ! ca._inSync );
ASSERT_TRUE( ca._eof );
EXPECT_EQUAL(ca._count, TOTAL_NUM_ENTRIES);
EXPECT_EQUAL(ca._value, TOTAL_NUM_ENTRIES);
@@ -673,7 +634,6 @@ void Test::testMany()
ASSERT_TRUE(visitor.get());
ASSERT_TRUE( visitor->visit(2, TOTAL_NUM_ENTRIES) );
for (size_t i(0); ! ca._eof && (i < 60000); i++ ) { FastOS_Thread::Sleep(10); }
- ASSERT_TRUE( ! ca._inSync );
ASSERT_TRUE( ca._eof );
EXPECT_EQUAL(ca._count, TOTAL_NUM_ENTRIES);
EXPECT_EQUAL(ca._value, TOTAL_NUM_ENTRIES);
diff --git a/searchlib/src/tests/transactionlogstress/translogstress.cpp b/searchlib/src/tests/transactionlogstress/translogstress.cpp
index aabfdb62a96..abba84b75b6 100644
--- a/searchlib/src/tests/transactionlogstress/translogstress.cpp
+++ b/searchlib/src/tests/transactionlogstress/translogstress.cpp
@@ -23,11 +23,9 @@ using vespalib::make_string;
using vespalib::ConstBufferRef;
using search::index::DummyFileHeaderContext;
-namespace search {
-namespace transactionlog {
+namespace search::transactionlog {
using ClientSession = TransLogClient::Session;
-using Subscriber = TransLogClient::Subscriber;
using Visitor = TransLogClient::Visitor;
//-----------------------------------------------------------------------------
@@ -308,89 +306,12 @@ public:
_generator(generator), _name(name), _id(id), _validate(validate) {}
virtual ~Agent() {}
virtual RPC::Result receive(const Packet & packet) override = 0;
- virtual void inSync() override {}
virtual void eof() override {}
virtual void failed() {}
};
//-----------------------------------------------------------------------------
-// SubscriberAgent
-//-----------------------------------------------------------------------------
-class SubscriberAgent : public Agent
-{
-private:
- std::unique_ptr<Subscriber> _subscriber;
- SerialNum _from;
- SerialNum _next;
- Monitor _monitor;
-
- SerialNum getNext() {
- MonitorGuard guard(_monitor);
- return _next++;
- }
-
-public:
- SubscriberAgent(const std::string & tlsSpec, const std::string & domain,
- const EntryGenerator & generator, SerialNum from, uint32_t id, bool validate) :
- Agent(tlsSpec, domain, generator, "SubscriberAgent", id, validate),
- _subscriber(), _from(from), _next(from + 1) {}
- virtual ~SubscriberAgent() {}
- void start();
- void stop();
- SerialNum getExpectedNext() const {
- MonitorGuard guard(_monitor);
- return _next;
- }
- SerialNumRange getRange() const { return SerialNumRange(_from, _next - 1); }
- virtual RPC::Result receive(const Packet & packet) override;
-};
-
-void
-SubscriberAgent::start()
-{
- _subscriber = _client.createSubscriber(_domain, *this);
- if (_subscriber.get() == NULL) {
- throw std::runtime_error(vespalib::make_string
- ("SubscriberAgent[%u]: Could not open subscriber to %s", _id, _tlsSpec.c_str()));
- }
- if (!_subscriber->subscribe(_from)) {
- throw std::runtime_error(vespalib::make_string
- ("SubscriberAgent[%u]: Could not subscribe to %s from serialnumber %" PRIu64,
- _id, _tlsSpec.c_str(), _from));
- }
-}
-
-void
-SubscriberAgent::stop()
-{
- _subscriber.reset();
-}
-
-RPC::Result
-SubscriberAgent::receive(const Packet & packet)
-{
- auto handle = packet.getHandle();
- while (handle.size() > 0) {
- Packet::Entry entry;
- entry.deserialize(handle);
- Packet::Entry expected = _generator.getRandomEntry(getNext());
- if (_validate) {
- if (!EntryComparator::cmp(entry, expected)) {
- throw std::runtime_error(vespalib::make_string
- ("SubscriberAgent[%u]: Got %s, expected %s", _id,
- EntryPrinter::toStr(entry).c_str(),
- EntryPrinter::toStr(expected).c_str()));
- }
- }
- }
- LOG(info, "SubscriberAgent[%u]: received %s", _id, PacketPrinter::toStr(packet).c_str());
-
- return RPC::OK;
-}
-
-
-//-----------------------------------------------------------------------------
// VisitorAgent
//-----------------------------------------------------------------------------
class VisitorAgent : public Agent
@@ -534,7 +455,6 @@ private:
TransLogClient _client;
std::unique_ptr<ClientSession> _session;
EntryGenerator _generator;
- std::vector<std::shared_ptr<SubscriberAgent> > _subscribers;
std::vector<std::shared_ptr<VisitorAgent> > _visitors;
std::vector<std::shared_ptr<VisitorAgent> > _rndVisitors;
uint64_t _visitorInterval; // in milliseconds
@@ -548,29 +468,22 @@ private:
void makeRandomVisitorVector();
public:
- ControllerThread(const std::string & tlsSpec, const std::string & domain,
- const EntryGenerator & generator, uint32_t numSubscribers, uint32_t numVisitors,
- uint64_t visitorInterval, uint64_t pruneInterval);
+ ControllerThread(const std::string & tlsSpec, const std::string & domain, const EntryGenerator & generator,
+ uint32_t numVisitors, uint64_t visitorInterval, uint64_t pruneInterval);
~ControllerThread();
- void startSubscribers();
uint32_t runningVisitors();
- std::vector<std::shared_ptr<SubscriberAgent> > & getSubscribers() { return _subscribers; }
std::vector<std::shared_ptr<VisitorAgent> > & getVisitors() { return _visitors; }
virtual void doRun() override;
};
ControllerThread::ControllerThread(const std::string & tlsSpec, const std::string & domain,
- const EntryGenerator & generator, uint32_t numSubscribers, uint32_t numVisitors,
+ const EntryGenerator & generator, uint32_t numVisitors,
uint64_t visitorInterval, uint64_t pruneInterval)
: _tlsSpec(tlsSpec), _domain(domain), _client(tlsSpec.c_str()), _session(),
- _generator(generator), _subscribers(), _visitors(), _rndVisitors(), _visitorInterval(visitorInterval),
+ _generator(generator), _visitors(), _rndVisitors(), _visitorInterval(visitorInterval),
_pruneInterval(pruneInterval), _pruneTimer(), _begin(0), _end(0), _count(0)
{
- for (uint32_t i = 0; i < numSubscribers; ++i) {
- _subscribers.push_back(std::make_shared<SubscriberAgent>(tlsSpec, domain, generator, 0, i, true));
- }
-
for (uint32_t i = 0; i < numVisitors; ++i) {
_visitors.push_back(std::make_shared<VisitorAgent>(tlsSpec, domain, generator, i, true));
}
@@ -598,14 +511,6 @@ ControllerThread::makeRandomVisitorVector()
}
void
-ControllerThread::startSubscribers()
-{
- for (size_t i = 0; i < _subscribers.size(); ++i) {
- _subscribers[i]->start();
- }
-}
-
-void
ControllerThread::doRun()
{
_session = _client.open(_domain);
@@ -641,12 +546,6 @@ ControllerThread::doRun()
safePrune = _visitors[i]->getFrom();
}
}
- for (size_t i = 0; i < _subscribers.size(); ++i) {
- SerialNum next = _subscribers[i]->getExpectedNext();
- if (next < safePrune) {
- safePrune = next;
- }
- }
LOG(info, "ControllerThread: status: begin(%" PRIu64 "), end(%" PRIu64 "), count(%zu)", _begin, _end, _count);
LOG(info, "ControllerThread: prune [%" PRIu64 ", %" PRIu64 ">", _begin, safePrune);
if (!_session->erase(safePrune)) {
@@ -672,7 +571,6 @@ private:
uint64_t stressTime;
uint32_t feedRate;
- uint32_t numSubscribers;
uint32_t numVisitors;
uint64_t visitorInterval;
uint64_t pruneInterval;
@@ -683,7 +581,7 @@ private:
long baseSeed;
Config() :
- domainPartSize(0), packetSize(0), stressTime(0), feedRate(0), numSubscribers(0),
+ domainPartSize(0), packetSize(0), stressTime(0), feedRate(0),
numVisitors(0), visitorInterval(0), pruneInterval(0), minStrLen(0), maxStrLen(0), baseSeed(0) {}
};
@@ -702,7 +600,6 @@ TransLogStress::printConfig()
std::cout << "######## Config ########" << std::endl;
std::cout << "stressTime: " << _cfg.stressTime / 1000 << " s" << std::endl;
std::cout << "feedRate: " << _cfg.feedRate << " per/sec" << std::endl;
- std::cout << "numSubscribers: " << _cfg.numSubscribers << std::endl;
std::cout << "numVisitors: " << _cfg.numVisitors << std::endl;
std::cout << "visitorInterval: " << _cfg.visitorInterval << " ms" << std::endl;
std::cout << "pruneInterval: " << _cfg.pruneInterval / 1000 << " s" << std::endl;
@@ -733,7 +630,6 @@ TransLogStress::Main()
_cfg.stressTime = 1000 * 60;
_cfg.feedRate = 10000;
- _cfg.numSubscribers = 1;
_cfg.numVisitors = 1;
_cfg.visitorInterval = 1000 * 1;
_cfg.pruneInterval = 1000 * 12;
@@ -763,9 +659,6 @@ TransLogStress::Main()
case 'f':
_cfg.feedRate = atoi(arg);
break;
- case 's':
- _cfg.numSubscribers = atoi(arg);
- break;
case 'v':
_cfg.numVisitors = atoi(arg);
break;
@@ -830,13 +723,9 @@ TransLogStress::Main()
FastOS_Thread::Sleep(sleepTime);
- ControllerThread controller(tlsSpec, domain, generator, _cfg.numSubscribers, _cfg.numVisitors,
- _cfg.visitorInterval, _cfg.pruneInterval);
+ ControllerThread controller(tlsSpec, domain, generator, _cfg.numVisitors, _cfg.visitorInterval, _cfg.pruneInterval);
threadPool.NewThread(&controller);
- // start subscribers
- controller.startSubscribers();
-
// stop feeder and controller
FastOS_Thread::Sleep(_cfg.stressTime);
printConfig();
@@ -862,24 +751,12 @@ TransLogStress::Main()
std::cout << "</visitor>" << std::endl;
}
- // stop subscribers
- LOG(info, "Stop subscribers...");
- std::vector<std::shared_ptr<SubscriberAgent> > & subscribers = controller.getSubscribers();
- for (size_t i = 0; i < subscribers.size(); ++i) {
- subscribers[i]->stop();
- std::cout << "<subscriber id='" << i << "'>" << std::endl;
- std::cout << " <from>" << subscribers[i]->getRange().from() << "</from>" << std::endl;
- std::cout << " <to>" << subscribers[i]->getRange().to() << "</to>" << std::endl;
- std::cout << "</subscriber>" << std::endl;
- }
-
threadPool.Close();
return 0;
}
}
-}
int main(int argc, char ** argv)
{
diff --git a/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp b/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp
index e6c9e9c0590..3ff7db5a184 100644
--- a/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/attribute_weighted_set_blueprint.cpp
@@ -6,6 +6,7 @@
#include <vespa/searchlib/query/queryterm.h>
#include <vespa/searchlib/common/bitvector.h>
#include <vespa/vespalib/stllike/hash_map.h>
+#include <vespa/searchlib/fef/matchdatalayout.h>
namespace search {
@@ -157,11 +158,15 @@ AttributeWeightedSetBlueprint::createLeafSearch(const fef::TermFieldMatchDataArr
assert(tfmda.size() == 1);
fef::TermFieldMatchData &tfmd = *tfmda[0];
if (strict) { // use generic weighted set search
+ fef::MatchDataLayout layout;
+ auto handle = layout.allocTermField(tfmd.getFieldId());
+ auto match_data = layout.createMatchData();
+ auto child_tfmd = match_data->resolveTermField(handle);
std::vector<queryeval::SearchIterator*> children(_contexts.size());
for (size_t i = 0; i < _contexts.size(); ++i) {
- children[i] = _contexts[i]->createIterator(&tfmd, true).release();
+ children[i] = _contexts[i]->createIterator(child_tfmd, true).release();
}
- return queryeval::SearchIterator::UP(queryeval::WeightedSetTermSearch::create(children, tfmd, _weights));
+ return queryeval::SearchIterator::UP(queryeval::WeightedSetTermSearch::create(children, tfmd, _weights, std::move(match_data)));
} else { // use attribute filter optimization
bool isSingleValue = !_attr.hasMultiValue();
bool isString = (_attr.isStringType() && _attr.hasEnum());
diff --git a/searchlib/src/vespa/searchlib/bitcompression/compression.cpp b/searchlib/src/vespa/searchlib/bitcompression/compression.cpp
index 08f1b8a1e9e..b56f4760ec7 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/compression.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/compression.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "compression.h"
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/searchlib/fef/termfieldmatchdata.h>
#include <vespa/searchlib/fef/termfieldmatchdataarray.h>
#include <vespa/vespalib/data/fileheader.h>
@@ -9,8 +8,6 @@
namespace search::bitcompression {
-using vespalib::nbostream;
-
uint8_t CodingTables::_log2Table[65536];
CodingTables tables; // Static initializer
@@ -129,33 +126,6 @@ EncodeContext64EBase<false>::writeBits(uint64_t data, uint32_t length)
}
}
-void
-EncodeContext64Base::checkPointWrite(nbostream &out)
-{
- out << _cacheInt << _cacheFree;
-}
-
-
-void
-EncodeContext64Base::checkPointRead(nbostream &in)
-{
- in >> _cacheInt >> _cacheFree;
-}
-
-
-void
-DecodeContext64Base::checkPointWrite(nbostream &out)
-{
- (void) out;
-}
-
-
-void
-DecodeContext64Base::checkPointRead(nbostream &in)
-{
- (void) in;
-}
-
namespace {
vespalib::string noFeatures = "NoFeatures";
diff --git a/searchlib/src/vespa/searchlib/bitcompression/compression.h b/searchlib/src/vespa/searchlib/bitcompression/compression.h
index a0cba703e65..954afc60a1a 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/compression.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/compression.h
@@ -9,7 +9,6 @@
namespace vespalib {
-class nbostream;
class GenericHeader;
}
@@ -830,9 +829,6 @@ public:
other._cacheFree = _cacheFree;
}
- void checkPointWrite(vespalib::nbostream &out) override;
- void checkPointRead(vespalib::nbostream &in) override;
-
uint64_t getWriteOffset() const {
return _fileWriteBias + (reinterpret_cast<unsigned long>(_valI) << 3) - _cacheFree;
}
@@ -1269,9 +1265,6 @@ public:
return (_preRead == 0) ? 0 : 64 - _preRead;
}
- void checkPointWrite(vespalib::nbostream &out) override;
- void checkPointRead(vespalib::nbostream &in) override;
-
static int64_t convertToSigned(uint64_t val) {
if ((val & 1) != 0)
return - (val >> 1) - 1;
diff --git a/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp b/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp
index fca6c749cfb..49d5ae92ec8 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/countcompression.cpp
@@ -3,14 +3,11 @@
#include "compression.h"
#include "countcompression.h"
#include <vespa/searchlib/index/postinglistcounts.h>
-#include <vespa/vespalib/objects/nbostream.h>
namespace search {
namespace bitcompression {
-using vespalib::nbostream;
-
#define K_VALUE_COUNTFILE_LASTDOCID 22
#define K_VALUE_COUNTFILE_NUMCHUNKS 1
#define K_VALUE_COUNTFILE_CHUNKNUMDOCS 18
@@ -19,24 +16,6 @@ using vespalib::nbostream;
void
-PostingListCountFileDecodeContext::checkPointWrite(nbostream &out)
-{
- ParentClass::checkPointWrite(out);
- out << _avgBitsPerDoc << _minChunkDocs << _docIdLimit << _numWordIds;
- out << _minWordNum;
-}
-
-
-void
-PostingListCountFileDecodeContext::checkPointRead(nbostream &in)
-{
- ParentClass::checkPointRead(in);
- in >> _avgBitsPerDoc >> _minChunkDocs >> _docIdLimit >> _numWordIds;
- in >> _minWordNum;
-}
-
-
-void
PostingListCountFileDecodeContext::
readCounts(PostingListCounts &counts)
{
@@ -145,24 +124,6 @@ copyParams(const PostingListCountFileDecodeContext &rhs)
void
-PostingListCountFileEncodeContext::checkPointWrite(nbostream &out)
-{
- ParentClass::checkPointWrite(out);
- out << _avgBitsPerDoc << _minChunkDocs << _docIdLimit << _numWordIds;
- out << _minWordNum;
-}
-
-
-void
-PostingListCountFileEncodeContext::checkPointRead(nbostream &in)
-{
- ParentClass::checkPointRead(in);
- in >> _avgBitsPerDoc >> _minChunkDocs >> _docIdLimit >> _numWordIds;
- in >> _minWordNum;
-}
-
-
-void
PostingListCountFileEncodeContext::
writeCounts(const PostingListCounts &counts)
{
diff --git a/searchlib/src/vespa/searchlib/bitcompression/countcompression.h b/searchlib/src/vespa/searchlib/bitcompression/countcompression.h
index 4a514e58e2a..fc448ba1c30 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/countcompression.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/countcompression.h
@@ -33,8 +33,6 @@ public:
{
}
- void checkPointWrite(vespalib::nbostream &out) override;
- void checkPointRead(vespalib::nbostream &in) override;
void readCounts(PostingListCounts &counts);
void readWordNum(uint64_t &wordNum);
@@ -67,8 +65,6 @@ public:
{
}
- void checkPointWrite(vespalib::nbostream &out) override;
- void checkPointRead(vespalib::nbostream &in) override;
void writeCounts(const PostingListCounts &counts);
void writeWordNum(uint64_t wordNum);
diff --git a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp
index b3cba84d575..82110d354d3 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp
+++ b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.cpp
@@ -5,7 +5,6 @@
#include "countcompression.h"
#include <vespa/searchlib/index/postinglistcounts.h>
#include <vespa/searchlib/index/dictionaryfile.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <sstream>
#include <vespa/log/log.h>
@@ -71,60 +70,6 @@ operator<<(std::ostream &stream, const index::PostingListCounts &counts)
return stream;
}
-vespalib::nbostream &
-operator<<(vespalib::nbostream &stream,
- const PageDict4StartOffset &startOffset)
-{
- stream << startOffset._fileOffset << startOffset._accNumDocs;
- return stream;
-}
-
-vespalib::nbostream &
-operator>>(vespalib::nbostream &stream, PageDict4StartOffset &startOffset)
-{
- stream >> startOffset._fileOffset >> startOffset._accNumDocs;
- return stream;
-}
-
-
-vespalib::nbostream &
-operator<<(vespalib::nbostream &stream,
- const PageDict4SSReader::L7Entry &l7Entry)
-{
- stream << l7Entry._l7Word << l7Entry._l7StartOffset << l7Entry._l7WordNum;
- stream << l7Entry._l6Offset << l7Entry._sparsePageNum << l7Entry._pageNum;
- stream << l7Entry._l7Ref;
- return stream;
-}
-
-
-vespalib::nbostream &
-operator>>(vespalib::nbostream &stream,
- PageDict4SSReader::L7Entry &l7Entry)
-{
- stream >> l7Entry._l7Word >> l7Entry._l7StartOffset >> l7Entry._l7WordNum;
- stream >> l7Entry._l6Offset >> l7Entry._sparsePageNum >> l7Entry._pageNum;
- stream >> l7Entry._l7Ref;
- return stream;
-}
-
-
-vespalib::nbostream &
-operator<<(vespalib::nbostream &stream,
- const PageDict4SSReader::OverflowRef &oref)
-{
- stream << oref._wordNum << oref._l7Ref;
- return stream;
-}
-
-
-vespalib::nbostream &
-operator>>(vespalib::nbostream &stream, PageDict4SSReader::OverflowRef &oref)
-{
- stream >> oref._wordNum >> oref._l7Ref;
- return stream;
-}
-
typedef index::PostingListCounts Counts;
typedef PageDict4StartOffset StartOffset;
@@ -360,28 +305,6 @@ PageDict4SSWriter::flush()
}
-void
-PageDict4SSWriter::checkPointWrite(vespalib::nbostream &out)
-{
- out << _l6Word;
- out << _l6StartOffset;
- out << _l6PageNum;
- out << _l6SparsePageNum;
- out << _l6WordNum;
-}
-
-
-void
-PageDict4SSWriter::checkPointRead(vespalib::nbostream &in)
-{
- in >> _l6Word;
- in >> _l6StartOffset;
- in >> _l6PageNum;
- in >> _l6SparsePageNum;
- in >> _l6WordNum;
-}
-
-
PageDict4SPWriter::PageDict4SPWriter(SSWriter &ssWriter,
EC &spe)
: _eL3(),
@@ -725,48 +648,6 @@ PageDict4SPWriter::addL5Skip(size_t &lcp)
}
-void
-PageDict4SPWriter::checkPointWrite(vespalib::nbostream &out)
-{
- _wcL3.checkPointWrite(out);
- _wcL4.checkPointWrite(out);
- _wcL5.checkPointWrite(out);
- out << _l3Word << _l4Word << _l5Word << _l6Word;
- out << _l3WordOffset << _l4WordOffset << _l5WordOffset;
- out << _l3StartOffset << _l4StartOffset << _l5StartOffset << _l6StartOffset;
- out << _l3WordNum << _l4WordNum << _l5WordNum << _l6WordNum;
- out << _curL3OffsetL4 << _curL3OffsetL5 << _curL4OffsetL5;
- out << _headerSize;
- out << _l3Entries;
- out << _l4StrideCheck << _l5StrideCheck;
- out << _l3Size << _l4Size << _l5Size;
- out << _prevL3Size << _prevL4Size << _prevL5Size << _prevWordsSize;
- out << _sparsePageNum << _l3PageNum;
- out << _words;
-}
-
-
-void
-PageDict4SPWriter::checkPointRead(vespalib::nbostream &in)
-{
- _wcL3.checkPointRead(in);
- _wcL4.checkPointRead(in);
- _wcL5.checkPointRead(in);
- in >> _l3Word >> _l4Word >> _l5Word >> _l6Word;
- in >> _l3WordOffset >> _l4WordOffset >> _l5WordOffset;
- in >> _l3StartOffset >> _l4StartOffset >> _l5StartOffset >> _l6StartOffset;
- in >> _l3WordNum >> _l4WordNum >> _l5WordNum >> _l6WordNum;
- in >> _curL3OffsetL4 >> _curL3OffsetL5 >> _curL4OffsetL5;
- in >> _headerSize;
- in >> _l3Entries;
- in >> _l4StrideCheck >> _l5StrideCheck;
- in >> _l3Size >> _l4Size >> _l5Size;
- in >> _prevL3Size >> _prevL4Size >> _prevL5Size >> _prevWordsSize;
- in >> _sparsePageNum >> _l3PageNum;
- in >> _words;
-}
-
-
PageDict4PWriter::PageDict4PWriter(SPWriter &spWriter,
EC &pe)
: _eCounts(),
@@ -1148,52 +1029,6 @@ PageDict4PWriter::addL2Skip(size_t &lcp)
}
-void
-PageDict4PWriter::checkPointWrite(vespalib::nbostream &out)
-{
- _wcCounts.checkPointWrite(out);
- _wcL1.checkPointWrite(out);
- _wcL2.checkPointWrite(out);
- out << _countsWord << _l1Word << _l2Word << _l3Word;
- out << _pendingCountsWord;
- out << _countsWordOffset << _l1WordOffset << _l2WordOffset;
- out << _countsStartOffset << _l1StartOffset << _l2StartOffset;
- out << _l3StartOffset;
- out << _curCountOffsetL1 << _curCountOffsetL2 << _curL1OffsetL2;
- out << _headerSize;
- out << _countsEntries;
- out << _l1StrideCheck << _l2StrideCheck;
- out << _countsSize << _l1Size << _l2Size;
- out << _prevL1Size << _prevL2Size;
- out << _pageNum;
- out << _l3WordNum << _wordNum;
- out << _words;
-}
-
-
-void
-PageDict4PWriter::checkPointRead(vespalib::nbostream &in)
-{
- _wcCounts.checkPointRead(in);
- _wcL1.checkPointRead(in);
- _wcL2.checkPointRead(in);
- in >> _countsWord >> _l1Word >> _l2Word >> _l3Word;
- in >> _pendingCountsWord;
- in >> _countsWordOffset >> _l1WordOffset >> _l2WordOffset;
- in >> _countsStartOffset >> _l1StartOffset >> _l2StartOffset;
- in >> _l3StartOffset;
- in >> _curCountOffsetL1 >> _curCountOffsetL2 >> _curL1OffsetL2;
- in >> _headerSize;
- in >> _countsEntries;
- in >> _l1StrideCheck >> _l2StrideCheck;
- in >> _countsSize >> _l1Size >> _l2Size;
- in >> _prevL1Size >> _prevL2Size;
- in >> _pageNum;
- in >> _l3WordNum >> _wordNum;
- in >> _words;
-}
-
-
PageDict4SSLookupRes::
PageDict4SSLookupRes()
: _l6Word(),
@@ -1660,34 +1495,6 @@ lookupOverflow(uint64_t wordNum) const
}
-void
-PageDict4SSReader::checkPointWrite(vespalib::nbostream &out)
-{
- out << _ssFileBitLen << _ssStartOffset;
- out << _l7;
- _ssd.checkPointWrite(out);
- out << _spFileBitLen << _pFileBitLen;
- out << _spStartOffset << _pStartOffset;
- out << _spFirstPageNum << _spFirstPageOffset;
- out << _pFirstPageNum << _pFirstPageOffset;
- out << _overflows;
-}
-
-
-void
-PageDict4SSReader::checkPointRead(vespalib::nbostream &in)
-{
- in >> _ssFileBitLen >> _ssStartOffset;
- in >> _l7;
- _ssd.checkPointRead(in);
- in >> _spFileBitLen >> _pFileBitLen;
- in >> _spStartOffset >> _pStartOffset;
- in >> _spFirstPageNum >> _spFirstPageOffset;
- in >> _pFirstPageNum >> _pFirstPageOffset;
- in >> _overflows;
-}
-
-
PageDict4SPLookupRes::
PageDict4SPLookupRes()
: _l3Word(),
@@ -2512,68 +2319,6 @@ PageDict4Reader::readOverflowCounts(vespalib::string &word,
_startOffset.adjust(counts);
}
-void
-PageDict4Reader::checkPointWrite(vespalib::nbostream &out)
-{
- out << _countsResidue;
- out << _overflowPage;
- out << _counts;
- size_t ccOff = _cc - _counts.begin();
- size_t ceOff = _ce - _counts.begin();
- assert(ceOff == _counts.size());
- out << ccOff << ceOff;
- out << _words;
- size_t wcOff = _wc - _words.begin();
- size_t weOff = _we - _words.begin();
- assert(weOff = _words.size());
- out << wcOff << weOff;
- out << _lastWord;
- out << _lastSSWord;
- out << _l3Residue;
- out << _spwords;
- size_t spwcOff = _spwc - _spwords.begin();
- size_t spweOff = _spwe - _spwords.begin();
- assert(spweOff == _spwords.size());
- out << spwcOff << spweOff;
- _ssd.checkPointWrite(out);
- out << _ssd.getReadOffset();
- out << _wordNum;
-}
-
-void
-PageDict4Reader::checkPointRead(vespalib::nbostream &in)
-{
- in >> _countsResidue;
- in >> _overflowPage;
- in >> _counts;
- size_t ccOff;
- size_t ceOff;
- in >> ccOff >> ceOff;
- _cc = _counts.begin() + ccOff;
- _ce = _counts.begin() + ceOff;
- in >> _words;
- size_t wcOff;
- size_t weOff;
- in >> wcOff >> weOff;
- _wc = _words.begin() + wcOff;
- _we = _words.begin() + weOff;
- in >> _lastWord;
- in >> _lastSSWord;
- in >> _l3Residue;
- in >> _spwords;
- size_t spwcOff;
- size_t spweOff;
- in >> spwcOff >> spweOff;
- _spwc = _spwords.begin() + spwcOff;
- _spwe = _spwords.begin() + spweOff;
- _ssd.checkPointRead(in);
- int64_t ssReadOffset;
- in >> ssReadOffset;
- const ComprBuffer &sscb = _ssReader._cb;
- setDecoderPosition(_ssd, sscb, ssReadOffset);
- in >> _wordNum;
-}
-
} // namespace bitcompression
} // namespace search
diff --git a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h
index 2a4cd143cfa..47f2354bcc6 100644
--- a/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h
+++ b/searchlib/src/vespa/searchlib/bitcompression/pagedict4.h
@@ -160,13 +160,6 @@ public:
void
flush();
-
-
- void
- checkPointWrite(vespalib::nbostream &out);
-
- void
- checkPointRead(vespalib::nbostream &in);
};
@@ -307,12 +300,6 @@ public:
{
_ssWriter.addOverflowCounts(word, counts, startOffset, wordNum);
}
-
- void
- checkPointWrite(vespalib::nbostream &out);
-
- void
- checkPointRead(vespalib::nbostream &in);
};
/*
@@ -447,12 +434,6 @@ public:
{
return _wordNum - 1;
}
-
- void
- checkPointWrite(vespalib::nbostream &out);
-
- void
- checkPointRead(vespalib::nbostream &in);
};
@@ -534,12 +515,6 @@ public:
{
return _l7Word < word;
}
-
- friend vespalib::nbostream &
- operator<<(vespalib::nbostream &stream, const L7Entry &l7Entry);
-
- friend vespalib::nbostream &
- operator>>(vespalib::nbostream &stream, L7Entry &l7Entry);
};
class OverflowRef
@@ -565,12 +540,6 @@ public:
{
return _wordNum < wordNum;
}
-
- friend vespalib::nbostream &
- operator<<(vespalib::nbostream &stream, const OverflowRef &oref);
-
- friend vespalib::nbostream &
- operator>>(vespalib::nbostream &stream, OverflowRef &oref);
};
ComprBuffer _cb;
@@ -617,12 +586,6 @@ public:
{
return _ssd;
}
-
- void
- checkPointWrite(vespalib::nbostream &out);
-
- void
- checkPointRead(vespalib::nbostream &in);
};
@@ -752,12 +715,6 @@ public:
void
readOverflowCounts(vespalib::string &word,
Counts &counts);
-
- void
- checkPointWrite(vespalib::nbostream &out);
-
- void
- checkPointRead(vespalib::nbostream &in);
};
} // namespace bitcompression
diff --git a/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp b/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp
index 4931455da8d..dcee48aed1a 100644
--- a/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp
+++ b/searchlib/src/vespa/searchlib/common/allocatedbitvector.cpp
@@ -5,7 +5,6 @@
namespace search {
-using vespalib::nbostream;
using vespalib::GenerationHeldBase;
using vespalib::GenerationHeldAlloc;
using vespalib::GenerationHolder;
diff --git a/searchlib/src/vespa/searchlib/common/isequencedtaskexecutor.h b/searchlib/src/vespa/searchlib/common/isequencedtaskexecutor.h
index 6984f696117..9b825f1c47e 100644
--- a/searchlib/src/vespa/searchlib/common/isequencedtaskexecutor.h
+++ b/searchlib/src/vespa/searchlib/common/isequencedtaskexecutor.h
@@ -3,10 +3,9 @@
#include <vespa/vespalib/util/executor.h>
#include <vespa/vespalib/stllike/hash_fun.h>
-#include "lambdatask.h"
+#include <vespa/vespalib/util/lambdatask.h>
-namespace search
-{
+namespace search {
/**
* Interface class to run multiple tasks in parallel, but tasks with same
@@ -50,7 +49,7 @@ public:
*/
template <class FunctionType>
void executeLambda(uint32_t executorId, FunctionType &&function) {
- executeTask(executorId, makeLambdaTask(std::forward<FunctionType>(function)));
+ executeTask(executorId, vespalib::makeLambdaTask(std::forward<FunctionType>(function)));
}
/**
* Wait for all scheduled tasks to complete.
@@ -69,7 +68,7 @@ public:
template <class FunctionType>
void execute(uint64_t componentId, FunctionType &&function) {
uint32_t executorId = getExecutorId(componentId);
- executeTask(executorId, makeLambdaTask(std::forward<FunctionType>(function)));
+ executeTask(executorId, vespalib::makeLambdaTask(std::forward<FunctionType>(function)));
}
/**
@@ -84,7 +83,7 @@ public:
template <class FunctionType>
void execute(vespalib::stringref componentId, FunctionType &&function) {
uint32_t executorId = getExecutorId(componentId);
- executeTask(executorId, makeLambdaTask(std::forward<FunctionType>(function)));
+ executeTask(executorId, vespalib::makeLambdaTask(std::forward<FunctionType>(function)));
}
};
diff --git a/searchlib/src/vespa/searchlib/config/CMakeLists.txt b/searchlib/src/vespa/searchlib/config/CMakeLists.txt
index 2f34d228f60..571cd3ad9f1 100644
--- a/searchlib/src/vespa/searchlib/config/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/config/CMakeLists.txt
@@ -4,4 +4,4 @@ vespa_add_library(searchlib_sconfig OBJECT
DEPENDS
)
vespa_generate_config(searchlib_sconfig translogserver.def)
-install(FILES translogserver.def RENAME searchlib.translogserver.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(translogserver.def searchlib.translogserver.def)
diff --git a/searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt b/searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt
index 9b6bf497c9a..3619affb54e 100644
--- a/searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt
+++ b/searchlib/src/vespa/searchlib/diskindex/CMakeLists.txt
@@ -5,7 +5,6 @@ vespa_add_library(searchlib_diskindex OBJECT
bitvectorfile.cpp
bitvectoridxfile.cpp
bitvectorkeyscope.cpp
- checkpointfile.cpp
dictionarywordreader.cpp
diskindex.cpp
disktermblueprint.cpp
diff --git a/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.cpp b/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.cpp
index defacdb05d7..374c6a8c7e6 100644
--- a/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.cpp
@@ -4,12 +4,10 @@
#include <vespa/searchlib/index/bitvectorkeys.h>
#include <vespa/searchlib/common/bitvector.h>
#include <vespa/searchlib/common/fileheadercontext.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/data/fileheader.h>
namespace search::diskindex {
-using vespalib::nbostream;
using search::index::BitVectorWordSingleKey;
using search::common::FileHeaderContext;
@@ -45,24 +43,6 @@ BitVectorFileWrite::~BitVectorFileWrite()
void
-BitVectorFileWrite::checkPointWrite(nbostream &out)
-{
- flush();
- Parent::checkPointWriteCommon(out);
- out << _datHeaderLen;
- sync();
-}
-
-
-void
-BitVectorFileWrite::checkPointRead(nbostream &in)
-{
- Parent::checkPointRead(in);
- in >> _datHeaderLen;
-}
-
-
-void
BitVectorFileWrite::open(const vespalib::string &name,
uint32_t docIdLimit,
const TuneFileSeqWrite &tuneFileWrite,
@@ -198,33 +178,4 @@ BitVectorFileWrite::close()
BitVectorCandidate::~BitVectorCandidate() {
}
-void
-BitVectorCandidate::checkPointWrite(nbostream &out)
-{
- uint32_t docIdLimit = _bv->size();
- out << docIdLimit << _numDocs << _bitVectorLimit;
- out.saveVector(_array);
- if (getCrossedBitVectorLimit())
- out << *_bv;
-}
-
-
-void
-BitVectorCandidate::checkPointRead(nbostream &in)
-{
- uint32_t docIdLimit = _bv->size();
- uint32_t checkDocIdLimit;
- uint32_t checkBitVectorLimit;
- in >> checkDocIdLimit >> _numDocs >> checkBitVectorLimit;
- assert(checkDocIdLimit == docIdLimit);
- (void) docIdLimit;
- assert(checkBitVectorLimit == _bitVectorLimit);
- in.restoreVector(_array);
- if (getCrossedBitVectorLimit()) {
- in >> *_bv;
- } else {
- _bv->clear();
- }
-}
-
}
diff --git a/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h b/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h
index 1b7b522bc52..893b792d96b 100644
--- a/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h
+++ b/searchlib/src/vespa/searchlib/diskindex/bitvectorfile.h
@@ -37,21 +37,6 @@ public:
~BitVectorFileWrite();
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Implies
- * flush from memory to disk, and possibly also sync to permanent
- * storage media.
- */
- void
- checkPointWrite(vespalib::nbostream &out);
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- void
- checkPointRead(vespalib::nbostream &in);
-
void
open(const vespalib::string &name, uint32_t docIdLimit,
const TuneFileSeqWrite &tuneFileWrite,
@@ -181,21 +166,6 @@ public:
{
return *_bv;
}
-
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Implies
- * flush from memory to disk, and possibly also sync to permanent
- * storage media.
- */
- void
- checkPointWrite(vespalib::nbostream &out);
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- void
- checkPointRead(vespalib::nbostream &in);
};
diff --git a/searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.cpp b/searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.cpp
index 856c527a8b5..a4eca7feac9 100644
--- a/searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.cpp
@@ -4,12 +4,10 @@
#include <vespa/searchlib/index/bitvectorkeys.h>
#include <vespa/searchlib/common/bitvector.h>
#include <vespa/searchlib/common/fileheadercontext.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/data/fileheader.h>
namespace search::diskindex {
-using vespalib::nbostream;
using search::index::BitVectorWordSingleKey;
using search::common::FileHeaderContext;
@@ -55,35 +53,6 @@ BitVectorIdxFileWrite::idxSize() const
void
-BitVectorIdxFileWrite::checkPointWriteCommon(nbostream &out)
-{
- out << _scope;
- out << _docIdLimit << _numKeys;
- out << _idxHeaderLen;
-}
-
-
-void
-BitVectorIdxFileWrite::checkPointWrite(nbostream &out)
-{
- flush();
- checkPointWriteCommon(out);
- sync();
-}
-
-
-void
-BitVectorIdxFileWrite::checkPointRead(nbostream &in)
-{
- BitVectorKeyScope checkScope;
- in >> checkScope;
- assert(checkScope == _scope);
- in >> _docIdLimit >> _numKeys;
- in >> _idxHeaderLen;
-}
-
-
-void
BitVectorIdxFileWrite::open(const vespalib::string &name,
uint32_t docIdLimit,
const TuneFileSeqWrite &tuneFileWrite,
diff --git a/searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.h b/searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.h
index 0ab40d9c4a3..8d704d1bffe 100644
--- a/searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.h
+++ b/searchlib/src/vespa/searchlib/diskindex/bitvectoridxfile.h
@@ -8,13 +8,6 @@
#include <vespa/vespalib/stllike/string.h>
#include "bitvectorkeyscope.h"
-namespace vespalib
-{
-
-class nbostream;
-
-}
-
namespace search
{
@@ -49,7 +42,6 @@ protected:
BitVectorKeyScope _scope;
uint64_t idxSize() const;
- void checkPointWriteCommon(vespalib::nbostream &out);
void syncCommon();
public:
@@ -57,21 +49,6 @@ public:
~BitVectorIdxFileWrite();
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Implies
- * flush from memory to disk, and possibly also sync to permanent
- * storage media.
- */
- void
- checkPointWrite(vespalib::nbostream &out);
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- void
- checkPointRead(vespalib::nbostream &in);
-
void
open(const vespalib::string &name, uint32_t docIdLimit,
const TuneFileSeqWrite &tuneFileWrite,
diff --git a/searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.cpp b/searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.cpp
index 2e02b092ddd..ee18383debd 100644
--- a/searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "bitvectorkeyscope.h"
-#include <vespa/vespalib/objects/nbostream.h>
#include <cassert>
using search::diskindex::BitVectorKeyScope;
@@ -19,46 +18,3 @@ const char *getBitVectorKeyScopeSuffix(BitVectorKeyScope scope)
}
}
-
-
-namespace {
-
-uint8_t
-getVal(BitVectorKeyScope scope)
-{
- switch (scope) {
- case BitVectorKeyScope::SHARED_WORDS:
- return 0u;
- default:
- return 1u;
- }
-}
-
-
-const BitVectorKeyScope scopes[] = { BitVectorKeyScope::SHARED_WORDS,
- BitVectorKeyScope::PERFIELD_WORDS };
-
-}
-
-
-namespace vespalib {
-
-nbostream &
-operator<<(nbostream &stream, const BitVectorKeyScope &scope)
-{
- uint8_t val = getVal(scope);
- stream << val;
- return stream;
-}
-
-nbostream &
-operator>>(nbostream &stream, BitVectorKeyScope &scope)
-{
- uint8_t val;
- stream >> val;
- assert(val < sizeof(scopes) / sizeof(scopes[0]));
- scope = scopes[val];
- return stream;
-}
-
-}
diff --git a/searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.h b/searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.h
index 1c931225cc0..7b2af6adb9d 100644
--- a/searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.h
+++ b/searchlib/src/vespa/searchlib/diskindex/bitvectorkeyscope.h
@@ -3,14 +3,6 @@
#pragma once
-namespace vespalib
-{
-
-class nbostream;
-
-}
-
-
namespace search
{
@@ -28,16 +20,3 @@ const char *getBitVectorKeyScopeSuffix(BitVectorKeyScope scope);
}
}
-
-namespace vespalib
-{
-
-nbostream &
-operator<<(nbostream &stream,
- const search::diskindex::BitVectorKeyScope &scope);
-
-nbostream &
-operator>>(nbostream &stream,
- search::diskindex::BitVectorKeyScope &scope);
-
-}
diff --git a/searchlib/src/vespa/searchlib/diskindex/checkpointfile.cpp b/searchlib/src/vespa/searchlib/diskindex/checkpointfile.cpp
deleted file mode 100644
index 0324f00f63c..00000000000
--- a/searchlib/src/vespa/searchlib/diskindex/checkpointfile.cpp
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "checkpointfile.h"
-#include <vespa/vespalib/data/fileheader.h>
-#include <vespa/searchlib/common/fileheadercontext.h>
-#include <cassert>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".diskindex.checkpointfile");
-
-using vespalib::getLastErrorString;
-
-namespace search::diskindex {
-
-using common::FileHeaderContext;
-
-CheckPointFile::CheckPointFile(const vespalib::string &name)
- : _file(),
- _name(name),
- _nameNew(name + ".NEW"),
- _nameNewNew(name + ".NEW.NEW"),
- _writeOpened(false),
- _headerLen(0u)
-{ }
-
-
-CheckPointFile::~CheckPointFile()
-{
- close();
-}
-
-
-void
-CheckPointFile::writeOpen(const FileHeaderContext &fileHeaderContext)
-{
- FastOS_File::Delete(_nameNewNew.c_str());
- _file.OpenWriteOnly(_nameNewNew.c_str());
- _writeOpened = true;
- makeHeader(fileHeaderContext);
-}
-
-
-bool
-CheckPointFile::readOpen()
-{
- bool openres;
-
- openres = _file.OpenReadOnly(_name.c_str());
- if (!openres) {
- bool renameres = FastOS_File::Rename(_nameNew.c_str(),
- _name.c_str());
- if (!renameres)
- return false;
- openres = _file.OpenReadOnly(_name.c_str());
- if (!openres)
- return false;
- }
- _headerLen = readHeader();
- return true;
-}
-
-
-void
-CheckPointFile::close()
-{
- if (_writeOpened) {
- _file.Sync();
- }
- _file.Close();
- if (_writeOpened) {
- updateHeader();
- rename1();
- rename2();
- }
- _writeOpened = false;
-}
-
-
-void
-CheckPointFile::rename1()
-{
- FastOS_File::Delete(_nameNew.c_str());
- bool renameres = FastOS_File::Rename(_nameNewNew.c_str(),
- _nameNew.c_str());
- if (!renameres) {
- LOG(error, "FATAL: rename %s -> %s failed: %s",
- _nameNewNew.c_str(), _nameNew.c_str(), getLastErrorString().c_str());
- abort();
- }
-}
-
-
-void
-CheckPointFile::rename2()
-{
- FastOS_File::Delete(_name.c_str());
- bool renameres = FastOS_File::Rename(_nameNew.c_str(), _name.c_str());
- if (!renameres) {
- LOG(error, "FATAL: rename %s -> %s failed: %s",
- _nameNew.c_str(), _name.c_str(), getLastErrorString().c_str());
- abort();
- }
-}
-
-
-void
-CheckPointFile::remove()
-{
- FastOS_File::Delete(_nameNew.c_str());
- FastOS_File::Delete(_name.c_str());
-}
-
-
-
-void
-CheckPointFile::write(vespalib::nbostream &buf,
- const FileHeaderContext &fileHeaderContext)
-{
- writeOpen(fileHeaderContext);
- _file.WriteBuf(buf.peek(), buf.size());
- close();
-}
-
-
-bool
-CheckPointFile::read(vespalib::nbostream &buf)
-{
- if (!readOpen())
- return false;
- size_t sz = _file.GetSize() - _headerLen;
-
- std::vector<char> tmp(sz);
- _file.ReadBuf(&tmp[0], sz);
- buf.clear();
- buf.write(&tmp[0], sz);
- std::vector<char>().swap(tmp);
- close();
- return true;
-}
-
-
-void
-CheckPointFile::makeHeader(const FileHeaderContext &fileHeaderContext)
-{
- vespalib::FileHeader header;
-
- typedef vespalib::GenericHeader::Tag Tag;
- fileHeaderContext.addTags(header, _file.GetFileName());
- header.putTag(Tag("frozen", 0));
- header.putTag(Tag("desc", "Check point file"));
- header.writeFile(_file);
-}
-
-
-void
-CheckPointFile::updateHeader()
-{
- vespalib::FileHeader h;
- FastOS_File f;
- f.OpenReadWrite(_nameNewNew.c_str());
- h.readFile(f);
- FileHeaderContext::setFreezeTime(h);
- typedef vespalib::GenericHeader::Tag Tag;
- h.putTag(Tag("frozen", 1));
- h.rewriteFile(f);
- f.Sync();
- f.Close();
-}
-
-
-uint32_t
-CheckPointFile::readHeader()
-{
- vespalib::FileHeader h;
- uint32_t headerLen = h.readFile(_file);
- _file.SetPosition(headerLen);
- assert(h.hasTag("frozen"));
- assert(h.getTag("frozen").asInteger() != 0);
- return headerLen;
-}
-
-
-}
diff --git a/searchlib/src/vespa/searchlib/diskindex/checkpointfile.h b/searchlib/src/vespa/searchlib/diskindex/checkpointfile.h
deleted file mode 100644
index 7ea18132b08..00000000000
--- a/searchlib/src/vespa/searchlib/diskindex/checkpointfile.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#pragma once
-
-#include <vespa/vespalib/stllike/string.h>
-#include <vespa/vespalib/objects/nbostream.h>
-#include <vespa/fastos/file.h>
-
-namespace search {
-
-namespace common { class FileHeaderContext; }
-
-namespace diskindex {
-
-class CheckPointFile
-{
-public:
- FastOS_File _file;
- vespalib::string _name;
- vespalib::string _nameNew;
- vespalib::string _nameNewNew;
- bool _writeOpened;
- uint32_t _headerLen;
-
- void writeOpen(const common::FileHeaderContext &fileHeaderContext);
- bool readOpen();
- void close();
- void rename1();
- void rename2();
- void remove();
- void makeHeader(const common::FileHeaderContext &fileHeaderContext);
- void updateHeader();
- uint32_t readHeader();
-public:
- CheckPointFile(const CheckPointFile &) = delete;
- CheckPointFile & operator = (const CheckPointFile &) = delete;
- CheckPointFile(const vespalib::string &name);
- ~CheckPointFile();
-
- void write(vespalib::nbostream &buf, const common::FileHeaderContext &fileHeaderContext);
- bool read(vespalib::nbostream &buf);
-};
-
-
-} // namespace diskindex
-
-} // namespace search
-
diff --git a/searchlib/src/vespa/searchlib/diskindex/docidmapper.cpp b/searchlib/src/vespa/searchlib/diskindex/docidmapper.cpp
index 84066aa4e65..70f7bdfea10 100644
--- a/searchlib/src/vespa/searchlib/diskindex/docidmapper.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/docidmapper.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "docidmapper.h"
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/searchlib/common/documentsummary.h>
#include <vespa/searchlib/common/bitvector.h>
#include <vespa/fastlib/io/bufferedfile.h>
diff --git a/searchlib/src/vespa/searchlib/diskindex/extposocc.cpp b/searchlib/src/vespa/searchlib/diskindex/extposocc.cpp
index 89c801f04b5..c6f9cc757fd 100644
--- a/searchlib/src/vespa/searchlib/diskindex/extposocc.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/extposocc.cpp
@@ -7,7 +7,6 @@
#include <vespa/searchlib/index/docidandfeatures.h>
#include <vespa/searchlib/index/postinglistcounts.h>
#include <vespa/searchlib/index/postinglistcountfile.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/log/log.h>
LOG_SETUP(".diskindex.extposocc");
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp b/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
index c1964906f85..cffc2e09ef8 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldreader.cpp
@@ -5,7 +5,6 @@
#include "extposocc.h"
#include "pagedict4file.h"
#include <vespa/vespalib/util/error.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/log/log.h>
LOG_SETUP(".diskindex.fieldreader");
@@ -36,7 +35,6 @@ FieldReader::FieldReader()
_oldWordNum(noWordNumHigh()),
_residue(0u),
_docIdLimit(0u),
- _checkPointResume(false),
_word()
{
}
@@ -108,8 +106,8 @@ FieldReader::setup(const WordNumMapping &wordNumMapping,
bool
-FieldReader::earlyOpen(const vespalib::string &prefix,
- const TuneFileSeqRead &tuneFileRead)
+FieldReader::open(const vespalib::string &prefix,
+ const TuneFileSeqRead &tuneFileRead)
{
vespalib::string name = prefix + "posocc.dat.compressed";
FastOS_StatInfo statInfo;
@@ -124,28 +122,20 @@ FieldReader::earlyOpen(const vespalib::string &prefix,
return false;
}
- _dictFile.reset(new search::diskindex::PageDict4FileSeqRead);
+ _dictFile = std::make_unique<PageDict4FileSeqRead>();
PostingListParams featureParams;
- _oldposoccfile.reset(search::diskindex::makePosOccRead(name,
- _dictFile.get(),
- dynamicKPosOccFormat,
- featureParams,
- tuneFileRead));
- return true;
-}
-
-
-bool
-FieldReader::lateOpen(const vespalib::string &prefix,
- const TuneFileSeqRead &tuneFileRead)
-{
+ _oldposoccfile.reset(makePosOccRead(name,
+ _dictFile.get(),
+ dynamicKPosOccFormat,
+ featureParams,
+ tuneFileRead));
vespalib::string cname = prefix + "dictionary";
- vespalib::string name = prefix + "posocc.dat.compressed";
if (!_dictFile->open(cname, tuneFileRead)) {
LOG(error,
"Could not open posocc count file %s for read",
cname.c_str());
+ return false;
}
// open posocc.dat
@@ -153,28 +143,16 @@ FieldReader::lateOpen(const vespalib::string &prefix,
LOG(error,
"Could not open posocc file %s for read",
name.c_str());
+ return false;
}
- if (!_checkPointResume) {
- _oldWordNum = noWordNum();
- _wordNum = _oldWordNum;
- PostingListParams params;
- _oldposoccfile->getParams(params);
- params.get("docIdLimit", _docIdLimit);
- }
+ _oldWordNum = noWordNum();
+ _wordNum = _oldWordNum;
+ PostingListParams params;
+ _oldposoccfile->getParams(params);
+ params.get("docIdLimit", _docIdLimit);
return true;
}
-
-bool
-FieldReader::open(const vespalib::string &prefix,
- const TuneFileSeqRead &tuneFileRead)
-{
- if (!earlyOpen(prefix, tuneFileRead))
- return false;
- return lateOpen(prefix, tuneFileRead);
-}
-
-
bool
FieldReader::close()
{
@@ -204,29 +182,6 @@ FieldReader::close()
void
-FieldReader::checkPointWrite(vespalib::nbostream &out)
-{
- out << _wordNum << _oldWordNum;
- out << _residue << _docIdAndFeatures;
- out << _docIdLimit;
- out << _word;
- _oldposoccfile->checkPointWrite(out);
- _dictFile->checkPointWrite(out);
-}
-
-void
-FieldReader::checkPointRead(vespalib::nbostream &in)
-{
- in >> _wordNum >> _oldWordNum;
- in >> _residue >> _docIdAndFeatures;
- in >> _docIdLimit;
- in >> _word;
- _oldposoccfile->checkPointRead(in);
- _dictFile->checkPointRead(in);
- _checkPointResume = true;
-}
-
-void
FieldReader::setFeatureParams(const PostingListParams &params)
{
_oldposoccfile->setFeatureParams(params);
@@ -261,26 +216,6 @@ FieldReaderEmpty::FieldReaderEmpty(const IndexIterator &index)
bool
-FieldReaderEmpty::earlyOpen(const vespalib::string &prefix,
- const TuneFileSeqRead &tuneFileRead)
-{
- (void) prefix;
- (void) tuneFileRead;
- return true;
-}
-
-
-bool
-FieldReaderEmpty::lateOpen(const vespalib::string &prefix,
- const TuneFileSeqRead &tuneFileRead)
-{
- (void) prefix;
- (void) tuneFileRead;
- return true;
-}
-
-
-bool
FieldReaderEmpty::open(const vespalib::string &prefix,
const TuneFileSeqRead &tuneFileRead)
{
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldreader.h b/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
index 3d724f31b41..d55aa39d491 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldreader.h
@@ -56,7 +56,6 @@ protected:
uint64_t _oldWordNum;
uint32_t _residue;
uint32_t _docIdLimit;
- bool _checkPointResume;
vespalib::string _word;
static uint64_t
@@ -117,31 +116,11 @@ public:
const DocIdMapping &docIdMapping);
virtual bool
- earlyOpen(const vespalib::string &prefix,
- const TuneFileSeqRead &tuneFileRead);
-
- virtual bool
- lateOpen(const vespalib::string &prefix,
- const TuneFileSeqRead &tuneFileRead);
-
- virtual bool
open(const vespalib::string &prefix, const TuneFileSeqRead &tuneFileRead);
virtual bool
close();
- /*
- * To be called between words, not in the middle of one.
- */
- virtual void
- checkPointWrite(vespalib::nbostream &out);
-
- /*
- * To be called after earlyOpen() but before afterOpen().
- */
- virtual void
- checkPointRead(vespalib::nbostream &in);
-
virtual void
setFeatureParams(const PostingListParams &params);
@@ -172,14 +151,6 @@ public:
FieldReaderEmpty(const IndexIterator &index);
virtual bool
- earlyOpen(const vespalib::string &prefix,
- const TuneFileSeqRead &tuneFileRead) override;
-
- virtual bool
- lateOpen(const vespalib::string &prefix,
- const TuneFileSeqRead &tuneFileRead) override;
-
- virtual bool
open(const vespalib::string &prefix, const TuneFileSeqRead &tuneFileRead)
override;
@@ -213,4 +184,3 @@ public:
} // namespace diskindex
} // namespace search
-
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp
index 9f03c06cf56..6b66568ef7a 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.cpp
@@ -5,14 +5,12 @@
#include "extposocc.h"
#include "pagedict4file.h"
#include <vespa/vespalib/util/error.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/log/log.h>
LOG_SETUP(".diskindex.fieldwriter");
namespace search::diskindex {
-using vespalib::nbostream;
using vespalib::getLastErrorString;
using common::FileHeaderContext;
@@ -34,14 +32,15 @@ FieldWriter::FieldWriter(uint32_t docIdLimit,
FieldWriter::~FieldWriter() { }
-void
-FieldWriter::earlyOpen(const vespalib::string &prefix,
- uint32_t minSkipDocs,
- uint32_t minChunkDocs,
- bool dynamicKPosOccFormat,
- const Schema &schema,
- const uint32_t indexId,
- const TuneFileSeqWrite &tuneFileWrite)
+bool
+FieldWriter::open(const vespalib::string &prefix,
+ uint32_t minSkipDocs,
+ uint32_t minChunkDocs,
+ bool dynamicKPosOccFormat,
+ const Schema &schema,
+ const uint32_t indexId,
+ const TuneFileSeqWrite &tuneFileWrite,
+ const FileHeaderContext &fileHeaderContext)
{
_prefix = prefix;
vespalib::string name = prefix + "posocc.dat.compressed";
@@ -64,7 +63,7 @@ FieldWriter::earlyOpen(const vespalib::string &prefix,
params.set("minChunkDocs", minChunkDocs);
}
- _dictFile.reset(new PageDict4FileSeqWrite);
+ _dictFile = std::make_unique<PageDict4FileSeqWrite>();
_dictFile->setParams(countParams);
_posoccfile.reset(diskindex::makePosOccWrite(name,
@@ -75,15 +74,7 @@ FieldWriter::earlyOpen(const vespalib::string &prefix,
schema,
indexId,
tuneFileWrite));
-}
-
-
-bool
-FieldWriter::lateOpen(const TuneFileSeqWrite &tuneFileWrite,
- const FileHeaderContext &fileHeaderContext)
-{
vespalib::string cname = _prefix + "dictionary";
- vespalib::string name = _prefix + "posocc.dat.compressed";
// Open output dictionary file
if (!_dictFile->open(cname, tuneFileWrite, fileHeaderContext)) {
@@ -181,36 +172,6 @@ FieldWriter::close()
void
-FieldWriter::checkPointWrite(nbostream &out)
-{
- out << _wordNum << _prevDocId;
- out << _docIdLimit << _numWordIds;
- out << _compactWordNum << _word;
- _posoccfile->checkPointWrite(out);
- _dictFile->checkPointWrite(out);
- _bvc.checkPointWrite(out);
- _bmapfile.checkPointWrite(out);
-}
-
-
-void
-FieldWriter::checkPointRead(nbostream &in)
-{
- in >> _wordNum >> _prevDocId;
- uint32_t checkDocIdLimit = 0;
- uint64_t checkNumWordIds = 0;
- in >> checkDocIdLimit >> checkNumWordIds;
- assert(checkDocIdLimit == _docIdLimit);
- assert(checkNumWordIds == _numWordIds);
- in >> _compactWordNum >> _word;
- _posoccfile->checkPointRead(in);
- _dictFile->checkPointRead(in);
- _bvc.checkPointRead(in);
- _bmapfile.checkPointRead(in);
-}
-
-
-void
FieldWriter::setFeatureParams(const PostingListParams &params)
{
_posoccfile->setFeatureParams(params);
diff --git a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h
index 1cd53b99bce..8f879360ea5 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h
+++ b/searchlib/src/vespa/searchlib/diskindex/fieldwriter.h
@@ -8,8 +8,6 @@
#include <vespa/searchlib/bitcompression/countcompression.h>
#include <vespa/searchlib/bitcompression/posocccompression.h>
-namespace vespalib { class nbostream; }
-
namespace search {
namespace diskindex {
@@ -73,24 +71,13 @@ public:
uint64_t getSparseWordNum() const { return _wordNum; }
- void earlyOpen(const vespalib::string &prefix, uint32_t minSkipDocs, uint32_t minChunkDocs,
- bool dynamicKPosOccFormat, const Schema &schema, uint32_t indexId,
- const TuneFileSeqWrite &tuneFileWrite);
-
- bool lateOpen(const TuneFileSeqWrite &tuneFileWrite,
- const search::common::FileHeaderContext &fileHeaderContext);
+ bool open(const vespalib::string &prefix, uint32_t minSkipDocs, uint32_t minChunkDocs,
+ bool dynamicKPosOccFormat, const Schema &schema, uint32_t indexId,
+ const TuneFileSeqWrite &tuneFileWrite,
+ const search::common::FileHeaderContext &fileHeaderContext);
bool close();
- /*
- * To be called between words, not in the middle of one.
- */
- void checkPointWrite(vespalib::nbostream &out);
-
- /*
- * To be called after earlyOpen() but before afterOpen().
- */
- void checkPointRead(vespalib::nbostream &in);
void setFeatureParams(const PostingListParams &params);
void getFeatureParams(PostingListParams &params);
static void remove(const vespalib::string &prefix);
diff --git a/searchlib/src/vespa/searchlib/diskindex/fusion.cpp b/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
index 97d21caef96..5dc5af68a38 100644
--- a/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/fusion.cpp
@@ -314,16 +314,14 @@ Fusion::openFieldWriter(const SchemaUtil::IndexIterator &index,
{
vespalib::string dir = _outDir + "/" + index.getName();
- writer.earlyOpen(dir + "/",
+ if (!writer.open(dir + "/",
64,
262144,
_dynamicKPosIndexFormat,
index.getSchema(),
index.getIndex(),
- _tuneFileIndexing._write);
- // No checkpointing
- if (!writer.lateOpen(_tuneFileIndexing._write,
- _fileHeaderContext)) {
+ _tuneFileIndexing._write,
+ _fileHeaderContext)) {
LOG(error, "Could not open output posocc + dictionary in %s",
dir.c_str());
abort();
diff --git a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp
index aacfa400755..171e862f064 100644
--- a/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/indexbuilder.cpp
@@ -292,13 +292,9 @@ FileHandle::open(const vespalib::stringref &dir,
_fieldWriter = new FieldWriter(docIdLimit, numWordIds);
- _fieldWriter->earlyOpen(dir + "/", 64, 262144u, false,
- index.getSchema(), index.getIndex(),
- tuneFileWrite);
-
- // No checkpointing
-
- if (!_fieldWriter->lateOpen(tuneFileWrite, fileHeaderContext)) {
+ if (!_fieldWriter->open(dir + "/", 64, 262144u, false,
+ index.getSchema(), index.getIndex(),
+ tuneFileWrite, fileHeaderContext)) {
LOG(error, "Could not open term writer %s for write (%s)",
dir.c_str(), getLastErrorString().c_str());
abort();
diff --git a/searchlib/src/vespa/searchlib/diskindex/pagedict4file.cpp b/searchlib/src/vespa/searchlib/diskindex/pagedict4file.cpp
index 9c63c5930b6..72fd07e5752 100644
--- a/searchlib/src/vespa/searchlib/diskindex/pagedict4file.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/pagedict4file.cpp
@@ -4,7 +4,6 @@
#include <vespa/searchlib/common/fileheadercontext.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/data/fileheader.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/io/fileutil.h>
#include <vespa/log/log.h>
@@ -63,8 +62,7 @@ PageDict4FileSeqRead::PageDict4FileSeqRead()
_ssCompleted(false),
_spCompleted(false),
_pCompleted(false),
- _wordNum(0u),
- _checkPointData()
+ _wordNum(0u)
{
_ssd.setReadContext(&_ssReadContext);
_spd.setReadContext(&_spReadContext);
@@ -229,26 +227,13 @@ PageDict4FileSeqRead::open(const vespalib::string &name,
32768u);
_ssd.emptyBuffer(0);
- if (_checkPointData) {
- _ssReadContext.setPosition(_ssReadContext.getCheckPointOffset());
- if (_ssd._valI >= _ssd._valE)
- _ssReadContext.readComprBuffer();
- _spReadContext.setPosition(_spReadContext.getCheckPointOffset());
- if (_spd._valI >= _spd._valE)
- _spReadContext.readComprBuffer();
- _pReadContext.setPosition(_pReadContext.getCheckPointOffset());
- if (_pd._valI >= _pd._valE)
- _pReadContext.readComprBuffer();
- } else {
- _ssReadContext.readComprBuffer();
- assert(_ssReadContext.getBufferEndFilePos() >= fileSize);
- readSSHeader();
- _spReadContext.readComprBuffer();
- readSPHeader();
- _pReadContext.readComprBuffer();
- readPHeader();
- }
-
+ _ssReadContext.readComprBuffer();
+ assert(_ssReadContext.getBufferEndFilePos() >= fileSize);
+ readSSHeader();
+ _spReadContext.readComprBuffer();
+ readSPHeader();
+ _pReadContext.readComprBuffer();
+ readPHeader();
_ssReader = new SSReader(_ssReadContext,
_ssHeaderLen,
@@ -263,15 +248,9 @@ PageDict4FileSeqRead::open(const vespalib::string &name,
_spd,
_pd);
- if (_checkPointData) {
- _ssReader->checkPointRead(*_checkPointData);
- _pReader->checkPointRead(*_checkPointData);
- assert(_checkPointData->empty());
- } else {
- _ssReader->setup(_ssd);
- _pReader->setup();
- _wordNum = 0;
- }
+ _ssReader->setup(_ssd);
+ _pReader->setup();
+ _wordNum = 0;
return true;
}
@@ -299,40 +278,6 @@ PageDict4FileSeqRead::close()
void
-PageDict4FileSeqRead::checkPointWrite(vespalib::nbostream &out)
-{
- _ssd.checkPointWrite(out);
- _spReadContext.checkPointWrite(out);
- _pReadContext.checkPointWrite(out);
- vespalib::nbostream data;
- _ssReader->checkPointWrite(data);
- _pReader->checkPointWrite(data);
- std::vector<char> checkPointData(data.size());
- data.read(&checkPointData[0], data.size());
- out << checkPointData;
- out << _wordNum;
- out << _ssCompleted << _ssFileBitSize << _ssHeaderLen;
- out << _spCompleted << _spFileBitSize << _spHeaderLen;
- out << _pCompleted << _pFileBitSize << _pHeaderLen;
-}
-
-void
-PageDict4FileSeqRead::checkPointRead(vespalib::nbostream &in)
-{
- _ssd.checkPointRead(in);
- _spReadContext.checkPointRead(in);
- _pReadContext.checkPointRead(in);
- std::vector<char> checkPointData;
- in >> checkPointData;
- _checkPointData.reset(new vespalib::nbostream(checkPointData.size()));
- _checkPointData->write(&checkPointData[0], checkPointData.size());
- in >> _wordNum;
- in >> _ssCompleted >> _ssFileBitSize >> _ssHeaderLen;
- in >> _spCompleted >> _spFileBitSize >> _spHeaderLen;
- in >> _pCompleted >> _pFileBitSize >> _pHeaderLen;
-}
-
-void
PageDict4FileSeqRead::getParams(PostingListParams &params)
{
params.clear();
@@ -418,11 +363,9 @@ PageDict4FileSeqWrite::open(const vespalib::string &name,
assertOpenWriteOnly(ok, ssname);
_ssWriteContext.setFile(&_ssfile);
- if (!_checkPointData) {
- _pWriteContext.allocComprBuf(65536u, 32768u);
- _spWriteContext.allocComprBuf(65536u, 32768u);
- _ssWriteContext.allocComprBuf(65536u, 32768u);
- }
+ _pWriteContext.allocComprBuf(65536u, 32768u);
+ _spWriteContext.allocComprBuf(65536u, 32768u);
+ _ssWriteContext.allocComprBuf(65536u, 32768u);
uint64_t pFileSize = _pfile.GetSize();
uint64_t spFileSize = _spfile.GetSize();
@@ -445,33 +388,24 @@ PageDict4FileSeqWrite::open(const vespalib::string &name,
assert(ssBufferStartFilePos ==
static_cast<uint64_t>(_ssfile.GetPosition()));
- if (!_checkPointData) {
- _pe.setupWrite(_pWriteContext);
- _spe.setupWrite(_spWriteContext);
- _sse.setupWrite(_ssWriteContext);
- assert(_pe.getWriteOffset() == 0);
- assert(_spe.getWriteOffset() == 0);
- assert(_sse.getWriteOffset() == 0);
- _spe.copyParams(_sse);
- _pe.copyParams(_sse);
- // Write initial file headers
- makePHeader(fileHeaderContext);
- makeSPHeader(fileHeaderContext);
- makeSSHeader(fileHeaderContext);
- }
+ _pe.setupWrite(_pWriteContext);
+ _spe.setupWrite(_spWriteContext);
+ _sse.setupWrite(_ssWriteContext);
+ assert(_pe.getWriteOffset() == 0);
+ assert(_spe.getWriteOffset() == 0);
+ assert(_sse.getWriteOffset() == 0);
+ _spe.copyParams(_sse);
+ _pe.copyParams(_sse);
+ // Write initial file headers
+ makePHeader(fileHeaderContext);
+ makeSPHeader(fileHeaderContext);
+ makeSSHeader(fileHeaderContext);
_ssWriter = new SSWriter(_sse);
_spWriter = new SPWriter(*_ssWriter, _spe);
_pWriter = new PWriter(*_spWriter, _pe);
- if (_checkPointData) {
- _ssWriter->checkPointRead(*_checkPointData);
- _spWriter->checkPointRead(*_checkPointData);
- _pWriter->checkPointRead(*_checkPointData);
- assert(_checkPointData->empty());
- } else {
- _spWriter->setup();
- _pWriter->setup();
- }
+ _spWriter->setup();
+ _pWriter->setup();
return true;
}
@@ -683,43 +617,6 @@ PageDict4FileSeqWrite::updateSSHeader(uint64_t fileBitSize)
void
-PageDict4FileSeqWrite::checkPointWrite(vespalib::nbostream &out)
-{
- _ssWriteContext.writeComprBuffer(true);
- _spWriteContext.writeComprBuffer(true);
- _pWriteContext.writeComprBuffer(true);
- _ssWriteContext.checkPointWrite(out);
- _spWriteContext.checkPointWrite(out);
- _pWriteContext.checkPointWrite(out);
- vespalib::nbostream data;
- _ssWriter->checkPointWrite(data);
- _spWriter->checkPointWrite(data);
- _pWriter->checkPointWrite(data);
- std::vector<char> checkPointData(data.size());
- data.read(&checkPointData[0], data.size());
- out << checkPointData;
- out << _ssHeaderLen << _spHeaderLen << _pHeaderLen;
- _ssfile.Sync();
- _spfile.Sync();
- _pfile.Sync();
-}
-
-
-void
-PageDict4FileSeqWrite::checkPointRead(vespalib::nbostream &in)
-{
- _ssWriteContext.checkPointRead(in);
- _spWriteContext.checkPointRead(in);
- _pWriteContext.checkPointRead(in);
- std::vector<char> checkPointData;
- in >> checkPointData;
- _checkPointData.reset(new vespalib::nbostream(checkPointData.size()));
- _checkPointData->write(&checkPointData[0], checkPointData.size());
- in >> _ssHeaderLen >> _spHeaderLen >> _pHeaderLen;
-}
-
-
-void
PageDict4FileSeqWrite::setParams(const PostingListParams &params)
{
params.get("avgBitsPerDoc", _sse._avgBitsPerDoc);
diff --git a/searchlib/src/vespa/searchlib/diskindex/pagedict4file.h b/searchlib/src/vespa/searchlib/diskindex/pagedict4file.h
index fe432a22463..7baa883a740 100644
--- a/searchlib/src/vespa/searchlib/diskindex/pagedict4file.h
+++ b/searchlib/src/vespa/searchlib/diskindex/pagedict4file.h
@@ -50,8 +50,6 @@ class PageDict4FileSeqRead : public index::DictionaryFileSeqRead
uint64_t _wordNum;
- std::unique_ptr<vespalib::nbostream> _checkPointData;
-
void
readSSHeader();
@@ -85,21 +83,6 @@ public:
*/
virtual bool close() override;
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Implies
- * flush from memory to disk, and possibly also sync to permanent
- * storage media.
- */
- virtual void
- checkPointWrite(vespalib::nbostream &out) override;
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- virtual void
- checkPointRead(vespalib::nbostream &in) override;
-
/*
* Get current parameters.
*/
@@ -142,8 +125,6 @@ class PageDict4FileSeqWrite : public index::DictionaryFileSeqWrite
uint32_t _spHeaderLen; // Length of header for sparse page file (bytes)
uint32_t _ssHeaderLen; // Length of header for sparse sparse file (bytes)
- std::unique_ptr<vespalib::nbostream> _checkPointData;
-
void
writeIndexNames(vespalib::GenericHeader &header);
@@ -196,21 +177,6 @@ public:
virtual bool
close() override;
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Implies
- * flush from memory to disk, and possibly also sync to permanent
- * storage media.
- */
- virtual void
- checkPointWrite(vespalib::nbostream &out) override;
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- virtual void
- checkPointRead(vespalib::nbostream &in) override;
-
/*
* Set parameters.
*/
diff --git a/searchlib/src/vespa/searchlib/diskindex/wordnummapper.cpp b/searchlib/src/vespa/searchlib/diskindex/wordnummapper.cpp
index 00b6d23fbcf..3bd879de291 100644
--- a/searchlib/src/vespa/searchlib/diskindex/wordnummapper.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/wordnummapper.cpp
@@ -1,7 +1,6 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "wordnummapper.h"
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/fastlib/io/bufferedfile.h>
#include <cassert>
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
index 768ee972097..17d4457a318 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
+++ b/searchlib/src/vespa/searchlib/diskindex/zcposting.cpp
@@ -6,7 +6,6 @@
#include <vespa/searchlib/index/postinglistfile.h>
#include <vespa/searchlib/index/docidandfeatures.h>
#include <vespa/searchlib/common/fileheadercontext.h>
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/data/fileheader.h>
#include <vespa/log/log.h>
@@ -27,7 +26,6 @@ using index::PostingListCountFileSeqWrite;
using common::FileHeaderContext;
using bitcompression::FeatureDecodeContextBE;
using bitcompression::FeatureEncodeContextBE;
-using vespalib::nbostream;
using vespalib::getLastErrorString;
@@ -78,11 +76,7 @@ Zc4PostingSeqRead(PostingListCountFileSeqRead *countFile)
_rangeEndOffset(0),
_readAheadEndOffset(0),
_wordStart(0),
- _checkPointPos(0),
- _residue(0),
- _checkPointChunkNo(0u),
- _checkPointResidue(0u),
- _checkPointHasMore(false)
+ _residue(0)
{
if (_countFile != NULL) {
PostingListParams params;
@@ -231,32 +225,6 @@ readDocIdAndFeatures(DocIdAndFeatures &features)
void
-Zc4PostingSeqRead::checkPointWrite(nbostream &out)
-{
- out << _counts;
- out << _wordStart;
- uint64_t curPos = _decodeContext->getReadOffset();
- out << curPos;
- out << _residue;
- out << _chunkNo;
- out << _hasMore;
-}
-
-
-void
-Zc4PostingSeqRead::checkPointRead(nbostream &in)
-{
- in >> _counts;
- in >> _wordStart;
- in >> _checkPointPos;
- in >> _checkPointResidue;
- in >> _checkPointChunkNo;
- in >> _checkPointHasMore;
- assert(_checkPointPos >= _wordStart);
-}
-
-
-void
Zc4PostingSeqRead::readWordStartWithSkip()
{
typedef FeatureEncodeContextBE EC;
@@ -508,34 +476,8 @@ Zc4PostingSeqRead::open(const vespalib::string &name,
_readContext.readComprBuffer();
readHeader();
- if (d._valI >= d._valE)
+ if (d._valI >= d._valE) {
_readContext.readComprBuffer();
- if (_checkPointPos != 0) {
- if (_checkPointResidue != 0 || _checkPointHasMore) {
- // Checkpointed in the middle of a word. Read from
- // start at word until at right position.
- DocIdAndFeatures features;
- _readContext.setPosition(_wordStart);
- assert(_decodeContext->getReadOffset() == _wordStart);
- _readContext.readComprBuffer();
- readWordStart();
- assert(_chunkNo < _checkPointChunkNo ||
- (_chunkNo == _checkPointChunkNo &&
- _residue >= _checkPointResidue));
- while (_chunkNo < _checkPointChunkNo ||
- _residue > _checkPointResidue) {
- readDocIdAndFeatures(features);
- }
- assert(_chunkNo == _checkPointChunkNo);
- assert(_residue == _checkPointResidue);
- assert(_hasMore == _checkPointHasMore);
- assert(_decodeContext->getReadOffset() == _checkPointPos);
- } else {
- // Checkpointed between words.
- _readContext.setPosition(_checkPointPos);
- assert(_decodeContext->getReadOffset() == _checkPointPos);
- _readContext.readComprBuffer();
- }
}
} else {
LOG(error, "could not open %s: %s",
@@ -740,29 +682,6 @@ Zc4PostingSeqWrite::flushWord()
}
-void
-Zc4PostingSeqWrite::checkPointWrite(nbostream &out)
-{
- _writeContext.writeComprBuffer(true); // Also flush slack
- out << _numWords;
- _writeContext.checkPointWrite(out);
- _featureWriteContext.checkPointWrite(out);
- out.saveVector(_docIds) << _writePos << _counts;
- _file.Sync();
-}
-
-
-void
-Zc4PostingSeqWrite::checkPointRead(nbostream &in)
-{
- in >> _numWords;
- _writeContext.checkPointRead(in);
- _featureWriteContext.checkPointRead(in);
- _featureOffset = _encodeFeatures->getWriteOffset();
- in.restoreVector(_docIds) >> _writePos >> _counts;
-}
-
-
uint32_t
Zc4PostingSeqWrite::readHeader(const vespalib::string &name)
{
@@ -1422,10 +1341,3 @@ ZcPostingSeqWrite::flushWordNoSkip()
}
} // namespace search::diskindex
-
-#include <vespa/vespalib/objects/nbostream.hpp>
-namespace vespalib {
- using UInt32Pair = std::pair<uint32_t, uint32_t>;
- template nbostream &nbostream::saveVector<UInt32Pair>(const std::vector<UInt32Pair> &);
- template nbostream &nbostream::restoreVector<UInt32Pair>(std::vector<UInt32Pair> &);
-}
diff --git a/searchlib/src/vespa/searchlib/diskindex/zcposting.h b/searchlib/src/vespa/searchlib/diskindex/zcposting.h
index 64879aa05fb..9a650abdd40 100644
--- a/searchlib/src/vespa/searchlib/diskindex/zcposting.h
+++ b/searchlib/src/vespa/searchlib/diskindex/zcposting.h
@@ -78,11 +78,7 @@ protected:
uint64_t _rangeEndOffset; // End offset for word pair
uint64_t _readAheadEndOffset; // Readahead end offset for word pair
uint64_t _wordStart; // last word header position
- uint64_t _checkPointPos; // file position when checkpointing
uint32_t _residue; // Number of unread documents after word header
- uint32_t _checkPointChunkNo; // _chunkNo when checkpointing
- uint32_t _checkPointResidue; // _residue when checkpointing
- bool _checkPointHasMore; // _hasMore when checkpointing
public:
Zc4PostingSeqRead(index::PostingListCountFileSeqRead *countFile);
@@ -98,8 +94,6 @@ public:
virtual void readCommonWordDocIdAndFeatures(DocIdAndFeatures &features);
void readDocIdAndFeatures(DocIdAndFeatures &features) override;
- void checkPointWrite(vespalib::nbostream &out) override;
- void checkPointRead(vespalib::nbostream &in) override;
void readCounts(const PostingListCounts &counts) override; // Fill in for next word
bool open(const vespalib::string &name, const TuneFileSeqRead &tuneFileRead) override;
bool close() override;
@@ -176,8 +170,6 @@ public:
void writeDocIdAndFeatures(const DocIdAndFeatures &features) override;
void flushWord() override;
- void checkPointWrite(vespalib::nbostream &out) override;
- void checkPointRead(vespalib::nbostream &in) override;
bool open(const vespalib::string &name, const TuneFileSeqWrite &tuneFileWrite,
const search::common::FileHeaderContext &fileHeaderContext) override;
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
index bf59614a297..42920a093eb 100644
--- a/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/documentstore.cpp
@@ -110,17 +110,19 @@ private:
class BackingStore {
public:
BackingStore(IDataStore &store, const CompressionConfig &compression) :
- _backingStore(store),
- _compression(compression) { }
+ _backingStore(store),
+ _compression(compression)
+ { }
bool read(DocumentIdT key, Value &value) const;
void visit(const IDocumentStore::LidVector &lids, const DocumentTypeRepo &repo, IDocumentVisitor &visitor) const;
void write(DocumentIdT, const Value &) {}
void erase(DocumentIdT) {}
- const CompressionConfig &getCompression(void) const { return _compression; }
+ const CompressionConfig &getCompression() const { return _compression; }
+ void reconfigure(const CompressionConfig &compression);
private:
IDataStore &_backingStore;
- const CompressionConfig _compression;
+ CompressionConfig _compression;
};
void
@@ -172,6 +174,11 @@ BackingStore::read(DocumentIdT key, Value &value) const {
return found;
}
+void
+BackingStore::reconfigure(const CompressionConfig &compression) {
+ _compression = compression;
+}
+
}
using CacheParams = vespalib::CacheParam<
@@ -189,6 +196,15 @@ public:
using VisitCache = docstore::VisitCache;
using docstore::Value;
+bool
+DocumentStore::Config::operator == (const Config &rhs) const {
+ return (_maxCacheBytes == rhs._maxCacheBytes) &&
+ (_allowVisitCaching == rhs._allowVisitCaching) &&
+ (_initialCacheEntries == rhs._initialCacheEntries) &&
+ (_compression == rhs._compression);
+}
+
+
DocumentStore::DocumentStore(const Config & config, IDataStore & store)
: IDocumentStore(),
_config(config),
@@ -201,8 +217,15 @@ DocumentStore::DocumentStore(const Config & config, IDataStore & store)
_cache->reserveElements(config.getInitialCacheEntries());
}
-DocumentStore::~DocumentStore()
-{
+DocumentStore::~DocumentStore() {}
+
+void
+DocumentStore::reconfigure(const Config & config) {
+ _cache->setCapacityBytes(config.getMaxCacheBytes());
+ _store->reconfigure(config.getCompression());
+ _visitCache->reconfigure(_config.getMaxCacheBytes(), config.getCompression());
+
+ _config = config;
}
bool
diff --git a/searchlib/src/vespa/searchlib/docstore/documentstore.h b/searchlib/src/vespa/searchlib/docstore/documentstore.h
index 4ba5c27cd07..e6e4364e6ab 100644
--- a/searchlib/src/vespa/searchlib/docstore/documentstore.h
+++ b/searchlib/src/vespa/searchlib/docstore/documentstore.h
@@ -44,6 +44,7 @@ public:
size_t getInitialCacheEntries() const { return _initialCacheEntries; }
bool allowVisitCaching() const { return _allowVisitCaching; }
Config & allowVisitCaching(bool allow) { _allowVisitCaching = allow; return *this; }
+ bool operator == (const Config &) const;
private:
CompressionConfig _compression;
size_t _maxCacheBytes;
@@ -100,6 +101,7 @@ public:
bool canShrinkLidSpace() const override;
size_t getEstimatedShrinkLidSpaceGain() const override;
void shrinkLidSpace() override;
+ void reconfigure(const Config & config);
private:
bool useCache() const;
diff --git a/searchlib/src/vespa/searchlib/docstore/filechunk.cpp b/searchlib/src/vespa/searchlib/docstore/filechunk.cpp
index 4fac42c1421..93a85eda0aa 100644
--- a/searchlib/src/vespa/searchlib/docstore/filechunk.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/filechunk.cpp
@@ -5,7 +5,7 @@
#include "summaryexceptions.h"
#include "randreaders.h"
#include <vespa/searchlib/util/filekit.h>
-#include <vespa/searchlib/common/lambdatask.h>
+#include <vespa/vespalib/util/lambdatask.h>
#include <vespa/vespalib/data/fileheader.h>
#include <vespa/vespalib/data/databuffer.h>
#include <vespa/vespalib/stllike/asciistream.h>
@@ -345,14 +345,14 @@ FileChunk::appendTo(vespalib::ThreadExecutor & executor, const IGetLid & db, IWr
for (size_t chunkId(0); chunkId < numChunks; chunkId++) {
std::promise<Chunk::UP> promisedChunk;
std::future<Chunk::UP> futureChunk = promisedChunk.get_future();
- executor.execute(makeLambdaTask([promise = std::move(promisedChunk), chunkId, this]() mutable {
+ executor.execute(vespalib::makeLambdaTask([promise = std::move(promisedChunk), chunkId, this]() mutable {
const ChunkInfo & cInfo(_chunkInfo[chunkId]);
vespalib::DataBuffer whole(0ul, ALIGNMENT);
FileRandRead::FSP keepAlive(_file->read(cInfo.getOffset(), whole, cInfo.getSize()));
promise.set_value(std::make_unique<Chunk>(chunkId, whole.getData(), whole.getDataLen()));
}));
- singleExecutor.execute(makeLambdaTask([args = &fixedParams, chunk = std::move(futureChunk)]() mutable {
+ singleExecutor.execute(vespalib::makeLambdaTask([args = &fixedParams, chunk = std::move(futureChunk)]() mutable {
appendChunks(args, chunk.get());
}));
}
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
index 4fa4142813c..4e3178fcdb1 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
@@ -29,15 +29,33 @@ using docstore::StoreByBucket;
using docstore::BucketCompacter;
using namespace std::literals;
-LogDataStore::LogDataStore(vespalib::ThreadExecutor &executor,
- const vespalib::string &dirName,
- const Config &config,
- const GrowStrategy &growStrategy,
- const TuneFileSummary &tune,
- const FileHeaderContext &fileHeaderContext,
- transactionlog::SyncProxy &tlSyncer,
- const IBucketizer::SP & bucketizer,
- bool readOnly)
+LogDataStore::Config::Config()
+ : _maxFileSize(1000000000ul),
+ _maxDiskBloatFactor(0.2),
+ _maxBucketSpread(2.5),
+ _minFileSizeFactor(0.2),
+ _skipCrcOnRead(false),
+ _compact2ActiveFile(true),
+ _compactCompression(CompressionConfig::LZ4),
+ _fileConfig()
+{ }
+
+bool
+LogDataStore::Config::operator == (const Config & rhs) const {
+ return (_maxBucketSpread == rhs._maxBucketSpread) &&
+ (_maxDiskBloatFactor == rhs._maxDiskBloatFactor) &&
+ (_maxFileSize == rhs._maxFileSize) &&
+ (_minFileSizeFactor == rhs._minFileSizeFactor) &&
+ (_compact2ActiveFile == rhs._compact2ActiveFile) &&
+ (_skipCrcOnRead == rhs._skipCrcOnRead) &&
+ (_compactCompression == rhs._compactCompression) &&
+ (_fileConfig == rhs._fileConfig);
+}
+
+LogDataStore::LogDataStore(vespalib::ThreadExecutor &executor, const vespalib::string &dirName, const Config &config,
+ const GrowStrategy &growStrategy, const TuneFileSummary &tune,
+ const FileHeaderContext &fileHeaderContext, transactionlog::SyncProxy &tlSyncer,
+ const IBucketizer::SP & bucketizer, bool readOnly)
: IDataStore(dirName),
_config(config),
_tune(tune),
@@ -67,6 +85,10 @@ LogDataStore::LogDataStore(vespalib::ThreadExecutor &executor,
updateSerialNum();
}
+void LogDataStore::reconfigure(const Config & config) {
+ _config = config;
+}
+
void
LogDataStore::updateSerialNum()
{
@@ -274,12 +296,9 @@ LogDataStore::compact(uint64_t syncToken)
uint64_t usage = getDiskFootprint();
uint64_t bloat = getDiskBloat();
LOG(debug, "%s", bloatMsg(bloat, usage).c_str());
- if ((_fileChunks.size() > 1) &&
- ( isBucketSpreadTooLarge(getMaxBucketSpread()) ||
- isBloatOverLimit(bloat, usage)))
- {
+ if (_fileChunks.size() > 1) {
LOG(info, "%s. Will compact", bloatMsg(bloat, usage).c_str());
- compactWorst();
+ compactWorst(_config.getMaxDiskBloatFactor(), _config.getMaxBucketSpread());
usage = getDiskFootprint();
bloat = getDiskBloat();
LOG(info, "Done compacting. %s", bloatMsg(bloat, usage).c_str());
@@ -299,7 +318,7 @@ LogDataStore::getMaxCompactGain() const
bloat = 0;
}
size_t spreadAsBloat = diskFootPrint * (1.0 - 1.0/maxSpread);
- if ( ! isBucketSpreadTooLarge(maxSpread)) {
+ if ( maxSpread < _config.getMaxBucketSpread()) {
spreadAsBloat = 0;
}
return (bloat + spreadAsBloat);
@@ -348,7 +367,7 @@ LogDataStore::getMaxBucketSpread() const
}
std::pair<bool, LogDataStore::FileId>
-LogDataStore::findNextToCompact()
+LogDataStore::findNextToCompact(double bloatLimit, double spreadLimit)
{
typedef std::multimap<double, FileId, std::greater<double>> CostMap;
CostMap worstBloat;
@@ -376,10 +395,10 @@ LogDataStore::findNextToCompact()
}
}
std::pair<bool, FileId> retval(false, FileId(-1));
- if ( ! worstBloat.empty() && (worstBloat.begin()->first > _config.getMaxDiskBloatFactor())) {
+ if ( ! worstBloat.empty() && (worstBloat.begin()->first > bloatLimit)) {
retval.first = true;
retval.second = worstBloat.begin()->second;
- } else if ( ! worstSpread.empty() && (worstSpread.begin()->first > _config.getMaxBucketSpread())) {
+ } else if ( ! worstSpread.empty() && (worstSpread.begin()->first > spreadLimit)) {
retval.first = true;
retval.second = worstSpread.begin()->second;
}
@@ -390,8 +409,8 @@ LogDataStore::findNextToCompact()
}
void
-LogDataStore::compactWorst() {
- auto worst = findNextToCompact();
+LogDataStore::compactWorst(double bloatLimit, double spreadLimit) {
+ auto worst = findNextToCompact(bloatLimit, spreadLimit);
if (worst.first) {
compactFile(worst.second);
}
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.h b/searchlib/src/vespa/searchlib/docstore/logdatastore.h
index eb46e5438a9..52d78cac2bd 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.h
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.h
@@ -36,57 +36,37 @@ public:
using CompressionConfig = vespalib::compression::CompressionConfig;
class Config {
public:
- Config()
- : _maxFileSize(1000000000ul),
- _maxDiskBloatFactor(0.2),
- _maxBucketSpread(2.5),
- _minFileSizeFactor(0.2),
- _numThreads(8),
- _skipCrcOnRead(false),
- _compactToActiveFile(true),
- _compactCompression(CompressionConfig::LZ4),
- _fileConfig()
- { }
-
- Config(size_t maxFileSize,
- double maxDiskBloatFactor,
- double maxBucketSpread,
- double minFileSizeFactor,
- size_t numThreads,
- bool compactToActiveFile,
- const CompressionConfig & compactCompression,
- const WriteableFileChunk::Config & fileConfig)
- : _maxFileSize(maxFileSize),
- _maxDiskBloatFactor(maxDiskBloatFactor),
- _maxBucketSpread(maxBucketSpread),
- _minFileSizeFactor(minFileSizeFactor),
- _numThreads(numThreads),
- _skipCrcOnRead(false),
- _compactToActiveFile(compactToActiveFile),
- _compactCompression(compactCompression),
- _fileConfig(fileConfig)
- { }
+ Config();
+
+ Config & setMaxFileSize(size_t v) { _maxFileSize = v; return *this; }
+ Config & setMaxDiskBloatFactor(double v) { _maxDiskBloatFactor = v; return *this; }
+ Config & setMaxBucketSpread(double v) { _maxBucketSpread = v; return *this; }
+ Config & setMinFileSizeFactor(double v) { _minFileSizeFactor = v; return *this; }
+
+ Config & compactCompression(CompressionConfig v) { _compactCompression = v; return *this; }
+ Config & setFileConfig(WriteableFileChunk::Config v) { _fileConfig = v; return *this; }
size_t getMaxFileSize() const { return _maxFileSize; }
double getMaxDiskBloatFactor() const { return _maxDiskBloatFactor; }
double getMaxBucketSpread() const { return _maxBucketSpread; }
double getMinFileSizeFactor() const { return _minFileSizeFactor; }
- size_t getNumThreads() const { return _numThreads; }
bool crcOnReadDisabled() const { return _skipCrcOnRead; }
- void disableCrcOnRead(bool v) { _skipCrcOnRead = v; }
- bool compact2ActiveFile() const { return _compactToActiveFile; }
+ bool compact2ActiveFile() const { return _compact2ActiveFile; }
const CompressionConfig & compactCompression() const { return _compactCompression; }
const WriteableFileChunk::Config & getFileConfig() const { return _fileConfig; }
+ Config & disableCrcOnRead(bool v) { _skipCrcOnRead = v; return *this;}
+ Config & compact2ActiveFile(bool v) { _compact2ActiveFile = v; return *this; }
+
+ bool operator == (const Config &) const;
private:
size_t _maxFileSize;
double _maxDiskBloatFactor;
double _maxBucketSpread;
double _minFileSizeFactor;
- size_t _numThreads;
bool _skipCrcOnRead;
- bool _compactToActiveFile;
+ bool _compact2ActiveFile;
CompressionConfig _compactCompression;
WriteableFileChunk::Config _fileConfig;
};
@@ -106,15 +86,10 @@ public:
* The caller must keep it alive for the semantic
* lifetime of the log data store.
*/
- LogDataStore(vespalib::ThreadExecutor &executor,
- const vespalib::string &dirName,
- const Config & config,
- const GrowStrategy &growStrategy,
- const TuneFileSummary &tune,
+ LogDataStore(vespalib::ThreadExecutor &executor, const vespalib::string &dirName, const Config & config,
+ const GrowStrategy &growStrategy, const TuneFileSummary &tune,
const search::common::FileHeaderContext &fileHeaderContext,
- transactionlog::SyncProxy &tlSyncer,
- const IBucketizer::SP & bucketizer,
- bool readOnly = false);
+ transactionlog::SyncProxy &tlSyncer, const IBucketizer::SP & bucketizer, bool readOnly = false);
~LogDataStore();
@@ -201,18 +176,17 @@ public:
static NameIdSet findIncompleteCompactedFiles(const NameIdSet & partList);
NameIdSet getAllActiveFiles() const;
+ void reconfigure(const Config & config);
private:
class WrapVisitor;
class WrapVisitorProgress;
class FileChunkHolder;
- void waitForUnblock();
-
// Implements ISetLid API
void setLid(const LockGuard & guard, uint32_t lid, const LidInfo & lm) override;
- void compactWorst();
+ void compactWorst(double bloatLimit, double spreadLimit);
void compactFile(FileId chunkId);
typedef attribute::RcuVector<uint64_t> LidInfoVector;
@@ -257,9 +231,6 @@ private:
_active = fileId;
}
- bool isBucketSpreadTooLarge(double spread) const {
- return (spread >= _config.getMaxBucketSpread());
- }
double getMaxBucketSpread() const;
FileChunk::UP createReadOnlyFile(FileId fileId, NameId nameId);
@@ -273,13 +244,6 @@ private:
bool isReadOnly() const { return _readOnly; }
void updateSerialNum();
- bool isBloatOverLimit() const {
- return isBloatOverLimit(getDiskBloat(), getDiskFootprint());
- }
- bool isBloatOverLimit(uint64_t bloat, uint64_t usage) const {
- return (usage*_config.getMaxDiskBloatFactor() < bloat);
- }
-
size_t computeNumberOfSignificantBucketIdBits(const IBucketizer & bucketizer, FileId fileId) const;
/*
@@ -301,7 +265,7 @@ private:
return (_fileChunks.empty() ? 0 : _fileChunks.back()->getLastPersistedSerialNum());
}
bool shouldCompactToActiveFile(size_t compactedSize) const;
- std::pair<bool, FileId> findNextToCompact();
+ std::pair<bool, FileId> findNextToCompact(double bloatLimit, double spreadLimit);
void incGeneration();
bool canShrinkLidSpace(const vespalib::LockGuard &guard) const;
@@ -326,4 +290,3 @@ private:
};
} // namespace search
-
diff --git a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.cpp b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.cpp
index 313bc2f52d6..e2b29f6bdd6 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.cpp
@@ -7,6 +7,12 @@ namespace search {
using vespalib::nbostream;
using common::FileHeaderContext;
+bool
+LogDocumentStore::Config::operator == (const Config & rhs) const {
+ (void) rhs;
+ return DocumentStore::Config::operator ==(rhs) && (_logConfig == rhs._logConfig);
+}
+
LogDocumentStore::LogDocumentStore(vespalib::ThreadExecutor & executor,
const vespalib::string & baseDir,
const Config & config,
@@ -18,11 +24,14 @@ LogDocumentStore::LogDocumentStore(vespalib::ThreadExecutor & executor,
: DocumentStore(config, _backingStore),
_backingStore(executor, baseDir, config.getLogConfig(), growStrategy,
tuneFileSummary, fileHeaderContext, tlSyncer, bucketizer)
-{
-}
+{}
+
+LogDocumentStore::~LogDocumentStore() {}
-LogDocumentStore::~LogDocumentStore()
-{
+void
+LogDocumentStore::reconfigure(const Config & config) {
+ DocumentStore::reconfigure(config);
+ _backingStore.reconfigure(config.getLogConfig());
}
} // namespace search
diff --git a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h
index 3f0e2642529..3c9aefc9934 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h
+++ b/searchlib/src/vespa/searchlib/docstore/logdocumentstore.h
@@ -6,15 +6,9 @@
#include "logdatastore.h"
#include <vespa/searchlib/common/tunefileinfo.h>
-namespace search
-{
-
-namespace common
-{
-
-class FileHeaderContext;
+namespace search {
-}
+namespace common { class FileHeaderContext; }
/**
* Simple document store that contains serialized Document instances.
@@ -26,12 +20,15 @@ class LogDocumentStore : public DocumentStore
public:
class Config : public DocumentStore::Config {
public:
+ Config() : DocumentStore::Config(), _logConfig() { }
Config(const DocumentStore::Config & base, const LogDataStore::Config & log) :
DocumentStore::Config(base),
_logConfig(log)
{ }
const LogDataStore::Config & getLogConfig() const { return _logConfig; }
LogDataStore::Config & getLogConfig() { return _logConfig; }
+ bool operator == (const Config & rhs) const;
+ bool operator != (const Config & rhs) const { return ! (*this == rhs); }
private:
LogDataStore::Config _logConfig;
};
@@ -47,17 +44,12 @@ public:
* The caller must keep it alive for the semantic
* lifetime of the log data store.
*/
- LogDocumentStore(vespalib::ThreadExecutor & executor,
- const vespalib::string & baseDir,
- const Config & config,
- const GrowStrategy & growStrategy,
- const TuneFileSummary &tuneFileSummary,
+ LogDocumentStore(vespalib::ThreadExecutor & executor, const vespalib::string & baseDir, const Config & config,
+ const GrowStrategy & growStrategy, const TuneFileSummary &tuneFileSummary,
const common::FileHeaderContext &fileHeaderContext,
- transactionlog::SyncProxy &tlSyncer,
- const IBucketizer::SP & bucketizer);
+ transactionlog::SyncProxy &tlSyncer, const IBucketizer::SP & bucketizer);
~LogDocumentStore();
- LogDataStore::Config & getLogConfig() { return _backingStore.getConfig(); }
- const LogDataStore::Config & getLogConfig() const { return _backingStore.getConfig(); }
+ void reconfigure(const Config & config);
private:
void compact(uint64_t syncToken) override { _backingStore.compact(syncToken); }
LogDataStore _backingStore;
diff --git a/searchlib/src/vespa/searchlib/docstore/visitcache.cpp b/searchlib/src/vespa/searchlib/docstore/visitcache.cpp
index b3fd236d73d..8f73c9862ae 100644
--- a/searchlib/src/vespa/searchlib/docstore/visitcache.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/visitcache.cpp
@@ -145,12 +145,25 @@ VisitCache::BackingStore::read(const KeySet &key, CompressedBlobSet &blobs) cons
return ! blobs.empty();
}
+void
+VisitCache::BackingStore::reconfigure(const CompressionConfig &compression) {
+ _compression = compression;
+}
+
+
VisitCache::VisitCache(IDataStore &store, size_t cacheSize, const CompressionConfig &compression) :
_store(store, compression),
_cache(std::make_unique<Cache>(_store, cacheSize))
{
}
+void
+VisitCache::reconfigure(size_t cacheSize, const CompressionConfig &compression) {
+ _store.reconfigure(compression);
+ _cache->setCapacityBytes(cacheSize);
+}
+
+
VisitCache::Cache::IdSet
VisitCache::Cache::findSetsContaining(const LockGuard &, const KeySet & keys) const {
IdSet found;
diff --git a/searchlib/src/vespa/searchlib/docstore/visitcache.h b/searchlib/src/vespa/searchlib/docstore/visitcache.h
index a89620b7bde..1bf867c5580 100644
--- a/searchlib/src/vespa/searchlib/docstore/visitcache.h
+++ b/searchlib/src/vespa/searchlib/docstore/visitcache.h
@@ -104,6 +104,7 @@ public:
void invalidate(uint32_t key) { remove(key); }
CacheStats getCacheStats() const;
+ void reconfigure(size_t cacheSize, const CompressionConfig &compression);
private:
/**
* This implments the interface the cache uses when it has a cache miss.
@@ -120,10 +121,11 @@ private:
bool read(const KeySet &key, CompressedBlobSet &blobs) const;
void write(const KeySet &, const CompressedBlobSet &) { }
void erase(const KeySet &) { }
- const CompressionConfig &getCompression() const { return _compression; }
+ void reconfigure(const CompressionConfig &compression);
+
private:
- IDataStore &_backingStore;
- const CompressionConfig _compression;
+ IDataStore &_backingStore;
+ CompressionConfig _compression;
};
using CacheParams = vespalib::CacheParam<
diff --git a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h
index 2b21f12a314..f5923134138 100644
--- a/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h
+++ b/searchlib/src/vespa/searchlib/docstore/writeablefilechunk.h
@@ -23,10 +23,7 @@ public:
{
public:
using CompressionConfig = vespalib::compression::CompressionConfig;
- Config()
- : _compression(CompressionConfig::LZ4, 9, 60),
- _maxChunkBytes(0x10000)
- { }
+ Config() : Config({CompressionConfig::LZ4, 9, 60}, 0x10000) { }
Config(const CompressionConfig &compression, size_t maxChunkBytes)
: _compression(compression),
@@ -35,6 +32,9 @@ public:
const CompressionConfig & getCompression() const { return _compression; }
size_t getMaxChunkBytes() const { return _maxChunkBytes; }
+ bool operator == (const Config & rhs) const {
+ return (_compression == rhs._compression) && (_maxChunkBytes == rhs._maxChunkBytes);
+ }
private:
CompressionConfig _compression;
size_t _maxChunkBytes;
@@ -42,16 +42,11 @@ public:
public:
typedef std::unique_ptr<WriteableFileChunk> UP;
- WriteableFileChunk(vespalib::ThreadExecutor & executor,
- FileId fileId, NameId nameId,
- const vespalib::string & baseName,
- uint64_t initialSerialNum,
- uint32_t docIdLimit,
- const Config & config,
- const TuneFileSummary &tune,
- const common::FileHeaderContext &fileHeaderContext,
- const IBucketizer * bucketizer,
- bool crcOnReadDisabled);
+ WriteableFileChunk(vespalib::ThreadExecutor & executor, FileId fileId, NameId nameId,
+ const vespalib::string & baseName, uint64_t initialSerialNum,
+ uint32_t docIdLimit, const Config & config,
+ const TuneFileSummary &tune, const common::FileHeaderContext &fileHeaderContext,
+ const IBucketizer * bucketizer, bool crcOnReadDisabled);
~WriteableFileChunk();
ssize_t read(uint32_t lid, SubChunkId chunk, vespalib::DataBuffer & buffer) const override;
diff --git a/searchlib/src/vespa/searchlib/features/agefeature.cpp b/searchlib/src/vespa/searchlib/features/agefeature.cpp
index 82227344a7f..258648408f8 100644
--- a/searchlib/src/vespa/searchlib/features/agefeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/agefeature.cpp
@@ -76,5 +76,11 @@ AgeBlueprint::createExecutor(const search::fef::IQueryEnvironment &env, vespalib
return stash.create<AgeExecutor>(attribute);
}
+fef::ParameterDescriptions
+AgeBlueprint::getDescriptions() const
+{
+ return fef::ParameterDescriptions().desc().attribute(fef::ParameterDataTypeSet::normalTypeSet(), fef::ParameterCollection::ANY);
+}
+
}
}
diff --git a/searchlib/src/vespa/searchlib/features/agefeature.h b/searchlib/src/vespa/searchlib/features/agefeature.h
index de89edd49b1..99898af910f 100644
--- a/searchlib/src/vespa/searchlib/features/agefeature.h
+++ b/searchlib/src/vespa/searchlib/features/agefeature.h
@@ -37,9 +37,7 @@ public:
void visitDumpFeatures(const fef::IIndexEnvironment &env, fef::IDumpFeatureVisitor &) const override;
fef::Blueprint::UP createInstance() const override;
fef::FeatureExecutor &createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const override;
- fef::ParameterDescriptions getDescriptions() const override {
- return fef::ParameterDescriptions().desc().attribute(fef::ParameterCollection::ANY);
- }
+ fef::ParameterDescriptions getDescriptions() const override;
bool setup(const fef::IIndexEnvironment & env, const fef::ParameterList & params) override;
};
diff --git a/searchlib/src/vespa/searchlib/features/attributefeature.cpp b/searchlib/src/vespa/searchlib/features/attributefeature.cpp
index 029971b3eeb..5f03cda1869 100644
--- a/searchlib/src/vespa/searchlib/features/attributefeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/attributefeature.cpp
@@ -426,5 +426,14 @@ AttributeBlueprint::createExecutor(const search::fef::IQueryEnvironment &env, ve
}
}
+fef::ParameterDescriptions
+AttributeBlueprint::getDescriptions() const
+{
+ auto dataTypeSet = fef::ParameterDataTypeSet::normalOrTensorTypeSet();
+ return fef::ParameterDescriptions().
+ desc().attribute(dataTypeSet, fef::ParameterCollection::ANY).
+ desc().attribute(dataTypeSet, fef::ParameterCollection::ANY).string();
+}
+
} // namespace features
} // namespace search
diff --git a/searchlib/src/vespa/searchlib/features/attributefeature.h b/searchlib/src/vespa/searchlib/features/attributefeature.h
index 18ec54e14c2..2d206fb738e 100644
--- a/searchlib/src/vespa/searchlib/features/attributefeature.h
+++ b/searchlib/src/vespa/searchlib/features/attributefeature.h
@@ -28,12 +28,7 @@ public:
fef::Blueprint::UP createInstance() const override;
fef::FeatureExecutor &createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const override;
- fef::ParameterDescriptions getDescriptions() const override{
- return fef::ParameterDescriptions().
- desc().attribute(fef::ParameterCollection::ANY).
- desc().attribute(fef::ParameterCollection::ANY).string();
- }
-
+ fef::ParameterDescriptions getDescriptions() const override;
bool setup(const fef::IIndexEnvironment & env, const fef::ParameterList & params) override;
};
diff --git a/searchlib/src/vespa/searchlib/features/attributematchfeature.cpp b/searchlib/src/vespa/searchlib/features/attributematchfeature.cpp
index 2ff53951d8b..27c7b77ec26 100644
--- a/searchlib/src/vespa/searchlib/features/attributematchfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/attributematchfeature.cpp
@@ -6,6 +6,7 @@
#include <vespa/searchlib/fef/featurenamebuilder.h>
#include <vespa/searchlib/fef/fieldinfo.h>
#include <vespa/searchlib/fef/properties.h>
+#include <vespa/searchlib/fef/parameterdescriptions.h>
#include <vespa/searchcommon/attribute/attributecontent.h>
#include <vespa/log/log.h>
@@ -271,7 +272,8 @@ AttributeMatchBlueprint::visitDumpFeatures(const IIndexEnvironment &env,
{
for (uint32_t i = 0; i < env.getNumFields(); ++i) {
const FieldInfo * field = env.getField(i);
- if (field->type() == FieldType::ATTRIBUTE) {
+ if (field->type() == FieldType::ATTRIBUTE &&
+ ParameterDataTypeSet::normalTypeSet().allowedType(field->get_data_type())) {
FeatureNameBuilder fnb;
fnb.baseName(getBaseName()).parameter(field->name());
visitor.visitDumpFeature(fnb.buildName());
diff --git a/searchlib/src/vespa/searchlib/features/attributematchfeature.h b/searchlib/src/vespa/searchlib/features/attributematchfeature.h
index 707572abf9e..3cdc6e322a3 100644
--- a/searchlib/src/vespa/searchlib/features/attributematchfeature.h
+++ b/searchlib/src/vespa/searchlib/features/attributematchfeature.h
@@ -98,7 +98,7 @@ public:
void visitDumpFeatures(const fef::IIndexEnvironment & env, fef::IDumpFeatureVisitor & visitor) const override;
fef::Blueprint::UP createInstance() const override;
fef::ParameterDescriptions getDescriptions() const override {
- return fef::ParameterDescriptions().desc().attributeField(fef::ParameterCollection::ANY);
+ return fef::ParameterDescriptions().desc().attributeField(fef::ParameterDataTypeSet::normalTypeSet(), fef::ParameterCollection::ANY);
}
bool setup(const fef::IIndexEnvironment & env, const fef::ParameterList & params) override;
diff --git a/searchlib/src/vespa/searchlib/features/debug_attribute_wait.cpp b/searchlib/src/vespa/searchlib/features/debug_attribute_wait.cpp
index 7c42df8d9bb..20de658f7f6 100644
--- a/searchlib/src/vespa/searchlib/features/debug_attribute_wait.cpp
+++ b/searchlib/src/vespa/searchlib/features/debug_attribute_wait.cpp
@@ -103,6 +103,12 @@ DebugAttributeWaitBlueprint::createExecutor(const IQueryEnvironment &env, vespal
return stash.create<DebugAttributeWaitExecutor>(env, attribute, _params);
}
+fef::ParameterDescriptions
+DebugAttributeWaitBlueprint::getDescriptions() const
+{
+ return fef::ParameterDescriptions().desc().attribute(fef::ParameterDataTypeSet::normalTypeSet(), fef::ParameterCollection::ANY).number();
+}
+
//-----------------------------------------------------------------------------
} // namespace features
diff --git a/searchlib/src/vespa/searchlib/features/debug_attribute_wait.h b/searchlib/src/vespa/searchlib/features/debug_attribute_wait.h
index fbe027f96dc..fdb23a3f374 100644
--- a/searchlib/src/vespa/searchlib/features/debug_attribute_wait.h
+++ b/searchlib/src/vespa/searchlib/features/debug_attribute_wait.h
@@ -28,9 +28,7 @@ public:
DebugAttributeWaitBlueprint();
void visitDumpFeatures(const fef::IIndexEnvironment & env, fef::IDumpFeatureVisitor & visitor) const override;
fef::Blueprint::UP createInstance() const override;
- fef::ParameterDescriptions getDescriptions() const override {
- return fef::ParameterDescriptions().desc().attribute(fef::ParameterCollection::ANY).number();
- }
+ fef::ParameterDescriptions getDescriptions() const override;
bool setup(const fef::IIndexEnvironment &env, const fef::ParameterList &params) override;
fef::FeatureExecutor &createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const override;
};
diff --git a/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp b/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp
index cd84eadf536..cbc8a5158a2 100644
--- a/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/dotproductfeature.cpp
@@ -293,7 +293,7 @@ DotProductBlueprint::setup(const IIndexEnvironment & env, const ParameterList &
ParameterDescriptions
DotProductBlueprint::getDescriptions() const {
- return ParameterDescriptions().desc().attribute(ParameterCollection::ANY).string();
+ return ParameterDescriptions().desc().attribute(ParameterDataTypeSet::normalTypeSet(), ParameterCollection::ANY).string();
}
Blueprint::UP
diff --git a/searchlib/src/vespa/searchlib/features/freshnessfeature.cpp b/searchlib/src/vespa/searchlib/features/freshnessfeature.cpp
index 8b484820fb5..11ae8305e16 100644
--- a/searchlib/src/vespa/searchlib/features/freshnessfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/freshnessfeature.cpp
@@ -89,6 +89,12 @@ FreshnessBlueprint::createInstance() const
return Blueprint::UP(new FreshnessBlueprint());
}
+fef::ParameterDescriptions
+FreshnessBlueprint::getDescriptions() const
+{
+ return fef::ParameterDescriptions().desc().attribute(fef::ParameterDataTypeSet::normalTypeSet(), fef::ParameterCollection::ANY);
+}
+
FeatureExecutor &
FreshnessBlueprint::createExecutor(const IQueryEnvironment &, vespalib::Stash &stash) const
{
diff --git a/searchlib/src/vespa/searchlib/features/freshnessfeature.h b/searchlib/src/vespa/searchlib/features/freshnessfeature.h
index a3a893ab3fc..e156cad53ed 100644
--- a/searchlib/src/vespa/searchlib/features/freshnessfeature.h
+++ b/searchlib/src/vespa/searchlib/features/freshnessfeature.h
@@ -37,9 +37,7 @@ public:
void visitDumpFeatures(const fef::IIndexEnvironment & env, fef::IDumpFeatureVisitor & visitor) const override;
fef::Blueprint::UP createInstance() const override;
- fef::ParameterDescriptions getDescriptions() const override {
- return fef::ParameterDescriptions().desc().attribute(fef::ParameterCollection::ANY);
- }
+ fef::ParameterDescriptions getDescriptions() const override;
bool setup(const fef::IIndexEnvironment & env, const fef::ParameterList & params) override;
fef::FeatureExecutor &createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const override;
};
diff --git a/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp b/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp
index d07654c0a21..92120b925eb 100644
--- a/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp
+++ b/searchlib/src/vespa/searchlib/features/internal_max_reduce_prod_join_feature.cpp
@@ -130,32 +130,12 @@ InternalMaxReduceProdJoinBlueprint::createInstance() const
ParameterDescriptions
InternalMaxReduceProdJoinBlueprint::getDescriptions() const
{
- return ParameterDescriptions().desc().attribute(ParameterCollection::ARRAY).string();
-}
-
-bool supportedAttributeType(Parameter param) {
- const FieldInfo *attributeInfo = param.asField();
- if (attributeInfo == nullptr) {
- return false;
- }
- if (attributeInfo->collection() != FieldInfo::CollectionType::ARRAY) {
- return false;
- }
- if (attributeInfo->get_data_type() == FieldInfo::DataType::INT64) {
- return true;
- }
- if (attributeInfo->get_data_type() == FieldInfo::DataType::INT32) {
- return true;
- }
- return false;
+ return ParameterDescriptions().desc().attribute(ParameterDataTypeSet::int32OrInt64TypeSet(), ParameterCollection::ARRAY).string();
}
bool
InternalMaxReduceProdJoinBlueprint::setup(const IIndexEnvironment &env, const ParameterList &params)
{
- if (!supportedAttributeType(params[0])) {
- return false;
- }
_attribute = params[0].getValue();
_query = params[1].getValue();
describeOutput("scalar", "Internal executor for optimized execution of reduce(join(A,Q,f(x,y)(x*y)),max)");
diff --git a/searchlib/src/vespa/searchlib/features/nativeattributematchfeature.cpp b/searchlib/src/vespa/searchlib/features/nativeattributematchfeature.cpp
index 1e6423f9de8..5dda159f629 100644
--- a/searchlib/src/vespa/searchlib/features/nativeattributematchfeature.cpp
+++ b/searchlib/src/vespa/searchlib/features/nativeattributematchfeature.cpp
@@ -118,6 +118,12 @@ NativeAttributeMatchBlueprint::createInstance() const
return Blueprint::UP(new NativeAttributeMatchBlueprint());
}
+fef::ParameterDescriptions
+NativeAttributeMatchBlueprint::getDescriptions() const
+{
+ return fef::ParameterDescriptions().desc().attribute(fef::ParameterDataTypeSet::normalTypeSet(), fef::ParameterCollection::ANY).repeat();
+}
+
bool
NativeAttributeMatchBlueprint::setup(const IIndexEnvironment & env,
const ParameterList & params)
diff --git a/searchlib/src/vespa/searchlib/features/nativeattributematchfeature.h b/searchlib/src/vespa/searchlib/features/nativeattributematchfeature.h
index e47cbed0344..e2c9dc7aef1 100644
--- a/searchlib/src/vespa/searchlib/features/nativeattributematchfeature.h
+++ b/searchlib/src/vespa/searchlib/features/nativeattributematchfeature.h
@@ -94,9 +94,7 @@ public:
void visitDumpFeatures(const fef::IIndexEnvironment & env, fef::IDumpFeatureVisitor & visitor) const override;
fef::Blueprint::UP createInstance() const override;
- fef::ParameterDescriptions getDescriptions() const override {
- return fef::ParameterDescriptions().desc().attribute(fef::ParameterCollection::ANY).repeat();
- }
+ fef::ParameterDescriptions getDescriptions() const override;
bool setup(const fef::IIndexEnvironment & env, const fef::ParameterList & params) override;
fef::FeatureExecutor &createExecutor(const fef::IQueryEnvironment &env, vespalib::Stash &stash) const override;
diff --git a/searchlib/src/vespa/searchlib/fef/parameterdescriptions.h b/searchlib/src/vespa/searchlib/fef/parameterdescriptions.h
index d0e29cfa318..4c5d2c785cb 100644
--- a/searchlib/src/vespa/searchlib/fef/parameterdescriptions.h
+++ b/searchlib/src/vespa/searchlib/fef/parameterdescriptions.h
@@ -2,6 +2,7 @@
#pragma once
+#include <vespa/searchcommon/common/datatype.h>
#include <vector>
#include <cstddef>
@@ -37,16 +38,77 @@ struct ParameterCollection {
};
};
+/*
+ * A set of accepted data types for a parameter.
+ */
+class ParameterDataTypeSet
+{
+public:
+ using DataType = search::index::schema::DataType;
+
+private:
+ uint32_t _typeMask;
+
+ static uint32_t asMask(DataType dataType) {
+ return (1u << static_cast<unsigned int>(dataType));
+ }
+ static uint32_t normalTypesMask() {
+ return (asMask(DataType::UINT1) |
+ asMask(DataType::UINT2) |
+ asMask(DataType::UINT4) |
+ asMask(DataType::INT8) |
+ asMask(DataType::INT16) |
+ asMask(DataType::INT32) |
+ asMask(DataType::INT64) |
+ asMask(DataType::FLOAT) |
+ asMask(DataType::DOUBLE) |
+ asMask(DataType::STRING) |
+ asMask(DataType::RAW));
+ }
+ static uint32_t allTypesMask() {
+ return (normalTypesMask() |
+ asMask(DataType::BOOLEANTREE) |
+ asMask(DataType::TENSOR) |
+ asMask(DataType::REFERENCE));
+ }
+ ParameterDataTypeSet(uint32_t typeMask)
+ : _typeMask(typeMask)
+ {
+ }
+public:
+ ParameterDataTypeSet()
+ : ParameterDataTypeSet(allTypesMask())
+ {
+ }
+ static ParameterDataTypeSet normalTypeSet() {
+ return ParameterDataTypeSet(normalTypesMask());
+ }
+ static ParameterDataTypeSet int32OrInt64TypeSet() {
+ return ParameterDataTypeSet(asMask(DataType::INT32) | asMask(DataType::INT64));
+ }
+ static ParameterDataTypeSet normalOrTensorTypeSet() {
+ return ParameterDataTypeSet(normalTypesMask() | asMask(DataType::TENSOR));
+ }
+ bool allowedType(DataType dataType) const {
+ return ((asMask(dataType) & _typeMask) != 0);
+ }
+};
+
/**
* The description of a single parameter within a single
* ParameterDescription object.
**/
struct ParamDescItem {
ParameterType::Enum type;
+ ParameterDataTypeSet dataTypeSet;
ParameterCollection::Enum collection;
ParamDescItem(ParameterType::Enum t,
ParameterCollection::Enum c)
- : type(t), collection(c) {}
+ : type(t), dataTypeSet(), collection(c) {}
+ ParamDescItem(ParameterType::Enum t,
+ ParameterDataTypeSet dts,
+ ParameterCollection::Enum c)
+ : type(t), dataTypeSet(dts), collection(c) {}
};
/**
@@ -101,6 +163,9 @@ private:
Description & getCurrent() { return _descriptions.back(); }
void addParameter(const ParamDescItem &param);
+ void addParameter(ParameterType::Enum type, ParameterDataTypeSet dataTypeSet, ParameterCollection::Enum collection) {
+ addParameter(ParamDescItem(type, dataTypeSet, collection));
+ }
void addParameter(ParameterType::Enum type, ParameterCollection::Enum collection) {
addParameter(ParamDescItem(type, collection));
}
@@ -141,6 +206,10 @@ public:
addParameter(ParameterType::ATTRIBUTE_FIELD, collection);
return *this;
}
+ ParameterDescriptions & attributeField(ParameterDataTypeSet dataTypeSet, ParameterCollection::Enum collection) {
+ addParameter(ParameterType::ATTRIBUTE_FIELD, dataTypeSet, collection);
+ return *this;
+ }
/**
* Adds an attribute parameter to the current description.
*/
@@ -148,6 +217,10 @@ public:
addParameter(ParameterType::ATTRIBUTE, collection);
return *this;
}
+ ParameterDescriptions & attribute(ParameterDataTypeSet dataTypeSet, ParameterCollection::Enum collection) {
+ addParameter(ParameterType::ATTRIBUTE, dataTypeSet, collection);
+ return *this;
+ }
/**
* Adds a feature parameter to the current description.
*/
diff --git a/searchlib/src/vespa/searchlib/fef/parametervalidator.cpp b/searchlib/src/vespa/searchlib/fef/parametervalidator.cpp
index 62c9efc6739..dda5ec0b719 100644
--- a/searchlib/src/vespa/searchlib/fef/parametervalidator.cpp
+++ b/searchlib/src/vespa/searchlib/fef/parametervalidator.cpp
@@ -25,6 +25,10 @@ bool checkCollectionType(ParameterCollection::Enum accept, CollectionType actual
return false;
}
+bool checkDataType(ParameterDataTypeSet accept, search::index::schema::DataType actual) {
+ return accept.allowedType(actual);
+}
+
class ValidateException
{
public:
@@ -50,7 +54,9 @@ ParameterValidator::Result & ParameterValidator::Result::operator=(const Result
ParameterValidator::Result::~Result() { }
void
-ParameterValidator::validateField(ParameterType::Enum type, ParameterCollection::Enum collection,
+ParameterValidator::validateField(ParameterType::Enum type,
+ ParameterDataTypeSet dataTypeSet,
+ ParameterCollection::Enum collection,
size_t i, Result & result)
{
const FieldInfo * field = _indexEnv.getFieldByName(_params[i]);
@@ -74,6 +80,10 @@ ParameterValidator::validateField(ParameterType::Enum type, ParameterCollection:
i, _params[i].c_str()));
}
}
+ if (!checkDataType(dataTypeSet, field->get_data_type())) {
+ throw ValidateException(make_string("Param[%zu]: field '%s' has inappropriate data type",
+ i, _params[i].c_str()));
+ }
if (!checkCollectionType(collection, field->collection())) {
throw ValidateException(make_string("Param[%zu]: field '%s' has inappropriate collection type",
i, _params[i].c_str()));
@@ -116,7 +126,7 @@ ParameterValidator::validate(const ParameterDescriptions::Description & desc)
case ParameterType::INDEX_FIELD:
case ParameterType::ATTRIBUTE_FIELD:
case ParameterType::ATTRIBUTE:
- validateField(type, param.collection, i, result);
+ validateField(type, param.dataTypeSet, param.collection, i, result);
break;
case ParameterType::NUMBER:
validateNumber(type, i, result);
diff --git a/searchlib/src/vespa/searchlib/fef/parametervalidator.h b/searchlib/src/vespa/searchlib/fef/parametervalidator.h
index 25dc5296988..6bde59641da 100644
--- a/searchlib/src/vespa/searchlib/fef/parametervalidator.h
+++ b/searchlib/src/vespa/searchlib/fef/parametervalidator.h
@@ -59,7 +59,7 @@ private:
const StringVector & _params;
const ParameterDescriptions & _descs;
- void validateField(ParameterType::Enum type, ParameterCollection::Enum collection,
+ void validateField(ParameterType::Enum type, ParameterDataTypeSet dataTypeSet, ParameterCollection::Enum collection,
size_t i, Result & result);
void validateNumber(ParameterType::Enum type, size_t i, Result & result);
Result validate(const ParameterDescriptions::Description & desc);
diff --git a/searchlib/src/vespa/searchlib/index/docidandfeatures.cpp b/searchlib/src/vespa/searchlib/index/docidandfeatures.cpp
index 6c14498be76..513c542637d 100644
--- a/searchlib/src/vespa/searchlib/index/docidandfeatures.cpp
+++ b/searchlib/src/vespa/searchlib/index/docidandfeatures.cpp
@@ -1,14 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "docidandfeatures.h"
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/log/log.h>
LOG_SETUP(".index.docidandfeatures");
namespace search::index {
-using vespalib::nbostream;
-
DocIdAndFeatures::DocIdAndFeatures()
: _docId(0),
_wordDocFeatures(),
@@ -24,100 +21,4 @@ DocIdAndFeatures::DocIdAndFeatures(const DocIdAndFeatures &) = default;
DocIdAndFeatures & DocIdAndFeatures::operator = (const DocIdAndFeatures &) = default;
DocIdAndFeatures::~DocIdAndFeatures() { }
-#if 0
-void
-DocIdAndFeatures::append(const DocIdAndFeatures &rhs, uint32_t localFieldId)
-{
- assert(!rhs.getRaw());
- assert(rhs._fields.size() == 1);
- const WordDocFieldFeatures &field = rhs._fields.front();
- assert(field.getFieldId() == 0);
- uint32_t numElements = field.getNumElements();
- std::vector<WordDocFieldElementFeatures>::const_iterator element =
- rhs._elements.begin();
- std::vector<WordDocFieldElementWordPosFeatures>::const_iterator position =
- rhs._wordPositions.begin();
- assert(_fields.empty() || localFieldId > _fields.back().getFieldId());
- _fields.push_back(field);
- _fields.back().setFieldId(localFieldId);
- for (uint32_t elementDone = 0; elementDone < numElements;
- ++elementDone, ++element) {
- _elements.push_back(*element);
- for (uint32_t posResidue = element->getNumOccs(); posResidue > 0;
- --posResidue, ++position) {
- _wordPositions.push_back(*position);
- }
- }
-}
-#endif
-
-
-nbostream &
-operator<<(nbostream &out, const WordDocElementFeatures &features)
-{
- out << features._elementId << features._numOccs <<
- features._weight << features._elementLen;
- return out;
-}
-
-
-nbostream &
-operator>>(nbostream &in, WordDocElementFeatures &features)
-{
- in >> features._elementId >> features._numOccs >>
- features._weight >> features._elementLen;
- return in;
-}
-
-
-nbostream &
-operator<<(nbostream &out, const WordDocElementWordPosFeatures &features)
-{
- out << features._wordPos;
- return out;
-}
-
-
-nbostream &
-operator>>(nbostream &in, WordDocElementWordPosFeatures &features)
-{
- in >> features._wordPos;
- return in;
-}
-
-
-nbostream &
-operator<<(nbostream &out, const DocIdAndFeatures &features)
-{
- out << features._docId;
- out.saveVector(features._elements).
- saveVector(features._wordPositions);
- out.saveVector(features._blob);
- out << features._bitOffset << features._bitLength << features._raw;
- return out;
-}
-
-
-nbostream &
-operator>>(nbostream &in, DocIdAndFeatures &features)
-{
- in >> features._docId;
- in.restoreVector(features._elements).
- restoreVector(features._wordPositions);
- in.restoreVector(features._blob);
- in >> features._bitOffset >> features._bitLength >> features._raw;
- return in;
-}
-
-}
-
-#include <vespa/vespalib/objects/nbostream.hpp>
-
-namespace vespalib {
- using search::index::WordDocElementFeatures;
- using search::index::WordDocElementWordPosFeatures;
- template nbostream& nbostream::saveVector<WordDocElementFeatures>(const std::vector<WordDocElementFeatures> &);
- template nbostream& nbostream::restoreVector<WordDocElementFeatures>(std::vector<WordDocElementFeatures> &);
- template nbostream& nbostream::saveVector<WordDocElementWordPosFeatures>(const std::vector<WordDocElementWordPosFeatures> &);
- template nbostream& nbostream::restoreVector<WordDocElementWordPosFeatures>(std::vector<WordDocElementWordPosFeatures> &);
}
diff --git a/searchlib/src/vespa/searchlib/index/docidandfeatures.h b/searchlib/src/vespa/searchlib/index/docidandfeatures.h
index 5b8ded5f1e0..91a500495cc 100644
--- a/searchlib/src/vespa/searchlib/index/docidandfeatures.h
+++ b/searchlib/src/vespa/searchlib/index/docidandfeatures.h
@@ -5,8 +5,6 @@
#include <vector>
#include <cstdint>
-namespace vespalib { class nbostream; }
-
namespace search::index {
/*
@@ -195,24 +193,4 @@ public:
void append(const DocIdAndFeatures &rhs, uint32_t localFieldId);
};
-
-vespalib::nbostream &
-operator<<(vespalib::nbostream &out, const WordDocElementFeatures &features);
-
-vespalib::nbostream &
-operator>>(vespalib::nbostream &in, WordDocElementFeatures &features);
-
-vespalib::nbostream &
-operator<<(vespalib::nbostream &out, const WordDocElementWordPosFeatures &features);
-
-vespalib::nbostream &
-operator>>(vespalib::nbostream &in, WordDocElementWordPosFeatures &features);
-
-vespalib::nbostream &
-operator<<(vespalib::nbostream &out, const DocIdAndFeatures &features);
-
-vespalib::nbostream &
-operator>>(vespalib::nbostream &in, DocIdAndFeatures &features);
-
}
-
diff --git a/searchlib/src/vespa/searchlib/index/postinglistcountfile.h b/searchlib/src/vespa/searchlib/index/postinglistcountfile.h
index 5b14b477c42..30c6963f228 100644
--- a/searchlib/src/vespa/searchlib/index/postinglistcountfile.h
+++ b/searchlib/src/vespa/searchlib/index/postinglistcountfile.h
@@ -5,8 +5,6 @@
#include "postinglistcounts.h"
#include <vespa/searchlib/common/tunefileinfo.h>
-namespace vespalib { class nbostream; }
-
namespace search {
namespace common { class FileHeaderContext; }
@@ -34,19 +32,6 @@ public:
virtual ~PostingListCountFileSeqRead();
/**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Implies
- * flush from memory to disk, and possibly also sync to permanent
- * storage media.
- */
- virtual void checkPointWrite(vespalib::nbostream &out) = 0;
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- virtual void checkPointRead(vespalib::nbostream &in) = 0;
-
- /**
* Open posting list count file for sequential read.
*/
virtual bool open(const vespalib::string &name, const TuneFileSeqRead &tuneFileRead) = 0;
@@ -71,19 +56,6 @@ public:
virtual ~PostingListCountFileSeqWrite();
/**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Implies
- * flush from memory to disk, and possibly also sync to permanent
- * storage media.
- */
- virtual void checkPointWrite(vespalib::nbostream &out) = 0;
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- virtual void checkPointRead(vespalib::nbostream &in) = 0;
-
- /**
* Open posting list count file for sequential write.
*/
virtual bool open(const vespalib::string &name,
diff --git a/searchlib/src/vespa/searchlib/index/postinglistcounts.cpp b/searchlib/src/vespa/searchlib/index/postinglistcounts.cpp
index d7bfcca03cc..23b0bf6b6a3 100644
--- a/searchlib/src/vespa/searchlib/index/postinglistcounts.cpp
+++ b/searchlib/src/vespa/searchlib/index/postinglistcounts.cpp
@@ -1,6 +1,5 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include <vespa/vespalib/objects/nbostream.h>
#include "postinglistcounts.h"
namespace search::index {
@@ -10,71 +9,4 @@ void swap(PostingListCounts & a, PostingListCounts & b)
a.swap(b);
}
-using vespalib::nbostream;
-
-nbostream &
-operator<<(nbostream &out, const PostingListCounts::Segment &segment)
-{
- out << segment._bitLength << segment._numDocs << segment._lastDoc;
- return out;
-}
-
-
-nbostream &
-operator>>(nbostream &in, PostingListCounts::Segment &segment)
-{
- in >> segment._bitLength >> segment._numDocs >> segment._lastDoc;
- return in;
-}
-
-
-nbostream &
-operator<<(nbostream &out, const PostingListCounts &counts)
-{
- out << counts._numDocs << counts._bitLength;
- size_t numSegments = counts._segments.size();
- out << numSegments;
- for (size_t seg = 0; seg < numSegments; ++seg) {
- out << counts._segments[seg];
- }
- return out;
-}
-
-
-nbostream &
-operator>>(nbostream &in, PostingListCounts &counts)
-{
- in >> counts._numDocs >> counts._bitLength;
- size_t numSegments = 0;
- in >> numSegments;
- counts._segments.reserve(numSegments);
- counts._segments.clear();
- for (size_t seg = 0; seg < numSegments; ++seg) {
- PostingListCounts::Segment segment;
- in >> segment;
- counts._segments.push_back(segment);
- }
- return in;
-}
-
-
-nbostream &
-operator<<(nbostream &out, const PostingListOffsetAndCounts &offsetAndCounts)
-{
- out << offsetAndCounts._offset;
- out << offsetAndCounts._accNumDocs;
- out << offsetAndCounts._counts;
- return out;
-}
-
-
-nbostream &
-operator>>(nbostream &in, PostingListOffsetAndCounts &offsetAndCounts)
-{
- in >> offsetAndCounts._offset;
- in >> offsetAndCounts._accNumDocs;
- in >> offsetAndCounts._counts;
- return in;
-}
-
}
diff --git a/searchlib/src/vespa/searchlib/index/postinglistcounts.h b/searchlib/src/vespa/searchlib/index/postinglistcounts.h
index f59b906e982..d11f7286fa1 100644
--- a/searchlib/src/vespa/searchlib/index/postinglistcounts.h
+++ b/searchlib/src/vespa/searchlib/index/postinglistcounts.h
@@ -4,8 +4,6 @@
#include <vector>
#include <cstdint>
-namespace vespalib { class nbostream; }
-
namespace search {
namespace index {
@@ -47,12 +45,6 @@ public:
_numDocs == rhs._numDocs &&
_lastDoc == rhs._lastDoc);
}
-
- friend vespalib::nbostream &
- operator<<(vespalib::nbostream &out, const Segment &segment);
-
- friend vespalib::nbostream &
- operator>>(vespalib::nbostream &in, Segment &segment);
};
/**
@@ -92,12 +84,6 @@ public:
_bitLength == rhs._bitLength &&
_segments == rhs._segments);
}
-
- friend vespalib::nbostream &
- operator<<(vespalib::nbostream &out, const PostingListCounts &counts);
-
- friend vespalib::nbostream &
- operator>>(vespalib::nbostream &in, PostingListCounts &counts);
};
void swap(PostingListCounts & a, PostingListCounts & b);
@@ -115,12 +101,6 @@ public:
_accNumDocs(0u),
_counts()
{ }
-
- friend vespalib::nbostream &
- operator<<(vespalib::nbostream &out, const PostingListOffsetAndCounts &offsetAndCounts);
-
- friend vespalib::nbostream &
- operator>>(vespalib::nbostream &in, PostingListOffsetAndCounts &offsetAndCounts);
};
} // namespace index
diff --git a/searchlib/src/vespa/searchlib/index/postinglistfile.h b/searchlib/src/vespa/searchlib/index/postinglistfile.h
index 43628bd798b..61247ef3197 100644
--- a/searchlib/src/vespa/searchlib/index/postinglistfile.h
+++ b/searchlib/src/vespa/searchlib/index/postinglistfile.h
@@ -8,8 +8,6 @@
class FastOS_FileInterface;
-namespace vespalib { class nbostream; }
-
namespace search {
namespace common { class FileHeaderContext; }
@@ -38,19 +36,6 @@ public:
virtual void readDocIdAndFeatures(DocIdAndFeatures &features) = 0;
/**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Implies
- * flush from memory to disk, and possibly also sync to permanent
- * storage media.
- */
- virtual void checkPointWrite(vespalib::nbostream &out) = 0;
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- virtual void checkPointRead(vespalib::nbostream &in) = 0;
-
- /**
* Read counts for a word.
*/
virtual void readCounts(const PostingListCounts &counts) = 0;
@@ -135,19 +120,6 @@ public:
virtual void flushWord() = 0;
/**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Implies
- * flush from memory to disk, and possibly also sync to permanent
- * storage media.
- */
- virtual void checkPointWrite(vespalib::nbostream &out) = 0;
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- virtual void checkPointRead(vespalib::nbostream &in) = 0;
-
- /**
* Open posting list file for sequential write.
*/
virtual bool
diff --git a/searchlib/src/vespa/searchlib/queryeval/iterator_pack.cpp b/searchlib/src/vespa/searchlib/queryeval/iterator_pack.cpp
index e50fe57ac41..1f5858c1100 100644
--- a/searchlib/src/vespa/searchlib/queryeval/iterator_pack.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/iterator_pack.cpp
@@ -37,12 +37,11 @@ SearchIteratorPack::SearchIteratorPack(const std::vector<SearchIterator*> &child
for (auto child: children) {
_children.emplace_back(child);
}
- assert((_children.size() == _childMatch.size()) ||
- (_childMatch.empty() && (_md.get() == nullptr)));
+ assert((_children.size() == _childMatch.size()) || _childMatch.empty());
}
-SearchIteratorPack::SearchIteratorPack(const std::vector<SearchIterator*> &children)
- : SearchIteratorPack(children, std::vector<fef::TermFieldMatchData*>(), MatchDataUP())
+SearchIteratorPack::SearchIteratorPack(const std::vector<SearchIterator*> &children, MatchDataUP md)
+ : SearchIteratorPack(children, std::vector<fef::TermFieldMatchData*>(), MatchDataUP(std::move(md)))
{ }
std::unique_ptr<BitVector>
diff --git a/searchlib/src/vespa/searchlib/queryeval/iterator_pack.h b/searchlib/src/vespa/searchlib/queryeval/iterator_pack.h
index 20a3a57f34a..58c774e0903 100644
--- a/searchlib/src/vespa/searchlib/queryeval/iterator_pack.h
+++ b/searchlib/src/vespa/searchlib/queryeval/iterator_pack.h
@@ -29,7 +29,7 @@ public:
const std::vector<fef::TermFieldMatchData*> &childMatch,
MatchDataUP md);
- explicit SearchIteratorPack(const std::vector<SearchIterator*> &children);
+ SearchIteratorPack(const std::vector<SearchIterator*> &children, MatchDataUP md);
uint32_t get_docid(uint32_t ref) const {
return _children[ref]->getDocId();
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
index 0c9af1d3e25..fc68c48a247 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.cpp
@@ -9,6 +9,8 @@ namespace search::queryeval {
WeightedSetTermBlueprint::WeightedSetTermBlueprint(const FieldSpec &field)
: ComplexLeafBlueprint(field),
_estimate(),
+ _layout(),
+ _children_field(field.getName(), field.getFieldId(), _layout.allocTermField(field.getFieldId()), false),
_weights(),
_terms()
{
@@ -40,18 +42,18 @@ WeightedSetTermBlueprint::addTerm(Blueprint::UP term, int32_t weight)
term.release();
}
+
SearchIterator::UP
-WeightedSetTermBlueprint::createSearch(search::fef::MatchData &md, bool) const
+WeightedSetTermBlueprint::createLeafSearch(const search::fef::TermFieldMatchDataArray &tfmda,
+ bool) const
{
- const State &state = getState();
- assert(state.numFields() == 1);
- search::fef::TermFieldMatchData &tfmd = *state.field(0).resolve(md);
-
+ assert(tfmda.size() == 1);
+ fef::MatchData::UP md = _layout.createMatchData();
std::vector<SearchIterator*> children(_terms.size());
for (size_t i = 0; i < _terms.size(); ++i) {
- children[i] = _terms[i]->createSearch(md, true).release();
+ children[i] = _terms[i]->createSearch(*md, true).release();
}
- return SearchIterator::UP(WeightedSetTermSearch::create(children, tfmd, _weights));
+ return SearchIterator::UP(WeightedSetTermSearch::create(children, *tfmda[0], _weights, std::move(md)));
}
void
@@ -71,10 +73,4 @@ WeightedSetTermBlueprint::visitMembers(vespalib::ObjectVisitor &visitor) const
visit(visitor, "_terms", _terms);
}
-SearchIterator::UP
-WeightedSetTermBlueprint::createLeafSearch(const search::fef::TermFieldMatchDataArray &, bool) const
-{
- abort();
-}
-
}
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
index 1afa0f9f2b2..b81d6c6f9e9 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_blueprint.h
@@ -15,6 +15,8 @@ namespace queryeval {
class WeightedSetTermBlueprint : public ComplexLeafBlueprint
{
HitEstimate _estimate;
+ fef::MatchDataLayout _layout;
+ FieldSpec _children_field;
std::vector<int32_t> _weights;
std::vector<Blueprint*> _terms;
@@ -27,18 +29,16 @@ public:
// used by create visitor
// matches signature in dot product blueprint for common blueprint
- // building code. Hands out its own field spec to children. NOTE:
- // this is only ok since children will never be unpacked.
- FieldSpec getNextChildField(const FieldSpec &outer) { return outer; }
+ // building code. Hands out the same field spec to all children.
+ FieldSpec getNextChildField(const FieldSpec &) { return _children_field; }
// used by create visitor
void addTerm(Blueprint::UP term, int32_t weight);
- SearchIteratorUP createSearch(search::fef::MatchData &md, bool strict) const override;
+ SearchIteratorUP createLeafSearch(const search::fef::TermFieldMatchDataArray &tfmda, bool strict) const override;
void visitMembers(vespalib::ObjectVisitor &visitor) const override;
private:
- SearchIteratorUP createLeafSearch(const search::fef::TermFieldMatchDataArray &, bool) const override;
void fetchPostings(bool strict) override;
};
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.cpp b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.cpp
index aae7c60bd80..2801f1c5e0c 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.cpp
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.cpp
@@ -134,15 +134,16 @@ public:
SearchIterator *
WeightedSetTermSearch::create(const std::vector<SearchIterator*> &children,
TermFieldMatchData &tmd,
- const std::vector<int32_t> &weights)
+ const std::vector<int32_t> &weights,
+ fef::MatchData::UP match_data)
{
typedef WeightedSetTermSearchImpl<vespalib::LeftArrayHeap, SearchIteratorPack> ArrayHeapImpl;
typedef WeightedSetTermSearchImpl<vespalib::LeftHeap, SearchIteratorPack> HeapImpl;
if (children.size() < 128) {
- return new ArrayHeapImpl(tmd, weights, SearchIteratorPack(children));
+ return new ArrayHeapImpl(tmd, weights, SearchIteratorPack(children, std::move(match_data)));
}
- return new HeapImpl(tmd, weights, SearchIteratorPack(children));
+ return new HeapImpl(tmd, weights, SearchIteratorPack(children, std::move(match_data)));
}
//-----------------------------------------------------------------------------
diff --git a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.h b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.h
index de7131bd0a9..397ac0caf2e 100644
--- a/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.h
+++ b/searchlib/src/vespa/searchlib/queryeval/weighted_set_term_search.h
@@ -29,7 +29,8 @@ protected:
public:
static SearchIterator* create(const std::vector<SearchIterator*> &children,
search::fef::TermFieldMatchData &tmd,
- const std::vector<int32_t> &weights);
+ const std::vector<int32_t> &weights,
+ fef::MatchData::UP match_data);
static SearchIterator::UP create(search::fef::TermFieldMatchData &tmd,
const std::vector<int32_t> &weights,
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp
index bfc7f7032c1..1fa518af28f 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.cpp
@@ -553,13 +553,10 @@ FakeWord::validate(search::queryeval::SearchIterator *iterator, bool verbose) co
bool
-FakeWord::validate(std::shared_ptr<FieldReader> &fieldReader,
+FakeWord::validate(FieldReader &fieldReader,
uint32_t wordNum,
const fef::TermFieldMatchDataArray &matchData,
- bool verbose,
- uint32_t &checkPointCheck,
- uint32_t checkPointInterval,
- CheckPointCallback *const checkPointCallback) const
+ bool verbose) const
{
uint32_t docId = 0;
uint32_t numDocs;
@@ -585,8 +582,8 @@ FakeWord::validate(std::shared_ptr<FieldReader> &fieldReader,
#endif
numDocs = _postings.size();
for (residue = numDocs; residue > 0; --residue) {
- assert(fieldReader->_wordNum == wordNum);
- DocIdAndFeatures &features(fieldReader->_docIdAndFeatures);
+ assert(fieldReader._wordNum == wordNum);
+ DocIdAndFeatures &features(fieldReader._docIdAndFeatures);
docId = features._docId;
assert(d != de);
assert(d->_docId == docId);
@@ -648,12 +645,7 @@ FakeWord::validate(std::shared_ptr<FieldReader> &fieldReader,
assert(presidue == 0);
++d;
}
- if (++checkPointCheck >= checkPointInterval) {
- checkPointCheck = 0;
- if (checkPointCallback != NULL)
- checkPointCallback->checkPoint();
- }
- fieldReader->read();
+ fieldReader.read();
}
if (matchData.valid()) {
assert(p == pe);
@@ -707,11 +699,8 @@ FakeWord::validate(const search::BitVector &bv) const
bool
-FakeWord::dump(std::shared_ptr<FieldWriter> &fieldWriter,
- bool verbose,
- uint32_t &checkPointCheck,
- uint32_t checkPointInterval,
- CheckPointCallback *checkPointCallback) const
+FakeWord::dump(FieldWriter &fieldWriter,
+ bool verbose) const
{
uint32_t numDocs;
uint32_t residue;
@@ -732,13 +721,8 @@ FakeWord::dump(std::shared_ptr<FieldWriter> &fieldWriter,
assert(d != de);
setupFeatures(*d, &*p, features);
p += d->_positions;
- fieldWriter->add(features);
+ fieldWriter.add(features);
++d;
- if (++checkPointCheck >= checkPointInterval) {
- checkPointCheck = 0;
- if (checkPointCallback != NULL)
- checkPointCallback->checkPoint();
- }
}
assert(p == pe);
assert(d == de);
diff --git a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.h b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.h
index 9417c32f1e5..619c1760797 100644
--- a/searchlib/src/vespa/searchlib/test/fakedata/fakeword.h
+++ b/searchlib/src/vespa/searchlib/test/fakedata/fakeword.h
@@ -17,15 +17,6 @@ namespace search {
namespace fakedata {
-class CheckPointCallback
-{
-public:
- CheckPointCallback() {}
- virtual ~CheckPointCallback() {}
-
- virtual void checkPoint() = 0;
-};
-
/*
* General representation of a faked word, containing all features used
* by any of the candidate posting list formats.
@@ -254,23 +245,17 @@ public:
bool validate(search::queryeval::SearchIterator *iterator, bool verbose) const;
bool
- validate(std::shared_ptr<search::diskindex::FieldReader> &fieldReader,
+ validate(search::diskindex::FieldReader &fieldReader,
uint32_t wordNum,
const fef::TermFieldMatchDataArray &matchData,
- bool verbose,
- uint32_t &checkPointCheck,
- uint32_t checkPointInterval,
- CheckPointCallback *const checkPointCallback) const;
+ bool verbose) const;
void validate(const std::vector<uint32_t> &docIds) const;
void validate(const BitVector &bv) const;
bool
- dump(std::shared_ptr<search::diskindex::FieldWriter> &fieldWriter,
- bool verbose,
- uint32_t &checkPointCheck,
- uint32_t checkPointInterval,
- CheckPointCallback *checkPointCallback) const;
+ dump(search::diskindex::FieldWriter &fieldWriter,
+ bool verbose) const;
const std::string &getName() const { return _name; }
uint32_t getDocIdLimit() const { return _docIdLimit; }
diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp
index a324e42ab24..74ca3e6527d 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/domain.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/domain.cpp
@@ -4,6 +4,7 @@
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/closuretask.h>
#include <vespa/fastos/file.h>
+#include <algorithm>
#include <vespa/log/log.h>
LOG_SETUP(".transactionlog.domain");
@@ -20,17 +21,13 @@ using std::runtime_error;
namespace search::transactionlog {
-Domain::Domain(const string &domainName,
- const string & baseDir,
- vespalib::ThreadStackExecutor & executor,
- uint64_t domainPartSize,
- bool useFsync,
- DomainPart::Crc defaultCrcType,
+Domain::Domain(const string &domainName, const string & baseDir, Executor & commitExecutor,
+ Executor & sessionExecutor, uint64_t domainPartSize, DomainPart::Crc defaultCrcType,
const FileHeaderContext &fileHeaderContext) :
_defaultCrcType(defaultCrcType),
- _executor(executor),
+ _commitExecutor(commitExecutor),
+ _sessionExecutor(sessionExecutor),
_sessionId(1),
- _useFsync(useFsync),
_syncMonitor(),
_pendingSync(false),
_name(domainName),
@@ -54,17 +51,17 @@ Domain::Domain(const string &domainName,
const int64_t lastPart = partIdVector.empty() ? 0 : partIdVector.back();
for (const int64_t partId : partIdVector) {
if ( partId != -1) {
- _executor.execute(makeTask(makeClosure(this, &Domain::addPart, partId, partId == lastPart)));
+ _sessionExecutor.execute(makeTask(makeClosure(this, &Domain::addPart, partId, partId == lastPart)));
}
}
- _executor.sync();
+ _sessionExecutor.sync();
if (_parts.empty() || _parts.crbegin()->second->isClosed()) {
- _parts[lastPart].reset(new DomainPart(_name, dir(), lastPart, _useFsync, _defaultCrcType, _fileHeaderContext, false));
+ _parts[lastPart].reset(new DomainPart(_name, dir(), lastPart, _defaultCrcType, _fileHeaderContext, false));
}
}
void Domain::addPart(int64_t partId, bool isLastPart) {
- DomainPart::SP dp(new DomainPart(_name, dir(), partId, _useFsync, _defaultCrcType, _fileHeaderContext, isLastPart));
+ DomainPart::SP dp(new DomainPart(_name, dir(), partId, _defaultCrcType, _fileHeaderContext, isLastPart));
if (dp->size() == 0) {
// Only last domain part is allowed to be truncated down to
// empty size.
@@ -111,8 +108,7 @@ Domain::getDomainInfo() const
DomainInfo info(SerialNumRange(begin(guard), end(guard)), size(guard), byteSize(guard));
for (const auto &entry: _parts) {
const DomainPart &part = *entry.second;
- info.parts.emplace_back(PartInfo(part.range(), part.size(),
- part.byteSize(), part.fileName()));
+ info.parts.emplace_back(PartInfo(part.range(), part.size(), part.byteSize(), part.fileName()));
}
return info;
}
@@ -198,7 +194,7 @@ Domain::triggerSyncNow()
if (!_pendingSync) {
_pendingSync = true;
DomainPart::SP dp(_parts.rbegin()->second);
- _executor.execute(Sync::UP(new Sync(_syncMonitor, dp, _pendingSync)));
+ _sessionExecutor.execute(Sync::UP(new Sync(_syncMonitor, dp, _pendingSync)));
}
}
@@ -256,7 +252,7 @@ void Domain::cleanSessions()
LockGuard guard(_sessionLock);
for (SessionList::iterator it(_sessions.begin()), mt(_sessions.end()); it != mt; ) {
Session * session(it->second.get());
- if ((!session->continous() && session->inSync())) {
+ if (session->inSync()) {
_sessions.erase(it++);
} else if (session->finished()) {
_sessions.erase(it++);
@@ -266,6 +262,18 @@ void Domain::cleanSessions()
}
}
+namespace {
+
+void waitPendingSync(vespalib::Monitor &syncMonitor, bool &pendingSync)
+{
+ MonitorGuard guard(syncMonitor);
+ while (pendingSync) {
+ guard.wait();
+ }
+}
+
+}
+
void Domain::commit(const Packet & packet)
{
DomainPart::SP dp(_parts.rbegin()->second);
@@ -273,15 +281,11 @@ void Domain::commit(const Packet & packet)
Packet::Entry entry;
entry.deserialize(is);
if (dp->byteSize() > _domainPartSize) {
+ waitPendingSync(_syncMonitor, _pendingSync);
triggerSyncNow();
- {
- MonitorGuard guard(_syncMonitor);
- while (_pendingSync) {
- guard.wait();
- }
- }
+ waitPendingSync(_syncMonitor, _pendingSync);
dp->close();
- dp.reset(new DomainPart(_name, dir(), entry.serial(), _useFsync, _defaultCrcType, _fileHeaderContext, false));
+ dp.reset(new DomainPart(_name, dir(), entry.serial(), _defaultCrcType, _fileHeaderContext, false));
{
LockGuard guard(_lock);
_parts[entry.serial()] = dp;
@@ -290,19 +294,9 @@ void Domain::commit(const Packet & packet)
}
dp->commit(entry.serial(), packet);
cleanSessions();
-
- LockGuard guard(_sessionLock);
- for (auto & it : _sessions) {
- const Session::SP & session(it.second);
- if (session->continous()) {
- if (session->ok()) {
- Session::enQ(session, entry.serial(), packet);
- }
- }
- }
}
-bool Domain::erase(const SerialNum & to)
+bool Domain::erase(SerialNum to)
{
bool retval(true);
/// Do not erase the last element
@@ -320,7 +314,8 @@ bool Domain::erase(const SerialNum & to)
return retval;
}
-int Domain::visit(const Domain::SP & domain, const SerialNum & from, const SerialNum & to, FRT_Supervisor & supervisor, FNET_Connection *conn)
+int Domain::visit(const Domain::SP & domain, SerialNum from, SerialNum to,
+ FRT_Supervisor & supervisor, FNET_Connection *conn)
{
assert(this == domain.get());
cleanSessions();
@@ -348,13 +343,14 @@ int Domain::startSession(int sessionId)
int Domain::closeSession(int sessionId)
{
+ _commitExecutor.sync();
int retval(-1);
{
LockGuard guard(_sessionLock);
SessionList::iterator found = _sessions.find(sessionId);
if (found != _sessions.end()) {
retval = 1;
- _executor.sync();
+ _sessionExecutor.sync();
}
}
if (retval == 1) {
@@ -371,18 +367,6 @@ int Domain::closeSession(int sessionId)
return retval;
}
-int Domain::subscribe(const Domain::SP & domain, const SerialNum & from, FRT_Supervisor & supervisor, FNET_Connection *conn)
-{
- assert(this == domain.get());
- cleanSessions();
- SerialNumRange range(from, end());
- Session * session = new Session(_sessionId++, range, domain, supervisor, conn, true);
- LockGuard guard(_sessionLock);
- _sessions[session->id()] = Session::SP(session);
- return session->id();
-}
-
-
Domain::SerialNumList
Domain::scanDir()
{
diff --git a/searchlib/src/vespa/searchlib/transactionlog/domain.h b/searchlib/src/vespa/searchlib/transactionlog/domain.h
index f70ce7654c1..ab7ded91e5b 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/domain.h
+++ b/searchlib/src/vespa/searchlib/transactionlog/domain.h
@@ -1,9 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespa/searchlib/transactionlog/domainpart.h>
-#include <vespa/searchlib/transactionlog/session.h>
-#include <vespa/vespalib/util/threadstackexecutor.h>
+#include "domainpart.h"
+#include "session.h"
+#include <vespa/vespalib/util/threadexecutor.h>
namespace search::transactionlog {
@@ -35,31 +35,20 @@ typedef std::map<vespalib::string, DomainInfo> DomainStats;
class Domain
{
public:
- typedef std::shared_ptr<Domain> SP;
- Domain(const vespalib::string &name,
- const vespalib::string &baseDir,
- vespalib::ThreadStackExecutor & executor,
- uint64_t domainPartSize,
- bool useFsync,
- DomainPart::Crc defaultCrcType,
+ using SP = std::shared_ptr<Domain>;
+ using Executor = vespalib::ThreadExecutor;
+ Domain(const vespalib::string &name, const vespalib::string &baseDir, Executor & commitExecutor,
+ Executor & sessionExecutor, uint64_t domainPartSize, DomainPart::Crc defaultCrcType,
const common::FileHeaderContext &fileHeaderContext);
virtual ~Domain();
DomainInfo getDomainInfo() const;
-
const vespalib::string & name() const { return _name; }
- bool erase(const SerialNum & to);
+ bool erase(SerialNum to);
void commit(const Packet & packet);
- int
- visit(const Domain::SP & self,
- const SerialNum & from,
- const SerialNum & to,
- FRT_Supervisor & supervisor,
- FNET_Connection *conn);
-
- int subscribe(const Domain::SP & self, const SerialNum & from, FRT_Supervisor & supervisor, FNET_Connection *conn);
+ int visit(const Domain::SP & self, SerialNum from, SerialNum to, FRT_Supervisor & supervisor, FNET_Connection *conn);
SerialNum begin() const;
SerialNum end() const;
@@ -82,7 +71,7 @@ public:
return base + "/" + domain;
}
vespalib::Executor::Task::UP execute(vespalib::Executor::Task::UP task) {
- return _executor.execute(std::move(task));
+ return _sessionExecutor.execute(std::move(task));
}
uint64_t size() const;
private:
@@ -94,18 +83,17 @@ private:
vespalib::string dir() const { return getDir(_baseDir, _name); }
void addPart(int64_t partId, bool isLastPart);
- typedef std::vector<SerialNum> SerialNumList;
+ using SerialNumList = std::vector<SerialNum>;
SerialNumList scanDir();
- typedef std::map<int, Session::SP > SessionList;
- typedef std::map<int64_t, DomainPart::SP > DomainPartList;
- typedef vespalib::ThreadStackExecutor Executor;
+ using SessionList = std::map<int, Session::SP>;
+ using DomainPartList = std::map<int64_t, DomainPart::SP>;
DomainPart::Crc _defaultCrcType;
- Executor & _executor;
+ Executor & _commitExecutor;
+ Executor & _sessionExecutor;
std::atomic<int> _sessionId;
- const bool _useFsync;
vespalib::Monitor _syncMonitor;
bool _pendingSync;
vespalib::string _name;
@@ -117,7 +105,6 @@ private:
vespalib::string _baseDir;
const common::FileHeaderContext &_fileHeaderContext;
bool _markedDeleted;
- bool _urgentSync;
};
}
diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp
index ebe3c07c1df..35bdc71c963 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.cpp
@@ -265,15 +265,9 @@ DomainPart::buildPacketMapping(bool allowTruncate)
return currPos;
}
-DomainPart::DomainPart(const string & name,
- const string & baseDir,
- SerialNum s,
- bool useFsync,
- Crc defaultCrc,
- const FileHeaderContext &fileHeaderContext,
- bool allowTruncate) :
+DomainPart::DomainPart(const string & name, const string & baseDir, SerialNum s, Crc defaultCrc,
+ const FileHeaderContext &fileHeaderContext, bool allowTruncate) :
_defaultCrc(defaultCrc),
- _useFsync(useFsync),
_lock(),
_fileLock(),
_range(s),
@@ -428,9 +422,6 @@ DomainPart::commit(SerialNum firstSerial, const Packet &packet)
entry.serial(), _range.to()));
}
}
- if (_useFsync) {
- sync();
- }
bool merged(false);
LockGuard guard(_lock);
diff --git a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h
index 2af0a43d03c..59d0df6df94 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/domainpart.h
+++ b/searchlib/src/vespa/searchlib/transactionlog/domainpart.h
@@ -24,13 +24,8 @@ public:
xxh64=2
};
typedef std::shared_ptr<DomainPart> SP;
- DomainPart(const vespalib::string &name,
- const vespalib::string &baseDir,
- SerialNum s,
- bool useFsync,
- Crc defaultCrc,
- const common::FileHeaderContext &FileHeaderContext,
- bool allowTruncate);
+ DomainPart(const vespalib::string &name, const vespalib::string &baseDir, SerialNum s, Crc defaultCrc,
+ const common::FileHeaderContext &FileHeaderContext, bool allowTruncate);
~DomainPart();
@@ -57,11 +52,7 @@ private:
bool openAndFind(FastOS_FileInterface &file, const SerialNum &from);
int64_t buildPacketMapping(bool allowTruncate);
- static bool
- read(FastOS_FileInterface &file,
- Packet::Entry &entry,
- vespalib::alloc::Alloc &buf,
- bool allowTruncate);
+ static bool read(FastOS_FileInterface &file, Packet::Entry &entry, vespalib::alloc::Alloc &buf, bool allowTruncate);
void write(FastOS_FileInterface &file, const Packet::Entry &entry);
static int32_t calcCrc(Crc crc, const void * buf, size_t len);
@@ -70,11 +61,7 @@ private:
class SkipInfo
{
public:
- SkipInfo(SerialNum s, uint64_t p) :
- _id(s),
- _pos(p)
- {
- }
+ SkipInfo(SerialNum s, uint64_t p) : _id(s), _pos(p) {}
bool operator ==(const SkipInfo &b) const { return cmp(b) == 0; }
bool operator <(const SkipInfo &b) const { return cmp(b) < 0; }
@@ -91,7 +78,6 @@ private:
typedef std::vector<SkipInfo> SkipList;
typedef std::map<SerialNum, Packet> PacketList;
const Crc _defaultCrc;
- const bool _useFsync;
vespalib::Lock _lock;
vespalib::Lock _fileLock;
SerialNumRange _range;
diff --git a/searchlib/src/vespa/searchlib/transactionlog/session.cpp b/searchlib/src/vespa/searchlib/transactionlog/session.cpp
index 302e0c12dda..3ea656be9a2 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/session.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/session.cpp
@@ -19,17 +19,7 @@ namespace {
vespalib::Executor::Task::UP
Session::createTask(const Session::SP & session)
{
- if (session->continous()) {
- return Task::UP(new SubscribeTask(session));
- } else {
- return Task::UP(new VisitTask(session));
- }
-}
-
-void
-Session::SubscribeTask::run()
-{
- _session->subscribe();
+ return Task::UP(new VisitTask(session));
}
void
@@ -110,14 +100,6 @@ Session::enQ(const SP & session, SerialNum serial, const Packet & packet)
}
void
-Session::subscribe()
-{
- visit();
- sendPending();
- sendSync();
-}
-
-void
Session::sendPending()
{
for (;;) {
@@ -147,7 +129,7 @@ void
Session::finalize()
{
if (!ok()) {
- LOG(error, "[%d] : Error in %s(%" PRIu64 " - %" PRIu64 "), stopping since I have no idea on what to do.", _id, (continous() ? "subscriber" : "visitor"), _range.from(), _range.to());
+ LOG(error, "[%d] : Error in %s(%" PRIu64 " - %" PRIu64 "), stopping since I have no idea on what to do.", _id, "visitor", _range.from(), _range.to());
}
LOG(debug, "[%d] : Stopped %" PRIu64 " - %" PRIu64, _id, _range.from(), _range.to());
_finished = true;
@@ -206,13 +188,12 @@ Session::rpcAsync(FRT_RPCRequest * req)
}
Session::Session(int sId, const SerialNumRange & r, const Domain::SP & d,
- FRT_Supervisor & supervisor, FNET_Connection *conn, bool subscriber) :
+ FRT_Supervisor & supervisor, FNET_Connection *conn) :
_supervisor(supervisor),
_connection(conn),
_domain(d),
_range(r),
_id(sId),
- _subscriber(subscriber),
_inSync(false),
_ok(true),
_finished(false),
@@ -254,19 +235,6 @@ Session::send(FRT_RPCRequest * req, bool wait)
}
bool
-Session::sendSync()
-{
- FRT_RPCRequest *req = _supervisor.AllocRPCRequest();
- req->SetMethodName("syncCallback");
- req->GetParams()->AddString(_domain->name().c_str());
- req->GetParams()->AddInt32(id());
- bool retval(send(req, true));
- LockGuard guard(_lock);
- _inSync = true;
- return retval;
-}
-
-bool
Session::sendDone()
{
FRT_RPCRequest *req = _supervisor.AllocRPCRequest();
diff --git a/searchlib/src/vespa/searchlib/transactionlog/session.h b/searchlib/src/vespa/searchlib/transactionlog/session.h
index 26e448540c3..ac6f496e151 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/session.h
+++ b/searchlib/src/vespa/searchlib/transactionlog/session.h
@@ -13,7 +13,7 @@ namespace search::transactionlog {
class Domain;
class DomainPart;
-typedef std::shared_ptr<Domain> DomainSP;
+using DomainSP = std::shared_ptr<Domain>;
class Session : public FRT_IRequestWait
{
@@ -24,12 +24,11 @@ public:
typedef std::shared_ptr<Session> SP;
Session(const Session &) = delete;
Session & operator = (const Session &) = delete;
- Session(int sId, const SerialNumRange & r, const DomainSP & d, FRT_Supervisor & supervisor, FNET_Connection *conn, bool subscriber=false);
- virtual ~Session();
+ Session(int sId, const SerialNumRange & r, const DomainSP & d, FRT_Supervisor & supervisor, FNET_Connection *conn);
+ ~Session();
const SerialNumRange & range() const { return _range; }
int id() const { return _id; }
bool inSync() const;
- bool continous() const { return _subscriber; }
bool ok() const { return _ok; }
bool finished() const;
static void enQ(const SP & session, SerialNum serial, const Packet & packet);
@@ -51,13 +50,7 @@ private:
void run() override;
Session::SP _session;
};
- class SubscribeTask : public Task {
- public:
- SubscribeTask(const Session::SP & session) : _session(session) { }
- private:
- void run() override;
- Session::SP _session;
- };
+
class SendTask : public Task {
public:
SendTask(const Session::SP & session) : _session(session) { }
@@ -70,11 +63,9 @@ private:
bool send(const Packet & packet);
void sendPacket(SerialNum serial, const Packet & packet);
bool sendDone();
- bool sendSync();
void sendPending();
void visit();
void visitOnly();
- void subscribe();
void finalize();
bool visit(FastOS_FileInterface & file, DomainPart & dp) __attribute__((noinline));
int32_t rpc(FRT_RPCRequest * req);
@@ -84,7 +75,6 @@ private:
DomainSP _domain;
SerialNumRange _range;
int _id;
- bool _subscriber;
bool _inSync;
bool _ok;
bool _finished;
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp
index af2e8ad47a1..aa2b558ea0c 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogclient.cpp
@@ -88,12 +88,8 @@ TransLogClient::Session::UP TransLogClient::open(const vespalib::string & domain
return session;
}
-TransLogClient::Subscriber::UP TransLogClient::createSubscriber(const vespalib::string & domain, TransLogClient::Session::Callback & callBack)
-{
- return TransLogClient::Subscriber::UP(new Subscriber(domain, *this, callBack));
-}
-
-TransLogClient::Visitor::UP TransLogClient::createVisitor(const vespalib::string & domain, TransLogClient::Session::Callback & callBack)
+TransLogClient::Visitor::UP
+TransLogClient::createVisitor(const vespalib::string & domain, TransLogClient::Session::Callback & callBack)
{
return TransLogClient::Visitor::UP(new Visitor(domain, *this, callBack));
}
@@ -151,13 +147,6 @@ void TransLogClient::exportRPC(FRT_Supervisor & supervisor)
rb.ReturnDesc("result", "A resultcode(int) of the operation. Non zero number indicates error.");
//-- Visit Callbacks -----------------------------------------------------------
- rb.DefineMethod("syncCallback", "si", "i", false, FRT_METHOD(TransLogClient::syncCallbackRPC), this);
- rb.MethodDesc("Will tell you that now you are uptodate on the subscribtion.");
- rb.ParamDesc("name", "The name of the domain.");
- rb.ParamDesc("session", "Session handle.");
- rb.ReturnDesc("result", "A resultcode(int) of the operation. Non zero number indicates error.");
-
- //-- Visit Callbacks -----------------------------------------------------------
rb.DefineMethod("eofCallback", "si", "i", false, FRT_METHOD(TransLogClient::eofCallbackRPC), this);
rb.MethodDesc("Will tell you that you are done with the visitor.");
rb.ParamDesc("name", "The name of the domain.");
@@ -182,24 +171,6 @@ void TransLogClient::visitCallbackRPC(FRT_RPCRequest *req)
LOG(debug, "visitCallback(%s, %d)=%d done", domainName, sessionId, retval);
}
-void TransLogClient::syncCallbackRPC(FRT_RPCRequest *req)
-{
- uint32_t retval(uint32_t(-1));
- FRT_Values & params = *req->GetParams();
- FRT_Values & ret = *req->GetReturn();
- const char * domainName = params[0]._string._str;
- int32_t sessionId(params[1]._intval32);
- LOG(debug, "syncCallback(%s, %d)", domainName, sessionId);
- LockGuard guard(_lock);
- Session * session(findSession(domainName, sessionId));
- if (session != NULL) {
- session->inSync();
- retval = 0;
- }
- ret.AddInt32(retval);
- LOG(debug, "syncCallback(%s, %d)=%d done", domainName, sessionId, retval);
-}
-
void TransLogClient::eofCallbackRPC(FRT_RPCRequest *req)
{
uint32_t retval(uint32_t(-1));
@@ -322,21 +293,12 @@ int TransLogClient::SessionKey::cmp(const TransLogClient::SessionKey & b) const
return diff;
}
-TransLogClient::Subscriber::Subscriber(const vespalib::string & domain, TransLogClient & tlc, Callback & callBack) :
+TransLogClient::Visitor::Visitor(const vespalib::string & domain, TransLogClient & tlc, Callback & callBack) :
Session(domain, tlc),
_callback(callBack)
{
}
-TransLogClient::Subscriber::~Subscriber()
-{
-}
-
-TransLogClient::Visitor::Visitor(const vespalib::string & domain, TransLogClient & tlc, Callback & callBack) :
- Subscriber(domain, tlc, callBack)
-{
-}
-
bool TransLogClient::Session::init(FRT_RPCRequest *req)
{
int32_t retval(_tlc.rpc(req));
@@ -364,15 +326,6 @@ bool TransLogClient::Visitor::visit(const SerialNum & from, const SerialNum & to
return init(req);
}
-bool TransLogClient::Subscriber::subscribe(const SerialNum & from)
-{
- FRT_RPCRequest *req = _tlc._supervisor->AllocRPCRequest();
- req->SetMethodName("domainSubscribe");
- req->GetParams()->AddString(_domain.c_str());
- req->GetParams()->AddInt64(from);
- return init(req);
-}
-
bool TransLogClient::Session::run()
{
FRT_RPCRequest *req = _tlc._supervisor->AllocRPCRequest();
@@ -402,8 +355,6 @@ bool TransLogClient::Session::close()
return (retval == 0);
}
-TransLogClient::Visitor::~Visitor()
-{
-}
+TransLogClient::Visitor::~Visitor() = default;
}
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogclient.h b/searchlib/src/vespa/searchlib/transactionlog/translogclient.h
index a3351549df8..87901890673 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogclient.h
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogclient.h
@@ -28,7 +28,6 @@ public:
public:
virtual ~Callback() { }
virtual RPC::Result receive(const Packet & packet) = 0;
- virtual void inSync() { }
virtual void eof() { }
};
public:
@@ -46,7 +45,6 @@ public:
bool sync(const SerialNum &syncTo, SerialNum &syncedTo);
virtual RPC::Result visit(const Packet & ) { return RPC::OK; }
- virtual void inSync() { }
virtual void eof() { }
bool close();
void clear();
@@ -60,32 +58,22 @@ public:
int _sessionId;
};
/// Here you connect to the incomming data getting everything from <from>
- class Subscriber : public Session
+ class Visitor : public Session
{
public:
- typedef std::unique_ptr<Subscriber> UP;
- typedef std::shared_ptr<Subscriber> SP;
+ typedef std::unique_ptr<Visitor> UP;
+ typedef std::shared_ptr<Visitor> SP;
- Subscriber(const vespalib::string & domain, TransLogClient & tlc, Callback & callBack);
- bool subscribe(const SerialNum & from);
- ~Subscriber();
+ Visitor(const vespalib::string & domain, TransLogClient & tlc, Callback & callBack);
+ bool visit(const SerialNum & from, const SerialNum & to);
+ virtual ~Visitor();
RPC::Result visit(const Packet & packet) override { return _callback.receive(packet); }
- void inSync() override { _callback.inSync(); }
void eof() override { _callback.eof(); }
private:
Callback & _callback;
};
/// Here you read the incomming data getting everything from <from>
- class Visitor : public Subscriber
- {
- public:
- typedef std::unique_ptr<Visitor> UP;
- typedef std::shared_ptr<Visitor> SP;
- Visitor(const vespalib::string & domain, TransLogClient & tlc, Callback & callBack);
- bool visit(const SerialNum & from, const SerialNum & to);
- virtual ~Visitor();
- };
public:
typedef std::unique_ptr<TransLogClient> UP;
@@ -100,8 +88,6 @@ public:
Session::UP open(const vespalib::string & domain);
/// Here you can get a list of available domains.
bool listDomains(std::vector<vespalib::string> & dir);
- /// Here you get a subscriber
- Subscriber::UP createSubscriber(const vespalib::string & domain, Session::Callback & callBack);
Visitor::UP createVisitor(const vespalib::string & domain, Session::Callback & callBack);
bool isConnected() const;
@@ -111,7 +97,6 @@ public:
private:
void exportRPC(FRT_Supervisor & supervisor);
void visitCallbackRPC(FRT_RPCRequest *req);
- void syncCallbackRPC(FRT_RPCRequest *req);
void eofCallbackRPC(FRT_RPCRequest *req);
int32_t rpc(FRT_RPCRequest * req);
Session * findSession(const vespalib::string & domain, int sessionId);
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
index ad5654978ae..ca17457bdb9 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogserver.cpp
@@ -75,23 +75,26 @@ SyncHandler::PerformTask()
}
-
-
-TransLogServer::TransLogServer(const vespalib::string &name,
- int listenPort,
- const vespalib::string &baseDir,
- const FileHeaderContext &fileHeaderContext,
- uint64_t domainPartSize,
- bool useFsync,
- size_t maxThreads,
- DomainPart::Crc defaultCrcType)
+TransLogServer::TransLogServer(const vespalib::string &name, int listenPort, const vespalib::string &baseDir,
+ const FileHeaderContext &fileHeaderContext)
+ : TransLogServer(name, listenPort, baseDir, fileHeaderContext, 0x10000000)
+{}
+
+TransLogServer::TransLogServer(const vespalib::string &name, int listenPort, const vespalib::string &baseDir,
+ const FileHeaderContext &fileHeaderContext, uint64_t domainPartSize)
+ : TransLogServer(name, listenPort, baseDir, fileHeaderContext, domainPartSize, 4, DomainPart::Crc::xxh64)
+{}
+
+TransLogServer::TransLogServer(const vespalib::string &name, int listenPort, const vespalib::string &baseDir,
+ const FileHeaderContext &fileHeaderContext, uint64_t domainPartSize,
+ size_t maxThreads, DomainPart::Crc defaultCrcType)
: FRT_Invokable(),
_name(name),
_baseDir(baseDir),
_domainPartSize(domainPartSize),
- _useFsync(useFsync),
_defaultCrcType(defaultCrcType),
- _executor(maxThreads, 128*1024),
+ _commitExecutor(maxThreads, 128*1024),
+ _sessionExecutor(maxThreads, 128*1024),
_threadPool(8192, 1),
_supervisor(std::make_unique<FRT_Supervisor>()),
_domains(),
@@ -107,13 +110,8 @@ TransLogServer::TransLogServer(const vespalib::string &name,
domainDir >> domainName;
if ( ! domainName.empty()) {
try {
- Domain::SP domain(new Domain(domainName,
- dir(),
- _executor,
- _domainPartSize,
- _useFsync,
- _defaultCrcType,
- _fileHeaderContext));
+ auto domain = std::make_shared<Domain>(domainName, dir(), _commitExecutor, _sessionExecutor,
+ _domainPartSize, _defaultCrcType,_fileHeaderContext);
_domains[domain->name()] = domain;
} catch (const std::exception & e) {
LOG(warning, "Failed creating %s domain on startup. Exception = %s", domainName.c_str(), e.what());
@@ -169,8 +167,6 @@ void TransLogServer::run()
bool immediate = true;
if (strcmp(req->GetMethodName(), "domainSessionClose") == 0) {
domainSessionClose(req);
- } else if (strcmp(req->GetMethodName(), "domainSubscribe") == 0) {
- domainSubscribe(req);
} else if (strcmp(req->GetMethodName(), "domainVisit") == 0) {
domainVisit(req);
} else if (strcmp(req->GetMethodName(), "createDomain") == 0) {
@@ -305,13 +301,6 @@ void TransLogServer::exportRPC(FRT_Supervisor & supervisor)
rb.ParamDesc("to", "Will erase all up and including.");
rb.ReturnDesc("result", "A resultcode(int) of the operation. Negative number indicates error.");
- //-- Domain Subscribe -----------------------------------------------------------
- rb.DefineMethod("domainSubscribe", "sl", "i", true, FRT_METHOD(TransLogServer::relayToThreadRPC), this);
- rb.MethodDesc("This will create a subscription. It will live till the connection is closed.");
- rb.ParamDesc("name", "The name of the domain.");
- rb.ParamDesc("from", "Will return all entries following(not including) <from>.");
- rb.ReturnDesc("result", "A resultcode(int) of the operation. Negative number indicates error. Positive number is the sessionid");
-
//-- Domain Visit -----------------------------------------------------------
rb.DefineMethod("domainVisit", "sll", "i", true, FRT_METHOD(TransLogServer::relayToThreadRPC), this);
rb.MethodDesc("This will create a visitor that return all operations in the range.");
@@ -335,14 +324,11 @@ void TransLogServer::exportRPC(FRT_Supervisor & supervisor)
rb.ReturnDesc("result", "A resultcode(int) of the operation. Negative number indicates error. 1 means busy -> retry. 0 is OK.");
//-- Domain Sync --
- rb.DefineMethod("domainSync", "sl", "il", true,
- FRT_METHOD(TransLogServer::relayToThreadRPC), this);
+ rb.DefineMethod("domainSync", "sl", "il", true, FRT_METHOD(TransLogServer::relayToThreadRPC), this);
rb.MethodDesc("Sync domain to given entry");
rb.ParamDesc("name", "The name of the domain.");
rb.ParamDesc("syncto", "Entry to sync to");
- rb.ReturnDesc("result",
- "A resultcode(int) of the operation. "
- "Negative number indicates error.");
+ rb.ReturnDesc("result", "A resultcode(int) of the operation. Negative number indicates error.");
rb.ReturnDesc("syncedto", "Entry synced to");
}
@@ -359,13 +345,8 @@ void TransLogServer::createDomain(FRT_RPCRequest *req)
Domain::SP domain(findDomain(domainName));
if ( !domain ) {
try {
- domain.reset(new Domain(domainName,
- dir(),
- _executor,
- _domainPartSize,
- _useFsync,
- _defaultCrcType,
- _fileHeaderContext));
+ domain = std::make_shared<Domain>(domainName, dir(), _commitExecutor, _sessionExecutor,
+ _domainPartSize, _defaultCrcType, _fileHeaderContext);
{
Guard domainGuard(_lock);
_domains[domain->name()] = domain;
@@ -505,22 +486,6 @@ void TransLogServer::domainCommit(FRT_RPCRequest *req)
}
}
-void TransLogServer::domainSubscribe(FRT_RPCRequest *req)
-{
- uint32_t retval(uint32_t(-1));
- FRT_Values & params = *req->GetParams();
- FRT_Values & ret = *req->GetReturn();
- const char * domainName = params[0]._string._str;
- LOG(debug, "domainSubscribe(%s)", domainName);
- Domain::SP domain(findDomain(domainName));
- if (domain) {
- SerialNum from(params[1]._intval64);
- LOG(debug, "domainSubscribe(%s, %" PRIu64 ")", domainName, from);
- retval = domain->subscribe(domain, from, *_supervisor, req->GetConnection());
- }
- ret.AddInt32(retval);
-}
-
void TransLogServer::domainVisit(FRT_RPCRequest *req)
{
uint32_t retval(uint32_t(-1));
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogserver.h b/searchlib/src/vespa/searchlib/transactionlog/translogserver.h
index e7aca212b07..92832786059 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogserver.h
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogserver.h
@@ -3,6 +3,7 @@
#include "domain.h"
#include <vespa/vespalib/util/document_runnable.h>
+#include <vespa/vespalib/util/threadstackexecutor.h>
#include <vespa/document/util/queue.h>
#include <vespa/fnet/frt/invokable.h>
#include <mutex>
@@ -20,17 +21,14 @@ public:
typedef std::unique_ptr<TransLogServer> UP;
typedef std::shared_ptr<TransLogServer> SP;
- TransLogServer(const vespalib::string &name,
- int listenPort,
- const vespalib::string &baseDir,
+ TransLogServer(const vespalib::string &name, int listenPort, const vespalib::string &baseDir,
const common::FileHeaderContext &fileHeaderContext,
- uint64_t domainPartSize=0x10000000,
- bool useFsync=false,
- size_t maxThreads=4,
- DomainPart::Crc defaultCrc=DomainPart::xxh64);
+ uint64_t domainPartSize, size_t maxThreads, DomainPart::Crc defaultCrc);
+ TransLogServer(const vespalib::string &name, int listenPort, const vespalib::string &baseDir,
+ const common::FileHeaderContext &fileHeaderContext, uint64_t domainPartSize);
+ TransLogServer(const vespalib::string &name, int listenPort, const vespalib::string &baseDir,
+ const common::FileHeaderContext &fileHeaderContext);
virtual ~TransLogServer();
- uint64_t getDomainPartSize() const { return _domainPartSize; }
- uint64_t setDomainPartSize();
DomainStats getDomainStats() const;
void commit(const vespalib::string & domainName, const Packet & packet) override;
@@ -63,7 +61,6 @@ private:
void domainSessionRun(FRT_RPCRequest *req);
void domainPrune(FRT_RPCRequest *req);
void domainVisit(FRT_RPCRequest *req);
- void domainSubscribe(FRT_RPCRequest *req);
void domainSessionClose(FRT_RPCRequest *req);
void domainSync(FRT_RPCRequest *req);
@@ -79,20 +76,20 @@ private:
static const Session::SP & getSession(FRT_RPCRequest *req);
- typedef std::map<vespalib::string, Domain::SP > DomainList;
-
- vespalib::string _name;
- vespalib::string _baseDir;
- const uint64_t _domainPartSize;
- const bool _useFsync;
- const DomainPart::Crc _defaultCrcType;
- vespalib::ThreadStackExecutor _executor;
- FastOS_ThreadPool _threadPool;
- std::unique_ptr<FRT_Supervisor> _supervisor;
- DomainList _domains;
- mutable std::mutex _lock; // Protects _domains
- std::mutex _fileLock; // Protects the creating and deleting domains including file system operations.
- document::Queue<FRT_RPCRequest *> _reqQ;
+ using DomainList = std::map<vespalib::string, Domain::SP >;
+
+ vespalib::string _name;
+ vespalib::string _baseDir;
+ const uint64_t _domainPartSize;
+ const DomainPart::Crc _defaultCrcType;
+ vespalib::ThreadStackExecutor _commitExecutor;
+ vespalib::ThreadStackExecutor _sessionExecutor;
+ FastOS_ThreadPool _threadPool;
+ std::unique_ptr<FRT_Supervisor> _supervisor;
+ DomainList _domains;
+ mutable std::mutex _lock; // Protects _domains
+ std::mutex _fileLock; // Protects the creating and deleting domains including file system operations.
+ document::Queue<FRT_RPCRequest *> _reqQ;
const common::FileHeaderContext &_fileHeaderContext;
using Guard = std::lock_guard<std::mutex>;
};
diff --git a/searchlib/src/vespa/searchlib/transactionlog/translogserverapp.cpp b/searchlib/src/vespa/searchlib/transactionlog/translogserverapp.cpp
index b84d7854457..ff4a402b438 100644
--- a/searchlib/src/vespa/searchlib/transactionlog/translogserverapp.cpp
+++ b/searchlib/src/vespa/searchlib/transactionlog/translogserverapp.cpp
@@ -40,7 +40,7 @@ void TransLogServerApp::start()
{
std::shared_ptr<searchlib::TranslogserverConfig> c = _tlsConfig.get();
_tls.reset(new TransLogServer(c->servername, c->listenport, c->basedir, _fileHeaderContext,
- c->filesizemax, c->usefsync, c->maxthreads, getCrc(c->crcmethod)));
+ c->filesizemax, c->maxthreads, getCrc(c->crcmethod)));
}
TransLogServerApp::~TransLogServerApp()
diff --git a/searchlib/src/vespa/searchlib/util/comprbuffer.cpp b/searchlib/src/vespa/searchlib/util/comprbuffer.cpp
index 8c7f97db022..acfc2e46020 100644
--- a/searchlib/src/vespa/searchlib/util/comprbuffer.cpp
+++ b/searchlib/src/vespa/searchlib/util/comprbuffer.cpp
@@ -1,14 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "comprbuffer.h"
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/fastos/file.h>
#include <cassert>
+#include <cstring>
namespace search {
-using vespalib::nbostream;
-
ComprBuffer::ComprBuffer(uint32_t unitSize)
: _comprBuf(NULL),
_comprBufSize(0),
@@ -114,25 +112,4 @@ ComprBuffer::referenceComprBuf(const ComprBuffer &rhs)
_comprBufSize = rhs._comprBufSize;
}
-
-void
-ComprBuffer::checkPointWrite(nbostream &out)
-{
- _aligner.checkPointWrite(out);
- out << _comprBufSize << _unitSize << _padBefore;
-}
-
-
-void
-ComprBuffer::checkPointRead(nbostream &in)
-{
- _aligner.checkPointRead(in);
- uint32_t unitSize;
- in >> _comprBufSize >> unitSize >> _padBefore;
- assert(unitSize == _unitSize);
-
- allocComprBuf();
-}
-
-
}
diff --git a/searchlib/src/vespa/searchlib/util/comprbuffer.h b/searchlib/src/vespa/searchlib/util/comprbuffer.h
index 61928976acc..c15671d5e14 100644
--- a/searchlib/src/vespa/searchlib/util/comprbuffer.h
+++ b/searchlib/src/vespa/searchlib/util/comprbuffer.h
@@ -31,7 +31,6 @@ public:
static size_t minimumPadding() { return 8; }
uint32_t getUnitBitSize() const { return _unitSize * 8; }
bool getPadBefore() const { return _padBefore; }
- bool getCheckPointResumed() const { return _aligner.getCheckPointResumed(); }
/*
* When encoding to memory instead of file, the compressed buffer must
@@ -44,18 +43,6 @@ public:
* long as rhs is live and unchanged.
*/
void referenceComprBuf(const ComprBuffer &rhs);
-
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt.
- */
- void checkPointWrite(vespalib::nbostream &out);
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- *
- */
- void checkPointRead(vespalib::nbostream &in);
};
}
diff --git a/searchlib/src/vespa/searchlib/util/comprfile.cpp b/searchlib/src/vespa/searchlib/util/comprfile.cpp
index 8b17fd85753..26c66f43993 100644
--- a/searchlib/src/vespa/searchlib/util/comprfile.cpp
+++ b/searchlib/src/vespa/searchlib/util/comprfile.cpp
@@ -1,14 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "comprfile.h"
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/fastos/file.h>
#include <cassert>
+#include <cstring>
namespace search {
-using vespalib::nbostream;
-
void
ComprFileReadBase::ReadComprBuffer(uint64_t stopOffset,
bool readAll,
@@ -292,9 +290,7 @@ ComprFileReadContext(ComprFileDecodeContext &decodeContext)
_bitOffset(0),
_stopOffset(0),
_readAll(true),
- _checkPointOffsetValid(false),
- _file(NULL),
- _checkPointOffset(0)
+ _file(NULL)
{
}
@@ -308,9 +304,7 @@ ComprFileReadContext(uint32_t unitSize)
_bitOffset(0),
_stopOffset(0),
_readAll(true),
- _checkPointOffsetValid(false),
- _file(NULL),
- _checkPointOffset(0)
+ _file(NULL)
{
}
@@ -497,28 +491,6 @@ ComprFileReadContext::copyReadContext(const ComprFileReadContext &rhs)
}
}
-
-void
-ComprFileReadContext::checkPointWrite(nbostream &out)
-{
- ComprBuffer::checkPointWrite(out);
- ComprFileDecodeContext &d = *_decodeContext;
- d.checkPointWrite(out);
- uint64_t bitOffset = d.getBitPosV();
- out << bitOffset;
-}
-
-
-void
-ComprFileReadContext::checkPointRead(nbostream &in)
-{
- ComprBuffer::checkPointRead(in);
- ComprFileDecodeContext &d = *_decodeContext;
- d.checkPointRead(in);
- in >> _checkPointOffset; // Cannot seek until file is opened
- _checkPointOffsetValid = true;
-}
-
ComprFileWriteContext::
ComprFileWriteContext(ComprFileEncodeContext &encodeContext)
: ComprBuffer(encodeContext.getUnitByteSize()),
@@ -598,47 +570,4 @@ ComprFileWriteContext::allocComprBuf()
allocComprBuf(32768, 32768);
}
-
-void
-ComprFileWriteContext::checkPointWrite(nbostream &out)
-{
- ComprBuffer::checkPointWrite(out);
- ComprFileEncodeContext &e = *_encodeContext;
- uint64_t bufferStartFilePos = getBufferStartFilePos();
- uint64_t usedSize = e.getUsedUnits(_comprBuf) *
- e.getUnitByteSize();
- out << bufferStartFilePos << usedSize;
- e.checkPointWrite(out);
- if (usedSize != 0) {
- out.write(_comprBuf, usedSize);
- }
- uint64_t bitOffset = e.getBitPosV();
- out << bitOffset;
-}
-
-
-void
-ComprFileWriteContext::checkPointRead(nbostream &in)
-{
- ComprBuffer::checkPointRead(in);
- ComprFileEncodeContext &e = *_encodeContext;
- uint64_t bufferStartFilePos = 0;
- uint64_t usedSize = 0;
- in >> bufferStartFilePos >> usedSize;
- e.checkPointRead(in);
- if (usedSize != 0) {
- assert((usedSize % e.getUnitByteSize()) == 0);
- assert(_comprBufSize >= usedSize / e.getUnitByteSize());
- in.read(_comprBuf, usedSize);
- }
- setBufferStartFilePos(bufferStartFilePos);
- e.afterWrite(*this, usedSize / e.getUnitByteSize(), bufferStartFilePos);
- uint64_t bitOffset = 0;
- in >> bitOffset;
- uint64_t writeOffset = e.getBitPosV();
- assert(bitOffset == writeOffset);
- (void) writeOffset;
-}
-
-
}
diff --git a/searchlib/src/vespa/searchlib/util/comprfile.h b/searchlib/src/vespa/searchlib/util/comprfile.h
index b69ef140868..3d44f088c74 100644
--- a/searchlib/src/vespa/searchlib/util/comprfile.h
+++ b/searchlib/src/vespa/searchlib/util/comprfile.h
@@ -7,8 +7,6 @@
class FastOS_FileInterface;
-namespace vespalib { class nbostream; }
-
namespace search {
class ComprFileWriteContext;
@@ -60,19 +58,6 @@ public:
* Get size of each unit (typically 4 or 8)
*/
virtual uint32_t getUnitByteSize() const = 0;
-
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Caller must
- * save position.
- */
- virtual void checkPointWrite(vespalib::nbostream &out) = 0;
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- * Caller must restore position.
- */
- virtual void checkPointRead(vespalib::nbostream &in) = 0;
};
class ComprFileReadBase
@@ -110,9 +95,7 @@ private:
int _bitOffset;
uint64_t _stopOffset;
bool _readAll;
- bool _checkPointOffsetValid; // Set only if checkpoint has been read
FastOS_FileInterface *_file;
- uint64_t _checkPointOffset; // bit offset saved by checkPointRead
public:
ComprFileReadContext(ComprFileDecodeContext &decodeContext);
@@ -170,19 +153,6 @@ public:
* For unit testing only. Copy data owned by rhs.
*/
void copyWriteContext(const ComprFileWriteContext &rhs);
-
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt.
- */
- void checkPointWrite(vespalib::nbostream &out);
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- void checkPointRead(vespalib::nbostream &in);
- bool getCheckPointOffsetValid() const { return _checkPointOffsetValid; }
- uint64_t getCheckPointOffset() const { return _checkPointOffset; }
};
@@ -216,19 +186,6 @@ public:
*/
virtual uint32_t getUnitByteSize() const = 0;
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt. Caller must
- * save position, although partial unit is saved.
- */
- virtual void checkPointWrite(vespalib::nbostream &out) = 0;
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- * Caller must restore positon, although partial unit is restored.
- */
- virtual void checkPointRead(vespalib::nbostream &in) = 0;
-
virtual uint64_t getBitPosV() const = 0;
};
@@ -281,17 +238,6 @@ public:
* no file is attached.
*/
std::pair<void *, size_t> grabComprBuffer(void *&comprBufMalloc);
-
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt.
- */
- void checkPointWrite(vespalib::nbostream &out);
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- void checkPointRead(vespalib::nbostream &in);
};
}
diff --git a/searchlib/src/vespa/searchlib/util/filealign.cpp b/searchlib/src/vespa/searchlib/util/filealign.cpp
index 15bef611714..b60c0492854 100644
--- a/searchlib/src/vespa/searchlib/util/filealign.cpp
+++ b/searchlib/src/vespa/searchlib/util/filealign.cpp
@@ -1,14 +1,11 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include "filealign.h"
-#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/fastos/file.h>
#include <cassert>
namespace search {
-using vespalib::nbostream;
-
namespace {
size_t
@@ -42,8 +39,7 @@ FileAlign::FileAlign()
_minAlignedSize(1),
_elemSize(1),
_directIOMemAlign(1),
- _directio(false),
- _checkPointResumed(false)
+ _directio(false)
{ }
@@ -116,24 +112,4 @@ FileAlign::setupAlign(size_t elements,
return elements;
}
-
-void
-FileAlign::checkPointWrite(nbostream &out)
-{
- out << _directIOFileAlign << _preferredFileAlign <<
- _minDirectIOSize << _minAlignedSize << _elemSize <<
- _directIOMemAlign << _directio;
-}
-
-
-void
-FileAlign::checkPointRead(nbostream &in)
-{
- in >> _directIOFileAlign >> _preferredFileAlign >>
- _minDirectIOSize >> _minAlignedSize >> _elemSize >>
- _directIOMemAlign >> _directio;
- _checkPointResumed = true;
-}
-
-
}
diff --git a/searchlib/src/vespa/searchlib/util/filealign.h b/searchlib/src/vespa/searchlib/util/filealign.h
index 3e75fb2f371..49d053957ce 100644
--- a/searchlib/src/vespa/searchlib/util/filealign.h
+++ b/searchlib/src/vespa/searchlib/util/filealign.h
@@ -7,8 +7,6 @@
class FastOS_FileInterface;
-namespace vespalib { class nbostream; }
-
namespace search {
class FileAlign
@@ -21,7 +19,6 @@ private:
size_t _elemSize;
size_t _directIOMemAlign;
bool _directio;
- bool _checkPointResumed;
public:
@@ -64,24 +61,12 @@ public:
*/
size_t setupAlign(size_t elements, size_t elemSize, FastOS_FileInterface *file, size_t preferredFileAlignment);
bool getDirectIO() const { return _directio; }
- bool getCheckPointResumed() const { return _checkPointResumed; }
size_t getDirectIOFileAlign() const { return _directIOFileAlign; }
size_t getDirectIOMemAlign() const { return _directIOMemAlign; }
size_t getMinDirectIOSize() const { return _minDirectIOSize; }
size_t getMinAlignedSize() const { return _minAlignedSize; }
size_t getPreferredFileAlign() const { return _preferredFileAlign; }
size_t getElemSize() const { return _elemSize; }
-
- /**
- * Checkpoint write. Used at semi-regular intervals during indexing
- * to allow for continued indexing after an interrupt.
- */
- void checkPointWrite(vespalib::nbostream &out);
-
- /**
- * Checkpoint read. Used when resuming indexing after an interrupt.
- */
- void checkPointRead(vespalib::nbostream &in);
};
}
diff --git a/searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp b/searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp
index 3df589b0491..b2e324eb42e 100644
--- a/searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp
+++ b/searchsummary/src/tests/docsummary/slime_summary/slime_summary_test.cpp
@@ -24,7 +24,7 @@ struct FieldBlock {
: slime(), binary(1024)
{
size_t used = vespalib::slime::JsonFormat::decode(jsonInput, slime);
- EXPECT_EQUAL(jsonInput.size(), used);
+ EXPECT_TRUE(used > 0);
search::SlimeOutputRawBufAdapter adapter(binary);
vespalib::slime::BinaryFormat::encode(slime, adapter);
}
diff --git a/searchsummary/src/vespa/searchsummary/config/CMakeLists.txt b/searchsummary/src/vespa/searchsummary/config/CMakeLists.txt
index 7f6dc5c4cdd..a01938d8ad4 100644
--- a/searchsummary/src/vespa/searchsummary/config/CMakeLists.txt
+++ b/searchsummary/src/vespa/searchsummary/config/CMakeLists.txt
@@ -4,4 +4,4 @@ vespa_add_library(searchsummary_config OBJECT
DEPENDS
)
vespa_generate_config(searchsummary_config juniperrc.def)
-install(FILES juniperrc.def RENAME vespa.config.search.summary.juniperrc.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(juniperrc.def vespa.config.search.summary.juniperrc.def)
diff --git a/service-monitor/pom.xml b/service-monitor/pom.xml
index 8d8518f2b20..80958ca9bee 100644
--- a/service-monitor/pom.xml
+++ b/service-monitor/pom.xml
@@ -37,6 +37,12 @@
</dependency>
<dependency>
<groupId>com.yahoo.vespa</groupId>
+ <artifactId>jdisc_core</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
<artifactId>configdefinitions</artifactId>
<version>${project.version}</version>
<scope>provided</scope>
@@ -54,6 +60,18 @@
<scope>compile</scope>
</dependency>
<dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>config-model-api</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
+ <groupId>com.yahoo.vespa</groupId>
+ <artifactId>config-provisioning</artifactId>
+ <version>${project.version}</version>
+ <scope>provided</scope>
+ </dependency>
+ <dependency>
<groupId>org.json4s</groupId>
<artifactId>json4s-native_${scala.major-version}</artifactId>
<scope>test</scope>
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ConfigServerApplication.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ConfigServerApplication.java
new file mode 100644
index 00000000000..5cffcec82b8
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ConfigServerApplication.java
@@ -0,0 +1,52 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.vespa.applicationmodel.ApplicationInstance;
+import com.yahoo.vespa.applicationmodel.ApplicationInstanceId;
+import com.yahoo.vespa.applicationmodel.ClusterId;
+import com.yahoo.vespa.applicationmodel.ConfigId;
+import com.yahoo.vespa.applicationmodel.HostName;
+import com.yahoo.vespa.applicationmodel.ServiceCluster;
+import com.yahoo.vespa.applicationmodel.ServiceInstance;
+import com.yahoo.vespa.applicationmodel.ServiceType;
+import com.yahoo.vespa.applicationmodel.TenantId;
+
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+/**
+ * A service/application model of the config server with health status.
+ */
+public class ConfigServerApplication {
+ public static final ClusterId CLUSTER_ID = new ClusterId("zone-config-servers");
+ public static final ServiceType SERVICE_TYPE = new ServiceType("configserver");
+ public static final TenantId TENANT_ID = new TenantId("hosted-vespa");
+ public static final ApplicationInstanceId APPLICATION_INSTANCE_ID = new ApplicationInstanceId("zone-config-servers");
+ public static final String CONFIG_ID_PREFIX = "configid.";
+
+ ApplicationInstance<ServiceMonitorStatus> toApplicationInstance(List<String> hostnames) {
+ Set<ServiceInstance<ServiceMonitorStatus>> serviceInstances = hostnames.stream()
+ .map(hostname -> new ServiceInstance<>(
+ new ConfigId(CONFIG_ID_PREFIX + hostname),
+ new HostName(hostname),
+ ServiceMonitorStatus.NOT_CHECKED))
+ .collect(Collectors.toSet());
+
+ ServiceCluster<ServiceMonitorStatus> serviceCluster = new ServiceCluster<>(
+ CLUSTER_ID,
+ SERVICE_TYPE,
+ serviceInstances);
+
+ Set<ServiceCluster<ServiceMonitorStatus>> serviceClusters =
+ Stream.of(serviceCluster).collect(Collectors.toSet());
+
+ ApplicationInstance<ServiceMonitorStatus> applicationInstance = new ApplicationInstance<>(
+ TENANT_ID,
+ APPLICATION_INSTANCE_ID,
+ serviceClusters);
+
+ return applicationInstance;
+ }
+}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ModelGenerator.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ModelGenerator.java
new file mode 100644
index 00000000000..ed40dcc675a
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ModelGenerator.java
@@ -0,0 +1,139 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.HostInfo;
+import com.yahoo.config.model.api.ServiceInfo;
+import com.yahoo.config.model.api.SuperModel;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.applicationmodel.ApplicationInstance;
+import com.yahoo.vespa.applicationmodel.ApplicationInstanceId;
+import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
+import com.yahoo.vespa.applicationmodel.ClusterId;
+import com.yahoo.vespa.applicationmodel.ConfigId;
+import com.yahoo.vespa.applicationmodel.HostName;
+import com.yahoo.vespa.applicationmodel.ServiceCluster;
+import com.yahoo.vespa.applicationmodel.ServiceClusterKey;
+import com.yahoo.vespa.applicationmodel.ServiceInstance;
+import com.yahoo.vespa.applicationmodel.ServiceType;
+import com.yahoo.vespa.applicationmodel.TenantId;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * Util to convert SuperModel to ServiceModel and application model classes
+ */
+public class ModelGenerator {
+ public static final String CLUSTER_ID_PROPERTY_NAME = "clustername";
+
+ /**
+ * Create service model based primarily on super model.
+ *
+ * If the configServerhosts is non-empty, a config server application is added.
+ */
+ ServiceModel toServiceModel(
+ SuperModel superModel,
+ Zone zone,
+ List<String> configServerHosts,
+ SlobrokMonitorManager slobrokMonitorManager) {
+ Map<ApplicationInstanceReference,
+ ApplicationInstance<ServiceMonitorStatus>> applicationInstances = new HashMap<>();
+
+ for (ApplicationInfo applicationInfo : superModel.getAllApplicationInfos()) {
+
+ ApplicationInstance<ServiceMonitorStatus> applicationInstance = toApplicationInstance(
+ applicationInfo,
+ zone,
+ slobrokMonitorManager);
+ applicationInstances.put(applicationInstance.reference(), applicationInstance);
+ }
+
+ // The config server is part of the service model (but not super model)
+ if (!configServerHosts.isEmpty()) {
+ ConfigServerApplication configServerApplication = new ConfigServerApplication();
+ ApplicationInstance<ServiceMonitorStatus> configServerApplicationInstance =
+ configServerApplication.toApplicationInstance(configServerHosts);
+ applicationInstances.put(configServerApplicationInstance.reference(), configServerApplicationInstance);
+ }
+
+ return new ServiceModel(applicationInstances);
+ }
+
+ ApplicationInstance<ServiceMonitorStatus> toApplicationInstance(
+ ApplicationInfo applicationInfo,
+ Zone zone,
+ SlobrokMonitorManager slobrokMonitorManager) {
+ Map<ServiceClusterKey, Set<ServiceInstance<ServiceMonitorStatus>>> groupedServiceInstances = new HashMap<>();
+
+ for (HostInfo host : applicationInfo.getModel().getHosts()) {
+ HostName hostName = new HostName(host.getHostname());
+ for (ServiceInfo serviceInfo : host.getServices()) {
+ ServiceClusterKey serviceClusterKey = toServiceClusterKey(serviceInfo);
+ ServiceInstance<ServiceMonitorStatus> serviceInstance =
+ toServiceInstance(
+ applicationInfo.getApplicationId(),
+ serviceInfo,
+ hostName,
+ slobrokMonitorManager);
+
+ if (!groupedServiceInstances.containsKey(serviceClusterKey)) {
+ groupedServiceInstances.put(serviceClusterKey, new HashSet<>());
+ }
+ groupedServiceInstances.get(serviceClusterKey).add(serviceInstance);
+ }
+ }
+
+ Set<ServiceCluster<ServiceMonitorStatus>> serviceClusters = groupedServiceInstances.entrySet().stream()
+ .map(entry -> new ServiceCluster<>(
+ entry.getKey().clusterId(),
+ entry.getKey().serviceType(),
+ entry.getValue()))
+ .collect(Collectors.toSet());
+
+ ApplicationInstance<ServiceMonitorStatus> applicationInstance = new ApplicationInstance<>(
+ new TenantId(applicationInfo.getApplicationId().tenant().toString()),
+ toApplicationInstanceId(applicationInfo, zone),
+ serviceClusters);
+
+ return applicationInstance;
+ }
+
+ ServiceClusterKey toServiceClusterKey(ServiceInfo serviceInfo) {
+ ClusterId clusterId = new ClusterId(serviceInfo.getProperty(CLUSTER_ID_PROPERTY_NAME).orElse(""));
+ ServiceType serviceType = toServiceType(serviceInfo);
+ return new ServiceClusterKey(clusterId, serviceType);
+ }
+
+ ServiceInstance<ServiceMonitorStatus> toServiceInstance(
+ ApplicationId applicationId,
+ ServiceInfo serviceInfo,
+ HostName hostName,
+ SlobrokMonitorManager slobrokMonitorManager) {
+ ConfigId configId = new ConfigId(serviceInfo.getConfigId());
+
+ ServiceMonitorStatus status = slobrokMonitorManager.getStatus(
+ applicationId,
+ toServiceType(serviceInfo),
+ configId);
+
+ return new ServiceInstance<>(configId, hostName, status);
+ }
+
+ ApplicationInstanceId toApplicationInstanceId(ApplicationInfo applicationInfo, Zone zone) {
+ return new ApplicationInstanceId(String.format("%s:%s:%s:%s",
+ applicationInfo.getApplicationId().application().value(),
+ zone.environment().value(),
+ zone.region().value(),
+ applicationInfo.getApplicationId().instance().value()));
+ }
+
+ ServiceType toServiceType(ServiceInfo serviceInfo) {
+ return new ServiceType(serviceInfo.getServiceType());
+ }
+}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ServiceModel.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ServiceModel.java
new file mode 100644
index 00000000000..b39af0238c5
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ServiceModel.java
@@ -0,0 +1,34 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.vespa.applicationmodel.ApplicationInstance;
+import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Optional;
+
+/**
+ * The ServiceModel is almost a mirror of the SuperModel, except that it
+ * also gives ServiceMonitorStatus on each service, and there may be
+ * artificial applications like the config server "application".
+ */
+// @Immutable
+public class ServiceModel {
+ private final Map<ApplicationInstanceReference,
+ ApplicationInstance<ServiceMonitorStatus>> applications;
+
+ ServiceModel(Map<ApplicationInstanceReference,
+ ApplicationInstance<ServiceMonitorStatus>> applications) {
+ this.applications = Collections.unmodifiableMap(applications);
+ }
+
+ Map<ApplicationInstanceReference,
+ ApplicationInstance<ServiceMonitorStatus>> getAllApplicationInstances() {
+ return applications;
+ }
+
+ Optional<ApplicationInstance<ServiceMonitorStatus>> getApplicationInstance(ApplicationInstanceReference reference) {
+ return Optional.ofNullable(applications.get(reference));
+ }
+}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ServiceMonitorImpl.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ServiceMonitorImpl.java
new file mode 100644
index 00000000000..80be6470686
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/ServiceMonitorImpl.java
@@ -0,0 +1,65 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.google.inject.Inject;
+import com.yahoo.cloud.config.ConfigserverConfig;
+import com.yahoo.config.model.api.SuperModelProvider;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.jdisc.Metric;
+import com.yahoo.jdisc.Timer;
+import com.yahoo.vespa.applicationmodel.ApplicationInstance;
+import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
+import com.yahoo.vespa.service.monitor.internal.ServiceModelCache;
+import com.yahoo.vespa.service.monitor.internal.ServiceMonitorMetrics;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+
+public class ServiceMonitorImpl implements ServiceMonitor {
+ private static final Logger logger = Logger.getLogger(ServiceMonitorImpl.class.getName());
+
+ private final Zone zone;
+ private final List<String> configServerHosts;
+ private final SlobrokMonitorManager slobrokMonitorManager = new SlobrokMonitorManager();
+ private final ServiceModelCache serviceModelCache;
+
+ @Inject
+ public ServiceMonitorImpl(SuperModelProvider superModelProvider,
+ ConfigserverConfig configserverConfig,
+ Metric metric,
+ Timer timer) {
+ this.zone = superModelProvider.getZone();
+ this.configServerHosts = toConfigServerList(configserverConfig);
+ ServiceMonitorMetrics metrics = new ServiceMonitorMetrics(metric, timer);
+
+ SuperModelListenerImpl superModelListener = new SuperModelListenerImpl(
+ slobrokMonitorManager,
+ metrics,
+ new ModelGenerator(),
+ zone,
+ configServerHosts);
+ superModelListener.start(superModelProvider);
+ serviceModelCache = new ServiceModelCache(
+ () -> superModelListener.get(),
+ timer);
+ }
+
+ private List<String> toConfigServerList(ConfigserverConfig configserverConfig) {
+ if (configserverConfig.multitenant()) {
+ return configserverConfig.zookeeperserver().stream()
+ .map(server -> server.hostname())
+ .collect(Collectors.toList());
+ }
+
+ return Collections.emptyList();
+ }
+
+ @Override
+ public Map<ApplicationInstanceReference,
+ ApplicationInstance<ServiceMonitorStatus>> queryStatusOfAllApplicationInstances() {
+ return serviceModelCache.get().getAllApplicationInstances();
+ }
+}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SlobrokMonitor2.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SlobrokMonitor2.java
new file mode 100644
index 00000000000..8737d915bcf
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SlobrokMonitor2.java
@@ -0,0 +1,77 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.HostInfo;
+import com.yahoo.config.model.api.PortInfo;
+import com.yahoo.config.model.api.ServiceInfo;
+import com.yahoo.jrt.Spec;
+import com.yahoo.jrt.Supervisor;
+import com.yahoo.jrt.Transport;
+import com.yahoo.jrt.slobrok.api.Mirror;
+import com.yahoo.jrt.slobrok.api.SlobrokList;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ * Class to manage Slobrok
+ */
+public class SlobrokMonitor2 implements AutoCloseable {
+ public static final String SLOBROK_SERVICE_TYPE = "slobrok";
+ public static final String SLOBROK_RPC_PORT_TAG = "rpc";
+
+ private final SlobrokList slobrokList;
+ private final Mirror mirror;
+
+ SlobrokMonitor2() {
+ this(new SlobrokList());
+ }
+
+ // Package-private for testing.
+ SlobrokMonitor2(SlobrokList slobrokList, Mirror mirror) {
+ this.slobrokList = slobrokList;
+ this.mirror = mirror;
+ }
+
+ private SlobrokMonitor2(SlobrokList slobrokList) {
+ this(slobrokList, new Mirror(new Supervisor(new Transport()), slobrokList));
+ }
+
+ void updateSlobrokList(ApplicationInfo application) {
+ List<String> slobrokSpecs = getSlobrokSpecs(application);
+ slobrokList.setup(slobrokSpecs.toArray(new String[0]));
+ }
+
+ List<String> getSlobrokSpecs(ApplicationInfo applicationInfo) {
+ List<String> slobrokSpecs = new ArrayList<>();
+
+ for (HostInfo host : applicationInfo.getModel().getHosts()) {
+ for (ServiceInfo service : host.getServices()) {
+ if (!Objects.equals(service.getServiceType(), SLOBROK_SERVICE_TYPE)) {
+ continue;
+ }
+
+ for (PortInfo port : service.getPorts()) {
+ if (port.getTags().contains(SLOBROK_RPC_PORT_TAG)) {
+ Spec spec = new Spec(host.getHostname(), port.getPort());
+ slobrokSpecs.add(spec.toString());
+ }
+ }
+ }
+ }
+
+ return slobrokSpecs;
+ }
+
+ @Override
+ public void close() {
+ // TODO: Make sure registeredInSlobrok returns DOWN from now on (concurrently)
+ mirror.shutdown();
+ }
+
+ boolean registeredInSlobrok(String slobrokServiceName) {
+ return mirror.lookup(slobrokServiceName).length > 0;
+ }
+}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SlobrokMonitorManager.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SlobrokMonitorManager.java
new file mode 100644
index 00000000000..95baa9013d1
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SlobrokMonitorManager.java
@@ -0,0 +1,114 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.SuperModel;
+import com.yahoo.config.model.api.SuperModelListener;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.log.LogLevel;
+import com.yahoo.vespa.applicationmodel.ConfigId;
+import com.yahoo.vespa.applicationmodel.ServiceType;
+
+import java.util.HashMap;
+import java.util.Optional;
+import java.util.function.Supplier;
+import java.util.logging.Logger;
+
+public class SlobrokMonitorManager implements SuperModelListener {
+ private static final Logger logger =
+ Logger.getLogger(SlobrokMonitorManager.class.getName());
+
+ private final Supplier<SlobrokMonitor2> slobrokMonitorFactory;
+
+ private final Object monitor = new Object();
+ private final HashMap<ApplicationId, SlobrokMonitor2> slobrokMonitors = new HashMap<>();
+
+ SlobrokMonitorManager() {
+ this(() -> new SlobrokMonitor2());
+ }
+
+ SlobrokMonitorManager(Supplier<SlobrokMonitor2> slobrokMonitorFactory) {
+ this.slobrokMonitorFactory = slobrokMonitorFactory;
+ }
+
+ @Override
+ public void applicationActivated(SuperModel superModel, ApplicationInfo application) {
+ synchronized (monitor) {
+ SlobrokMonitor2 slobrokMonitor = slobrokMonitors.computeIfAbsent(
+ application.getApplicationId(),
+ id -> slobrokMonitorFactory.get());
+ slobrokMonitor.updateSlobrokList(application);
+ }
+ }
+
+ @Override
+ public void applicationRemoved(SuperModel superModel, ApplicationId id) {
+ synchronized (monitor) {
+ SlobrokMonitor2 slobrokMonitor = slobrokMonitors.remove(id);
+ if (slobrokMonitor == null) {
+ logger.log(LogLevel.WARNING, "Removed application " + id +
+ ", but it was never registered");
+ } else {
+ slobrokMonitor.close();
+ }
+ }
+ }
+
+ ServiceMonitorStatus getStatus(ApplicationId applicationId,
+ ServiceType serviceType,
+ ConfigId configId) {
+ Optional<String> slobrokServiceName = findSlobrokServiceName(serviceType, configId);
+ if (slobrokServiceName.isPresent()) {
+ synchronized (monitor) {
+ SlobrokMonitor2 slobrokMonitor = slobrokMonitors.get(applicationId);
+ if (slobrokMonitor != null &&
+ slobrokMonitor.registeredInSlobrok(slobrokServiceName.get())) {
+ return ServiceMonitorStatus.UP;
+ } else {
+ return ServiceMonitorStatus.DOWN;
+ }
+ }
+ } else {
+ return ServiceMonitorStatus.NOT_CHECKED;
+ }
+ }
+
+ /**
+ * Get the Slobrok service name of the service, or empty if the service
+ * is not registered with Slobrok.
+ */
+ Optional<String> findSlobrokServiceName(ServiceType serviceType, ConfigId configId) {
+ switch (serviceType.s()) {
+ case "adminserver":
+ case "config-sentinel":
+ case "configproxy":
+ case "configserver":
+ case "filedistributorservice":
+ case "logd":
+ case "logserver":
+ case "metricsproxy":
+ case "slobrok":
+ case "transactionlogserver":
+ return Optional.empty();
+
+ case "topleveldispatch":
+ return Optional.of(configId.s());
+
+ case "qrserver":
+ case "container":
+ case "docprocservice":
+ case "container-clustercontroller":
+ return Optional.of("vespa/service/" + configId.s());
+
+ case "searchnode": //TODO: handle only as storagenode instead of both as searchnode/storagenode
+ return Optional.of(configId.s() + "/realtimecontroller");
+ case "distributor":
+ case "storagenode":
+ return Optional.of("storage/cluster." + configId.s());
+ default:
+ logger.log(LogLevel.DEBUG, "Unknown service type " + serviceType.s() +
+ " with config id " + configId.s());
+ return Optional.empty();
+ }
+ }
+}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SuperModelListenerImpl.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SuperModelListenerImpl.java
new file mode 100644
index 00000000000..077cb75ac2c
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/SuperModelListenerImpl.java
@@ -0,0 +1,89 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.SuperModel;
+import com.yahoo.config.model.api.SuperModelListener;
+import com.yahoo.config.model.api.SuperModelProvider;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.service.monitor.internal.LatencyMeasurement;
+import com.yahoo.vespa.service.monitor.internal.ServiceMonitorMetrics;
+
+import java.util.List;
+import java.util.function.Supplier;
+import java.util.logging.Logger;
+
+public class SuperModelListenerImpl implements SuperModelListener, Supplier<ServiceModel> {
+ private static final Logger logger = Logger.getLogger(SuperModelListenerImpl.class.getName());
+
+ private final ServiceMonitorMetrics metrics;
+ private final ModelGenerator modelGenerator;
+ private final Zone zone;
+ private final List<String> configServerHosts;
+
+ // superModel and slobrokMonitorManager are always updated together
+ // and atomically using this monitor.
+ private final Object monitor = new Object();
+ private final SlobrokMonitorManager slobrokMonitorManager;
+ private SuperModel superModel;
+
+ SuperModelListenerImpl(SlobrokMonitorManager slobrokMonitorManager,
+ ServiceMonitorMetrics metrics,
+ ModelGenerator modelGenerator,
+ Zone zone,
+ List<String> configServerHosts) {
+ this.slobrokMonitorManager = slobrokMonitorManager;
+ this.metrics = metrics;
+ this.modelGenerator = modelGenerator;
+ this.zone = zone;
+ this.configServerHosts = configServerHosts;
+ }
+
+ void start(SuperModelProvider superModelProvider) {
+ synchronized (monitor) {
+ // This snapshot() call needs to be within the synchronized block,
+ // since applicationActivated()/applicationRemoved() may be called
+ // asynchronously even before snapshot() returns.
+ SuperModel snapshot = superModelProvider.snapshot(this);
+
+ snapshot.getAllApplicationInfos().stream().forEach(application ->
+ applicationActivated(snapshot, application));
+ }
+ }
+
+ @Override
+ public void applicationActivated(SuperModel superModel, ApplicationInfo application) {
+ synchronized (monitor) {
+ this.superModel = superModel;
+ slobrokMonitorManager.applicationActivated(superModel, application);
+ }
+ }
+
+ @Override
+ public void applicationRemoved(SuperModel superModel, ApplicationId id) {
+ synchronized (monitor) {
+ this.superModel = superModel;
+ slobrokMonitorManager.applicationRemoved(superModel, id);
+ }
+ }
+
+ @Override
+ public ServiceModel get() {
+ try (LatencyMeasurement measurement = metrics.startServiceModelSnapshotLatencyMeasurement()) {
+ // Reference 'measurement' in a dummy statement, otherwise the compiler
+ // complains about "auto-closeable resource is never referenced in body of
+ // corresponding try statement". Why hasn't javac fixed this!?
+ dummy(measurement);
+
+ // WARNING: The slobrok monitor manager may be out-of-sync with super model (no locking)
+ return modelGenerator.toServiceModel(
+ superModel,
+ zone,
+ configServerHosts,
+ slobrokMonitorManager);
+ }
+ }
+
+ private void dummy(LatencyMeasurement measurement) {}
+} \ No newline at end of file
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/LatencyMeasurement.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/LatencyMeasurement.java
new file mode 100644
index 00000000000..c7b9782972a
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/LatencyMeasurement.java
@@ -0,0 +1,36 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.service.monitor.internal;
+
+import com.yahoo.jdisc.Timer;
+
+import java.util.function.Consumer;
+
+public class LatencyMeasurement implements AutoCloseable {
+ private final Timer timer;
+ private Consumer<Double> elapsedSecondsConsumer;
+ private long startMillis;
+
+ LatencyMeasurement(Timer timer, Consumer<Double> elapsedSecondsConsumer) {
+ this.timer = timer;
+ this.elapsedSecondsConsumer = elapsedSecondsConsumer;
+ }
+
+ LatencyMeasurement start() {
+ startMillis = timer.currentTimeMillis();
+ return this;
+ }
+
+ @Override
+ public void close() {
+ if (elapsedSecondsConsumer != null) {
+ double elapsedSeconds = forceNonNegative(timer.currentTimeMillis() - startMillis) / 1000;
+ elapsedSecondsConsumer.accept(elapsedSeconds);
+ elapsedSecondsConsumer = null;
+ }
+ }
+
+ private static double forceNonNegative(double d) {
+ return d > 0 ? d : 0;
+ }
+}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/ServiceModelCache.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/ServiceModelCache.java
new file mode 100644
index 00000000000..89a961e1037
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/ServiceModelCache.java
@@ -0,0 +1,62 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.service.monitor.internal;
+
+import com.yahoo.jdisc.Timer;
+import com.yahoo.vespa.service.monitor.ServiceModel;
+
+import java.util.function.Supplier;
+
+public class ServiceModelCache implements Supplier<ServiceModel> {
+ public static final long EXPIRY_MILLIS = 10000;
+
+ private final Supplier<ServiceModel> expensiveSupplier;
+ private final Timer timer;
+
+ private volatile ServiceModel snapshot;
+ private boolean updatePossiblyInProgress = false;
+
+ private final Object updateMonitor = new Object();
+ private long snapshotMillis;
+
+ public ServiceModelCache(Supplier<ServiceModel> expensiveSupplier, Timer timer) {
+ this.expensiveSupplier = expensiveSupplier;
+ this.timer = timer;
+ }
+
+ @Override
+ public ServiceModel get() {
+ if (snapshot == null) {
+ synchronized (updateMonitor) {
+ if (snapshot == null) {
+ takeSnapshot();
+ }
+ }
+ } else if (expired()) {
+ synchronized (updateMonitor) {
+ if (updatePossiblyInProgress) {
+ return snapshot;
+ }
+
+ updatePossiblyInProgress = true;
+ }
+
+ takeSnapshot();
+
+ synchronized (updateMonitor) {
+ updatePossiblyInProgress = false;
+ }
+ }
+
+ return snapshot;
+ }
+
+ private void takeSnapshot() {
+ snapshot = expensiveSupplier.get();
+ snapshotMillis = timer.currentTimeMillis();
+ }
+
+ private boolean expired() {
+ return timer.currentTimeMillis() - snapshotMillis >= EXPIRY_MILLIS;
+ }
+}
diff --git a/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/ServiceMonitorMetrics.java b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/ServiceMonitorMetrics.java
new file mode 100644
index 00000000000..e55a66473bb
--- /dev/null
+++ b/service-monitor/src/main/java/com/yahoo/vespa/service/monitor/internal/ServiceMonitorMetrics.java
@@ -0,0 +1,38 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.service.monitor.internal;
+
+import com.yahoo.jdisc.Metric;
+import com.yahoo.jdisc.Timer;
+
+import java.util.function.Consumer;
+
+public class ServiceMonitorMetrics {
+ public static String SERVICE_MODEL_METRIC_PREFIX = "serviceModel.";
+
+ private final Metric metric;
+ private final Timer timer;
+
+ public ServiceMonitorMetrics(Metric metric, Timer timer) {
+ this.metric = metric;
+ this.timer = timer;
+ }
+
+ public LatencyMeasurement startServiceModelSnapshotLatencyMeasurement() {
+ Consumer<Double> atCompletion = elapsedSeconds ->
+ setValue(metricKey("snapshot.latency"), elapsedSeconds);
+ return new LatencyMeasurement(timer, atCompletion).start();
+ }
+
+ private static String metricKey(String suffix) {
+ return SERVICE_MODEL_METRIC_PREFIX + suffix;
+ }
+
+ private void setValue(String key, Number number) {
+ setValue(key, number, null);
+ }
+
+ private void setValue(String key, Number number, Metric.Context context) {
+ metric.set(key, number, context);
+ }
+}
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ConfigServerApplicationTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ConfigServerApplicationTest.java
new file mode 100644
index 00000000000..ec91507c846
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ConfigServerApplicationTest.java
@@ -0,0 +1,61 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.vespa.applicationmodel.ApplicationInstance;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ConfigServerApplicationTest {
+ private static final String configServer1 = "cfg1.yahoo.com";
+ private static final String configServer2 = "cfg2.yahoo.com";
+ private static final String configServer3 = "cfg3.yahoo.com";
+ private static final List<String> configServerList = Stream.of(
+ configServer1,
+ configServer2,
+ configServer3).collect(Collectors.toList());
+
+ @Test
+ public void toApplicationInstance() throws Exception {
+ ConfigServerApplication application = new ConfigServerApplication();
+ ApplicationInstance<ServiceMonitorStatus> applicationInstance =
+ application.toApplicationInstance(configServerList);
+
+ // Backward compatibility check
+ assertEquals(
+ SlobrokAndConfigIntersector.configServerApplicationInstanceId(),
+ applicationInstance.applicationInstanceId());
+ assertEquals(
+ SlobrokAndConfigIntersector.syntheticHostedVespaTenantId(),
+ applicationInstance.tenantId());
+
+ assertEquals(
+ ConfigServerApplication.TENANT_ID.toString() +
+ ":" + ConfigServerApplication.APPLICATION_INSTANCE_ID,
+ applicationInstance.reference().toString());
+
+ assertEquals(
+ ConfigServerApplication.CLUSTER_ID,
+ applicationInstance.serviceClusters().iterator().next().clusterId());
+
+ assertEquals(
+ ServiceMonitorStatus.NOT_CHECKED,
+ applicationInstance
+ .serviceClusters().iterator().next()
+ .serviceInstances().iterator().next()
+ .serviceStatus());
+
+ assertTrue(configServerList.contains(
+ applicationInstance
+ .serviceClusters().iterator().next()
+ .serviceInstances().iterator().next()
+ .hostName()
+ .toString()));
+ }
+
+} \ No newline at end of file
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ExampleModel.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ExampleModel.java
new file mode 100644
index 00000000000..5d7d955f15e
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ExampleModel.java
@@ -0,0 +1,164 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.HostInfo;
+import com.yahoo.config.model.api.Model;
+import com.yahoo.config.model.api.PortInfo;
+import com.yahoo.config.model.api.ServiceInfo;
+import com.yahoo.config.model.api.SuperModel;
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.TenantName;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class ExampleModel {
+
+ static final String CLUSTER_ID = "cluster-id";
+ static final String SERVICE_NAME = "service-name";
+ static final String SERVICE_TYPE = SlobrokMonitor2.SLOBROK_SERVICE_TYPE;
+ static final String CONFIG_ID = "configid/1";
+ static final String TENANT = "tenant";
+ static final String APPLICATION_NAME = "application";
+ public static final String INSTANCE_NAME = "default";
+
+ static SuperModel createExampleSuperModelWithOneRpcPort(String hostname, int rpcPort) {
+ List<String> hosts = Stream.of(hostname).collect(Collectors.toList());
+
+ ApplicationInfo applicationInfo = ExampleModel
+ .createApplication(TENANT, APPLICATION_NAME)
+ .addServiceCluster(CLUSTER_ID, SERVICE_NAME, SERVICE_TYPE, hosts)
+ .addPort(rpcPort, "footag", SlobrokMonitor2.SLOBROK_RPC_PORT_TAG)
+ .addPort(rpcPort + 1, "bartag")
+ .then()
+ .build();
+
+ Map<TenantName, Map<ApplicationId, ApplicationInfo>> applicationInfos = new HashMap<>();
+ applicationInfos.put(applicationInfo.getApplicationId().tenant(), new HashMap<>());
+ applicationInfos.get(applicationInfo.getApplicationId().tenant())
+ .put(applicationInfo.getApplicationId(), applicationInfo);
+ return new SuperModel(applicationInfos);
+ }
+
+ static ApplicationBuilder createApplication(String tenant,
+ String applicationName) {
+ return new ApplicationBuilder(tenant, applicationName);
+ }
+
+
+ static class ApplicationBuilder {
+ private final String tenant;
+ private final String applicationName;
+ private final List<ClusterBuilder> clusters = new ArrayList<>();
+
+ ApplicationBuilder(String tenant, String applicationName) {
+ this.tenant = tenant;
+ this.applicationName = applicationName;
+ }
+
+ ClusterBuilder addServiceCluster(
+ String clusterId,
+ String serviceName,
+ String serviceType,
+ List<String> hosts) {
+ return new ClusterBuilder(
+ this,
+ clusterId,
+ serviceName,
+ serviceType,
+ hosts);
+ }
+
+ ApplicationInfo build() {
+ List<String> allHosts = clusters.stream()
+ .flatMap(clusterBuilder -> clusterBuilder.hosts.stream())
+ .distinct()
+ .collect(Collectors.toList());
+
+ List<HostInfo> hostInfos = new ArrayList<>();
+ for (String hostname : allHosts) {
+ List<ServiceInfo> serviceInfos = new ArrayList<>();
+ for (ClusterBuilder cluster : clusters) {
+ buildServiceInfo(hostname, cluster).ifPresent(serviceInfos::add);
+ }
+
+ HostInfo hostInfo = new HostInfo(hostname, serviceInfos);
+ hostInfos.add(hostInfo);
+ }
+
+ ApplicationId id = ApplicationId.from(
+ tenant,
+ applicationName,
+ InstanceName.defaultName().toString());
+
+ Model model = mock(Model.class);
+ when(model.getHosts()).thenReturn(hostInfos);
+
+ return new ApplicationInfo(id, 1, model);
+ }
+
+ private Optional<ServiceInfo> buildServiceInfo(
+ String hostname,
+ ClusterBuilder cluster) {
+ int hostIndex = cluster.hosts.indexOf(hostname);
+ if (hostIndex < 0) {
+ return Optional.empty();
+ }
+
+ Map<String, String> properties = new HashMap<>();
+ properties.put(ModelGenerator.CLUSTER_ID_PROPERTY_NAME, cluster.clusterId);
+ return Optional.of(new ServiceInfo(
+ cluster.serviceName,
+ cluster.serviceType,
+ cluster.portInfos,
+ properties,
+ "configid/" + (hostIndex + 1),
+ hostname));
+ }
+ }
+
+ static class ClusterBuilder {
+ private final ApplicationBuilder applicationBuilder;
+ private final String clusterId;
+ private final String serviceName;
+ private final String serviceType;
+ private final List<String> hosts;
+ private final List<PortInfo> portInfos = new ArrayList<>();
+
+ ClusterBuilder(ApplicationBuilder applicationBuilder,
+ String clusterId,
+ String serviceName,
+ String serviceType,
+ List<String> hosts) {
+ this.applicationBuilder = applicationBuilder;
+ this.clusterId = clusterId;
+ this.serviceName = serviceName;
+ this.serviceType = serviceType;
+ this.hosts = hosts;
+ }
+
+ /**
+ * A bit unrealistic, but the port is the same on all hosts.
+ */
+ ClusterBuilder addPort(int port, String... tags) {
+ portInfos.add(new PortInfo(port, Arrays.asList(tags)));
+ return this;
+ }
+
+ ApplicationBuilder then() {
+ applicationBuilder.clusters.add(this);
+ return applicationBuilder;
+ }
+ }
+}
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ExampleModelTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ExampleModelTest.java
new file mode 100644
index 00000000000..bcd4eae1b4e
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ExampleModelTest.java
@@ -0,0 +1,93 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.HostInfo;
+import com.yahoo.config.model.api.ServiceInfo;
+import org.junit.Test;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.junit.Assert.assertEquals;
+
+public class ExampleModelTest {
+ @Test
+ public void testEmptyApplication() {
+ ApplicationInfo application = ExampleModel
+ .createApplication(
+ "tenant",
+ "app")
+ .build();
+
+ assertEquals("tenant.app", application.getApplicationId().toString());
+ assertEquals(1, application.getGeneration());
+ assertEquals(0, application.getModel().getHosts().size());
+ }
+
+ @Test
+ public void test() {
+ List<String> contentNodes = Stream.of("host1", "host2").collect(Collectors.toList());
+ List<String> containerNodes = Stream.of("host3", "host4").collect(Collectors.toList());
+
+ ApplicationInfo application = ExampleModel
+ .createApplication(
+ "tenant",
+ "app")
+ .addServiceCluster(
+ "product-controllers",
+ "container-clustercontroller.1",
+ "container-clustercontroller",
+ contentNodes)
+ .then()
+ .addServiceCluster(
+ "product",
+ "searchnode.1",
+ "searchnode",
+ contentNodes)
+ .then()
+ .addServiceCluster(
+ "admin",
+ "slobrok.1",
+ "slobrok",
+ containerNodes)
+ .then()
+ .addServiceCluster(
+ "default",
+ "container.1",
+ "container",
+ containerNodes)
+ .then()
+ .build();
+
+ assertEquals("tenant.app", application.getApplicationId().toString());
+
+ Collection<HostInfo> hostInfos = application.getModel().getHosts();
+ assertEquals(containerNodes.size() + contentNodes.size(), hostInfos.size());
+
+ HostInfo host1 = hostInfos.stream()
+ .filter(hostInfo -> hostInfo.getHostname().equals("host1"))
+ .findAny()
+ .orElseThrow(() -> new RuntimeException());
+ ServiceInfo controller1 = host1.getServices().stream()
+ .filter(i -> i.getServiceType().equals("container-clustercontroller"))
+ .findAny()
+ .orElseThrow(() -> new RuntimeException());
+
+ assertEquals("container-clustercontroller", controller1.getServiceType());
+ assertEquals("configid/1", controller1.getConfigId());
+
+ HostInfo host4 = hostInfos.stream()
+ .filter(hostInfo -> hostInfo.getHostname().equals("host4"))
+ .findAny()
+ .orElseThrow(() -> new RuntimeException());
+ ServiceInfo slobrok2 = host4.getServices().stream()
+ .filter(i -> i.getServiceType().equals("slobrok"))
+ .findAny()
+ .orElseThrow(() -> new RuntimeException());
+ assertEquals("configid/2", slobrok2.getConfigId());
+ }
+}
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ModelGeneratorTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ModelGeneratorTest.java
new file mode 100644
index 00000000000..4b82286235e
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/ModelGeneratorTest.java
@@ -0,0 +1,138 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.SuperModel;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.applicationmodel.ApplicationInstance;
+import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
+import com.yahoo.vespa.applicationmodel.ServiceCluster;
+import com.yahoo.vespa.applicationmodel.ServiceInstance;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class ModelGeneratorTest {
+ private final String ENVIRONMENT = "prod";
+ private final String REGION = "us-west-1";
+ private final String HOSTNAME = "hostname";
+ private final int PORT = 2;
+
+ @Test
+ public void toApplicationModelWithConfigServerApplication() throws Exception {
+ SuperModel superModel =
+ ExampleModel.createExampleSuperModelWithOneRpcPort(HOSTNAME, PORT);
+ ModelGenerator modelGenerator = new ModelGenerator();
+
+ Zone zone = new Zone(Environment.from(ENVIRONMENT), RegionName.from(REGION));
+
+ List<String> configServerHosts = Stream.of("cfg1", "cfg2", "cfg3")
+ .collect(Collectors.toList());
+
+ SlobrokMonitorManager slobrokMonitorManager = mock(SlobrokMonitorManager.class);
+ when(slobrokMonitorManager.getStatus(any(), any(), any()))
+ .thenReturn(ServiceMonitorStatus.UP);
+
+ ServiceModel serviceModel =
+ modelGenerator.toServiceModel(
+ superModel,
+ zone,
+ configServerHosts,
+ slobrokMonitorManager);
+
+ Map<ApplicationInstanceReference,
+ ApplicationInstance<ServiceMonitorStatus>> applicationInstances =
+ serviceModel.getAllApplicationInstances();
+
+ assertEquals(2, applicationInstances.size());
+
+ Iterator<Map.Entry<ApplicationInstanceReference,
+ ApplicationInstance<ServiceMonitorStatus>>> iterator =
+ applicationInstances.entrySet().iterator();
+
+ ApplicationInstance<ServiceMonitorStatus> applicationInstance1 = iterator.next().getValue();
+ ApplicationInstance<ServiceMonitorStatus> applicationInstance2 = iterator.next().getValue();
+
+ if (applicationInstance1.applicationInstanceId().equals(
+ ConfigServerApplication.APPLICATION_INSTANCE_ID)) {
+ verifyConfigServerApplication(applicationInstance1);
+ verifyOtherApplication(applicationInstance2);
+ } else {
+ verifyConfigServerApplication(applicationInstance2);
+ verifyOtherApplication(applicationInstance1);
+ }
+ }
+
+ @Test
+ public void toApplicationModel() throws Exception {
+ SuperModel superModel =
+ ExampleModel.createExampleSuperModelWithOneRpcPort(HOSTNAME, PORT);
+ ModelGenerator modelGenerator = new ModelGenerator();
+
+ Zone zone = new Zone(Environment.from(ENVIRONMENT), RegionName.from(REGION));
+
+ List<String> configServerHosts = Collections.emptyList();
+
+ SlobrokMonitorManager slobrokMonitorManager = mock(SlobrokMonitorManager.class);
+ when(slobrokMonitorManager.getStatus(any(), any(), any()))
+ .thenReturn(ServiceMonitorStatus.UP);
+
+ ServiceModel serviceModel =
+ modelGenerator.toServiceModel(
+ superModel,
+ zone,
+ configServerHosts,
+ slobrokMonitorManager);
+
+ Map<ApplicationInstanceReference,
+ ApplicationInstance<ServiceMonitorStatus>> applicationInstances =
+ serviceModel.getAllApplicationInstances();
+
+ assertEquals(1, applicationInstances.size());
+ verifyOtherApplication(applicationInstances.values().iterator().next());
+ }
+
+ private void verifyOtherApplication(ApplicationInstance<ServiceMonitorStatus> applicationInstance) {
+ assertEquals(String.format("%s:%s:%s:%s:%s",
+ ExampleModel.TENANT,
+ ExampleModel.APPLICATION_NAME,
+ ENVIRONMENT,
+ REGION,
+ ExampleModel.INSTANCE_NAME),
+ applicationInstance.reference().toString());
+
+ assertEquals(ExampleModel.TENANT, applicationInstance.tenantId().toString());
+ Set<ServiceCluster<ServiceMonitorStatus>> serviceClusters =
+ applicationInstance.serviceClusters();
+ assertEquals(1, serviceClusters.size());
+ ServiceCluster<ServiceMonitorStatus> serviceCluster = serviceClusters.iterator().next();
+ assertEquals(ExampleModel.CLUSTER_ID, serviceCluster.clusterId().toString());
+ assertEquals(ExampleModel.SERVICE_TYPE, serviceCluster.serviceType().toString());
+ Set<ServiceInstance<ServiceMonitorStatus>> serviceInstances =
+ serviceCluster.serviceInstances();
+ assertEquals(1, serviceClusters.size());
+ ServiceInstance<ServiceMonitorStatus> serviceInstance = serviceInstances.iterator().next();
+ assertEquals(HOSTNAME, serviceInstance.hostName().toString());
+ assertEquals(ExampleModel.CONFIG_ID, serviceInstance.configId().toString());
+ assertEquals(ServiceMonitorStatus.UP, serviceInstance.serviceStatus());
+ }
+
+ private void verifyConfigServerApplication(
+ ApplicationInstance<ServiceMonitorStatus> applicationInstance) {
+ assertEquals(ConfigServerApplication.APPLICATION_INSTANCE_ID,
+ applicationInstance.applicationInstanceId());
+ }
+
+} \ No newline at end of file
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SlobrokMonitor2Test.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SlobrokMonitor2Test.java
new file mode 100644
index 00000000000..7be63e527cf
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SlobrokMonitor2Test.java
@@ -0,0 +1,36 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.jrt.slobrok.api.Mirror;
+import com.yahoo.jrt.slobrok.api.SlobrokList;
+import org.junit.Test;
+
+import static org.mockito.Mockito.mock;
+
+public class SlobrokMonitor2Test {
+ private final SlobrokList slobrokList = mock(SlobrokList.class);
+ private final Mirror mirror = mock(Mirror.class);
+ private SlobrokMonitor2 slobrokMonitor = new SlobrokMonitor2(slobrokList, mirror);
+
+ @Test
+ public void testUpdateSlobrokList() {
+ ApplicationInfo applicationInfo = ExampleModel.createApplication(
+ "tenant",
+ "application-name")
+ .build();
+ }
+
+ @Test
+ public void testUpdateSlobrokList2() {
+ /*
+ final String hostname = "hostname";
+ final int port = 1;
+
+ SuperModel superModel = ExampleModel.createExampleSuperModelWithOneRpcPort(hostname, port);
+ slobrokMonitor.updateSlobrokList(superModel.getApplicationInfo());
+
+ String[] expectedSpecs = new String[] {"tcp/" + hostname + ":" + port};
+ verify(slobrokList).setup(expectedSpecs); */
+ }
+} \ No newline at end of file
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SlobrokMonitorManagerTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SlobrokMonitorManagerTest.java
new file mode 100644
index 00000000000..f34dd91181c
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SlobrokMonitorManagerTest.java
@@ -0,0 +1,92 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.SuperModel;
+import com.yahoo.vespa.applicationmodel.ConfigId;
+import com.yahoo.vespa.applicationmodel.ServiceType;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Optional;
+import java.util.function.Supplier;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class SlobrokMonitorManagerTest {
+ // IntelliJ complains if parametrized type is specified, Maven complains if not specified.
+ @SuppressWarnings("unchecked")
+ private final Supplier<SlobrokMonitor2> slobrokMonitorFactory = mock(Supplier.class);
+
+ private final SlobrokMonitorManager slobrokMonitorManager =
+ new SlobrokMonitorManager(slobrokMonitorFactory);
+ private final SlobrokMonitor2 slobrokMonitor = mock(SlobrokMonitor2.class);
+ private final SuperModel superModel = mock(SuperModel.class);
+ private final ApplicationInfo application = mock(ApplicationInfo.class);
+
+ @Before
+ public void setup() {
+ when(slobrokMonitorFactory.get()).thenReturn(slobrokMonitor);
+ }
+
+ @Test
+ public void testActivationOfApplication() {
+ slobrokMonitorManager.applicationActivated(superModel, application);
+ verify(slobrokMonitorFactory, times(1)).get();
+ }
+
+ @Test
+ public void testGetStatus_ApplicationNotInSlobrok() {
+ when(slobrokMonitor.registeredInSlobrok("config.id")).thenReturn(true);
+ assertEquals(ServiceMonitorStatus.DOWN, getStatus("topleveldispatch"));
+ }
+
+ @Test
+ public void testGetStatus_ApplicationInSlobrok() {
+ slobrokMonitorManager.applicationActivated(superModel, application);
+ when(slobrokMonitor.registeredInSlobrok("config.id")).thenReturn(true);
+ assertEquals(ServiceMonitorStatus.UP, getStatus("topleveldispatch"));
+ }
+
+ @Test
+ public void testGetStatus_ServiceNotInSlobrok() {
+ slobrokMonitorManager.applicationActivated(superModel, application);
+ when(slobrokMonitor.registeredInSlobrok("config.id")).thenReturn(false);
+ assertEquals(ServiceMonitorStatus.DOWN, getStatus("topleveldispatch"));
+ }
+
+ @Test
+ public void testGetStatus_NotChecked() {
+ assertEquals(ServiceMonitorStatus.NOT_CHECKED, getStatus("slobrok"));
+ verify(slobrokMonitor, times(0)).registeredInSlobrok(any());
+ }
+
+ private ServiceMonitorStatus getStatus(String serviceType) {
+ return slobrokMonitorManager.getStatus(
+ application.getApplicationId(),
+ new ServiceType(serviceType),
+ new ConfigId("config.id"));
+ }
+
+ @Test
+ public void testLookup() {
+ assertEquals(
+ Optional.of("config.id"),
+ findSlobrokServiceName("topleveldispatch", "config.id"));
+
+ assertEquals(
+ Optional.empty(),
+ findSlobrokServiceName("adminserver", "config.id"));
+ }
+
+ private Optional<String> findSlobrokServiceName(String serviceType, String configId) {
+ return slobrokMonitorManager.findSlobrokServiceName(
+ new ServiceType(serviceType),
+ new ConfigId(configId));
+ }
+} \ No newline at end of file
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SuperModelListenerImplTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SuperModelListenerImplTest.java
new file mode 100644
index 00000000000..e5608e92255
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/SuperModelListenerImplTest.java
@@ -0,0 +1,52 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.service.monitor;
+
+import com.yahoo.config.model.api.ApplicationInfo;
+import com.yahoo.config.model.api.SuperModel;
+import com.yahoo.config.model.api.SuperModelProvider;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.vespa.service.monitor.internal.ServiceMonitorMetrics;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class SuperModelListenerImplTest {
+ @Test
+ public void sanityCheck() {
+ SlobrokMonitorManager slobrokMonitorManager = mock(SlobrokMonitorManager.class);
+ ServiceMonitorMetrics metrics = mock(ServiceMonitorMetrics.class);
+ ModelGenerator modelGenerator = mock(ModelGenerator.class);
+ Zone zone = mock(Zone.class);
+ List<String> configServers = new ArrayList<>();
+ SuperModelListenerImpl listener = new SuperModelListenerImpl(
+ slobrokMonitorManager,
+ metrics,
+ modelGenerator,
+ zone,
+ configServers);
+
+ SuperModelProvider superModelProvider = mock(SuperModelProvider.class);
+ SuperModel superModel = mock(SuperModel.class);
+ when(superModelProvider.snapshot(listener)).thenReturn(superModel);
+
+ ApplicationInfo application1 = mock(ApplicationInfo.class);
+ ApplicationInfo application2 = mock(ApplicationInfo.class);
+ List<ApplicationInfo> applications = Stream.of(application1, application2)
+ .collect(Collectors.toList());
+ when(superModel.getAllApplicationInfos()).thenReturn(applications);
+
+ listener.start(superModelProvider);
+ verify(slobrokMonitorManager).applicationActivated(superModel, application1);
+ verify(slobrokMonitorManager).applicationActivated(superModel, application2);
+
+ ServiceModel serviceModel = listener.get();
+ verify(modelGenerator).toServiceModel(superModel, zone, configServers, slobrokMonitorManager);
+ }
+} \ No newline at end of file
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/LatencyMeasurementTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/LatencyMeasurementTest.java
new file mode 100644
index 00000000000..6efde340a16
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/LatencyMeasurementTest.java
@@ -0,0 +1,33 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.service.monitor.internal;
+
+import com.yahoo.jdisc.Timer;
+import org.junit.Test;
+
+import java.util.function.Consumer;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class LatencyMeasurementTest {
+ @Test
+ public void testReportedDuration() {
+ Timer timer = mock(Timer.class);
+ when(timer.currentTimeMillis()).thenReturn(500l, 1000l);
+
+ // IntelliJ complains if parametrized type is specified, Maven complains if not specified.
+ @SuppressWarnings("unchecked")
+ Consumer<Double> consumer = mock(Consumer.class);
+
+ try (LatencyMeasurement measurement = new LatencyMeasurement(timer, consumer)) {
+ // Avoid javac warning by referencing measurement.
+ dummy(measurement);
+ }
+
+ verify(consumer).accept(0.5);
+ }
+
+ private void dummy(LatencyMeasurement measurement) {}
+} \ No newline at end of file
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/ServiceModelCacheTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/ServiceModelCacheTest.java
new file mode 100644
index 00000000000..2ba6ce0deb8
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/ServiceModelCacheTest.java
@@ -0,0 +1,60 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.service.monitor.internal;
+
+import com.yahoo.jdisc.Timer;
+import com.yahoo.vespa.service.monitor.ServiceModel;
+import org.junit.Test;
+
+import java.util.function.Supplier;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class ServiceModelCacheTest {
+ @SuppressWarnings("unchecked")
+ private final Supplier<ServiceModel> rawSupplier = mock(Supplier.class);
+ private final Timer timer = mock(Timer.class);
+ private final ServiceModelCache cache = new ServiceModelCache(rawSupplier, timer);
+
+ @Test
+ public void sanityCheck() {
+ ServiceModel serviceModel = mock(ServiceModel.class);
+ when(rawSupplier.get()).thenReturn(serviceModel);
+
+ long timeMillis = 0;
+ when(timer.currentTimeMillis()).thenReturn(timeMillis);
+
+ // Will always populate cache the first time
+ ServiceModel actualServiceModel = cache.get();
+ assertTrue(actualServiceModel == serviceModel);
+ verify(rawSupplier, times(1)).get();
+
+ // Cache hit
+ timeMillis += ServiceModelCache.EXPIRY_MILLIS / 2;
+ when(timer.currentTimeMillis()).thenReturn(timeMillis);
+ actualServiceModel = cache.get();
+ assertTrue(actualServiceModel == serviceModel);
+
+ // Cache expired
+ timeMillis += ServiceModelCache.EXPIRY_MILLIS + 1;
+ when(timer.currentTimeMillis()).thenReturn(timeMillis);
+
+ ServiceModel serviceModel2 = mock(ServiceModel.class);
+ when(rawSupplier.get()).thenReturn(serviceModel2);
+
+ actualServiceModel = cache.get();
+ assertTrue(actualServiceModel == serviceModel2);
+ // '2' because it's cumulative with '1' from the first times(1).
+ verify(rawSupplier, times(2)).get();
+
+ // Cache hit #2
+ timeMillis += 1;
+ when(timer.currentTimeMillis()).thenReturn(timeMillis);
+ actualServiceModel = cache.get();
+ assertTrue(actualServiceModel == serviceModel2);
+ }
+} \ No newline at end of file
diff --git a/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/ServiceMonitorMetricsTest.java b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/ServiceMonitorMetricsTest.java
new file mode 100644
index 00000000000..45920a436cf
--- /dev/null
+++ b/service-monitor/src/test/java/com/yahoo/vespa/service/monitor/internal/ServiceMonitorMetricsTest.java
@@ -0,0 +1,29 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+package com.yahoo.vespa.service.monitor.internal;
+
+import com.yahoo.jdisc.Metric;
+import com.yahoo.jdisc.Timer;
+import org.junit.Test;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+public class ServiceMonitorMetricsTest {
+ @Test
+ public void testTryWithResources() {
+ Metric metric = mock(Metric.class);
+ Timer timer = mock(Timer.class);
+ ServiceMonitorMetrics metrics = new ServiceMonitorMetrics(metric, timer);
+
+ when(timer.currentTimeMillis()).thenReturn(Long.valueOf(500), Long.valueOf(1000));
+
+ try (LatencyMeasurement measurement = metrics.startServiceModelSnapshotLatencyMeasurement()) {
+ measurement.hashCode();
+ }
+
+ verify(metric).set("serviceModel.snapshot.latency", 0.5, null);
+ }
+
+} \ No newline at end of file
diff --git a/simplemetrics/CMakeLists.txt b/simplemetrics/CMakeLists.txt
new file mode 100644
index 00000000000..c145fbb2ec7
--- /dev/null
+++ b/simplemetrics/CMakeLists.txt
@@ -0,0 +1,4 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(simplemetrics)
+
+install_config_definition(src/main/resources/configdefinitions/manager.def metrics.manager.def)
diff --git a/staging_vespalib/src/vespa/vespalib/stllike/cache.h b/staging_vespalib/src/vespa/vespalib/stllike/cache.h
index 832e6ede43d..3d5ab155877 100644
--- a/staging_vespalib/src/vespa/vespalib/stllike/cache.h
+++ b/staging_vespalib/src/vespa/vespalib/stllike/cache.h
@@ -71,6 +71,8 @@ public:
*/
cache & reserveElements(size_t elems);
+ cache & setCapacityBytes(size_t sz);
+
size_t capacity() const { return Lru::capacity(); }
size_t capacityBytes() const { return _maxBytes; }
size_t size() const { return Lru::size(); }
diff --git a/staging_vespalib/src/vespa/vespalib/stllike/cache.hpp b/staging_vespalib/src/vespa/vespalib/stllike/cache.hpp
index 06e7e249ec6..a8c7d16473c 100644
--- a/staging_vespalib/src/vespa/vespalib/stllike/cache.hpp
+++ b/staging_vespalib/src/vespa/vespalib/stllike/cache.hpp
@@ -21,6 +21,13 @@ cache<P>::reserveElements(size_t elems) {
}
template< typename P >
+cache<P> &
+cache<P>::setCapacityBytes(size_t sz) {
+ _maxBytes = sz;
+ return *this;
+}
+
+template< typename P >
void
cache<P>::invalidate(const K & key) {
vespalib::LockGuard guard(_hashLock);
diff --git a/staging_vespalib/src/vespa/vespalib/util/varholder.h b/staging_vespalib/src/vespa/vespalib/util/varholder.h
index 26f8f57839a..fdcc15d1fb4 100644
--- a/staging_vespalib/src/vespa/vespalib/util/varholder.h
+++ b/staging_vespalib/src/vespa/vespalib/util/varholder.h
@@ -2,41 +2,23 @@
#pragma once
-#include <vespa/vespalib/util/noncopyable.hpp>
#include <vespa/vespalib/util/sync.h>
-namespace vespalib
-{
-
+namespace vespalib {
template <typename T>
-class VarHolder : public noncopyable
+class VarHolder
{
-
- T _v;
- vespalib::Lock _lock;
-
+ T _v;
+ Lock _lock;
public:
- VarHolder(void)
- : _v(),
- _lock()
- {
- }
+ VarHolder() : _v(), _lock() {}
+ explicit VarHolder(const T &v) : _v(v), _lock() {}
+ VarHolder(const VarHolder &) = delete;
+ VarHolder & operator = (const VarHolder &) = delete;
+ ~VarHolder() {}
- explicit
- VarHolder(const T &v)
- : _v(v),
- _lock()
- {
- }
-
- ~VarHolder(void)
- {
- }
-
- void
- set(const T &v)
- {
+ void set(const T &v) {
T old;
{
vespalib::LockGuard guard(_lock);
@@ -45,19 +27,12 @@ public:
}
}
- void
- clear(void)
- {
- set(T());
- }
+ void clear() { set(T()); }
- T
- get(void) const
- {
+ T get() const {
vespalib::LockGuard guard(_lock);
return _v;
}
};
-} // namespace vespalib
-
+}
diff --git a/standalone-container/CMakeLists.txt b/standalone-container/CMakeLists.txt
new file mode 100644
index 00000000000..83c58e09945
--- /dev/null
+++ b/standalone-container/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(standalone-container)
diff --git a/statistics/CMakeLists.txt b/statistics/CMakeLists.txt
new file mode 100644
index 00000000000..3b187b72d6e
--- /dev/null
+++ b/statistics/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_config_definition(src/main/resources/configdefinitions/statistics.def container.statistics.def)
diff --git a/storage/src/tests/common/hostreporter/util.cpp b/storage/src/tests/common/hostreporter/util.cpp
index a9578e8d8cf..e0563a431e6 100644
--- a/storage/src/tests/common/hostreporter/util.cpp
+++ b/storage/src/tests/common/hostreporter/util.cpp
@@ -24,10 +24,10 @@ reporterToSlime(HostReporter &hostReporter, vespalib::Slime &slime) {
hostReporter.report(stream);
stream << End();
std::string jsonData = json.str();
- size_t parsedSize = JsonFormat::decode(Memory(jsonData), slime);
+ size_t parsed = JsonFormat::decode(Memory(jsonData), slime);
- if (jsonData.size() != parsedSize) {
- CPPUNIT_FAIL("Sizes of jsonData mismatched, probably not json:\n" + jsonData);
+ if (parsed == 0) {
+ CPPUNIT_FAIL("jsonData is not json:\n" + jsonData);
}
}
}
diff --git a/storage/src/tests/distributor/distributortest.cpp b/storage/src/tests/distributor/distributortest.cpp
index 3aedd31f574..12a4118aa08 100644
--- a/storage/src/tests/distributor/distributortest.cpp
+++ b/storage/src/tests/distributor/distributortest.cpp
@@ -48,6 +48,8 @@ class Distributor_Test : public CppUnit::TestFixture,
CPPUNIT_TEST(sequencing_config_is_propagated_to_distributor_config);
CPPUNIT_TEST(merge_busy_inhibit_duration_config_is_propagated_to_distributor_config);
CPPUNIT_TEST(merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker);
+ CPPUNIT_TEST(external_client_requests_are_handled_individually_in_priority_order);
+ CPPUNIT_TEST(internal_messages_are_started_in_fifo_order_batch);
CPPUNIT_TEST_SUITE_END();
protected:
@@ -77,6 +79,8 @@ protected:
void sequencing_config_is_propagated_to_distributor_config();
void merge_busy_inhibit_duration_config_is_propagated_to_distributor_config();
void merge_busy_inhibit_duration_is_propagated_to_pending_message_tracker();
+ void external_client_requests_are_handled_individually_in_priority_order();
+ void internal_messages_are_started_in_fifo_order_batch();
public:
void setUp() override {
@@ -867,6 +871,62 @@ void Distributor_Test::merge_busy_inhibit_duration_is_propagated_to_pending_mess
CPPUNIT_ASSERT(!node_info.isBusy(0));
}
+void Distributor_Test::external_client_requests_are_handled_individually_in_priority_order() {
+ setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
+ addNodesToBucketDB(document::BucketId(16, 1), "0=1/1/1/t/a");
+
+ std::vector<api::StorageMessage::Priority> priorities({50, 255, 10, 40, 0});
+ document::DocumentId id("id:foo:testdoctype1:n=1:foo");
+ vespalib::stringref field_set = "";
+ for (auto pri : priorities) {
+ auto cmd = std::make_shared<api::GetCommand>(document::BucketId(), id, field_set);
+ cmd->setPriority(pri);
+ // onDown appends to internal message FIFO queue, awaiting hand-off.
+ _distributor->onDown(cmd);
+ }
+ // At the hand-off point we expect client requests to be prioritized.
+ // For each tick, a priority-order client request is processed and sent off.
+ for (size_t i = 1; i <= priorities.size(); ++i) {
+ tickDistributorNTimes(1);
+ CPPUNIT_ASSERT_EQUAL(size_t(i), _sender.commands.size());
+ }
+
+ std::vector<int> expected({0, 10, 40, 50, 255});
+ std::vector<int> actual;
+ for (auto& msg : _sender.commands) {
+ actual.emplace_back(static_cast<int>(msg->getPriority()));
+ }
+ CPPUNIT_ASSERT_EQUAL(expected, actual);
+}
+
+void Distributor_Test::internal_messages_are_started_in_fifo_order_batch() {
+ // To test internal request ordering, we use NotifyBucketChangeCommand
+ // for the reason that it explicitly updates the bucket database for
+ // each individual invocation.
+ setupDistributor(Redundancy(1), NodeCount(1), "storage:1 distributor:1");
+ document::BucketId bucket(16, 1);
+ addNodesToBucketDB(bucket, "0=1/1/1/t");
+
+ std::vector<api::StorageMessage::Priority> priorities({50, 255, 10, 40, 1});
+ for (auto pri : priorities) {
+ api::BucketInfo fake_info(pri, pri, pri);
+ auto cmd = std::make_shared<api::NotifyBucketChangeCommand>(bucket, fake_info);
+ cmd->setSourceIndex(0);
+ cmd->setPriority(pri);
+ _distributor->onDown(cmd);
+ }
+
+ // Doing a single tick should process all internal requests in one batch
+ tickDistributorNTimes(1);
+ CPPUNIT_ASSERT_EQUAL(size_t(5), _sender.replies.size());
+
+ // The bucket info for priority 1 (last FIFO-order change command received, but
+ // highest priority) should be the end-state of the bucket database, _not_ that
+ // of lowest priority 255.
+ BucketDatabase::Entry e(getBucket(bucket));
+ CPPUNIT_ASSERT_EQUAL(api::BucketInfo(1, 1, 1), e.getBucketInfo().getNode(0)->getBucketInfo());
+}
+
}
}
diff --git a/storage/src/tests/persistence/common/filestortestfixture.cpp b/storage/src/tests/persistence/common/filestortestfixture.cpp
index b7a849e3381..b0225992a20 100644
--- a/storage/src/tests/persistence/common/filestortestfixture.cpp
+++ b/storage/src/tests/persistence/common/filestortestfixture.cpp
@@ -4,8 +4,11 @@
#include <vespa/storage/persistence/filestorage/filestormanager.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <tests/persistence/common/filestortestfixture.h>
+#include <vespa/persistence/spi/test.h>
#include <sstream>
+using storage::spi::test::makeBucket;
+
namespace storage {
spi::LoadType FileStorTestFixture::defaultLoadType = spi::LoadType(0, "default");
@@ -50,7 +53,7 @@ FileStorTestFixture::createBucket(const document::BucketId& bid)
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
_node->getPersistenceProvider().createBucket(
- spi::Bucket(bid, spi::PartitionId(0)), context);
+ makeBucket(bid), context);
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(bid, "foo",
diff --git a/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp b/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
index 52263b4c2b5..c804354b0ee 100644
--- a/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
+++ b/storage/src/tests/persistence/common/persistenceproviderwrapper.cpp
@@ -66,11 +66,11 @@ PersistenceProviderWrapper::getPartitionStates() const
}
spi::BucketIdListResult
-PersistenceProviderWrapper::listBuckets(spi::PartitionId partitionId) const
+PersistenceProviderWrapper::listBuckets(BucketSpace bucketSpace, spi::PartitionId partitionId) const
{
- LOG_SPI("listBuckets(" << uint16_t(partitionId) << ")");
+ LOG_SPI("listBuckets(" << bucketSpace.getId() << ", " << uint16_t(partitionId) << ")");
CHECK_ERROR(spi::BucketIdListResult, FAIL_LIST_BUCKETS);
- return _spi.listBuckets(partitionId);
+ return _spi.listBuckets(bucketSpace, partitionId);
}
spi::Result
diff --git a/storage/src/tests/persistence/common/persistenceproviderwrapper.h b/storage/src/tests/persistence/common/persistenceproviderwrapper.h
index 955a23327f9..1f0dc93c44c 100644
--- a/storage/src/tests/persistence/common/persistenceproviderwrapper.h
+++ b/storage/src/tests/persistence/common/persistenceproviderwrapper.h
@@ -87,7 +87,7 @@ public:
spi::Result createBucket(const spi::Bucket&, spi::Context&) override;
spi::PartitionStateListResult getPartitionStates() const override;
- spi::BucketIdListResult listBuckets(spi::PartitionId) const override;
+ spi::BucketIdListResult listBuckets(BucketSpace bucketSpace, spi::PartitionId) const override;
spi::BucketInfoResult getBucketInfo(const spi::Bucket&) const override;
spi::Result put(const spi::Bucket&, spi::Timestamp, const spi::DocumentSP&, spi::Context&) override;
spi::RemoveResult remove(const spi::Bucket&, spi::Timestamp, const spi::DocumentId&, spi::Context&) override;
diff --git a/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
index 932443e1e5d..e76828c1eb2 100644
--- a/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
+++ b/storage/src/tests/persistence/filestorage/deactivatebucketstest.cpp
@@ -3,10 +3,13 @@
#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storageapi/message/state.h>
+#include <vespa/persistence/spi/test.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <tests/persistence/common/filestortestfixture.h>
+using storage::spi::test::makeBucket;
+
namespace storage {
class DeactivateBucketsTest : public FileStorTestFixture
@@ -41,7 +44,7 @@ DeactivateBucketsTest::bucketsInDatabaseDeactivatedWhenNodeDownInClusterState()
lib::ClusterState::CSP(new lib::ClusterState(upState)));
document::BucketId bucket(8, 123);
- spi::Bucket spiBucket(bucket, spi::PartitionId(0));
+ spi::Bucket spiBucket(makeBucket(bucket));
createBucket(bucket);
api::BucketInfo serviceLayerInfo(1, 2, 3, 4, 5, true, true);
diff --git a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
index 4ac1d553ec1..0b8ee0113ea 100644
--- a/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormanagertest.cpp
@@ -28,6 +28,7 @@
#include <vespa/storageapi/message/persistence.h>
#include <vespa/storageapi/message/removelocation.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/storageapi/message/batch.h>
#include <vespa/config/common/exceptions.h>
#include <vespa/fastos/file.h>
@@ -38,6 +39,7 @@ LOG_SETUP(".filestormanagertest");
using std::unique_ptr;
using document::Document;
using namespace storage::api;
+using storage::spi::test::makeBucket;
#define ASSERT_SINGLE_REPLY(replytype, reply, link, time) \
reply = 0; \
@@ -154,7 +156,7 @@ struct FileStorManagerTest : public CppUnit::TestFixture {
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
_node->getPersistenceProvider().createBucket(
- spi::Bucket(bid, spi::PartitionId(disk)), context);
+ makeBucket(bid, spi::PartitionId(disk)), context);
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(bid, "foo",
@@ -1306,7 +1308,7 @@ FileStorManagerTest::testPriority()
spi::Trace::TraceLevel(0));
_node->getPersistenceProvider().createBucket(
- spi::Bucket(bucket, spi::PartitionId(0)), context);
+ makeBucket(bucket), context);
}
// Populate bucket with the given data
@@ -1395,7 +1397,7 @@ FileStorManagerTest::testSplit1()
documents[i]->getId()).getRawId());
_node->getPersistenceProvider().createBucket(
- spi::Bucket(bucket, spi::PartitionId(0)), context);
+ makeBucket(bucket), context);
std::shared_ptr<api::PutCommand> cmd(
new api::PutCommand(bucket, documents[i], 100 + i));
@@ -1570,7 +1572,7 @@ FileStorManagerTest::testSplitSingleGroup()
documents[i]->getId()).getRawId());
_node->getPersistenceProvider().createBucket(
- spi::Bucket(bucket, spi::PartitionId(0)), context);
+ makeBucket(bucket), context);
std::shared_ptr<api::PutCommand> cmd(
new api::PutCommand(bucket, documents[i], 100 + i));
@@ -1645,7 +1647,7 @@ FileStorManagerTest::putDoc(DummyStorageLink& top,
document::BucketId bucket(16, factory.getBucketId(docId).getRawId());
//std::cerr << "doc bucket is " << bucket << " vs source " << source << "\n";
_node->getPersistenceProvider().createBucket(
- spi::Bucket(target, spi::PartitionId(0)), context);
+ makeBucket(target), context);
Document::SP doc(new Document(*_testdoctype1, docId));
std::shared_ptr<api::PutCommand> cmd(
new api::PutCommand(target, doc, docNum+1));
@@ -1920,7 +1922,7 @@ createIterator(DummyStorageLink& link,
framework::MicroSecTime toTime = framework::MicroSecTime::max(),
bool headerOnly = false)
{
- spi::Bucket bucket(bucketId, spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(bucketId));
spi::Selection selection =
spi::Selection(spi::DocumentSelection(docSel));
@@ -2877,7 +2879,7 @@ FileStorManagerTest::testSetBucketActiveState()
createBucket(bid, disk);
spi::dummy::DummyPersistence& provider(
dynamic_cast<spi::dummy::DummyPersistence&>(_node->getPersistenceProvider()));
- CPPUNIT_ASSERT(!provider.isActive(spi::Bucket(bid, spi::PartitionId(disk))));
+ CPPUNIT_ASSERT(!provider.isActive(makeBucket(bid, spi::PartitionId(disk))));
{
std::shared_ptr<api::SetBucketStateCommand> cmd(
@@ -2895,7 +2897,7 @@ FileStorManagerTest::testSetBucketActiveState()
CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
}
- CPPUNIT_ASSERT(provider.isActive(spi::Bucket(bid, spi::PartitionId(disk))));
+ CPPUNIT_ASSERT(provider.isActive(makeBucket(bid, spi::PartitionId(disk))));
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
@@ -2939,7 +2941,7 @@ FileStorManagerTest::testSetBucketActiveState()
CPPUNIT_ASSERT_EQUAL(ReturnCode(ReturnCode::OK), reply->getResult());
}
- CPPUNIT_ASSERT(!provider.isActive(spi::Bucket(bid, spi::PartitionId(disk))));
+ CPPUNIT_ASSERT(!provider.isActive(makeBucket(bid, spi::PartitionId(disk))));
{
StorBucketDatabase::WrappedEntry entry(
_node->getStorageBucketDatabase().get(
diff --git a/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp b/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
index 0a195b4610a..856add700e7 100644
--- a/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
+++ b/storage/src/tests/persistence/filestorage/filestormodifiedbucketstest.cpp
@@ -3,10 +3,13 @@
#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
+#include <vespa/persistence/spi/test.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <tests/persistence/common/filestortestfixture.h>
+using storage::spi::test::makeBucket;
+
namespace storage {
/**
@@ -73,7 +76,7 @@ FileStorModifiedBucketsTest::modifyBuckets(uint32_t first, uint32_t count)
for (uint32_t i = 0; i < count; ++i) {
buckets.push_back(document::BucketId(16, first + i));
_node->getPersistenceProvider().setActiveState(
- spi::Bucket(buckets[i], spi::PartitionId(0)),
+ makeBucket(buckets[i]),
spi::BucketInfo::ACTIVE);
}
@@ -91,7 +94,7 @@ FileStorModifiedBucketsTest::modifiedBucketsSendNotifyBucketChange()
for (uint32_t i = 0; i < numBuckets; ++i) {
document::BucketId bucket(16, i);
- createBucket(spi::Bucket(bucket, spi::PartitionId(0)));
+ createBucket(makeBucket(bucket));
c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
}
c.top.waitForMessages(numBuckets, MSG_WAIT_TIME);
@@ -119,7 +122,7 @@ FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
setClusterState("storage:1 distributor:1");
document::BucketId bucket(16, 0);
- createBucket(spi::Bucket(bucket, spi::PartitionId(0)));
+ createBucket(makeBucket(bucket));
c.sendPut(bucket, DocumentIndex(0), PutTimestamp(1000));
c.top.waitForMessages(1, MSG_WAIT_TIME);
c.top.reset();
@@ -131,7 +134,7 @@ FileStorModifiedBucketsTest::fileStorRepliesToRecheckBucketCommands()
// If we don't reply to the recheck bucket commands, we won't trigger
// a new round of getModifiedBuckets and recheck commands.
c.top.reset();
- createBucket(spi::Bucket(document::BucketId(16, 1), spi::PartitionId(0)));
+ createBucket(makeBucket(document::BucketId(16, 1)));
modifyBuckets(1, 1);
c.top.waitForMessages(1, MSG_WAIT_TIME);
assertIsNotifyCommandWithActiveBucket(*c.top.getReply(0));
diff --git a/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
index 5ae7250542a..ef49f7cd5dd 100644
--- a/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
+++ b/storage/src/tests/persistence/filestorage/sanitycheckeddeletetest.cpp
@@ -2,10 +2,13 @@
#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/bucket.h>
+#include <vespa/persistence/spi/test.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <tests/persistence/common/filestortestfixture.h>
+using storage::spi::test::makeBucket;
+
namespace storage {
class SanityCheckedDeleteTest : public FileStorTestFixture {
@@ -27,7 +30,7 @@ void SanityCheckedDeleteTest::delete_bucket_fails_when_provider_out_of_sync() {
TestFileStorComponents c(*this, "delete_bucket_fails_when_provider_out_of_sync");
document::BucketId bucket(8, 123);
document::BucketId syncBucket(8, 234);
- spi::Bucket spiBucket(bucket, spi::PartitionId(0));
+ spi::Bucket spiBucket(makeBucket(bucket));
// Send a put to ensure bucket isn't empty.
spi::BucketInfo infoBefore(send_put_and_get_bucket_info(c, spiBucket));
@@ -81,7 +84,7 @@ spi::BucketInfo SanityCheckedDeleteTest::send_put_and_get_bucket_info(
void SanityCheckedDeleteTest::differing_document_sizes_not_considered_out_of_sync() {
TestFileStorComponents c(*this, "differing_document_sizes_not_considered_out_of_sync");
document::BucketId bucket(8, 123);
- spi::Bucket spiBucket(bucket, spi::PartitionId(0));
+ spi::Bucket spiBucket(makeBucket(bucket));
spi::BucketInfo info_before(send_put_and_get_bucket_info(c, spiBucket));
// Expect 1 byte of reported size, which will mismatch with the actually put document.
diff --git a/storage/src/tests/persistence/legacyoperationhandlertest.cpp b/storage/src/tests/persistence/legacyoperationhandlertest.cpp
index c4d2f12014a..6ceff0661b1 100644
--- a/storage/src/tests/persistence/legacyoperationhandlertest.cpp
+++ b/storage/src/tests/persistence/legacyoperationhandlertest.cpp
@@ -5,11 +5,13 @@
#include <vespa/storage/persistence/messages.h>
#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storageapi/message/multioperation.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
#include <tests/persistence/persistencetestutils.h>
using document::DocumentTypeRepo;
using document::TestDocRepo;
+using storage::spi::test::makeBucket;
namespace storage {
@@ -30,7 +32,7 @@ public:
spi::Context context(spi::LoadType(0, "default"), spi::Priority(0),
spi::Trace::TraceLevel(0));
getPersistenceProvider().createBucket(
- spi::Bucket(document::BucketId(16, 4), spi::PartitionId(0)),
+ makeBucket(document::BucketId(16, 4)),
context);
}
diff --git a/storage/src/tests/persistence/persistencetestutils.cpp b/storage/src/tests/persistence/persistencetestutils.cpp
index d8da1a63e9e..0fd5a30d8bd 100644
--- a/storage/src/tests/persistence/persistencetestutils.cpp
+++ b/storage/src/tests/persistence/persistencetestutils.cpp
@@ -4,12 +4,14 @@
#include <vespa/storageapi/message/persistence.h>
#include <tests/persistence/persistencetestutils.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/document/update/assignvalueupdate.h>
#include <vespa/vespalib/objects/nbostream.h>
#include <vespa/vespalib/util/exceptions.h>
using document::DocumentType;
using storage::framework::defaultimplementation::AllocationLogic;
+using storage::spi::test::makeBucket;
namespace storage {
@@ -78,7 +80,7 @@ PersistenceTestUtils::~PersistenceTestUtils()
std::string
PersistenceTestUtils::dumpBucket(const document::BucketId& bid,
uint16_t disk) {
- return dynamic_cast<spi::dummy::DummyPersistence&>(_env->_node.getPersistenceProvider()).dumpBucket(spi::Bucket(bid, spi::PartitionId(disk)));
+ return dynamic_cast<spi::dummy::DummyPersistence&>(_env->_node.getPersistenceProvider()).dumpBucket(makeBucket(bid, spi::PartitionId(disk)));
}
void
@@ -166,7 +168,7 @@ PersistenceTestUtils::doPutOnDisk(
{
document::Document::SP doc(createRandomDocumentAtLocation(
location, timestamp, minSize, maxSize));
- spi::Bucket b(document::BucketId(16, location), spi::PartitionId(disk));
+ spi::Bucket b(makeBucket(document::BucketId(16, location), spi::PartitionId(disk)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
@@ -190,12 +192,12 @@ PersistenceTestUtils::doRemoveOnDisk(
spi::Trace::TraceLevel(0));
if (persistRemove) {
spi::RemoveResult result = getPersistenceProvider().removeIfFound(
- spi::Bucket(bucketId, spi::PartitionId(disk)),
+ makeBucket(bucketId, spi::PartitionId(disk)),
timestamp, docId, context);
return result.wasFound();
}
spi::RemoveResult result = getPersistenceProvider().remove(
- spi::Bucket(bucketId, spi::PartitionId(disk)),
+ makeBucket(bucketId, spi::PartitionId(disk)),
timestamp, docId, context);
return result.wasFound();
@@ -211,7 +213,7 @@ PersistenceTestUtils::doUnrevertableRemoveOnDisk(
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
spi::RemoveResult result = getPersistenceProvider().remove(
- spi::Bucket(bucketId, spi::PartitionId(disk)),
+ makeBucket(bucketId, spi::PartitionId(disk)),
timestamp, docId, context);
return result.wasFound();
}
@@ -229,7 +231,7 @@ PersistenceTestUtils::doGetOnDisk(
if (headerOnly) {
fieldSet.reset(new document::HeaderFields());
}
- return getPersistenceProvider().get(spi::Bucket(
+ return getPersistenceProvider().get(makeBucket(
bucketId, spi::PartitionId(disk)), *fieldSet, docId, context);
}
@@ -305,7 +307,7 @@ PersistenceTestUtils::doPut(const document::Document::SP& doc,
spi::Timestamp time,
uint16_t disk)
{
- spi::Bucket b(bid, spi::PartitionId(disk));
+ spi::Bucket b(makeBucket(bid, spi::PartitionId(disk)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
getPersistenceProvider().createBucket(b, context);
@@ -321,7 +323,7 @@ PersistenceTestUtils::doUpdate(document::BucketId bid,
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
return getPersistenceProvider().update(
- spi::Bucket(bid, spi::PartitionId(disk)), time, update, context);
+ makeBucket(bid, spi::PartitionId(disk)), time, update, context);
}
void
@@ -337,10 +339,10 @@ PersistenceTestUtils::doRemove(const document::DocumentId& id, spi::Timestamp ti
spi::Trace::TraceLevel(0));
if (unrevertableRemove) {
getPersistenceProvider().remove(
- spi::Bucket(bucket, spi::PartitionId(disk)), time, id, context);
+ makeBucket(bucket, spi::PartitionId(disk)), time, id, context);
} else {
spi::RemoveResult result = getPersistenceProvider().removeIfFound(
- spi::Bucket(bucket, spi::PartitionId(disk)), time, id, context);
+ makeBucket(bucket, spi::PartitionId(disk)), time, id, context);
if (!result.wasFound()) {
throw vespalib::IllegalStateException(
"Attempted to remove non-existing doc " + id.toString(),
diff --git a/storage/src/tests/persistence/persistencethread_splittest.cpp b/storage/src/tests/persistence/persistencethread_splittest.cpp
index 7eeeeb1e909..df43b714dcb 100644
--- a/storage/src/tests/persistence/persistencethread_splittest.cpp
+++ b/storage/src/tests/persistence/persistencethread_splittest.cpp
@@ -3,8 +3,11 @@
#include <vespa/vdstestlib/cppunit/macros.h>
#include <vespa/storage/persistence/persistencethread.h>
#include <vespa/storageapi/message/bucketsplitting.h>
+#include <vespa/persistence/spi/test.h>
#include <tests/persistence/persistencetestutils.h>
+using storage::spi::test::makeBucket;
+
namespace storage {
namespace {
spi::LoadType defaultLoadType(0, "default");
@@ -174,8 +177,7 @@ PersistenceThread_SplitTest::doTest(SplitCase splitCase)
uint64_t splitMask = 1 << (splitLevelToDivide - 1);
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
- spi::Bucket bucket(document::BucketId(currentSplitLevel, 1),
- spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(currentSplitLevel, 1)));
spi::PersistenceProvider& spi(getPersistenceProvider());
spi.deleteBucket(bucket, context);
spi.createBucket(bucket, context);
diff --git a/storage/src/tests/persistence/provider_error_wrapper_test.cpp b/storage/src/tests/persistence/provider_error_wrapper_test.cpp
index 7a8f26cbe93..397bc740e55 100644
--- a/storage/src/tests/persistence/provider_error_wrapper_test.cpp
+++ b/storage/src/tests/persistence/provider_error_wrapper_test.cpp
@@ -1,9 +1,12 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vdstestlib/cppunit/macros.h>
+#include <vespa/persistence/spi/test.h>
#include <tests/persistence/persistencetestutils.h>
#include <tests/persistence/common/persistenceproviderwrapper.h>
+using storage::spi::test::makeBucket;
+
namespace storage {
class ProviderErrorWrapperTest : public SingleDiskPersistenceTestUtils {
@@ -61,7 +64,7 @@ struct Fixture {
~Fixture() {}
void perform_spi_operation() {
- errorWrapper.getBucketInfo(spi::Bucket(document::BucketId(16, 1234), spi::PartitionId(0)));
+ errorWrapper.getBucketInfo(makeBucket(document::BucketId(16, 1234)));
}
void check_no_listener_invoked_for_error(MockErrorListener& listener, spi::Result::ErrorType error) {
diff --git a/storage/src/tests/persistence/splitbitdetectortest.cpp b/storage/src/tests/persistence/splitbitdetectortest.cpp
index 60f76c2df60..30904e3da3d 100644
--- a/storage/src/tests/persistence/splitbitdetectortest.cpp
+++ b/storage/src/tests/persistence/splitbitdetectortest.cpp
@@ -5,10 +5,12 @@
#include <vespa/storage/persistence/splitbitdetector.h>
#include <vespa/vespalib/io/fileutil.h>
#include <vespa/persistence/dummyimpl/dummypersistence.h>
+#include <vespa/persistence/spi/test.h>
#include <vespa/document/base/testdocman.h>
#include <vespa/document/bucket/bucketidfactory.h>
#include <algorithm>
+using storage::spi::test::makeBucket;
namespace storage {
@@ -61,8 +63,7 @@ SplitBitDetectorTest::testTwoUsers()
document::TestDocMan testDocMan;
spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
provider.getPartitionStates();
- spi::Bucket bucket(document::BucketId(1, 1),
- spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(1, 1)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
@@ -95,8 +96,7 @@ SplitBitDetectorTest::testSingleUser()
document::TestDocMan testDocMan;
spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
provider.getPartitionStates();
- spi::Bucket bucket(document::BucketId(1, 1),
- spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(1, 1)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
@@ -123,8 +123,7 @@ SplitBitDetectorTest::testMaxBits()
document::TestDocMan testDocMan;
spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
provider.getPartitionStates();
- spi::Bucket bucket(document::BucketId(1, 1),
- spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(1, 1)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
int minContentSize = 1, maxContentSize = 1;
@@ -153,7 +152,7 @@ SplitBitDetectorTest::testMaxBitsOneBelowMax()
document::TestDocMan testDocMan;
spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
provider.getPartitionStates();
- spi::Bucket bucket(document::BucketId(15, 1), spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(15, 1)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
int minContentSize = 1, maxContentSize = 1;
@@ -191,8 +190,7 @@ SplitBitDetectorTest::testUnsplittable()
document::TestDocMan testDocMan;
spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
provider.getPartitionStates();
- spi::Bucket bucket(document::BucketId(1, 1),
- spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(1, 1)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
@@ -220,8 +218,7 @@ SplitBitDetectorTest::testUnsplittableMinCount()
document::TestDocMan testDocMan;
spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
provider.getPartitionStates();
- spi::Bucket bucket(document::BucketId(1, 1),
- spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(1, 1)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
@@ -251,8 +248,7 @@ SplitBitDetectorTest::testEmpty()
document::TestDocMan testDocMan;
spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
provider.getPartitionStates();
- spi::Bucket bucket(document::BucketId(1, 1),
- spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(1, 1)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
@@ -271,8 +267,7 @@ SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseWith1Doc()
document::TestDocMan testDocMan;
spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
provider.getPartitionStates();
- spi::Bucket bucket(document::BucketId(1, 1),
- spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(1, 1)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
@@ -295,8 +290,7 @@ SplitBitDetectorTest::testZeroDocLimitFallbacksToOneBitIncreaseOnGidCollision()
document::TestDocMan testDocMan;
spi::dummy::DummyPersistence provider(testDocMan.getTypeRepoSP(), 1);
provider.getPartitionStates();
- spi::Bucket bucket(document::BucketId(1, 1),
- spi::PartitionId(0));
+ spi::Bucket bucket(makeBucket(document::BucketId(1, 1)));
spi::Context context(defaultLoadType, spi::Priority(0),
spi::Trace::TraceLevel(0));
diff --git a/storage/src/tests/persistence/testandsettest.cpp b/storage/src/tests/persistence/testandsettest.cpp
index bdef347dc1c..2c1c15dee87 100644
--- a/storage/src/tests/persistence/testandsettest.cpp
+++ b/storage/src/tests/persistence/testandsettest.cpp
@@ -6,12 +6,14 @@
#include <vespa/documentapi/messagebus/messages/testandsetcondition.h>
#include <vespa/document/fieldvalue/fieldvalues.h>
#include <vespa/document/update/assignvalueupdate.h>
+#include <vespa/persistence/spi/test.h>
#include <functional>
using std::unique_ptr;
using std::shared_ptr;
using namespace std::string_literals;
+using storage::spi::test::makeBucket;
namespace storage {
@@ -42,7 +44,7 @@ public:
createBucket(BUCKET_ID);
getPersistenceProvider().createBucket(
- spi::Bucket(BUCKET_ID, spi::PartitionId(0)),
+ makeBucket(BUCKET_ID),
context);
thread = createPersistenceThread(0);
diff --git a/storage/src/tests/storageserver/statereportertest.cpp b/storage/src/tests/storageserver/statereportertest.cpp
index 8622f241a18..3a71444e74c 100644
--- a/storage/src/tests/storageserver/statereportertest.cpp
+++ b/storage/src/tests/storageserver/statereportertest.cpp
@@ -130,7 +130,7 @@ vespalib::Slime slime; \
size_t parsed = JsonFormat::decode(vespalib::Memory(jsonData), slime); \
vespalib::SimpleBuffer buffer; \
JsonFormat::encode(slime, buffer, false); \
- if (jsonData.size() != parsed) { \
+ if (parsed == 0) { \
std::ostringstream error; \
error << "Failed to parse JSON: '\n" \
<< jsonData << "'\n:" << buffer.get().make_string() << "\n"; \
diff --git a/storage/src/vespa/storage/bucketdb/CMakeLists.txt b/storage/src/vespa/storage/bucketdb/CMakeLists.txt
index 5c818631d54..6e3a0c2e986 100644
--- a/storage/src/vespa/storage/bucketdb/CMakeLists.txt
+++ b/storage/src/vespa/storage/bucketdb/CMakeLists.txt
@@ -17,6 +17,6 @@ vespa_add_library(storage_bucketdb OBJECT
storage_storageconfig
)
vespa_generate_config(storage_bucketdb stor-bucketdb.def)
-install(FILES stor-bucketdb.def RENAME vespa.config.content.core.stor-bucketdb.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-bucketdb.def vespa.config.content.core.stor-bucketdb.def)
vespa_generate_config(storage_bucketdb stor-bucket-init.def)
-install(FILES stor-bucket-init.def RENAME vespa.config.content.core.stor-bucket-init.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-bucket-init.def vespa.config.content.core.stor-bucket-init.def)
diff --git a/storage/src/vespa/storage/bucketdb/mapbucketdatabase.cpp b/storage/src/vespa/storage/bucketdb/mapbucketdatabase.cpp
index fbc2b6d348f..5ac4c4b1ee7 100644
--- a/storage/src/vespa/storage/bucketdb/mapbucketdatabase.cpp
+++ b/storage/src/vespa/storage/bucketdb/mapbucketdatabase.cpp
@@ -2,8 +2,12 @@
#include "mapbucketdatabase.h"
#include <vespa/storage/common/bucketoperationlogger.h>
+#include <vespa/vespalib/util/backtrace.h>
#include <ostream>
+#include <vespa/log/bufferedlogger.h>
+LOG_SETUP(".mapbucketdatabase");
+
namespace storage {
MapBucketDatabase::MapBucketDatabase()
@@ -136,10 +140,24 @@ MapBucketDatabase::remove(const document::BucketId& bucket)
remove(0, 0, bucket);
}
+namespace {
+
+void __attribute__((noinline)) log_empty_bucket_insertion(const document::BucketId& id) {
+ // Use buffered logging to avoid spamming the logs in case this is triggered for
+ // many buckets simultaneously.
+ LOGBP(error, "Inserted empty bucket %s into database.\n%s",
+ id.toString().c_str(), vespalib::getStackTrace(2).c_str());
+}
+
+}
+
void
MapBucketDatabase::update(const Entry& newEntry)
{
assert(newEntry.valid());
+ if (newEntry->getNodeCount() == 0) {
+ log_empty_bucket_insertion(newEntry.getBucketId());
+ }
LOG_BUCKET_OPERATION_NO_LOCK(
newEntry.getBucketId(),
vespalib::make_string(
diff --git a/storage/src/vespa/storage/config/CMakeLists.txt b/storage/src/vespa/storage/config/CMakeLists.txt
index 0f3f392f324..4a20d510043 100644
--- a/storage/src/vespa/storage/config/CMakeLists.txt
+++ b/storage/src/vespa/storage/config/CMakeLists.txt
@@ -5,26 +5,26 @@ vespa_add_library(storage_storageconfig OBJECT
DEPENDS
)
vespa_generate_config(storage_storageconfig stor-communicationmanager.def)
-install(FILES stor-communicationmanager.def RENAME vespa.config.content.core.stor-communicationmanager.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-communicationmanager.def vespa.config.content.core.stor-communicationmanager.def)
vespa_generate_config(storage_storageconfig stor-distributormanager.def)
-install(FILES stor-distributormanager.def RENAME vespa.config.content.core.stor-distributormanager.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-distributormanager.def vespa.config.content.core.stor-distributormanager.def)
vespa_generate_config(storage_storageconfig stor-server.def)
-install(FILES stor-server.def RENAME vespa.config.content.core.stor-server.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-server.def vespa.config.content.core.stor-server.def)
vespa_generate_config(storage_storageconfig stor-status.def)
-install(FILES stor-status.def RENAME vespa.config.content.core.stor-status.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-status.def vespa.config.content.core.stor-status.def)
vespa_generate_config(storage_storageconfig stor-messageforwarder.def)
-install(FILES stor-messageforwarder.def RENAME vespa.config.content.core.stor-messageforwarder.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-messageforwarder.def vespa.config.content.core.stor-messageforwarder.def)
vespa_generate_config(storage_storageconfig stor-opslogger.def)
-install(FILES stor-opslogger.def RENAME vespa.config.content.core.stor-opslogger.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-opslogger.def vespa.config.content.core.stor-opslogger.def)
vespa_generate_config(storage_storageconfig stor-visitordispatcher.def)
-install(FILES stor-visitordispatcher.def RENAME vespa.config.content.core.stor-visitordispatcher.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-visitordispatcher.def vespa.config.content.core.stor-visitordispatcher.def)
vespa_generate_config(storage_storageconfig stor-integritychecker.def)
-install(FILES stor-integritychecker.def RENAME vespa.config.content.core.stor-integritychecker.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-integritychecker.def vespa.config.content.core.stor-integritychecker.def)
vespa_generate_config(storage_storageconfig stor-bucketmover.def)
-install(FILES stor-bucketmover.def RENAME vespa.config.content.core.stor-bucketmover.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-bucketmover.def vespa.config.content.core.stor-bucketmover.def)
vespa_generate_config(storage_storageconfig stor-bouncer.def)
-install(FILES stor-bouncer.def RENAME vespa.config.content.core.stor-bouncer.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-bouncer.def vespa.config.content.core.stor-bouncer.def)
vespa_generate_config(storage_storageconfig stor-prioritymapping.def)
-install(FILES stor-prioritymapping.def RENAME vespa.config.content.core.stor-prioritymapping.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-prioritymapping.def vespa.config.content.core.stor-prioritymapping.def)
vespa_generate_config(storage_storageconfig rpc-provider.def)
-install(FILES rpc-provider.def RENAME vespa.config.content.core.rpc-provider.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(rpc-provider.def vespa.config.content.core.rpc-provider.def)
diff --git a/storage/src/vespa/storage/distributor/distributor.cpp b/storage/src/vespa/storage/distributor/distributor.cpp
index 0dc52650131..53df19fd10c 100644
--- a/storage/src/vespa/storage/distributor/distributor.cpp
+++ b/storage/src/vespa/storage/distributor/distributor.cpp
@@ -572,19 +572,61 @@ Distributor::workWasDone()
return !_tickResult.waitWanted();
}
-void
-Distributor::startExternalOperations()
-{
- for (uint32_t i=0; i<_fetchedMessages.size(); ++i) {
- MBUS_TRACE(_fetchedMessages[i]->getTrace(), 9,
- "Distributor: Grabbed from queue to be processed.");
- if (!handleMessage(_fetchedMessages[i])) {
- MBUS_TRACE(_fetchedMessages[i]->getTrace(), 9,
- "Distributor: Not handling it. Sending further down.");
- sendDown(_fetchedMessages[i]);
+namespace {
+
+bool is_client_request(const api::StorageMessage& msg) noexcept {
+ // Despite having been converted to StorageAPI messages, the following
+ // set of messages are never sent to the distributor by other processes
+ // than clients.
+ switch (msg.getType().getId()) {
+ case api::MessageType::GET_ID:
+ case api::MessageType::PUT_ID:
+ case api::MessageType::REMOVE_ID:
+ case api::MessageType::VISITOR_CREATE_ID:
+ case api::MessageType::VISITOR_DESTROY_ID:
+ case api::MessageType::MULTIOPERATION_ID: // Deprecated
+ case api::MessageType::GETBUCKETLIST_ID:
+ case api::MessageType::STATBUCKET_ID:
+ case api::MessageType::UPDATE_ID:
+ case api::MessageType::REMOVELOCATION_ID:
+ case api::MessageType::BATCHPUTREMOVE_ID: // Deprecated
+ case api::MessageType::BATCHDOCUMENTUPDATE_ID: // Deprecated
+ return true;
+ default:
+ return false;
+ }
+}
+
+}
+
+void Distributor::handle_or_propagate_message(const std::shared_ptr<api::StorageMessage>& msg) {
+ if (!handleMessage(msg)) {
+ MBUS_TRACE(msg->getTrace(), 9, "Distributor: Not handling it. Sending further down.");
+ sendDown(msg);
+ }
+}
+
+void Distributor::startExternalOperations() {
+ for (auto& msg : _fetchedMessages) {
+ if (is_client_request(*msg)) {
+ MBUS_TRACE(msg->getTrace(), 9, "Distributor: adding to client request priority queue");
+ _client_request_priority_queue.emplace(std::move(msg));
+ } else {
+ MBUS_TRACE(msg->getTrace(), 9, "Distributor: Grabbed from queue to be processed.");
+ handle_or_propagate_message(msg);
}
}
- if (!_fetchedMessages.empty()) {
+
+ const bool start_single_client_request = !_client_request_priority_queue.empty();
+ if (start_single_client_request) {
+ auto& msg = _client_request_priority_queue.top();
+ MBUS_TRACE(msg->getTrace(), 9, "Distributor: Grabbed from "
+ "client request priority queue to be processed.");
+ handle_or_propagate_message(msg); // TODO move() once we've move-enabled our message chains
+ _client_request_priority_queue.pop();
+ }
+
+ if (!_fetchedMessages.empty() || start_single_client_request) {
signalWorkWasDone();
}
_fetchedMessages.clear();
diff --git a/storage/src/vespa/storage/distributor/distributor.h b/storage/src/vespa/storage/distributor/distributor.h
index caf3a13d113..438001acc40 100644
--- a/storage/src/vespa/storage/distributor/distributor.h
+++ b/storage/src/vespa/storage/distributor/distributor.h
@@ -23,6 +23,7 @@
#include <vespa/config/config.h>
#include <vespa/vespalib/util/sync.h>
#include <unordered_map>
+#include <queue>
namespace storage {
@@ -191,6 +192,7 @@ private:
bool isMaintenanceReply(const api::StorageReply& reply) const;
void handleStatusRequests();
+ void handle_or_propagate_message(const std::shared_ptr<api::StorageMessage>& msg);
void startExternalOperations();
/**
@@ -252,8 +254,20 @@ private:
mutable std::shared_ptr<lib::Distribution> _distribution;
std::shared_ptr<lib::Distribution> _nextDistribution;
- typedef std::vector<std::shared_ptr<api::StorageMessage> > MessageQueue;
+ using MessageQueue = std::vector<std::shared_ptr<api::StorageMessage>>;
+ struct IndirectHigherPriority {
+ template <typename Lhs, typename Rhs>
+ bool operator()(const Lhs& lhs, const Rhs& rhs) const noexcept {
+ return lhs->getPriority() > rhs->getPriority();
+ }
+ };
+ using ClientRequestPriorityQueue = std::priority_queue<
+ std::shared_ptr<api::StorageMessage>,
+ std::vector<std::shared_ptr<api::StorageMessage>>,
+ IndirectHigherPriority
+ >;
MessageQueue _messageQueue;
+ ClientRequestPriorityQueue _client_request_priority_queue;
MessageQueue _fetchedMessages;
framework::TickingThreadPool& _threadPool;
vespalib::Monitor _statusMonitor;
diff --git a/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp b/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp
index 35f49b6e618..86b04066a5a 100644
--- a/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp
+++ b/storage/src/vespa/storage/persistence/diskmoveoperationhandler.cpp
@@ -45,8 +45,8 @@ DiskMoveOperationHandler::handleBucketDiskMove(BucketDiskMoveCommand& cmd,
bucket.toString().c_str(),
deviceIndex, targetDisk);
- spi::Bucket from(bucket, spi::PartitionId(deviceIndex));
- spi::Bucket to(bucket, spi::PartitionId(targetDisk));
+ spi::Bucket from(document::Bucket(document::BucketSpace::placeHolder(), bucket), spi::PartitionId(deviceIndex));
+ spi::Bucket to(document::Bucket(document::BucketSpace::placeHolder(), bucket), spi::PartitionId(targetDisk));
spi::Result result(
_provider.move(from, spi::PartitionId(targetDisk), context));
diff --git a/storage/src/vespa/storage/persistence/filestorage/modifiedbucketchecker.cpp b/storage/src/vespa/storage/persistence/filestorage/modifiedbucketchecker.cpp
index 7735204c92b..d0002841bbd 100644
--- a/storage/src/vespa/storage/persistence/filestorage/modifiedbucketchecker.cpp
+++ b/storage/src/vespa/storage/persistence/filestorage/modifiedbucketchecker.cpp
@@ -120,7 +120,7 @@ ModifiedBucketChecker::onInternalReply(
bool
ModifiedBucketChecker::requestModifiedBucketsFromProvider()
{
- spi::BucketIdListResult result(_provider.getModifiedBuckets());
+ spi::BucketIdListResult result(_provider.getModifiedBuckets(document::BucketSpace::placeHolder()));
if (result.hasError()) {
LOG(debug, "getModifiedBuckets() failed: %s",
result.toString().c_str());
diff --git a/storage/src/vespa/storage/persistence/mergehandler.cpp b/storage/src/vespa/storage/persistence/mergehandler.cpp
index 37fc36c820a..36e417b65e4 100644
--- a/storage/src/vespa/storage/persistence/mergehandler.cpp
+++ b/storage/src/vespa/storage/persistence/mergehandler.cpp
@@ -956,7 +956,7 @@ MergeHandler::handleMergeBucket(api::MergeBucketCommand& cmd,
_env._component.getClock()));
const document::BucketId& id(cmd.getBucketId());
- spi::Bucket bucket(id, spi::PartitionId(_env._partition));
+ spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
LOG(debug, "MergeBucket(%s) with max timestamp %" PRIu64 ".",
bucket.toString().c_str(), cmd.getMaxTimestamp());
@@ -1182,7 +1182,7 @@ MergeHandler::handleGetBucketDiff(api::GetBucketDiffCommand& cmd,
_env._metrics.getBucketDiff,
_env._component.getClock()));
const document::BucketId& id(cmd.getBucketId());
- spi::Bucket bucket(id, spi::PartitionId(_env._partition));
+ spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
LOG(debug, "GetBucketDiff(%s)", bucket.toString().c_str());
checkResult(_spi.createBucket(bucket, context), bucket, "create bucket");
@@ -1305,7 +1305,7 @@ MergeHandler::handleGetBucketDiffReply(api::GetBucketDiffReply& reply,
{
++_env._metrics.getBucketDiffReply;
document::BucketId id(reply.getBucketId());
- spi::Bucket bucket(id, spi::PartitionId(_env._partition));
+ spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
LOG(debug, "GetBucketDiffReply(%s)", bucket.toString().c_str());
if (!_env._fileStorHandler.isMerging(id)) {
@@ -1389,7 +1389,7 @@ MergeHandler::handleApplyBucketDiff(api::ApplyBucketDiffCommand& cmd,
_env._component.getClock()));
const document::BucketId& id(cmd.getBucketId());
- spi::Bucket bucket(id, spi::PartitionId(_env._partition));
+ spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
LOG(debug, "%s", cmd.toString().c_str());
if (_env._fileStorHandler.isMerging(id)) {
@@ -1485,7 +1485,7 @@ MergeHandler::handleApplyBucketDiffReply(api::ApplyBucketDiffReply& reply,
{
++_env._metrics.applyBucketDiffReply;
document::BucketId id(reply.getBucketId());
- spi::Bucket bucket(id, spi::PartitionId(_env._partition));
+ spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), id), spi::PartitionId(_env._partition));
std::vector<api::ApplyBucketDiffCommand::Entry>& diff(reply.getDiff());
LOG(debug, "%s", reply.toString().c_str());
diff --git a/storage/src/vespa/storage/persistence/persistencethread.cpp b/storage/src/vespa/storage/persistence/persistencethread.cpp
index 4755795526a..0ab512cd63f 100644
--- a/storage/src/vespa/storage/persistence/persistencethread.cpp
+++ b/storage/src/vespa/storage/persistence/persistencethread.cpp
@@ -78,7 +78,7 @@ PersistenceThread::getBucket(const DocumentId& id,
+ "bucket " + bucket.toString() + ".", VESPA_STRLOC);
}
- return spi::Bucket(bucket, spi::PartitionId(_env._partition));
+ return spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), bucket), spi::PartitionId(_env._partition));
}
bool
@@ -231,7 +231,7 @@ PersistenceThread::handleRepairBucket(RepairBucketCommand& cmd)
(cmd.verifyBody() ? "Verifying body" : "Not verifying body"));
api::BucketInfo before = _env.getBucketInfo(cmd.getBucketId());
spi::Result result =
- _spi.maintain(spi::Bucket(cmd.getBucketId(),
+ _spi.maintain(spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
spi::PartitionId(_env._partition)),
cmd.verifyBody() ?
spi::HIGH : spi::LOW);
@@ -257,7 +257,7 @@ PersistenceThread::handleMultiOperation(api::MultiOperationCommand& cmd)
MessageTracker::UP tracker(new MessageTracker(
_env._metrics.multiOp[cmd.getLoadType()],
_env._component.getClock()));
- spi::Bucket b = spi::Bucket(cmd.getBucketId(),
+ spi::Bucket b = spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
spi::PartitionId(_env._partition));
long puts = 0;
long removes = 0;
@@ -314,7 +314,7 @@ PersistenceThread::handleRevert(api::RevertCommand& cmd)
MessageTracker::UP tracker(new MessageTracker(
_env._metrics.revert[cmd.getLoadType()],
_env._component.getClock()));
- spi::Bucket b = spi::Bucket(cmd.getBucketId(),
+ spi::Bucket b = spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
spi::PartitionId(_env._partition));
const std::vector<api::Timestamp> tokens = cmd.getRevertTokens();
for (uint32_t i = 0; i < tokens.size(); ++i) {
@@ -337,7 +337,7 @@ PersistenceThread::handleCreateBucket(api::CreateBucketCommand& cmd)
cmd.getBucketId().toString().c_str());
DUMP_LOGGED_BUCKET_OPERATIONS(cmd.getBucketId());
}
- spi::Bucket spiBucket(cmd.getBucketId(), spi::PartitionId(_env._partition));
+ spi::Bucket spiBucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition));
_spi.createBucket(spiBucket, _context);
if (cmd.getActive()) {
_spi.setActiveState(spiBucket, spi::BucketInfo::ACTIVE);
@@ -406,7 +406,7 @@ PersistenceThread::handleDeleteBucket(api::DeleteBucketCommand& cmd)
api::ReturnCode(api::ReturnCode::ABORTED,
"Bucket was deleted during the merge"));
}
- spi::Bucket bucket(cmd.getBucketId(), spi::PartitionId(_env._partition));
+ spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition));
if (!checkProviderBucketInfoMatches(bucket, cmd.getBucketInfo())) {
return tracker;
}
@@ -463,7 +463,7 @@ PersistenceThread::handleReadBucketList(ReadBucketList& cmd)
_env._metrics.readBucketList,
_env._component.getClock()));
- spi::BucketIdListResult result(_spi.listBuckets(cmd.getPartition()));
+ spi::BucketIdListResult result(_spi.listBuckets(document::BucketSpace::placeHolder(), cmd.getPartition()));
if (checkForError(result, *tracker)) {
ReadBucketListReply::SP reply(new ReadBucketListReply(cmd));
result.getList().swap(reply->getBuckets());
@@ -497,7 +497,7 @@ PersistenceThread::handleCreateIterator(CreateIteratorCommand& cmd)
// _context is reset per command, so it's safe to modify it like this.
_context.setReadConsistency(cmd.getReadConsistency());
spi::CreateIteratorResult result(_spi.createIterator(
- spi::Bucket(cmd.getBucketId(), spi::PartitionId(_env._partition)),
+ spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition)),
*fieldSet,
cmd.getSelection(),
cmd.getIncludedVersions(),
@@ -533,7 +533,7 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd)
return tracker;
}
- spi::Bucket spiBucket(cmd.getBucketId(), spi::PartitionId(_env._partition));
+ spi::Bucket spiBucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition));
SplitBitDetector::Result targetInfo;
if (_env._config.enableMultibitSplitOptimalization) {
targetInfo = SplitBitDetector::detectSplit(
@@ -581,8 +581,8 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd)
#endif
spi::Result result = _spi.split(
spiBucket,
- spi::Bucket(target1, spi::PartitionId(lock1.disk)),
- spi::Bucket(target2, spi::PartitionId(lock2.disk)), _context);
+ spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), target1), spi::PartitionId(lock1.disk)),
+ spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), target2), spi::PartitionId(lock2.disk)), _context);
if (result.hasError()) {
tracker->fail(_env.convertErrorCode(result),
result.getErrorMessage());
@@ -646,7 +646,7 @@ PersistenceThread::handleSplitBucket(api::SplitBucketCommand& cmd)
// to an empty target bucket, since the provider will have
// implicitly erased it by this point.
spi::Bucket createTarget(
- spi::Bucket(targets[i].second.bid,
+ spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), targets[i].second.bid),
spi::PartitionId(targets[i].second.diskIndex)));
LOG(debug,
"Split target %s was empty, but re-creating it since "
@@ -756,15 +756,15 @@ PersistenceThread::handleJoinBuckets(api::JoinBucketsCommand& cmd)
}
#endif
spi::Result result =
- _spi.join(spi::Bucket(firstBucket, spi::PartitionId(lock1.disk)),
- spi::Bucket(secondBucket, spi::PartitionId(lock2.disk)),
- spi::Bucket(cmd.getBucketId(),
+ _spi.join(spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), firstBucket), spi::PartitionId(lock1.disk)),
+ spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), secondBucket), spi::PartitionId(lock2.disk)),
+ spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
spi::PartitionId(_env._partition)),
_context);
if (!checkForError(result, *tracker)) {
return tracker;
}
- result = _spi.flush(spi::Bucket(cmd.getBucketId(),
+ result = _spi.flush(spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
spi::PartitionId(_env._partition)),
_context);
if (!checkForError(result, *tracker)) {
@@ -813,7 +813,7 @@ PersistenceThread::handleSetBucketState(api::SetBucketStateCommand& cmd)
NotificationGuard notifyGuard(*_bucketOwnershipNotifier);
LOG(debug, "handleSetBucketState(): %s", cmd.toString().c_str());
- spi::Bucket bucket(cmd.getBucketId(), spi::PartitionId(_env._partition));
+ spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()), spi::PartitionId(_env._partition));
bool shouldBeActive(cmd.getState() == api::SetBucketStateCommand::ACTIVE);
spi::BucketInfo::ActiveState newState(
shouldBeActive
@@ -861,11 +861,11 @@ PersistenceThread::handleInternalBucketJoin(InternalBucketJoinCommand& cmd)
entry.write();
}
spi::Result result =
- _spi.join(spi::Bucket(cmd.getBucketId(),
+ _spi.join(spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
- spi::Bucket(cmd.getBucketId(),
+ spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
spi::PartitionId(cmd.getDiskOfInstanceToJoin())),
- spi::Bucket(cmd.getBucketId(),
+ spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
spi::PartitionId(cmd.getDiskOfInstanceToKeep())),
_context);
if (checkForError(result, *tracker)) {
@@ -1155,7 +1155,7 @@ PersistenceThread::flushAllReplies(
replies.size(), nputs, nremoves, nother));
}
#endif
- spi::Bucket b(bucketId, spi::PartitionId(_env._partition));
+ spi::Bucket b(document::Bucket(document::BucketSpace::placeHolder(), bucketId), spi::PartitionId(_env._partition));
spi::Result result = _spi.flush(b, _context);
uint32_t errorCode = _env.convertErrorCode(result);
if (errorCode != 0) {
diff --git a/storage/src/vespa/storage/persistence/persistenceutil.cpp b/storage/src/vespa/storage/persistence/persistenceutil.cpp
index 6f0833ef3cd..bc0b11ae15d 100644
--- a/storage/src/vespa/storage/persistence/persistenceutil.cpp
+++ b/storage/src/vespa/storage/persistence/persistenceutil.cpp
@@ -166,7 +166,7 @@ PersistenceUtil::getBucketInfo(const document::BucketId& bId, int disk) const
}
spi::BucketInfoResult response =
- _spi.getBucketInfo(spi::Bucket(bId, spi::PartitionId(disk)));
+ _spi.getBucketInfo(spi::Bucket(document::Bucket(document::BucketSpace::placeHolder(), bId), spi::PartitionId(disk)));
return convertBucketInfo(response.getBucketInfo());
}
diff --git a/storage/src/vespa/storage/persistence/processallhandler.cpp b/storage/src/vespa/storage/persistence/processallhandler.cpp
index fa7ea9f74a2..67b150ec70a 100644
--- a/storage/src/vespa/storage/persistence/processallhandler.cpp
+++ b/storage/src/vespa/storage/persistence/processallhandler.cpp
@@ -87,7 +87,8 @@ ProcessAllHandler::handleRemoveLocation(api::RemoveLocationCommand& cmd,
cmd.getBucketId().toString().c_str(),
cmd.getDocumentSelection().c_str());
- spi::Bucket bucket(cmd.getBucketId(),
+ spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(),
+ cmd.getBucketId()),
spi::PartitionId(_env._partition));
UnrevertableRemoveEntryProcessor processor(_spi, bucket, context);
BucketProcessor::iterateAll(_spi,
@@ -117,7 +118,7 @@ ProcessAllHandler::handleStatBucket(api::StatBucketCommand& cmd,
ost << "Persistence bucket " << cmd.getBucketId()
<< ", partition " << _env._partition << "\n";
- spi::Bucket bucket(cmd.getBucketId(),
+ spi::Bucket bucket(document::Bucket(document::BucketSpace::placeHolder(), cmd.getBucketId()),
spi::PartitionId(_env._partition));
StatEntryProcessor processor(ost);
BucketProcessor::iterateAll(_spi,
diff --git a/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp b/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
index 80873829064..15b0a469b35 100644
--- a/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
+++ b/storage/src/vespa/storage/persistence/provider_error_wrapper.cpp
@@ -49,9 +49,9 @@ ProviderErrorWrapper::getPartitionStates() const
}
spi::BucketIdListResult
-ProviderErrorWrapper::listBuckets(spi::PartitionId partitionId) const
+ProviderErrorWrapper::listBuckets(BucketSpace bucketSpace, spi::PartitionId partitionId) const
{
- return checkResult(_impl.listBuckets(partitionId));
+ return checkResult(_impl.listBuckets(bucketSpace, partitionId));
}
spi::Result
@@ -164,9 +164,9 @@ ProviderErrorWrapper::deleteBucket(const spi::Bucket& bucket,
}
spi::BucketIdListResult
-ProviderErrorWrapper::getModifiedBuckets() const
+ProviderErrorWrapper::getModifiedBuckets(BucketSpace bucketSpace) const
{
- return checkResult(_impl.getModifiedBuckets());
+ return checkResult(_impl.getModifiedBuckets(bucketSpace));
}
spi::Result
diff --git a/storage/src/vespa/storage/persistence/provider_error_wrapper.h b/storage/src/vespa/storage/persistence/provider_error_wrapper.h
index 84adf37cbc3..122837e75ed 100644
--- a/storage/src/vespa/storage/persistence/provider_error_wrapper.h
+++ b/storage/src/vespa/storage/persistence/provider_error_wrapper.h
@@ -43,7 +43,7 @@ public:
spi::Result initialize() override;
spi::PartitionStateListResult getPartitionStates() const override;
- spi::BucketIdListResult listBuckets(spi::PartitionId) const override;
+ spi::BucketIdListResult listBuckets(BucketSpace bucketSpace, spi::PartitionId) const override;
spi::Result setClusterState(const spi::ClusterState&) override;
spi::Result setActiveState(const spi::Bucket& bucket, spi::BucketInfo::ActiveState newState) override;
spi::BucketInfoResult getBucketInfo(const spi::Bucket&) const override;
@@ -59,7 +59,7 @@ public:
spi::Result destroyIterator(spi::IteratorId, spi::Context&) override;
spi::Result createBucket(const spi::Bucket&, spi::Context&) override;
spi::Result deleteBucket(const spi::Bucket&, spi::Context&) override;
- spi::BucketIdListResult getModifiedBuckets() const override;
+ spi::BucketIdListResult getModifiedBuckets(BucketSpace bucketSpace) const override;
spi::Result maintain(const spi::Bucket& bucket, spi::MaintenanceLevel level) override;
spi::Result split(const spi::Bucket& source, const spi::Bucket& target1, const spi::Bucket& target2, spi::Context&) override;
spi::Result join(const spi::Bucket& source1, const spi::Bucket& source2, const spi::Bucket& target, spi::Context&) override;
diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.cpp b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
index 79d57a8f7e4..e2861ef42cd 100644
--- a/storage/src/vespa/storage/storageserver/servicelayernode.cpp
+++ b/storage/src/vespa/storage/storageserver/servicelayernode.cpp
@@ -17,13 +17,15 @@
#include <vespa/storage/bucketmover/bucketmover.h>
#include <vespa/storage/persistence/filestorage/filestormanager.h>
#include <vespa/storage/persistence/filestorage/modifiedbucketchecker.h>
-#include <vespa/storage/persistence/provider_error_wrapper.h>
#include <vespa/persistence/spi/exceptions.h>
#include <vespa/messagebus/rpcmessagebus.h>
#include <vespa/log/log.h>
LOG_SETUP(".node.servicelayer");
+
+using StorServerConfigBuilder = vespa::config::content::core::StorServerConfigBuilder;
+
namespace storage {
ServiceLayerNode::ServiceLayerNode(
@@ -53,8 +55,7 @@ void ServiceLayerNode::init()
throw spi::HandledException("Failed provider init: " + initResult.toString(), VESPA_STRLOC);
}
- spi::PartitionStateListResult result(
- _persistenceProvider.getPartitionStates());
+ spi::PartitionStateListResult result(_persistenceProvider.getPartitionStates());
if (result.hasError()) {
LOG(error, "Failed to get partition list from persistence provider: %s", result.toString().c_str());
throw spi::HandledException("Failed to get partition list: " + result.toString(), VESPA_STRLOC);
@@ -62,8 +63,7 @@ void ServiceLayerNode::init()
_partitions = result.getList();
if (_partitions.size() == 0) {
LOG(error, "No partitions in persistence provider. See documentation "
- "for your persistence provider as to how to set up "
- "partitions in it.");
+ "for your persistence provider as to how to set up partitions in it.");
throw spi::HandledException("No partitions in provider", VESPA_STRLOC);
}
try{
@@ -93,7 +93,7 @@ ServiceLayerNode::subscribeToConfigs()
{
StorageNode::subscribeToConfigs();
_configFetcher.reset(new config::ConfigFetcher(_configUri.getContext()));
- _configFetcher->subscribe<vespa::config::storage::StorDevicesConfig>(_configUri.getConfigId(), this);
+ _configFetcher->subscribe<StorDevicesConfig>(_configUri.getConfigId(), this);
vespalib::LockGuard configLockGuard(_configLock);
_deviceConfig = std::move(_newDevicesConfig);
@@ -123,8 +123,7 @@ ServiceLayerNode::initializeNodeSpecific()
// Give node state to mount point initialization, such that we can
// get disk count and state of unavailable disks set in reported
// node state.
- NodeStateUpdater::Lock::SP lock(
- _component->getStateUpdater().grabStateChangeLock());
+ NodeStateUpdater::Lock::SP lock(_component->getStateUpdater().grabStateChangeLock());
lib::NodeState ns(*_component->getStateUpdater().getReportedNodeState());
ns.setDiskCount(_partitions.size());
@@ -166,12 +165,12 @@ ServiceLayerNode::initializeNodeSpecific()
if (DIFFER(a)) { LOG(warning, "Live config failure: %s.", b); }
void
-ServiceLayerNode::handleLiveConfigUpdate()
+ServiceLayerNode::handleLiveConfigUpdate(const InitialGuard & initGuard)
{
if (_newServerConfig) {
bool updated = false;
vespa::config::content::core::StorServerConfigBuilder oldC(*_serverConfig);
- vespa::config::content::core::StorServerConfig& newC(*_newServerConfig);
+ StorServerConfig& newC(*_newServerConfig);
DIFFERWARN(diskCount, "Cannot alter partition count of node live");
{
updated = false;
@@ -207,12 +206,11 @@ ServiceLayerNode::handleLiveConfigUpdate()
}
}
}
- StorageNode::handleLiveConfigUpdate();
+ StorageNode::handleLiveConfigUpdate(initGuard);
}
void
-ServiceLayerNode::configure(
- std::unique_ptr<vespa::config::storage::StorDevicesConfig> config)
+ServiceLayerNode::configure(std::unique_ptr<StorDevicesConfig> config)
{
// When we get config, we try to grab the config lock to ensure noone
// else is doing configuration work, and then we write the new config
@@ -223,20 +221,19 @@ ServiceLayerNode::configure(
_newDevicesConfig = std::move(config);
}
if (_distributionConfig) {
- handleLiveConfigUpdate();
+ InitialGuard concurrent_config_guard(_initial_config_mutex);
+ handleLiveConfigUpdate(concurrent_config_guard);
}
}
VisitorMessageSession::UP
ServiceLayerNode::createSession(Visitor& visitor, VisitorThread& thread)
{
- MessageBusVisitorMessageSession::UP mbusSession(
- new MessageBusVisitorMessageSession(visitor, thread));
+ auto mbusSession = std::make_unique<MessageBusVisitorMessageSession>(visitor, thread);
mbus::SourceSessionParams srcParams;
srcParams.setThrottlePolicy(mbus::IThrottlePolicy::SP());
srcParams.setReplyHandler(*mbusSession);
- mbusSession->setSourceSession(
- _communicationManager->getMessageBus().getMessageBus().createSourceSession(srcParams));
+ mbusSession->setSourceSession(_communicationManager->getMessageBus().getMessageBus().createSourceSession(srcParams));
return VisitorMessageSession::UP(std::move(mbusSession));
}
@@ -270,17 +267,13 @@ ServiceLayerNode::createChain()
chain->push_back(StorageLink::UP(new bucketmover::BucketMover(_configUri, compReg)));
chain->push_back(StorageLink::UP(new StorageBucketDBInitializer(
_configUri, _partitions, getDoneInitializeHandler(), compReg)));
- chain->push_back(StorageLink::UP(new BucketManager(
- _configUri, _context.getComponentRegister())));
+ chain->push_back(StorageLink::UP(new BucketManager(_configUri, _context.getComponentRegister())));
chain->push_back(StorageLink::UP(new VisitorManager(
- _configUri, _context.getComponentRegister(),
- *this, _externalVisitors)));
+ _configUri, _context.getComponentRegister(), *this, _externalVisitors)));
chain->push_back(StorageLink::UP(new ModifiedBucketChecker(
- _context.getComponentRegister(), _persistenceProvider,
- _configUri)));
+ _context.getComponentRegister(), _persistenceProvider, _configUri)));
chain->push_back(StorageLink::UP(_fileStorManager = new FileStorManager(
- _configUri, _partitions, _persistenceProvider,
- _context.getComponentRegister())));
+ _configUri, _partitions, _persistenceProvider, _context.getComponentRegister())));
chain->push_back(StorageLink::UP(releaseStateManager().release()));
// Lifetimes of all referenced components shall outlive the last call going
diff --git a/storage/src/vespa/storage/storageserver/servicelayernode.h b/storage/src/vespa/storage/storageserver/servicelayernode.h
index 848cc1d1475..12446152b90 100644
--- a/storage/src/vespa/storage/storageserver/servicelayernode.h
+++ b/storage/src/vespa/storage/storageserver/servicelayernode.h
@@ -28,6 +28,7 @@ class ServiceLayerNode
private config::IFetcherCallback<vespa::config::storage::StorDevicesConfig>
{
+ using StorDevicesConfig = vespa::config::storage::StorDevicesConfig;
ServiceLayerNodeContext& _context;
spi::PersistenceProvider& _persistenceProvider;
spi::PartitionStateList _partitions;
@@ -36,8 +37,8 @@ class ServiceLayerNode
// FIXME: Should probably use the fetcher in StorageNode
std::unique_ptr<config::ConfigFetcher> _configFetcher;
- std::unique_ptr<vespa::config::storage::StorDevicesConfig> _deviceConfig;
- std::unique_ptr<vespa::config::storage::StorDevicesConfig> _newDevicesConfig;
+ std::unique_ptr<StorDevicesConfig> _deviceConfig;
+ std::unique_ptr<StorDevicesConfig> _newDevicesConfig;
FileStorManager* _fileStorManager;
bool _init_has_been_called;
bool _noUsablePartitionMode;
@@ -63,8 +64,8 @@ public:
private:
void subscribeToConfigs() override;
void initializeNodeSpecific() override;
- void handleLiveConfigUpdate() override;
- void configure(std::unique_ptr<vespa::config::storage::StorDevicesConfig> config) override;
+ void handleLiveConfigUpdate(const InitialGuard & initGuard) override;
+ void configure(std::unique_ptr<StorDevicesConfig> config) override;
VisitorMessageSession::UP createSession(Visitor&, VisitorThread&) override;
documentapi::Priority::Value toDocumentPriority(uint8_t storagePriority) const override;
std::unique_ptr<StorageLink> createChain() override;
diff --git a/storage/src/vespa/storage/storageserver/storagenode.cpp b/storage/src/vespa/storage/storageserver/storagenode.cpp
index 85f54431cfb..855efaed6aa 100644
--- a/storage/src/vespa/storage/storageserver/storagenode.cpp
+++ b/storage/src/vespa/storage/storageserver/storagenode.cpp
@@ -19,6 +19,10 @@
#include <vespa/log/log.h>
LOG_SETUP(".node.server");
+using vespa::config::content::StorDistributionConfigBuilder;
+using vespa::config::content::core::StorServerConfigBuilder;
+using std::make_shared;
+
namespace storage {
namespace {
@@ -86,10 +90,11 @@ void
StorageNode::subscribeToConfigs()
{
_configFetcher.reset(new config::ConfigFetcher(_configUri.getContext()));
- _configFetcher->subscribe<vespa::config::content::StorDistributionConfig>(_configUri.getConfigId(), this);
- _configFetcher->subscribe<vespa::config::content::UpgradingConfig>(_configUri.getConfigId(), this);
- _configFetcher->subscribe<vespa::config::content::core::StorServerConfig>(_configUri.getConfigId(), this);
- _configFetcher->subscribe<vespa::config::content::core::StorPrioritymappingConfig>(_configUri.getConfigId(), this);
+ _configFetcher->subscribe<StorDistributionConfig>(_configUri.getConfigId(), this);
+ _configFetcher->subscribe<UpgradingConfig>(_configUri.getConfigId(), this);
+ _configFetcher->subscribe<StorServerConfig>(_configUri.getConfigId(), this);
+ _configFetcher->subscribe<StorPrioritymappingConfig>(_configUri.getConfigId(), this);
+
_configFetcher->start();
vespalib::LockGuard configLockGuard(_configLock);
@@ -99,10 +104,13 @@ StorageNode::subscribeToConfigs()
_priorityConfig = std::move(_newPriorityConfig);
}
-
void
StorageNode::initialize()
{
+ // Avoid racing with concurrent reconfigurations before we've set up the entire
+ // node component stack.
+ std::lock_guard<std::mutex> concurrent_config_guard(_initial_config_mutex);
+
_context.getComponentRegister().registerShutdownListener(*this);
// Fetch configs needed first. These functions will just grab the config
@@ -116,21 +124,14 @@ StorageNode::initialize()
// available
_rootFolder = _serverConfig->rootFolder;
- _context.getComponentRegister().setNodeInfo(
- _serverConfig->clusterName, getNodeType(),
- _serverConfig->nodeIndex);
- _context.getComponentRegister().setLoadTypes(
- documentapi::LoadTypeSet::SP(
- new documentapi::LoadTypeSet(_configUri)));
- _context.getComponentRegister().setBucketIdFactory(
- document::BucketIdFactory());
- _context.getComponentRegister().setDistribution(
- lib::Distribution::SP(new lib::Distribution(*_distributionConfig)));
+ _context.getComponentRegister().setNodeInfo(_serverConfig->clusterName, getNodeType(), _serverConfig->nodeIndex);
+ _context.getComponentRegister().setLoadTypes(make_shared<documentapi::LoadTypeSet>(_configUri));
+ _context.getComponentRegister().setBucketIdFactory(document::BucketIdFactory());
+ _context.getComponentRegister().setDistribution(make_shared<lib::Distribution>(*_distributionConfig));
_context.getComponentRegister().setPriorityConfig(*_priorityConfig);
_metrics.reset(new StorageMetricSet);
- _component.reset(new StorageComponent(
- _context.getComponentRegister(), "storagenode"));
+ _component.reset(new StorageComponent(_context.getComponentRegister(), "storagenode"));
_component->registerMetric(*_metrics);
if (!_context.getComponentRegister().hasMetricManager()) {
_metricManager.reset(new metrics::MetricManager);
@@ -168,17 +169,13 @@ StorageNode::initialize()
_generationFetcher));
// Start deadlock detector
- _deadLockDetector.reset(new DeadLockDetector(
- _context.getComponentRegister()));
- _deadLockDetector->enableWarning(
- _serverConfig->enableDeadLockDetectorWarnings);
+ _deadLockDetector.reset(new DeadLockDetector(_context.getComponentRegister()));
+ _deadLockDetector->enableWarning(_serverConfig->enableDeadLockDetectorWarnings);
_deadLockDetector->enableShutdown(_serverConfig->enableDeadLockDetector);
_deadLockDetector->setProcessSlack(framework::MilliSecTime(
- static_cast<uint32_t>(
- _serverConfig->deadLockDetectorTimeoutSlack * 1000)));
+ static_cast<uint32_t>(_serverConfig->deadLockDetectorTimeoutSlack * 1000)));
_deadLockDetector->setWaitSlack(framework::MilliSecTime(
- static_cast<uint32_t>(
- _serverConfig->deadLockDetectorTimeoutSlack * 1000)));
+ static_cast<uint32_t>(_serverConfig->deadLockDetectorTimeoutSlack * 1000)));
_chain.reset(createChain().release());
@@ -188,7 +185,7 @@ StorageNode::initialize()
// reinitializing metric manager often.
_context.getComponentRegister().getMetricManager().init(_configUri, _context.getThreadPool());
- if (_chain.get() != 0) {
+ if (_chain) {
LOG(debug, "Storage chain configured. Calling open()");
_chain->open();
}
@@ -208,9 +205,8 @@ void
StorageNode::initializeStatusWebServer()
{
if (_singleThreadedDebugMode) return;
- _statusWebServer.reset(new StatusWebServer(
- _context.getComponentRegister(), _context.getComponentRegister(),
- _configUri));
+ _statusWebServer.reset(new StatusWebServer(_context.getComponentRegister(),
+ _context.getComponentRegister(), _configUri));
}
#define DIFFER(a) (!(oldC.a == newC.a))
@@ -223,13 +219,13 @@ StorageNode::setNewDocumentRepo(const document::DocumentTypeRepo::SP& repo)
{
vespalib::LockGuard configLockGuard(_configLock);
_context.getComponentRegister().setDocumentTypeRepo(repo);
- if (_communicationManager != 0) {
+ if (_communicationManager != nullptr) {
_communicationManager->updateMessagebusProtocol(repo);
}
}
void
-StorageNode::updateUpgradeFlag(const vespa::config::content::UpgradingConfig& config)
+StorageNode::updateUpgradeFlag(const UpgradingConfig& config)
{
framework::UpgradeFlags flag(framework::NO_UPGRADE_SPECIAL_HANDLING_ACTIVE);
if (config.upgradingMajorTo) {
@@ -245,69 +241,62 @@ StorageNode::updateUpgradeFlag(const vespa::config::content::UpgradingConfig& co
}
void
-StorageNode::handleLiveConfigUpdate()
+StorageNode::handleLiveConfigUpdate(const InitialGuard & initGuard)
{
- // Make sure we don't conflict with initialize or shutdown threads.
+ // Make sure we don't conflict with initialize or shutdown threads.
+ (void) initGuard;
vespalib::LockGuard configLockGuard(_configLock);
- // If storage haven't initialized, ignore. Initialize code will handle
- // this config.
- if (_chain.get() == 0) return;
+
+ assert(_chain);
// If we get here, initialize is done running. We have to handle changes
// we want to handle.
- if (_newServerConfig.get() != 0) {
+ if (_newServerConfig) {
bool updated = false;
- vespa::config::content::core::StorServerConfigBuilder oldC(*_serverConfig);
- vespa::config::content::core::StorServerConfig& newC(*_newServerConfig);
+ StorServerConfigBuilder oldC(*_serverConfig);
+ StorServerConfig& newC(*_newServerConfig);
DIFFERWARN(rootFolder, "Cannot alter root folder of node live");
DIFFERWARN(clusterName, "Cannot alter cluster name of node live");
DIFFERWARN(nodeIndex, "Cannot alter node index of node live");
DIFFERWARN(isDistributor, "Cannot alter role of node live");
{
if (DIFFER(memorytouse)) {
- LOG(info, "Live config update: Memory to use changed "
- "from %" PRId64 " to %" PRId64 ".",
+ LOG(info, "Live config update: Memory to use changed from %" PRId64 " to %" PRId64 ".",
oldC.memorytouse, newC.memorytouse);
ASSIGN(memorytouse);
- _context.getMemoryManager().setMaximumMemoryUsage(
- newC.memorytouse);
+ _context.getMemoryManager().setMaximumMemoryUsage(newC.memorytouse);
}
}
- _serverConfig.reset(new vespa::config::content::core::StorServerConfig(oldC));
- _newServerConfig.reset(0);
+ _serverConfig.reset(new StorServerConfig(oldC));
+ _newServerConfig.reset();
(void)updated;
}
- if (_newDistributionConfig.get() != 0) {
- vespa::config::content::StorDistributionConfigBuilder oldC(*_distributionConfig);
- vespa::config::content::StorDistributionConfig& newC(*_newDistributionConfig);
+ if (_newDistributionConfig) {
+ StorDistributionConfigBuilder oldC(*_distributionConfig);
+ StorDistributionConfig& newC(*_newDistributionConfig);
bool updated = false;
if (DIFFER(redundancy)) {
- LOG(info, "Live config update: Altering redundancy from %u to %u.",
- oldC.redundancy, newC.redundancy);
+ LOG(info, "Live config update: Altering redundancy from %u to %u.", oldC.redundancy, newC.redundancy);
ASSIGN(redundancy);
}
if (DIFFER(initialRedundancy)) {
- LOG(info, "Live config update: Altering initial redundancy "
- "from %u to %u.",
+ LOG(info, "Live config update: Altering initial redundancy from %u to %u.",
oldC.initialRedundancy, newC.initialRedundancy);
ASSIGN(initialRedundancy);
}
if (DIFFER(ensurePrimaryPersisted)) {
- LOG(info, "Live config update: Now%s requiring primary copy to "
- "succeed for n of m operation to succeed.",
+ LOG(info, "Live config update: Now%s requiring primary copy to succeed for n of m operation to succeed.",
newC.ensurePrimaryPersisted ? "" : " not");
ASSIGN(ensurePrimaryPersisted);
}
if (DIFFER(activePerLeafGroup)) {
- LOG(info, "Live config update: Active per leaf group setting "
- "altered from %s to %s",
+ LOG(info, "Live config update: Active per leaf group setting altered from %s to %s",
oldC.activePerLeafGroup ? "true" : "false",
newC.activePerLeafGroup ? "true" : "false");
ASSIGN(activePerLeafGroup);
}
if (DIFFER(readyCopies)) {
- LOG(info, "Live config update: Altering number of searchable "
- "copies from %u to %u",
+ LOG(info, "Live config update: Altering number of searchable copies from %u to %u",
oldC.readyCopies, newC.readyCopies);
ASSIGN(readyCopies);
}
@@ -316,35 +305,28 @@ StorageNode::handleLiveConfigUpdate()
ASSIGN(group);
}
if (DIFFER(diskDistribution)) {
- LOG(info, "Live config update: Disk distribution altered from "
- "%s to %s.",
- vespa::config::content::StorDistributionConfig::getDiskDistributionName(
- oldC.diskDistribution).c_str(),
- vespa::config::content::StorDistributionConfig::getDiskDistributionName(
- newC.diskDistribution).c_str());
+ LOG(info, "Live config update: Disk distribution altered from %s to %s.",
+ StorDistributionConfig::getDiskDistributionName(oldC.diskDistribution).c_str(),
+ StorDistributionConfig::getDiskDistributionName(newC.diskDistribution).c_str());
ASSIGN(diskDistribution);
}
- _distributionConfig.reset(new vespa::config::content::StorDistributionConfig(oldC));
- _newDistributionConfig.reset(0);
+ _distributionConfig.reset(new StorDistributionConfig(oldC));
+ _newDistributionConfig.reset();
if (updated) {
- _context.getComponentRegister().setDistribution(
- lib::Distribution::SP(new lib::Distribution(oldC)));
- for (StorageLink* link = _chain.get(); link != 0;
- link = link->getNextLink())
- {
+ _context.getComponentRegister().setDistribution(make_shared<lib::Distribution>(oldC));
+ for (StorageLink* link = _chain.get(); link != 0; link = link->getNextLink()) {
link->storageDistributionChanged();
}
}
}
- if (_newClusterConfig.get() != 0) {
+ if (_newClusterConfig) {
updateUpgradeFlag(*_newClusterConfig);
if (*_clusterConfig != *_newClusterConfig) {
- LOG(warning, "Live config failure: "
- "Cannot alter cluster config of node live.");
+ LOG(warning, "Live config failure: Cannot alter cluster config of node live.");
}
- _newClusterConfig.reset(0);
+ _newClusterConfig.reset();
}
- if (_newPriorityConfig.get() != 0) {
+ if (_newPriorityConfig) {
_priorityConfig = std::move(_newPriorityConfig);
_context.getComponentRegister().setPriorityConfig(*_priorityConfig);
}
@@ -354,31 +336,27 @@ void
StorageNode::notifyDoneInitializing()
{
bool isDistributor = (getNodeType() == lib::NodeType::DISTRIBUTOR);
- LOG(info, "%s node ready. Done initializing. Giving out of sequence "
- "metric event. Config id is %s",
+ LOG(info, "%s node ready. Done initializing. Giving out of sequence metric event. Config id is %s",
isDistributor ? "Distributor" : "Storage", _configUri.getConfigId().c_str());
_context.getComponentRegister().getMetricManager().forceEventLogging();
if (!_singleThreadedDebugMode) {
EV_STARTED(isDistributor ? "distributor" : "storagenode");
}
- NodeStateUpdater::Lock::SP lock(
- _component->getStateUpdater().grabStateChangeLock());
+ NodeStateUpdater::Lock::SP lock(_component->getStateUpdater().grabStateChangeLock());
lib::NodeState ns(*_component->getStateUpdater().getReportedNodeState());
ns.setState(lib::State::UP);
_component->getStateUpdater().setReportedNodeState(ns);
_chain->doneInit();
}
-StorageNode::~StorageNode()
-{
-}
+StorageNode::~StorageNode() = default;
void
StorageNode::removeConfigSubscriptions()
{
LOG(debug, "Removing config subscribers");
- _configFetcher.reset(0);
+ _configFetcher.reset();
}
void
@@ -387,8 +365,7 @@ StorageNode::shutdown()
// Try to shut down in opposite order of initialize. Bear in mind that
// we might be shutting down after init exception causing only parts
// of the server to have initialize
- LOG(debug, "Shutting down storage node of type %s",
- getNodeType().toString().c_str());
+ LOG(debug, "Shutting down storage node of type %s", getNodeType().toString().c_str());
if (!_attemptedStopped) {
LOG(warning, "Storage killed before requestShutdown() was called. No "
"reason has been given for why we're stopping.");
@@ -396,7 +373,7 @@ StorageNode::shutdown()
// Remove the subscription to avoid more callbacks from config
removeConfigSubscriptions();
- if (_chain.get()) {
+ if (_chain) {
LOG(debug, "Closing storage chain");
_chain->close();
LOG(debug, "Flushing storage chain");
@@ -409,56 +386,54 @@ StorageNode::shutdown()
}
if (!_singleThreadedDebugMode) {
- EV_STOPPING(getNodeType() == lib::NodeType::DISTRIBUTOR
- ? "distributor" : "storagenode", "Stopped");
+ EV_STOPPING(getNodeType() == lib::NodeType::DISTRIBUTOR ? "distributor" : "storagenode", "Stopped");
}
if (_context.getComponentRegister().hasMetricManager()) {
- LOG(debug, "Stopping metric manager. "
- "(Deleting chain may remove metrics)");
+ LOG(debug, "Stopping metric manager. (Deleting chain may remove metrics)");
_context.getComponentRegister().getMetricManager().stop();
}
// Delete the status web server before the actual status providers, to
// ensure that web server does not query providers during shutdown
- _statusWebServer.reset(0);
+ _statusWebServer.reset();
// For this to be safe, noone can touch the state updater after we start
// deleting the storage chain
LOG(debug, "Removing state updater pointer as we're about to delete it.");
- if (_chain.get()) {
+ if (_chain) {
LOG(debug, "Deleting storage chain");
- _chain.reset(0);
+ _chain.reset();
}
- if (_statusMetrics.get()) {
+ if (_statusMetrics) {
LOG(debug, "Deleting status metrics consumer");
- _statusMetrics.reset(0);
+ _statusMetrics.reset();
}
- if (_stateReporter.get()) {
+ if (_stateReporter) {
LOG(debug, "Deleting state reporter");
- _stateReporter.reset(0);
+ _stateReporter.reset();
}
- if (_memoryStatusViewer.get()) {
+ if (_memoryStatusViewer) {
LOG(debug, "Deleting memory status viewer");
- _memoryStatusViewer.reset(0);
+ _memoryStatusViewer.reset();
}
- if (_stateManager.get()) {
+ if (_stateManager) {
LOG(debug, "Deleting state manager");
- _stateManager.reset(0);
+ _stateManager.reset();
}
- if (_deadLockDetector.get()) {
+ if (_deadLockDetector) {
LOG(debug, "Deleting dead lock detector");
- _deadLockDetector.reset(0);
+ _deadLockDetector.reset();
}
- if (_metricManager.get()) {
+ if (_metricManager) {
LOG(debug, "Deleting metric manager");
- _metricManager.reset(0);
+ _metricManager.reset();
}
- if (_metrics.get()) {
+ if (_metrics) {
LOG(debug, "Deleting metric set");
_metrics.reset();
}
- if (_component.get()) {
+ if (_component) {
LOG(debug, "Deleting component");
_component.reset();
}
@@ -466,7 +441,7 @@ StorageNode::shutdown()
LOG(debug, "Done shutting down node");
}
-void StorageNode::configure(std::unique_ptr<vespa::config::content::core::StorServerConfig> config)
+void StorageNode::configure(std::unique_ptr<StorServerConfig> config)
{
// When we get config, we try to grab the config lock to ensure noone
// else is doing configuration work, and then we write the new config
@@ -476,11 +451,14 @@ void StorageNode::configure(std::unique_ptr<vespa::config::content::core::StorSe
vespalib::LockGuard configLockGuard(_configLock);
_newServerConfig.reset(config.release());
}
- if (_serverConfig.get() != 0) handleLiveConfigUpdate();
+ if (_serverConfig) {
+ InitialGuard concurrent_config_guard(_initial_config_mutex);
+ handleLiveConfigUpdate(concurrent_config_guard);
+ }
}
void
-StorageNode::configure(std::unique_ptr<vespa::config::content::UpgradingConfig> config)
+StorageNode::configure(std::unique_ptr<UpgradingConfig> config)
{
// When we get config, we try to grab the config lock to ensure noone
// else is doing configuration work, and then we write the new config
@@ -490,11 +468,14 @@ StorageNode::configure(std::unique_ptr<vespa::config::content::UpgradingConfig>
vespalib::LockGuard configLockGuard(_configLock);
_newClusterConfig.reset(config.release());
}
- if (_clusterConfig.get() != 0) handleLiveConfigUpdate();
+ if (_clusterConfig) {
+ InitialGuard concurrent_config_guard(_initial_config_mutex);
+ handleLiveConfigUpdate(concurrent_config_guard);
+ }
}
void
-StorageNode::configure(std::unique_ptr<vespa::config::content::StorDistributionConfig> config)
+StorageNode::configure(std::unique_ptr<StorDistributionConfig> config)
{
// When we get config, we try to grab the config lock to ensure noone
// else is doing configuration work, and then we write the new config
@@ -504,17 +485,23 @@ StorageNode::configure(std::unique_ptr<vespa::config::content::StorDistributionC
vespalib::LockGuard configLockGuard(_configLock);
_newDistributionConfig.reset(config.release());
}
- if (_distributionConfig.get() != 0) handleLiveConfigUpdate();
+ if (_distributionConfig) {
+ InitialGuard concurrent_config_guard(_initial_config_mutex);
+ handleLiveConfigUpdate(concurrent_config_guard);
+ }
}
void
-StorageNode::configure(std::unique_ptr<vespa::config::content::core::StorPrioritymappingConfig> config)
+StorageNode::configure(std::unique_ptr<StorPrioritymappingConfig> config)
{
{
vespalib::LockGuard configLockGuard(_configLock);
_newPriorityConfig.reset(config.release());
}
- if (_priorityConfig.get() != 0) handleLiveConfigUpdate();
+ if (_priorityConfig) {
+ InitialGuard concurrent_config_guard(_initial_config_mutex);
+ handleLiveConfigUpdate(concurrent_config_guard);
+ }
}
void StorageNode::configure(std::unique_ptr<document::DocumenttypesConfig> config,
@@ -527,7 +514,10 @@ void StorageNode::configure(std::unique_ptr<document::DocumenttypesConfig> confi
vespalib::LockGuard configLockGuard(_configLock);
_newDoctypesConfig.reset(config.release());
}
- if (_doctypesConfig.get() != 0) handleLiveConfigUpdate();
+ if (_doctypesConfig) {
+ InitialGuard concurrent_config_guard(_initial_config_mutex);
+ handleLiveConfigUpdate(concurrent_config_guard);
+ }
}
bool
@@ -548,10 +538,8 @@ StorageNode::waitUntilInitialized(uint32_t timeout) {
clock.getTimeInMillis() + framework::MilliSecTime(1000 * timeout));
while (true) {
{
- NodeStateUpdater::Lock::SP lock(
- _component->getStateUpdater().grabStateChangeLock());
- lib::NodeState nodeState(
- *_component->getStateUpdater().getReportedNodeState());
+ NodeStateUpdater::Lock::SP lock(_component->getStateUpdater().grabStateChangeLock());
+ lib::NodeState nodeState(*_component->getStateUpdater().getReportedNodeState());
if (nodeState.getState() == lib::State::UP) break;
}
FastOS_Thread::Sleep(10);
@@ -580,7 +568,6 @@ StorageNode::requestShutdown(vespalib::stringref reason)
_attemptedStopped = true;
}
-
void
StorageNode::notifyPartitionDown(int partId, vespalib::stringref reason)
{
@@ -603,7 +590,6 @@ StorageNode::notifyPartitionDown(int partId, vespalib::stringref reason)
_component->getStateUpdater().setReportedNodeState(nodeState);
}
-
std::unique_ptr<StateManager>
StorageNode::releaseStateManager() {
return std::move(_stateManager);
diff --git a/storage/src/vespa/storage/storageserver/storagenode.h b/storage/src/vespa/storage/storageserver/storagenode.h
index 753f3c85330..9b727ef3e0c 100644
--- a/storage/src/vespa/storage/storageserver/storagenode.h
+++ b/storage/src/vespa/storage/storageserver/storagenode.h
@@ -26,6 +26,7 @@
#include <vespa/document/config/config-documenttypes.h>
#include <vespa/config-upgrading.h>
#include <vespa/config-stor-distribution.h>
+#include <mutex>
namespace document { class DocumentTypeRepo; }
@@ -95,7 +96,11 @@ public:
// For testing
StorageLink* getChain() { return _chain.get(); }
virtual void initializeStatusWebServer();
-
+protected:
+ using StorServerConfig = vespa::config::content::core::StorServerConfig;
+ using UpgradingConfig = vespa::config::content::UpgradingConfig;
+ using StorDistributionConfig = vespa::config::content::StorDistributionConfig;
+ using StorPrioritymappingConfig = vespa::config::content::core::StorPrioritymappingConfig;
private:
bool _singleThreadedDebugMode;
// Subscriptions to config
@@ -128,29 +133,31 @@ private:
std::unique_ptr<StorageLink> _chain;
/** Implementation of config callbacks. */
- void configure(std::unique_ptr<vespa::config::content::core::StorServerConfig> config) override;
- void configure(std::unique_ptr<vespa::config::content::UpgradingConfig> config) override;
- void configure(std::unique_ptr<vespa::config::content::StorDistributionConfig> config) override;
- void configure(std::unique_ptr<vespa::config::content::core::StorPrioritymappingConfig>) override;
+ void configure(std::unique_ptr<StorServerConfig> config) override;
+ void configure(std::unique_ptr<UpgradingConfig> config) override;
+ void configure(std::unique_ptr<StorDistributionConfig> config) override;
+ void configure(std::unique_ptr<StorPrioritymappingConfig>) override;
virtual void configure(std::unique_ptr<document::DocumenttypesConfig> config,
bool hasChanged, int64_t generation);
- void updateUpgradeFlag(const vespa::config::content::UpgradingConfig&);
+ void updateUpgradeFlag(const UpgradingConfig&);
protected:
// Lock taken while doing configuration of the server.
vespalib::Lock _configLock;
+ std::mutex _initial_config_mutex;
+ using InitialGuard = std::lock_guard<std::mutex>;
// Current running config. Kept, such that we can see what has been
// changed in live config updates.
- std::unique_ptr<vespa::config::content::core::StorServerConfig> _serverConfig;
- std::unique_ptr<vespa::config::content::UpgradingConfig> _clusterConfig;
- std::unique_ptr<vespa::config::content::StorDistributionConfig> _distributionConfig;
- std::unique_ptr<vespa::config::content::core::StorPrioritymappingConfig> _priorityConfig;
+ std::unique_ptr<StorServerConfig> _serverConfig;
+ std::unique_ptr<UpgradingConfig> _clusterConfig;
+ std::unique_ptr<StorDistributionConfig> _distributionConfig;
+ std::unique_ptr<StorPrioritymappingConfig> _priorityConfig;
std::unique_ptr<document::DocumenttypesConfig> _doctypesConfig;
// New configs gotten that has yet to have been handled
- std::unique_ptr<vespa::config::content::core::StorServerConfig> _newServerConfig;
- std::unique_ptr<vespa::config::content::UpgradingConfig> _newClusterConfig;
- std::unique_ptr<vespa::config::content::StorDistributionConfig> _newDistributionConfig;
- std::unique_ptr<vespa::config::content::core::StorPrioritymappingConfig> _newPriorityConfig;
+ std::unique_ptr<StorServerConfig> _newServerConfig;
+ std::unique_ptr<UpgradingConfig> _newClusterConfig;
+ std::unique_ptr<StorDistributionConfig> _newDistributionConfig;
+ std::unique_ptr<StorPrioritymappingConfig> _newPriorityConfig;
std::unique_ptr<document::DocumenttypesConfig> _newDoctypesConfig;
std::unique_ptr<StorageComponent> _component;
config::ConfigUri _configUri;
@@ -169,7 +176,7 @@ protected:
virtual void subscribeToConfigs();
virtual void initializeNodeSpecific() = 0;
virtual std::unique_ptr<StorageLink> createChain() = 0;
- virtual void handleLiveConfigUpdate();
+ virtual void handleLiveConfigUpdate(const InitialGuard & initGuard);
void shutdown();
virtual void removeConfigSubscriptions();
};
diff --git a/storage/src/vespa/storage/visiting/CMakeLists.txt b/storage/src/vespa/storage/visiting/CMakeLists.txt
index ee10bbd58ab..6d93d96114d 100644
--- a/storage/src/vespa/storage/visiting/CMakeLists.txt
+++ b/storage/src/vespa/storage/visiting/CMakeLists.txt
@@ -17,4 +17,4 @@ vespa_add_library(storage_visitor OBJECT
storage_storageconfig
)
vespa_generate_config(storage_visitor stor-visitor.def)
-install(FILES stor-visitor.def RENAME vespa.config.content.core.stor-visitor.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(stor-visitor.def vespa.config.content.core.stor-visitor.def)
diff --git a/storageapi/src/vespa/storageapi/mbusprot/storageprotocol.h b/storageapi/src/vespa/storageapi/mbusprot/storageprotocol.h
index 699f1c4c239..10289adaf1a 100644
--- a/storageapi/src/vespa/storageapi/mbusprot/storageprotocol.h
+++ b/storageapi/src/vespa/storageapi/mbusprot/storageprotocol.h
@@ -6,7 +6,7 @@
namespace storage::mbusprot {
-class StorageProtocol : public mbus::IProtocol
+class StorageProtocol final : public mbus::IProtocol
{
public:
typedef std::shared_ptr<StorageProtocol> SP;
@@ -20,7 +20,7 @@ public:
mbus::IRoutingPolicy::UP createPolicy(const mbus::string& name, const mbus::string& param) const override;
mbus::Blob encode(const vespalib::Version&, const mbus::Routable&) const override;
mbus::Routable::UP decode(const vespalib::Version&, mbus::BlobRef) const override;
-
+ virtual bool requireSequencing() const override { return true; }
private:
ProtocolSerialization5_0 _serializer5_0;
ProtocolSerialization5_1 _serializer5_1;
diff --git a/storageserver/src/apps/storaged/CMakeLists.txt b/storageserver/src/apps/storaged/CMakeLists.txt
index b971ce44339..2de4398f04c 100644
--- a/storageserver/src/apps/storaged/CMakeLists.txt
+++ b/storageserver/src/apps/storaged/CMakeLists.txt
@@ -9,3 +9,5 @@ vespa_add_executable(storageserver_storaged_app
storageserver_storageapp
searchlib_searchlib_uca
)
+
+install(CODE "execute_process(COMMAND ln -snf vespa-storaged-bin \$ENV{DESTDIR}/\${CMAKE_INSTALL_PREFIX}/sbin/vespa-distributord-bin)")
diff --git a/storageserver/src/apps/storaged/storage.cpp b/storageserver/src/apps/storaged/storage.cpp
index 1f3109381fd..9e98f599bc5 100644
--- a/storageserver/src/apps/storaged/storage.cpp
+++ b/storageserver/src/apps/storaged/storage.cpp
@@ -18,7 +18,6 @@
#include "forcelink.h"
#include <vespa/storageserver/app/memfileservicelayerprocess.h>
#include <vespa/storageserver/app/dummyservicelayerprocess.h>
-#include <vespa/storageserver/app/rpcservicelayerprocess.h>
#include <vespa/vespalib/util/programoptions.h>
#include <vespa/vespalib/util/shutdownguard.h>
#include <iostream>
@@ -43,8 +42,6 @@ Process::UP createProcess(vespalib::stringref configId) {
return Process::UP(new MemFileServiceLayerProcess(configId));
case vespa::config::content::core::StorServerConfig::PersistenceProvider::DUMMY:
return Process::UP(new DummyServiceLayerProcess(configId));
- case vespa::config::content::core::StorServerConfig::PersistenceProvider::RPC:
- return Process::UP(new RpcServiceLayerProcess(configId));
default:
throw vespalib::IllegalStateException(
"Unknown persistence provider.", VESPA_STRLOC);
diff --git a/storageserver/src/vespa/storageserver/app/CMakeLists.txt b/storageserver/src/vespa/storageserver/app/CMakeLists.txt
index 25bd4686c36..2311c636228 100644
--- a/storageserver/src/vespa/storageserver/app/CMakeLists.txt
+++ b/storageserver/src/vespa/storageserver/app/CMakeLists.txt
@@ -5,7 +5,6 @@ vespa_add_library(storageserver_storageapp STATIC
distributorprocess.cpp
servicelayerprocess.cpp
dummyservicelayerprocess.cpp
- rpcservicelayerprocess.cpp
memfileservicelayerprocess.cpp
DEPENDS
storage_storageserver
diff --git a/storageserver/src/vespa/storageserver/app/rpcservicelayerprocess.cpp b/storageserver/src/vespa/storageserver/app/rpcservicelayerprocess.cpp
deleted file mode 100644
index ab0cd1f6089..00000000000
--- a/storageserver/src/vespa/storageserver/app/rpcservicelayerprocess.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include "rpcservicelayerprocess.h"
-#include <vespa/storage/config/config-stor-server.h>
-#include <vespa/config/helper/configgetter.hpp>
-
-#include <vespa/log/log.h>
-LOG_SETUP(".process.servicelayer");
-
-namespace storage {
-
-// RpcServiceLayerProcess implementation
-
-RpcServiceLayerProcess::RpcServiceLayerProcess(const config::ConfigUri & configUri)
- : ServiceLayerProcess(configUri)
-{
-}
-
-void
-RpcServiceLayerProcess::shutdown()
-{
- ServiceLayerProcess::shutdown();
- _provider.reset(0);
-}
-
-void
-RpcServiceLayerProcess::setupProvider()
-{
- std::unique_ptr<vespa::config::content::core::StorServerConfig> serverConfig =
- config::ConfigGetter<vespa::config::content::core::StorServerConfig>::getConfig(_configUri.getConfigId(), _configUri.getContext());
-
- _provider.reset(new spi::ProviderProxy(
- serverConfig->persistenceProvider.rpc.connectspec, *getTypeRepo()));
-}
-
-void
-RpcServiceLayerProcess::updateConfig()
-{
- ServiceLayerProcess::updateConfig();
- LOG(info, "Config updated. Sending new config to RPC proxy provider");
- _provider->setRepo(*getTypeRepo());
-}
-
-} // storage
diff --git a/storageserver/src/vespa/storageserver/app/rpcservicelayerprocess.h b/storageserver/src/vespa/storageserver/app/rpcservicelayerprocess.h
deleted file mode 100644
index cd104795c76..00000000000
--- a/storageserver/src/vespa/storageserver/app/rpcservicelayerprocess.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
- * \class storage::RpcServiceLayerProcess
- *
- * \brief A process running a service layer with RPC persistence provider.
- */
-#pragma once
-
-#include <vespa/persistence/proxy/providerproxy.h>
-#include <vespa/storageserver/app/servicelayerprocess.h>
-
-namespace storage {
-
-class RpcServiceLayerProcess : public ServiceLayerProcess {
- spi::ProviderProxy::UP _provider;
-
-public:
- RpcServiceLayerProcess(const config::ConfigUri & configUri);
- ~RpcServiceLayerProcess() { shutdown(); }
-
- virtual void shutdown() override;
- virtual void setupProvider() override;
- virtual void updateConfig() override;
- virtual spi::PersistenceProvider& getProvider() override { return *_provider; }
-};
-
-} // storage
-
diff --git a/travis/travis-build-cpp.sh b/travis/travis-build-cpp.sh
index 825da67bf54..bc2b564d13f 100755
--- a/travis/travis-build-cpp.sh
+++ b/travis/travis-build-cpp.sh
@@ -11,6 +11,7 @@ export CCACHE_MAXSIZE="1250M"
export CCACHE_COMPRESS=1
NUM_THREADS=4
ccache --print-config
+source /etc/profile.d/devtoolset-6.sh || true
cd ${BUILD_DIR}
bash ${SOURCE_DIR}/bootstrap-cpp.sh ${SOURCE_DIR} ${BUILD_DIR}
diff --git a/travis/travis-build-java.sh b/travis/travis-build-java.sh
index 68341d784b5..0a69e167dba 100755
--- a/travis/travis-build-java.sh
+++ b/travis/travis-build-java.sh
@@ -6,6 +6,7 @@ SOURCE_DIR=/source
NUM_THREADS=4
cd "${SOURCE_DIR}"
-export MAVEN_OPTS="-Xms128m -Xmx512m"
+export MAVEN_OPTS="-Xms128m -Xmx1g"
+source /etc/profile.d/devtoolset-6.sh || true
sh ./bootstrap.sh java
mvn install -nsu -B -T ${NUM_THREADS} -V # Should ideally split out test phase, but some unit tests fails on 'mvn test'
diff --git a/vagrant/README.md b/vagrant/README.md
index 02d11900558..1f1f0eee7d2 100644
--- a/vagrant/README.md
+++ b/vagrant/README.md
@@ -7,22 +7,54 @@
## Create dev environment
-### Change working directory to <vespa-source>/vagrant
+#### 1. Change working directory to &lt;vespa-source&gt;/vagrant
+
cd <vespa-source>/vagrant
-### Start and provision the environment
+#### 2. Install Vagrant VirtualBox Guest Additions plugin
+This is required for mounting shared folders and get mouse pointer integration and seamless windows in the virtual CentOS desktop.
+
+ vagrant plugin install vagrant-vbguest
+
+#### 3. Start and provision the environment
+
vagrant up
-### Connect to machine via SSH
+#### 4. Connect to machine via SSH
SSH agent forwarding is enabled to ensure easy interaction with GitHub inside the machine.
vagrant ssh
-### Checkout vespa source inside machine
-This is needed in order to compile and run tests fast on the local file system inside the machine.
+#### 5. Checkout vespa source inside virtual machine
+This is needed in order to compile and run tests fast on the local file system inside the virtual machine.
git clone git@github.com:vespa-engine/vespa.git
## Build C++ modules
-Please follow the instructions described [here](../README.md#build-c-modules).
+Please follow the build instructions described [here](../README.md#build-c-modules).
+Skip these steps if doing development with CLion.
+
+
+## Build and Develop using CLion
+CLion is installed as part of the environment and is recommended for C++ development.
+
+#### 1. Bootstrap C++ building
+Go to <vespa-source> directory and execute:
+
+ ./bootstrap-cpp.sh . .
+
+#### 2. Start CLion
+Open a terminal inside the virtual CentOS desktop and run:
+
+ clion
+
+#### 3. Open the Vespa Project
+Go to *File* -> *Open* and choose &lt;vespa-source&gt;/CMakeLists.txt.
+
+#### 4. Set compiler threads
+Go to *File* -> *Settings* -> *Build, Execution, Deployment* -> *CMake*.
+Under *Build Options* specify "-j 4" and click *Apply*.
+
+#### 5. Build all modules
+Choose target **all_modules** from the set of build targets and click build.
diff --git a/vagrant/Vagrantfile b/vagrant/Vagrantfile
index 74b2f5bdbdb..a06f5afbd49 100644
--- a/vagrant/Vagrantfile
+++ b/vagrant/Vagrantfile
@@ -4,7 +4,7 @@
# For a complete reference, please see the online documentation at https://docs.vagrantup.com.
Vagrant.configure("2") do |config|
- config.vm.box = "boxcutter/centos73-desktop"
+ config.vm.box = "boxcutter/centos7-desktop"
config.ssh.forward_agent = true
@@ -16,7 +16,7 @@ Vagrant.configure("2") do |config|
vb.name = "vespa-dev"
vb.memory = "8192"
- vb.cpus = 8
+ vb.cpus = 4
end
# Install required and nice-to-have packages
@@ -27,6 +27,7 @@ Vagrant.configure("2") do |config|
yum -y install yum-utils
yum -y install git \
ccache \
+ maven \
rpm-build \
valgrind \
sudo \
@@ -35,7 +36,8 @@ Vagrant.configure("2") do |config|
yum-builddep -y /vagrant/dist/vespa.spec
echo -e "* soft nproc 409600\n* hard nproc 409600" > /etc/security/limits.d/99-nproc.conf
echo -e "* soft nofile 262144\n* hard nofile 262144" > /etc/security/limits.d/99-nofile.conf
- wget -q -O - https://download.jetbrains.com/cpp/CLion-2017.2.2.tar.gz | tar -C /opt -zx
- ln -sf /opt/clion-2017.2.2/bin/clion.sh /usr/bin/clion
+ echo -e "fs.inotify.max_user_watches = 524288" > /etc/sysctl.d/clion.conf
+ wget -q -O - https://download.jetbrains.com/cpp/CLion-2017.2.3.tar.gz | tar -C /opt -zx
+ ln -sf /opt/clion-2017.2.3/bin/clion.sh /usr/bin/clion
SHELL
end
diff --git a/vdstestlib/src/vespa/vdstestlib/cppunit/macros.h b/vdstestlib/src/vespa/vdstestlib/cppunit/macros.h
index 5f7ce892723..fdde94bbbd6 100644
--- a/vdstestlib/src/vespa/vdstestlib/cppunit/macros.h
+++ b/vdstestlib/src/vespa/vdstestlib/cppunit/macros.h
@@ -4,6 +4,7 @@
*/
#pragma once
#include <cppunit/extensions/HelperMacros.h>
+#include <vespa/vespalib/test/insertion_operators.h>
// Wrapper for CPPUNIT_ASSERT_EQUAL_MESSAGE to prevent it from evaluating
@@ -147,49 +148,6 @@
// Create output operator for containers.
// Needed so we can use CPPUNIT_ASSERT_EQUAL with them.
-// TODO: Remove these functions from the std namespace.
-namespace std {
- template<typename T>
- inline std::ostream& operator<<(std::ostream& out, const std::vector<T>& v)
- {
- out << "std::vector(" << v.size() << ") {";
- for (uint32_t i=0, n=v.size(); i<n; ++i) {
- out << "\n " << v[i];
- }
- if (v.size() > 0) out << "\n";
- return out << "}";
- }
- template<typename T>
- inline std::ostream& operator<<(std::ostream& out, const std::set<T>& v)
- {
- out << "std::set(" << v.size() << ") {";
- for (typename std::set<T>::const_iterator it = v.begin(); it != v.end();
- ++it)
- {
- out << "\n " << *it;
- }
- if (v.size() > 0) out << "\n";
- return out << "}";
- }
- template<typename S, typename T>
- inline std::ostream& operator<<(std::ostream& out, const std::map<S, T>& m)
- {
- out << "std::map(" << m.size() << ") {";
- for (typename std::map<S, T>::const_iterator it = m.begin();
- it != m.end(); ++it)
- {
- out << "\n " << *it;
- }
- if (m.size() > 0) out << "\n";
- return out << "}";
- }
- template<typename S, typename T>
- inline std::ostream& operator<<(std::ostream& out, const std::pair<S, T>& p)
- {
- return out << "std::pair(" << p.first << ", " << p.second << ")";
- }
-}
-
template<typename S, typename T>
std::ostream&
operator<<(std::ostream& out, const std::unordered_map<S, T>& umap)
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
index 6a99a7bc08b..db0fada990f 100644
--- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/VespaRecordWriter.java
@@ -153,7 +153,7 @@ public class VespaRecordWriter extends RecordWriter {
while (tokenizer.hasMoreTokens()) {
String endpoint = tokenizer.nextToken().trim();
sessionParams.addCluster(new Cluster.Builder().addEndpoint(
- Endpoint.create(endpoint, 4080, false)
+ Endpoint.create(endpoint, configuration.defaultPort(), configuration.useSSL())
).build());
}
diff --git a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java
index 59da262428f..a05d2a35e4f 100644
--- a/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java
+++ b/vespa-hadoop/src/main/java/com/yahoo/vespa/hadoop/mapreduce/util/VespaConfiguration.java
@@ -11,6 +11,8 @@ import java.util.Properties;
public class VespaConfiguration {
public static final String ENDPOINT = "vespa.feed.endpoint";
+ public static final String DEFAULT_PORT = "vespa.feed.defaultport";
+ public static final String USE_SSL = "vespa.feed.ssl";
public static final String PROXY_HOST = "vespa.feed.proxy.host";
public static final String PROXY_PORT = "vespa.feed.proxy.port";
public static final String DRYRUN = "vespa.feed.dryrun";
@@ -46,6 +48,16 @@ public class VespaConfiguration {
}
+ public int defaultPort() {
+ return getInt(DEFAULT_PORT, 4080);
+ }
+
+
+ public boolean useSSL() {
+ return getBoolean(USE_SSL, false);
+ }
+
+
public String proxyHost() {
return getString(PROXY_HOST);
}
@@ -167,6 +179,8 @@ public class VespaConfiguration {
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append(ENDPOINT + ": " + endpoint() + "\n");
+ sb.append(DEFAULT_PORT + ": " + defaultPort() + "\n");
+ sb.append(USE_SSL + ": " + useSSL() + "\n");
sb.append(PROXY_HOST + ": " + proxyHost() + "\n");
sb.append(PROXY_PORT + ": " + proxyPort() + "\n");
sb.append(DRYRUN + ": " + dryrun() +"\n");
diff --git a/vespa-http-client/CMakeLists.txt b/vespa-http-client/CMakeLists.txt
new file mode 100644
index 00000000000..511b4a4c985
--- /dev/null
+++ b/vespa-http-client/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(vespa-http-client)
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClient.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClient.java
index ed62c507a4b..299541a5a2d 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClient.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClient.java
@@ -17,7 +17,6 @@ import java.util.concurrent.atomic.AtomicInteger;
*
* @author dybis
* @see FeedClientFactory
- * @since 5.29.0
*/
public interface FeedClient extends AutoCloseable {
@@ -25,6 +24,7 @@ public interface FeedClient extends AutoCloseable {
* Streams a document to cluster(s). If the pipeline and buffers are full, this call will be blocking.
* Documents might time out before they are sent. Failed documents are not retried.
* Don't call stream() after close is called.
+ *
* @param documentId Document id of the document.
* @param documentData The document data as JSON or XML (as specified when using the factory to create the API)
*/
@@ -34,6 +34,7 @@ public interface FeedClient extends AutoCloseable {
* Streams a document to cluster(s). If the pipeline and buffers are full, this call will be blocking.
* Documents might time out before they are sent. Failed documents are not retried.
* Don't call stream() after close is called.
+ *
* @param documentId Document id of the document.
* @param documentData The document data as JSON or XML (as specified when using the factory to create the API)
* @param context Any context, will be accessible in the result of the callback.
@@ -47,7 +48,7 @@ public interface FeedClient extends AutoCloseable {
* AtomicInteger for counters and follow general guides for thread-safe programming.
* There is an example implementation in class SimpleLoggerResultCallback.
*/
- static interface ResultCallback {
+ interface ResultCallback {
void onCompletion(String docId, Result documentResult);
}
@@ -60,17 +61,19 @@ public interface FeedClient extends AutoCloseable {
/**
* Returns stats about the cluster.
+ *
* @return JSON string with information about cluster.
*/
- public String getStatsAsJson();
+ String getStatsAsJson();
/**
* Utility function that takes an array of JSON documents and calls the FeedClient for each element.
+ *
* @param inputStream This can be a very large stream. The outer element is an array (of document operations).
* @param feedClient The feedClient that will receive the document operations.
* @param numSent increased per document sent to API (but no waiting for results).
*/
- public static void feedJson(InputStream inputStream, FeedClient feedClient, AtomicInteger numSent) {
+ static void feedJson(InputStream inputStream, FeedClient feedClient, AtomicInteger numSent) {
JsonReader.read(inputStream, feedClient, numSent);
}
@@ -78,15 +81,17 @@ public interface FeedClient extends AutoCloseable {
* Utility function that takes an array of XML documents and calls the FeedClient for each element.
* The XML document has to be formatted with line space on each line (like "regular" XML, but stricter
* than the specifications of XML).
+ *
* @param inputStream This can be a very large stream.
* @param feedClient The feedClient that will receive the document operations.
* @param numSent increased per document sent to API (but no waiting for results).
*/
- public static void feedXml(InputStream inputStream, FeedClient feedClient, AtomicInteger numSent) {
+ static void feedXml(InputStream inputStream, FeedClient feedClient, AtomicInteger numSent) {
try {
XmlFeedReader.read(inputStream, feedClient, numSent);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClientFactory.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClientFactory.java
index aba16e9c9a4..cfa9ce670f6 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClientFactory.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/FeedClientFactory.java
@@ -9,6 +9,7 @@ import static com.yahoo.vespa.http.client.SessionFactory.createTimeoutExecutor;
/**
* Factory for creating FeedClient.
+ *
* @author dybis
*/
public class FeedClientFactory {
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Result.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Result.java
index ed2ed08e4ac..138be61de80 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Result.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Result.java
@@ -16,8 +16,7 @@ import java.util.List;
* but may contain more than one Result.Detail instances, as these pertains to a
* single endpoint, and a Result may wrap data for multiple endpoints.
*
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ * @author Einar M R Rosenvinge
*/
// This should be an interface, but in order to be binary compatible during refactoring we made it abstract.
public class Result {
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Session.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Session.java
index 0c85c025793..de39eabd3ee 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Session.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/Session.java
@@ -13,11 +13,11 @@ import java.util.concurrent.BlockingQueue;
*
* A {@link SessionFactory} is provided to instantiate Sessions.
*
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ * @author Einar M R Rosenvinge
* @see SessionFactory
*/
public interface Session extends AutoCloseable {
+
/**
* Returns an OutputStream that can be used to write ONE operation, identified by the
* given document ID. The data format must match the
@@ -33,7 +33,7 @@ public interface Session extends AutoCloseable {
* @param documentId the unique ID identifying this operation in the system
* @return an OutputStream to write the operation payload into
*/
- public OutputStream stream(CharSequence documentId);
+ OutputStream stream(CharSequence documentId);
/**
* Returns {@link Result}s for all operations enqueued by {@link #stream(CharSequence)}.
@@ -44,7 +44,7 @@ public interface Session extends AutoCloseable {
* @return a blocking queue for retrieving results
* @see Result
*/
- public BlockingQueue<Result> results();
+ BlockingQueue<Result> results();
/**
* Closes this Session. All resources are freed, persistent connections are closed and
@@ -52,11 +52,12 @@ public interface Session extends AutoCloseable {
*
* @throws RuntimeException in cases where underlying resources throw on shutdown/close
*/
- public void close();
+ void close();
/**
* Returns stats about the cluster.
* @return JSON string with information about cluster.
*/
- public String getStatsAsJson();
+ String getStatsAsJson();
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SessionFactory.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SessionFactory.java
index 25241d249ff..2e47e45dff0 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SessionFactory.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SessionFactory.java
@@ -13,8 +13,7 @@ import java.util.concurrent.ThreadFactory;
/**
* Factory for creating {@link Session} instances.
*
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ * @author Einar M R Rosenvinge
*/
public final class SessionFactory {
@@ -77,4 +76,5 @@ public final class SessionFactory {
return t;
}
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SimpleLoggerResultCallback.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SimpleLoggerResultCallback.java
index 8729644e5ea..85e6baec9f7 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SimpleLoggerResultCallback.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/SimpleLoggerResultCallback.java
@@ -13,6 +13,7 @@ import java.util.concurrent.atomic.AtomicInteger;
* "Result received: 34 (1 failed so far, 2003 sent, success rate 1999.23 docs/sec)."
* On each failure it will print the Result object content. If tracing is enabled, it will print trace messages to
* std err as well.
+ *
* @author dybis
*/
public class SimpleLoggerResultCallback implements FeedClient.ResultCallback {
@@ -99,4 +100,5 @@ public class SimpleLoggerResultCallback implements FeedClient.ResultCallback {
}
}
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Cluster.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Cluster.java
index fb928e2dacd..557aeedb4c1 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Cluster.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Cluster.java
@@ -11,11 +11,11 @@ import java.util.List;
/**
* A set of {@link Endpoint} instances. Construct using {@link Cluster.Builder}.
*
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ * @author Einar M R Rosenvinge
*/
@Immutable
public final class Cluster {
+
/**
* Builder for {@link Cluster}.
*/
@@ -74,5 +74,4 @@ public final class Cluster {
return "Cluster " + endpoints + ", route " + route;
}
-
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/ConnectionParams.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/ConnectionParams.java
index 6d6c1ddaa49..3fe42b21e93 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/ConnectionParams.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/ConnectionParams.java
@@ -21,9 +21,8 @@ import java.util.concurrent.TimeUnit;
* the Session to the Vespa clusters.
* This class is immutable
* and has no public constructor - to instantiate one, use a {@link Builder}.
-
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ *
+ * @author Einar M R Rosenvinge
*/
@Immutable
public final class ConnectionParams {
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Endpoint.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Endpoint.java
index b044c359712..de7bfb5bb19 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Endpoint.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/Endpoint.java
@@ -9,8 +9,7 @@ import java.io.Serializable;
* Represents an endpoint, in most cases a JDisc container
* in a Vespa cluster configured with <code>document-api</code>.
*
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ * @author Einar M R Rosenvinge
*/
@Immutable
public final class Endpoint implements Serializable {
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/FeedParams.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/FeedParams.java
index 46487f8431a..4f5b30444a9 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/FeedParams.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/FeedParams.java
@@ -11,8 +11,7 @@ import java.util.concurrent.TimeUnit;
* when creating {@link com.yahoo.vespa.http.client.Session}s. This class is immutable
* and has no public constructor - to instantiate one, use a {@link Builder}.
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ * @author Einar M R Rosenvinge
*/
@Immutable
public final class FeedParams {
@@ -327,4 +326,5 @@ public final class FeedParams {
public long getLocalQueueTimeOut() {
return localQueueTimeOut;
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/SessionParams.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/SessionParams.java
index 4f1f76766d7..9a8640d09e2 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/SessionParams.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/config/SessionParams.java
@@ -14,13 +14,13 @@ import java.util.List;
* when creating {@link com.yahoo.vespa.http.client.Session}s. This class is immutable
* and has no public constructor - to instantiate one, use a {@link Builder}.
*
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ * @author Einar M R Rosenvinge
* @see com.yahoo.vespa.http.client.SessionFactory
* @see Builder
*/
@Immutable
public final class SessionParams {
+
/**
* Interface for handling serious errors with connection.
*/
@@ -180,4 +180,5 @@ public final class SessionParams {
public ErrorReporter getErrorReport() {
return errorReport;
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java
index 11cea32dd37..a16d992324d 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java
@@ -17,6 +17,7 @@ import java.util.concurrent.TimeUnit;
/**
* Implementation of FeedClient. It is a thin layer on top of multiClusterHandler and multiClusterResultAggregator.
+ *
* @author dybis
*/
public class FeedClientImpl implements FeedClient {
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/MultiClusterSessionOutputStream.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/MultiClusterSessionOutputStream.java
index 129b67b13b8..bf55a46277d 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/MultiClusterSessionOutputStream.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/MultiClusterSessionOutputStream.java
@@ -9,9 +9,11 @@ import java.io.IOException;
/**
* Class for wiring up the Session API. It is the return value of stream() in the Session API.
+ *
* @author dybis
*/
class MultiClusterSessionOutputStream extends ByteArrayOutputStream {
+
private final CharSequence documentId;
private final OperationProcessor operationProcessor;
private final Object context;
@@ -31,4 +33,5 @@ class MultiClusterSessionOutputStream extends ByteArrayOutputStream {
operationProcessor.sendDocument(document);
super.close();
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java
index 01c36c7df28..420f64d4bf3 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ApacheGatewayConnection.java
@@ -43,7 +43,8 @@ import java.util.logging.Logger;
import java.util.zip.GZIPOutputStream;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
+ *
* @since 5.1.20
*/
@Beta
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ByteBufferInputStream.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ByteBufferInputStream.java
index 044055937d0..56a525502a5 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ByteBufferInputStream.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ByteBufferInputStream.java
@@ -10,7 +10,8 @@ import java.util.ArrayDeque;
import java.util.Deque;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
+ *
* @since 5.1.20
*/
@Beta
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java
index fa9c1a0a65d..fb57c63dbf2 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java
@@ -22,8 +22,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ * @author Einar M R Rosenvinge
*/
@Beta
public class ClusterConnection implements AutoCloseable {
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DocumentQueue.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DocumentQueue.java
index b9be94e03f1..671e6f07dbe 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DocumentQueue.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DocumentQueue.java
@@ -13,10 +13,13 @@ import java.util.Optional;
import java.util.concurrent.TimeUnit;
/**
- * Document queue that only gives you document operations on documents for which there are no already in flight operations for.
+ * Document queue that only gives you document operations on documents for which there are no
+ * already in flight operations for.
+ *
* @author dybis
*/
class DocumentQueue {
+
private final Deque<Document> queue;
private final int maxSize;
private boolean closed = false;
@@ -119,4 +122,5 @@ class DocumentQueue {
return Optional.empty();
}
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DryRunGatewayConnection.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DryRunGatewayConnection.java
index 058cc8411c0..23ab5e36e14 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DryRunGatewayConnection.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/DryRunGatewayConnection.java
@@ -57,4 +57,5 @@ public class DryRunGatewayConnection implements GatewayConnection {
@Override
public void close() { }
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointIOException.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointIOException.java
index f75a5b122d7..ae15f6ec22b 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointIOException.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointIOException.java
@@ -7,9 +7,11 @@ import java.io.IOException;
/**
* Class for throwing exception from endpoint.
+ *
* @author dybis
*/
public class EndpointIOException extends IOException {
+
private final Endpoint endpoint;
private static final long serialVersionUID = 29335813211L;
@@ -21,4 +23,5 @@ public class EndpointIOException extends IOException {
public Endpoint getEndpoint() {
return endpoint;
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointResultQueue.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointResultQueue.java
index 405ecf8ade2..98dd067b7c5 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointResultQueue.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/EndpointResultQueue.java
@@ -18,11 +18,11 @@ import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.22
+ * @author Einar M R Rosenvinge
*/
@Beta
class EndpointResultQueue {
+
private static Logger log = Logger.getLogger(EndpointResultQueue.class.getName());
private final OperationProcessor operationProcessor;
private final Map<String, TimerFuture> futureByOperation = new HashMap<>();
@@ -58,8 +58,7 @@ class EndpointResultQueue {
resultReceived(result, clusterId, true);
}
- private synchronized void resultReceived(
- EndpointResult result, int clusterId, boolean duplicateGivesWarning) {
+ private synchronized void resultReceived(EndpointResult result, int clusterId, boolean duplicateGivesWarning) {
operationProcessor.resultReceived(result, clusterId);
TimerFuture timerFuture = futureByOperation.remove(result.getOperationId());
@@ -133,4 +132,5 @@ class EndpointResultQueue {
return future;
}
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayConnection.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayConnection.java
index 8fe17d30a5d..3e5bdfe3056 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayConnection.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayConnection.java
@@ -9,6 +9,7 @@ import java.io.InputStream;
import java.util.List;
public interface GatewayConnection {
+
InputStream writeOperations(List<Document> docs) throws ServerResponseException, IOException;
InputStream drain() throws ServerResponseException, IOException;
@@ -20,4 +21,5 @@ public interface GatewayConnection {
void handshake() throws ServerResponseException, IOException;
void close();
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayThrottler.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayThrottler.java
index fc495249c30..cc637904553 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayThrottler.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/GatewayThrottler.java
@@ -10,6 +10,7 @@ import java.util.Random;
* @author dybis
*/
public class GatewayThrottler {
+
private long backOffTimeMs = 0;
private final long maxSleepTimeMs;
private static Random random = new Random();
@@ -42,4 +43,5 @@ public class GatewayThrottler {
Double result = expected * factor;
return result.intValue();
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java
index 2d4dc26395a..7874dcb24ab 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java
@@ -23,11 +23,12 @@ import java.util.logging.Logger;
/**
* Class for handling asynchronous feeding of new documents and processing of results.
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ *
+ * @author Einar M R Rosenvinge
*/
@Beta
class IOThread implements Runnable, AutoCloseable {
+
private static Logger log = Logger.getLogger(IOThread.class.getName());
private final Endpoint endpoint;
private final GatewayConnection client;
@@ -403,4 +404,5 @@ class IOThread implements Runnable, AutoCloseable {
resultQueue.failOperation(endpointResult, clusterId);
}
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/ConcurrentDocumentOperationBlocker.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/ConcurrentDocumentOperationBlocker.java
index 5709032f2d7..366675d083c 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/ConcurrentDocumentOperationBlocker.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/ConcurrentDocumentOperationBlocker.java
@@ -5,6 +5,7 @@ import java.util.concurrent.Semaphore;
/**
* A semaphore that can be re-sized.
+ *
* @author dybis
*/
final public class ConcurrentDocumentOperationBlocker {
@@ -64,4 +65,5 @@ final public class ConcurrentDocumentOperationBlocker {
super.reducePermits(reduction);
}
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/DocumentSendInfo.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/DocumentSendInfo.java
index 13399486714..54eac939b9f 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/DocumentSendInfo.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/DocumentSendInfo.java
@@ -12,6 +12,7 @@ import java.util.Map;
* This class is NOT thread-safe by design.
*/
class DocumentSendInfo {
+
private final Document document;
private final Map<Integer, Result.Detail> detailByClusterId = new HashMap<>();
// This is lazily populated as normal cases does not require retries.
@@ -68,4 +69,5 @@ class DocumentSendInfo {
Document getDocument() {
return document;
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/EndPointResultFactory.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/EndPointResultFactory.java
index eb6572d4dbb..129e3ca245b 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/EndPointResultFactory.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/EndPointResultFactory.java
@@ -18,11 +18,11 @@ import java.util.List;
import java.util.logging.Logger;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
- * @since 5.1.20
+ * @author Einar M R Rosenvinge
*/
@Beta
public final class EndPointResultFactory {
+
private static Logger log = Logger.getLogger(EndPointResultFactory.class.getName());
private static final String EMPTY_MESSAGE = "-";
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/IncompleteResultsThrottler.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/IncompleteResultsThrottler.java
index 8721bfb53fa..6ecbb56888e 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/IncompleteResultsThrottler.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/IncompleteResultsThrottler.java
@@ -30,6 +30,7 @@ import java.util.Random;
* @author dybis
*/
public class IncompleteResultsThrottler {
+
private final ConcurrentDocumentOperationBlocker blocker = new ConcurrentDocumentOperationBlocker();
private final int maxInFlightValue;
private final int minInFlightValue;
@@ -180,4 +181,5 @@ public class IncompleteResultsThrottler {
tryBoostingSizeIfMinValueOverSeveralCycles(size), maxInFlightValue));
blocker.setMaxConcurrency(maxInFlightNow);
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessor.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessor.java
index 605830a3205..7cea767f9ba 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessor.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessor.java
@@ -29,11 +29,12 @@ import java.util.logging.Logger;
/**
* Merges several endpointResult into one Result and does the callback.
+ *
* @author dybis
- * @since 5.1.20
*/
@Beta
public class OperationProcessor {
+
private static final Logger log = Logger.getLogger(OperationProcessor.class.getName());
private final Map<String, DocumentSendInfo> docSendInfoByOperationId = new HashMap<>();
private final ArrayListMultimap<String, Document> blockedDocumentsByDocumentId = ArrayListMultimap.create();
@@ -191,13 +192,10 @@ public class OperationProcessor {
final Result result = process(endpointResult, clusterId);
if (result != null) {
- try {
- resultCallback.onCompletion(result.getDocumentId(), result);
- if (traceToStderr && result.hasLocalTrace()) {
- System.err.println(result.toString());
- }
- } finally {
- incompleteResultsThrottler.resultReady(result.isSuccess());
+ incompleteResultsThrottler.resultReady(result.isSuccess());
+ resultCallback.onCompletion(result.getDocumentId(), result);
+ if (traceToStderr && result.hasLocalTrace()) {
+ System.err.println(result.toString());
}
}
}
@@ -244,12 +242,10 @@ public class OperationProcessor {
try {
clusterConnection.post(document);
} catch (EndpointIOException eio) {
- resultReceived(
- EndPointResultFactory.createError(
- eio.getEndpoint(),
- document.getOperationId(),
- eio),
- clusterConnection.getClusterId());
+ resultReceived(EndPointResultFactory.createError(eio.getEndpoint(),
+ document.getOperationId(),
+ eio),
+ clusterConnection.getClusterId());
}
}
@@ -298,4 +294,5 @@ public class OperationProcessor {
throw new RuntimeException("Did not manage to shut down retry threads. Please report problem.");
}
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationStats.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationStats.java
index b407cc67e9e..a28c2f9805f 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationStats.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationStats.java
@@ -14,6 +14,7 @@ import java.io.Writer;
import java.util.List;
public class OperationStats {
+
private static JsonFactory jsonFactory = new JsonFactory();
private final String sessionParamsAsXmlString;
private List<ClusterConnection> clusters;
@@ -65,4 +66,5 @@ public class OperationStats {
return "{ \"Error\" : \""+ e.getMessage() + "\"}";
}
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/CommandLineArguments.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/CommandLineArguments.java
index 598c64a20a6..23fb788f116 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/CommandLineArguments.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/CommandLineArguments.java
@@ -19,6 +19,7 @@ import java.util.concurrent.TimeUnit;
/**
* Commandline interface for the binary.
+ *
* @author dybis
*/
@Beta
@@ -28,6 +29,7 @@ public class CommandLineArguments {
/**
* Creates a CommandLineArguments instance and populates it with data.
+ *
* @param args array of arguments.
* @return null on failure or if help option is set to true.
*/
@@ -223,4 +225,5 @@ public class CommandLineArguments {
}
return builder.build();
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/FormatInputStream.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/FormatInputStream.java
index 302a4ce03f5..36cdf18e102 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/FormatInputStream.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/FormatInputStream.java
@@ -21,6 +21,7 @@ import java.util.Optional;
* @author valerijf
*/
public class FormatInputStream {
+
private InputStream inputStream;
private Format format;
@@ -100,4 +101,5 @@ public class FormatInputStream {
public enum Format {
JSON, XML
}
+
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/Runner.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/Runner.java
index 164c2524483..0983d893ecc 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/Runner.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/runner/Runner.java
@@ -14,11 +14,11 @@ import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
* @author dybis
- * @since 5.1.20
*/
public class Runner {
+
/**
* Feed data from inputFile to session.
* @param feedClient where to send data to
diff --git a/vespa_jersey2/CMakeLists.txt b/vespa_jersey2/CMakeLists.txt
new file mode 100644
index 00000000000..f4f6c44202c
--- /dev/null
+++ b/vespa_jersey2/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_java_artifact_dependencies(vespa_jersey2)
diff --git a/vespabase/CMakeLists.txt b/vespabase/CMakeLists.txt
index e658a959d84..ea007b130f9 100644
--- a/vespabase/CMakeLists.txt
+++ b/vespabase/CMakeLists.txt
@@ -31,3 +31,7 @@ configure_file(src/vespa-configserver.service.in src/vespa-configserver.service
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/src/vespa.service ${CMAKE_CURRENT_BINARY_DIR}/src/vespa-configserver.service DESTINATION etc/systemd/system)
install(FILES src/Defaults.pm DESTINATION lib/perl5/site_perl/Yahoo/Vespa)
+
+configure_file(conf/default-env.txt.in conf/default-env.txt @ONLY)
+install(FILES ${CMAKE_CURRENT_BINARY_DIR}/conf/default-env.txt DESTINATION conf/vespa)
+
diff --git a/vespabase/conf/.gitignore b/vespabase/conf/.gitignore
new file mode 100644
index 00000000000..1019a9f6ec5
--- /dev/null
+++ b/vespabase/conf/.gitignore
@@ -0,0 +1 @@
+default-env.txt
diff --git a/vespabase/conf/default-env.txt.in b/vespabase/conf/default-env.txt.in
new file mode 100644
index 00000000000..4855ac9f571
--- /dev/null
+++ b/vespabase/conf/default-env.txt.in
@@ -0,0 +1,2 @@
+fallback VESPA_HOME @CMAKE_INSTALL_PREFIX@
+override VESPA_USER vespa
diff --git a/vespabase/src/rhel-prestart.sh b/vespabase/src/rhel-prestart.sh
index c00557243a7..37fc5e98533 100755
--- a/vespabase/src/rhel-prestart.sh
+++ b/vespabase/src/rhel-prestart.sh
@@ -97,6 +97,7 @@ fixdir ${VESPA_USER} wheel 755 var/db/vespa/config_server/serverdb/application
fixdir ${VESPA_USER} wheel 755 var/db/vespa/index
fixdir ${VESPA_USER} wheel 755 var/db/vespa/logcontrol
fixdir ${VESPA_USER} wheel 755 var/db/vespa/search
+fixdir ${VESPA_USER} wheel 755 var/jdisc_core
fixdir ${VESPA_USER} wheel 755 var/vespa/bundlecache
fixdir ${VESPA_USER} wheel 755 var/vespa/bundlecache/configserver
fixdir ${VESPA_USER} wheel 755 var/vespa/cache/config/
diff --git a/vespaclient-container-plugin/CMakeLists.txt b/vespaclient-container-plugin/CMakeLists.txt
new file mode 100644
index 00000000000..4c8a8647b23
--- /dev/null
+++ b/vespaclient-container-plugin/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(vespaclient-container-plugin)
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandler.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandler.java
index 3e16bced996..e7ed9ce10db 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandler.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandler.java
@@ -8,11 +8,11 @@ import java.util.Optional;
/**
* Abstract the backend stuff for the REST API, such as retrieving or updating documents.
*
- * @author dybis
+ * @author Haakon Dybdahl
*/
public interface OperationHandler {
- class VisitResult{
+ class VisitResult {
public final Optional<String> token;
public final String documentsAsJsonList;
@@ -23,7 +23,19 @@ public interface OperationHandler {
}
}
- VisitResult visit(RestUri restUri, String documentSelection, Optional<String> cluster, Optional<String> continuation) throws RestApiException;
+ class VisitOptions {
+ public final Optional<String> cluster;
+ public final Optional<String> continuation;
+ public final Optional<Integer> wantedDocumentCount;
+
+ public VisitOptions(Optional<String> cluster, Optional<String> continuation, Optional<Integer> wantedDocumentCount) {
+ this.cluster = cluster;
+ this.continuation = continuation;
+ this.wantedDocumentCount = wantedDocumentCount;
+ }
+ }
+
+ VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException;
void put(RestUri restUri, VespaXMLFeedReader.Operation data, Optional<String> route) throws RestApiException;
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandlerImpl.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandlerImpl.java
index 482a39c60e5..46678ea67e3 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandlerImpl.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/OperationHandlerImpl.java
@@ -49,6 +49,7 @@ public class OperationHandlerImpl implements OperationHandler {
}
public static final int VISIT_TIMEOUT_MS = 120000;
+ public static final int WANTED_DOCUMENT_COUNT_UPPER_BOUND = 1000; // Approximates the max default size of a bucket
private final DocumentAccess documentAccess;
private final DocumentApiMetrics metricsHelper;
private final ClusterEnumerator clusterEnumerator;
@@ -109,13 +110,8 @@ public class OperationHandlerImpl implements OperationHandler {
}
@Override
- public VisitResult visit(
- RestUri restUri,
- String documentSelection,
- Optional<String> cluster,
- Optional<String> continuation) throws RestApiException {
-
- VisitorParameters visitorParameters = createVisitorParameters(restUri, documentSelection, cluster, continuation);
+ public VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException {
+ VisitorParameters visitorParameters = createVisitorParameters(restUri, documentSelection, options);
VisitorControlHandler visitorControlHandler = new VisitorControlHandler();
visitorParameters.setControlHandler(visitorControlHandler);
@@ -326,13 +322,13 @@ public class OperationHandlerImpl implements OperationHandler {
private VisitorParameters createVisitorParameters(
RestUri restUri,
String documentSelection,
- Optional<String> clusterName,
- Optional<String> continuation)
+ VisitOptions options)
throws RestApiException {
StringBuilder selection = new StringBuilder();
if (! documentSelection.isEmpty()) {
+ // TODO shouldn't selection be wrapped in () itself ?
selection.append("(").append(documentSelection).append(" and ");
}
selection.append(restUri.getDocumentType()).append(" and (id.namespace=='").append(restUri.getNamespace()).append("')");
@@ -346,24 +342,26 @@ public class OperationHandlerImpl implements OperationHandler {
params.setMaxBucketsPerVisitor(1);
params.setMaxPending(32);
params.setMaxFirstPassHits(1);
- params.setMaxTotalHits(1);
+ params.setMaxTotalHits(options.wantedDocumentCount
+ .map(n -> Math.min(Math.max(n, 1), WANTED_DOCUMENT_COUNT_UPPER_BOUND))
+ .orElse(1));
params.setThrottlePolicy(new StaticThrottlePolicy().setMaxPendingCount(1));
params.setToTimestamp(0L);
params.setFromTimestamp(0L);
params.setSessionTimeoutMs(VISIT_TIMEOUT_MS);
- params.visitInconsistentBuckets(true);
+ params.visitInconsistentBuckets(true); // TODO document this as part of consistency doc
params.setVisitorOrdering(VisitorOrdering.ASCENDING);
- params.setRoute(resolveClusterRoute(clusterName));
+ params.setRoute(resolveClusterRoute(options.cluster));
params.setTraceLevel(0);
params.setPriority(DocumentProtocol.Priority.NORMAL_4);
params.setVisitRemoves(false);
- if (continuation.isPresent()) {
+ if (options.continuation.isPresent()) {
try {
- params.setResumeToken(ContinuationHit.getToken(continuation.get()));
+ params.setResumeToken(ContinuationHit.getToken(options.continuation.get()));
} catch (Exception e) {
throw new RestApiException(Response.createErrorResponse(500, ExceptionUtils.getStackTrace(e), restUri, RestUri.apiErrorCodes.UNSPECIFIED));
}
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java
index 1f1eca9674b..5e0fea8ab7d 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java
@@ -38,7 +38,7 @@ import java.util.concurrent.atomic.AtomicInteger;
/**
* API for handling single operation on a document and visiting.
*
- * @author dybis
+ * @author Haakon Dybdahl
*/
public class RestApi extends LoggingRequestHandler {
@@ -52,6 +52,7 @@ public class RestApi extends LoggingRequestHandler {
private static final String SELECTION = "selection";
private static final String CLUSTER = "cluster";
private static final String CONTINUATION = "continuation";
+ private static final String WANTED_DOCUMENT_COUNT = "wantedDocumentCount";
private static final String APPLICATION_JSON = "application/json";
private final OperationHandler operationHandler;
private SingleDocumentParser singleDocumentParser;
@@ -96,19 +97,33 @@ public class RestApi extends LoggingRequestHandler {
this.singleDocumentParser = new SingleDocumentParser(docTypeManager);
}
- // Returns null if invalid value.
- private Optional<Boolean> parseBoolean(String parameter, HttpRequest request) {
+ private static Optional<String> requestProperty(String parameter, HttpRequest request) {
final String property = request.getProperty(parameter);
if (property != null && ! property.isEmpty()) {
- switch (property) {
- case "true" : return Optional.of(true);
- case "false": return Optional.of(false);
- default : return null;
- }
+ return Optional.of(property);
}
return Optional.empty();
}
+ private static boolean parseBooleanStrict(String value) {
+ if ("true".equalsIgnoreCase(value)) {
+ return true;
+ } else if ("false".equalsIgnoreCase(value)) {
+ return false;
+ }
+ throw new IllegalArgumentException(String.format("Value not convertible to bool: '%s'", value));
+ }
+
+ private static Optional<Boolean> parseBoolean(String parameter, HttpRequest request) {
+ Optional<String> property = requestProperty(parameter, request);
+ return property.map(RestApi::parseBooleanStrict);
+ }
+
+ private static Optional<Integer> parseInteger(String parameter, HttpRequest request) throws NumberFormatException {
+ Optional<String> property = requestProperty(parameter, request);
+ return property.map(Integer::parseInt);
+ }
+
@Override
public HttpResponse handle(HttpRequest request) {
try {
@@ -134,8 +149,10 @@ public class RestApi extends LoggingRequestHandler {
return Response.createErrorResponse(500, "Exception while parsing URI: " + e2.getMessage(), RestUri.apiErrorCodes.URL_PARSING);
}
- Optional<Boolean> create = parseBoolean(CREATE_PARAMETER_NAME, request);
- if (create == null) {
+ final Optional<Boolean> create;
+ try {
+ create = parseBoolean(CREATE_PARAMETER_NAME, request);
+ } catch (IllegalArgumentException e) {
return Response.createErrorResponse(403, "Non valid value for 'create' parameter, must be empty, true, or " +
"false: " + request.getProperty(CREATE_PARAMETER_NAME), RestUri.apiErrorCodes.INVALID_CREATE_VALUE);
}
@@ -184,9 +201,7 @@ public class RestApi extends LoggingRequestHandler {
if (condition != null && ! condition.isEmpty()) {
operationUpdate.getDocumentUpdate().setCondition(new TestAndSetCondition(condition));
}
- if (create.isPresent()) {
- operationUpdate.getDocumentUpdate().setCreateIfNonExistent(create.get());
- }
+ create.ifPresent(c -> operationUpdate.getDocumentUpdate().setCreateIfNonExistent(c));
return operationUpdate;
}
@@ -214,11 +229,16 @@ public class RestApi extends LoggingRequestHandler {
}
};
}
+
+ private static HttpResponse createInvalidParameterResponse(String parameter, String explanation) {
+ return Response.createErrorResponse(403, String.format("Invalid '%s' value. %s", parameter, explanation), RestUri.apiErrorCodes.UNSPECIFIED);
+ }
private HttpResponse handleVisit(RestUri restUri, HttpRequest request) throws RestApiException {
String documentSelection = Optional.ofNullable(request.getProperty(SELECTION)).orElse("");
if (restUri.getGroup().isPresent() && ! restUri.getGroup().get().value.isEmpty()) {
if (! documentSelection.isEmpty()) {
+ // TODO why is this restriction in place? Document selection allows composition of location predicate and other expressions
return Response.createErrorResponse(
400,
"Visiting does not support setting value for group/value in combination with expression, try using only expression parameter instead.",
@@ -234,11 +254,17 @@ public class RestApi extends LoggingRequestHandler {
}
Optional<String> cluster = Optional.ofNullable(request.getProperty(CLUSTER));
Optional<String> continuation = Optional.ofNullable(request.getProperty(CONTINUATION));
- final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, cluster, continuation);
- final ObjectNode resultNode = mapper.createObjectNode();
- if (visit.token.isPresent()) {
- resultNode.put(CONTINUATION, visit.token.get());
+ Optional<Integer> wantedDocumentCount;
+ try {
+ wantedDocumentCount = parseInteger(WANTED_DOCUMENT_COUNT, request);
+ } catch (IllegalArgumentException e) {
+ return createInvalidParameterResponse(WANTED_DOCUMENT_COUNT, "Expected integer");
}
+
+ final OperationHandler.VisitOptions options = new OperationHandler.VisitOptions(cluster, continuation, wantedDocumentCount);
+ final OperationHandler.VisitResult visit = operationHandler.visit(restUri, documentSelection, options);
+ final ObjectNode resultNode = mapper.createObjectNode();
+ visit.token.ifPresent(t -> resultNode.put(CONTINUATION, t));
resultNode.putArray(DOCUMENTS).addPOJO(visit.documentsAsJsonList);
resultNode.put(PATH_NAME, restUri.getRawPath());
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/OperationHandlerImplTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/OperationHandlerImplTest.java
index 796c6d23deb..5735e84f3fe 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/OperationHandlerImplTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/OperationHandlerImplTest.java
@@ -91,7 +91,7 @@ public class OperationHandlerImplTest {
return new String( stream.toByteArray());
}
- private class OperationHandlerImplFixture {
+ private static class OperationHandlerImplFixture {
DocumentAccess documentAccess = mock(DocumentAccess.class);
AtomicReference<VisitorParameters> assignedParameters = new AtomicReference<>();
VisitorControlHandler.CompletionCode completionCode = VisitorControlHandler.CompletionCode.SUCCESS;
@@ -123,6 +123,14 @@ public class OperationHandlerImplTest {
return new RestUri(new URI("http://localhost/document/v1/namespace/document-type/docid/"));
}
+ private static OperationHandler.VisitOptions visitOptionsWithWantedDocumentCount(int wantedDocumentCount) {
+ return new OperationHandler.VisitOptions(Optional.empty(), Optional.empty(), Optional.of(wantedDocumentCount));
+ }
+
+ private static OperationHandler.VisitOptions emptyVisitOptions() {
+ return new OperationHandler.VisitOptions(Optional.empty(), Optional.empty(), Optional.empty());
+ }
+
@Test
public void timeout_without_buckets_visited_throws_timeout_error() throws Exception {
OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
@@ -131,7 +139,7 @@ public class OperationHandlerImplTest {
// RestApiException hides its guts internally, so cannot trivially use @Rule directly to check for error category
try {
OperationHandlerImpl handler = fixture.createHandler();
- handler.visit(dummyVisitUri(), "", Optional.empty(), Optional.empty());
+ handler.visit(dummyVisitUri(), "", emptyVisitOptions());
} catch (RestApiException e) {
assertThat(e.getResponse().getStatus(), is(500));
assertThat(renderRestApiExceptionAsString(e), containsString("Timed out"));
@@ -145,7 +153,7 @@ public class OperationHandlerImplTest {
fixture.bucketsVisited = 1;
OperationHandlerImpl handler = fixture.createHandler();
- handler.visit(dummyVisitUri(), "", Optional.empty(), Optional.empty());
+ handler.visit(dummyVisitUri(), "", emptyVisitOptions());
}
@Test
@@ -153,8 +161,50 @@ public class OperationHandlerImplTest {
OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
OperationHandlerImpl handler = fixture.createHandler();
- handler.visit(dummyVisitUri(), "", Optional.empty(), Optional.empty());
+ handler.visit(dummyVisitUri(), "", emptyVisitOptions());
assertThat(fixture.assignedParameters.get().getSessionTimeoutMs(), is((long)OperationHandlerImpl.VISIT_TIMEOUT_MS));
}
+
+ private static VisitorParameters generatedParametersFromVisitOptions(OperationHandler.VisitOptions options) throws Exception {
+ OperationHandlerImplFixture fixture = new OperationHandlerImplFixture();
+ OperationHandlerImpl handler = fixture.createHandler();
+
+ handler.visit(dummyVisitUri(), "", options);
+ return fixture.assignedParameters.get();
+ }
+
+ @Test
+ public void provided_wanted_document_count_is_propagated_to_visitor_parameters() throws Exception {
+ VisitorParameters params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(123));
+ assertThat(params.getMaxTotalHits(), is((long)123));
+ }
+
+ @Test
+ public void wanted_document_count_is_1_unless_specified() throws Exception {
+ VisitorParameters params = generatedParametersFromVisitOptions(emptyVisitOptions());
+ assertThat(params.getMaxTotalHits(), is((long)1));
+ }
+
+ @Test
+ public void too_low_wanted_document_count_is_bounded_to_1() throws Exception {
+ VisitorParameters params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(-1));
+ assertThat(params.getMaxTotalHits(), is((long)1));
+
+ params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(Integer.MIN_VALUE));
+ assertThat(params.getMaxTotalHits(), is((long)1));
+
+ params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(0));
+ assertThat(params.getMaxTotalHits(), is((long)1));
+ }
+
+ @Test
+ public void too_high_wanted_document_count_is_bounded_to_upper_bound() throws Exception {
+ VisitorParameters params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(OperationHandlerImpl.WANTED_DOCUMENT_COUNT_UPPER_BOUND + 1));
+ assertThat(params.getMaxTotalHits(), is((long)OperationHandlerImpl.WANTED_DOCUMENT_COUNT_UPPER_BOUND));
+
+ params = generatedParametersFromVisitOptions(visitOptionsWithWantedDocumentCount(Integer.MAX_VALUE));
+ assertThat(params.getMaxTotalHits(), is((long)OperationHandlerImpl.WANTED_DOCUMENT_COUNT_UPPER_BOUND));
+ }
+
}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/MockedOperationHandler.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/MockedOperationHandler.java
index 97f45c4062a..f353013232f 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/MockedOperationHandler.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/MockedOperationHandler.java
@@ -18,9 +18,11 @@ public class MockedOperationHandler implements OperationHandler {
int deleteCount = 0;
@Override
- public VisitResult visit(RestUri restUri, String documentSelection, Optional<String> cluster, Optional<String> continuation) throws RestApiException {
- return new VisitResult(Optional.of("token"), "List of json docs, cont token " + continuation.map(a->a).orElse("not set") + ", doc selection: '"
- + documentSelection + "'");
+ public VisitResult visit(RestUri restUri, String documentSelection, VisitOptions options) throws RestApiException {
+ return new VisitResult(Optional.of("token"), "List of json docs, cont token "
+ + options.continuation.orElse("not set") + ", doc selection: '"
+ + documentSelection + "'"
+ + options.wantedDocumentCount.map(n -> String.format(", min docs returned: %d", n)).orElse(""));
}
@Override
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java
index 10ae80a5d03..91390e3a0d8 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java
@@ -270,6 +270,7 @@ public class RestApiTest {
assertThat(rest, containsString(visit_response_part3));
}
+ // TODO why is this a limitation?
String visit_test_bad_uri = "/document/v1/namespace/document-type/group/abc?continuation=abc&selection=foo";
String visit_test_bad_response = "Visiting does not support setting value for group/value in combination with expression";
@@ -294,6 +295,22 @@ public class RestApiTest {
assertThat(rest, containsString(visit_test_response_selection_rewrite));
}
+ @Test
+ public void wanted_document_count_returned_parameter_is_propagated() throws IOException {
+ Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/?wantedDocumentCount=321", getFirstListenPort()));
+ HttpGet get = new HttpGet(request.getUri());
+ String rest = doRest(get);
+ assertThat(rest, containsString("min docs returned: 321"));
+ }
+
+ @Test
+ public void wanted_document_count_parameter_returns_error_response() throws IOException {
+ Request request = new Request(String.format("http://localhost:%s/document/v1/namespace/document-type/docid/?wantedDocumentCount=aardvark", getFirstListenPort()));
+ HttpGet get = new HttpGet(request.getUri());
+ String rest = doRest(get);
+ assertThat(rest, containsString("Invalid 'wantedDocumentCount' value. Expected integer"));
+ }
+
private String doRest(HttpRequestBase request) throws IOException {
HttpClient client = HttpClientBuilder.create().build();
HttpResponse response = client.execute(request);
diff --git a/vespaclient-core/CMakeLists.txt b/vespaclient-core/CMakeLists.txt
new file mode 100644
index 00000000000..facea0b447d
--- /dev/null
+++ b/vespaclient-core/CMakeLists.txt
@@ -0,0 +1,3 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_config_definition(src/main/resources/configdefinitions/feeder.def vespaclient.config.feeder.def)
+install_config_definition(src/main/resources/configdefinitions/spooler.def vespa.config.content.spooler.spooler.def)
diff --git a/vespaclient-java/CMakeLists.txt b/vespaclient-java/CMakeLists.txt
new file mode 100644
index 00000000000..b9240adee8a
--- /dev/null
+++ b/vespaclient-java/CMakeLists.txt
@@ -0,0 +1,12 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(vespaclient-java)
+
+vespa_install_script(src/main/sh/vespa-document-statistics.sh vespa-document-statistics bin)
+vespa_install_script(src/main/sh/vespa-stat.sh vespa-stat bin)
+vespa_install_script(src/main/sh/vespa-query-profile-dump-tool.sh vespa-query-profile-dump-tool bin)
+vespa_install_script(src/main/sh/vespa-summary-benchmark.sh vespa-summary-benchmark bin)
+vespa_install_script(src/main/sh/vespa-destination.sh vespa-destination bin)
+vespa_install_script(src/main/sh/vespa-feeder.sh vespa-feeder bin)
+vespa_install_script(src/main/sh/vespa-get.sh vespa-get bin)
+vespa_install_script(src/main/sh/vespa-visit.sh vespa-visit bin)
+vespa_install_script(src/main/sh/vespa-visit-target.sh vespa-visit-target bin)
diff --git a/vespaclient-java/src/main/java/com/yahoo/vespavisit/VdsVisit.java b/vespaclient-java/src/main/java/com/yahoo/vespavisit/VdsVisit.java
index 872ca3e347a..79b48731cc7 100644
--- a/vespaclient-java/src/main/java/com/yahoo/vespavisit/VdsVisit.java
+++ b/vespaclient-java/src/main/java/com/yahoo/vespavisit/VdsVisit.java
@@ -261,7 +261,7 @@ public class VdsVisit {
.longOpt("cluster")
.hasArg(true)
.argName("cluster")
- .desc("Visit the given VDS cluster.")
+ .desc("Visit the given cluster.")
.build());
options.addOption("v", "verbose", false, "Indent XML, show progress and info on STDERR.");
diff --git a/vespajlib/CMakeLists.txt b/vespajlib/CMakeLists.txt
new file mode 100644
index 00000000000..7235535ace8
--- /dev/null
+++ b/vespajlib/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_java_artifact(vespajlib)
diff --git a/vespajlib/pom.xml b/vespajlib/pom.xml
index 27e8d4020ae..1f98a5e4c02 100644
--- a/vespajlib/pom.xml
+++ b/vespajlib/pom.xml
@@ -11,7 +11,7 @@
<packaging>container-plugin</packaging>
<version>6-SNAPSHOT</version>
<description>
- Library for use in Java components of Vespa. Shared code which did
+        Library for use in Java components of Vespa. Shared code which does
not fit anywhere else.
</description>
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLock.java b/vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLock.java
index 2a3c70d31d2..b0d26bfeb1c 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLock.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLock.java
@@ -1,6 +1,8 @@
package com.yahoo.concurrent.classlock;
/**
+ * An acquired lock which is released on close
+ *
* @author valerijf
*/
public class ClassLock implements AutoCloseable {
@@ -12,6 +14,11 @@ public class ClassLock implements AutoCloseable {
this.clazz = clazz;
}
+ /**
+ * Releases this lock
+ *
+ * @throws IllegalArgumentException if this lock has already been released
+ */
@Override
public void close() {
classLocking.unlock(clazz, this);
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLocking.java b/vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLocking.java
index 8014148cc30..5330e869396 100644
--- a/vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLocking.java
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/classlock/ClassLocking.java
@@ -2,36 +2,72 @@ package com.yahoo.concurrent.classlock;
import java.util.HashMap;
import java.util.Map;
+import java.util.function.BooleanSupplier;
/**
+ * This class is injectable to Vespa plugins and is used to acquire locks across
+ * application deployments.
+ *
* @author valerijf
*/
public class ClassLocking {
- private final Map<Class<?>, ClassLock> classLocks = new HashMap<>();
+ private final Map<String, ClassLock> classLocks = new HashMap<>();
private final Object monitor = new Object();
+ /**
+     * Locks on the given class. This will block until the lock is acquired.
+ * Users of this <b>must</b> close any lock acquired.
+ */
public ClassLock lock(Class<?> clazz) {
+ return lockWhile(clazz, () -> true);
+ }
+
+ /**
+     * Locks on the given class. This will block until the lock is acquired or the interrupt condition is
+     * no longer true. Condition is only checked at the start, every time a lock is released
+ * and when {@link #interrupt()} is called.
+ *
+ * Users of this <b>must</b> close any lock acquired.
+ *
+ * @throws LockInterruptException if interruptCondition returned false before
+ * the lock could be acquired
+ */
+ public ClassLock lockWhile(Class<?> clazz, BooleanSupplier interruptCondition) {
synchronized (monitor) {
- while(classLocks.containsKey(clazz)) {
+ while (classLocks.containsKey(clazz.getName())) {
try {
monitor.wait();
- } catch (InterruptedException ignored) { }
+ } catch (InterruptedException ignored) {
+ }
+
+ if (!interruptCondition.getAsBoolean()) {
+ throw new LockInterruptException();
+ }
}
ClassLock classLock = new ClassLock(this, clazz);
- classLocks.put(clazz, classLock);
+ classLocks.put(clazz.getName(), classLock);
return classLock;
}
}
void unlock(Class<?> clazz, ClassLock classLock) {
synchronized (monitor) {
- if (classLock.equals(classLocks.get(clazz))) {
- classLocks.remove(clazz);
+ if (classLock.equals(classLocks.get(clazz.getName()))) {
+ classLocks.remove(clazz.getName());
monitor.notifyAll();
} else {
throw new IllegalArgumentException("Lock has already been released");
}
}
}
+
+ /**
+ * Notifies {@link #lockWhile} to check the interrupt condition
+ */
+ public void interrupt() {
+ synchronized (monitor) {
+ monitor.notifyAll();
+ }
+ }
}
diff --git a/vespajlib/src/main/java/com/yahoo/concurrent/classlock/LockInterruptException.java b/vespajlib/src/main/java/com/yahoo/concurrent/classlock/LockInterruptException.java
new file mode 100644
index 00000000000..b2ae4166564
--- /dev/null
+++ b/vespajlib/src/main/java/com/yahoo/concurrent/classlock/LockInterruptException.java
@@ -0,0 +1,8 @@
+package com.yahoo.concurrent.classlock;
+
+/**
+ * @author valerijf
+ */
+@SuppressWarnings("serial")
+public class LockInterruptException extends RuntimeException {
+}
diff --git a/vespajlib/src/main/java/com/yahoo/io/IOUtils.java b/vespajlib/src/main/java/com/yahoo/io/IOUtils.java
index a5e16fc89ea..2572842b213 100644
--- a/vespajlib/src/main/java/com/yahoo/io/IOUtils.java
+++ b/vespajlib/src/main/java/com/yahoo/io/IOUtils.java
@@ -14,9 +14,10 @@ import java.nio.ByteBuffer;
* <p>Some static io convenience methods.</p>
*
* @author bratseth
- * @author <a href="mailto:borud@yahoo-inc.com">Bjorn Borud</a>
+ * @author Bjorn Borud
*/
public abstract class IOUtils {
+
static private final Charset utf8Charset = Charset.forName("utf-8");
/** Closes a writer, or does nothing if the writer is null */
@@ -410,6 +411,11 @@ public abstract class IOUtils {
return ret.toString();
}
+ /** Read an input stream completely into a string */
+ public static String readAll(InputStream stream, Charset charset) throws IOException {
+ return readAll(new InputStreamReader(stream, charset));
+ }
+
/** Convenience method for closing a list of readers. Does nothing if the given reader list is null. */
public static void closeAll(List<Reader> readers) {
if (readers==null) return;
diff --git a/vespalib/src/tests/data/input_reader/input_reader_test.cpp b/vespalib/src/tests/data/input_reader/input_reader_test.cpp
index e8098b7e3ea..535c188d01e 100644
--- a/vespalib/src/tests/data/input_reader/input_reader_test.cpp
+++ b/vespalib/src/tests/data/input_reader/input_reader_test.cpp
@@ -112,4 +112,47 @@ TEST("expect that obtain does not set failure state on input reader") {
}
}
+TEST("require that bytes can be unread when appropriate") {
+ const char *data = "12345";
+ MemoryInput memory_input(data);
+ ChunkedInput input(memory_input, 3);
+ InputReader src(input);
+ EXPECT_TRUE(!src.try_unread());
+ EXPECT_EQUAL(src.read(), '1');
+ EXPECT_EQUAL(src.read(), '2');
+ EXPECT_EQUAL(src.read(), '3');
+ EXPECT_TRUE(src.try_unread());
+ EXPECT_TRUE(src.try_unread());
+ EXPECT_TRUE(src.try_unread());
+ EXPECT_TRUE(!src.try_unread());
+ EXPECT_EQUAL(src.read(), '1');
+ EXPECT_EQUAL(src.read(), '2');
+ EXPECT_EQUAL(src.read(), '3');
+ EXPECT_EQUAL(src.read(), '4');
+ EXPECT_TRUE(src.try_unread());
+ EXPECT_TRUE(!src.try_unread());
+ EXPECT_EQUAL(src.read(), '4');
+ EXPECT_EQUAL(src.read(), '5');
+ EXPECT_EQUAL(src.obtain(), 0u);
+ EXPECT_TRUE(!src.try_unread());
+ EXPECT_TRUE(!src.failed());
+}
+
+TEST("require that try read finds eof without failing the reader") {
+ const char *data = "12345";
+ MemoryInput memory_input(data);
+ ChunkedInput input(memory_input, 3);
+ InputReader src(input);
+ EXPECT_EQUAL(src.try_read(), '1');
+ EXPECT_EQUAL(src.try_read(), '2');
+ EXPECT_EQUAL(src.try_read(), '3');
+ EXPECT_EQUAL(src.try_read(), '4');
+ EXPECT_EQUAL(src.try_read(), '5');
+ EXPECT_TRUE(src.try_unread());
+ EXPECT_EQUAL(src.try_read(), '5');
+ EXPECT_EQUAL(src.try_read(), '\0');
+ EXPECT_TRUE(!src.try_unread());
+ EXPECT_TRUE(!src.failed());
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/executor/executor_test.cpp b/vespalib/src/tests/executor/executor_test.cpp
index c508417e1c2..9015391beaa 100644
--- a/vespalib/src/tests/executor/executor_test.cpp
+++ b/vespalib/src/tests/executor/executor_test.cpp
@@ -2,30 +2,13 @@
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/util/closuretask.h>
+#include <vespa/vespalib/util/lambdatask.h>
using namespace vespalib;
-namespace {
-
-class Test : public vespalib::TestApp {
- void requireThatClosuresCanBeWrappedInATask();
-
-public:
- int Main() override;
-};
-
-int
-Test::Main()
-{
- TEST_INIT("executor_test");
-
- TEST_DO(requireThatClosuresCanBeWrappedInATask());
-
- TEST_DONE();
-}
-
void setBool(bool *b) { *b = true; }
-void Test::requireThatClosuresCanBeWrappedInATask() {
+
+TEST("require that closures can be wrapped as tasks") {
bool called = false;
Executor::Task::UP task = makeTask(makeClosure(setBool, &called));
EXPECT_TRUE(!called);
@@ -33,6 +16,12 @@ void Test::requireThatClosuresCanBeWrappedInATask() {
EXPECT_TRUE(called);
}
-} // namespace
+TEST("require that lambdas can be wrapped as tasks") {
+ bool called = false;
+ Executor::Task::UP task = makeLambdaTask([&called]() { called = true; });
+ EXPECT_TRUE(!called);
+ task->run();
+ EXPECT_TRUE(called);
+}
-TEST_APPHOOK(Test);
+TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/objects/nbostream/nbostream_test.cpp b/vespalib/src/tests/objects/nbostream/nbostream_test.cpp
index f2e24f5acdc..8b9ccb8a848 100644
--- a/vespalib/src/tests/objects/nbostream/nbostream_test.cpp
+++ b/vespalib/src/tests/objects/nbostream/nbostream_test.cpp
@@ -201,22 +201,6 @@ TEST_F("Test serializing std::pair", Fixture)
f.assertSerialize(exp, val);
}
-TEST_F("Test saveVector", Fixture)
-{
- std::vector<int16_t> val({ 0x0123, 0x4567 });
- val.reserve(16);
- ExpBuffer exp({ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
- 0x01, 0x23, 0x45, 0x67 });
- f._stream.saveVector(val);
- EXPECT_EQUAL(exp, f._stream);
- std::vector<int16_t> checkVal;
- f._stream.restoreVector(checkVal);
- EXPECT_EQUAL(val, checkVal);
- EXPECT_EQUAL(val.capacity(), checkVal.capacity());
-}
-
-
TEST_F("Test write", Fixture)
{
f._stream.write("Hello", 5);
diff --git a/vespalib/src/tests/slime/slime_binary_format_test.cpp b/vespalib/src/tests/slime/slime_binary_format_test.cpp
index 371a843a445..e6661cbf554 100644
--- a/vespalib/src/tests/slime/slime_binary_format_test.cpp
+++ b/vespalib/src/tests/slime/slime_binary_format_test.cpp
@@ -632,8 +632,7 @@ TEST("testOptionalDecodeOrder") {
Slime from_json(const vespalib::string &json) {
Slime slime;
- size_t size = vespalib::slime::JsonFormat::decode(json, slime);
- EXPECT_EQUAL(size, json.size());
+ EXPECT_TRUE(vespalib::slime::JsonFormat::decode(json, slime) > 0);
return slime;
}
diff --git a/vespalib/src/tests/slime/slime_json_format_test.cpp b/vespalib/src/tests/slime/slime_json_format_test.cpp
index 52e293f4a5e..d1f77f09af1 100644
--- a/vespalib/src/tests/slime/slime_json_format_test.cpp
+++ b/vespalib/src/tests/slime/slime_json_format_test.cpp
@@ -1,10 +1,14 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/vespalib/data/slime/slime.h>
+#include <vespa/vespalib/data/input.h>
+#include <vespa/vespalib/data/memory_input.h>
#include <iostream>
#include <fstream>
using namespace vespalib::slime::convenience;
+using vespalib::Input;
+using vespalib::MemoryInput;
std::string make_json(const Slime &slime, bool compact) {
vespalib::SimpleBuffer buf;
@@ -60,7 +64,13 @@ std::string json_string(const std::string &str) {
std::string normalize(const std::string &json) {
Slime slime;
- EXPECT_GREATER(vespalib::slime::JsonFormat::decode(json, slime), 0u);
+ EXPECT_TRUE(vespalib::slime::JsonFormat::decode(json, slime) > 0);
+ return make_json(slime, true);
+}
+
+std::string normalize(Input &input) {
+ Slime slime;
+ EXPECT_TRUE(vespalib::slime::JsonFormat::decode(input, slime) > 0);
return make_json(slime, true);
}
@@ -359,4 +369,19 @@ TEST_F("decode bytes not null-terminated", Slime) {
EXPECT_TRUE(parse_json_bytes(mem, f));
}
+TEST("require that multiple adjacent values can be decoded from a single input") {
+ vespalib::string data("true{}false[]null\"foo\"'bar'1.5null");
+ MemoryInput input(data);
+ EXPECT_EQUAL(std::string("true"), normalize(input));
+ EXPECT_EQUAL(std::string("{}"), normalize(input));
+ EXPECT_EQUAL(std::string("false"), normalize(input));
+ EXPECT_EQUAL(std::string("[]"), normalize(input));
+ EXPECT_EQUAL(std::string("null"), normalize(input));
+ EXPECT_EQUAL(std::string("\"foo\""), normalize(input));
+ EXPECT_EQUAL(std::string("\"bar\""), normalize(input));
+ EXPECT_EQUAL(std::string("1.5"), normalize(input));
+ EXPECT_EQUAL(std::string("null"), normalize(input));
+ EXPECT_EQUAL(input.obtain().size, 0u);
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/slime/slime_test.cpp b/vespalib/src/tests/slime/slime_test.cpp
index 02d9efb0e1e..fae61f4194b 100644
--- a/vespalib/src/tests/slime/slime_test.cpp
+++ b/vespalib/src/tests/slime/slime_test.cpp
@@ -4,6 +4,7 @@ LOG_SETUP("slime_test");
#include <vespa/vespalib/testkit/testapp.h>
#include <vespa/vespalib/data/slime/slime.h>
#include <vespa/vespalib/data/slime/strfmt.h>
+#include <type_traits>
using namespace vespalib::slime::convenience;
@@ -396,4 +397,50 @@ TEST("require that we can resolve to symbol table from a cursor") {
EXPECT_TRUE(sd == slime.lookup(D));
}
+template <typename T>
+void verify_cursor_ref(T &&) {
+ EXPECT_TRUE((std::is_same<Cursor&,T>::value));
+}
+
+template <typename T>
+void verify_inspector_ref(T &&) {
+ EXPECT_TRUE((std::is_same<Inspector&,T>::value));
+}
+
+TEST("require that top-level convenience accessors work as expected for objects") {
+ Slime object;
+ Cursor &c = object.setObject();
+ c.setLong("a", 10);
+ c.setLong("b", 20);
+ c.setLong("c", 30);
+ Symbol sym_b = object.lookup("b");
+ const Slime &const_object = object;
+ TEST_DO(verify_cursor_ref(object[0]));
+ TEST_DO(verify_inspector_ref(const_object[0]));
+ EXPECT_EQUAL(object[0].asLong(), 0);
+ EXPECT_EQUAL(object[sym_b].asLong(), 20);
+ EXPECT_EQUAL(object["c"].asLong(), 30);
+ EXPECT_EQUAL(const_object[0].asLong(), 0);
+ EXPECT_EQUAL(const_object[sym_b].asLong(), 20);
+ EXPECT_EQUAL(const_object["c"].asLong(), 30);
+}
+
+TEST("require that top-level convenience accessors work as expected for arrays") {
+ Slime array;
+ Cursor &c = array.setArray();
+ c.addLong(10);
+ c.addLong(20);
+ c.addLong(30);
+ Symbol sym_b(1);
+ const Slime &const_array = array;
+ TEST_DO(verify_cursor_ref(array[0]));
+ TEST_DO(verify_inspector_ref(const_array[0]));
+ EXPECT_EQUAL(array[0].asLong(), 10);
+ EXPECT_EQUAL(array[sym_b].asLong(), 0);
+ EXPECT_EQUAL(array["c"].asLong(), 0);
+ EXPECT_EQUAL(const_array[0].asLong(), 10);
+ EXPECT_EQUAL(const_array[sym_b].asLong(), 0);
+ EXPECT_EQUAL(const_array["c"].asLong(), 0);
+}
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/vespa/vespalib/data/input_reader.h b/vespalib/src/vespa/vespalib/data/input_reader.h
index b3dfa0d4cd7..f92ced7e508 100644
--- a/vespalib/src/vespa/vespalib/data/input_reader.h
+++ b/vespalib/src/vespa/vespalib/data/input_reader.h
@@ -74,6 +74,35 @@ public:
}
/**
+ * Try to read a single byte. This function will not fail the
+ * reader with buffer underflow if eof is reached.
+ *
+ * @return the next input byte, or 0 if eof is reached
+ **/
+ char try_read() {
+ if (__builtin_expect(obtain() > 0, true)) {
+ return _data.data[_pos++];
+ }
+ return 0;
+ }
+
+ /**
+ * Try to unread a single byte. This will work for data that is
+ * read, but not yet evicted. Note that after eof is found (the
+ * obtain function returns 0), unreading will not be possible.
+ *
+ * @return whether unreading could be performed
+ **/
+ bool try_unread() {
+ if (__builtin_expect(_pos > 0, true)) {
+ --_pos;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ /**
* Read a continous sequence of bytes. Bytes within an input chunk
* will be referenced directly. Reads crossing chunk boundries
* will result in a gathering copy into a temporary buffer owned
diff --git a/vespalib/src/vespa/vespalib/data/slime/json_format.cpp b/vespalib/src/vespa/vespalib/data/slime/json_format.cpp
index d6126667d68..18bf5289f0d 100644
--- a/vespalib/src/vespa/vespalib/data/slime/json_format.cpp
+++ b/vespalib/src/vespa/vespalib/data/slime/json_format.cpp
@@ -180,11 +180,7 @@ struct JsonDecoder {
JsonDecoder(InputReader &reader) : in(reader), c(in.read()), key(), value() {}
void next() {
- if (in.obtain() > 0) {
- c = in.read();
- } else {
- c = 0;
- }
+ c = in.try_read();
}
bool skip(char x) {
@@ -489,6 +485,7 @@ JsonFormat::decode(Input &input, Slime &slime)
InputReader reader(input);
JsonDecoder decoder(reader);
decoder.decodeValue(slime);
+ reader.try_unread();
if (reader.failed()) {
slime.wrap("partial_result");
slime.get().setLong("offending_offset", reader.get_offset());
diff --git a/vespalib/src/vespa/vespalib/data/slime/slime.h b/vespalib/src/vespa/vespalib/data/slime/slime.h
index af081887c9a..aa44b38b353 100644
--- a/vespalib/src/vespa/vespalib/data/slime/slime.h
+++ b/vespalib/src/vespa/vespalib/data/slime/slime.h
@@ -130,6 +130,12 @@ public:
Inspector &get() const { return _root.get(); }
+ template <typename ID>
+ Inspector &operator[](ID id) const { return get()[id]; }
+
+ template <typename ID>
+ Cursor &operator[](ID id) { return get()[id]; }
+
Cursor &setNix() {
return _root.set(slime::NixValueFactory());
}
diff --git a/vespalib/src/vespa/vespalib/objects/nbostream.cpp b/vespalib/src/vespa/vespalib/objects/nbostream.cpp
index d8e35a69d16..0225b788e68 100644
--- a/vespalib/src/vespa/vespalib/objects/nbostream.cpp
+++ b/vespalib/src/vespa/vespalib/objects/nbostream.cpp
@@ -1,8 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#include "nbostream.hpp"
+#include "nbostream.h"
#include "hexdump.h"
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/util/stringfmt.h>
+#include <cassert>
namespace vespalib {
@@ -135,13 +136,4 @@ void nbostream::swap(nbostream & os)
std::swap(_rbuf, os._rbuf);
}
-template nbostream& nbostream::saveVector<int16_t>(const std::vector<int16_t> &);
-template nbostream& nbostream::restoreVector<int16_t>(std::vector<int16_t> &);
-template nbostream& nbostream::saveVector<int32_t>(const std::vector<int32_t> &);
-template nbostream& nbostream::restoreVector<int32_t>(std::vector<int32_t> &);
-template nbostream& nbostream::saveVector<uint32_t>(const std::vector<uint32_t> &);
-template nbostream& nbostream::restoreVector<uint32_t>(std::vector<uint32_t> &);
-template nbostream& nbostream::saveVector<uint64_t>(const std::vector<uint64_t> &);
-template nbostream& nbostream::restoreVector<uint64_t>(std::vector<uint64_t> &);
-
}
diff --git a/vespalib/src/vespa/vespalib/objects/nbostream.h b/vespalib/src/vespa/vespalib/objects/nbostream.h
index 75d3bc03313..70a590f79d1 100644
--- a/vespalib/src/vespa/vespalib/objects/nbostream.h
+++ b/vespalib/src/vespa/vespalib/objects/nbostream.h
@@ -138,16 +138,6 @@ public:
return *this;
}
- // For checkpointing where capacity should be restored
- template <typename T>
- nbostream &
- saveVector(const std::vector<T> &val);
-
- // For checkpointing where capacity should be restored
- template <typename T>
- nbostream &
- restoreVector(std::vector<T> &val);
-
size_t size() const { return left(); }
size_t capacity() const { return _wbuf.size(); }
bool empty() const { return size() == 0; }
diff --git a/vespalib/src/vespa/vespalib/objects/nbostream.hpp b/vespalib/src/vespa/vespalib/objects/nbostream.hpp
deleted file mode 100644
index 8b045f517a1..00000000000
--- a/vespalib/src/vespa/vespalib/objects/nbostream.hpp
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-#pragma once
-
-#include "nbostream.h"
-#include <cassert>
-
-namespace vespalib {
-
-template <typename T>
-nbostream &
-nbostream::saveVector(const std::vector<T> &val)
-{
- size_t valCapacity = val.capacity();
- size_t valSize = val.size();
- assert(valCapacity >= valSize);
- *this << valCapacity << valSize;
- for (const T & v : val) {
- *this << v;
- }
- return *this;
-}
-
-template <typename T>
-nbostream &
-nbostream::restoreVector(std::vector<T> &val)
-{
- size_t valCapacity = 0;
- size_t valSize = 0;
- *this >> valCapacity >> valSize;
- assert(valCapacity >= valSize);
- val.reserve(valCapacity);
- val.clear();
- T i;
- for (size_t j = 0; j < valSize; ++j) {
- *this >> i;
- val.push_back(i);
- }
- return *this;
-}
-
-}
diff --git a/vespalib/src/vespa/vespalib/util/CMakeLists.txt b/vespalib/src/vespa/vespalib/util/CMakeLists.txt
index 6d08c3b1126..58739ee4df6 100644
--- a/vespalib/src/vespa/vespalib/util/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/util/CMakeLists.txt
@@ -37,6 +37,7 @@ vespa_add_library(vespalib_vespalib_util OBJECT
rwlock.cpp
sequence.cpp
sha1.cpp
+ sig_catch.cpp
signalhandler.cpp
simple_thread_bundle.cpp
slaveproc.cpp
diff --git a/searchlib/src/vespa/searchlib/common/lambdatask.h b/vespalib/src/vespa/vespalib/util/lambdatask.h
index 01b57694d11..35543407aaa 100644
--- a/searchlib/src/vespa/searchlib/common/lambdatask.h
+++ b/vespalib/src/vespa/vespalib/util/lambdatask.h
@@ -1,9 +1,9 @@
// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
-#include <vespa/vespalib/util/executor.h>
+#include "executor.h"
-namespace search {
+namespace vespalib {
template <class FunctionType>
class LambdaTask : public vespalib::Executor::Task {
@@ -26,4 +26,4 @@ makeLambdaTask(FunctionType &&function)
(std::forward<FunctionType>(function));
}
-} // namespace search
+}
diff --git a/vespalib/src/vespa/vespalib/util/sig_catch.cpp b/vespalib/src/vespa/vespalib/util/sig_catch.cpp
new file mode 100644
index 00000000000..48bf8fbb05d
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/sig_catch.cpp
@@ -0,0 +1,22 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include "sig_catch.h"
+#include "signalhandler.h"
+
+namespace vespalib {
+
+SigCatch::SigCatch()
+{
+ SignalHandler::PIPE.ignore();
+ SignalHandler::INT.hook();
+ SignalHandler::TERM.hook();
+}
+
+bool
+SigCatch::receivedStopSignal()
+{
+ return (SignalHandler::INT.check() ||
+ SignalHandler::TERM.check());
+}
+
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/util/sig_catch.h b/vespalib/src/vespa/vespalib/util/sig_catch.h
new file mode 100644
index 00000000000..96c74ee4d07
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/util/sig_catch.h
@@ -0,0 +1,25 @@
+// Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace vespalib {
+
+/**
+ * @brief Use this class for simple common-case signal handling.
+ **/
+
+class SigCatch
+{
+public:
+ /**
+ * Constructor installs signal handlers.
+ **/
+ SigCatch();
+
+ /**
+ * Check if a signal to stop has been received.
+ **/
+ bool receivedStopSignal();
+};
+
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/util/signalhandler.cpp b/vespalib/src/vespa/vespalib/util/signalhandler.cpp
index 6bc7fcd17df..21543ef10d8 100644
--- a/vespalib/src/vespa/vespalib/util/signalhandler.cpp
+++ b/vespalib/src/vespa/vespalib/util/signalhandler.cpp
@@ -25,6 +25,7 @@ Shutdown shutdown;
SignalHandler SignalHandler::HUP(SIGHUP);
SignalHandler SignalHandler::INT(SIGINT);
SignalHandler SignalHandler::TERM(SIGTERM);
+SignalHandler SignalHandler::CHLD(SIGCHLD);
SignalHandler SignalHandler::PIPE(SIGPIPE);
SignalHandler SignalHandler::SEGV(SIGSEGV);
SignalHandler SignalHandler::ABRT(SIGABRT);
diff --git a/vespalib/src/vespa/vespalib/util/signalhandler.h b/vespalib/src/vespa/vespalib/util/signalhandler.h
index 6b233b2e690..f34ddba5530 100644
--- a/vespalib/src/vespa/vespalib/util/signalhandler.h
+++ b/vespalib/src/vespa/vespalib/util/signalhandler.h
@@ -67,6 +67,7 @@ public:
static SignalHandler HUP;
static SignalHandler INT;
static SignalHandler TERM;
+ static SignalHandler CHLD;
static SignalHandler PIPE;
static SignalHandler SEGV;
static SignalHandler ABRT;
diff --git a/vespamalloc/src/tests/thread/thread_test.sh b/vespamalloc/src/tests/thread/thread_test.sh
index b06c27e4a17..b2e91840711 100755
--- a/vespamalloc/src/tests/thread/thread_test.sh
+++ b/vespamalloc/src/tests/thread/thread_test.sh
@@ -17,8 +17,8 @@ fi
VESPA_MALLOC_SO=../../../src/vespamalloc/libvespamalloc.so
VESPA_MALLOC_SO_D=../../../src/vespamalloc/libvespamallocd.so
-LD_PRELOAD=$VESPA_MALLOC_SO ./vespamalloc_thread_test_app return 20
-LD_PRELOAD=$VESPA_MALLOC_SO ./vespamalloc_thread_test_app exit 20
-LD_PRELOAD=$VESPA_MALLOC_SO ./vespamalloc_thread_test_app cancel 20
+#LD_PRELOAD=$VESPA_MALLOC_SO ./vespamalloc_thread_test_app return 20
+#LD_PRELOAD=$VESPA_MALLOC_SO ./vespamalloc_thread_test_app exit 20
+#LD_PRELOAD=$VESPA_MALLOC_SO ./vespamalloc_thread_test_app cancel 20
#LD_PRELOAD=$VESPA_MALLOC_SO ./vespamalloc_racemanythreads_test_app 4000 20
#LD_PRELOAD=$VESPA_MALLOC_SO_D ./vespamalloc_racemanythreads_test_app 4000 20
diff --git a/vsm/src/tests/docsum/docsum.cpp b/vsm/src/tests/docsum/docsum.cpp
index 43cf1c5309c..4409c5f6215 100644
--- a/vsm/src/tests/docsum/docsum.cpp
+++ b/vsm/src/tests/docsum/docsum.cpp
@@ -116,7 +116,7 @@ DocsumTest::assertSlimeFieldWriter(SlimeFieldWriter & sfw, const FieldValue & fv
vespalib::Slime expSlime;
size_t used = vespalib::slime::JsonFormat::decode(exp, expSlime);
- EXPECT_EQUAL(exp.size(), used);
+ EXPECT_TRUE(used > 0);
EXPECT_EQUAL(expSlime, gotSlime);
}
diff --git a/vsm/src/vespa/vsm/config/CMakeLists.txt b/vsm/src/vespa/vsm/config/CMakeLists.txt
index ea65d8c8fb4..e3bd2db68e2 100644
--- a/vsm/src/vespa/vsm/config/CMakeLists.txt
+++ b/vsm/src/vespa/vsm/config/CMakeLists.txt
@@ -4,8 +4,8 @@ vespa_add_library(vsm_vconfig OBJECT
DEPENDS
)
vespa_generate_config(vsm_vconfig vsmfields.def)
-install(FILES vsmfields.def RENAME vespa.config.search.vsm.vsmfields.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(vsmfields.def vespa.config.search.vsm.vsmfields.def)
vespa_generate_config(vsm_vconfig vsm.def)
-install(FILES vsm.def RENAME vespa.config.search.vsm.vsm.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(vsm.def vespa.config.search.vsm.vsm.def)
vespa_generate_config(vsm_vconfig vsmsummary.def)
-install(FILES vsmsummary.def RENAME vespa.config.search.vsm.vsmsummary.def DESTINATION var/db/vespa/config_server/serverdb/classes)
+install_config_definition(vsmsummary.def vespa.config.search.vsm.vsmsummary.def)
diff --git a/zkfacade/CMakeLists.txt b/zkfacade/CMakeLists.txt
new file mode 100644
index 00000000000..6610356c0fd
--- /dev/null
+++ b/zkfacade/CMakeLists.txt
@@ -0,0 +1,2 @@
+# Copyright 2017 Yahoo Holdings. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+install_fat_java_artifact(zkfacade)
diff --git a/zkfacade/src/main/java/com/yahoo/vespa/curator/Lock.java b/zkfacade/src/main/java/com/yahoo/vespa/curator/Lock.java
index 4ae02bb3745..1ab5558e2b9 100644
--- a/zkfacade/src/main/java/com/yahoo/vespa/curator/Lock.java
+++ b/zkfacade/src/main/java/com/yahoo/vespa/curator/Lock.java
@@ -43,7 +43,7 @@ public class Lock implements Mutex {
}
if (! acquired) throw new UncheckedTimeoutException("Timed out after waiting " + timeout.toString() +
- " to acquire lock + '" + lockPath + "'");
+ " to acquire lock '" + lockPath + "'");
}
@Override