author     Vegard Sjonfjell <vegard@yahoo-inc.com>    2016-10-05 15:34:15 +0200
committer  Vegard Sjonfjell <vegard@yahoo-inc.com>    2016-10-05 15:34:15 +0200
commit     d22ceb89608611124291c5c9e30f7f70bac8aa98 (patch)
tree       960ff704f4fc0f0fdb9dcc776e87e957b420853b
parent     23cd65bb2d4a25f2d52a70f573ce4a3e25ee6b8c (diff)
parent     e8b571ebc3eb2592f16ca546a65bf318ba0f4df7 (diff)

Merge branch 'master' into voffeloff/move-jsontesthelper
-rw-r--r--application-preprocessor/src/main/java/com/yahoo/application/preprocessor/ApplicationPreprocessor.java2
-rw-r--r--application/src/main/java/com/yahoo/application/Application.java8
-rw-r--r--application/src/main/java/com/yahoo/application/container/JDisc.java5
-rw-r--r--application/src/test/java/com/yahoo/application/ApplicationTest.java3
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java69
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGenerator.java345
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateHistoryEntry.java46
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateReason.java15
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateView.java4
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculator.java143
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java197
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java4
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java4
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeEvent.java4
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java32
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateReason.java10
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java530
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateVersionTracker.java140
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateGenerator.java941
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java2
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/MasterDataGatherer.java4
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java3
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/ClusterStateRequestHandler.java10
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java25
-rw-r--r--clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StaticResourceRequestHandler.java66
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java170
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java895
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java7
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculatorTest.java319
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java4
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java198
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java13
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java80
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java4
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandlerTest.java (renamed from clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/SystemStateGeneratorTest.java)88
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java174
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateVersionTrackerTest.java229
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java40
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java37
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java40
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java27
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java49
-rw-r--r--clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java39
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java3
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java12
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java2
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java33
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java28
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java4
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java5
-rw-r--r--config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java10
-rw-r--r--config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorTest.java128
-rw-r--r--config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java4
-rw-r--r--config-application-package/src/test/java/com/yahoo/config/application/XmlPreprocessorTest.java3
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java10
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/ComponentInfo.java5
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java2
-rw-r--r--config-model-api/src/main/java/com/yahoo/config/model/api/ConfigServerSpec.java10
-rw-r--r--config-model/src/main/Makefile8
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java1
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java8
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java7
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/provision/SingleNodeProvisioner.java5
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java10
-rw-r--r--config-model/src/main/java/com/yahoo/config/model/test/MockRoot.java2
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/Search.java1
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java2
-rw-r--r--config-model/src/main/java/com/yahoo/searchdefinition/document/Stemming.java25
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/Client.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/ConfigProducer.java39
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java16
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/PlainFormatter.java28
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/PortsMeta.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java45
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java10
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/admin/Configserver.java3
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/VespaModelBuilder.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomClientsBuilder.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomV20ClientsBuilder.java229
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java15
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java6
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java12
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java4
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributionConfigProducer.java2
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java1
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java5
-rw-r--r--config-model/src/main/java/com/yahoo/vespa/model/utils/FreezableMap.java4
-rw-r--r--config-model/src/main/resources/schema/common.rnc1
-rw-r--r--config-model/src/main/resources/schema/containercluster.rnc3
-rw-r--r--config-model/src/main/resources/schema/content.rnc2
-rw-r--r--config-model/src/main/resources/schema/deployment.rnc26
-rw-r--r--config-model/src/main/resources/schema/schemas.xml1
-rw-r--r--config-model/src/test/cfg/application/app1/deployment.xml8
-rw-r--r--config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml8
-rw-r--r--config-model/src/test/cfg/application/app_invalid_deployment_xml/hosts.xml9
-rw-r--r--config-model/src/test/cfg/application/app_invalid_deployment_xml/services.xml15
-rw-r--r--config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/deployment.xml5
-rw-r--r--config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/hosts.xml9
-rw-r--r--config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/services.xml15
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java23
-rw-r--r--config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java21
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContainerRestartValidatorTest.java8
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java613
-rw-r--r--config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithMockPkg.java9
-rw-r--r--config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java6
-rw-r--r--config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java2
-rw-r--r--config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java1
-rw-r--r--configdefinitions/src/vespa/zookeeper-server.def4
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java8
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java13
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java6
-rw-r--r--configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java5
-rw-r--r--configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java1
-rw-r--r--container-core/src/main/java/com/yahoo/container/handler/VipStatusHandler.java1
-rwxr-xr-xcontainer-disc/src/main/sh/vespa-start-container-daemon.sh17
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/VespaSVersionRetriever.java29
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/cluster/ClusterMonitor.java6
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/fastsearch/FastHit.java8
-rw-r--r--container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java26
-rw-r--r--container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java8
-rw-r--r--container-search/src/main/java/com/yahoo/search/querytransform/SortingDegrader.java2
-rw-r--r--container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java267
-rw-r--r--container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java6
-rw-r--r--container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java2
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java66
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java39
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java35
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java41
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java9
-rw-r--r--docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/package-info.java5
-rw-r--r--docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerTest.java4
-rw-r--r--docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapperTest.java49
-rw-r--r--document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java2
-rw-r--r--document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer42.java8
-rw-r--r--document/src/test/java/com/yahoo/document/json/DocumentUpdateJsonSerializerTest.java1
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/DocumentAccess.java30
-rwxr-xr-xdocumentapi/src/main/java/com/yahoo/documentapi/DocumentAccessParams.java37
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/local/LocalDocumentAccess.java8
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusDocumentAccess.java57
-rw-r--r--documentapi/src/main/java/com/yahoo/documentapi/messagebus/loadtypes/LoadTypeSet.java5
-rw-r--r--fastlib/src/vespa/fastlib/io/bufferedinputstream.cpp177
-rw-r--r--fastlib/src/vespa/fastlib/io/bufferedinputstream.h50
-rw-r--r--fastlib/src/vespa/fastlib/net/httpheaderparser.cpp89
-rw-r--r--fastlib/src/vespa/fastlib/net/httpheaderparser.h49
-rw-r--r--filedistribution/src/apps/filedistributor/filedistributor.cpp25
-rw-r--r--filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp3
-rw-r--r--jdisc_core/src/main/java/com/yahoo/jdisc/application/OsgiFramework.java15
-rw-r--r--jrt/src/com/yahoo/jrt/Acceptor.java18
-rw-r--r--jrt/src/com/yahoo/jrt/Connection.java15
-rw-r--r--jrt/src/com/yahoo/jrt/Connector.java2
-rw-r--r--jrt/src/com/yahoo/jrt/Request.java5
-rw-r--r--jrt/src/com/yahoo/jrt/Spec.java29
-rw-r--r--jrt/src/com/yahoo/jrt/Transport.java5
-rw-r--r--jrt/src/com/yahoo/jrt/slobrok/api/IMirror.java9
-rw-r--r--jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java308
-rw-r--r--jrt/src/com/yahoo/jrt/slobrok/api/Register.java67
-rw-r--r--jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java156
-rw-r--r--linguistics/src/main/java/com/yahoo/language/process/StemMode.java2
-rw-r--r--logserver/src/main/java/com/yahoo/logserver/net/LogConnection.java2
-rw-r--r--logserver/src/test/java/com/yahoo/logserver/net/test/LogConnectionTestCase.java3
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java44
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/NetworkMessageBus.java43
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/RPCMessageBus.java49
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/network/Identity.java13
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/network/Network.java23
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalNetwork.java56
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalWire.java17
-rw-r--r--messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java2
-rwxr-xr-xmessagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java37
-rw-r--r--node-admin/README_MAC.md2
-rwxr-xr-xnode-admin/include/nodectl-instance.sh2
-rwxr-xr-xnode-admin/scripts/app.sh4
-rw-r--r--node-admin/scripts/common.sh2
-rwxr-xr-xnode-admin/scripts/config-server.sh2
-rwxr-xr-xnode-admin/scripts/node-repo.sh8
-rw-r--r--node-admin/src/main/application/services.xml8
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java81
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceScheduler.java17
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceSchedulerImpl.java102
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java195
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java42
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java13
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java40
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java10
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java3
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java31
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java46
-rw-r--r--node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/SecretAgentHandler.java43
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java12
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java7
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java7
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeStateTest.java7
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ResumeTest.java7
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java (renamed from node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MaintenanceSchedulerMock.java)12
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java30
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java9
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java16
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java14
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/restapi/SecretAgentHandlerTest.java31
-rw-r--r--node-admin/src/test/java/com/yahoo/vespa/hosted/node/maintenance/DeleteOldAppDataTest.java15
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java13
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Flavor.java36
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java4
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java36
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java20
-rw-r--r--node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/FlavorConfigBuilder.java13
-rw-r--r--node-repository/src/main/resources/configdefinitions/node-repository.def7
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainerTest.java6
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MockDeployer.java16
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java334
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java532
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java4
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java43
-rw-r--r--node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java17
-rw-r--r--pom.xml32
-rw-r--r--sample-apps/blog-tutorial-shared/README.md83
-rw-r--r--sample-apps/blog-tutorial-shared/src/R/generateDataset.R56
-rw-r--r--sample-apps/blog-tutorial-shared/src/main/pig/tutorial_get_recommendation_list.pig21
-rw-r--r--sample-apps/blog-tutorial-shared/src/python/__init__.py (renamed from sample-apps/blog-tutorial-shared/src/main/python/__init__.py)0
-rw-r--r--sample-apps/blog-tutorial-shared/src/python/parse.py (renamed from sample-apps/blog-tutorial-shared/src/main/python/parse.py)0
-rwxr-xr-xsample-apps/blog-tutorial-shared/src/python/vespaModel.py (renamed from sample-apps/blog-recommendation/src/main/python/vespaModel.py)0
-rw-r--r--searchcore/src/apps/fdispatch/fdispatch.cpp53
-rw-r--r--searchcore/src/vespa/searchcore/config/proton.def11
-rw-r--r--searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp25
-rw-r--r--searchlib/src/tests/attribute/multivaluemapping/multivaluemapping_test.cpp32
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multivaluemapping.cpp27
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multivaluemapping.h378
-rw-r--r--searchlib/src/vespa/searchlib/attribute/multivaluemapping.hpp5
-rw-r--r--searchlib/src/vespa/searchlib/docstore/logdatastore.cpp3
-rw-r--r--simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java2
-rw-r--r--simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java23
-rw-r--r--simplemetrics/src/main/resources/configdefinitions/manager.def2
-rw-r--r--standalone-container/src/main/scala/com/yahoo/application/container/impl/ClassLoaderOsgiFramework.scala3
-rw-r--r--standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneContainerApplication.scala19
-rw-r--r--standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneSubscriberFactory.scala3
-rw-r--r--standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainerActivatorTest.java2
-rw-r--r--standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainer.scala2
-rw-r--r--standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainerTest.scala6
-rw-r--r--testutil/src/main/java/com/yahoo/test/json/JsonTestHelper.java1
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java96
-rw-r--r--vdslib/src/main/java/com/yahoo/vdslib/state/NodeState.java18
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java134
-rw-r--r--vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java9
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java5
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java4
-rw-r--r--vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java25
-rw-r--r--vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessorTest.java33
-rwxr-xr-xvespabase/src/start-cbinaries.sh2
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java9
-rwxr-xr-xvespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemove.java14
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemoveLocation.java13
-rwxr-xr-xvespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerStatus.java13
-rwxr-xr-xvespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/GetSearcher.java13
-rw-r--r--vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/VisitSearcher.java12
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java23
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java11
-rw-r--r--vespaclient-container-plugin/src/test/java/com/yahoo/storage/searcher/VisitorSearcherTestCase.java9
-rw-r--r--vespaclient-container-plugin/src/test/rest-api-application/services.xml (renamed from vespaclient-container-plugin/src/test/application/services.xml)0
-rwxr-xr-xvespaclient-core/src/main/java/com/yahoo/feedapi/FeedContext.java35
-rwxr-xr-xvespaclient-core/src/main/java/com/yahoo/feedapi/FeederOptions.java12
-rwxr-xr-xvespaclient-core/src/main/java/com/yahoo/feedapi/MessageBusSessionFactory.java17
-rwxr-xr-xvespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandler.java12
-rwxr-xr-xvespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerBase.java10
-rw-r--r--vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterList.java39
-rw-r--r--vespajlib/src/main/java/com/yahoo/net/HostName.java6
-rw-r--r--vespajlib/src/main/java/com/yahoo/net/LinuxInetAddress.java11
-rw-r--r--vespalib/CMakeLists.txt2
-rw-r--r--vespalib/src/testlist.txt2
-rw-r--r--vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp153
-rw-r--r--vespalib/src/tests/eval/tensor/eval_tensor_test.cpp16
-rw-r--r--vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp78
-rw-r--r--vespalib/src/tests/tensor/join_tensor_addresses/.gitignore1
-rw-r--r--vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt9
-rw-r--r--vespalib/src/tests/tensor/join_tensor_addresses/FILES1
-rw-r--r--vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp317
-rw-r--r--vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp25
-rw-r--r--vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore1
-rw-r--r--vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt9
-rw-r--r--vespalib/src/tests/tensor/tensor_address_element_iterator/FILES1
-rw-r--r--vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp343
-rw-r--r--vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp4
-rw-r--r--vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp307
-rw-r--r--vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp12
-rw-r--r--vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp24
-rw-r--r--vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h1
-rw-r--r--vespalib/src/vespa/vespalib/eval/tensor_engine.h1
-rw-r--r--vespalib/src/vespa/vespalib/eval/tensor_spec.cpp39
-rw-r--r--vespalib/src/vespa/vespalib/eval/tensor_spec.h17
-rw-r--r--vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp680
-rw-r--r--vespalib/src/vespa/vespalib/eval/value.cpp8
-rw-r--r--vespalib/src/vespa/vespalib/eval/value.h8
-rw-r--r--vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h57
-rw-r--r--vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp27
-rw-r--r--vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h1
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp71
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h7
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp124
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h46
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h25
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp32
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp106
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h36
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp204
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h32
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h21
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp133
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp59
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h31
-rw-r--r--vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h46
-rw-r--r--vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h128
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt8
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.cpp132
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.h76
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.cpp38
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h36
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h19
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/join_sparse_tensors.h62
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp118
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h10
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h8
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp69
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h39
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h2
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp51
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h58
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_ref.h (renamed from vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_ref.h)12
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h23
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp35
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.cpp89
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.h26
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp4
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.cpp148
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.h47
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp29
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.cpp (renamed from vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.cpp)21
-rw-r--r--vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.h (renamed from vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.h)6
-rw-r--r--vespalib/src/vespa/vespalib/tensor/tensor.h8
-rw-r--r--vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h174
-rw-r--r--vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp16
-rw-r--r--vespalib/src/vespa/vespalib/tensor/tensor_operation.h1
-rw-r--r--vespalib/src/vespa/vespalib/test/insertion_operators.h18
-rw-r--r--vsm/src/tests/searcher/searcher.cpp16
-rw-r--r--vsm/src/vespa/vsm/searcher/utf8stringfieldsearcherbase.cpp4
352 files changed, 9619 insertions, 7020 deletions
diff --git a/application-preprocessor/src/main/java/com/yahoo/application/preprocessor/ApplicationPreprocessor.java b/application-preprocessor/src/main/java/com/yahoo/application/preprocessor/ApplicationPreprocessor.java
index 1762a4b9884..fe9225cd7a6 100644
--- a/application-preprocessor/src/main/java/com/yahoo/application/preprocessor/ApplicationPreprocessor.java
+++ b/application-preprocessor/src/main/java/com/yahoo/application/preprocessor/ApplicationPreprocessor.java
@@ -42,7 +42,7 @@ public class ApplicationPreprocessor {
new Zone(environment.orElse(Environment.defaultEnvironment()), region.orElse(RegionName.defaultName())),
(a, b) -> {
}, logger);
- preprocessed.validateXML(logger);
+ preprocessed.validateXML();
}
diff --git a/application/src/main/java/com/yahoo/application/Application.java b/application/src/main/java/com/yahoo/application/Application.java
index cc1b785ae0b..cfcce72487b 100644
--- a/application/src/main/java/com/yahoo/application/Application.java
+++ b/application/src/main/java/com/yahoo/application/Application.java
@@ -44,6 +44,13 @@ import java.util.*;
@Beta
public final class Application implements AutoCloseable {
+ /**
+ * This system property is set to "true" upon creation of an Application.
+ * This is useful for components created by dependency injection that need to modify
+ * their behavior to function without relying on any processes outside the JVM.
+ */
+ public static final String vespaLocalProperty = "vespa.local";
+
private final JDisc container;
private final List<ContentCluster> contentClusters;
private final Path path;
@@ -51,6 +58,7 @@ public final class Application implements AutoCloseable {
// For internal use only
Application(Path path, Networking networking, boolean deletePathWhenClosing) {
+ System.setProperty(vespaLocalProperty, "true");
this.path = path;
this.deletePathWhenClosing = deletePathWhenClosing;
contentClusters = ContentCluster.fromPath(path);
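
The new vespa.local property above is a JVM-wide signal that code is running inside a locally constructed Application. A minimal sketch of how an injected component might consult it follows; LocalAwareComponent and its behavior switch are illustrative assumptions, only Application.vespaLocalProperty and its "true" value come from the diff.

    // Illustrative only: LocalAwareComponent is hypothetical; the property name and
    // its "true" value are the only parts taken from the diff above.
    public class LocalAwareComponent {

        private final boolean runningLocally;

        public LocalAwareComponent() {
            // Application sets the property to "true" in its constructor, so
            // Boolean.getBoolean() (which reads the system property) detects local mode.
            this.runningLocally = Boolean.getBoolean(com.yahoo.application.Application.vespaLocalProperty);
        }

        public void doWork() {
            if (runningLocally) {
                // Avoid talking to processes outside the JVM (config servers, content nodes, ...).
            } else {
                // Normal, fully networked behavior.
            }
        }
    }
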
diff --git a/application/src/main/java/com/yahoo/application/container/JDisc.java b/application/src/main/java/com/yahoo/application/container/JDisc.java
index 0c6caf9fdf9..dba16a0e3fe 100644
--- a/application/src/main/java/com/yahoo/application/container/JDisc.java
+++ b/application/src/main/java/com/yahoo/application/container/JDisc.java
@@ -23,6 +23,7 @@ import com.yahoo.jdisc.test.TestDriver;
import com.yahoo.processing.handler.ProcessingHandler;
import com.yahoo.search.handler.SearchHandler;
+import java.io.File;
import java.nio.file.Path;
/**
@@ -97,7 +98,7 @@ public final class JDisc implements AutoCloseable {
* @param networking enabled or disabled
* @return a new JDisc instance
*/
- public static JDisc fromPath(final Path path, Networking networking) {
+ public static JDisc fromPath(Path path, Networking networking) {
return new JDisc(path, false, networking, new ConfigModelRepo());
}
@@ -105,7 +106,7 @@ public final class JDisc implements AutoCloseable {
* Create a jDisc instance which is given a config model repo (in which (mock) content clusters
* can be looked up).
*/
- public static JDisc fromPath(final Path path, Networking networking, ConfigModelRepo configModelRepo) {
+ public static JDisc fromPath(Path path, Networking networking, ConfigModelRepo configModelRepo) {
return new JDisc(path, false, networking, configModelRepo);
}
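
For context, a minimal usage sketch of the fromPath factory above, assuming an application package directory on disk (the path is hypothetical) and the Networking enum from com.yahoo.application:

    import com.yahoo.application.Networking;
    import com.yahoo.application.container.JDisc;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    public class JDiscFromPathSketch {
        public static void main(String[] args) {
            Path appDir = Paths.get("src/test/app-packages/my-app"); // hypothetical package location
            // JDisc implements AutoCloseable, so try-with-resources tears the container down cleanly.
            try (JDisc container = JDisc.fromPath(appDir, Networking.disable)) {
                // Issue requests against the container here.
            }
        }
    }
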
diff --git a/application/src/test/java/com/yahoo/application/ApplicationTest.java b/application/src/test/java/com/yahoo/application/ApplicationTest.java
index 6f4e6103743..7b515cb843b 100644
--- a/application/src/test/java/com/yahoo/application/ApplicationTest.java
+++ b/application/src/test/java/com/yahoo/application/ApplicationTest.java
@@ -28,6 +28,7 @@ import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
+import org.junit.Ignore;
import org.junit.Test;
import java.io.BufferedReader;
@@ -363,7 +364,7 @@ public class ApplicationTest {
assertEquals(200, statusCode);
}
}
-
+
private static int getFreePort() throws IOException {
try (ServerSocket socket = new ServerSocket(0)) {
socket.setReuseAddress(true);
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java
new file mode 100644
index 00000000000..05a66ddbf2b
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/AnnotatedClusterState.java
@@ -0,0 +1,69 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import com.yahoo.vdslib.state.ClusterState;
+import com.yahoo.vdslib.state.Node;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+
+public class AnnotatedClusterState {
+ private final ClusterState clusterState;
+ private final Map<Node, NodeStateReason> nodeStateReasons;
+ private final Optional<ClusterStateReason> clusterStateReason;
+
+ public AnnotatedClusterState(ClusterState clusterState,
+ Optional<ClusterStateReason> clusterStateReason,
+ Map<Node, NodeStateReason> nodeStateReasons)
+ {
+ this.clusterState = clusterState;
+ this.clusterStateReason = clusterStateReason;
+ this.nodeStateReasons = nodeStateReasons;
+ }
+
+ public static AnnotatedClusterState emptyState() {
+ return new AnnotatedClusterState(ClusterState.emptyState(), Optional.empty(), emptyNodeStateReasons());
+ }
+
+ static Map<Node, NodeStateReason> emptyNodeStateReasons() {
+ return Collections.emptyMap();
+ }
+
+ public ClusterState getClusterState() {
+ return clusterState;
+ }
+
+ public Map<Node, NodeStateReason> getNodeStateReasons() {
+ return Collections.unmodifiableMap(nodeStateReasons);
+ }
+
+ public Optional<ClusterStateReason> getClusterStateReason() {
+ return clusterStateReason;
+ }
+
+ @Override
+ public String toString() {
+ return clusterState.toString();
+ }
+
+ public String toString(boolean verbose) {
+ return clusterState.toString(verbose);
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AnnotatedClusterState that = (AnnotatedClusterState) o;
+ return Objects.equals(clusterState, that.clusterState) &&
+ Objects.equals(nodeStateReasons, that.nodeStateReasons) &&
+ Objects.equals(clusterStateReason, that.clusterStateReason);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(clusterState, nodeStateReasons, clusterStateReason);
+ }
+}
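
A construction sketch for the new value class, assumed to live in the same package as the cluster controller code. The cluster-state string syntax and the ParseException thrown by ClusterState(String) are assumptions about existing vdslib behavior, not part of this diff:

    import com.yahoo.vdslib.state.ClusterState;
    import com.yahoo.vdslib.state.Node;
    import com.yahoo.vdslib.state.NodeType;

    import java.text.ParseException;
    import java.util.Collections;
    import java.util.Optional;

    class AnnotatedClusterStateSketch {
        // Storage node 0 is marked down and annotated with a per-node reason; no
        // cluster-wide reason is given since the cluster as a whole is still up.
        static AnnotatedClusterState downedStorageNodeExample() throws ParseException {
            ClusterState state = new ClusterState("distributor:3 storage:3 .0.s:d");
            return new AnnotatedClusterState(
                    state,
                    Optional.empty(),
                    Collections.singletonMap(new Node(NodeType.STORAGE, 0), NodeStateReason.GROUP_IS_DOWN));
        }
    }
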
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGenerator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGenerator.java
new file mode 100644
index 00000000000..e6fbed71153
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGenerator.java
@@ -0,0 +1,345 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import com.yahoo.vdslib.state.ClusterState;
+import com.yahoo.vdslib.state.Node;
+import com.yahoo.vdslib.state.NodeState;
+import com.yahoo.vdslib.state.NodeType;
+import com.yahoo.vdslib.state.State;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.TreeMap;
+
+/**
+ * Pure functional cluster state generator which deterministically constructs a full
+ * cluster state given the state of the content cluster, a set of cluster controller
+ * configuration parameters and the current time.
+ *
+ * State version tracking is considered orthogonal to state generation. Therefore,
+ * cluster state version is _not_ set here; its incrementing must be handled by the
+ * caller.
+ */
+public class ClusterStateGenerator {
+
+ static class Params {
+ public ContentCluster cluster;
+ public Map<NodeType, Integer> transitionTimes;
+ public long currentTimeInMillis = 0;
+ public int maxPrematureCrashes = 0;
+ public int minStorageNodesUp = 1;
+ public int minDistributorNodesUp = 1;
+ public double minRatioOfStorageNodesUp = 0.0;
+ public double minRatioOfDistributorNodesUp = 0.0;
+ public double minNodeRatioPerGroup = 0.0;
+ public int idealDistributionBits = 16;
+ public int highestObservedDistributionBitCount = 16;
+ public int lowestObservedDistributionBitCount = 16;
+ public int maxInitProgressTimeMs = 5000;
+
+ Params() {
+ this.transitionTimes = buildTransitionTimeMap(0, 0);
+ }
+
+ // FIXME de-dupe
+ static Map<NodeType, Integer> buildTransitionTimeMap(int distributorTransitionTimeMs, int storageTransitionTimeMs) {
+ Map<com.yahoo.vdslib.state.NodeType, java.lang.Integer> maxTransitionTime = new TreeMap<>();
+ maxTransitionTime.put(com.yahoo.vdslib.state.NodeType.DISTRIBUTOR, distributorTransitionTimeMs);
+ maxTransitionTime.put(com.yahoo.vdslib.state.NodeType.STORAGE, storageTransitionTimeMs);
+ return maxTransitionTime;
+ }
+
+ Params cluster(ContentCluster cluster) {
+ this.cluster = cluster;
+ return this;
+ }
+ Params maxInitProgressTime(int maxTimeMs) {
+ this.maxInitProgressTimeMs = maxTimeMs;
+ return this;
+ }
+ Params transitionTimes(int timeMs) {
+ this.transitionTimes = buildTransitionTimeMap(timeMs, timeMs);
+ return this;
+ }
+ Params transitionTimes(Map<NodeType, Integer> timesMs) {
+ this.transitionTimes = timesMs;
+ return this;
+ }
+ Params currentTimeInMilllis(long currentTimeMs) {
+ this.currentTimeInMillis = currentTimeMs;
+ return this;
+ }
+ Params maxPrematureCrashes(int count) {
+ this.maxPrematureCrashes = count;
+ return this;
+ }
+ Params minStorageNodesUp(int nodes) {
+ this.minStorageNodesUp = nodes;
+ return this;
+ }
+ Params minDistributorNodesUp(int nodes) {
+ this.minDistributorNodesUp = nodes;
+ return this;
+ }
+ Params minRatioOfStorageNodesUp(double minRatio) {
+ this.minRatioOfStorageNodesUp = minRatio;
+ return this;
+ }
+ Params minRatioOfDistributorNodesUp(double minRatio) {
+ this.minRatioOfDistributorNodesUp = minRatio;
+ return this;
+ }
+ Params minNodeRatioPerGroup(double minRatio) {
+ this.minNodeRatioPerGroup = minRatio;
+ return this;
+ }
+ Params idealDistributionBits(int distributionBits) {
+ this.idealDistributionBits = distributionBits;
+ return this;
+ }
+ Params highestObservedDistributionBitCount(int bitCount) {
+ this.highestObservedDistributionBitCount = bitCount;
+ return this;
+ }
+ Params lowestObservedDistributionBitCount(int bitCount) {
+ this.lowestObservedDistributionBitCount = bitCount;
+ return this;
+ }
+
+ /**
+ * Infer parameters from controller options. Important: does _not_ set cluster;
+ * it must be explicitly set afterwards on the returned parameter object before
+ * being used to compute states.
+ */
+ static Params fromOptions(FleetControllerOptions opts) {
+ return new Params()
+ .maxPrematureCrashes(opts.maxPrematureCrashes)
+ .minStorageNodesUp(opts.minStorageNodesUp)
+ .minDistributorNodesUp(opts.minDistributorNodesUp)
+ .minRatioOfStorageNodesUp(opts.minRatioOfStorageNodesUp)
+ .minRatioOfDistributorNodesUp(opts.minRatioOfDistributorNodesUp)
+ .minNodeRatioPerGroup(opts.minNodeRatioPerGroup)
+ .idealDistributionBits(opts.distributionBits)
+ .transitionTimes(opts.maxTransitionTime);
+ }
+ }
+
+ static AnnotatedClusterState generatedStateFrom(final Params params) {
+ final ContentCluster cluster = params.cluster;
+ final ClusterState workingState = ClusterState.emptyState();
+ final Map<Node, NodeStateReason> nodeStateReasons = new HashMap<>();
+
+ for (final NodeInfo nodeInfo : cluster.getNodeInfo()) {
+ final NodeState nodeState = computeEffectiveNodeState(nodeInfo, params);
+ workingState.setNodeState(nodeInfo.getNode(), nodeState);
+ }
+
+ takeDownGroupsWithTooLowAvailability(workingState, nodeStateReasons, params);
+
+ final Optional<ClusterStateReason> reasonToBeDown = clusterDownReason(workingState, params);
+ if (reasonToBeDown.isPresent()) {
+ workingState.setClusterState(State.DOWN);
+ }
+ workingState.setDistributionBits(inferDistributionBitCount(cluster, workingState, params));
+
+ return new AnnotatedClusterState(workingState, reasonToBeDown, nodeStateReasons);
+ }
+
+ private static boolean nodeIsConsideredTooUnstable(final NodeInfo nodeInfo, final Params params) {
+ return (params.maxPrematureCrashes != 0
+ && nodeInfo.getPrematureCrashCount() > params.maxPrematureCrashes);
+ }
+
+ private static void applyWantedStateToBaselineState(final NodeState baseline, final NodeState wanted) {
+ // Only copy state and description from Wanted state; this preserves auxiliary
+ // information such as disk states and startup timestamp.
+ baseline.setState(wanted.getState());
+ baseline.setDescription(wanted.getDescription());
+ }
+
+ private static NodeState computeEffectiveNodeState(final NodeInfo nodeInfo, final Params params) {
+ final NodeState reported = nodeInfo.getReportedState();
+ final NodeState wanted = nodeInfo.getWantedState();
+ final NodeState baseline = reported.clone();
+
+ if (nodeIsConsideredTooUnstable(nodeInfo, params)) {
+ baseline.setState(State.DOWN);
+ }
+ if (startupTimestampAlreadyObservedByAllNodes(nodeInfo, baseline)) {
+ baseline.setStartTimestamp(0);
+ }
+ if (nodeInfo.isStorage()) {
+ applyStorageSpecificStateTransforms(nodeInfo, params, reported, wanted, baseline);
+ }
+ if (baseline.above(wanted)) {
+ applyWantedStateToBaselineState(baseline, wanted);
+ }
+
+ return baseline;
+ }
+
+ private static void applyStorageSpecificStateTransforms(NodeInfo nodeInfo, Params params, NodeState reported,
+ NodeState wanted, NodeState baseline)
+ {
+ if (reported.getState() == State.INITIALIZING) {
+ if (timedOutWithoutNewInitProgress(reported, nodeInfo, params)
+ || shouldForceInitToDown(reported)
+ || nodeInfo.recentlyObservedUnstableDuringInit())
+ {
+ baseline.setState(State.DOWN);
+ }
+ if (shouldForceInitToMaintenance(reported, wanted)) {
+ baseline.setState(State.MAINTENANCE);
+ }
+ }
+ // TODO ensure that maintenance cannot override Down for any other cases
+ if (withinTemporalMaintenancePeriod(nodeInfo, baseline, params) && wanted.getState() != State.DOWN) {
+ baseline.setState(State.MAINTENANCE);
+ }
+ }
+
+ // TODO remove notion of init timeout progress? Seems redundant when we've already got RPC timeouts
+ private static boolean timedOutWithoutNewInitProgress(final NodeState reported, final NodeInfo nodeInfo, final Params params) {
+ if (reported.getState() != State.INITIALIZING) {
+ return false;
+ }
+ if (params.maxInitProgressTimeMs <= 0) {
+ return false; // No upper bound for max init time; auto-down for all intents and purposes disabled.
+ }
+ return nodeInfo.getInitProgressTime() + params.maxInitProgressTimeMs <= params.currentTimeInMillis;
+ }
+
+ // Init while listing buckets should be treated as Down, as distributors expect a storage node
+ // in Init mode to have a bucket set readily available. Clients also expect a node in Init to
+ // be able to receive operations.
+ // Precondition: reported.getState() == State.INITIALIZING
+ private static boolean shouldForceInitToDown(final NodeState reported) {
+ return reported.getInitProgress() <= NodeState.getListingBucketsInitProgressLimit() + 0.00001;
+ }
+
+ // Special case: since each node is published with a single state, if we let a Retired node
+ // be published with Initializing, it'd start receiving feed and merges. Avoid this by
+ // having it be in maintenance instead for the duration of the init period.
+ private static boolean shouldForceInitToMaintenance(final NodeState reported, final NodeState wanted) {
+ return reported.getState() == State.INITIALIZING && wanted.getState() == State.RETIRED;
+ }
+
+ private static boolean startupTimestampAlreadyObservedByAllNodes(final NodeInfo nodeInfo, final NodeState baseline) {
+ return baseline.getStartTimestamp() == nodeInfo.getStartTimestamp(); // TODO rename NodeInfo getter/setter
+ }
+
+ /**
+ * Determines whether a given storage node should be implicitly set as being
+ * in a maintenance state despite its reported state being Down. This is
+ * predominantly a case when contact has just been lost with a node, but we
+ * do not want to immediately set it to Down just yet (where "yet" is a configurable
+ * amount of time; see params.transitionTimes). This is to prevent common node
+ * restart/upgrade scenarios from triggering redistribution and data replication
+ * that would be useless work if the node comes back up immediately afterwards.
+ *
+ * Only makes sense to call for storage nodes, since distributors don't support
+ * being in maintenance mode.
+ */
+ private static boolean withinTemporalMaintenancePeriod(final NodeInfo nodeInfo,
+ final NodeState baseline,
+ final Params params)
+ {
+ final Integer transitionTime = params.transitionTimes.get(nodeInfo.getNode().getType());
+ if (transitionTime == 0 || !baseline.getState().oneOf("sd")) {
+ return false;
+ }
+ return nodeInfo.getTransitionTime() + transitionTime > params.currentTimeInMillis;
+ }
+
+ private static void takeDownGroupsWithTooLowAvailability(final ClusterState workingState,
+ Map<Node, NodeStateReason> nodeStateReasons,
+ final Params params)
+ {
+ final GroupAvailabilityCalculator calc = new GroupAvailabilityCalculator.Builder()
+ .withMinNodeRatioPerGroup(params.minNodeRatioPerGroup)
+ .withDistribution(params.cluster.getDistribution())
+ .build();
+ final Set<Integer> nodesToTakeDown = calc.nodesThatShouldBeDown(workingState);
+
+ for (Integer idx : nodesToTakeDown) {
+ final Node node = storageNode(idx);
+ final NodeState newState = new NodeState(NodeType.STORAGE, State.DOWN);
+ newState.setDescription("group node availability below configured threshold");
+ workingState.setNodeState(node, newState);
+ nodeStateReasons.put(node, NodeStateReason.GROUP_IS_DOWN);
+ }
+ }
+
+ private static Node storageNode(int index) {
+ return new Node(NodeType.STORAGE, index);
+ }
+
+ // TODO we'll want to explicitly persist a bit lower bound in ZooKeeper and ensure we
+ // never go below it (this is _not_ the case today). Nodes that have min bits lower than
+ // this will just have to start splitting out in the background before being allowed
+ // to join the cluster.
+
+ private static int inferDistributionBitCount(final ContentCluster cluster,
+ final ClusterState state,
+ final Params params)
+ {
+ int bitCount = params.idealDistributionBits;
+ final Optional<Integer> minBits = cluster.getConfiguredNodes().values().stream()
+ .map(configuredNode -> cluster.getNodeInfo(storageNode(configuredNode.index())))
+ .filter(node -> state.getNodeState(node.getNode()).getState().oneOf("iur"))
+ .map(nodeInfo -> nodeInfo.getReportedState().getMinUsedBits())
+ .min(Integer::compare);
+
+ if (minBits.isPresent() && minBits.get() < bitCount) {
+ bitCount = minBits.get();
+ }
+ if (bitCount > params.lowestObservedDistributionBitCount && bitCount < params.idealDistributionBits) {
+ bitCount = params.lowestObservedDistributionBitCount;
+ }
+
+ return bitCount;
+ }
+
+ private static boolean nodeStateIsConsideredAvailable(final NodeState ns) {
+ return (ns.getState() == State.UP
+ || ns.getState() == State.RETIRED
+ || ns.getState() == State.INITIALIZING);
+ }
+
+ private static long countAvailableNodesOfType(final NodeType type,
+ final ContentCluster cluster,
+ final ClusterState state)
+ {
+ return cluster.getConfiguredNodes().values().stream()
+ .map(node -> state.getNodeState(new Node(type, node.index())))
+ .filter(ClusterStateGenerator::nodeStateIsConsideredAvailable)
+ .count();
+ }
+
+ private static Optional<ClusterStateReason> clusterDownReason(final ClusterState state, final Params params) {
+ final ContentCluster cluster = params.cluster;
+
+ final long upStorageCount = countAvailableNodesOfType(NodeType.STORAGE, cluster, state);
+ final long upDistributorCount = countAvailableNodesOfType(NodeType.DISTRIBUTOR, cluster, state);
+ // There's a 1-1 relationship between distributors and storage nodes, so we don't need to
+ // keep track of separate node counts when computing availability ratios.
+ final long nodeCount = cluster.getConfiguredNodes().size();
+
+ if (upStorageCount < params.minStorageNodesUp) {
+ return Optional.of(ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE);
+ }
+ if (upDistributorCount < params.minDistributorNodesUp) {
+ return Optional.of(ClusterStateReason.TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE);
+ }
+ if (params.minRatioOfStorageNodesUp * nodeCount > upStorageCount) {
+ return Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO);
+ }
+ if (params.minRatioOfDistributorNodesUp * nodeCount > upDistributorCount) {
+ return Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO);
+ }
+ return Optional.empty();
+ }
+
+}
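
A short usage sketch for the generator, assumed to run in the same package since Params and generatedStateFrom are package-private. The FleetControllerOptions and ContentCluster instances are assumed to be supplied by the surrounding controller code:

    class ClusterStateGeneratorSketch {
        static AnnotatedClusterState deriveState(FleetControllerOptions options, ContentCluster cluster) {
            ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options)
                    .cluster(cluster)                                   // fromOptions() intentionally leaves this unset
                    .currentTimeInMilllis(System.currentTimeMillis());  // builder method spelled as in the source
            // No state version is assigned here; per the class comment, version tracking
            // is the caller's responsibility.
            return ClusterStateGenerator.generatedStateFrom(params);
        }
    }
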
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateHistoryEntry.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateHistoryEntry.java
new file mode 100644
index 00000000000..3963fcaa45b
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateHistoryEntry.java
@@ -0,0 +1,46 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import com.yahoo.vdslib.state.ClusterState;
+
+import java.util.Objects;
+
+public class ClusterStateHistoryEntry {
+
+ private final ClusterState state;
+ private final long time;
+
+ ClusterStateHistoryEntry(final ClusterState state, final long time) {
+ this.state = state;
+ this.time = time;
+ }
+
+ public ClusterState state() {
+ return state;
+ }
+
+ public long time() {
+ return time;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ClusterStateHistoryEntry that = (ClusterStateHistoryEntry) o;
+ return time == that.time &&
+ Objects.equals(state, that.state);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(state, time);
+ }
+
+ // String representation only used for test expectation failures and debugging output.
+ // Actual status page history entry rendering emits formatted date/time.
+ public String toString() {
+ return String.format("state '%s' at time %d", state, time);
+ }
+
+}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateReason.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateReason.java
new file mode 100644
index 00000000000..3557ed1ceb8
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateReason.java
@@ -0,0 +1,15 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+/**
+ * Explicit reasons for why a cluster has been assigned a particular global state.
+ * This only includes reasons that aren't directly possible to infer from diffing
+ * two cluster states; i.e. distribution bit changes aren't listed here because
+ * they are obvious from direct inspection.
+ */
+public enum ClusterStateReason {
+ TOO_FEW_STORAGE_NODES_AVAILABLE,
+ TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE,
+ TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO,
+ TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO,
+}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateView.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateView.java
index 328acfb4dbe..644d6b28b05 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateView.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/ClusterStateView.java
@@ -41,6 +41,10 @@ public class ClusterStateView {
return new ClusterStateView(clusterState, createNewAggregator(clusterState, metricUpdater), metricUpdater);
}
+ public static ClusterStateView create(final ClusterState clusterState, final MetricUpdater metricUpdater) {
+ return new ClusterStateView(clusterState, createNewAggregator(clusterState, metricUpdater), metricUpdater);
+ }
+
private static ClusterStatsAggregator createNewAggregator(ClusterState clusterState, MetricUpdater metricUpdater) {
Set<Integer> upDistributors = getIndicesOfUpNodes(clusterState, NodeType.DISTRIBUTOR);
Set<Integer> upStorageNodes = getIndicesOfUpNodes(clusterState, NodeType.STORAGE);
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculator.java
new file mode 100644
index 00000000000..2e5d99f2e67
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculator.java
@@ -0,0 +1,143 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import com.yahoo.vdslib.distribution.ConfiguredNode;
+import com.yahoo.vdslib.state.ClusterState;
+import com.yahoo.vdslib.state.Node;
+import com.yahoo.vdslib.state.NodeState;
+import com.yahoo.vdslib.state.NodeType;
+import com.yahoo.vdslib.state.State;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+
+/**
+ * Responsible for inferring the difference between two cluster states (and their
+ * state annotations) and producing a set of events that describe the changes between
+ * the two. Diffing the states directly provides a clear picture of _what_ has changed,
+ * while the annotations are generally required to explain _why_ the changes happened
+ * in the first place.
+ *
+ * Events are primarily used for administrative/user visibility into what's happening
+ * in the cluster and are output to the Vespa log as well as kept in a circular history
+ * buffer per node and for the cluster as a whole.
+ */
+public class EventDiffCalculator {
+
+ static class Params {
+ ContentCluster cluster;
+ AnnotatedClusterState fromState;
+ AnnotatedClusterState toState;
+ long currentTime;
+
+ public Params cluster(ContentCluster cluster) {
+ this.cluster = cluster;
+ return this;
+ }
+ public Params fromState(AnnotatedClusterState clusterState) {
+ this.fromState = clusterState;
+ return this;
+ }
+ public Params toState(AnnotatedClusterState clusterState) {
+ this.toState = clusterState;
+ return this;
+ }
+ public Params currentTimeMs(long time) {
+ this.currentTime = time;
+ return this;
+ }
+ }
+
+ public static List<Event> computeEventDiff(final Params params) {
+ final List<Event> events = new ArrayList<>();
+
+ emitPerNodeDiffEvents(params, events);
+ emitWholeClusterDiffEvent(params, events);
+ return events;
+ }
+
+ private static ClusterEvent createClusterEvent(String description, Params params) {
+ return new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, description, params.currentTime);
+ }
+
+ private static boolean clusterDownBecause(final Params params, ClusterStateReason wantedReason) {
+ final Optional<ClusterStateReason> actualReason = params.toState.getClusterStateReason();
+ return actualReason.isPresent() && actualReason.get().equals(wantedReason);
+ }
+
+ private static void emitWholeClusterDiffEvent(final Params params, final List<Event> events) {
+ final ClusterState fromState = params.fromState.getClusterState();
+ final ClusterState toState = params.toState.getClusterState();
+
+ if (clusterHasTransitionedToUpState(fromState, toState)) {
+ events.add(createClusterEvent("Enough nodes available for system to become up", params));
+ } else if (clusterHasTransitionedToDownState(fromState, toState)) {
+ if (clusterDownBecause(params, ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE)) {
+ events.add(createClusterEvent("Too few storage nodes available in cluster. Setting cluster state down", params));
+ } else if (clusterDownBecause(params, ClusterStateReason.TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE)) {
+ events.add(createClusterEvent("Too few distributor nodes available in cluster. Setting cluster state down", params));
+ } else if (clusterDownBecause(params, ClusterStateReason.TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO)) {
+ events.add(createClusterEvent("Too low ratio of available storage nodes. Setting cluster state down", params));
+ } else if (clusterDownBecause(params, ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO)) {
+ events.add(createClusterEvent("Too low ratio of available distributor nodes. Setting cluster state down", params));
+ } else {
+ events.add(createClusterEvent("Cluster is down", params));
+ }
+ }
+ }
+
+ private static NodeEvent createNodeEvent(NodeInfo nodeInfo, String description, Params params) {
+ return new NodeEvent(nodeInfo, description, NodeEvent.Type.CURRENT, params.currentTime);
+ }
+
+ private static void emitPerNodeDiffEvents(final Params params, final List<Event> events) {
+ final ContentCluster cluster = params.cluster;
+ final ClusterState fromState = params.fromState.getClusterState();
+ final ClusterState toState = params.toState.getClusterState();
+
+ for (ConfiguredNode node : cluster.getConfiguredNodes().values()) {
+ for (NodeType nodeType : NodeType.getTypes()) {
+ final Node n = new Node(nodeType, node.index());
+ emitSingleNodeEvents(params, events, cluster, fromState, toState, n);
+ }
+ }
+ }
+
+ private static void emitSingleNodeEvents(Params params, List<Event> events, ContentCluster cluster, ClusterState fromState, ClusterState toState, Node n) {
+ final NodeState nodeFrom = fromState.getNodeState(n);
+ final NodeState nodeTo = toState.getNodeState(n);
+ if (!nodeTo.equals(nodeFrom)) {
+ final NodeInfo info = cluster.getNodeInfo(n);
+ events.add(createNodeEvent(info, String.format("Altered node state in cluster state from '%s' to '%s'",
+ nodeFrom.toString(true), nodeTo.toString(true)), params));
+
+ NodeStateReason prevReason = params.fromState.getNodeStateReasons().get(n);
+ NodeStateReason currReason = params.toState.getNodeStateReasons().get(n);
+ if (isGroupDownEdge(prevReason, currReason)) {
+ events.add(createNodeEvent(info, "Group node availability is below configured threshold", params));
+ } else if (isGroupUpEdge(prevReason, currReason)) {
+ events.add(createNodeEvent(info, "Group node availability has been restored", params));
+ }
+ }
+ }
+
+ private static boolean isGroupUpEdge(NodeStateReason prevReason, NodeStateReason currReason) {
+ return prevReason == NodeStateReason.GROUP_IS_DOWN && currReason != NodeStateReason.GROUP_IS_DOWN;
+ }
+
+ private static boolean isGroupDownEdge(NodeStateReason prevReason, NodeStateReason currReason) {
+ return prevReason != NodeStateReason.GROUP_IS_DOWN && currReason == NodeStateReason.GROUP_IS_DOWN;
+ }
+
+ private static boolean clusterHasTransitionedToUpState(ClusterState prevState, ClusterState currentState) {
+ return prevState.getClusterState() != State.UP && currentState.getClusterState() == State.UP;
+ }
+
+ private static boolean clusterHasTransitionedToDownState(ClusterState prevState, ClusterState currentState) {
+ return prevState.getClusterState() != State.DOWN && currentState.getClusterState() == State.DOWN;
+ }
+
+ public static Params params() { return new Params(); }
+
+}
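
A call-site sketch of the builder-style Params defined above; it mirrors the emitEventsForAlteredStateEdges() helper added to FleetController later in this diff, which forwards each returned event to the controller's event log. The sketch itself is not part of the patch:

    package com.yahoo.vespa.clustercontroller.core;

    import java.util.List;

    // Usage sketch only; assumes it lives in the same package as EventDiffCalculator.
    class EventDiffUsageSketch {

        static List<Event> eventsForStateEdge(ContentCluster cluster,
                                              AnnotatedClusterState fromState,
                                              AnnotatedClusterState toState,
                                              long nowMs) {
            return EventDiffCalculator.computeEventDiff(
                    EventDiffCalculator.params()
                            .cluster(cluster)       // needed to resolve per-node NodeInfo for node events
                            .fromState(fromState)   // previously published annotated state
                            .toState(toState)       // newly computed annotated state
                            .currentTimeMs(nowMs)); // timestamp attached to every emitted event
        }
    }
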
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
index ceeeddf49fa..b21cae4ed71 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/FleetController.java
@@ -7,6 +7,7 @@ import com.yahoo.vdslib.distribution.ConfiguredNode;
import com.yahoo.vdslib.state.ClusterState;
import com.yahoo.vdslib.state.Node;
import com.yahoo.vdslib.state.NodeState;
+import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler;
import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo;
import com.yahoo.vespa.clustercontroller.core.listeners.*;
@@ -37,8 +38,9 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
private final ContentCluster cluster;
private final Communicator communicator;
private final NodeStateGatherer stateGatherer;
- private final SystemStateGenerator systemStateGenerator;
+ private final StateChangeHandler stateChangeHandler;
private final SystemStateBroadcaster systemStateBroadcaster;
+ private final StateVersionTracker stateVersionTracker;
private final StatusPageServerInterface statusPageServer;
private final RpcServer rpcServer;
private final DatabaseHandler database;
@@ -59,7 +61,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
private final List<com.yahoo.vdslib.state.ClusterState> newStates = new ArrayList<>();
private long configGeneration = -1;
private long nextConfigGeneration = -1;
- private List<RemoteClusterControllerTask> remoteTasks = new ArrayList<>();
+ private Queue<RemoteClusterControllerTask> remoteTasks = new LinkedList<>();
private final MetricUpdater metricUpdater;
private boolean isMaster = false;
@@ -69,7 +71,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
private final RunDataExtractor dataExtractor = new RunDataExtractor() {
@Override
- public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return systemStateGenerator.getClusterState(); }
+ public com.yahoo.vdslib.state.ClusterState getLatestClusterState() { return stateVersionTracker.getVersionedClusterState(); }
@Override
public FleetControllerOptions getOptions() { return options; }
@Override
@@ -87,7 +89,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
RpcServer server,
NodeLookup nodeLookup,
DatabaseHandler database,
- SystemStateGenerator systemStateGenerator,
+ StateChangeHandler stateChangeHandler,
SystemStateBroadcaster systemStateBroadcaster,
MasterElectionHandler masterElectionHandler,
MetricUpdater metricUpdater,
@@ -103,8 +105,9 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
this.communicator = communicator;
this.database = database;
this.stateGatherer = nodeStateGatherer;
- this.systemStateGenerator = systemStateGenerator;
+ this.stateChangeHandler = stateChangeHandler;
this.systemStateBroadcaster = systemStateBroadcaster;
+ this.stateVersionTracker = new StateVersionTracker(metricUpdater);
this.metricUpdater = metricUpdater;
this.statusPageServer = statusPage;
@@ -120,12 +123,12 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
new NodeHealthRequestHandler(dataExtractor));
this.statusRequestRouter.addHandler(
"^/clusterstate",
- new ClusterStateRequestHandler(systemStateGenerator));
+ new ClusterStateRequestHandler(stateVersionTracker));
this.statusRequestRouter.addHandler(
"^/$",
new LegacyIndexPageRequestHandler(
timer, options.showLocalSystemStatesInEventLog, cluster,
- masterElectionHandler, systemStateGenerator,
+ masterElectionHandler, stateVersionTracker,
eventLog, timer.getCurrentTimeInMillis(), dataExtractor));
propagateOptions();
@@ -169,7 +172,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
options.nodeStateRequestRoundTripTimeMaxSeconds);
DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
NodeLookup lookUp = new SlobrokClient(timer);
- SystemStateGenerator stateGenerator = new SystemStateGenerator(timer, log, metricUpdater);
+ StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
FleetController controller = new FleetController(
@@ -246,7 +249,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
public com.yahoo.vdslib.state.ClusterState getSystemState() {
synchronized(monitor) {
- return systemStateGenerator.getClusterState();
+ return stateVersionTracker.getVersionedClusterState();
}
}
@@ -299,41 +302,41 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
@Override
public void handleNewNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
- systemStateGenerator.handleNewReportedNodeState(node, newState, this);
+ stateChangeHandler.handleNewReportedNodeState(stateVersionTracker.getVersionedClusterState(), node, newState, this);
}
@Override
public void handleNewWantedNodeState(NodeInfo node, NodeState newState) {
verifyInControllerThread();
wantedStateChanged = true;
- systemStateGenerator.proposeNewNodeState(node, newState);
+ stateChangeHandler.proposeNewNodeState(stateVersionTracker.getVersionedClusterState(), node, newState);
}
@Override
public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo newHostInfo) {
verifyInControllerThread();
- systemStateGenerator.handleUpdatedHostInfo(nodeInfo, newHostInfo);
+ stateVersionTracker.handleUpdatedHostInfo(stateChangeHandler.getHostnames(), nodeInfo, newHostInfo);
}
@Override
public void handleNewNode(NodeInfo node) {
verifyInControllerThread();
- systemStateGenerator.handleNewNode(node);
+ stateChangeHandler.handleNewNode(node);
}
@Override
public void handleMissingNode(NodeInfo node) {
verifyInControllerThread();
- systemStateGenerator.handleMissingNode(node, this);
+ stateChangeHandler.handleMissingNode(stateVersionTracker.getVersionedClusterState(), node, this);
}
@Override
public void handleNewRpcAddress(NodeInfo node) {
verifyInControllerThread();
- systemStateGenerator.handleNewRpcAddress(node);
+ stateChangeHandler.handleNewRpcAddress(node);
}
@Override
public void handleReturnedRpcAddress(NodeInfo node) {
verifyInControllerThread();
- systemStateGenerator.handleReturnedRpcAddress(node);
+ stateChangeHandler.handleReturnedRpcAddress(node);
}
public void handleNewSystemState(com.yahoo.vdslib.state.ClusterState state) {
@@ -370,7 +373,9 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
/** Called when all distributors have acked newest cluster state version. */
public void handleAllDistributorsInSync(DatabaseHandler database, DatabaseHandler.Context context) throws InterruptedException {
- systemStateGenerator.handleAllDistributorsInSync(database, context);
+ Set<ConfiguredNode> nodes = new HashSet<>(cluster.clusterInfo().getConfiguredNodes().values());
+ stateChangeHandler.handleAllDistributorsInSync(
+ stateVersionTracker.getVersionedClusterState(), nodes, database, context);
}
private boolean changesConfiguredNodeSet(Collection<ConfiguredNode> newNodes) {
@@ -409,17 +414,11 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
database.setZooKeeperSessionTimeout(options.zooKeeperSessionTimeout);
stateGatherer.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
stateGatherer.setNodeStateRequestTimeout(options.nodeStateRequestTimeoutMS);
- systemStateGenerator.setNodes(cluster.clusterInfo());
- systemStateGenerator.setMaxTransitionTime(options.maxTransitionTime);
- systemStateGenerator.setMaxInitProgressTime(options.maxInitProgressTime);
- systemStateGenerator.setMaxPrematureCrashes(options.maxPrematureCrashes);
- systemStateGenerator.setStableStateTimePeriod(options.stableStateTimePeriod);
- systemStateGenerator.setMinNodesUp(options.minDistributorNodesUp, options.minStorageNodesUp,
- options.minRatioOfDistributorNodesUp, options.minRatioOfStorageNodesUp);
- systemStateGenerator.setMinNodeRatioPerGroup(options.minNodeRatioPerGroup);
- systemStateGenerator.setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
- systemStateGenerator.setDistributionBits(options.distributionBits);
- systemStateGenerator.setDistribution(options.storageDistribution);
+
+ // TODO: remove as many temporal parameter dependencies as possible here. There is currently duplication of state.
+ stateChangeHandler.reconfigureFromOptions(options);
+ stateChangeHandler.setStateChangedFlag(); // Always trigger state recomputation after reconfig
+
masterElectionHandler.setFleetControllerCount(options.fleetControllerCount);
masterElectionHandler.setMasterZooKeeperCooldownPeriod(options.masterZooKeeperCooldownPeriod);
@@ -491,7 +490,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
didWork = database.doNextZooKeeperTask(databaseContext);
didWork |= updateMasterElectionState();
didWork |= handleLeadershipEdgeTransitions();
- systemStateGenerator.setMaster(isMaster);
+ stateChangeHandler.setMaster(isMaster);
// Process zero or more getNodeState responses that we have received.
didWork |= stateGatherer.processResponses(this);
@@ -510,10 +509,10 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
didWork |= processAnyPendingStatusPageRequest();
if (rpcServer != null) {
- didWork |= rpcServer.handleRpcRequests(cluster, systemStateGenerator.getClusterState(), this, this);
+ didWork |= rpcServer.handleRpcRequests(cluster, consolidatedClusterState(), this, this);
}
- processAllQueuedRemoteTasks();
+ didWork |= processNextQueuedRemoteTask();
processingCycle = false;
++cycleCount;
@@ -606,25 +605,52 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
}
}
- private void processAllQueuedRemoteTasks() {
+ private boolean processNextQueuedRemoteTask() {
if ( ! remoteTasks.isEmpty()) {
- RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
- context.cluster = cluster;
- context.currentState = systemStateGenerator.getConsolidatedClusterState();
- context.masterInfo = masterElectionHandler;
- context.nodeStateOrHostInfoChangeHandler = this;
- context.nodeAddedOrRemovedListener = this;
- for (RemoteClusterControllerTask task : remoteTasks) {
- log.finest("Processing remote task " + task.getClass().getName());
- task.doRemoteFleetControllerTask(context);
- task.notifyCompleted();
- log.finest("Done processing remote task " + task.getClass().getName());
- }
- log.fine("Completed processing remote tasks");
- remoteTasks.clear();
+ final RemoteClusterControllerTask.Context context = createRemoteTaskProcessingContext();
+ final RemoteClusterControllerTask task = remoteTasks.poll();
+ log.finest("Processing remote task " + task.getClass().getName());
+ task.doRemoteFleetControllerTask(context);
+ task.notifyCompleted();
+ log.finest("Done processing remote task " + task.getClass().getName());
+ return true;
}
+ return false;
}
+ private RemoteClusterControllerTask.Context createRemoteTaskProcessingContext() {
+ final RemoteClusterControllerTask.Context context = new RemoteClusterControllerTask.Context();
+ context.cluster = cluster;
+ context.currentState = consolidatedClusterState();
+ context.masterInfo = masterElectionHandler;
+ context.nodeStateOrHostInfoChangeHandler = this;
+ context.nodeAddedOrRemovedListener = this;
+ return context;
+ }
+
+ /**
+ * A "consolidated" cluster state is guaranteed to have up-to-date information on which nodes are
+ * up or down even when the whole cluster is down. The regular, published cluster state is not
+ * normally updated to reflect node events when the cluster is down.
+ */
+ ClusterState consolidatedClusterState() {
+ final ClusterState publishedState = stateVersionTracker.getVersionedClusterState();
+ if (publishedState.getClusterState() == State.UP) {
+ return publishedState; // Short-circuit; already represents latest node state
+ }
+ // The latest candidate state contains the most up-to-date state information, even if it may not
+ // have been published yet.
+ final ClusterState current = stateVersionTracker.getLatestCandidateState().getClusterState().clone();
+ current.setVersion(publishedState.getVersion());
+ return current;
+ }
+
+ /*
+ System test observations:
+ - a node that stops normally (U -> S) and then goes down erroneously triggers premature crash handling
+ - it takes a long time before content node states converge (though this seems to be the case for the legacy impl as well)
+ */
+
private boolean resyncLocallyCachedState() throws InterruptedException {
boolean didWork = false;
// Let non-master state gatherers update wanted states once in a while, so states generated and shown are close to valid.
@@ -637,31 +663,99 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
// Send getNodeState requests to zero or more nodes.
didWork |= stateGatherer.sendMessages(cluster, communicator, this);
- didWork |= systemStateGenerator.watchTimers(cluster, this);
- didWork |= systemStateGenerator.notifyIfNewSystemState(cluster, this);
+ // Important: timer events must use a consolidated state, or they might trigger edge events multiple times.
+ didWork |= stateChangeHandler.watchTimers(cluster, consolidatedClusterState(), this);
+
+ didWork |= recomputeClusterStateIfRequired();
if ( ! isStateGatherer) {
if ( ! isMaster) {
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became node state gatherer as we are fleetcontroller master candidate.", timer.getCurrentTimeInMillis()));
// Update versions to use so what is shown is closer to what is reality on the master
- systemStateGenerator.setLatestSystemStateVersion(database.getLatestSystemStateVersion());
+ stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
}
}
isStateGatherer = true;
return didWork;
}
+ private boolean recomputeClusterStateIfRequired() {
+ if (mustRecomputeCandidateClusterState()) {
+ stateChangeHandler.unsetStateChangedFlag();
+ final AnnotatedClusterState candidate = computeCurrentAnnotatedState();
+ stateVersionTracker.updateLatestCandidateState(candidate);
+
+ if (stateVersionTracker.candidateChangedEnoughFromCurrentToWarrantPublish()
+ || stateVersionTracker.hasReceivedNewVersionFromZooKeeper())
+ {
+ final long timeNowMs = timer.getCurrentTimeInMillis();
+ final AnnotatedClusterState before = stateVersionTracker.getAnnotatedVersionedClusterState();
+
+ stateVersionTracker.promoteCandidateToVersionedState(timeNowMs);
+ emitEventsForAlteredStateEdges(before, stateVersionTracker.getAnnotatedVersionedClusterState(), timeNowMs);
+ handleNewSystemState(stateVersionTracker.getVersionedClusterState());
+ return true;
+ }
+ }
+ return false;
+ }
+
+ private AnnotatedClusterState computeCurrentAnnotatedState() {
+ ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
+ params.currentTimeInMilllis(timer.getCurrentTimeInMillis())
+ .cluster(cluster)
+ .lowestObservedDistributionBitCount(stateVersionTracker.getLowestObservedDistributionBits());
+ return ClusterStateGenerator.generatedStateFrom(params);
+ }
+
+ private void emitEventsForAlteredStateEdges(final AnnotatedClusterState fromState,
+ final AnnotatedClusterState toState,
+ final long timeNowMs) {
+ final List<Event> deltaEvents = EventDiffCalculator.computeEventDiff(
+ EventDiffCalculator.params()
+ .cluster(cluster)
+ .fromState(fromState)
+ .toState(toState)
+ .currentTimeMs(timeNowMs));
+ for (Event event : deltaEvents) {
+ eventLog.add(event, isMaster);
+ }
+
+ emitStateAppliedEvents(timeNowMs, fromState.getClusterState(), toState.getClusterState());
+ }
+
+ private void emitStateAppliedEvents(long timeNowMs, ClusterState fromClusterState, ClusterState toClusterState) {
+ eventLog.add(new ClusterEvent(
+ ClusterEvent.Type.SYSTEMSTATE,
+ "New cluster state version " + toClusterState.getVersion() + ". Change from last: " +
+ fromClusterState.getTextualDifference(toClusterState),
+ timeNowMs), isMaster);
+
+ if (toClusterState.getDistributionBitCount() != fromClusterState.getDistributionBitCount()) {
+ eventLog.add(new ClusterEvent(
+ ClusterEvent.Type.SYSTEMSTATE,
+ "Altering distribution bits in system from "
+ + fromClusterState.getDistributionBitCount() + " to " +
+ toClusterState.getDistributionBitCount(),
+ timeNowMs), isMaster);
+ }
+ }
+
+ private boolean mustRecomputeCandidateClusterState() {
+ return stateChangeHandler.stateMayHaveChanged() || stateVersionTracker.hasReceivedNewVersionFromZooKeeper();
+ }
+
private boolean handleLeadershipEdgeTransitions() throws InterruptedException {
boolean didWork = false;
if (masterElectionHandler.isMaster()) {
if ( ! isMaster) {
metricUpdater.becameMaster();
// If we just became master, restore wanted states from database
- systemStateGenerator.setLatestSystemStateVersion(database.getLatestSystemStateVersion());
+ stateVersionTracker.setVersionRetrievedFromZooKeeper(database.getLatestSystemStateVersion());
didWork = database.loadStartTimestamps(cluster);
didWork |= database.loadWantedStates(databaseContext);
eventLog.add(new ClusterEvent(ClusterEvent.Type.MASTER_ELECTION, "This node just became fleetcontroller master. Bumped version to "
- + systemStateGenerator.getClusterState().getVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
+ + stateVersionTracker.getCurrentVersion() + " to be in line.", timer.getCurrentTimeInMillis()));
long currentTime = timer.getCurrentTimeInMillis();
firstAllowedStateBroadcast = currentTime + options.minTimeBeforeFirstSystemStateBroadcast;
log.log(LogLevel.DEBUG, "At time " + currentTime + " we set first system state broadcast time to be "
@@ -693,6 +787,7 @@ public class FleetController implements NodeStateOrHostInfoChangeHandler, NodeAd
} catch (InterruptedException e) {
log.log(LogLevel.DEBUG, "Event thread stopped by interrupt exception: " + e);
} catch (Throwable t) {
+ t.printStackTrace();
log.log(LogLevel.ERROR, "Fatal error killed fleet controller", t);
synchronized (monitor) { running = false; }
System.exit(1);
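
The recomputeClusterStateIfRequired() flow added above is edge triggered: a candidate state is only recomputed when something may have changed, and it is only promoted to a new published version when it differs enough from the current one (or a newer version was read from ZooKeeper). Below is a deliberately simplified, self-contained illustration of that decision structure, with placeholder names and a trivial stand-in for the "differs enough" check; it is not the real implementation:

    // Simplified illustration of the edge-triggered recompute/publish decision in FleetController.
    class RecomputePublishSketch {
        private boolean stateMayHaveChanged = false;
        private String publishedState = "";
        private String candidateState = "";
        private int version = 1;                    // first published version becomes 2, as in StateVersionTracker

        void markPossiblyChanged(String newCandidate) {
            candidateState = newCandidate;
            stateMayHaveChanged = true;             // corresponds to StateChangeHandler.setStateChangedFlag()
        }

        boolean recomputeIfRequired() {
            if (!stateMayHaveChanged) {
                return false;                       // nothing has changed; no work performed this tick
            }
            stateMayHaveChanged = false;            // consume the "dirty" flag before recomputing
            if (candidateState.equals(publishedState)) {
                return false;                       // candidate not different enough to warrant publishing
            }
            ++version;                              // published versions increase strictly
            publishedState = candidateState;
            System.out.println("published version " + version + ": " + publishedState);
            return true;
        }

        public static void main(String[] args) {
            RecomputePublishSketch controller = new RecomputePublishSketch();
            controller.markPossiblyChanged("distributor:3 storage:3");
            controller.recomputeIfRequired();       // publishes version 2
            controller.recomputeIfRequired();       // flag already consumed; nothing happens
            controller.markPossiblyChanged("distributor:3 storage:3");
            controller.recomputeIfRequired();       // recomputed, but an identical candidate is not re-published
        }
    }
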
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java
index 74b15b61ac3..e24e5f6914e 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/GroupAvailabilityCalculator.java
@@ -10,6 +10,7 @@ import com.yahoo.vdslib.state.NodeState;
import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vdslib.state.State;
+import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Stream;
@@ -105,6 +106,9 @@ class GroupAvailabilityCalculator {
}
public Set<Integer> nodesThatShouldBeDown(ClusterState state) {
+ if (distribution == null) { // FIXME: for tests that don't set distribution properly!
+ return Collections.emptySet();
+ }
if (isFlatCluster(distribution.getRootGroup())) {
// Implicit group takedown only applies to hierarchic cluster setups.
return new HashSet<>();
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java
index 6c48bdf12d0..1a48b088ca3 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/MasterElectionHandler.java
@@ -240,7 +240,7 @@ public class MasterElectionHandler implements MasterInterface {
.append(".</p>");
} else if (masterGoneFromZooKeeperTime + masterZooKeeperCooldownPeriod > timer.getCurrentTimeInMillis()) {
long time = timer.getCurrentTimeInMillis() - masterGoneFromZooKeeperTime;
- sb.append("<p>There is currently no master. Only " + (time / 1000) + " seconds have past since")
+ sb.append("<p>There is currently no master. Only " + (time / 1000) + " seconds have passed since")
.append(" old master disappeared. At least " + (masterZooKeeperCooldownPeriod / 1000) + " must pass")
.append(" before electing new master unless all possible master candidates are online.</p>");
}
@@ -249,7 +249,7 @@ public class MasterElectionHandler implements MasterInterface {
sb.append("<p>As we are number ").append(nextInLineCount)
.append(" in line for taking over as master, we're gathering state from nodes.</p>");
sb.append("<p><font color=\"red\">As we are not the master, we don't know about nodes current system state"
- + " or wanted states, so some statistics below are a bit incorrect. Look at status page on master "
+ + " or wanted states, so some statistics below may be stale. Look at status page on master "
+ "for updated data.</font></p>");
}
if (index * 2 > totalCount) {
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeEvent.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeEvent.java
index d9d83c705b1..944cbd02082 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeEvent.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeEvent.java
@@ -45,4 +45,8 @@ public class NodeEvent implements Event {
public String getCategory() {
return type.toString();
}
+
+ public Type getType() {
+ return type;
+ }
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java
index c261a4bb194..87a32e1e088 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeInfo.java
@@ -35,6 +35,18 @@ abstract public class NodeInfo implements Comparable<NodeInfo> {
/** Whether this node has been configured to be retired and should therefore always return retired as its wanted state */
private boolean configuredRetired;
+ /**
+ * Node has been observed transitioning from Init to Down at least once during the last "premature crash count"
+ * period. Gets reset whenever the crash count is reset to zero after a period of stability.
+ *
+ * The flag can also be explicitly toggled by external code, such as when a reported node state
+ * handler discovers "reverse" init progress. This indicates a "silent" down edge and should be
+ * handled as such.
+ *
+ * It is an explicit choice that we only do this on an edge to Down (and not Stopping). Stopping implies
+ * an administrative action, not that the node itself is unstable.
+ */
+ private boolean recentlyObservedUnstableDuringInit;
/** The time we set the current state last. */
private long nextAttemptTime;
@@ -97,6 +109,7 @@ abstract public class NodeInfo implements Comparable<NodeInfo> {
this.version = getLatestVersion();
this.connectionVersion = getLatestVersion();
this.configuredRetired = configuredRetired;
+ this.recentlyObservedUnstableDuringInit = false;
this.rpcAddress = rpcAddress;
this.lastSeenInSlobrok = null;
this.nextAttemptTime = 0;
@@ -132,7 +145,17 @@ abstract public class NodeInfo implements Comparable<NodeInfo> {
public int getConnectionAttemptCount() { return connectionAttemptCount; }
+ public boolean recentlyObservedUnstableDuringInit() {
+ return recentlyObservedUnstableDuringInit;
+ }
+ public void setRecentlyObservedUnstableDuringInit(boolean unstable) {
+ recentlyObservedUnstableDuringInit = unstable;
+ }
+
public void setPrematureCrashCount(int count) {
+ if (count == 0) {
+ recentlyObservedUnstableDuringInit = false;
+ }
if (prematureCrashCount != count) {
prematureCrashCount = count;
log.log(LogLevel.DEBUG, "Premature crash count on " + toString() + " set to " + count);
@@ -213,6 +236,7 @@ abstract public class NodeInfo implements Comparable<NodeInfo> {
public ContentCluster getCluster() { return cluster; }
/** Returns true if the node is currently registered in slobrok */
+ // FIXME why is this called "isRpcAddressOutdated" then???
public boolean isRpcAddressOutdated() { return lastSeenInSlobrok != null; }
public Long getRpcAddressOutdatedTimestamp() { return lastSeenInSlobrok; }
@@ -277,8 +301,10 @@ abstract public class NodeInfo implements Comparable<NodeInfo> {
if (state.getState().equals(State.DOWN) && !reportedState.getState().oneOf("d")) {
downStableStateTime = time;
log.log(LogLevel.DEBUG, "Down stable state on " + toString() + " altered to " + time);
- }
- else if (state.getState().equals(State.UP) && !reportedState.getState().oneOf("u")) {
+ if (reportedState.getState() == State.INITIALIZING) {
+ recentlyObservedUnstableDuringInit = true;
+ }
+ } else if (state.getState().equals(State.UP) && !reportedState.getState().oneOf("u")) {
upStableStateTime = time;
log.log(LogLevel.DEBUG, "Up stable state on " + toString() + " altered to " + time);
}
@@ -403,7 +429,7 @@ abstract public class NodeInfo implements Comparable<NodeInfo> {
public void setSystemStateVersionSent(ClusterState state) {
if (state == null) throw new Error("Should not clear info for last version sent");
if (systemStateVersionSent.containsKey(state.getVersion())) {
- throw new IllegalStateException("We have already sent cluster state version " + version + " to " + node);
+ throw new IllegalStateException("We have already sent cluster state version " + state.getVersion() + " to " + node);
}
systemStateVersionSent.put(state.getVersion(), state);
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateReason.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateReason.java
new file mode 100644
index 00000000000..da338626d5d
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/NodeStateReason.java
@@ -0,0 +1,10 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+public enum NodeStateReason {
+ // FIXME some of these reasons may be unnecessary as they are reported implicitly by reported/wanted state changes
+ NODE_TOO_UNSTABLE,
+ WITHIN_MAINTENANCE_GRACE_PERIOD,
+ FORCED_INTO_MAINTENANCE,
+ GROUP_IS_DOWN
+}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java
new file mode 100644
index 00000000000..83ba274c422
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandler.java
@@ -0,0 +1,530 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import com.yahoo.jrt.Spec;
+import com.yahoo.log.LogLevel;
+import com.yahoo.vdslib.distribution.ConfiguredNode;
+import com.yahoo.vdslib.state.*;
+import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler;
+import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler;
+
+import java.util.*;
+import java.util.logging.Logger;
+
+/**
+ * This class gets node state updates and timer events and uses these to decide
+ * whether a new cluster state should be generated.
+ *
+ * TODO refactor logic out into smaller, separate components. There is still state duplication
+ * between ClusterStateGenerator and StateChangeHandler, especially for temporal
+ * state transition configuration parameters.
+ */
+public class StateChangeHandler {
+
+ private static Logger log = Logger.getLogger(StateChangeHandler.class.getName());
+
+ private final Timer timer;
+ private final EventLogInterface eventLog;
+ private boolean stateMayHaveChanged = false;
+ private boolean isMaster = false;
+
+ private Map<NodeType, Integer> maxTransitionTime = new TreeMap<>();
+ private int maxInitProgressTime = 5000;
+ private int maxPrematureCrashes = 4;
+ private long stableStateTimePeriod = 60 * 60 * 1000;
+ private Map<Integer, String> hostnames = new HashMap<>();
+ private int maxSlobrokDisconnectGracePeriod = 1000;
+ private static final boolean disableUnstableNodes = true;
+
+ /**
+ * @param metricUpdater may be null, in which case no metrics will be recorded.
+ */
+ public StateChangeHandler(Timer timer, EventLogInterface eventLog, MetricUpdater metricUpdater) {
+ this.timer = timer;
+ this.eventLog = eventLog;
+ maxTransitionTime.put(NodeType.DISTRIBUTOR, 5000);
+ maxTransitionTime.put(NodeType.STORAGE, 5000);
+ }
+
+ public void handleAllDistributorsInSync(final ClusterState currentState,
+ final Set<ConfiguredNode> nodes,
+ final DatabaseHandler database,
+ final DatabaseHandler.Context dbContext) throws InterruptedException {
+ int startTimestampsReset = 0;
+ log.log(LogLevel.DEBUG, String.format("handleAllDistributorsInSync invoked for state version %d", currentState.getVersion()));
+ for (NodeType nodeType : NodeType.getTypes()) {
+ for (ConfiguredNode configuredNode : nodes) {
+ final Node node = new Node(nodeType, configuredNode.index());
+ final NodeInfo nodeInfo = dbContext.getCluster().getNodeInfo(node);
+ final NodeState nodeState = currentState.getNodeState(node);
+ if (nodeInfo != null && nodeState != null) {
+ if (nodeState.getStartTimestamp() > nodeInfo.getStartTimestamp()) {
+ if (log.isLoggable(LogLevel.DEBUG)) {
+ log.log(LogLevel.DEBUG, String.format("Storing away new start timestamp for node %s (%d)",
+ node, nodeState.getStartTimestamp()));
+ }
+ nodeInfo.setStartTimestamp(nodeState.getStartTimestamp());
+ }
+ if (nodeState.getStartTimestamp() > 0) {
+ if (log.isLoggable(LogLevel.DEBUG)) {
+ log.log(LogLevel.DEBUG, String.format("Resetting timestamp in cluster state for node %s", node));
+ }
+ ++startTimestampsReset;
+ }
+ } else if (log.isLoggable(LogLevel.DEBUG)) {
+ log.log(LogLevel.DEBUG, node + ": " +
+ (nodeInfo == null ? "null" : nodeInfo.getStartTimestamp()) + ", " +
+ (nodeState == null ? "null" : nodeState.getStartTimestamp()));
+ }
+ }
+ }
+ if (startTimestampsReset > 0) {
+ eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, "Reset " + startTimestampsReset +
+ " start timestamps as all available distributors have seen newest cluster state.",
+ timer.getCurrentTimeInMillis()));
+ stateMayHaveChanged = true;
+ database.saveStartTimestamps(dbContext);
+ } else {
+ log.log(LogLevel.DEBUG, "Found no start timestamps to reset in cluster state.");
+ }
+ }
+
+ public boolean stateMayHaveChanged() {
+ return stateMayHaveChanged;
+ }
+
+ public void setStateChangedFlag() { stateMayHaveChanged = true; }
+ public void unsetStateChangedFlag() {
+ stateMayHaveChanged = false;
+ }
+
+ public void setMaster(boolean isMaster) {
+ this.isMaster = isMaster;
+ }
+
+ public void setMaxTransitionTime(Map<NodeType, Integer> map) { maxTransitionTime = map; }
+ public void setMaxInitProgressTime(int millisecs) { maxInitProgressTime = millisecs; }
+ public void setMaxSlobrokDisconnectGracePeriod(int millisecs) {
+ maxSlobrokDisconnectGracePeriod = millisecs;
+ }
+ public void setStableStateTimePeriod(long millisecs) { stableStateTimePeriod = millisecs; }
+ public void setMaxPrematureCrashes(int count) { maxPrematureCrashes = count; }
+
+ // TODO nodeListener is only used via updateNodeInfoFromReportedState -> handlePrematureCrash
+ // TODO this will recursively invoke proposeNewNodeState, which will presumably (i.e. hopefully) be a no-op...
+ public void handleNewReportedNodeState(final ClusterState currentClusterState,
+ final NodeInfo node,
+ final NodeState reportedState,
+ final NodeStateOrHostInfoChangeHandler nodeListener)
+ {
+ final NodeState currentState = currentClusterState.getNodeState(node.getNode());
+ final LogLevel level = (currentState.equals(reportedState) && node.getVersion() == 0) ? LogLevel.SPAM : LogLevel.DEBUG;
+ if (log.isLoggable(level)) {
+ log.log(level, String.format("Got nodestate reply from %s: %s (Current state is %s)",
+ node, node.getReportedState().getTextualDifference(reportedState), currentState.toString(true)));
+ }
+ final long currentTime = timer.getCurrentTimeInMillis();
+
+ if (reportedState.getState().equals(State.DOWN)) {
+ node.setTimeOfFirstFailingConnectionAttempt(currentTime);
+ }
+
+ // *** LOGGING ONLY
+ if ( ! reportedState.similarTo(node.getReportedState())) {
+ if (reportedState.getState().equals(State.DOWN)) {
+ eventLog.addNodeOnlyEvent(new NodeEvent(node, "Failed to get node state: " + reportedState.toString(true), NodeEvent.Type.REPORTED, currentTime), LogLevel.INFO);
+ } else {
+ eventLog.addNodeOnlyEvent(new NodeEvent(node, "Now reporting state " + reportedState.toString(true), NodeEvent.Type.REPORTED, currentTime), LogLevel.DEBUG);
+ }
+ }
+
+ if (reportedState.equals(node.getReportedState()) && ! reportedState.getState().equals(State.INITIALIZING)) {
+ return;
+ }
+
+ updateNodeInfoFromReportedState(node, currentState, reportedState, nodeListener);
+
+ if (reportedState.getMinUsedBits() != currentState.getMinUsedBits()) {
+ final int oldCount = currentState.getMinUsedBits();
+ final int newCount = reportedState.getMinUsedBits();
+ log.log(LogLevel.DEBUG,
+ String.format("Altering node state to reflect that min distribution bit count has changed from %d to %d",
+ oldCount, newCount));
+ eventLog.add(new NodeEvent(node, String.format("Altered min distribution bit count from %d to %d", oldCount, newCount),
+ NodeEvent.Type.CURRENT, currentTime), isMaster);
+ } else if (log.isLoggable(LogLevel.DEBUG)) {
+ log.log(LogLevel.DEBUG, String.format("Not altering state of %s in cluster state because new state is too similar: %s",
+ node, currentState.getTextualDifference(reportedState)));
+ }
+
+ stateMayHaveChanged = true;
+ }
+
+ public void handleNewNode(NodeInfo node) {
+ setHostName(node);
+ String message = "Found new node " + node + " in slobrok at " + node.getRpcAddress();
+ eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster);
+ }
+
+ public void handleMissingNode(final ClusterState currentClusterState,
+ final NodeInfo node,
+ final NodeStateOrHostInfoChangeHandler nodeListener)
+ {
+ removeHostName(node);
+
+ final long timeNow = timer.getCurrentTimeInMillis();
+
+ if (node.getLatestNodeStateRequestTime() != null) {
+ eventLog.add(new NodeEvent(node, "Node is no longer in slobrok, but we still have a pending state request.", NodeEvent.Type.REPORTED, timeNow), isMaster);
+ } else {
+ eventLog.add(new NodeEvent(node, "Node is no longer in slobrok. No pending state request to node.", NodeEvent.Type.REPORTED, timeNow), isMaster);
+ }
+
+ if (node.getReportedState().getState().equals(State.STOPPING)) {
+ log.log(LogLevel.DEBUG, "Node " + node.getNode() + " is no longer in slobrok. Was in stopping state, so assuming it has shut down normally. Setting node down");
+ NodeState ns = node.getReportedState().clone();
+ ns.setState(State.DOWN);
+ handleNewReportedNodeState(currentClusterState, node, ns.clone(), nodeListener);
+ } else {
+ log.log(LogLevel.DEBUG, "Node " + node.getNode() + " no longer in slobrok was in state " + node.getReportedState() + ". Waiting to see if it reappears in slobrok");
+ }
+
+ stateMayHaveChanged = true;
+ }
+
+ /**
+ * Propose a new state for a node. This may happen due to an administrator action, orchestration, or
+ * a configuration change.
+ *
+ * If the newly proposed state differs from the state the node currently has in the system,
+ * a cluster state regeneration will be triggered.
+ */
+ public void proposeNewNodeState(final ClusterState currentClusterState, final NodeInfo node, final NodeState proposedState) {
+ final NodeState currentState = currentClusterState.getNodeState(node.getNode());
+ final NodeState currentReported = node.getReportedState();
+
+ if (currentState.getState().equals(proposedState.getState())) {
+ return;
+ }
+ stateMayHaveChanged = true;
+
+ if (log.isLoggable(LogLevel.DEBUG)) {
+ log.log(LogLevel.DEBUG, String.format("Got new wanted nodestate for %s: %s", node, currentState.getTextualDifference(proposedState)));
+ }
+ // Should have been checked earlier, before the state was set in the cluster
+ assert(proposedState.getState().validWantedNodeState(node.getNode().getType()));
+ long timeNow = timer.getCurrentTimeInMillis();
+ if (proposedState.above(currentReported)) {
+ eventLog.add(new NodeEvent(node, String.format("Wanted state %s, but we cannot force node into that " +
+ "state yet as it is currently in %s", proposedState, currentReported),
+ NodeEvent.Type.REPORTED, timeNow), isMaster);
+ return;
+ }
+ if ( ! proposedState.similarTo(currentState)) {
+ eventLog.add(new NodeEvent(node, String.format("Node state set to %s.", proposedState),
+ NodeEvent.Type.WANTED, timeNow), isMaster);
+ }
+ }
+
+ public void handleNewRpcAddress(NodeInfo node) {
+ setHostName(node);
+ String message = "Node " + node + " has a new address in slobrok: " + node.getRpcAddress();
+ eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster);
+ }
+
+ public void handleReturnedRpcAddress(NodeInfo node) {
+ setHostName(node);
+ String message = "Node got back into slobrok with same address as before: " + node.getRpcAddress();
+ eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster);
+ }
+
+ private void setHostName(NodeInfo node) {
+ String rpcAddress = node.getRpcAddress();
+ if (rpcAddress == null) {
+ // This may happen if we haven't seen the node in Slobrok yet.
+ return;
+ }
+
+ Spec address = new Spec(rpcAddress);
+ if (address.malformed()) {
+ return;
+ }
+
+ hostnames.put(node.getNodeIndex(), address.host());
+ }
+
+ void reconfigureFromOptions(FleetControllerOptions options) {
+ setMaxPrematureCrashes(options.maxPrematureCrashes);
+ setStableStateTimePeriod(options.stableStateTimePeriod);
+ setMaxInitProgressTime(options.maxInitProgressTime);
+ setMaxSlobrokDisconnectGracePeriod(options.maxSlobrokDisconnectGracePeriod);
+ setMaxTransitionTime(options.maxTransitionTime);
+ }
+
+ private void removeHostName(NodeInfo node) {
+ hostnames.remove(node.getNodeIndex());
+ }
+
+ Map<Integer, String> getHostnames() {
+ return Collections.unmodifiableMap(hostnames);
+ }
+
+ // TODO too many hidden behavior dependencies between this and the actually
+ // generated cluster state. Still a bit of a minefield...
+ // TODO remove all node state mutation from this function entirely in favor of ClusterStateGenerator!
+ // `--> this will require adding more event edges and premature crash handling to it. Which is fine.
+ public boolean watchTimers(final ContentCluster cluster,
+ final ClusterState currentClusterState,
+ final NodeStateOrHostInfoChangeHandler nodeListener)
+ {
+ boolean triggeredAnyTimers = false;
+ final long currentTime = timer.getCurrentTimeInMillis();
+
+ for (NodeInfo node : cluster.getNodeInfo()) {
+ triggeredAnyTimers |= handleTimeDependentOpsForNode(currentClusterState, nodeListener, currentTime, node);
+ }
+
+ if (triggeredAnyTimers) {
+ stateMayHaveChanged = true;
+ }
+ return triggeredAnyTimers;
+ }
+
+ private boolean handleTimeDependentOpsForNode(final ClusterState currentClusterState,
+ final NodeStateOrHostInfoChangeHandler nodeListener,
+ final long currentTime,
+ final NodeInfo node)
+ {
+ final NodeState currentStateInSystem = currentClusterState.getNodeState(node.getNode());
+ final NodeState lastReportedState = node.getReportedState();
+ boolean triggeredAnyTimers = false;
+
+ triggeredAnyTimers = reportDownIfOutdatedSlobrokNode(
+ currentClusterState, nodeListener, currentTime, node, lastReportedState);
+
+ if (nodeStillUnavailableAfterTransitionTimeExceeded(
+ currentTime, node, currentStateInSystem, lastReportedState))
+ {
+ eventLog.add(new NodeEvent(node, String.format(
+ "%d milliseconds without contact. Marking node down.",
+ currentTime - node.getTransitionTime()),
+ NodeEvent.Type.CURRENT, currentTime), isMaster);
+ triggeredAnyTimers = true;
+ }
+
+ if (nodeInitProgressHasTimedOut(currentTime, node, currentStateInSystem, lastReportedState)) {
+ eventLog.add(new NodeEvent(node, String.format(
+ "%d milliseconds without initialize progress. Marking node down. " +
+ "Premature crash count is now %d.",
+ currentTime - node.getInitProgressTime(),
+ node.getPrematureCrashCount() + 1),
+ NodeEvent.Type.CURRENT, currentTime), isMaster);
+ handlePrematureCrash(node, nodeListener);
+ triggeredAnyTimers = true;
+ }
+
+ if (mayResetCrashCounterOnStableUpNode(currentTime, node, lastReportedState)) {
+ node.setPrematureCrashCount(0);
+ log.log(LogLevel.DEBUG, "Resetting premature crash count on node " + node + " as it has been up for a long time.");
+ triggeredAnyTimers = true;
+ } else if (mayResetCrashCounterOnStableDownNode(currentTime, node, lastReportedState)) {
+ node.setPrematureCrashCount(0);
+ log.log(LogLevel.DEBUG, "Resetting premature crash count on node " + node + " as it has been down for a long time.");
+ triggeredAnyTimers = true;
+ }
+
+ return triggeredAnyTimers;
+ }
+
+ private boolean nodeInitProgressHasTimedOut(long currentTime, NodeInfo node, NodeState currentStateInSystem, NodeState lastReportedState) {
+ return !currentStateInSystem.getState().equals(State.DOWN)
+ && node.getWantedState().above(new NodeState(node.getNode().getType(), State.DOWN))
+ && lastReportedState.getState().equals(State.INITIALIZING)
+ && maxInitProgressTime != 0
+ && node.getInitProgressTime() + maxInitProgressTime <= currentTime
+ && node.getNode().getType().equals(NodeType.STORAGE);
+ }
+
+ private boolean mayResetCrashCounterOnStableDownNode(long currentTime, NodeInfo node, NodeState lastReportedState) {
+ return node.getDownStableStateTime() + stableStateTimePeriod <= currentTime
+ && lastReportedState.getState().equals(State.DOWN)
+ && node.getPrematureCrashCount() <= maxPrematureCrashes
+ && node.getPrematureCrashCount() != 0;
+ }
+
+ private boolean mayResetCrashCounterOnStableUpNode(long currentTime, NodeInfo node, NodeState lastReportedState) {
+ return node.getUpStableStateTime() + stableStateTimePeriod <= currentTime
+ && lastReportedState.getState().equals(State.UP)
+ && node.getPrematureCrashCount() <= maxPrematureCrashes
+ && node.getPrematureCrashCount() != 0;
+ }
+
+ private boolean nodeStillUnavailableAfterTransitionTimeExceeded(
+ long currentTime,
+ NodeInfo node,
+ NodeState currentStateInSystem,
+ NodeState lastReportedState)
+ {
+ return currentStateInSystem.getState().equals(State.MAINTENANCE)
+ && node.getWantedState().above(new NodeState(node.getNode().getType(), State.DOWN))
+ && (lastReportedState.getState().equals(State.DOWN) || node.isRpcAddressOutdated())
+ && node.getTransitionTime() + maxTransitionTime.get(node.getNode().getType()) < currentTime;
+ }
+
+ private boolean reportDownIfOutdatedSlobrokNode(ClusterState currentClusterState,
+ NodeStateOrHostInfoChangeHandler nodeListener,
+ long currentTime,
+ NodeInfo node,
+ NodeState lastReportedState)
+ {
+ if (node.isRpcAddressOutdated()
+ && !lastReportedState.getState().equals(State.DOWN)
+ && node.getRpcAddressOutdatedTimestamp() + maxSlobrokDisconnectGracePeriod <= currentTime)
+ {
+ final String desc = String.format(
+ "Set node down as it has been out of slobrok for %d ms which " +
+ "is more than the max limit of %d ms.",
+ currentTime - node.getRpcAddressOutdatedTimestamp(),
+ maxSlobrokDisconnectGracePeriod);
+ node.abortCurrentNodeStateRequests();
+ NodeState state = lastReportedState.clone();
+ state.setState(State.DOWN);
+ if (!state.hasDescription()) {
+ state.setDescription(desc);
+ }
+ eventLog.add(new NodeEvent(node, desc, NodeEvent.Type.CURRENT, currentTime), isMaster);
+ handleNewReportedNodeState(currentClusterState, node, state.clone(), nodeListener);
+ node.setReportedState(state, currentTime);
+ return true;
+ }
+ return false;
+ }
+
+ private boolean isControlledShutdown(NodeState state) {
+ return (state.getState() == State.STOPPING
+ && (state.getDescription().contains("Received signal 15 (SIGTERM - Termination signal)")
+ || state.getDescription().contains("controlled shutdown")));
+ }
+
+ /**
+ * Modify a node's cross-state information in the cluster based on a newly arrived reported state.
+ *
+ * @param node the node we are computing the state of
+ * @param currentState the current state of the node
+ * @param reportedState the new state reported by (or, in the case of down - inferred from) the node
+ * @param nodeListener this listener is notified for some of the system state changes that this will return
+ */
+ private void updateNodeInfoFromReportedState(final NodeInfo node,
+ final NodeState currentState,
+ final NodeState reportedState,
+ final NodeStateOrHostInfoChangeHandler nodeListener) {
+ final long timeNow = timer.getCurrentTimeInMillis();
+ if (log.isLoggable(LogLevel.DEBUG)) {
+ log.log(LogLevel.DEBUG, String.format("Finding new cluster state entry for %s switching state %s",
+ node, currentState.getTextualDifference(reportedState)));
+ }
+
+ if (handleReportedNodeCrashEdge(node, currentState, reportedState, nodeListener, timeNow)) {
+ return;
+ }
+ if (initializationProgressHasIncreased(currentState, reportedState)) {
+ node.setInitProgressTime(timeNow);
+ if (log.isLoggable(LogLevel.SPAM)) {
+ log.log(LogLevel.SPAM, "Reset initialize timer on " + node + " to " + node.getInitProgressTime());
+ }
+ }
+ if (handleImplicitCrashEdgeFromReverseInitProgress(node, currentState, reportedState, nodeListener, timeNow)) {
+ return;
+ }
+ markNodeUnstableIfDownEdgeDuringInit(node, currentState, reportedState, nodeListener, timeNow);
+ }
+
+ // If we go down while initializing, mark node unstable, such that we don't mark it initializing again before it is up.
+ private void markNodeUnstableIfDownEdgeDuringInit(final NodeInfo node,
+ final NodeState currentState,
+ final NodeState reportedState,
+ final NodeStateOrHostInfoChangeHandler nodeListener,
+ final long timeNow) {
+ if (currentState.getState().equals(State.INITIALIZING)
+ && reportedState.getState().oneOf("ds")
+ && !isControlledShutdown(reportedState))
+ {
+ eventLog.add(new NodeEvent(node, String.format("Stop or crash during initialization. " +
+ "Premature crash count is now %d.", node.getPrematureCrashCount() + 1),
+ NodeEvent.Type.CURRENT, timeNow), isMaster);
+ handlePrematureCrash(node, nodeListener);
+ }
+ }
+
+ // TODO do we need this when we have startup timestamps? At least it's unit tested.
+ // TODO this seems fairly contrived...
+ // If we get reverse initialize progress, mark node unstable, such that we don't mark it initializing again before it is up.
+ private boolean handleImplicitCrashEdgeFromReverseInitProgress(final NodeInfo node,
+ final NodeState currentState,
+ final NodeState reportedState,
+ final NodeStateOrHostInfoChangeHandler nodeListener,
+ final long timeNow) {
+ if (currentState.getState().equals(State.INITIALIZING) &&
+ (reportedState.getState().equals(State.INITIALIZING) && reportedState.getInitProgress() < currentState.getInitProgress()))
+ {
+ eventLog.add(new NodeEvent(node, String.format(
+ "Stop or crash during initialization detected from reverse initializing progress." +
+ " Progress was %g but is now %g. Premature crash count is now %d.",
+ currentState.getInitProgress(), reportedState.getInitProgress(),
+ node.getPrematureCrashCount() + 1),
+ NodeEvent.Type.CURRENT, timeNow), isMaster);
+ node.setRecentlyObservedUnstableDuringInit(true);
+ handlePrematureCrash(node, nodeListener);
+ return true;
+ }
+ return false;
+ }
+
+ private boolean handleReportedNodeCrashEdge(NodeInfo node, NodeState currentState,
+ NodeState reportedState, NodeStateOrHostInfoChangeHandler nodeListener,
+ long timeNow) {
+ if (nodeUpToDownEdge(node, currentState, reportedState)) {
+ node.setTransitionTime(timeNow);
+ if (node.getUpStableStateTime() + stableStateTimePeriod > timeNow && !isControlledShutdown(reportedState)) {
+ log.log(LogLevel.DEBUG, "Stable state: " + node.getUpStableStateTime() + " + " + stableStateTimePeriod + " > " + timeNow);
+ eventLog.add(new NodeEvent(node,
+ String.format("Stopped or possibly crashed after %d ms, which is before " +
+ "stable state time period. Premature crash count is now %d.",
+ timeNow - node.getUpStableStateTime(), node.getPrematureCrashCount() + 1),
+ NodeEvent.Type.CURRENT,
+ timeNow), isMaster);
+ if (handlePrematureCrash(node, nodeListener)) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ private boolean initializationProgressHasIncreased(NodeState currentState, NodeState reportedState) {
+ return reportedState.getState().equals(State.INITIALIZING) &&
+ (!currentState.getState().equals(State.INITIALIZING) ||
+ reportedState.getInitProgress() > currentState.getInitProgress());
+ }
+
+ private boolean nodeUpToDownEdge(NodeInfo node, NodeState currentState, NodeState reportedState) {
+ return currentState.getState().oneOf("ur") && reportedState.getState().oneOf("dis")
+ && (node.getWantedState().getState().equals(State.RETIRED) || !reportedState.getState().equals(State.INITIALIZING));
+ }
+
+ private boolean handlePrematureCrash(NodeInfo node, NodeStateOrHostInfoChangeHandler changeListener) {
+ node.setPrematureCrashCount(node.getPrematureCrashCount() + 1);
+ if (disableUnstableNodes && node.getPrematureCrashCount() > maxPrematureCrashes) {
+ NodeState wantedState = new NodeState(node.getNode().getType(), State.DOWN)
+ .setDescription("Disabled by fleet controller as it prematurely shut down " + node.getPrematureCrashCount() + " times in a row");
+ NodeState oldState = node.getWantedState();
+ node.setWantedState(wantedState);
+ if ( ! oldState.equals(wantedState)) {
+ changeListener.handleNewWantedNodeState(node, wantedState);
+ }
+ return true;
+ }
+ return false;
+ }
+
+}
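
The crash-edge helpers above all converge on handlePrematureCrash(): each edge increments the node's premature crash count, and once the count exceeds maxPrematureCrashes the node's wanted state is forced to DOWN so the controller stops taking it back up automatically. The following is a minimal, self-contained sketch of that policy only; the class and its String-based state are illustrative assumptions, not the real NodeInfo/NodeState API.

    final class PrematureCrashPolicySketch {
        private final int maxPrematureCrashes;
        private int prematureCrashCount = 0;
        private String wantedState = "UP"; // simplified stand-in for the real NodeState

        PrematureCrashPolicySketch(int maxPrematureCrashes) {
            this.maxPrematureCrashes = maxPrematureCrashes;
        }

        // Mirrors handlePrematureCrash(): bump the counter and disable the node once it
        // has prematurely crashed more than the configured number of times.
        boolean onPrematureCrashEdge() {
            prematureCrashCount++;
            if (prematureCrashCount > maxPrematureCrashes) {
                wantedState = "DOWN"; // the real code also notifies a wanted-state change listener
                return true;
            }
            return false;
        }

        public static void main(String[] args) {
            PrematureCrashPolicySketch node = new PrematureCrashPolicySketch(4);
            for (int i = 1; i <= 5; i++) {
                System.out.println("crash " + i + " -> disabled: " + node.onPrematureCrashEdge());
            }
            System.out.println("wanted state: " + node.wantedState); // DOWN after the 5th crash
        }
    }
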
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateVersionTracker.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateVersionTracker.java
new file mode 100644
index 00000000000..f5a67ca9434
--- /dev/null
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/StateVersionTracker.java
@@ -0,0 +1,140 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import com.yahoo.vdslib.state.ClusterState;
+import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo;
+
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Keeps track of the active cluster state and handles the transition edges from
+ * one state to the next. In particular, it ensures that states have strictly increasing
+ * version numbers.
+ *
+ * Wraps ClusterStateView to ensure its knowledge of available nodes stays up to date.
+ */
+public class StateVersionTracker {
+
+ // We always increment the version _before_ publishing, so the effective first cluster
+ // state version when starting from 1 will be 2. This matches legacy behavior, which a
+ // number of existing tests expect.
+ private int currentVersion = 1;
+ private int lastZooKeeperVersion = 0;
+
+ // The lowest published distribution bit count for the lifetime of this controller.
+ // TODO this mirrors legacy behavior, but should be moved into stable ZK state.
+ private int lowestObservedDistributionBits = 16;
+
+ private ClusterState currentUnversionedState = ClusterState.emptyState();
+ private AnnotatedClusterState latestCandidateState = AnnotatedClusterState.emptyState();
+ private AnnotatedClusterState currentClusterState = latestCandidateState;
+
+ private final MetricUpdater metricUpdater;
+ private ClusterStateView clusterStateView;
+
+ private final LinkedList<ClusterStateHistoryEntry> clusterStateHistory = new LinkedList<>();
+ private int maxHistoryEntryCount = 50;
+
+ StateVersionTracker(final MetricUpdater metricUpdater) {
+ this.metricUpdater = metricUpdater;
+ clusterStateView = ClusterStateView.create(currentUnversionedState, metricUpdater);
+ }
+
+ void setVersionRetrievedFromZooKeeper(final int version) {
+ this.currentVersion = Math.max(1, version);
+ this.lastZooKeeperVersion = this.currentVersion;
+ }
+
+ /**
+ * Sets the limit on how many cluster states are kept in the in-memory history. Once
+ * the history exceeds this limit, the oldest entries are removed until the limit
+ * is no longer exceeded.
+ *
+ * Takes effect upon the next invocation of promoteCandidateToVersionedState().
+ */
+ void setMaxHistoryEntryCount(final int maxHistoryEntryCount) {
+ this.maxHistoryEntryCount = maxHistoryEntryCount;
+ }
+
+ int getCurrentVersion() {
+ return this.currentVersion;
+ }
+
+ boolean hasReceivedNewVersionFromZooKeeper() {
+ return currentVersion <= lastZooKeeperVersion;
+ }
+
+ int getLowestObservedDistributionBits() {
+ return lowestObservedDistributionBits;
+ }
+
+ AnnotatedClusterState getAnnotatedVersionedClusterState() {
+ return currentClusterState;
+ }
+
+ public ClusterState getVersionedClusterState() {
+ return currentClusterState.getClusterState();
+ }
+
+ public void updateLatestCandidateState(final AnnotatedClusterState candidate) {
+ assert(latestCandidateState.getClusterState().getVersion() == 0);
+ latestCandidateState = candidate;
+ }
+
+ /**
+ * Returns the last state provided to updateLatestCandidateState, which _may or may not_ be
+ * a published state. The primary use case is a caller that is interested in changes
+ * which may not be reflected in the published state; the prime example is node state
+ * changes while the cluster is marked as Down.
+ */
+ public AnnotatedClusterState getLatestCandidateState() {
+ return latestCandidateState;
+ }
+
+ public List<ClusterStateHistoryEntry> getClusterStateHistory() {
+ return Collections.unmodifiableList(clusterStateHistory);
+ }
+
+ boolean candidateChangedEnoughFromCurrentToWarrantPublish() {
+ return !currentUnversionedState.similarToIgnoringInitProgress(latestCandidateState.getClusterState());
+ }
+
+ void promoteCandidateToVersionedState(final long currentTimeMs) {
+ final int newVersion = currentVersion + 1;
+ updateStatesForNewVersion(latestCandidateState, newVersion);
+ currentVersion = newVersion;
+
+ recordCurrentStateInHistoryAtTime(currentTimeMs);
+ }
+
+ private void updateStatesForNewVersion(final AnnotatedClusterState newState, final int newVersion) {
+ currentClusterState = new AnnotatedClusterState(
+ newState.getClusterState().clone(), // Because we mutate version below
+ newState.getClusterStateReason(),
+ newState.getNodeStateReasons());
+ currentClusterState.getClusterState().setVersion(newVersion);
+ currentUnversionedState = newState.getClusterState().clone();
+ lowestObservedDistributionBits = Math.min(
+ lowestObservedDistributionBits,
+ newState.getClusterState().getDistributionBitCount());
+ // TODO should this take place in updateLatestCandidateState instead? I.e. does it require a consolidated state?
+ clusterStateView = ClusterStateView.create(currentClusterState.getClusterState(), metricUpdater);
+ }
+
+ private void recordCurrentStateInHistoryAtTime(final long currentTimeMs) {
+ clusterStateHistory.addFirst(new ClusterStateHistoryEntry(
+ currentClusterState.getClusterState(), currentTimeMs));
+ while (clusterStateHistory.size() > maxHistoryEntryCount) {
+ clusterStateHistory.removeLast();
+ }
+ }
+
+ void handleUpdatedHostInfo(final Map<Integer, String> hostnames, final NodeInfo node, final HostInfo hostInfo) {
+ // TODO the wiring here isn't unit tested. Need mockable integration points.
+ clusterStateView.handleUpdatedHostInfo(hostnames, node, hostInfo);
+ }
+
+}
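
Taken together, the versioning contract of the new StateVersionTracker is: candidate states are versionless, the version counter is bumped immediately before a candidate is promoted (so the first state published when starting from 1 gets version 2), and every published state is pushed onto a history that is trimmed to a configurable cap. The sketch below illustrates just that contract in isolation; it deliberately uses plain strings instead of the real ClusterState/AnnotatedClusterState types, and the sample state strings are only placeholders.

    import java.util.ArrayDeque;
    import java.util.Deque;

    final class VersionedStateHistorySketch {
        private int currentVersion = 1;                 // first published version becomes 2
        private int maxHistoryEntryCount = 50;
        private final Deque<String> history = new ArrayDeque<>();

        // Mirrors promoteCandidateToVersionedState() plus recordCurrentStateInHistoryAtTime():
        // bump the version before publishing, then trim the history to the configured cap.
        int promote(String candidateState) {
            currentVersion++;
            history.addFirst("version " + currentVersion + ": " + candidateState);
            while (history.size() > maxHistoryEntryCount) {
                history.removeLast();
            }
            return currentVersion;
        }

        public static void main(String[] args) {
            VersionedStateHistorySketch tracker = new VersionedStateHistorySketch();
            System.out.println(tracker.promote("distributor:3 storage:3"));        // prints 2
            System.out.println(tracker.promote("distributor:3 storage:3 .1.s:d")); // prints 3
        }
    }
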
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateGenerator.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateGenerator.java
deleted file mode 100644
index 7edff399633..00000000000
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/SystemStateGenerator.java
+++ /dev/null
@@ -1,941 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.clustercontroller.core;
-
-import com.yahoo.jrt.Spec;
-import com.yahoo.log.LogLevel;
-import com.yahoo.vdslib.distribution.ConfiguredNode;
-import com.yahoo.vdslib.distribution.Distribution;
-import com.yahoo.vdslib.state.*;
-import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler;
-import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo;
-import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler;
-import com.yahoo.vespa.clustercontroller.core.listeners.SystemStateListener;
-
-import java.util.*;
-import java.util.logging.Logger;
-import java.text.ParseException;
-import java.util.stream.Collectors;
-
-/**
- * This class get node state updates and uses them to decide the cluster state.
- */
-// TODO: Remove all current state from this and make it rely on state from ClusterInfo instead
-// TODO: Do this ASAP! SystemStateGenerator should ideally behave as a pure function!
-public class SystemStateGenerator {
-
- private static Logger log = Logger.getLogger(SystemStateGenerator.class.getName());
-
- private final Timer timer;
- private final EventLogInterface eventLog;
- private ClusterStateView currentClusterStateView;
- private ClusterStateView nextClusterStateView;
- private Distribution distribution;
- private boolean nextStateViewChanged = false;
- private boolean isMaster = false;
-
- private Map<NodeType, Integer> maxTransitionTime = new TreeMap<>();
- private int maxInitProgressTime = 5000;
- private int maxPrematureCrashes = 4;
- private long stableStateTimePeriod = 60 * 60 * 1000;
- private static final int maxHistorySize = 50;
- private Set<ConfiguredNode> nodes;
- private Map<Integer, String> hostnames = new HashMap<>();
- private int minDistributorNodesUp = 1;
- private int minStorageNodesUp = 1;
- private double minRatioOfDistributorNodesUp = 0.50;
- private double minRatioOfStorageNodesUp = 0.50;
- private double minNodeRatioPerGroup = 0.0;
- private int maxSlobrokDisconnectGracePeriod = 1000;
- private int idealDistributionBits = 16;
- private static final boolean disableUnstableNodes = true;
-
- private final LinkedList<SystemStateHistoryEntry> systemStateHistory = new LinkedList<>();
-
- /**
- * @param metricUpdater may be null, in which case no metrics will be recorded.
- */
- public SystemStateGenerator(Timer timer, EventLogInterface eventLog, MetricUpdater metricUpdater) {
- try {
- currentClusterStateView = ClusterStateView.create("", metricUpdater);
- nextClusterStateView = ClusterStateView.create("", metricUpdater);
- } catch (ParseException e) {
- throw new RuntimeException("Parsing empty string should always work");
- }
- this.timer = timer;
- this.eventLog = eventLog;
- maxTransitionTime.put(NodeType.DISTRIBUTOR, 5000);
- maxTransitionTime.put(NodeType.STORAGE, 5000);
- }
-
- public void handleAllDistributorsInSync(DatabaseHandler database,
- DatabaseHandler.Context dbContext) throws InterruptedException {
- int startTimestampsReset = 0;
- for (NodeType nodeType : NodeType.getTypes()) {
- for (ConfiguredNode configuredNode : nodes) {
- Node node = new Node(nodeType, configuredNode.index());
- NodeInfo nodeInfo = dbContext.getCluster().getNodeInfo(node);
- NodeState nodeState = nextClusterStateView.getClusterState().getNodeState(node);
- if (nodeInfo != null && nodeState != null) {
- if (nodeState.getStartTimestamp() > nodeInfo.getStartTimestamp()) {
- log.log(LogLevel.DEBUG, "Storing away new start timestamp for node " + node);
- nodeInfo.setStartTimestamp(nodeState.getStartTimestamp());
- }
- if (nodeState.getStartTimestamp() > 0) {
- log.log(LogLevel.DEBUG, "Resetting timestamp in cluster state for node " + node);
- nodeState.setStartTimestamp(0);
- nextClusterStateView.getClusterState().setNodeState(node, nodeState);
- ++startTimestampsReset;
- }
- } else {
- log.log(LogLevel.DEBUG, node + ": " +
- (nodeInfo == null ? "null" : nodeInfo.getStartTimestamp()) + ", " +
- (nodeState == null ? "null" : nodeState.getStartTimestamp()));
- }
- }
- }
- if (startTimestampsReset > 0) {
- eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE, "Reset " + startTimestampsReset +
- " start timestamps as all available distributors have seen newest cluster state.", timer.getCurrentTimeInMillis()));
- nextStateViewChanged = true;
- database.saveStartTimestamps(dbContext);
- } else {
- log.log(LogLevel.DEBUG, "Found no start timestamps to reset in cluster state.");
- }
- }
-
- public void setMaxTransitionTime(Map<NodeType, Integer> map) { maxTransitionTime = map; }
- public void setMaxInitProgressTime(int millisecs) { maxInitProgressTime = millisecs; }
- public void setMaxPrematureCrashes(int count) { maxPrematureCrashes = count; }
- public void setStableStateTimePeriod(long millisecs) { stableStateTimePeriod = millisecs; }
-
- public ClusterStateView currentClusterStateView() { return currentClusterStateView; }
-
- /** Returns an immutable list of the historical states this has generated */
- public List<SystemStateHistoryEntry> systemStateHistory() {
- return Collections.unmodifiableList(systemStateHistory);
- }
-
- public void setMinNodesUp(int minDistNodes, int minStorNodes, double minDistRatio, double minStorRatio) {
- minDistributorNodesUp = minDistNodes;
- minStorageNodesUp = minStorNodes;
- minRatioOfDistributorNodesUp = minDistRatio;
- minRatioOfStorageNodesUp = minStorRatio;
- nextStateViewChanged = true;
- }
-
- public void setMinNodeRatioPerGroup(double upRatio) {
- this.minNodeRatioPerGroup = upRatio;
- nextStateViewChanged = true;
- }
-
- /** Sets the nodes of this and attempts to keep the node state in sync */
- public void setNodes(ClusterInfo newClusterInfo) {
- this.nodes = new HashSet<>(newClusterInfo.getConfiguredNodes().values());
-
- for (ConfiguredNode node : this.nodes) {
- NodeInfo newNodeInfo = newClusterInfo.getStorageNodeInfo(node.index());
- NodeState currentState = currentClusterStateView.getClusterState().getNodeState(new Node(NodeType.STORAGE, node.index()));
- if (currentState.getState() == State.RETIRED || currentState.getState() == State.UP) { // then correct to configured state
- proposeNewNodeState(newNodeInfo, new NodeState(NodeType.STORAGE, node.retired() ? State.RETIRED : State.UP));
- }
- }
-
- // Ensure that any nodes that have been removed from the config are also
- // promptly removed from the next (and subsequent) generated cluster states.
- pruneAllNodesNotContainedInConfig();
-
- nextStateViewChanged = true;
- }
-
- private void pruneAllNodesNotContainedInConfig() {
- Set<Integer> configuredIndices = this.nodes.stream().map(ConfiguredNode::index).collect(Collectors.toSet());
- final ClusterState candidateNextState = nextClusterStateView.getClusterState();
- pruneNodesNotContainedInConfig(candidateNextState, configuredIndices, NodeType.DISTRIBUTOR);
- pruneNodesNotContainedInConfig(candidateNextState, configuredIndices, NodeType.STORAGE);
- }
-
- public void setDistribution(Distribution distribution) {
- this.distribution = distribution;
- nextStateViewChanged = true;
- }
-
- public void setMaster(boolean isMaster) {
- this.isMaster = isMaster;
- }
- public void setMaxSlobrokDisconnectGracePeriod(int millisecs) { maxSlobrokDisconnectGracePeriod = millisecs; }
-
- public void setDistributionBits(int bits) {
- if (bits == idealDistributionBits) return;
- idealDistributionBits = bits;
- int currentDistributionBits = calculateMinDistributionBitCount();
- if (currentDistributionBits != nextClusterStateView.getClusterState().getDistributionBitCount()) {
- nextClusterStateView.getClusterState().setDistributionBits(currentDistributionBits);
- nextStateViewChanged = true;
- }
- }
-
- public int getDistributionBits() { return idealDistributionBits; }
-
- public int calculateMinDistributionBitCount() {
- int currentDistributionBits = idealDistributionBits;
- int minNode = -1;
- for (ConfiguredNode node : nodes) {
- NodeState ns = nextClusterStateView.getClusterState().getNodeState(new Node(NodeType.STORAGE, node.index()));
- if (ns.getState().oneOf("iur")) {
- if (ns.getMinUsedBits() < currentDistributionBits) {
- currentDistributionBits = ns.getMinUsedBits();
- minNode = node.index();
- }
- }
- }
- if (minNode == -1) {
- log.log(LogLevel.DEBUG, "Distribution bit count should still be default as all available nodes have at least split to " + idealDistributionBits + " bits");
- } else {
- log.log(LogLevel.DEBUG, "Distribution bit count is limited to " + currentDistributionBits + " due to storage node " + minNode);
- }
- return currentDistributionBits;
- }
-
- public ClusterState getClusterState() { return currentClusterStateView.getClusterState(); }
-
- /**
- * Return the current cluster state, but if the cluster is down, modify the node states with the
- * actual node states from the temporary next state.
- */
- public ClusterState getConsolidatedClusterState() {
- ClusterState currentState = currentClusterStateView.getClusterState();
- if (currentState.getClusterState().equals(State.UP)) {
- return currentState;
- }
-
- ClusterState nextState = nextClusterStateView.getClusterState();
- if (!currentState.getClusterState().equals(nextState.getClusterState())) {
- log.warning("Expected current cluster state object to have same global state as the under creation instance.");
- }
- ClusterState state = nextState.clone();
- state.setVersion(currentState.getVersion());
- state.setOfficial(false);
- return state;
- }
-
- private Optional<Event> getDownDueToTooFewNodesEvent(ClusterState nextClusterState) {
- int upStorageCount = 0, upDistributorCount = 0;
- int dcount = nodes.size();
- int scount = nodes.size();
- for (NodeType type : NodeType.getTypes()) {
- for (ConfiguredNode node : nodes) {
- NodeState ns = nextClusterState.getNodeState(new Node(type, node.index()));
- if (ns.getState() == State.UP || ns.getState() == State.RETIRED || ns.getState() == State.INITIALIZING) {
- if (type.equals(NodeType.STORAGE))
- ++upStorageCount;
- else
- ++upDistributorCount;
- }
- }
- }
-
- long timeNow = timer.getCurrentTimeInMillis();
- if (upStorageCount < minStorageNodesUp) {
- return Optional.of(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE,
- "Less than " + minStorageNodesUp + " storage nodes available (" + upStorageCount + "). Setting cluster state down.",
- timeNow));
- }
- if (upDistributorCount < minDistributorNodesUp) {
- return Optional.of(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE,
- "Less than " + minDistributorNodesUp + " distributor nodes available (" + upDistributorCount + "). Setting cluster state down.",
- timeNow));
- }
- if (minRatioOfStorageNodesUp * scount > upStorageCount) {
- return Optional.of(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE,
- "Less than " + (100 * minRatioOfStorageNodesUp) + " % of storage nodes are available ("
- + upStorageCount + "/" + scount + "). Setting cluster state down.",
- timeNow));
- }
- if (minRatioOfDistributorNodesUp * dcount > upDistributorCount) {
- return Optional.of(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE,
- "Less than " + (100 * minRatioOfDistributorNodesUp) + " % of distributor nodes are available ("
- + upDistributorCount + "/" + dcount + "). Setting cluster state down.",
- timeNow));
- }
- return Optional.empty();
- }
-
- private static Node storageNode(int index) {
- return new Node(NodeType.STORAGE, index);
- }
-
- private void performImplicitStorageNodeStateTransitions(ClusterState candidateState, ContentCluster cluster) {
- if (distribution == null) {
- return; // FIXME due to tests that don't bother setting distr config! Never happens in prod.
- }
- // First clear the states of any nodes that according to reported/wanted state alone
- // should have their states cleared. We might still take these down again based on the
- // decisions of the group availability calculator, but this way we ensure that groups
- // that no longer should be down will have their nodes implicitly made available again.
- // TODO this will be void once SystemStateGenerator has been rewritten to be stateless.
- final Set<Integer> clearedNodes = clearDownStateForStorageNodesThatCanBeUp(candidateState, cluster);
-
- final GroupAvailabilityCalculator calc = new GroupAvailabilityCalculator.Builder()
- .withMinNodeRatioPerGroup(minNodeRatioPerGroup)
- .withDistribution(distribution)
- .build();
- final Set<Integer> nodesToTakeDown = calc.nodesThatShouldBeDown(candidateState);
- markNodesAsDownDueToGroupUnavailability(cluster, candidateState, nodesToTakeDown, clearedNodes);
-
- clearedNodes.removeAll(nodesToTakeDown);
- logEventsForNodesThatWereTakenUp(clearedNodes, cluster);
- }
-
- private void logEventsForNodesThatWereTakenUp(Set<Integer> newlyUpNodes, ContentCluster cluster) {
- newlyUpNodes.forEach(i -> {
- final NodeInfo info = cluster.getNodeInfo(storageNode(i)); // Should always be non-null here.
- // TODO the fact that this only happens for group up events is implementation specific
- // should generalize this if we get other such events.
- eventLog.addNodeOnlyEvent(new NodeEvent(info,
- "Group availability restored; taking node back up",
- NodeEvent.Type.CURRENT, timer.getCurrentTimeInMillis()), LogLevel.INFO);
- });
- }
-
- private void markNodesAsDownDueToGroupUnavailability(ContentCluster cluster,
- ClusterState candidateState,
- Set<Integer> nodesToTakeDown,
- Set<Integer> clearedNodes)
- {
- for (Integer idx : nodesToTakeDown) {
- final Node node = storageNode(idx);
- NodeState newState = new NodeState(NodeType.STORAGE, State.DOWN);
- newState.setDescription("group node availability below configured threshold");
- candidateState.setNodeState(node, newState);
-
- logNodeGroupDownEdgeEventOnce(clearedNodes, node, cluster);
- }
- }
-
- private void logNodeGroupDownEdgeEventOnce(Set<Integer> clearedNodes, Node node, ContentCluster cluster) {
- final NodeInfo nodeInfo = cluster.getNodeInfo(node);
- // If clearedNodes contains the index it means we're just re-downing a node
- // that was previously down. If this is the case, we'd cause a duplicate
- // event if we logged it now as well.
- if (nodeInfo != null && !clearedNodes.contains(node.getIndex())) {
- eventLog.addNodeOnlyEvent(new NodeEvent(nodeInfo,
- "Setting node down as the total availability of its group is " +
- "below the configured threshold",
- NodeEvent.Type.CURRENT, timer.getCurrentTimeInMillis()), LogLevel.INFO);
- }
- }
-
- private NodeState baselineNodeState(NodeInfo info) {
- NodeState reported = info.getReportedState();
- NodeState wanted = info.getWantedState();
-
- final NodeState baseline = reported.clone();
- if (wanted.getState() != State.UP) {
- baseline.setDescription(wanted.getDescription());
- if (reported.above(wanted)) {
- baseline.setState(wanted.getState());
- }
- }
- // Don't reintroduce start timestamp to the node's state if it has already been
- // observed by all distributors. This matches how handleNewReportedNodeState() sets timestamps.
- // TODO make timestamp semantics clearer. Non-obvious what the two different timestamp stores imply.
- // For posterity: reported.getStartTimestamp() is the start timestamp the node itself has stated.
- // info.getStartTimestamp() is the timestamp written as having been observed by all distributors
- // (which is done in handleAllDistributorsInSync()).
- if (reported.getStartTimestamp() <= info.getStartTimestamp()) {
- baseline.setStartTimestamp(0);
- }
-
- return baseline;
- }
-
- // Returns set of nodes whose state was cleared
- private Set<Integer> clearDownStateForStorageNodesThatCanBeUp(
- ClusterState candidateState, ContentCluster cluster)
- {
- final int nodeCount = candidateState.getNodeCount(NodeType.STORAGE);
- final Set<Integer> clearedNodes = new HashSet<>();
- for (int i = 0; i < nodeCount; ++i) {
- final Node node = storageNode(i);
- final NodeInfo info = cluster.getNodeInfo(node);
- final NodeState currentState = candidateState.getNodeState(node);
- if (mayClearCurrentNodeState(currentState, info)) {
- candidateState.setNodeState(node, baselineNodeState(info));
- clearedNodes.add(i);
- }
- }
- return clearedNodes;
- }
-
- private boolean mayClearCurrentNodeState(NodeState currentState, NodeInfo info) {
- if (currentState.getState() != State.DOWN) {
- return false;
- }
- if (info == null) {
- // Nothing known about node in cluster info; we definitely don't want it
- // to be taken up at this point.
- return false;
- }
- // There exists an edge in watchTimers where a node in Maintenance is implicitly
- // transitioned into Down without being Down in either reported or wanted states
- // iff isRpcAddressOutdated() is true. To avoid getting into an edge where we
- // inadvertently clear this state because its reported/wanted states seem fine,
- // we must also check if that particular edge could have happened. I.e. whether
- // the node's RPC address is marked as outdated.
- // It also makes sense in general to not allow taking a node back up automatically
- // if its RPC connectivity appears to be bad.
- if (info.isRpcAddressOutdated()) {
- return false;
- }
- // Rationale: we can only enter this statement if the _current_ (generated) state
- // of the node is Down. Aside from the group take-down logic, there should not exist
- // any other edges in the cluster controller state transition logic where a node
- // may be set Down while both its reported state and wanted state imply that a better
- // state should already have been chosen. Consequently we allow the node to have its
- // Down-state cleared.
- return (info.getReportedState().getState() != State.DOWN
- && !info.getWantedState().getState().oneOf("d"));
- }
-
- private ClusterStateView createNextVersionOfClusterStateView(ContentCluster cluster) {
- // If you change this method, see *) in notifyIfNewSystemState
- ClusterStateView candidateClusterStateView = nextClusterStateView.cloneForNewState();
- ClusterState candidateClusterState = candidateClusterStateView.getClusterState();
-
- int currentDistributionBits = calculateMinDistributionBitCount();
- if (currentDistributionBits != nextClusterStateView.getClusterState().getDistributionBitCount()) {
- candidateClusterState.setDistributionBits(currentDistributionBits);
- }
- performImplicitStorageNodeStateTransitions(candidateClusterState, cluster);
-
- return candidateClusterStateView;
- }
-
- private void pruneNodesNotContainedInConfig(ClusterState candidateClusterState,
- Set<Integer> configuredIndices,
- NodeType nodeType)
- {
- final int nodeCount = candidateClusterState.getNodeCount(nodeType);
- for (int i = 0; i < nodeCount; ++i) {
- final Node node = new Node(nodeType, i);
- final NodeState currentState = candidateClusterState.getNodeState(node);
- if (!configuredIndices.contains(i) && !currentState.getState().equals(State.DOWN)) {
- log.log(LogLevel.INFO, "Removing node " + node + " from state as it is no longer present in config");
- candidateClusterState.setNodeState(node, new NodeState(nodeType, State.DOWN));
- }
- }
- }
-
- private void recordNewClusterStateHasBeenChosen(
- ClusterState currentClusterState, ClusterState newClusterState, Event clusterEvent) {
- long timeNow = timer.getCurrentTimeInMillis();
-
- if (!currentClusterState.getClusterState().equals(State.UP) &&
- newClusterState.getClusterState().equals(State.UP)) {
- eventLog.add(new ClusterEvent(ClusterEvent.Type.SYSTEMSTATE,
- "Enough nodes available for system to become up.", timeNow), isMaster);
- } else if (currentClusterState.getClusterState().equals(State.UP) &&
- ! newClusterState.getClusterState().equals(State.UP)) {
- assert(clusterEvent != null);
- eventLog.add(clusterEvent, isMaster);
- }
-
- if (newClusterState.getDistributionBitCount() != currentClusterState.getDistributionBitCount()) {
- eventLog.add(new ClusterEvent(
- ClusterEvent.Type.SYSTEMSTATE,
- "Altering distribution bits in system from "
- + currentClusterState.getDistributionBitCount() + " to " +
- currentClusterState.getDistributionBitCount(),
- timeNow), isMaster);
- }
-
- eventLog.add(new ClusterEvent(
- ClusterEvent.Type.SYSTEMSTATE,
- "New cluster state version " + newClusterState.getVersion() + ". Change from last: " +
- currentClusterState.getTextualDifference(newClusterState),
- timeNow), isMaster);
-
- log.log(LogLevel.DEBUG, "Created new cluster state version: " + newClusterState.toString(true));
- systemStateHistory.addFirst(new SystemStateHistoryEntry(newClusterState, timeNow));
- if (systemStateHistory.size() > maxHistorySize) {
- systemStateHistory.removeLast();
- }
- }
-
- private void mergeIntoNextClusterState(ClusterState sourceState) {
- final ClusterState nextState = nextClusterStateView.getClusterState();
- final int nodeCount = sourceState.getNodeCount(NodeType.STORAGE);
- for (int i = 0; i < nodeCount; ++i) {
- final Node node = storageNode(i);
- final NodeState stateInSource = sourceState.getNodeState(node);
- final NodeState stateInTarget = nextState.getNodeState(node);
- if (stateInSource.getState() != stateInTarget.getState()) {
- nextState.setNodeState(node, stateInSource);
- }
- }
- }
-
- public boolean notifyIfNewSystemState(ContentCluster cluster, SystemStateListener stateListener) {
- if ( ! nextStateViewChanged) return false;
-
- ClusterStateView newClusterStateView = createNextVersionOfClusterStateView(cluster);
-
- ClusterState newClusterState = newClusterStateView.getClusterState();
- // Creating the next version of the state may implicitly take down nodes, so our checks
- // for taking the entire cluster down must happen _after_ this
- Optional<Event> clusterDown = getDownDueToTooFewNodesEvent(newClusterState);
- newClusterState.setClusterState(clusterDown.isPresent() ? State.DOWN : State.UP);
-
- if (newClusterState.similarTo(currentClusterStateView.getClusterState())) {
- log.log(LogLevel.DEBUG,
- "State hasn't changed enough to warrant new cluster state. Not creating new state: " +
- currentClusterStateView.getClusterState().getTextualDifference(newClusterState));
- return false;
- }
-
- // Update the version of newClusterState now. This cannot be done prior to similarTo(),
- // since it makes the cluster states different. From now on, the new cluster state is immutable.
- newClusterState.setVersion(currentClusterStateView.getClusterState().getVersion() + 1);
-
- recordNewClusterStateHasBeenChosen(currentClusterStateView.getClusterState(),
- newClusterStateView.getClusterState(), clusterDown.orElse(null));
-
- // *) Ensure next state is still up to date.
- // This should make nextClusterStateView a deep-copy of currentClusterStateView.
- // If more than the distribution bits and state are deep-copied in
- // createNextVersionOfClusterStateView(), we need to add corresponding statements here.
- // This seems like a hack...
- nextClusterStateView.getClusterState().setDistributionBits(newClusterState.getDistributionBitCount());
- nextClusterStateView.getClusterState().setClusterState(newClusterState.getClusterState());
- mergeIntoNextClusterState(newClusterState);
-
- currentClusterStateView = newClusterStateView;
- nextStateViewChanged = false;
-
- stateListener.handleNewSystemState(currentClusterStateView.getClusterState());
-
- return true;
- }
-
- public void setLatestSystemStateVersion(int version) {
- currentClusterStateView.getClusterState().setVersion(Math.max(1, version));
- nextStateViewChanged = true;
- }
-
- private void setNodeState(NodeInfo node, NodeState newState) {
- NodeState oldState = nextClusterStateView.getClusterState().getNodeState(node.getNode());
-
- // Correct UP to RETIRED if the node wants to be retired
- if (newState.above(node.getWantedState()))
- newState.setState(node.getWantedState().getState());
-
- // Keep old description if a new one is not set and we're not going up or in initializing mode
- if ( ! newState.getState().oneOf("ui") && oldState.hasDescription()) {
- newState.setDescription(oldState.getDescription());
- }
-
- // Keep disk information if not set in new state
- if (newState.getDiskCount() == 0 && oldState.getDiskCount() != 0) {
- newState.setDiskCount(oldState.getDiskCount());
- for (int i=0; i<oldState.getDiskCount(); ++i) {
- newState.setDiskState(i, oldState.getDiskState(i));
- }
- }
- if (newState.equals(oldState)) {
- return;
- }
-
- eventLog.add(new NodeEvent(node, "Altered node state in cluster state from '" + oldState.toString(true)
- + "' to '" + newState.toString(true) + "'.",
- NodeEvent.Type.CURRENT, timer.getCurrentTimeInMillis()), isMaster);
- nextClusterStateView.getClusterState().setNodeState(node.getNode(), newState);
- nextStateViewChanged = true;
- }
-
- public void handleNewReportedNodeState(NodeInfo node, NodeState reportedState, NodeStateOrHostInfoChangeHandler nodeListener) {
- ClusterState nextState = nextClusterStateView.getClusterState();
- NodeState currentState = nextState.getNodeState(node.getNode());
- log.log(currentState.equals(reportedState) && node.getVersion() == 0 ? LogLevel.SPAM : LogLevel.DEBUG,
- "Got nodestate reply from " + node + ": "
- + node.getReportedState().getTextualDifference(reportedState) + " (Current state is " + currentState.toString(true) + ")");
- long currentTime = timer.getCurrentTimeInMillis();
- if (reportedState.getState().equals(State.DOWN)) {
- node.setTimeOfFirstFailingConnectionAttempt(currentTime);
- }
- if ( ! reportedState.similarTo(node.getReportedState())) {
- if (reportedState.getState().equals(State.DOWN)) {
- eventLog.addNodeOnlyEvent(new NodeEvent(node, "Failed to get node state: " + reportedState.toString(true), NodeEvent.Type.REPORTED, currentTime), LogLevel.INFO);
- } else {
- eventLog.addNodeOnlyEvent(new NodeEvent(node, "Now reporting state " + reportedState.toString(true), NodeEvent.Type.REPORTED, currentTime), LogLevel.DEBUG);
- }
- }
- if (reportedState.equals(node.getReportedState()) && ! reportedState.getState().equals(State.INITIALIZING))
- return;
-
- NodeState alteredState = decideNodeStateGivenReportedState(node, currentState, reportedState, nodeListener);
- if (alteredState != null) {
- ClusterState clusterState = currentClusterStateView.getClusterState();
-
- if (alteredState.above(node.getWantedState())) {
- log.log(LogLevel.DEBUG, "Cannot set node in state " + alteredState.getState() + " when wanted state is " + node.getWantedState());
- alteredState.setState(node.getWantedState().getState());
- }
- if (reportedState.getStartTimestamp() > node.getStartTimestamp()) {
- alteredState.setStartTimestamp(reportedState.getStartTimestamp());
- } else {
- alteredState.setStartTimestamp(0);
- }
- if (!alteredState.similarTo(currentState)) {
- setNodeState(node, alteredState);
- } else if (!alteredState.equals(currentState)) {
- if (currentState.getState().equals(State.INITIALIZING) && alteredState.getState().equals(State.INITIALIZING) &&
- Math.abs(currentState.getInitProgress() - alteredState.getInitProgress()) > 0.000000001)
- {
- log.log(LogLevel.DEBUG, "Only silently updating init progress for " + node + " in cluster state because new "
- + "state is too similar to tag new version: " + currentState.getTextualDifference(alteredState));
- currentState.setInitProgress(alteredState.getInitProgress());
- nextState.setNodeState(node.getNode(), currentState);
-
- NodeState currentNodeState = clusterState.getNodeState(node.getNode());
- if (currentNodeState.getState().equals(State.INITIALIZING)) {
- currentNodeState.setInitProgress(alteredState.getInitProgress());
- clusterState.setNodeState(node.getNode(), currentNodeState);
- }
- } else if (alteredState.getMinUsedBits() != currentState.getMinUsedBits()) {
- log.log(LogLevel.DEBUG, "Altering node state to reflect that min distribution bit count have changed from "
- + currentState.getMinUsedBits() + " to " + alteredState.getMinUsedBits());
- int oldCount = currentState.getMinUsedBits();
- currentState.setMinUsedBits(alteredState.getMinUsedBits());
- nextState.setNodeState(node.getNode(), currentState);
- int minDistBits = calculateMinDistributionBitCount();
- if (minDistBits < nextState.getDistributionBitCount()
- || (nextState.getDistributionBitCount() < this.idealDistributionBits && minDistBits >= this.idealDistributionBits))
- {
- // If this will actually affect global cluster state.
- eventLog.add(new NodeEvent(node, "Altered min distribution bit count from " + oldCount
- + " to " + currentState.getMinUsedBits() + ". Updated cluster state.", NodeEvent.Type.CURRENT, currentTime), isMaster);
- nextStateViewChanged = true;
- } else {
- log.log(LogLevel.DEBUG, "Altered min distribution bit count from " + oldCount
- + " to " + currentState.getMinUsedBits() + ". No effect for cluster state with ideal " + this.idealDistributionBits
- + ", new " + minDistBits + ", old " + nextState.getDistributionBitCount() + " though.");
- clusterState.setNodeState(node.getNode(), currentState);
- }
- } else {
- log.log(LogLevel.DEBUG, "Not altering state of " + node + " in cluster state because new state is too similar: "
- + currentState.getTextualDifference(alteredState));
- }
- } else if (alteredState.getDescription().contains("Listing buckets")) {
- currentState.setDescription(alteredState.getDescription());
- nextState.setNodeState(node.getNode(), currentState);
- NodeState currentNodeState = clusterState.getNodeState(node.getNode());
- currentNodeState.setDescription(alteredState.getDescription());
- clusterState.setNodeState(node.getNode(), currentNodeState);
- }
- }
- }
-
- public void handleNewNode(NodeInfo node) {
- setHostName(node);
- String message = "Found new node " + node + " in slobrok at " + node.getRpcAddress();
- eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster);
- }
-
- public void handleMissingNode(NodeInfo node, NodeStateOrHostInfoChangeHandler nodeListener) {
- removeHostName(node);
-
- long timeNow = timer.getCurrentTimeInMillis();
-
- if (node.getLatestNodeStateRequestTime() != null) {
- eventLog.add(new NodeEvent(node, "Node is no longer in slobrok, but we still have a pending state request.", NodeEvent.Type.REPORTED, timeNow), isMaster);
- } else {
- eventLog.add(new NodeEvent(node, "Node is no longer in slobrok. No pending state request to node.", NodeEvent.Type.REPORTED, timeNow), isMaster);
- }
- if (node.getReportedState().getState().equals(State.STOPPING)) {
- log.log(LogLevel.DEBUG, "Node " + node.getNode() + " is no longer in slobrok. Was in stopping state, so assuming it has shut down normally. Setting node down");
- NodeState ns = node.getReportedState().clone();
- ns.setState(State.DOWN);
- handleNewReportedNodeState(node, ns.clone(), nodeListener);
- node.setReportedState(ns, timer.getCurrentTimeInMillis()); // Must reset it to null to get connection attempts counted
- } else {
- log.log(LogLevel.DEBUG, "Node " + node.getNode() + " no longer in slobrok was in state " + node.getReportedState() + ". Waiting to see if it reappears in slobrok");
- }
- }
-
- /**
- * Propose a new state for a node. This may happen due to an administrator action, orchestration, or
- * a configuration change.
- */
- public void proposeNewNodeState(NodeInfo node, NodeState proposedState) {
- NodeState currentState = nextClusterStateView.getClusterState().getNodeState(node.getNode());
- NodeState currentReported = node.getReportedState(); // TODO: Is there a reason to have both of this and the above?
-
- NodeState newCurrentState = currentReported.clone();
-
- newCurrentState.setState(proposedState.getState()).setDescription(proposedState.getDescription());
-
- if (currentState.getState().equals(newCurrentState.getState())) return;
-
- log.log(LogLevel.DEBUG, "Got new wanted nodestate for " + node + ": " + currentState.getTextualDifference(proposedState));
- // Should be checked earlier before state was set in cluster
- assert(newCurrentState.getState().validWantedNodeState(node.getNode().getType()));
- long timeNow = timer.getCurrentTimeInMillis();
- if (newCurrentState.above(currentReported)) {
- eventLog.add(new NodeEvent(node, "Wanted state " + newCurrentState + ", but we cannot force node into that state yet as it is currently in " + currentReported, NodeEvent.Type.REPORTED, timeNow), isMaster);
- return;
- }
- if ( ! newCurrentState.similarTo(currentState)) {
- eventLog.add(new NodeEvent(node, "Node state set to " + newCurrentState + ".", NodeEvent.Type.WANTED, timeNow), isMaster);
- }
- setNodeState(node, newCurrentState);
- }
-
- public void handleNewRpcAddress(NodeInfo node) {
- setHostName(node);
- String message = "Node " + node + " has a new address in slobrok: " + node.getRpcAddress();
- eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster);
- }
-
- public void handleReturnedRpcAddress(NodeInfo node) {
- setHostName(node);
- String message = "Node got back into slobrok with same address as before: " + node.getRpcAddress();
- eventLog.add(new NodeEvent(node, message, NodeEvent.Type.REPORTED, timer.getCurrentTimeInMillis()), isMaster);
- }
-
- private void setHostName(NodeInfo node) {
- String rpcAddress = node.getRpcAddress();
- if (rpcAddress == null) {
- // This may happen if we haven't seen the node in Slobrok yet.
- return;
- }
-
- Spec address = new Spec(rpcAddress);
- if (address.malformed()) {
- return;
- }
-
- hostnames.put(node.getNodeIndex(), address.host());
- }
-
- private void removeHostName(NodeInfo node) {
- hostnames.remove(node.getNodeIndex());
- }
-
- public boolean watchTimers(ContentCluster cluster, NodeStateOrHostInfoChangeHandler nodeListener) {
- boolean triggeredAnyTimers = false;
- long currentTime = timer.getCurrentTimeInMillis();
- for(NodeInfo node : cluster.getNodeInfo()) {
- NodeState currentStateInSystem = nextClusterStateView.getClusterState().getNodeState(node.getNode());
- NodeState lastReportedState = node.getReportedState();
-
- // If we haven't had slobrok contact in a given amount of time and node is still not considered down,
- // mark it down.
- if (node.isRpcAddressOutdated()
- && !lastReportedState.getState().equals(State.DOWN)
- && node.getRpcAddressOutdatedTimestamp() + maxSlobrokDisconnectGracePeriod <= currentTime)
- {
- StringBuilder sb = new StringBuilder().append("Set node down as it has been out of slobrok for ")
- .append(currentTime - node.getRpcAddressOutdatedTimestamp()).append(" ms which is more than the max limit of ")
- .append(maxSlobrokDisconnectGracePeriod).append(" ms.");
- node.abortCurrentNodeStateRequests();
- NodeState state = lastReportedState.clone();
- state.setState(State.DOWN);
- if (!state.hasDescription()) state.setDescription(sb.toString());
- eventLog.add(new NodeEvent(node, sb.toString(), NodeEvent.Type.CURRENT, currentTime), isMaster);
- handleNewReportedNodeState(node, state.clone(), nodeListener);
- node.setReportedState(state, currentTime);
- triggeredAnyTimers = true;
- }
-
- // If node is still unavailable after transition time, mark it down
- if (currentStateInSystem.getState().equals(State.MAINTENANCE)
- && ( ! nextStateViewChanged || ! this.nextClusterStateView.getClusterState().getNodeState(node.getNode()).getState().equals(State.DOWN))
- && node.getWantedState().above(new NodeState(node.getNode().getType(), State.DOWN))
- && (lastReportedState.getState().equals(State.DOWN) || node.isRpcAddressOutdated())
- && node.getTransitionTime() + maxTransitionTime.get(node.getNode().getType()) < currentTime)
- {
- eventLog.add(new NodeEvent(node, (currentTime - node.getTransitionTime())
- + " milliseconds without contact. Marking node down.", NodeEvent.Type.CURRENT, currentTime), isMaster);
- NodeState newState = new NodeState(node.getNode().getType(), State.DOWN).setDescription(
- (currentTime - node.getTransitionTime()) + " ms without contact. Too long to keep in maintenance. Marking node down");
- // Keep old description if there is one as it is likely closer to the cause of the problem
- if (currentStateInSystem.hasDescription()) newState.setDescription(currentStateInSystem.getDescription());
- setNodeState(node, newState);
- triggeredAnyTimers = true;
- }
-
- // If node hasn't increased its initializing progress within initprogresstime, mark it down.
- if (!currentStateInSystem.getState().equals(State.DOWN)
- && node.getWantedState().above(new NodeState(node.getNode().getType(), State.DOWN))
- && lastReportedState.getState().equals(State.INITIALIZING)
- && maxInitProgressTime != 0
- && node.getInitProgressTime() + maxInitProgressTime <= currentTime
- && node.getNode().getType().equals(NodeType.STORAGE))
- {
- eventLog.add(new NodeEvent(node, (currentTime - node.getInitProgressTime()) + " milliseconds "
- + "without initialize progress. Marking node down."
- + " Premature crash count is now " + (node.getPrematureCrashCount() + 1) + ".", NodeEvent.Type.CURRENT, currentTime), isMaster);
- NodeState newState = new NodeState(node.getNode().getType(), State.DOWN).setDescription(
- (currentTime - node.getInitProgressTime()) + " ms without initialize progress. Assuming node has deadlocked.");
- setNodeState(node, newState);
- handlePrematureCrash(node, nodeListener);
- triggeredAnyTimers = true;
- }
- if (node.getUpStableStateTime() + stableStateTimePeriod <= currentTime
- && lastReportedState.getState().equals(State.UP)
- && node.getPrematureCrashCount() <= maxPrematureCrashes
- && node.getPrematureCrashCount() != 0)
- {
- node.setPrematureCrashCount(0);
- log.log(LogLevel.DEBUG, "Resetting premature crash count on node " + node + " as it has been up for a long time.");
- triggeredAnyTimers = true;
- } else if (node.getDownStableStateTime() + stableStateTimePeriod <= currentTime
- && lastReportedState.getState().equals(State.DOWN)
- && node.getPrematureCrashCount() <= maxPrematureCrashes
- && node.getPrematureCrashCount() != 0)
- {
- node.setPrematureCrashCount(0);
- log.log(LogLevel.DEBUG, "Resetting premature crash count on node " + node + " as it has been down for a long time.");
- triggeredAnyTimers = true;
- }
- }
- return triggeredAnyTimers;
- }
-
- private boolean isControlledShutdown(NodeState state) {
- return (state.getState() == State.STOPPING && (state.getDescription().contains("Received signal 15 (SIGTERM - Termination signal)")
- || state.getDescription().contains("controlled shutdown")));
- }
-
- /**
- * Decide the state assigned to a new node given the state it reported
- *
- * @param node the node we are computing the state of
- * @param currentState the current state of the node
- * @param reportedState the new state reported by (or, in the case of down - inferred from) the node
- * @param nodeListener this listener is notified for some of the system state changes that this will return
- * @return the node node state, or null to keep the nodes current state
- */
- private NodeState decideNodeStateGivenReportedState(NodeInfo node, NodeState currentState, NodeState reportedState,
- NodeStateOrHostInfoChangeHandler nodeListener) {
- long timeNow = timer.getCurrentTimeInMillis();
-
- log.log(LogLevel.DEBUG, "Finding new cluster state entry for " + node + " switching state " + currentState.getTextualDifference(reportedState));
-
- // Set nodes in maintenance if 1) down, or 2) initializing but set retired, to avoid migrating data
- // to the retired node while it is initializing
- if (currentState.getState().oneOf("ur") && reportedState.getState().oneOf("dis")
- && (node.getWantedState().getState().equals(State.RETIRED) || !reportedState.getState().equals(State.INITIALIZING)))
- {
- long currentTime = timer.getCurrentTimeInMillis();
- node.setTransitionTime(currentTime);
- if (node.getUpStableStateTime() + stableStateTimePeriod > currentTime && !isControlledShutdown(reportedState)) {
- log.log(LogLevel.DEBUG, "Stable state: " + node.getUpStableStateTime() + " + " + stableStateTimePeriod + " > " + currentTime);
- eventLog.add(new NodeEvent(node,
- "Stopped or possibly crashed after " + (currentTime - node.getUpStableStateTime())
- + " ms, which is before stable state time period."
- + " Premature crash count is now " + (node.getPrematureCrashCount() + 1) + ".",
- NodeEvent.Type.CURRENT,
- timeNow), isMaster);
- if (handlePrematureCrash(node, nodeListener)) return null;
- }
- if (maxTransitionTime.get(node.getNode().getType()) != 0) {
- return new NodeState(node.getNode().getType(), State.MAINTENANCE).setDescription(reportedState.getDescription());
- }
- }
-
- // If we got increasing initialization progress, reset initialize timer
- if (reportedState.getState().equals(State.INITIALIZING) &&
- (!currentState.getState().equals(State.INITIALIZING) ||
- reportedState.getInitProgress() > currentState.getInitProgress()))
- {
- node.setInitProgressTime(timer.getCurrentTimeInMillis());
- log.log(LogLevel.DEBUG, "Reset initialize timer on " + node + " to " + node.getInitProgressTime());
- }
-
- // If we get reverse initialize progress, mark node unstable, such that we don't mark it initializing again before it is up.
- if (currentState.getState().equals(State.INITIALIZING) &&
- (reportedState.getState().equals(State.INITIALIZING) && reportedState.getInitProgress() < currentState.getInitProgress()))
- {
- eventLog.add(new NodeEvent(node, "Stop or crash during initialization detected from reverse initializing progress."
- + " Progress was " + currentState.getInitProgress() + " but is now " + reportedState.getInitProgress() + "."
- + " Premature crash count is now " + (node.getPrematureCrashCount() + 1) + ".",
- NodeEvent.Type.CURRENT, timeNow), isMaster);
- return (handlePrematureCrash(node, nodeListener) ? null : new NodeState(node.getNode().getType(), State.DOWN).setDescription(
- "Got reverse intialize progress. Assuming node have prematurely crashed"));
- }
-
- // If we go down while initializing, mark node unstable, such that we don't mark it initializing again before it is up.
- if (currentState.getState().equals(State.INITIALIZING) && reportedState.getState().oneOf("ds") && !isControlledShutdown(reportedState))
- {
- eventLog.add(new NodeEvent(node, "Stop or crash during initialization."
- + " Premature crash count is now " + (node.getPrematureCrashCount() + 1) + ".",
- NodeEvent.Type.CURRENT, timeNow), isMaster);
- return (handlePrematureCrash(node, nodeListener) ? null : new NodeState(node.getNode().getType(), State.DOWN).setDescription(reportedState.getDescription()));
- }
-
- // Ignore further unavailable states when node is set in maintenance
- if (currentState.getState().equals(State.MAINTENANCE) && reportedState.getState().oneOf("dis"))
- {
- if (node.getWantedState().getState().equals(State.RETIRED) || !reportedState.getState().equals(State.INITIALIZING)
- || reportedState.getInitProgress() <= NodeState.getListingBucketsInitProgressLimit() + 0.00001) {
- log.log(LogLevel.DEBUG, "Ignoring down and initializing reports while in maintenance mode on " + node + ".");
- return null;
- }
- }
-
- // Hide initializing state if node has been unstable. (Not for distributors as these own buckets while initializing)
- if ((currentState.getState().equals(State.DOWN) || currentState.getState().equals(State.UP)) &&
- reportedState.getState().equals(State.INITIALIZING) && node.getPrematureCrashCount() > 0 &&
- !node.isDistributor())
- {
- log.log(LogLevel.DEBUG, "Not setting " + node + " initializing again as it crashed prematurely earlier.");
- return new NodeState(node.getNode().getType(), State.DOWN).setDescription("Not setting node back up as it failed prematurely at last attempt");
- }
- // Hide initializing state in cluster state if initialize progress is so low that we haven't listed buckets yet
- if (!node.isDistributor() && reportedState.getState().equals(State.INITIALIZING) &&
- reportedState.getInitProgress() <= NodeState.getListingBucketsInitProgressLimit() + 0.00001)
- {
- log.log(LogLevel.DEBUG, "Not setting " + node + " initializing in cluster state quite yet, as initializing progress still indicate it is listing buckets.");
- return new NodeState(node.getNode().getType(), State.DOWN).setDescription("Listing buckets. Progress " + (100 * reportedState.getInitProgress()) + " %.");
- }
- return reportedState.clone();
- }
-
- public boolean handlePrematureCrash(NodeInfo node, NodeStateOrHostInfoChangeHandler changeListener) {
- node.setPrematureCrashCount(node.getPrematureCrashCount() + 1);
- if (disableUnstableNodes && node.getPrematureCrashCount() > maxPrematureCrashes) {
- NodeState wantedState = new NodeState(node.getNode().getType(), State.DOWN)
- .setDescription("Disabled by fleet controller as it prematurely shut down " + node.getPrematureCrashCount() + " times in a row");
- NodeState oldState = node.getWantedState();
- node.setWantedState(wantedState);
- if ( ! oldState.equals(wantedState)) {
- changeListener.handleNewWantedNodeState(node, wantedState);
- }
- return true;
- }
- return false;
- }
-
- public void handleUpdatedHostInfo(NodeInfo nodeInfo, HostInfo hostInfo) {
- // Only pass the host info to the latest cluster state view.
- currentClusterStateView.handleUpdatedHostInfo(hostnames, nodeInfo, hostInfo);
- }
-
- public class SystemStateHistoryEntry {
-
- private final ClusterState state;
- private final long time;
-
- SystemStateHistoryEntry(ClusterState state, long time) {
- this.state = state;
- this.time = time;
- }
-
- public ClusterState state() { return state; }
-
- public long time() { return time; }
-
- }
-
-}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java
index a21ed994d5d..c4e7c6897e1 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/DatabaseHandler.java
@@ -248,6 +248,8 @@ public class DatabaseHandler {
log.log(LogLevel.DEBUG, "Fleetcontroller " + nodeIndex
+ ": Attempting to store last system state version " + pendingStore.lastSystemStateVersion
+ " into zookeeper.");
+ // TODO guard version write with a CaS predicated on the version we last read/wrote.
+ // TODO Drop leadership status if there is a mismatch, as it implies we're racing with another leader.
if (database.storeLatestSystemStateVersion(pendingStore.lastSystemStateVersion)) {
currentlyStored.lastSystemStateVersion = pendingStore.lastSystemStateVersion;
pendingStore.lastSystemStateVersion = null;
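
The TODOs added above propose guarding the version write with a compare-and-set keyed on the version last read or written, and treating a mismatch as a sign that another controller is racing us for leadership. As a hedged sketch of what that could look like against the plain ZooKeeper client API (this is not the Database abstraction DatabaseHandler actually uses, and the path handling and error policy here are assumptions for illustration):

    import java.nio.charset.StandardCharsets;

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    final class CasVersionWriteSketch {
        /**
         * Stores newStateVersion only if the znode is still at the znode version we last
         * read or wrote. Returns false on a mismatch, which would be the cue to re-check
         * (or drop) leadership before retrying.
         */
        static boolean tryStoreStateVersion(ZooKeeper zk, String path, int newStateVersion,
                                            int lastObservedZnodeVersion)
                throws KeeperException, InterruptedException {
            byte[] data = Integer.toString(newStateVersion).getBytes(StandardCharsets.UTF_8);
            try {
                // setData() with an explicit expected version is ZooKeeper's compare-and-set.
                Stat stat = zk.setData(path, data, lastObservedZnodeVersion);
                return stat != null;
            } catch (KeeperException.BadVersionException e) {
                return false; // another writer got there first; we may be racing another leader
            }
        }
    }
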
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/MasterDataGatherer.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/MasterDataGatherer.java
index cd9c66d18f0..f952f842151 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/MasterDataGatherer.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/database/MasterDataGatherer.java
@@ -51,7 +51,7 @@ public class MasterDataGatherer {
public void process(WatchedEvent watchedEvent) {
switch (watchedEvent.getType()) {
case NodeChildrenChanged: // Fleetcontrollers have either connected or disconnected to ZooKeeper
- log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": A change occured in the list of registered fleetcontrollers. Requesting new information");
+ log.log(LogLevel.INFO, "Fleetcontroller " + nodeIndex + ": A change occurred in the list of registered fleetcontrollers. Requesting new information");
session.getChildren(zooKeeperRoot + "indexes", this, childListener, null);
break;
case NodeDataChanged: // A fleetcontroller has changed what node it is voting for
@@ -160,7 +160,7 @@ public class MasterDataGatherer {
}
}
- /** Calling restart, ignores what we currently know and starts another circly. Typically called after reconnecting to ZooKeeperServer. */
+ /** Calling restart ignores what we currently know and starts another cycle. Typically called after reconnecting to ZooKeeperServer. */
public void restart() {
synchronized (nextMasterData) {
masterData = new TreeMap<Integer, Integer>();
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
index 46fb18180e5..9619a15de3c 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/rpc/RpcServer.java
@@ -100,8 +100,7 @@ public class RpcServer {
register = new Register(supervisor, slist,
new Spec(InetAddress.getLocalHost().getHostName(), acceptor.port()), slobrokBackOffPolicy);
} else {
- register = new Register(supervisor, slist,
- InetAddress.getLocalHost().getHostName(), acceptor.port());
+ register = new Register(supervisor, slist, InetAddress.getLocalHost().getHostName(), acceptor.port());
}
register.registerName(getSlobrokName());
}
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/ClusterStateRequestHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/ClusterStateRequestHandler.java
index 6de9205bbe3..9428370faf5 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/ClusterStateRequestHandler.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/ClusterStateRequestHandler.java
@@ -2,19 +2,19 @@
package com.yahoo.vespa.clustercontroller.core.status;
import com.yahoo.vdslib.state.ClusterState;
+import com.yahoo.vespa.clustercontroller.core.StateVersionTracker;
import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageResponse;
import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageServer;
-import com.yahoo.vespa.clustercontroller.core.SystemStateGenerator;
public class ClusterStateRequestHandler implements StatusPageServer.RequestHandler {
- private final SystemStateGenerator systemStateGenerator;
+ private final StateVersionTracker stateVersionTracker;
- public ClusterStateRequestHandler(SystemStateGenerator systemStateGenerator) {
- this.systemStateGenerator = systemStateGenerator;
+ public ClusterStateRequestHandler(StateVersionTracker stateVersionTracker) {
+ this.stateVersionTracker = stateVersionTracker;
}
@Override
public StatusPageResponse handle(StatusPageServer.HttpRequest request) {
- ClusterState cs = systemStateGenerator.getClusterState();
+ ClusterState cs = stateVersionTracker.getVersionedClusterState();
StatusPageResponse response = new StatusPageResponse();
response.setContentType("text/plain");
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java
index 85db0ac0ef9..ec75ba3532d 100644
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java
+++ b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/LegacyIndexPageRequestHandler.java
@@ -17,21 +17,22 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
private final Timer timer;
private final ContentCluster cluster;
private final MasterElectionHandler masterElectionHandler;
- private final SystemStateGenerator systemStateGenerator;
+ private final StateVersionTracker stateVersionTracker;
private final EventLog eventLog;
private final long startedTime;
private final RunDataExtractor data;
private boolean showLocalSystemStatesInLog = true;
public LegacyIndexPageRequestHandler(Timer timer, boolean showLocalSystemStatesInLog, ContentCluster cluster,
- MasterElectionHandler masterElectionHandler, SystemStateGenerator systemStateGenerator,
+ MasterElectionHandler masterElectionHandler,
+ StateVersionTracker stateVersionTracker,
EventLog eventLog, long startedTime, RunDataExtractor data)
{
this.timer = timer;
this.showLocalSystemStatesInLog = showLocalSystemStatesInLog;
this.cluster = cluster;
this.masterElectionHandler = masterElectionHandler;
- this.systemStateGenerator = systemStateGenerator;
+ this.stateVersionTracker = stateVersionTracker;
this.eventLog = eventLog;
this.startedTime = startedTime;
this.data = data;
@@ -63,7 +64,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
new VdsClusterHtmlRendrer(),
content,
timer,
- systemStateGenerator.getClusterState(),
+ stateVersionTracker.getVersionedClusterState(),
data.getOptions().storageDistribution,
data.getOptions(),
eventLog,
@@ -71,7 +72,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
// Overview of current config
data.getOptions().writeHtmlState(content, request);
// Current cluster state and cluster state history
- writeHtmlState(systemStateGenerator, content, request);
+ writeHtmlState(stateVersionTracker, content, request);
} else {
// Overview of current config
data.getOptions().writeHtmlState(content, request);
@@ -84,7 +85,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
return response;
}
- public void writeHtmlState(SystemStateGenerator systemStateGenerator, StringBuilder sb, StatusPageServer.HttpRequest request) {
+ public void writeHtmlState(StateVersionTracker stateVersionTracker, StringBuilder sb, StatusPageServer.HttpRequest request) {
boolean showLocal = showLocalSystemStatesInLog;
if (request.hasQueryParameter("showlocal")) {
showLocal = true;
@@ -93,9 +94,9 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
}
sb.append("<h2 id=\"clusterstates\">Cluster states</h2>\n")
- .append("<p>Current cluster state:<br><code>").append(systemStateGenerator.currentClusterStateView().toString()).append("</code></p>\n");
+ .append("<p>Current cluster state:<br><code>").append(stateVersionTracker.getVersionedClusterState().toString()).append("</code></p>\n");
- if ( ! systemStateGenerator.systemStateHistory().isEmpty()) {
+ if ( ! stateVersionTracker.getClusterStateHistory().isEmpty()) {
TimeZone tz = TimeZone.getTimeZone("UTC");
sb.append("<h3 id=\"clusterstatehistory\">Cluster state history</h3>\n");
if (showLocal) {
@@ -106,10 +107,10 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
.append(" <th>Cluster state</th>\n")
.append("</tr>\n");
// Write cluster state history in reverse order (newest on top)
- Iterator<SystemStateGenerator.SystemStateHistoryEntry> stateIterator = systemStateGenerator.systemStateHistory().iterator();
- SystemStateGenerator.SystemStateHistoryEntry current = null;
+ Iterator<ClusterStateHistoryEntry> stateIterator = stateVersionTracker.getClusterStateHistory().iterator();
+ ClusterStateHistoryEntry current = null;
while (stateIterator.hasNext()) {
- SystemStateGenerator.SystemStateHistoryEntry nextEntry = stateIterator.next();
+ ClusterStateHistoryEntry nextEntry = stateIterator.next();
if (nextEntry.state().isOfficial() || showLocal) {
if (current != null) writeClusterStateEntry(current, nextEntry, sb, tz);
current = nextEntry;
@@ -120,7 +121,7 @@ public class LegacyIndexPageRequestHandler implements StatusPageServer.RequestHa
}
}
- private void writeClusterStateEntry(SystemStateGenerator.SystemStateHistoryEntry entry, SystemStateGenerator.SystemStateHistoryEntry last, StringBuilder sb, TimeZone tz) {
+ private void writeClusterStateEntry(ClusterStateHistoryEntry entry, ClusterStateHistoryEntry last, StringBuilder sb, TimeZone tz) {
sb.append("<tr><td>").append(RealTimer.printDate(entry.time(), tz))
.append("</td><td>").append(entry.state().isOfficial() ? "" : "<font color=\"grey\">");
sb.append(entry.state());
diff --git a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StaticResourceRequestHandler.java b/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StaticResourceRequestHandler.java
deleted file mode 100644
index fa8128753f6..00000000000
--- a/clustercontroller-core/src/main/java/com/yahoo/vespa/clustercontroller/core/status/StaticResourceRequestHandler.java
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.clustercontroller.core.status;
-
-import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageResponse;
-import com.yahoo.vespa.clustercontroller.core.status.statuspage.StatusPageServer;
-
-import java.io.ByteArrayOutputStream;
-import java.io.InputStream;
-import java.io.IOException;
-
-/**
- * HTTP request handler for serving a single JAR resource as if it were
- * a regular file hosted on the server. Always serves the content verbatim
- * (i.e. as a byte stream), specifying a Content-Type provided when creating
- * the handler.
- *
- * @author <a href="mailto:vekterli@yahoo-inc.com">Tor Brede Vekterli</a>
- * @since 5.28
- */
-public class StaticResourceRequestHandler implements StatusPageServer.RequestHandler {
- private final byte[] resourceData;
- private final String contentType;
-
- public StaticResourceRequestHandler(String resourcePath,
- String contentType)
- throws IOException
- {
- this.resourceData = loadResource(resourcePath);
- this.contentType = contentType;
- }
-
- private byte[] loadResource(String resourcePath) throws IOException {
- InputStream resourceStream = getClass().getClassLoader().getResourceAsStream(resourcePath);
- if (resourceStream == null) {
- throw new IOException("No resource with path '" + resourcePath + "' could be found");
- }
- return readStreamData(resourceStream);
- }
-
- @Override
- public StatusPageResponse handle(StatusPageServer.HttpRequest request) {
- final StatusPageResponse response = new StatusPageResponse();
- response.setClientCachingEnabled(true);
- response.setContentType(contentType);
- try {
- response.getOutputStream().write(resourceData);
- } catch (IOException e) {
- response.setResponseCode(StatusPageResponse.ResponseCode.INTERNAL_SERVER_ERROR);
- }
- return response;
- }
-
- private byte[] readStreamData(InputStream resourceStream) throws IOException {
- final byte[] buf = new byte[4096];
- final ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
- while (true) {
- int read = resourceStream.read(buf);
- if (read < 0) {
- break;
- }
- outputStream.write(buf, 0, read);
- }
- outputStream.close();
- return outputStream.toByteArray();
- }
-}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java
index aca26000931..3eda886e721 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterFixture.java
@@ -3,21 +3,18 @@ package com.yahoo.vespa.clustercontroller.core;
import com.yahoo.vdslib.distribution.ConfiguredNode;
import com.yahoo.vdslib.distribution.Distribution;
+import com.yahoo.vdslib.state.ClusterState;
import com.yahoo.vdslib.state.Node;
import com.yahoo.vdslib.state.NodeState;
import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler;
-import com.yahoo.vespa.clustercontroller.core.mocks.TestEventLog;
import com.yahoo.vespa.clustercontroller.utils.util.NoMetricReporter;
-import com.yahoo.vespa.config.content.StorDistributionConfig;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
import static org.mockito.Mockito.mock;
@@ -26,98 +23,163 @@ class ClusterFixture {
public final Distribution distribution;
public final FakeTimer timer;
public final EventLogInterface eventLog;
- public final SystemStateGenerator generator;
+ public final StateChangeHandler nodeStateChangeHandler;
+ public final ClusterStateGenerator.Params params = new ClusterStateGenerator.Params();
- public ClusterFixture(ContentCluster cluster, Distribution distribution) {
+ ClusterFixture(ContentCluster cluster, Distribution distribution) {
this.cluster = cluster;
this.distribution = distribution;
this.timer = new FakeTimer();
this.eventLog = mock(EventLogInterface.class);
- this.generator = createGeneratorForFixtureCluster();
+ this.nodeStateChangeHandler = createNodeStateChangeHandlerForCluster();
+ this.params.cluster(this.cluster);
}
- public SystemStateGenerator createGeneratorForFixtureCluster() {
+ StateChangeHandler createNodeStateChangeHandlerForCluster() {
final int controllerIndex = 0;
MetricUpdater metricUpdater = new MetricUpdater(new NoMetricReporter(), controllerIndex);
- SystemStateGenerator generator = new SystemStateGenerator(timer, eventLog, metricUpdater);
- generator.setNodes(cluster.clusterInfo());
- generator.setDistribution(distribution);
- return generator;
+ return new StateChangeHandler(timer, eventLog, metricUpdater);
}
- public void bringEntireClusterUp() {
+ ClusterFixture bringEntireClusterUp() {
cluster.clusterInfo().getConfiguredNodes().forEach((idx, node) -> {
reportStorageNodeState(idx, State.UP);
reportDistributorNodeState(idx, State.UP);
});
+ return this;
}
- public void reportStorageNodeState(final int index, State state) {
- final Node node = new Node(NodeType.STORAGE, index);
- final NodeState nodeState = new NodeState(NodeType.STORAGE, state);
- nodeState.setDescription("mockdesc");
+ ClusterFixture markEntireClusterDown() {
+ cluster.clusterInfo().getConfiguredNodes().forEach((idx, node) -> {
+ reportStorageNodeState(idx, State.DOWN);
+ reportDistributorNodeState(idx, State.DOWN);
+ });
+ return this;
+ }
+
+ private void doReportNodeState(final Node node, final NodeState nodeState) {
+ final ClusterState stateBefore = rawGeneratedClusterState();
+
NodeStateOrHostInfoChangeHandler handler = mock(NodeStateOrHostInfoChangeHandler.class);
NodeInfo nodeInfo = cluster.getNodeInfo(node);
- generator.handleNewReportedNodeState(nodeInfo, nodeState, handler);
+ nodeStateChangeHandler.handleNewReportedNodeState(stateBefore, nodeInfo, nodeState, handler);
nodeInfo.setReportedState(nodeState, timer.getCurrentTimeInMillis());
}
- public void reportStorageNodeState(final int index, NodeState nodeState) {
+ ClusterFixture reportStorageNodeState(final int index, State state, String description) {
final Node node = new Node(NodeType.STORAGE, index);
- final NodeInfo nodeInfo = cluster.getNodeInfo(node);
- final long mockTime = 1234;
- NodeStateOrHostInfoChangeHandler changeListener = mock(NodeStateOrHostInfoChangeHandler.class);
- generator.handleNewReportedNodeState(nodeInfo, nodeState, changeListener);
- nodeInfo.setReportedState(nodeState, mockTime);
+ final NodeState nodeState = new NodeState(NodeType.STORAGE, state);
+ nodeState.setDescription(description);
+ doReportNodeState(node, nodeState);
+ return this;
}
- public void reportDistributorNodeState(final int index, State state) {
+ ClusterFixture reportStorageNodeState(final int index, State state) {
+ return reportStorageNodeState(index, state, "mockdesc");
+ }
+
+ ClusterFixture reportStorageNodeState(final int index, NodeState nodeState) {
+ doReportNodeState(new Node(NodeType.STORAGE, index), nodeState);
+ return this;
+ }
+
+ ClusterFixture reportDistributorNodeState(final int index, State state) {
final Node node = new Node(NodeType.DISTRIBUTOR, index);
final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, state);
- NodeStateOrHostInfoChangeHandler handler = mock(NodeStateOrHostInfoChangeHandler.class);
+ doReportNodeState(node, nodeState);
+ return this;
+ }
+
+ ClusterFixture reportDistributorNodeState(final int index, NodeState nodeState) {
+ doReportNodeState(new Node(NodeType.DISTRIBUTOR, index), nodeState);
+ return this;
+ }
+
+ private void doProposeWantedState(final Node node, final NodeState nodeState, String description) {
+ final ClusterState stateBefore = rawGeneratedClusterState();
+
+ nodeState.setDescription(description);
NodeInfo nodeInfo = cluster.getNodeInfo(node);
+ nodeInfo.setWantedState(nodeState);
- generator.handleNewReportedNodeState(nodeInfo, nodeState, handler);
- nodeInfo.setReportedState(nodeState, timer.getCurrentTimeInMillis());
+ nodeStateChangeHandler.proposeNewNodeState(stateBefore, nodeInfo, nodeState);
}
- public void proposeStorageNodeWantedState(final int index, State state) {
+ ClusterFixture proposeStorageNodeWantedState(final int index, State state, String description) {
final Node node = new Node(NodeType.STORAGE, index);
final NodeState nodeState = new NodeState(NodeType.STORAGE, state);
+ doProposeWantedState(node, nodeState, description);
+ return this;
+ }
+
+ ClusterFixture proposeStorageNodeWantedState(final int index, State state) {
+ return proposeStorageNodeWantedState(index, state, "mockdesc");
+ }
+
+ ClusterFixture proposeDistributorWantedState(final int index, State state) {
+ final ClusterState stateBefore = rawGeneratedClusterState();
+ final Node node = new Node(NodeType.DISTRIBUTOR, index);
+ final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, state);
nodeState.setDescription("mockdesc");
NodeInfo nodeInfo = cluster.getNodeInfo(node);
nodeInfo.setWantedState(nodeState);
- generator.proposeNewNodeState(nodeInfo, nodeState);
+ nodeStateChangeHandler.proposeNewNodeState(stateBefore, nodeInfo, nodeState);
+ return this;
+ }
+ ClusterFixture disableAutoClusterTakedown() {
+ setMinNodesUp(0, 0, 0.0, 0.0);
+ return this;
}
- public void disableAutoClusterTakedown() {
- generator.setMinNodesUp(0, 0, 0.0, 0.0);
+ ClusterFixture setMinNodesUp(int minDistNodes, int minStorNodes, double minDistRatio, double minStorRatio) {
+ params.minStorageNodesUp(minStorNodes)
+ .minDistributorNodesUp(minDistNodes)
+ .minRatioOfStorageNodesUp(minStorRatio)
+ .minRatioOfDistributorNodesUp(minDistRatio);
+ return this;
}
- public void disableTransientMaintenanceModeOnDown() {
- Map<NodeType, Integer> maxTransitionTime = new TreeMap<>();
- maxTransitionTime.put(NodeType.DISTRIBUTOR, 0);
- maxTransitionTime.put(NodeType.STORAGE, 0);
- generator.setMaxTransitionTime(maxTransitionTime);
+ ClusterFixture setMinNodeRatioPerGroup(double upRatio) {
+ params.minNodeRatioPerGroup(upRatio);
+ return this;
}
- public void enableTransientMaintenanceModeOnDown(final int transitionTime) {
+ static Map<NodeType, Integer> buildTransitionTimeMap(int distributorTransitionTime, int storageTransitionTime) {
Map<NodeType, Integer> maxTransitionTime = new TreeMap<>();
- maxTransitionTime.put(NodeType.DISTRIBUTOR, transitionTime);
- maxTransitionTime.put(NodeType.STORAGE, transitionTime);
- generator.setMaxTransitionTime(maxTransitionTime);
+ maxTransitionTime.put(NodeType.DISTRIBUTOR, distributorTransitionTime);
+ maxTransitionTime.put(NodeType.STORAGE, storageTransitionTime);
+ return maxTransitionTime;
}
- public String generatedClusterState() {
- return generator.getClusterState().toString();
+ void disableTransientMaintenanceModeOnDown() {
+ this.params.transitionTimes(0);
}
- public String verboseGeneratedClusterState() { return generator.getClusterState().toString(true); }
+ void enableTransientMaintenanceModeOnDown(final int transitionTimeMs) {
+ this.params.transitionTimes(transitionTimeMs);
+ }
+
+ AnnotatedClusterState annotatedGeneratedClusterState() {
+ params.currentTimeInMilllis(timer.getCurrentTimeInMillis());
+ return ClusterStateGenerator.generatedStateFrom(params);
+ }
- public static ClusterFixture forFlatCluster(int nodeCount) {
+ ClusterState rawGeneratedClusterState() {
+ return annotatedGeneratedClusterState().getClusterState();
+ }
+
+ String generatedClusterState() {
+ return annotatedGeneratedClusterState().getClusterState().toString();
+ }
+
+ String verboseGeneratedClusterState() {
+ return annotatedGeneratedClusterState().getClusterState().toString(true);
+ }
+
+ static ClusterFixture forFlatCluster(int nodeCount) {
Collection<ConfiguredNode> nodes = DistributionBuilder.buildConfiguredNodes(nodeCount);
Distribution distribution = DistributionBuilder.forFlatCluster(nodeCount);
@@ -126,11 +188,27 @@ class ClusterFixture {
return new ClusterFixture(cluster, distribution);
}
- public static ClusterFixture forHierarchicCluster(DistributionBuilder.GroupBuilder root) {
+ static ClusterFixture forHierarchicCluster(DistributionBuilder.GroupBuilder root) {
List<ConfiguredNode> nodes = DistributionBuilder.buildConfiguredNodes(root.totalNodeCount());
Distribution distribution = DistributionBuilder.forHierarchicCluster(root);
ContentCluster cluster = new ContentCluster("foo", nodes, distribution, 0, 0.0);
return new ClusterFixture(cluster, distribution);
}
+
+ ClusterStateGenerator.Params generatorParams() {
+ return new ClusterStateGenerator.Params().cluster(cluster);
+ }
+
+ ContentCluster cluster() {
+ return this.cluster;
+ }
+
+ static Node storageNode(int index) {
+ return new Node(NodeType.STORAGE, index);
+ }
+
+ static Node distributorNode(int index) {
+ return new Node(NodeType.DISTRIBUTOR, index);
+ }
}
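Note that the fixture's setup methods now return `this`, so the new tests can chain their entire setup in a single expression, and the generated state is derived on demand from the accumulated ClusterStateGenerator.Params instead of being pushed through a SystemStateGenerator instance. A minimal usage sketch, mirroring how the fixture is used in the test file that follows:

    // Chained setup with the fluent ClusterFixture API shown above.
    ClusterFixture fixture = ClusterFixture.forFlatCluster(9)
            .bringEntireClusterUp()
            .reportStorageNodeState(0, State.DOWN)
            .proposeStorageNodeWantedState(2, State.MAINTENANCE);
    // The cluster state string is generated from the current Params at call time.
    String generated = fixture.generatedClusterState();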
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java
new file mode 100644
index 00000000000..b9b97c27949
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/ClusterStateGeneratorTest.java
@@ -0,0 +1,895 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import com.yahoo.vdslib.distribution.ConfiguredNode;
+import com.yahoo.vdslib.state.DiskState;
+import com.yahoo.vdslib.state.Node;
+import com.yahoo.vdslib.state.NodeState;
+import com.yahoo.vdslib.state.NodeType;
+import com.yahoo.vdslib.state.State;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.Optional;
+
+import static com.yahoo.vespa.clustercontroller.core.matchers.HasStateReasonForNode.hasStateReasonForNode;
+import static com.yahoo.vespa.clustercontroller.core.ClusterFixture.storageNode;
+
+import static org.hamcrest.core.IsEqual.equalTo;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
+
+public class ClusterStateGeneratorTest {
+
+ private static AnnotatedClusterState generateFromFixtureWithDefaultParams(ClusterFixture fixture) {
+ final ClusterStateGenerator.Params params = new ClusterStateGenerator.Params();
+ params.cluster = fixture.cluster;
+ params.transitionTimes = ClusterFixture.buildTransitionTimeMap(0, 0);
+ params.currentTimeInMillis = 0;
+ return ClusterStateGenerator.generatedStateFrom(params);
+ }
+
+ @Test
+ public void cluster_with_all_nodes_reported_down_has_state_down() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(6).markEntireClusterDown();
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.getClusterState().getClusterState(), is(State.DOWN));
+ // The returned reason in this case depends on which "is cluster down?" check
+ // kicks in first. Currently, the minimum storage node count does.
+ assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE)));
+ }
+
+ @Test
+ public void cluster_with_all_nodes_up_state_correct_distributor_and_storage_count() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(6).bringEntireClusterUp();
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:6 storage:6"));
+ }
+
+ @Test
+ public void distributor_reported_states_reflected_in_generated_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(9)
+ .bringEntireClusterUp()
+ .reportDistributorNodeState(2, State.DOWN)
+ .reportDistributorNodeState(4, State.STOPPING);
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:9 .2.s:d .4.s:s storage:9"));
+ }
+
+ // NOTE: initializing state tested separately since it involves init progress state info
+ @Test
+ public void storage_reported_states_reflected_in_generated_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(9)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, State.DOWN)
+ .reportStorageNodeState(4, State.STOPPING);
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:9 storage:9 .0.s:d .4.s:s"));
+ }
+
+ @Test
+ public void storage_reported_disk_state_included_in_generated_state() {
+ final NodeState stateWithDisks = new NodeState(NodeType.STORAGE, State.UP);
+ stateWithDisks.setDiskCount(7);
+ stateWithDisks.setDiskState(5, new DiskState(State.DOWN));
+
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(9)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(2, stateWithDisks);
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:9 storage:9 .2.d:7 .2.d.5.s:d"));
+ }
+
+ @Test
+ public void worse_distributor_wanted_state_overrides_reported_state() {
+ // Maintenance mode is illegal for distributors and therefore not tested
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(7)
+ .bringEntireClusterUp()
+ .proposeDistributorWantedState(5, State.DOWN) // Down worse than Up
+ .reportDistributorNodeState(2, State.STOPPING)
+ .proposeDistributorWantedState(2, State.DOWN); // Down worse than Stopping
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:7 .2.s:d .5.s:d storage:7"));
+ }
+
+ @Test
+ public void worse_storage_wanted_state_overrides_reported_state() {
+ // Does not test all maintenance mode overrides; see maintenance_mode_overrides_reported_state
+ // for that.
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(7)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(2, State.STOPPING)
+ .proposeStorageNodeWantedState(2, State.MAINTENANCE) // Maintenance worse than Stopping
+ .proposeStorageNodeWantedState(4, State.RETIRED) // Retired is "worse" than Up
+ .proposeStorageNodeWantedState(5, State.DOWN); // Down worse than Up
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:7 storage:7 .2.s:m .4.s:r .5.s:d"));
+ }
+
+ @Test
+ public void better_distributor_wanted_state_does_not_override_reported_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(7)
+ .bringEntireClusterUp()
+ .reportDistributorNodeState(0, State.DOWN)
+ .proposeDistributorWantedState(0, State.UP);
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:7 .0.s:d storage:7"));
+ }
+
+ @Test
+ public void better_storage_wanted_state_does_not_override_reported_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(7)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(1, State.DOWN)
+ .proposeStorageNodeWantedState(1, State.UP)
+ .reportStorageNodeState(2, State.DOWN)
+ .proposeStorageNodeWantedState(2, State.RETIRED);
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:7 storage:7 .1.s:d .2.s:d"));
+ }
+
+ /**
+ * If we let a Retired node be published as Initializing when it is in init state, we run
+ * the risk of having both feed and merge ops sent towards it, which is not what we want.
+ * Consequently we pretend such nodes are never in init state and just transition them
+ * directly from Maintenance -> Up.
+ */
+ @Test
+ public void retired_node_in_init_state_is_set_to_maintenance() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(1, State.INITIALIZING)
+ .proposeStorageNodeWantedState(1, State.RETIRED);
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:3 storage:3 .1.s:m"));
+ }
+
+ /**
+ * A storage node will report itself as being in initializing mode immediately when
+ * starting up. It can only accept external operations once it has finished listing
+ * the set of buckets (but not necessarily their contents). As a consequence of this,
+ * we have to map the reported init state to Down while the node is in bucket listing mode. This
+ * will prevent clients from thinking they can use the node and prevent distributors from
+ * trying to fetch as-yet nonexistent bucket sets from it.
+ *
+ * Detecting the bucket-listing stage is currently done by inspecting its init progress
+ * value and triggering on a sufficiently low value.
+ */
+ @Test
+ public void storage_node_in_init_mode_while_listing_buckets_is_marked_down() {
+ final NodeState initWhileListingBuckets = new NodeState(NodeType.STORAGE, State.INITIALIZING);
+ initWhileListingBuckets.setInitProgress(0.0);
+
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(1, initWhileListingBuckets);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("distributor:3 storage:3 .1.s:d"));
+ }
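The bucket-listing rule described in the comment above can be sketched roughly as follows; the method name and structure are illustrative assumptions, not the actual ClusterStateGenerator code (which is not part of this hunk):

    // Hypothetical sketch: a storage node reporting Init with a sufficiently low init
    // progress (the test exercises 0.0) is published as Down, unless its wanted state
    // is Maintenance, which still takes precedence (see the next test).
    static State effectiveStorageState(State reported, double initProgress, State wanted) {
        boolean listingBuckets = (reported == State.INITIALIZING) && (initProgress <= 0.0);
        if (listingBuckets) {
            return (wanted == State.MAINTENANCE) ? State.MAINTENANCE : State.DOWN;
        }
        return reported;
    }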
+
+ /**
+ * Implicit down while reported as init should not kick into effect if the Wanted state
+ * is set to Maintenance.
+ */
+ @Test
+ public void implicit_down_while_listing_buckets_does_not_override_wanted_state() {
+ final NodeState initWhileListingBuckets = new NodeState(NodeType.STORAGE, State.INITIALIZING);
+ initWhileListingBuckets.setInitProgress(0.0);
+
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(1, initWhileListingBuckets)
+ .proposeStorageNodeWantedState(1, State.MAINTENANCE);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("distributor:3 storage:3 .1.s:m"));
+ }
+
+ @Test
+ public void distributor_nodes_in_init_mode_are_not_mapped_to_down() {
+ final NodeState initWhileListingBuckets = new NodeState(NodeType.DISTRIBUTOR, State.INITIALIZING);
+ initWhileListingBuckets.setInitProgress(0.0);
+
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportDistributorNodeState(1, initWhileListingBuckets);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("distributor:3 .1.s:i .1.i:0.0 storage:3"));
+ }
+
+ /**
+ * Maintenance mode overrides all reported states, even Down.
+ */
+ @Test
+ public void maintenance_mode_wanted_state_overrides_reported_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(7)
+ .bringEntireClusterUp()
+ .proposeStorageNodeWantedState(0, State.MAINTENANCE)
+ .reportStorageNodeState(2, State.STOPPING)
+ .proposeStorageNodeWantedState(2, State.MAINTENANCE)
+ .reportStorageNodeState(3, State.DOWN)
+ .proposeStorageNodeWantedState(3, State.MAINTENANCE)
+ .reportStorageNodeState(4, State.INITIALIZING)
+ .proposeStorageNodeWantedState(4, State.MAINTENANCE);
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:7 storage:7 .0.s:m .2.s:m .3.s:m .4.s:m"));
+ }
+
+ @Test
+ public void wanted_state_description_carries_over_to_generated_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(7)
+ .bringEntireClusterUp()
+ .proposeStorageNodeWantedState(1, State.MAINTENANCE, "foo")
+ .proposeStorageNodeWantedState(2, State.DOWN, "bar")
+ .proposeStorageNodeWantedState(3, State.RETIRED, "baz");
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ // We have to use toString(true) to get verbose printing including the descriptions,
+ // as these are omitted by default.
+ assertThat(state.toString(true), equalTo("distributor:7 storage:7 .1.s:m .1.m:foo " +
+ ".2.s:d .2.m:bar .3.s:r .3.m:baz"));
+ }
+
+ @Test
+ public void reported_disk_state_not_hidden_by_wanted_state() {
+ final NodeState stateWithDisks = new NodeState(NodeType.STORAGE, State.UP);
+ stateWithDisks.setDiskCount(5);
+ stateWithDisks.setDiskState(3, new DiskState(State.DOWN));
+
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(9)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(2, stateWithDisks)
+ .proposeStorageNodeWantedState(2, State.RETIRED)
+ .reportStorageNodeState(3, stateWithDisks)
+ .proposeStorageNodeWantedState(3, State.MAINTENANCE);
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ // We do not publish disk states for nodes in Down state. This differs from how the
+ // legacy controller did things, but such states cannot be counted on for ideal state
+ // calculations either way. In particular, reported disk states are not persisted and
+ // only exist transiently in the cluster controller's memory. A controller restart is
+ // sufficient to clear all disk states that have been incidentally remembered for now
+ // downed nodes.
+ // The keen reader may choose to convince themselves of this independently by reading the
+ // code in com.yahoo.vdslib.distribution.Distribution#getIdealStorageNodes and observing
+ // how disk states for nodes that are in a down-state are never considered.
+ assertThat(state.toString(), equalTo("distributor:9 storage:9 .2.s:r .2.d:5 .2.d.3.s:d " +
+ ".3.s:m .3.d:5 .3.d.3.s:d"));
+ }
+
+ @Test
+ public void config_retired_mode_is_reflected_in_generated_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
+ List<ConfiguredNode> nodes = DistributionBuilder.buildConfiguredNodes(5);
+ nodes.set(2, new ConfiguredNode(2, true));
+ fixture.cluster.setNodes(nodes);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .2.s:r"));
+ }
+
+ private void do_test_change_within_node_transition_time_window_generates_maintenance(State reportedState) {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
+ final ClusterStateGenerator.Params params = fixture.generatorParams()
+ .currentTimeInMilllis(10_000)
+ .transitionTimes(2000);
+
+ fixture.reportStorageNodeState(1, reportedState);
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ // Node 1 transitioned to reported `reportedState` at time 9000ms after epoch. This means that according to the
+ // above transition time config, it should remain in generated maintenance mode until time 11000ms,
+ // at which point it should finally transition to generated state Down.
+ nodeInfo.setTransitionTime(9000);
+ {
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .1.s:m"));
+ }
+
+ nodeInfo.setTransitionTime(10999);
+ {
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .1.s:m"));
+ }
+ }
+
+ @Test
+ public void reported_down_node_within_transition_time_has_maintenance_generated_state() {
+ do_test_change_within_node_transition_time_window_generates_maintenance(State.DOWN);
+ }
+
+ @Test
+ public void reported_stopping_node_within_transition_time_has_maintenance_generated_state() {
+ do_test_change_within_node_transition_time_window_generates_maintenance(State.STOPPING);
+ }
+
+ @Test
+ public void reported_node_down_after_transition_time_has_down_generated_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
+ final ClusterStateGenerator.Params params = fixture.generatorParams()
+ .currentTimeInMilllis(11_000)
+ .transitionTimes(2000);
+
+ fixture.reportStorageNodeState(1, State.DOWN);
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ nodeInfo.setTransitionTime(9000);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .1.s:d"));
+ }
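The tests above pin down the transition-time window: a storage node reported Down or Stopping is temporarily published as Maintenance until the configured max transition time has elapsed. A hedged sketch of that check, with assumed names rather than the generator's actual code:

    // Hypothetical sketch: with a transition at 9000 ms and a 2000 ms window, the node is
    // kept in Maintenance up to (but not including) 11000 ms, then published as Down.
    // Distributors never get this treatment, and an explicit wanted Down state also wins,
    // as the following tests show.
    static State transientStateForDownedStorageNode(long nowMillis, long transitionTimeMillis,
                                                    long maxTransitionTimeMillis) {
        return (nowMillis - transitionTimeMillis < maxTransitionTimeMillis)
                ? State.MAINTENANCE
                : State.DOWN;
    }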
+
+ @Test
+ public void distributor_nodes_are_not_implicitly_transitioned_to_maintenance_mode() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
+ final ClusterStateGenerator.Params params = fixture.generatorParams()
+ .currentTimeInMilllis(10_000)
+ .transitionTimes(2000);
+
+ fixture.reportDistributorNodeState(2, State.DOWN);
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.DISTRIBUTOR, 2));
+ nodeInfo.setTransitionTime(9000);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:5 .2.s:d storage:5"));
+ }
+
+ @Test
+ public void transient_maintenance_mode_does_not_override_wanted_down_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
+ final ClusterStateGenerator.Params params = fixture.generatorParams()
+ .currentTimeInMilllis(10_000)
+ .transitionTimes(2000);
+
+ fixture.proposeStorageNodeWantedState(2, State.DOWN);
+ fixture.reportStorageNodeState(2, State.DOWN);
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 2));
+ nodeInfo.setTransitionTime(9000);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ // Should _not_ be in maintenance mode, since we explicitly want it to stay down.
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .2.s:d"));
+ }
+
+ @Test
+ public void reported_down_retired_node_within_transition_time_transitions_to_maintenance() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
+ final ClusterStateGenerator.Params params = fixture.generatorParams()
+ .currentTimeInMilllis(10_000)
+ .transitionTimes(2000);
+
+ fixture.proposeStorageNodeWantedState(2, State.RETIRED);
+ fixture.reportStorageNodeState(2, State.DOWN);
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 2));
+ nodeInfo.setTransitionTime(9000);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .2.s:m"));
+ }
+
+ @Test
+ public void crash_count_exceeding_limit_marks_node_as_down() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
+ final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 3));
+ nodeInfo.setPrematureCrashCount(11);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .3.s:d"));
+ }
+
+ @Test
+ public void crash_count_not_exceeding_limit_does_not_mark_node_as_down() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5).bringEntireClusterUp();
+ final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 3));
+ nodeInfo.setPrematureCrashCount(10); // "Max crashes" range is inclusive
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5"));
+ }
+
+ @Test
+ public void exceeded_crash_count_does_not_override_wanted_maintenance_state() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .bringEntireClusterUp()
+ .proposeStorageNodeWantedState(1, State.MAINTENANCE);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ nodeInfo.setPrematureCrashCount(11);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .1.s:m"));
+ }
+
+ // Stopping -> Down is expected and does not indicate an unstable node.
+ @Test
+ public void transition_from_controlled_stop_to_down_does_not_add_to_crash_counter() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(2)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(1, State.STOPPING, "controlled shutdown") // urgh, string matching logic
+ .reportStorageNodeState(1, State.DOWN);
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ assertThat(nodeInfo.getPrematureCrashCount(), equalTo(0));
+ }
+
+ @Test
+ public void non_observed_storage_node_start_timestamp_is_included_in_state() {
+ final NodeState nodeState = new NodeState(NodeType.STORAGE, State.UP);
+ // A reported state timestamp that is not yet marked as observed in the NodeInfo
+ // for the same node is considered not observed by other nodes and must therefore
+ // be included in the generated cluster state
+ nodeState.setStartTimestamp(5000);
+
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, nodeState);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .0.t:5000"));
+ }
+
+ @Test
+ public void non_observed_distributor_start_timestamp_is_included_in_state() {
+ final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, State.UP);
+ nodeState.setStartTimestamp(6000);
+
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .bringEntireClusterUp()
+ .reportDistributorNodeState(1, nodeState);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("distributor:5 .1.t:6000 storage:5"));
+ }
+
+ @Test
+ public void fully_observed_storage_node_timestamp_not_included_in_state() {
+ final NodeState nodeState = new NodeState(NodeType.STORAGE, State.UP);
+ nodeState.setStartTimestamp(5000);
+
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, nodeState);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0));
+ nodeInfo.setStartTimestamp(5000);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5"));
+ }
+
+ @Test
+ public void fully_observed_distributor_timestamp_not_included_in_state() {
+ final NodeState nodeState = new NodeState(NodeType.DISTRIBUTOR, State.UP);
+ nodeState.setStartTimestamp(6000);
+
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .bringEntireClusterUp()
+ .reportDistributorNodeState(0, nodeState);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.DISTRIBUTOR, 0));
+ nodeInfo.setStartTimestamp(6000);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5"));
+ }
+
+ @Test
+ public void cluster_down_if_less_than_min_count_of_storage_nodes_available() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, State.DOWN)
+ .reportStorageNodeState(2, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(2);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("cluster:d distributor:3 storage:2 .0.s:d"));
+ assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE)));
+ }
+
+ @Test
+ public void cluster_not_down_if_more_than_min_count_of_storage_nodes_are_available() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(2);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:d"));
+ assertThat(state.getClusterStateReason(), equalTo(Optional.empty()));
+ }
+
+ @Test
+ public void cluster_down_if_less_than_min_count_of_distributors_available() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportDistributorNodeState(0, State.DOWN)
+ .reportDistributorNodeState(2, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minDistributorNodesUp(2);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("cluster:d distributor:2 .0.s:d storage:3"));
+ assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE)));
+ }
+
+ @Test
+ public void cluster_not_down_if_more_than_min_count_of_distributors_are_available() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportDistributorNodeState(0, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minDistributorNodesUp(2);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:3 .0.s:d storage:3"));
+ assertThat(state.getClusterStateReason(), equalTo(Optional.empty()));
+ }
+
+ @Test
+ public void maintenance_mode_counted_as_down_for_cluster_availability() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, State.DOWN)
+ .proposeStorageNodeWantedState(2, State.MAINTENANCE);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(2);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("cluster:d distributor:3 storage:3 .0.s:d .2.s:m"));
+ }
+
+ @Test
+ public void init_and_retired_counted_as_up_for_cluster_availability() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, State.INITIALIZING)
+ .proposeStorageNodeWantedState(1, State.RETIRED);
+ // Any node being treated as down should take down the cluster here
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minStorageNodesUp(3);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:i .0.i:1.0 .1.s:r"));
+ }
+
+ @Test
+ public void cluster_down_if_less_than_min_ratio_of_storage_nodes_available() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, State.DOWN)
+ .reportStorageNodeState(2, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfStorageNodesUp(0.5);
+
+ // TODO de-dupe a lot of these tests?
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("cluster:d distributor:3 storage:2 .0.s:d"));
+ assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO)));
+ }
+
+ @Test
+ public void cluster_not_down_if_more_than_min_ratio_of_storage_nodes_available() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, State.DOWN);
+ // The min node ratio check is inclusive, i.e. having exactly the minimum ratio of nodes up is enough for the cluster to stay up.
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfStorageNodesUp(0.5);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:d"));
+ assertThat(state.getClusterStateReason(), equalTo(Optional.empty()));
+ }
+
+ @Test
+ public void cluster_down_if_less_than_min_ratio_of_distributors_available() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportDistributorNodeState(0, State.DOWN)
+ .reportDistributorNodeState(2, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfDistributorNodesUp(0.5);
+
+ // TODO de-dupe a lot of these tests?
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("cluster:d distributor:2 .0.s:d storage:3"));
+ assertThat(state.getClusterStateReason(), equalTo(Optional.of(ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO)));
+ }
+
+ @Test
+ public void cluster_not_down_if_more_than_min_ratio_of_distributors_available() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportDistributorNodeState(0, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minRatioOfDistributorNodesUp(0.5);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:3 .0.s:d storage:3"));
+ assertThat(state.getClusterStateReason(), equalTo(Optional.empty()));
+ }
+
+ @Test
+ public void group_nodes_are_marked_down_if_group_availability_too_low() {
+ final ClusterFixture fixture = ClusterFixture
+ .forHierarchicCluster(DistributionBuilder.withGroups(3).eachWithNodeCount(3))
+ .bringEntireClusterUp()
+ .reportStorageNodeState(4, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.68);
+
+ // Node 4 is down, which means more than 32% of the nodes in group #2 are down. Nodes 3 and 5
+ // should be implicitly marked down as they are in the same group.
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:9 storage:9 .3.s:d .4.s:d .5.s:d"));
+ }
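The group takedown logic itself lives in GroupAvailabilityCalculator (not part of this hunk); conceptually, the threshold check being exercised is roughly the following sketch, with assumed names:

    // Hypothetical sketch: when the ratio of available nodes in a group falls below the
    // configured minNodeRatioPerGroup, the remaining nodes in that group are implicitly
    // marked down as well. In the test above, 2 of 3 nodes (~0.67) is below the 0.68 limit.
    static boolean groupFallsBelowAvailabilityThreshold(int availableNodesInGroup,
                                                        int totalNodesInGroup,
                                                        double minNodeRatioPerGroup) {
        return ((double) availableNodesInGroup / totalNodesInGroup) < minNodeRatioPerGroup;
    }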
+
+ @Test
+ public void group_nodes_are_not_marked_down_if_group_availability_sufficiently_high() {
+ final ClusterFixture fixture = ClusterFixture
+ .forHierarchicCluster(DistributionBuilder.withGroups(3).eachWithNodeCount(3))
+ .bringEntireClusterUp()
+ .reportStorageNodeState(4, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.65);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:9 storage:9 .4.s:d")); // No other nodes down implicitly
+ }
+
+ @Test
+ public void implicitly_downed_group_nodes_receive_a_state_description() {
+ final ClusterFixture fixture = ClusterFixture
+ .forHierarchicCluster(DistributionBuilder.withGroups(2).eachWithNodeCount(2))
+ .bringEntireClusterUp()
+ .reportStorageNodeState(3, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.51);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(true), equalTo("distributor:4 storage:4 " +
+ ".2.s:d .2.m:group\\x20node\\x20availability\\x20below\\x20configured\\x20threshold " +
+ ".3.s:d .3.m:mockdesc")); // Preserve description for non-implicitly taken down node
+ }
+
+ @Test
+ public void implicitly_downed_group_nodes_are_annotated_with_group_reason() {
+ final ClusterFixture fixture = ClusterFixture
+ .forHierarchicCluster(DistributionBuilder.withGroups(2).eachWithNodeCount(2))
+ .bringEntireClusterUp()
+ .reportStorageNodeState(3, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.51);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.getNodeStateReasons(),
+ hasStateReasonForNode(storageNode(2), NodeStateReason.GROUP_IS_DOWN));
+ }
+
+ @Test
+ public void maintenance_nodes_in_downed_group_are_not_affected() {
+ final ClusterFixture fixture = ClusterFixture
+ .forHierarchicCluster(DistributionBuilder.withGroups(3).eachWithNodeCount(3))
+ .bringEntireClusterUp()
+ .proposeStorageNodeWantedState(3, State.MAINTENANCE)
+ .reportStorageNodeState(4, State.DOWN);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().minNodeRatioPerGroup(0.68);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ // 4 is down by itself, 5 is down implicitly and 3 should happily stay in Maintenance mode.
+ // Side note: most special cases for when a node should and should not be affected by group
+ // down edges are covered in GroupAvailabilityCalculatorTest and GroupAutoTakedownTest.
+ // We test this case explicitly since it's an assurance that code integration works as expected.
+ assertThat(state.toString(), equalTo("distributor:9 storage:9 .3.s:m .4.s:d .5.s:d"));
+ }
+
+ /**
+ * Cluster-wide distribution bit count cannot be higher than the lowest split bit
+ * count reported by the set of storage nodes. This is because the distribution bit
+ * directly impacts which level of the bucket tree is considered the root level,
+ * and any buckets caught over this level would not be accessible in the data space.
+ */
+ @Test
+ public void distribution_bits_bounded_by_reported_min_bits_from_storage_node() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(1, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(7));
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("bits:7 distributor:3 storage:3"));
+ }
+
+ @Test
+ public void distribution_bits_bounded_by_lowest_reporting_storage_node() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(6))
+ .reportStorageNodeState(1, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(5));
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("bits:5 distributor:3 storage:3"));
+ }
+
+ @Test
+ public void distribution_bits_bounded_by_config_parameter() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3).bringEntireClusterUp();
+
+ final ClusterStateGenerator.Params params = fixture.generatorParams().idealDistributionBits(12);
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("bits:12 distributor:3 storage:3"));
+ }
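Taken together, the three distribution-bit tests above behave as if the published count is the configured ideal, capped by the lowest split bit count reported by the storage nodes. A sketch under that assumption (the real generator also applies the watermark handling covered further down):

    // Hypothetical sketch: min over the configured ideal and every reported min-used-bits value.
    static int publishedDistributionBits(int configuredIdealBits, int... reportedMinUsedBits) {
        int published = configuredIdealBits;
        for (int bits : reportedMinUsedBits) {
            published = Math.min(published, bits);
        }
        return published;
    }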
+
+ // TODO do we really want this behavior? It's the legacy one, but it seems... dangerous, especially for maintenance.
+ // TODO We generally want to avoid distribution bit decreases if at all possible, since "collapsing"
+ // the top-level bucket space can cause data loss on timestamp collisions across super buckets.
+ @Test
+ public void distribution_bit_not_influenced_by_nodes_down_or_in_maintenance() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(7))
+ .reportStorageNodeState(1, new NodeState(NodeType.STORAGE, State.DOWN).setMinUsedBits(6))
+ .reportStorageNodeState(2, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(5))
+ .proposeStorageNodeWantedState(2, State.MAINTENANCE);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("bits:7 distributor:3 storage:3 .1.s:d .2.s:m"));
+ }
+
+ private String do_test_distribution_bit_watermark(int lowestObserved, int node0MinUsedBits) {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(node0MinUsedBits));
+
+ final ClusterStateGenerator.Params params = fixture.generatorParams()
+ .highestObservedDistributionBitCount(8) // TODO is this even needed for our current purposes?
+ .lowestObservedDistributionBitCount(lowestObserved);
+
+ return ClusterStateGenerator.generatedStateFrom(params).toString();
+ }
+
+ /**
+ * Distribution bit increases should not take place incrementally. Doing so would
+ * let e.g. a transition from 10 bits to 20 bits cause 10 interim full re-distributions.
+ */
+ @Test
+ public void published_distribution_bit_bound_by_low_watermark_when_nodes_report_less_than_config_bits() {
+ assertThat(do_test_distribution_bit_watermark(5, 5),
+ equalTo("bits:5 distributor:3 storage:3"));
+ assertThat(do_test_distribution_bit_watermark(5, 6),
+ equalTo("bits:5 distributor:3 storage:3"));
+ assertThat(do_test_distribution_bit_watermark(5, 15),
+ equalTo("bits:5 distributor:3 storage:3"));
+ }
+
+ @Test
+ public void published_state_jumps_to_configured_ideal_bits_when_all_nodes_report_it() {
+ // Note: the rest of the mocked nodes always report 16 bits by default
+ assertThat(do_test_distribution_bit_watermark(5, 16),
+ equalTo("distributor:3 storage:3")); // "bits:16" implied
+ }
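The watermark tests above suggest the following rule; again a hedged sketch with assumed names rather than the generator's actual code:

    // Hypothetical sketch: while any node still reports fewer bits than the configured ideal,
    // the published count is held at the previously observed low watermark; once every node
    // reports at least the ideal, the state jumps directly to the ideal value, avoiding the
    // incremental re-distributions described in the comment above.
    static int publishedBitsWithWatermark(int idealBits, int lowestObservedWatermark, int lowestReportedBits) {
        return (lowestReportedBits >= idealBits) ? idealBits : lowestObservedWatermark;
    }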
+
+ private String do_test_storage_node_with_no_init_progress(State wantedState) {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.5))
+ .proposeStorageNodeWantedState(0, wantedState);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0));
+ nodeInfo.setInitProgressTime(10_000);
+
+ final ClusterStateGenerator.Params params = fixture.generatorParams()
+ .maxInitProgressTime(1000)
+ .currentTimeInMilllis(11_000);
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ return state.toString();
+ }
+
+ @Test
+ public void storage_node_with_no_init_progress_within_timeout_is_marked_down() {
+ assertThat(do_test_storage_node_with_no_init_progress(State.UP),
+ equalTo("distributor:3 storage:3 .0.s:d"));
+ }
+
+ /**
+ * As per usual, we shouldn't transition implicitly to Down if Maintenance is set
+ * as the wanted state.
+ */
+ @Test
+ public void maintenance_wanted_state_overrides_storage_node_with_no_init_progress() {
+ assertThat(do_test_storage_node_with_no_init_progress(State.MAINTENANCE),
+ equalTo("distributor:3 storage:3 .0.s:m"));
+ }
+
+ /**
+ * Legacy behavior: if a node has crashed (i.e. transitioned into Down) at least once
+ * while in Init mode, its subsequent init state will not be published in the cluster state.
+ * This means the node will remain marked Down until it has finished initializing.
+ * This is presumably because unstable nodes may not be able to finish their init stage
+ * and would otherwise pop in and out of the cluster state.
+ */
+ @Test
+ public void unstable_init_storage_node_has_init_state_substituted_by_down() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, State.INITIALIZING)
+ .reportStorageNodeState(0, State.DOWN) // Init -> Down triggers unstable init flag
+ .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.5));
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .0.s:d"));
+ }
+
+ @Test
+ public void storage_node_with_crashes_but_not_unstable_init_does_not_have_init_state_substituted_by_down() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.5));
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0));
+ nodeInfo.setPrematureCrashCount(5);
+
+ final AnnotatedClusterState state = generateFromFixtureWithDefaultParams(fixture);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .0.s:i .0.i:0.5"));
+ }
+
+ /**
+ * The generated state, not the reported state, must be considered when deciding whether
+ * to override a node with its wanted state. Otherwise, an unstable retired node could end
+ * up with a generated state of Retired instead of Down. We want it to stay Down rather
+ * than potentially contribute additional instability to the cluster.
+ */
+ @Test
+ public void unstable_retired_node_should_be_marked_down() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(5)
+ .bringEntireClusterUp()
+ .proposeStorageNodeWantedState(3, State.RETIRED);
+ final ClusterStateGenerator.Params params = fixture.generatorParams().maxPrematureCrashes(10);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 3));
+ nodeInfo.setPrematureCrashCount(11);
+
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:5 storage:5 .3.s:d"));
+ }
+
+ @Test
+ public void generator_params_can_inherit_values_from_controller_options() {
+ FleetControllerOptions options = new FleetControllerOptions("foocluster");
+ options.maxPrematureCrashes = 1;
+ options.minStorageNodesUp = 2;
+ options.minDistributorNodesUp = 3;
+ options.minRatioOfStorageNodesUp = 0.4;
+ options.minRatioOfDistributorNodesUp = 0.5;
+ options.minNodeRatioPerGroup = 0.6;
+ options.distributionBits = 7;
+ options.maxTransitionTime = ClusterStateGenerator.Params.buildTransitionTimeMap(1000, 2000);
+ final ClusterStateGenerator.Params params = ClusterStateGenerator.Params.fromOptions(options);
+ assertThat(params.maxPrematureCrashes, equalTo(options.maxPrematureCrashes));
+ assertThat(params.minStorageNodesUp, equalTo(options.minStorageNodesUp));
+ assertThat(params.minDistributorNodesUp, equalTo(options.minDistributorNodesUp));
+ assertThat(params.minRatioOfStorageNodesUp, equalTo(options.minRatioOfStorageNodesUp));
+ assertThat(params.minRatioOfDistributorNodesUp, equalTo(options.minRatioOfDistributorNodesUp));
+ assertThat(params.minNodeRatioPerGroup, equalTo(options.minNodeRatioPerGroup));
+ assertThat(params.transitionTimes, equalTo(options.maxTransitionTime));
+ }
+
+ @Test
+ public void configured_zero_init_progress_time_disables_auto_init_to_down_feature() {
+ final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
+ .bringEntireClusterUp()
+ .reportStorageNodeState(0, new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.5));
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0));
+ nodeInfo.setInitProgressTime(10_000);
+
+ final ClusterStateGenerator.Params params = fixture.generatorParams()
+ .maxInitProgressTime(0)
+ .currentTimeInMilllis(11_000);
+ final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
+ assertThat(state.toString(), equalTo("distributor:3 storage:3 .0.s:i .0.i:0.5"));
+ }
+
+}
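
For quick reference, the fixture -> params -> generator flow exercised by the tests above reduces to the fragment below. This is an illustrative sketch only: it reuses the helper names introduced in this patch, assumes the same imports and context as ClusterStateGeneratorTest, and does not assert a specific state string.

    // Build a fixture with reported node states, derive generator params, and generate the state.
    final ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
            .bringEntireClusterUp()
            .reportStorageNodeState(0, State.DOWN);
    final ClusterStateGenerator.Params params = fixture.generatorParams()
            .maxPrematureCrashes(10);
    final AnnotatedClusterState state = ClusterStateGenerator.generatedStateFrom(params);
    // state.toString() is the generated cluster state, e.g. "distributor:3 storage:3 .0.s:d"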
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java
index 1adb0dcad7d..74661147085 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/DistributionBitCountTest.java
@@ -74,13 +74,14 @@ public class DistributionBitCountTest extends FleetControllerTest {
nodes.get(3).setNodeState(new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(11));
ClusterState startState = waitForState("version:\\d+ bits:11 distributor:10 storage:10");
- ClusterState state = waitForClusterStateIncludingNodesWithMinUsedBits(11, 2);
nodes.get(1).setNodeState(new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(12));
- assertEquals(state + "->" + fleetController.getSystemState(), startState.getVersion(), fleetController.getSystemState().getVersion());
+ assertEquals(startState + "->" + fleetController.getSystemState(),
+ startState.getVersion(), fleetController.getSystemState().getVersion());
for (int i = 0; i < 10; ++i) {
- nodes.get(i).setNodeState(new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(17));
+ // nodes is an array of [distr.0, stor.0, distr.1, stor.1, ...] and we only want the storage nodes
+ nodes.get(i*2 + 1).setNodeState(new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(17));
}
assertEquals(startState.getVersion() + 1, waitForState("version:\\d+ bits:17 distributor:10 storage:10").getVersion());
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculatorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculatorTest.java
new file mode 100644
index 00000000000..2a5b3adcfe7
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/EventDiffCalculatorTest.java
@@ -0,0 +1,319 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import static com.yahoo.vespa.clustercontroller.core.matchers.EventForNode.eventForNode;
+import static com.yahoo.vespa.clustercontroller.core.matchers.NodeEventWithDescription.nodeEventWithDescription;
+import static com.yahoo.vespa.clustercontroller.core.matchers.ClusterEventWithDescription.clusterEventWithDescription;
+import static com.yahoo.vespa.clustercontroller.core.matchers.EventTypeIs.eventTypeIs;
+import static com.yahoo.vespa.clustercontroller.core.matchers.EventTimeIs.eventTimeIs;
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.CoreMatchers.hasItem;
+
+import static com.yahoo.vespa.clustercontroller.core.ClusterFixture.storageNode;
+import static com.yahoo.vespa.clustercontroller.core.ClusterFixture.distributorNode;
+
+import com.yahoo.vdslib.state.ClusterState;
+import com.yahoo.vdslib.state.Node;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+public class EventDiffCalculatorTest {
+
+ private static Map<Node, NodeStateReason> emptyNodeStateReasons() {
+ return Collections.emptyMap();
+ }
+
+ private static class EventFixture {
+ final ClusterFixture clusterFixture;
+ // TODO could reasonably put shared state into a common class to avoid dupes for both before/after
+ Optional<ClusterStateReason> clusterReasonBefore = Optional.empty();
+ Optional<ClusterStateReason> clusterReasonAfter = Optional.empty();
+ ClusterState clusterStateBefore = ClusterState.emptyState();
+ ClusterState clusterStateAfter = ClusterState.emptyState();
+ final Map<Node, NodeStateReason> nodeReasonsBefore = new HashMap<>();
+ final Map<Node, NodeStateReason> nodeReasonsAfter = new HashMap<>();
+ long currentTimeMs = 0;
+
+ EventFixture(int nodeCount) {
+ this.clusterFixture = ClusterFixture.forFlatCluster(nodeCount);
+ }
+
+ EventFixture clusterStateBefore(String stateStr) {
+ clusterStateBefore = ClusterState.stateFromString(stateStr);
+ return this;
+ }
+ EventFixture clusterStateAfter(String stateStr) {
+ clusterStateAfter = ClusterState.stateFromString(stateStr);
+ return this;
+ }
+ EventFixture storageNodeReasonBefore(int index, NodeStateReason reason) {
+ nodeReasonsBefore.put(storageNode(index), reason);
+ return this;
+ }
+ EventFixture storageNodeReasonAfter(int index, NodeStateReason reason) {
+ nodeReasonsAfter.put(storageNode(index), reason);
+ return this;
+ }
+ EventFixture clusterReasonBefore(ClusterStateReason reason) {
+ this.clusterReasonBefore = Optional.of(reason);
+ return this;
+ }
+ EventFixture clusterReasonAfter(ClusterStateReason reason) {
+ this.clusterReasonAfter = Optional.of(reason);
+ return this;
+ }
+ EventFixture currentTimeMs(long timeMs) {
+ this.currentTimeMs = timeMs;
+ return this;
+ }
+
+ List<Event> computeEventDiff() {
+ final AnnotatedClusterState stateBefore = new AnnotatedClusterState(
+ clusterStateBefore, clusterReasonBefore, nodeReasonsBefore);
+ final AnnotatedClusterState stateAfter = new AnnotatedClusterState(
+ clusterStateAfter, clusterReasonAfter, nodeReasonsAfter);
+
+ return EventDiffCalculator.computeEventDiff(
+ EventDiffCalculator.params()
+ .cluster(clusterFixture.cluster())
+ .fromState(stateBefore)
+ .toState(stateAfter)
+ .currentTimeMs(currentTimeMs));
+ }
+
+ static EventFixture createForNodes(int nodeCount) {
+ return new EventFixture(nodeCount);
+ }
+
+ }
+
+ @Test
+ public void single_storage_node_state_transition_emits_altered_node_state_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("distributor:3 storage:3 .0.s:d");
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ assertThat(events, hasItem(allOf(
+ eventForNode(storageNode(0)),
+ eventTypeIs(NodeEvent.Type.CURRENT),
+ nodeEventWithDescription("Altered node state in cluster state from 'U' to 'D'"))));
+ }
+
+ @Test
+ public void single_distributor_node_state_transition_emits_altered_node_state_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("distributor:3 .1.s:d storage:3");
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ assertThat(events, hasItem(allOf(
+ eventForNode(distributorNode(1)),
+ eventTypeIs(NodeEvent.Type.CURRENT),
+ nodeEventWithDescription("Altered node state in cluster state from 'U' to 'D'"))));
+ }
+
+ @Test
+ public void node_state_change_event_is_tagged_with_given_time() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("distributor:3 storage:3 .0.s:d")
+ .currentTimeMs(123456);
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ assertThat(events, hasItem(eventTimeIs(123456)));
+ }
+
+ @Test
+ public void multiple_node_state_transitions_emit_multiple_node_state_events() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3 .1.s:d")
+ .clusterStateAfter("distributor:3 .2.s:d storage:3 .0.s:r");
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(3));
+ assertThat(events, hasItem(allOf(
+ eventForNode(distributorNode(2)),
+ nodeEventWithDescription("Altered node state in cluster state from 'U' to 'D'"))));
+ assertThat(events, hasItem(allOf(
+ eventForNode(storageNode(0)),
+ nodeEventWithDescription("Altered node state in cluster state from 'U' to 'R'"))));
+ assertThat(events, hasItem(allOf(
+ eventForNode(storageNode(1)),
+ nodeEventWithDescription("Altered node state in cluster state from 'D' to 'U'"))));
+ }
+
+ @Test
+ public void no_emitted_node_state_event_when_node_state_not_changed() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("distributor:3 storage:3");
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(0));
+ }
+
+ @Test
+ public void node_down_edge_with_group_down_reason_has_separate_event_emitted() {
+ // We sneakily use a flat cluster here but still use a 'group down' reason. Differ doesn't currently care.
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("distributor:3 storage:3 .1.s:d")
+ .storageNodeReasonAfter(1, NodeStateReason.GROUP_IS_DOWN);
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(2));
+ // Both the regular edge event and the group down event are emitted
+ assertThat(events, hasItem(allOf(
+ eventForNode(storageNode(1)),
+ nodeEventWithDescription("Altered node state in cluster state from 'U' to 'D'"))));
+ assertThat(events, hasItem(allOf(
+ eventForNode(storageNode(1)),
+ eventTypeIs(NodeEvent.Type.CURRENT),
+ nodeEventWithDescription("Group node availability is below configured threshold"))));
+ }
+
+ @Test
+ public void group_down_to_group_down_does_not_emit_new_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3 .1.s:d")
+ .clusterStateAfter("distributor:3 storage:3 .1.s:m")
+ .storageNodeReasonBefore(1, NodeStateReason.GROUP_IS_DOWN)
+ .storageNodeReasonAfter(1, NodeStateReason.GROUP_IS_DOWN);
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ // Should not get a group availability event since nothing has changed in this regard
+ assertThat(events, hasItem(allOf(
+ eventForNode(storageNode(1)),
+ nodeEventWithDescription("Altered node state in cluster state from 'D' to 'M'"))));
+ }
+
+ @Test
+ public void group_down_to_clear_reason_emits_group_up_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3 .2.s:d")
+ .clusterStateAfter("distributor:3 storage:3")
+ .storageNodeReasonBefore(2, NodeStateReason.GROUP_IS_DOWN); // But no after-reason.
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(2));
+ assertThat(events, hasItem(allOf(
+ eventForNode(storageNode(2)),
+ nodeEventWithDescription("Altered node state in cluster state from 'D' to 'U'"))));
+ assertThat(events, hasItem(allOf(
+ eventForNode(storageNode(2)),
+ eventTypeIs(NodeEvent.Type.CURRENT),
+ nodeEventWithDescription("Group node availability has been restored"))));
+ }
+
+ @Test
+ public void cluster_up_edge_emits_sufficient_node_availability_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("cluster:d distributor:3 storage:3")
+ .clusterStateAfter("distributor:3 storage:3");
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ assertThat(events, hasItem(
+ clusterEventWithDescription("Enough nodes available for system to become up")));
+ }
+
+ @Test
+ public void cluster_down_event_without_reason_annotation_emits_generic_down_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("cluster:d distributor:3 storage:3");
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ assertThat(events, hasItem(
+ clusterEventWithDescription("Cluster is down")));
+ }
+
+ @Test
+ public void cluster_event_is_tagged_with_given_time() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("cluster:d distributor:3 storage:3")
+ .currentTimeMs(56789);
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ assertThat(events, hasItem(eventTimeIs(56789)));
+ }
+
+ @Test
+ public void no_event_emitted_for_cluster_down_to_down_edge() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("cluster:d distributor:3 storage:3")
+ .clusterStateAfter("cluster:d distributor:3 storage:3");
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(0));
+ }
+
+ @Test
+ public void too_few_storage_nodes_cluster_down_reason_emits_corresponding_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("cluster:d distributor:3 storage:3")
+ .clusterReasonAfter(ClusterStateReason.TOO_FEW_STORAGE_NODES_AVAILABLE);
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ // TODO(?) these messages do not currently include the configured limits
+ assertThat(events, hasItem(
+ clusterEventWithDescription("Too few storage nodes available in cluster. Setting cluster state down")));
+ }
+
+ @Test
+ public void too_few_distributor_nodes_cluster_down_reason_emits_corresponding_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("cluster:d distributor:3 storage:3")
+ .clusterReasonAfter(ClusterStateReason.TOO_FEW_DISTRIBUTOR_NODES_AVAILABLE);
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ assertThat(events, hasItem(
+ clusterEventWithDescription("Too few distributor nodes available in cluster. Setting cluster state down")));
+ }
+
+ @Test
+ public void too_low_storage_node_ratio_cluster_down_reason_emits_corresponding_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("cluster:d distributor:3 storage:3")
+ .clusterReasonAfter(ClusterStateReason.TOO_LOW_AVAILABLE_STORAGE_NODE_RATIO);
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ assertThat(events, hasItem(
+ clusterEventWithDescription("Too low ratio of available storage nodes. Setting cluster state down")));
+ }
+
+ @Test
+ public void too_low_distributor_node_ratio_cluster_down_reason_emits_corresponding_event() {
+ final EventFixture fixture = EventFixture.createForNodes(3)
+ .clusterStateBefore("distributor:3 storage:3")
+ .clusterStateAfter("cluster:d distributor:3 storage:3")
+ .clusterReasonAfter(ClusterStateReason.TOO_LOW_AVAILABLE_DISTRIBUTOR_NODE_RATIO);
+
+ final List<Event> events = fixture.computeEventDiff();
+ assertThat(events.size(), equalTo(1));
+ assertThat(events, hasItem(
+ clusterEventWithDescription("Too low ratio of available distributor nodes. Setting cluster state down")));
+ }
+
+}
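
The event diff entry point driven by the EventFixture above can be summarized with the fragment below. It is a sketch only, reusing the constructors and builder methods shown in this file and assuming the same imports as the test class.

    // Compute the events emitted when storage node 0 goes from Up to Down between two annotated states.
    final AnnotatedClusterState before = new AnnotatedClusterState(
            ClusterState.stateFromString("distributor:3 storage:3"),
            Optional.empty(), Collections.emptyMap());
    final AnnotatedClusterState after = new AnnotatedClusterState(
            ClusterState.stateFromString("distributor:3 storage:3 .0.s:d"),
            Optional.empty(), Collections.emptyMap());
    final List<Event> events = EventDiffCalculator.computeEventDiff(
            EventDiffCalculator.params()
                    .cluster(ClusterFixture.forFlatCluster(3).cluster())
                    .fromState(before)
                    .toState(after)
                    .currentTimeMs(123456));
    // As in the first test above, this yields a single NodeEvent for the 'U' -> 'D' transition.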
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
index f4b3e648f63..d0aa0bceba9 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/FleetControllerTest.java
@@ -6,13 +6,11 @@ import com.yahoo.jrt.slobrok.server.Slobrok;
import com.yahoo.log.LogLevel;
import com.yahoo.log.LogSetup;
import com.yahoo.vdslib.distribution.ConfiguredNode;
-import com.yahoo.vdslib.distribution.Distribution;
import com.yahoo.vdslib.state.ClusterState;
import com.yahoo.vdslib.state.Node;
import com.yahoo.vdslib.state.NodeState;
import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler;
-import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo;
import com.yahoo.vespa.clustercontroller.core.rpc.RPCCommunicator;
import com.yahoo.vespa.clustercontroller.core.rpc.RpcServer;
import com.yahoo.vespa.clustercontroller.core.rpc.SlobrokClient;
@@ -150,7 +148,7 @@ public abstract class FleetControllerTest implements Waiter {
}
RpcServer rpcServer = new RpcServer(timer, timer, options.clusterName, options.fleetControllerIndex, options.slobrokBackOffPolicy);
DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
- SystemStateGenerator stateGenerator = new SystemStateGenerator(timer, log, metricUpdater);
+ StateChangeHandler stateGenerator = new StateChangeHandler(timer, log, metricUpdater);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
FleetController controller = new FleetController(timer, log, cluster, stateGatherer, communicator, status, rpcServer, lookUp, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java
index be60fba234a..a7307e0180a 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/GroupAutoTakedownTest.java
@@ -9,19 +9,22 @@ import com.yahoo.vdslib.state.NodeType;
import com.yahoo.vdslib.state.State;
import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler;
import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler;
-import com.yahoo.vespa.clustercontroller.core.listeners.SystemStateListener;
+
+import static com.yahoo.vespa.clustercontroller.core.matchers.EventForNode.eventForNode;
+import static com.yahoo.vespa.clustercontroller.core.matchers.NodeEventWithDescription.nodeEventWithDescription;
import org.junit.Test;
-import org.mockito.ArgumentMatcher;
import java.util.HashSet;
+import java.util.List;
import java.util.Set;
import static org.hamcrest.core.AllOf.allOf;
+import static org.hamcrest.core.IsCollectionContaining.hasItem;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -43,26 +46,29 @@ public class GroupAutoTakedownTest {
}
private static void setSharedFixtureOptions(ClusterFixture fixture, double minNodeRatioPerGroup) {
- fixture.generator.setMinNodeRatioPerGroup(minNodeRatioPerGroup);
+ fixture.setMinNodeRatioPerGroup(minNodeRatioPerGroup);
fixture.disableTransientMaintenanceModeOnDown();
fixture.disableAutoClusterTakedown();
fixture.bringEntireClusterUp();
}
private String stateAfterStorageTransition(ClusterFixture fixture, final int index, final State state) {
- transitionStoreNodeToState(fixture, index, state);
+ transitionStorageNodeToState(fixture, index, state);
return fixture.generatedClusterState();
}
private String verboseStateAfterStorageTransition(ClusterFixture fixture, final int index, final State state) {
- transitionStoreNodeToState(fixture, index, state);
+ transitionStorageNodeToState(fixture, index, state);
return fixture.verboseGeneratedClusterState();
}
- private void transitionStoreNodeToState(ClusterFixture fixture, int index, State state) {
+ private void transitionStorageNodeToState(ClusterFixture fixture, int index, State state) {
fixture.reportStorageNodeState(index, state);
- SystemStateListener listener = mock(SystemStateListener.class);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener));
+ }
+
+ private AnnotatedClusterState annotatedStateAfterStorageTransition(ClusterFixture fixture, final int index, final State state) {
+ transitionStorageNodeToState(fixture, index, state);
+ return fixture.annotatedGeneratedClusterState();
}
/**
@@ -74,12 +80,9 @@ public class GroupAutoTakedownTest {
public void config_does_not_apply_to_flat_hierarchy_clusters() {
ClusterFixture fixture = createFixtureForAllUpFlatCluster(5, 0.99);
- SystemStateListener listener = mock(SystemStateListener.class);
- // First invocation; generates initial state and clears "new state" flag
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener));
- assertEquals("version:1 distributor:5 storage:5", fixture.generatedClusterState());
+ assertEquals("distributor:5 storage:5", fixture.generatedClusterState());
- assertEquals("version:2 distributor:5 storage:5 .1.s:d",
+ assertEquals("distributor:5 storage:5 .1.s:d",
stateAfterStorageTransition(fixture, 1, State.DOWN));
}
@@ -88,15 +91,13 @@ public class GroupAutoTakedownTest {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51);
- SystemStateListener listener = mock(SystemStateListener.class);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener));
- assertEquals("version:1 distributor:6 storage:6", fixture.generatedClusterState());
+ assertEquals("distributor:6 storage:6", fixture.generatedClusterState());
// Same group as node 4
- assertEquals("version:2 distributor:6 storage:4",
+ assertEquals("distributor:6 storage:4",
stateAfterStorageTransition(fixture, 5, State.DOWN));
// Same group as node 1
- assertEquals("version:3 distributor:6 storage:4 .0.s:d .1.s:d",
+ assertEquals("distributor:6 storage:4 .0.s:d .1.s:d",
stateAfterStorageTransition(fixture, 0, State.DOWN));
}
@@ -106,11 +107,11 @@ public class GroupAutoTakedownTest {
DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51);
// Group #2 -> down
- assertEquals("version:1 distributor:6 storage:4",
+ assertEquals("distributor:6 storage:4",
stateAfterStorageTransition(fixture, 5, State.DOWN));
// Group #2 -> back up again
- assertEquals("version:2 distributor:6 storage:6",
+ assertEquals("distributor:6 storage:6",
stateAfterStorageTransition(fixture, 5, State.UP));
}
@@ -119,16 +120,12 @@ public class GroupAutoTakedownTest {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51);
- assertEquals("version:1 distributor:6 storage:4",
+ assertEquals("distributor:6 storage:4",
stateAfterStorageTransition(fixture, 5, State.DOWN));
// 4, 5 in same group; this should not cause a new state since it's already implicitly down
fixture.reportStorageNodeState(4, State.DOWN);
-
- SystemStateListener listener = mock(SystemStateListener.class);
- assertFalse(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener));
-
- assertEquals("version:1 distributor:6 storage:4", fixture.generatedClusterState());
+ assertEquals("distributor:6 storage:4", fixture.generatedClusterState());
}
@Test
@@ -139,7 +136,7 @@ public class GroupAutoTakedownTest {
// Nodes 6 and 7 are taken down implicitly and should have a message reflecting this.
// Node 8 is taken down by the fixture and gets a fixture-assigned message that
// we should _not_ lose/overwrite.
- assertEquals("version:1 distributor:9 storage:9 .6.s:d " +
+ assertEquals("distributor:9 storage:9 .6.s:d " +
".6.m:group\\x20node\\x20availability\\x20below\\x20configured\\x20threshold " +
".7.s:d " +
".7.m:group\\x20node\\x20availability\\x20below\\x20configured\\x20threshold " +
@@ -151,12 +148,12 @@ public class GroupAutoTakedownTest {
public void legacy_cluster_wide_availabilty_ratio_is_computed_after_group_takedowns() {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51);
- fixture.generator.setMinNodesUp(5, 5, 0.51, 0.51);
+ fixture.setMinNodesUp(5, 5, 0.51, 0.51);
// Taking down a node in a group forces the entire group down, which leaves us with
// only 4 content nodes (vs. minimum of 5 as specified above). The entire cluster
// should be marked as down in this case.
- assertEquals("version:1 cluster:d distributor:6 storage:4",
+ assertEquals("cluster:d distributor:6 storage:4",
stateAfterStorageTransition(fixture, 5, State.DOWN));
}
@@ -165,16 +162,12 @@ public class GroupAutoTakedownTest {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(3), 0.99);
- NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 5));
- fixture.generator.proposeNewNodeState(nodeInfo, new NodeState(NodeType.STORAGE, State.MAINTENANCE));
- SystemStateListener listener = mock(SystemStateListener.class);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener));
-
+ fixture.proposeStorageNodeWantedState(5, State.MAINTENANCE);
// Maintenance not counted as down, so group still up
- assertEquals("version:1 distributor:9 storage:9 .5.s:m", fixture.generatedClusterState());
+ assertEquals("distributor:9 storage:9 .5.s:m", fixture.generatedClusterState());
// Group goes down, but maintenance node should still be in maintenance
- assertEquals("version:2 distributor:9 storage:9 .3.s:d .4.s:d .5.s:m",
+ assertEquals("distributor:9 storage:9 .3.s:d .4.s:d .5.s:m",
stateAfterStorageTransition(fixture, 4, State.DOWN));
}
@@ -186,51 +179,16 @@ public class GroupAutoTakedownTest {
// Our timers are mocked, so taking down node 4 will deterministically transition to
// a transient maintenance mode. Group should not be taken down here.
- assertEquals("version:1 distributor:9 storage:9 .4.s:m",
+ assertEquals("distributor:9 storage:9 .4.s:m",
stateAfterStorageTransition(fixture, 4, State.DOWN));
// However, once grace period expires the group should be taken down.
fixture.timer.advanceTime(1001);
NodeStateOrHostInfoChangeHandler changeListener = mock(NodeStateOrHostInfoChangeHandler.class);
- fixture.generator.watchTimers(fixture.cluster, changeListener);
- SystemStateListener stateListener = mock(SystemStateListener.class);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, stateListener));
-
- assertEquals("version:2 distributor:9 storage:9 .3.s:d .4.s:d .5.s:d", fixture.generatedClusterState());
- }
-
- private static class NodeEventWithDescription extends ArgumentMatcher<NodeEvent> {
- private final String expected;
-
- NodeEventWithDescription(String expected) {
- this.expected = expected;
- }
-
- @Override
- public boolean matches(Object o) {
- return expected.equals(((NodeEvent)o).getDescription());
- }
- }
+ fixture.nodeStateChangeHandler.watchTimers(
+ fixture.cluster, fixture.annotatedGeneratedClusterState().getClusterState(), changeListener);
- private static NodeEventWithDescription nodeEventWithDescription(String description) {
- return new NodeEventWithDescription(description);
- }
-
- private static class EventForNode extends ArgumentMatcher<NodeEvent> {
- private final Node expected;
-
- EventForNode(Node expected) {
- this.expected = expected;
- }
-
- @Override
- public boolean matches(Object o) {
- return ((NodeEvent)o).getNode().getNode().equals(expected);
- }
- }
-
- private static EventForNode eventForNode(Node expected) {
- return new EventForNode(expected);
+ assertEquals("distributor:9 storage:9 .3.s:d .4.s:d .5.s:d", fixture.generatedClusterState());
}
private static Node contentNode(int index) {
@@ -242,13 +200,14 @@ public class GroupAutoTakedownTest {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51);
- assertEquals("version:1 distributor:6 storage:4",
- stateAfterStorageTransition(fixture, 5, State.DOWN));
+ final List<Event> events = EventDiffCalculator.computeEventDiff(EventDiffCalculator.params()
+ .cluster(fixture.cluster)
+ .fromState(fixture.annotatedGeneratedClusterState())
+ .toState(annotatedStateAfterStorageTransition(fixture, 5, State.DOWN)));
- verify(fixture.eventLog).addNodeOnlyEvent(argThat(allOf(
- nodeEventWithDescription("Setting node down as the total availability of its group is " +
- "below the configured threshold"),
- eventForNode(contentNode(4)))), any());
+ assertThat(events, hasItem(allOf(
+ nodeEventWithDescription("Group node availability is below configured threshold"),
+ eventForNode(contentNode(4)))));
}
@Test
@@ -256,30 +215,31 @@ public class GroupAutoTakedownTest {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51);
- assertEquals("version:1 distributor:6 storage:4",
+ assertEquals("distributor:6 storage:4",
stateAfterStorageTransition(fixture, 5, State.DOWN));
- assertEquals("version:2 distributor:6 storage:6",
- stateAfterStorageTransition(fixture, 5, State.UP));
- verify(fixture.eventLog).addNodeOnlyEvent(argThat(allOf(
- nodeEventWithDescription("Group availability restored; taking node back up"),
- eventForNode(contentNode(4)))), any());
+ final List<Event> events = EventDiffCalculator.computeEventDiff(EventDiffCalculator.params()
+ .cluster(fixture.cluster)
+ .fromState(fixture.annotatedGeneratedClusterState())
+ .toState(annotatedStateAfterStorageTransition(fixture, 5, State.UP)));
+
+ assertThat(events, hasItem(allOf(
+ nodeEventWithDescription("Group node availability has been restored"),
+ eventForNode(contentNode(4)))));
}
@Test
- public void wanted_state_retired_implicitly_down_node_transitioned_it_to_retired_mode_immediately() {
+ public void wanted_state_retired_implicitly_down_node_is_transitioned_to_retired_mode_immediately() {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(3), 0.99);
- assertEquals("version:1 distributor:9 storage:6",
+ assertEquals("distributor:9 storage:6",
stateAfterStorageTransition(fixture, 6, State.DOWN));
// Node 7 is implicitly down. Mark wanted state as retired. It should now be Retired
// but not Down.
fixture.proposeStorageNodeWantedState(7, State.RETIRED);
- SystemStateListener stateListener = mock(SystemStateListener.class);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, stateListener));
- assertEquals("version:2 distributor:9 storage:8 .6.s:d .7.s:r", fixture.generatedClusterState());
+ assertEquals("distributor:9 storage:8 .6.s:d .7.s:r", fixture.generatedClusterState());
}
@Test
@@ -287,9 +247,9 @@ public class GroupAutoTakedownTest {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.49);
- assertEquals("version:1 distributor:6 storage:6 .4.s:d",
+ assertEquals("distributor:6 storage:6 .4.s:d",
stateAfterStorageTransition(fixture, 4, State.DOWN));
- assertEquals("version:2 distributor:6 storage:4",
+ assertEquals("distributor:6 storage:4",
stateAfterStorageTransition(fixture, 5, State.DOWN));
// Node 5 gets config-retired under our feet.
@@ -299,9 +259,8 @@ public class GroupAutoTakedownTest {
// TODO this should ideally also set the retired flag in the distribution
// config, but only the ConfiguredNodes are actually looked at currently.
fixture.cluster.setNodes(nodes);
- fixture.generator.setNodes(fixture.cluster.clusterInfo());
- assertEquals("version:3 distributor:6 storage:6 .4.s:d .5.s:r",
+ assertEquals("distributor:6 storage:6 .4.s:d .5.s:r",
stateAfterStorageTransition(fixture, 5, State.UP));
}
@@ -314,14 +273,12 @@ public class GroupAutoTakedownTest {
newState.setInitProgress(0.5);
fixture.reportStorageNodeState(4, newState);
- SystemStateListener stateListener = mock(SystemStateListener.class);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, stateListener));
- assertEquals("version:1 distributor:6 storage:6 .4.s:i .4.i:0.5", fixture.generatedClusterState());
+ assertEquals("distributor:6 storage:6 .4.s:i .4.i:0.5", fixture.generatedClusterState());
- assertEquals("version:2 distributor:6 storage:4",
+ assertEquals("distributor:6 storage:4",
stateAfterStorageTransition(fixture, 5, State.DOWN));
- assertEquals("version:3 distributor:6 storage:6 .4.s:i .4.i:0.5",
+ assertEquals("distributor:6 storage:6 .4.s:i .4.i:0.5",
stateAfterStorageTransition(fixture, 5, State.UP));
}
@@ -330,20 +287,17 @@ public class GroupAutoTakedownTest {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(2), 0.51);
- final Node node = new Node(NodeType.STORAGE, 4);
final NodeState newState = new NodeState(NodeType.STORAGE, State.UP);
newState.setDiskCount(7);
newState.setDiskState(5, new DiskState(State.DOWN));
fixture.reportStorageNodeState(4, newState);
- SystemStateListener stateListener = mock(SystemStateListener.class);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, stateListener));
- assertEquals("version:1 distributor:6 storage:6 .4.d:7 .4.d.5.s:d", fixture.generatedClusterState());
+ assertEquals("distributor:6 storage:6 .4.d:7 .4.d.5.s:d", fixture.generatedClusterState());
- assertEquals("version:2 distributor:6 storage:4",
+ assertEquals("distributor:6 storage:4",
stateAfterStorageTransition(fixture, 5, State.DOWN));
- assertEquals("version:3 distributor:6 storage:6 .4.d:7 .4.d.5.s:d",
+ assertEquals("distributor:6 storage:6 .4.d:7 .4.d.5.s:d",
stateAfterStorageTransition(fixture, 5, State.UP));
}
@@ -352,19 +306,15 @@ public class GroupAutoTakedownTest {
ClusterFixture fixture = createFixtureForAllUpHierarchicCluster(
DistributionBuilder.withGroups(3).eachWithNodeCount(3), 0.60);
- NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 5));
- nodeInfo.setWantedState(new NodeState(NodeType.STORAGE, State.DOWN).setDescription("borkbork"));
- fixture.generator.proposeNewNodeState(nodeInfo, nodeInfo.getWantedState());
- SystemStateListener listener = mock(SystemStateListener.class);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener));
+ fixture.proposeStorageNodeWantedState(5, State.DOWN, "borkbork");
- assertEquals("version:1 distributor:9 storage:9 .5.s:d .5.m:borkbork", fixture.verboseGeneratedClusterState());
+ assertEquals("distributor:9 storage:9 .5.s:d .5.m:borkbork", fixture.verboseGeneratedClusterState());
- assertEquals("version:2 distributor:9 storage:9 " +
+ assertEquals("distributor:9 storage:9 " +
".3.s:d .3.m:group\\x20node\\x20availability\\x20below\\x20configured\\x20threshold " +
".4.s:d .4.m:mockdesc .5.s:d .5.m:borkbork",
verboseStateAfterStorageTransition(fixture, 4, State.DOWN));
- assertEquals("version:3 distributor:9 storage:9 .5.s:d .5.m:borkbork",
+ assertEquals("distributor:9 storage:9 .5.s:d .5.m:borkbork",
verboseStateAfterStorageTransition(fixture, 4, State.UP));
}
@@ -378,25 +328,23 @@ public class GroupAutoTakedownTest {
fixture.reportStorageNodeState(4, newState);
- SystemStateListener listener = mock(SystemStateListener.class);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener));
-
- assertEquals("version:1 distributor:6 storage:6 .4.t:123456", fixture.generatedClusterState());
+ assertEquals("distributor:6 storage:6 .4.t:123456", fixture.generatedClusterState());
DatabaseHandler handler = mock(DatabaseHandler.class);
DatabaseHandler.Context context = mock(DatabaseHandler.Context.class);
when(context.getCluster()).thenReturn(fixture.cluster);
- fixture.generator.handleAllDistributorsInSync(handler, context);
- assertTrue(fixture.generator.notifyIfNewSystemState(fixture.cluster, listener));
+ Set<ConfiguredNode> nodes = new HashSet<>(fixture.cluster.clusterInfo().getConfiguredNodes().values());
+ fixture.nodeStateChangeHandler.handleAllDistributorsInSync(
+ fixture.annotatedGeneratedClusterState().getClusterState(), nodes, handler, context);
// Timestamp should now be cleared from state
- assertEquals("version:2 distributor:6 storage:6", fixture.generatedClusterState());
+ assertEquals("distributor:6 storage:6", fixture.generatedClusterState());
// Trigger a group down+up edge. Timestamp should _not_ be reintroduced since it was previously cleared.
- assertEquals("version:3 distributor:6 storage:4",
+ assertEquals("distributor:6 storage:4",
stateAfterStorageTransition(fixture, 5, State.DOWN));
- assertEquals("version:4 distributor:6 storage:6",
+ assertEquals("distributor:6 storage:6",
stateAfterStorageTransition(fixture, 5, State.UP));
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java
index ba2cd287a9a..80435ee7c7d 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/MasterElectionTest.java
@@ -191,15 +191,15 @@ public class MasterElectionTest extends FleetControllerTest {
log.log(LogLevel.INFO, "Leaving waitForMaster");
}
- private static class VersionMonotonicityChecker {
+ private static class StrictlyIncreasingVersionChecker {
private ClusterState lastState;
- private VersionMonotonicityChecker(ClusterState initialState) {
+ private StrictlyIncreasingVersionChecker(ClusterState initialState) {
this.lastState = initialState;
}
- public static VersionMonotonicityChecker bootstrappedWith(ClusterState initialState) {
- return new VersionMonotonicityChecker(initialState);
+ public static StrictlyIncreasingVersionChecker bootstrappedWith(ClusterState initialState) {
+ return new StrictlyIncreasingVersionChecker(initialState);
}
public void updateAndVerify(ClusterState currentState) {
@@ -207,7 +207,7 @@ public class MasterElectionTest extends FleetControllerTest {
lastState = currentState;
if (currentState.getVersion() <= last.getVersion()) {
throw new IllegalStateException(
- String.format("Cluster state version monotonicity invariant broken! " +
+ String.format("Cluster state version strict increase invariant broken! " +
"Old state was '%s', new state is '%s'", last, currentState));
}
}
@@ -226,7 +226,8 @@ public class MasterElectionTest extends FleetControllerTest {
waitForStableSystem();
waitForMaster(0);
Arrays.asList(0, 1, 2, 3, 4).stream().forEach(this::waitForCompleteCycle);
- VersionMonotonicityChecker checker = VersionMonotonicityChecker.bootstrappedWith(fleetControllers.get(0).getClusterState());
+ StrictlyIncreasingVersionChecker checker = StrictlyIncreasingVersionChecker.bootstrappedWith(
+ fleetControllers.get(0).getClusterState());
fleetControllers.get(0).shutdown();
waitForMaster(1);
Arrays.asList(1, 2, 3, 4).stream().forEach(this::waitForCompleteCycle);
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java
new file mode 100644
index 00000000000..bf0adf7736c
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/NodeInfoTest.java
@@ -0,0 +1,80 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import com.yahoo.vdslib.state.Node;
+import com.yahoo.vdslib.state.NodeType;
+import com.yahoo.vdslib.state.State;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class NodeInfoTest {
+
+ @Test
+ public void unstable_init_flag_is_initially_clear() {
+ ClusterFixture fixture = ClusterFixture.forFlatCluster(3);
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ assertFalse(nodeInfo.recentlyObservedUnstableDuringInit());
+ }
+
+ private static ClusterFixture fixtureWithNodeMarkedAsUnstableInit(int nodeIndex) {
+ return ClusterFixture.forFlatCluster(3)
+ .reportStorageNodeState(nodeIndex, State.INITIALIZING)
+ .reportStorageNodeState(nodeIndex, State.DOWN);
+ }
+
+ @Test
+ public void down_edge_during_init_state_marks_as_unstable_init() {
+ ClusterFixture fixture = fixtureWithNodeMarkedAsUnstableInit(1);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ assertTrue(nodeInfo.recentlyObservedUnstableDuringInit());
+ }
+
+ @Test
+ public void stopping_edge_during_init_does_not_mark_as_unstable_init() {
+ ClusterFixture fixture = ClusterFixture.forFlatCluster(3).reportStorageNodeState(0, State.INITIALIZING);
+ fixture.reportStorageNodeState(0, State.STOPPING);
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 0));
+
+ assertFalse(nodeInfo.recentlyObservedUnstableDuringInit());
+ }
+
+ /**
+ * The cluster controller will, after a time of observed stable state, reset the crash
+ * counter for a given node. This should also reset the unstable init flag to keep it
+ * from haunting a now stable node.
+ */
+ @Test
+ public void zeroing_crash_count_resets_unstable_init_flag() {
+ ClusterFixture fixture = fixtureWithNodeMarkedAsUnstableInit(1);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ nodeInfo.setPrematureCrashCount(0);
+ assertFalse(nodeInfo.recentlyObservedUnstableDuringInit());
+ }
+
+ /**
+ * A non-zero crash count update, on the other hand, implies the node is suffering
+ * further instabilities and should not clear the unstable init flag.
+ */
+ @Test
+ public void non_zero_crash_count_update_does_not_reset_unstable_init_flag() {
+ ClusterFixture fixture = fixtureWithNodeMarkedAsUnstableInit(1);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ nodeInfo.setPrematureCrashCount(3);
+ assertTrue(nodeInfo.recentlyObservedUnstableDuringInit());
+ }
+
+ @Test
+ public void non_zero_crash_count_does_not_implicitly_set_unstable_init_flag() {
+ ClusterFixture fixture = ClusterFixture.forFlatCluster(3);
+
+ final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
+ nodeInfo.setPrematureCrashCount(1);
+ assertFalse(nodeInfo.recentlyObservedUnstableDuringInit());
+ }
+
+}
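
The unstable-init flag lifecycle covered above condenses to the fragment below, a sketch under the same imports and context as NodeInfoTest that combines the cases the individual tests assert.

    // An Init -> Down edge sets the flag; resetting the premature crash count to zero clears it again.
    ClusterFixture fixture = ClusterFixture.forFlatCluster(3)
            .reportStorageNodeState(1, State.INITIALIZING)
            .reportStorageNodeState(1, State.DOWN);
    final NodeInfo nodeInfo = fixture.cluster.getNodeInfo(new Node(NodeType.STORAGE, 1));
    assertTrue(nodeInfo.recentlyObservedUnstableDuringInit());
    nodeInfo.setPrematureCrashCount(0);   // reset after a period of observed stability
    assertFalse(nodeInfo.recentlyObservedUnstableDuringInit());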
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java
index 2816b75622e..f7f86907205 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/RpcServerTest.java
@@ -437,13 +437,13 @@ public class RpcServerTest extends FleetControllerTest {
{ // Configuration change: Remove the previously retired nodes
/*
TODO: Verify current result: version:23 distributor:7 .0.s:d .1.s:d .2.s:d .3.s:d .4.s:d storage:7 .0.s:m .1.s:m .2.s:m .3.s:m .4.s:m
- TODO: Make this work without stopping/disconnecting (see SystemStateGenerator.setNodes
+ TODO: Make this work without stopping/disconnecting (see StateChangeHandler.setNodes
Set<ConfiguredNode> configuredNodes = new TreeSet<>();
configuredNodes.add(new ConfiguredNode(5, false));
configuredNodes.add(new ConfiguredNode(6, false));
FleetControllerOptions options = new FleetControllerOptions("mycluster", configuredNodes);
options.slobrokConnectionSpecs = this.options.slobrokConnectionSpecs;
- this.options.maxInitProgressTime = 30000;
+ this.options.maxInitProgressTimeMs = 30000;
this.options.stableStateTimePeriod = 60000;
fleetController.updateOptions(options, 0);
for (int i = 0; i < 5*2; i++) {
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/SystemStateGeneratorTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandlerTest.java
index 35118933b42..f591e8efc06 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/SystemStateGeneratorTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeHandlerTest.java
@@ -6,7 +6,6 @@ import com.yahoo.vdslib.distribution.Distribution;
import com.yahoo.vdslib.state.*;
import com.yahoo.vespa.clustercontroller.core.hostinfo.HostInfo;
import com.yahoo.vespa.clustercontroller.core.listeners.NodeStateOrHostInfoChangeHandler;
-import com.yahoo.vespa.clustercontroller.core.listeners.SystemStateListener;
import com.yahoo.vespa.clustercontroller.core.mocks.TestEventLog;
import com.yahoo.vespa.clustercontroller.core.testutils.LogFormatter;
import junit.framework.TestCase;
@@ -16,33 +15,16 @@ import java.util.Set;
import java.util.TreeSet;
import java.util.logging.Logger;
-public class SystemStateGeneratorTest extends TestCase {
- private static final Logger log = Logger.getLogger(SystemStateGeneratorTest.class.getName());
- class Config {
+public class StateChangeHandlerTest extends TestCase {
+ private static final Logger log = Logger.getLogger(StateChangeHandlerTest.class.getName());
+ private class Config {
int nodeCount = 3;
int stableStateTime = 1000 * 60000;
int maxSlobrokDisconnectPeriod = 60000;
int maxPrematureCrashes = 3;
}
- class TestSystemStateListener implements SystemStateListener {
- LinkedList<ClusterState> states = new LinkedList<>();
- @Override
- public void handleNewSystemState(ClusterState state) {
- states.add(state);
- }
-
- public String toString() {
- StringBuilder sb = new StringBuilder();
- sb.append("States(");
- for (ClusterState state : states) sb.append('\n').append(state.toString());
- sb.append(")");
- return sb.toString();
- }
-
- }
-
- class TestNodeStateOrHostInfoChangeHandler implements NodeStateOrHostInfoChangeHandler {
+ private class TestNodeStateOrHostInfoChangeHandler implements NodeStateOrHostInfoChangeHandler {
LinkedList<String> events = new LinkedList<>();
@@ -75,9 +57,9 @@ public class SystemStateGeneratorTest extends TestCase {
private Set<ConfiguredNode> configuredNodes = new TreeSet<>();
private Config config;
private ContentCluster cluster;
- private SystemStateGenerator generator;
- private TestSystemStateListener systemStateListener;
+ private StateChangeHandler nodeStateChangeHandler;
private TestNodeStateOrHostInfoChangeHandler nodeStateUpdateListener;
+ private final ClusterStateGenerator.Params params = new ClusterStateGenerator.Params();
public void setUp() {
LogFormatter.initializeLogging();
@@ -88,20 +70,18 @@ public class SystemStateGeneratorTest extends TestCase {
this.config = config;
for (int i=0; i<config.nodeCount; ++i) configuredNodes.add(new ConfiguredNode(i, false));
cluster = new ContentCluster("testcluster", configuredNodes, distribution, 0, 0.0);
- generator = new SystemStateGenerator(clock, eventLog, null);
- generator.setNodes(cluster.clusterInfo());
- generator.setStableStateTimePeriod(config.stableStateTime);
- generator.setMaxPrematureCrashes(config.maxPrematureCrashes);
- generator.setMaxSlobrokDisconnectGracePeriod(config.maxSlobrokDisconnectPeriod);
- generator.setMinNodesUp(1, 1, 0, 0);
- systemStateListener = new TestSystemStateListener();
+ nodeStateChangeHandler = new StateChangeHandler(clock, eventLog, null);
+ params.minStorageNodesUp(1).minDistributorNodesUp(1)
+ .minRatioOfStorageNodesUp(0.0).minRatioOfDistributorNodesUp(0.0)
+ .maxPrematureCrashes(config.maxPrematureCrashes)
+ .transitionTimes(5000)
+ .cluster(cluster);
nodeStateUpdateListener = new TestNodeStateOrHostInfoChangeHandler();
}
- private void assertNewClusterStateReceived() {
- assertTrue(generator.notifyIfNewSystemState(cluster, systemStateListener));
- assertTrue(systemStateListener.toString(), systemStateListener.states.size() == 1);
- systemStateListener.states.clear();
+ private ClusterState currentClusterState() {
+ params.currentTimeInMilllis(clock.getCurrentTimeInMillis());
+ return ClusterStateGenerator.generatedStateFrom(params).getClusterState();
}
private void startWithStableStateClusterWithNodesUp() {
@@ -109,61 +89,55 @@ public class SystemStateGeneratorTest extends TestCase {
for (ConfiguredNode i : configuredNodes) {
NodeInfo nodeInfo = cluster.clusterInfo().setRpcAddress(new Node(type, i.index()), null);
nodeInfo.markRpcAddressLive();
- generator.handleNewReportedNodeState(nodeInfo, new NodeState(type, State.UP), null);
+ nodeStateChangeHandler.handleNewReportedNodeState(
+ currentClusterState(), nodeInfo, new NodeState(type, State.UP), null);
nodeInfo.setReportedState(new NodeState(type, State.UP), clock.getCurrentTimeInMillis());
}
}
- assertNewClusterStateReceived();
for (NodeType type : NodeType.getTypes()) {
for (ConfiguredNode i : configuredNodes) {
Node n = new Node(type, i.index());
- assertEquals(State.UP, generator.getClusterState().getNodeState(n).getState());
+ assertEquals(State.UP, currentClusterState().getNodeState(n).getState());
}
}
clock.advanceTime(config.stableStateTime);
}
private void markNodeOutOfSlobrok(Node node) {
+ final ClusterState stateBefore = currentClusterState();
log.info("Marking " + node + " out of slobrok");
cluster.getNodeInfo(node).markRpcAddressOutdated(clock);
- generator.handleMissingNode(cluster.getNodeInfo(node), nodeStateUpdateListener);
- assertTrue(nodeStateUpdateListener.toString(), nodeStateUpdateListener.events.isEmpty());
- nodeStateUpdateListener.events.clear();
+ nodeStateChangeHandler.handleMissingNode(stateBefore, cluster.getNodeInfo(node), nodeStateUpdateListener);
assertTrue(eventLog.toString(), eventLog.toString().contains("Node is no longer in slobrok"));
eventLog.clear();
}
private void markNodeBackIntoSlobrok(Node node, State state) {
+ final ClusterState stateBefore = currentClusterState();
log.info("Marking " + node + " back in slobrok");
cluster.getNodeInfo(node).markRpcAddressLive();
- generator.handleReturnedRpcAddress(cluster.getNodeInfo(node));
- assertEquals(0, nodeStateUpdateListener.events.size());
- assertEquals(0, systemStateListener.states.size());
- generator.handleNewReportedNodeState(cluster.getNodeInfo(node), new NodeState(node.getType(), state), nodeStateUpdateListener);
+ nodeStateChangeHandler.handleReturnedRpcAddress(cluster.getNodeInfo(node));
+ nodeStateChangeHandler.handleNewReportedNodeState(
+ stateBefore, cluster.getNodeInfo(node),
+ new NodeState(node.getType(), state), nodeStateUpdateListener);
cluster.getNodeInfo(node).setReportedState(new NodeState(node.getType(), state), clock.getCurrentTimeInMillis());
- assertEquals(0, nodeStateUpdateListener.events.size());
- assertEquals(0, systemStateListener.states.size());
}
private void verifyClusterStateChanged(Node node, State state) {
log.info("Verifying cluster state has been updated for " + node + " to " + state);
- assertTrue(generator.notifyIfNewSystemState(cluster, systemStateListener));
- assertEquals(1, systemStateListener.states.size());
- assertEquals(state, systemStateListener.states.get(0).getNodeState(node).getState());
- systemStateListener.states.clear();
- assertEquals(state, generator.getClusterState().getNodeState(node).getState());
+ assertTrue(nodeStateChangeHandler.stateMayHaveChanged());
+ assertEquals(state, currentClusterState().getNodeState(node).getState());
}
private void verifyNodeStateAfterTimerWatch(Node node, State state) {
log.info("Verifying state of node after timer watch.");
- generator.watchTimers(cluster, nodeStateUpdateListener);
+ nodeStateChangeHandler.watchTimers(cluster, currentClusterState(), nodeStateUpdateListener);
assertEquals(0, nodeStateUpdateListener.events.size());
verifyClusterStateChanged(node, state);
}
private void verifyPrematureCrashCountCleared(Node node) {
- assertTrue(generator.watchTimers(cluster, nodeStateUpdateListener));
- assertEquals(0, nodeStateUpdateListener.events.size());
+ assertTrue(nodeStateChangeHandler.watchTimers(cluster, currentClusterState(), nodeStateUpdateListener));
assertEquals(0, cluster.getNodeInfo(node).getPrematureCrashCount());
}
@@ -175,15 +149,15 @@ public class SystemStateGeneratorTest extends TestCase {
log.info("Iteration " + j);
assertEquals(0, cluster.getNodeInfo(node).getPrematureCrashCount());
assertEquals(State.UP, cluster.getNodeInfo(node).getWantedState().getState());
- assertEquals(State.UP, generator.getClusterState().getNodeState(node).getState());
+ assertEquals(State.UP, currentClusterState().getNodeState(node).getState());
for (int k=0; k<config.maxPrematureCrashes; ++k) {
log.info("Premature iteration " + k);
markNodeOutOfSlobrok(node);
log.info("Passing max disconnect time period. Watching timers");
clock.advanceTime(config.maxSlobrokDisconnectPeriod);
-
verifyNodeStateAfterTimerWatch(node, State.MAINTENANCE);
+
cluster.getNodeInfo(node).setReportedState(new NodeState(node.getType(), State.DOWN), clock.getCurrentTimeInMillis());
assertEquals(k, cluster.getNodeInfo(node).getPrematureCrashCount());
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java
index b94691bb880..c31f80d9b53 100644
--- a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateChangeTest.java
@@ -8,8 +8,10 @@ import com.yahoo.vespa.clustercontroller.core.database.DatabaseHandler;
import com.yahoo.vespa.clustercontroller.core.testutils.StateWaiter;
import com.yahoo.vespa.clustercontroller.utils.util.NoMetricReporter;
import org.junit.Before;
-import org.junit.Ignore;
import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.MatcherAssert.assertThat;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
@@ -43,7 +45,7 @@ public class StateChangeTest extends FleetControllerTest {
options.minStorageNodesUp, options.minRatioOfStorageNodesUp);
NodeStateGatherer stateGatherer = new NodeStateGatherer(timer, timer, eventLog);
DatabaseHandler database = new DatabaseHandler(timer, options.zooKeeperServerAddress, options.fleetControllerIndex, timer);
- SystemStateGenerator stateGenerator = new SystemStateGenerator(timer, eventLog, metricUpdater);
+ StateChangeHandler stateGenerator = new StateChangeHandler(timer, eventLog, metricUpdater);
SystemStateBroadcaster stateBroadcaster = new SystemStateBroadcaster(timer, timer);
MasterElectionHandler masterElectionHandler = new MasterElectionHandler(options.fleetControllerIndex, options.fleetControllerCount, timer, timer);
ctrl = new FleetController(timer, eventLog, cluster, stateGatherer, communicator, null, null, communicator, database, stateGenerator, stateBroadcaster, masterElectionHandler, metricUpdater, options);
@@ -109,8 +111,13 @@ public class StateChangeTest extends FleetControllerTest {
// Now, fleet controller should have generated a new cluster state.
ctrl.tick();
- assertEquals("version:6 distributor:10 .0.s:i .0.i:0.0 .1.s:i .1.i:0.0 .2.s:i .2.i:0.0 .3.s:i .3.i:0.0 .4.s:i .4.i:0.0 .5.s:i .5.i:0.0 .6.s:i .6.i:0.0 .7.s:i .7.i:0.0 .8.s:i .8.i:0.0 .9.s:i .9.i:0.0 storage:10 .0.s:i .0.i:0.9 .1.s:i .1.i:0.9 .2.s:i .2.i:0.9 .3.s:i .3.i:0.9 .4.s:i .4.i:0.9 .5.s:i .5.i:0.9 .6.s:i .6.i:0.9 .7.s:i .7.i:0.9 .8.s:i .8.i:0.9 .9.s:i .9.i:0.9",
- ctrl.getSystemState().toString());
+ // Regular init progress does not update the cluster state until the node is done initializing (or goes down,
+ // whichever comes first).
+ assertEquals("version:6 distributor:10 .0.s:i .0.i:0.0 .1.s:i .1.i:0.0 .2.s:i .2.i:0.0 .3.s:i .3.i:0.0 " +
+ ".4.s:i .4.i:0.0 .5.s:i .5.i:0.0 .6.s:i .6.i:0.0 .7.s:i .7.i:0.0 .8.s:i .8.i:0.0 " +
+ ".9.s:i .9.i:0.0 storage:10 .0.s:i .0.i:0.1 .1.s:i .1.i:0.1 .2.s:i .2.i:0.1 .3.s:i .3.i:0.1 " +
+ ".4.s:i .4.i:0.1 .5.s:i .5.i:0.1 .6.s:i .6.i:0.1 .7.s:i .7.i:0.1 .8.s:i .8.i:0.1 .9.s:i .9.i:0.1",
+ ctrl.consolidatedClusterState().toString());
timer.advanceTime(options.maxInitProgressTime / 20);
ctrl.tick();
@@ -131,24 +138,23 @@ public class StateChangeTest extends FleetControllerTest {
assertEquals("version:8 distributor:10 storage:10", ctrl.getSystemState().toString());
-
verifyNodeEvents(new Node(NodeType.DISTRIBUTOR, 0),
"Event: distributor.0: Now reporting state U\n" +
- "Event: distributor.0: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: distributor.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: distributor.0: Now reporting state I, i 0.00\n" +
- "Event: distributor.0: Altered node state in cluster state from 'U' to 'I, i 0.00'.\n" +
+ "Event: distributor.0: Altered node state in cluster state from 'U' to 'I, i 0.00'\n" +
"Event: distributor.0: Now reporting state U\n" +
- "Event: distributor.0: Altered node state in cluster state from 'I, i 0.00' to 'U'.\n");
+ "Event: distributor.0: Altered node state in cluster state from 'I, i 0.00' to 'U'\n");
verifyNodeEvents(new Node(NodeType.STORAGE, 0),
"Event: storage.0: Now reporting state U\n" +
- "Event: storage.0: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: storage.0: Now reporting state I, i 0.00 (ls)\n" +
- "Event: storage.0: Altered node state in cluster state from 'U' to 'D: Listing buckets. Progress 0.0 %.'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'U' to 'D'\n" +
"Event: storage.0: Now reporting state I, i 0.100 (read)\n" +
- "Event: storage.0: Altered node state in cluster state from 'D: Listing buckets. Progress 0.0 %.' to 'I, i 0.100 (read)'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'D' to 'I, i 0.100 (read)'\n" +
"Event: storage.0: Now reporting state U\n" +
- "Event: storage.0: Altered node state in cluster state from 'I, i 0.900 (read)' to 'U'.\n");
+ "Event: storage.0: Altered node state in cluster state from 'I, i 0.100 (read)' to 'U'\n");
}
@Test
@@ -172,7 +178,6 @@ public class StateChangeTest extends FleetControllerTest {
assertEquals("version:4 distributor:10 .0.s:d storage:10", ctrl.getSystemState().toString());
timer.advanceTime(1000);
- long distStartTime = timer.getCurrentTimeInMillis() / 1000;
ctrl.tick();
@@ -210,23 +215,24 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.DISTRIBUTOR, 0),
"Event: distributor.0: Now reporting state U\n" +
- "Event: distributor.0: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: distributor.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: distributor.0: Failed to get node state: D: Closed at other end\n" +
"Event: distributor.0: Stopped or possibly crashed after 0 ms, which is before stable state time period. Premature crash count is now 1.\n" +
- "Event: distributor.0: Altered node state in cluster state from 'U' to 'D: Closed at other end'.\n" +
+ "Event: distributor.0: Altered node state in cluster state from 'U' to 'D: Closed at other end'\n" +
"Event: distributor.0: Now reporting state U, t 12345678\n" +
- "Event: distributor.0: Altered node state in cluster state from 'D: Closed at other end' to 'U, t 12345678'.\n");
+ "Event: distributor.0: Altered node state in cluster state from 'D: Closed at other end' to 'U, t 12345678'\n" +
+ "Event: distributor.0: Altered node state in cluster state from 'U, t 12345678' to 'U'\n");
verifyNodeEvents(new Node(NodeType.STORAGE, 0),
"Event: storage.0: Now reporting state U\n" +
- "Event: storage.0: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: storage.0: Failed to get node state: D: Closed at other end\n" +
"Event: storage.0: Stopped or possibly crashed after 1000 ms, which is before stable state time period. Premature crash count is now 1.\n" +
- "Event: storage.0: Altered node state in cluster state from 'U' to 'M: Closed at other end'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'U' to 'M: Closed at other end'\n" +
"Event: storage.0: 5001 milliseconds without contact. Marking node down.\n" +
- "Event: storage.0: Altered node state in cluster state from 'M: Closed at other end' to 'D: Closed at other end'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'M: Closed at other end' to 'D: Closed at other end'\n" +
"Event: storage.0: Now reporting state U, t 12345679\n" +
- "Event: storage.0: Altered node state in cluster state from 'D: Closed at other end' to 'U, t 12345679'.\n");
+ "Event: storage.0: Altered node state in cluster state from 'D: Closed at other end' to 'U, t 12345679'\n");
assertEquals(1, ctrl.getCluster().getNodeInfo(new Node(NodeType.DISTRIBUTOR, 0)).getPrematureCrashCount());
assertEquals(1, ctrl.getCluster().getNodeInfo(new Node(NodeType.STORAGE, 0)).getPrematureCrashCount());
@@ -239,7 +245,7 @@ public class StateChangeTest extends FleetControllerTest {
@Test
public void testNodeGoingDownAndUpNotifying() throws Exception {
- // Same test as above, but node manage to notify why it is going down first.
+ // Same test as above, but node manages to notify why it is going down first.
FleetControllerOptions options = new FleetControllerOptions("mycluster", createNodes(10));
options.nodeStateRequestTimeoutMS = 60 * 60 * 1000;
options.maxSlobrokDisconnectGracePeriod = 100000;
@@ -291,21 +297,21 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.DISTRIBUTOR, 0),
"Event: distributor.0: Now reporting state U\n" +
- "Event: distributor.0: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: distributor.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: distributor.0: Failed to get node state: D: controlled shutdown\n" +
- "Event: distributor.0: Altered node state in cluster state from 'U' to 'D: controlled shutdown'.\n" +
+ "Event: distributor.0: Altered node state in cluster state from 'U' to 'D: controlled shutdown'\n" +
"Event: distributor.0: Now reporting state U\n" +
- "Event: distributor.0: Altered node state in cluster state from 'D: controlled shutdown' to 'U'.\n");
+ "Event: distributor.0: Altered node state in cluster state from 'D: controlled shutdown' to 'U'\n");
verifyNodeEvents(new Node(NodeType.STORAGE, 0),
"Event: storage.0: Now reporting state U\n" +
- "Event: storage.0: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: storage.0: Failed to get node state: D: controlled shutdown\n" +
- "Event: storage.0: Altered node state in cluster state from 'U' to 'M: controlled shutdown'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'U' to 'M: controlled shutdown'\n" +
"Event: storage.0: 5001 milliseconds without contact. Marking node down.\n" +
- "Event: storage.0: Altered node state in cluster state from 'M: controlled shutdown' to 'D: controlled shutdown'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'M: controlled shutdown' to 'D: controlled shutdown'\n" +
"Event: storage.0: Now reporting state U\n" +
- "Event: storage.0: Altered node state in cluster state from 'D: controlled shutdown' to 'U'.\n");
+ "Event: storage.0: Altered node state in cluster state from 'D: controlled shutdown' to 'U'\n");
}
@@ -346,7 +352,7 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 0),
"Event: storage.0: Now reporting state U\n" +
- "Event: storage.0: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: storage.0: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: storage.0: Node is no longer in slobrok, but we still have a pending state request.\n");
}
@@ -393,15 +399,15 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 6),
"Event: storage.6: Now reporting state U\n" +
- "Event: storage.6: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" +
"Event: storage.6: Stopped or possibly crashed after 0 ms, which is before stable state time period. Premature crash count is now 1.\n" +
- "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'\n" +
"Event: storage.6: Now reporting state I, i 0.00 (ls)\n" +
"Event: storage.6: Now reporting state I, i 0.600 (read)\n" +
- "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'I, i 0.600 (read)'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'I, i 0.600 (read)'\n" +
"Event: storage.6: Now reporting state U\n" +
- "Event: storage.6: Altered node state in cluster state from 'I, i 0.600 (read)' to 'U'.\n");
+ "Event: storage.6: Altered node state in cluster state from 'I, i 0.600 (read)' to 'U'\n");
}
@Test
@@ -453,14 +459,14 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 6),
"Event: storage.6: Now reporting state U\n" +
- "Event: storage.6: Altered node state in cluster state from 'D' to 'R'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'R'\n" +
"Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" +
"Event: storage.6: Stopped or possibly crashed after 0 ms, which is before stable state time period. Premature crash count is now 1.\n" +
- "Event: storage.6: Altered node state in cluster state from 'R' to 'M: Connection error: Closed at other end'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'R' to 'M: Connection error: Closed at other end'\n" +
"Event: storage.6: Now reporting state I, i 0.00 (ls)\n" +
"Event: storage.6: Now reporting state I, i 0.600 (read)\n" +
"Event: storage.6: Now reporting state U\n" +
- "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'R: Connection error: Closed at other end'.\n");
+ "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'R'\n");
}
@Test
@@ -522,7 +528,7 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
- assertEquals("Listing buckets. Progress 0.1 %.", ctrl.getSystemState().getNodeState(new Node(NodeType.STORAGE, 6)).getDescription());
+ assertEquals("version:5 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
communicator.setNodeState(new Node(NodeType.STORAGE, 6), new NodeState(NodeType.STORAGE, State.INITIALIZING).setInitProgress(0.1), "");
@@ -542,16 +548,16 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 6),
"Event: storage.6: Now reporting state U\n" +
- "Event: storage.6: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" +
- "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'\n" +
"Event: storage.6: 100000 milliseconds without contact. Marking node down.\n" +
- "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'D: Connection error: Closed at other end'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'D: Connection error: Closed at other end'\n" +
"Event: storage.6: Now reporting state I, i 0.00100 (ls)\n" +
"Event: storage.6: Now reporting state I, i 0.100 (read)\n" +
- "Event: storage.6: Altered node state in cluster state from 'D: Listing buckets. Progress 0.1 %.' to 'I, i 0.100 (read)'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'D: Connection error: Closed at other end' to 'I, i 0.100 (read)'\n" +
"Event: storage.6: Now reporting state U\n" +
- "Event: storage.6: Altered node state in cluster state from 'I, i 0.100 (read)' to 'U'.\n");
+ "Event: storage.6: Altered node state in cluster state from 'I, i 0.100 (read)' to 'U'\n");
}
@Test
@@ -613,9 +619,6 @@ public class StateChangeTest extends FleetControllerTest {
// Still down since it seemingly crashed during last init.
assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
- assertEquals("Down: 5001 ms without initialize progress. Assuming node has deadlocked.",
- ctrl.getSystemState().getNodeState(new Node(NodeType.STORAGE, 6)).toString());
-
ctrl.tick();
communicator.setNodeState(new Node(NodeType.STORAGE, 6), State.UP, "");
@@ -626,20 +629,20 @@ public class StateChangeTest extends FleetControllerTest {
verifyNodeEvents(new Node(NodeType.STORAGE, 6),
"Event: storage.6: Now reporting state U\n" +
- "Event: storage.6: Altered node state in cluster state from 'D' to 'U'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
"Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" +
- "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'U' to 'M: Connection error: Closed at other end'\n" +
"Event: storage.6: 1000000 milliseconds without contact. Marking node down.\n" +
- "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'D: Connection error: Closed at other end'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'M: Connection error: Closed at other end' to 'D: Connection error: Closed at other end'\n" +
"Event: storage.6: Now reporting state I, i 0.100 (read)\n" +
- "Event: storage.6: Altered node state in cluster state from 'D: Connection error: Closed at other end' to 'I, i 0.100 (read)'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'D: Connection error: Closed at other end' to 'I, i 0.100 (read)'\n" +
"Event: storage.6: 5001 milliseconds without initialize progress. Marking node down. Premature crash count is now 1.\n" +
- "Event: storage.6: Altered node state in cluster state from 'I, i 0.100 (read)' to 'D: 5001 ms without initialize progress. Assuming node has deadlocked.'.\n" +
+ "Event: storage.6: Altered node state in cluster state from 'I, i 0.100 (read)' to 'D'\n" +
"Event: storage.6: Failed to get node state: D: Connection error: Closed at other end\n" +
"Event: storage.6: Now reporting state I, i 0.00 (ls)\n" +
"Event: storage.6: Now reporting state I, i 0.100 (read)\n" +
"Event: storage.6: Now reporting state U\n" +
- "Event: storage.6: Altered node state in cluster state from 'D: 5001 ms without initialize progress. Assuming node has deadlocked.' to 'U'.\n");
+ "Event: storage.6: Altered node state in cluster state from 'D' to 'U'\n");
}
@@ -684,9 +687,6 @@ public class StateChangeTest extends FleetControllerTest {
ctrl.tick();
assertEquals("version:7 distributor:10 storage:10 .6.s:d", ctrl.getSystemState().toString());
-
- String desc = ctrl.getSystemState().getNodeState(new Node(NodeType.STORAGE, 6)).getDescription();
- assertEquals("Got reverse intialize progress. Assuming node have prematurely crashed", desc);
}
@Test
@@ -1132,4 +1132,70 @@ public class StateChangeTest extends FleetControllerTest {
}
}
+ @Test
+ public void consolidated_cluster_state_reflects_node_changes_when_cluster_is_down() throws Exception {
+ FleetControllerOptions options = new FleetControllerOptions("mycluster", createNodes(10));
+ options.maxTransitionTime.put(NodeType.STORAGE, 0);
+ options.minStorageNodesUp = 10;
+ options.minDistributorNodesUp = 10;
+ initialize(options);
+
+ ctrl.tick();
+ assertThat(ctrl.consolidatedClusterState().toString(), equalTo("version:3 distributor:10 storage:10"));
+
+ communicator.setNodeState(new Node(NodeType.STORAGE, 2), State.DOWN, "foo");
+ ctrl.tick();
+
+ assertThat(ctrl.consolidatedClusterState().toString(),
+ equalTo("version:4 cluster:d distributor:10 storage:10 .2.s:d"));
+
+ // After this point, any further node changes while the cluster is still down won't be published.
+ // This is because cluster state similarity checks are short-circuited when both states are Down, since no other
+ // parts of the state matter in that case. Despite this, REST API access and similar features need up-to-date
+ // information, and therefore need a state which represents the _current_ state rather than the published state.
+ // The consolidated state offers this by selectively generating the current state on-demand if the
+ // cluster is down.
+ communicator.setNodeState(new Node(NodeType.STORAGE, 5), State.DOWN, "bar");
+ ctrl.tick();
+
+ // NOTE: _same_ version, different node state content. Overall cluster down-state is still the same.
+ assertThat(ctrl.consolidatedClusterState().toString(),
+ equalTo("version:4 cluster:d distributor:10 storage:10 .2.s:d .5.s:d"));
+ }
+
+ // Related to the above test, watchTimer invocations must receive the _current_ state and not the
+ // published state. Failure to ensure this would cause events to be fired non-stop, as the effect
+ // of previous timer invocations (with subsequent state generation) would not be visible.
+ @Test
+ public void timer_events_during_cluster_down_observe_most_recent_node_changes() throws Exception {
+ FleetControllerOptions options = new FleetControllerOptions("mycluster", createNodes(10));
+ options.maxTransitionTime.put(NodeType.STORAGE, 1000);
+ options.minStorageNodesUp = 10;
+ options.minDistributorNodesUp = 10;
+ initialize(options);
+
+ ctrl.tick();
+ communicator.setNodeState(new Node(NodeType.STORAGE, 2), State.DOWN, "foo");
+ timer.advanceTime(500);
+ ctrl.tick();
+ communicator.setNodeState(new Node(NodeType.STORAGE, 3), State.DOWN, "foo");
+ ctrl.tick();
+ assertThat(ctrl.consolidatedClusterState().toString(), equalTo("version:4 cluster:d distributor:10 storage:10 .2.s:m .3.s:m"));
+
+ // Subsequent timer ticks should _not_ trigger additional events. If only the published state were
+ // provided, a "Marking node down" event for node 2 would be emitted on every tick.
+ for (int i = 0; i < 3; ++i) {
+ timer.advanceTime(5000);
+ ctrl.tick();
+ }
+
+ verifyNodeEvents(new Node(NodeType.STORAGE, 2),
+ "Event: storage.2: Now reporting state U\n" +
+ "Event: storage.2: Altered node state in cluster state from 'D: Node not seen in slobrok.' to 'U'\n" +
+ "Event: storage.2: Failed to get node state: D: foo\n" +
+ "Event: storage.2: Stopped or possibly crashed after 500 ms, which is before stable state time period. Premature crash count is now 1.\n" +
+ "Event: storage.2: Altered node state in cluster state from 'U' to 'M: foo'\n" +
+ "Event: storage.2: 5000 milliseconds without contact. Marking node down.\n");
+ }
+
}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateVersionTrackerTest.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateVersionTrackerTest.java
new file mode 100644
index 00000000000..72f8c9fb8b7
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/StateVersionTrackerTest.java
@@ -0,0 +1,229 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core;
+
+import com.yahoo.vdslib.state.ClusterState;
+import com.yahoo.vdslib.state.Node;
+import com.yahoo.vdslib.state.NodeState;
+import com.yahoo.vdslib.state.NodeType;
+import com.yahoo.vdslib.state.State;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Optional;
+
+import static org.hamcrest.CoreMatchers.hasItems;
+import static org.hamcrest.core.IsEqual.equalTo;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.mock;
+
+public class StateVersionTrackerTest {
+
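+ /** Wraps the given cluster state string in an AnnotatedClusterState with no cluster or node state reasons attached. */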
+ private static AnnotatedClusterState stateWithoutAnnotations(String stateStr) {
+ final ClusterState state = ClusterState.stateFromString(stateStr);
+ return new AnnotatedClusterState(state, Optional.empty(), AnnotatedClusterState.emptyNodeStateReasons());
+ }
+
+ private static StateVersionTracker createWithMockedMetrics() {
+ return new StateVersionTracker(mock(MetricUpdater.class));
+ }
+
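+ /** Feeds the state to the tracker as the latest candidate and immediately promotes it to a versioned state. */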
+ private static void updateAndPromote(final StateVersionTracker versionTracker,
+ final AnnotatedClusterState state,
+ final long timeMs)
+ {
+ versionTracker.updateLatestCandidateState(state);
+ versionTracker.promoteCandidateToVersionedState(timeMs);
+ }
+
+ @Test
+ public void version_is_incremented_when_new_state_is_applied() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ versionTracker.setVersionRetrievedFromZooKeeper(100);
+ updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:2 storage:2"), 123);
+ assertThat(versionTracker.getCurrentVersion(), equalTo(101));
+ assertThat(versionTracker.getVersionedClusterState().toString(), equalTo("version:101 distributor:2 storage:2"));
+ }
+
+ @Test
+ public void version_is_1_upon_construction() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ assertThat(versionTracker.getCurrentVersion(), equalTo(1));
+ }
+
+ @Test
+ public void set_current_version_caps_lowest_version_to_1() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ versionTracker.setVersionRetrievedFromZooKeeper(0);
+ assertThat(versionTracker.getCurrentVersion(), equalTo(1));
+ }
+
+ @Test
+ public void new_version_from_zk_predicate_initially_false() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ assertThat(versionTracker.hasReceivedNewVersionFromZooKeeper(), is(false));
+ }
+
+ @Test
+ public void new_version_from_zk_predicate_true_after_setting_zk_version() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ versionTracker.setVersionRetrievedFromZooKeeper(5);
+ assertThat(versionTracker.hasReceivedNewVersionFromZooKeeper(), is(true));
+ }
+
+ @Test
+ public void new_version_from_zk_predicate_false_after_applying_higher_version() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ versionTracker.setVersionRetrievedFromZooKeeper(5);
+ updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:2 storage:2"), 123);
+ assertThat(versionTracker.hasReceivedNewVersionFromZooKeeper(), is(false));
+ }
+
+ @Test
+ public void exposed_states_are_empty_upon_construction() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ assertThat(versionTracker.getVersionedClusterState().toString(), equalTo(""));
+ assertThat(versionTracker.getAnnotatedVersionedClusterState().getClusterState().toString(), equalTo(""));
+ }
+
+ @Test
+ public void diff_from_initial_state_implies_changed_state() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ versionTracker.updateLatestCandidateState(stateWithoutAnnotations("cluster:d"));
+ assertTrue(versionTracker.candidateChangedEnoughFromCurrentToWarrantPublish());
+ }
+
+ private static boolean stateChangedBetween(String fromState, String toState) {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ updateAndPromote(versionTracker, stateWithoutAnnotations(fromState), 123);
+ versionTracker.updateLatestCandidateState(stateWithoutAnnotations(toState));
+ return versionTracker.candidateChangedEnoughFromCurrentToWarrantPublish();
+ }
+
+ @Test
+ public void version_mismatch_not_counted_as_changed_state() {
+ assertFalse(stateChangedBetween("distributor:2 storage:2", "distributor:2 storage:2"));
+ }
+
+ @Test
+ public void different_distributor_node_count_implies_changed_state() {
+ assertTrue(stateChangedBetween("distributor:2 storage:2", "distributor:3 storage:2"));
+ assertTrue(stateChangedBetween("distributor:3 storage:2", "distributor:2 storage:2"));
+ }
+
+ @Test
+ public void different_storage_node_count_implies_changed_state() {
+ assertTrue(stateChangedBetween("distributor:2 storage:2", "distributor:2 storage:3"));
+ assertTrue(stateChangedBetween("distributor:2 storage:3", "distributor:2 storage:2"));
+ }
+
+ @Test
+ public void different_distributor_node_state_implies_changed_state() {
+ assertTrue(stateChangedBetween("distributor:2 storage:2", "distributor:2 .0.s:d storage:2"));
+ assertTrue(stateChangedBetween("distributor:2 .0.s:d storage:2", "distributor:2 storage:2"));
+ }
+
+ @Test
+ public void different_storage_node_state_implies_changed_state() {
+ assertTrue(stateChangedBetween("distributor:2 storage:2", "distributor:2 storage:2 .0.s:d"));
+ assertTrue(stateChangedBetween("distributor:2 storage:2 .0.s:d", "distributor:2 storage:2"));
+ }
+
+ @Test
+ public void lowest_observed_distribution_bit_is_initially_16() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ assertThat(versionTracker.getLowestObservedDistributionBits(), equalTo(16));
+ }
+
+ @Test
+ public void lowest_observed_distribution_bit_is_tracked_across_states() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ updateAndPromote(versionTracker, stateWithoutAnnotations("bits:15 distributor:2 storage:2"), 100);
+ assertThat(versionTracker.getLowestObservedDistributionBits(), equalTo(15));
+
+ updateAndPromote(versionTracker, stateWithoutAnnotations("bits:17 distributor:2 storage:2"), 200);
+ assertThat(versionTracker.getLowestObservedDistributionBits(), equalTo(15));
+
+ updateAndPromote(versionTracker, stateWithoutAnnotations("bits:14 distributor:2 storage:2"), 300);
+ assertThat(versionTracker.getLowestObservedDistributionBits(), equalTo(14));
+ }
+
+ // For similarity purposes, only the cluster-wide bits matter, not the individual node state
+ // min used bits. The former is derived from the latter, but the latter is not visible in the
+ // published state (but _is_ visible in the internal ClusterState structures).
+ @Test
+ public void per_node_min_bits_changes_are_not_considered_different() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ final AnnotatedClusterState stateWithMinBits = stateWithoutAnnotations("distributor:2 storage:2");
+ stateWithMinBits.getClusterState().setNodeState(
+ new Node(NodeType.STORAGE, 0),
+ new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(15));
+ updateAndPromote(versionTracker, stateWithMinBits, 123);
+ versionTracker.updateLatestCandidateState(stateWithoutAnnotations("distributor:2 storage:2"));
+ assertFalse(versionTracker.candidateChangedEnoughFromCurrentToWarrantPublish());
+ }
+
+ @Test
+ public void state_history_is_initially_empty() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ assertTrue(versionTracker.getClusterStateHistory().isEmpty());
+ }
+
+ private static ClusterStateHistoryEntry historyEntry(final String state, final long time) {
+ return new ClusterStateHistoryEntry(ClusterState.stateFromString(state), time);
+ }
+
+ @Test
+ public void applying_state_adds_to_cluster_state_history() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:2 storage:2"), 100);
+ updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:3 storage:3"), 200);
+ updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:4 storage:4"), 300);
+
+ // Note: newest entry first
+ assertThat(versionTracker.getClusterStateHistory(),
+ equalTo(Arrays.asList(
+ historyEntry("version:4 distributor:4 storage:4", 300),
+ historyEntry("version:3 distributor:3 storage:3", 200),
+ historyEntry("version:2 distributor:2 storage:2", 100))));
+ }
+
+ @Test
+ public void old_states_pruned_when_state_history_limit_reached() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+ versionTracker.setMaxHistoryEntryCount(2);
+
+ updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:2 storage:2"), 100);
+ updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:3 storage:3"), 200);
+ updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:4 storage:4"), 300);
+
+ assertThat(versionTracker.getClusterStateHistory(),
+ equalTo(Arrays.asList(
+ historyEntry("version:4 distributor:4 storage:4", 300),
+ historyEntry("version:3 distributor:3 storage:3", 200))));
+
+ updateAndPromote(versionTracker, stateWithoutAnnotations("distributor:5 storage:5"), 400);
+
+ assertThat(versionTracker.getClusterStateHistory(),
+ equalTo(Arrays.asList(
+ historyEntry("version:5 distributor:5 storage:5", 400),
+ historyEntry("version:4 distributor:4 storage:4", 300))));
+ }
+
+ @Test
+ public void can_get_latest_non_published_candidate_state() {
+ final StateVersionTracker versionTracker = createWithMockedMetrics();
+
+ AnnotatedClusterState candidate = stateWithoutAnnotations("distributor:2 storage:2");
+ versionTracker.updateLatestCandidateState(candidate);
+ assertThat(versionTracker.getLatestCandidateState(), equalTo(candidate));
+
+ candidate = stateWithoutAnnotations("distributor:3 storage:3");
+ versionTracker.updateLatestCandidateState(candidate);
+ assertThat(versionTracker.getLatestCandidateState(), equalTo(candidate));
+ }
+
+}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java
new file mode 100644
index 00000000000..111a2c63144
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/ClusterEventWithDescription.java
@@ -0,0 +1,40 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core.matchers;
+
+import com.yahoo.vespa.clustercontroller.core.ClusterEvent;
+import com.yahoo.vespa.clustercontroller.core.NodeEvent;
+import org.hamcrest.Description;
+import org.hamcrest.Factory;
+import org.mockito.ArgumentMatcher;
+
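+/** Mockito argument matcher accepting any ClusterEvent whose description equals the expected string. */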
+public class ClusterEventWithDescription extends ArgumentMatcher<ClusterEvent> {
+ private final String expected;
+
+ public ClusterEventWithDescription(String expected) {
+ this.expected = expected;
+ }
+
+ @Override
+ public boolean matches(Object o) {
+ if (!(o instanceof ClusterEvent)) {
+ return false;
+ }
+ return expected.equals(((ClusterEvent) o).getDescription());
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(String.format("ClusterEvent with description '%s'", expected));
+ }
+
+ @Override
+ public void describeMismatch(Object item, Description description) {
+ ClusterEvent other = (ClusterEvent)item;
+ description.appendText(String.format("got description '%s'", other.getDescription()));
+ }
+
+ @Factory
+ public static ClusterEventWithDescription clusterEventWithDescription(String description) {
+ return new ClusterEventWithDescription(description);
+ }
+}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java
new file mode 100644
index 00000000000..1f2372dea29
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventForNode.java
@@ -0,0 +1,37 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core.matchers;
+
+import com.yahoo.vdslib.state.Node;
+import com.yahoo.vespa.clustercontroller.core.NodeEvent;
+import org.hamcrest.Description;
+import org.hamcrest.Factory;
+import org.mockito.ArgumentMatcher;
+
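+/** Mockito argument matcher accepting any NodeEvent emitted for the expected node. */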
+public class EventForNode extends ArgumentMatcher<NodeEvent> {
+ private final Node expected;
+
+ EventForNode(Node expected) {
+ this.expected = expected;
+ }
+
+ @Override
+ public boolean matches(Object o) {
+ return ((NodeEvent)o).getNode().getNode().equals(expected);
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(String.format("NodeEvent for node %s", expected));
+ }
+
+ @Override
+ public void describeMismatch(Object item, Description description) {
+ NodeEvent other = (NodeEvent)item;
+ description.appendText(String.format("got node %s", other.getNode().getNode()));
+ }
+
+ @Factory
+ public static EventForNode eventForNode(Node expected) {
+ return new EventForNode(expected);
+ }
+}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java
new file mode 100644
index 00000000000..c99505d28ee
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTimeIs.java
@@ -0,0 +1,40 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core.matchers;
+
+import com.yahoo.vespa.clustercontroller.core.Event;
+import org.hamcrest.Description;
+import org.hamcrest.Factory;
+import org.mockito.ArgumentMatcher;
+
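+/** Mockito argument matcher accepting any Event whose timestamp (in milliseconds) equals the expected value. */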
+public class EventTimeIs extends ArgumentMatcher<Event> {
+ private final long expected;
+
+ public EventTimeIs(long expected) {
+ this.expected = expected;
+ }
+
+ @Override
+ public boolean matches(Object o) {
+ if (!(o instanceof Event)) {
+ return false;
+ }
+ return expected == ((Event)o).getTimeMs();
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(String.format("Event with time %d", expected));
+ }
+
+ @Override
+ public void describeMismatch(Object item, Description description) {
+ Event other = (Event)item;
+ description.appendText(String.format("event time is %d", other.getTimeMs()));
+ }
+
+ @Factory
+ public static EventTimeIs eventTimeIs(long time) {
+ return new EventTimeIs(time);
+ }
+}
+
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java
new file mode 100644
index 00000000000..5430bc5d8a3
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/EventTypeIs.java
@@ -0,0 +1,27 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core.matchers;
+
+import com.yahoo.vespa.clustercontroller.core.NodeEvent;
+import org.hamcrest.Factory;
+import org.mockito.ArgumentMatcher;
+
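+/** Mockito argument matcher accepting any NodeEvent of the expected type. */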
+public class EventTypeIs extends ArgumentMatcher<NodeEvent> {
+ private final NodeEvent.Type expected;
+
+ public EventTypeIs(NodeEvent.Type expected) {
+ this.expected = expected;
+ }
+
+ @Override
+ public boolean matches(Object o) {
+ if (!(o instanceof NodeEvent)) {
+ return false;
+ }
+ return expected.equals(((NodeEvent)o).getType());
+ }
+
+ @Factory
+ public static EventTypeIs eventTypeIs(NodeEvent.Type type) {
+ return new EventTypeIs(type);
+ }
+}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java
new file mode 100644
index 00000000000..a147b9af466
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/HasStateReasonForNode.java
@@ -0,0 +1,49 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core.matchers;
+
+import com.yahoo.vdslib.state.Node;
+import com.yahoo.vespa.clustercontroller.core.NodeStateReason;
+import org.hamcrest.Description;
+import org.hamcrest.Factory;
+import org.mockito.ArgumentMatcher;
+
+import java.util.Map;
+
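+/** Mockito argument matcher accepting a node-to-reason map that maps the given node to the expected NodeStateReason. */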
+public class HasStateReasonForNode extends ArgumentMatcher<Map<Node, NodeStateReason>> {
+ private final Node node;
+ private final NodeStateReason expected;
+
+ public HasStateReasonForNode(Node node, NodeStateReason expected) {
+ this.node = node;
+ this.expected = expected;
+ }
+
+ @Override
+ public boolean matches(Object o) {
+ if (o == null || !(o instanceof Map)) {
+ return false;
+ }
+ return expected == ((Map)o).get(node);
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(String.format("has node state reason %s", expected.toString()));
+ }
+
+ @Override
+ public void describeMismatch(Object item, Description description) {
+ @SuppressWarnings("unchecked")
+ Map<Node, NodeStateReason> other = (Map<Node, NodeStateReason>)item;
+ if (other.containsKey(node)) {
+ description.appendText(String.format("has reason %s", other.get(node).toString()));
+ } else {
+ description.appendText("has no entry for node");
+ }
+ }
+
+ @Factory
+ public static HasStateReasonForNode hasStateReasonForNode(Node node, NodeStateReason reason) {
+ return new HasStateReasonForNode(node, reason);
+ }
+}
diff --git a/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java
new file mode 100644
index 00000000000..5ac89030c23
--- /dev/null
+++ b/clustercontroller-core/src/test/java/com/yahoo/vespa/clustercontroller/core/matchers/NodeEventWithDescription.java
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.clustercontroller.core.matchers;
+
+import com.yahoo.vespa.clustercontroller.core.NodeEvent;
+import org.hamcrest.Description;
+import org.hamcrest.Factory;
+import org.mockito.ArgumentMatcher;
+
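+/** Mockito argument matcher accepting any NodeEvent whose description equals the expected string. */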
+public class NodeEventWithDescription extends ArgumentMatcher<NodeEvent> {
+ private final String expected;
+
+ public NodeEventWithDescription(String expected) {
+ this.expected = expected;
+ }
+
+ @Override
+ public boolean matches(Object o) {
+ if (!(o instanceof NodeEvent)) {
+ return false;
+ }
+ return expected.equals(((NodeEvent) o).getDescription());
+ }
+
+ @Override
+ public void describeTo(Description description) {
+ description.appendText(String.format("NodeEvent with description '%s'", expected));
+ }
+
+ @Override
+ public void describeMismatch(Object item, Description description) {
+ NodeEvent other = (NodeEvent)item;
+ description.appendText(String.format("got description '%s'", other.getDescription()));
+ }
+
+ @Factory
+ public static NodeEventWithDescription nodeEventWithDescription(String description) {
+ return new NodeEventWithDescription(description);
+ }
+}
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java
index cd68d214d3d..4268e4f835e 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/IncludeProcessor.java
@@ -41,7 +41,7 @@ class IncludeProcessor implements PreProcessor {
Element elem = (Element) list.item(0);
Element parent = (Element) elem.getParentNode();
String filename = elem.getAttribute("file");
- boolean required = elem.hasAttribute("required") ? Boolean.parseBoolean(elem.getAttribute("required")) : true;
+ boolean required = ! elem.hasAttribute("required") || Boolean.parseBoolean(elem.getAttribute("required"));
File file = new File(currentFolder, filename);
Document subFile = IncludeProcessor.parseIncludeFile(file, parent.getTagName(), required);
@@ -76,4 +76,5 @@ class IncludeProcessor implements PreProcessor {
w.append(endTag);
return XML.getDocument(new StringReader(w.toString()));
}
+
}
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
index f3da285f524..32e9aec56cb 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/OverrideProcessor.java
@@ -20,6 +20,7 @@ import java.util.logging.Logger;
* @since 5.22
*/
class OverrideProcessor implements PreProcessor {
+
private static final Logger log = Logger.getLogger(OverrideProcessor.class.getName());
private final Environment environment;
@@ -140,6 +141,9 @@ class OverrideProcessor implements PreProcessor {
}
}
+ if (bestMatch > 1) // there was a region/environment-specific override
+ doElementSpecificProcessingOnOverride(bestMatchElement);
+
// Remove elements not specific
for (Element child : children) {
if (child != bestMatchElement) {
@@ -148,6 +152,14 @@ class OverrideProcessor implements PreProcessor {
}
}
+ /** Called on each element which is selected by matching some override condition */
+ private void doElementSpecificProcessingOnOverride(Element element) {
+ // If node capacity is specified explicitly for some env/region, we should require that capacity
+ if ( element.getTagName().equals("nodes"))
+ if (element.getChildNodes().getLength() == 0) // specifies capacity, not a list of nodes
+ element.setAttribute("required", "true");
+ }
+
/**
* Retains all elements where at least one element is overridden. Removes non-overridden elements from map.
*/
diff --git a/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java b/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java
index 4e08e514504..b70a5054563 100644
--- a/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java
+++ b/config-application-package/src/main/java/com/yahoo/config/application/XmlPreProcessor.java
@@ -25,6 +25,7 @@ import java.util.List;
* @since 5.22
*/
public class XmlPreProcessor {
+
final static String deployNamespace = "xmlns:deploy";
final static String deployNamespaceUri = "vespa";
final static String preprocessNamespace = "xmlns:preprocess";
@@ -68,4 +69,5 @@ public class XmlPreProcessor {
chain.add(new PropertiesProcessor());
return chain;
}
+
}
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java
index 393bd1c2de7..06ecede09a5 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/ApplicationPackageXmlFilesValidator.java
@@ -7,12 +7,10 @@ import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.provision.Version;
import com.yahoo.path.Path;
import com.yahoo.io.reader.NamedReader;
-import com.yahoo.log.LogLevel;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
-import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
@@ -24,7 +22,6 @@ import java.util.Optional;
public class ApplicationPackageXmlFilesValidator {
private final AppSubDirs appDirs;
- private final DeployLogger logger;
private final Optional<Version> vespaVersion;
private static final FilenameFilter xmlFilter = new FilenameFilter() {
@@ -34,31 +31,32 @@ public class ApplicationPackageXmlFilesValidator {
}
};
- public ApplicationPackageXmlFilesValidator(AppSubDirs appDirs, DeployLogger logger, Optional<Version> vespaVersion) {
+
+ public ApplicationPackageXmlFilesValidator(AppSubDirs appDirs, Optional<Version> vespaVersion) {
this.appDirs = appDirs;
- this.logger = logger;
this.vespaVersion = vespaVersion;
}
- public static ApplicationPackageXmlFilesValidator createDefaultXMLValidator(File appDir, DeployLogger logger, Optional<Version> vespaVersion) {
- return new ApplicationPackageXmlFilesValidator(new AppSubDirs(appDir), logger, vespaVersion);
+ // TODO: Remove when no version older than 6.33 is used
+ public ApplicationPackageXmlFilesValidator(AppSubDirs appDirs, DeployLogger logger, Optional<Version> vespaVersion) {
+ this.appDirs = appDirs;
+ this.vespaVersion = vespaVersion;
}
- public static ApplicationPackageXmlFilesValidator createTestXmlValidator(File appDir) {
- return new ApplicationPackageXmlFilesValidator(new AppSubDirs(appDir), new BaseDeployLogger(), Optional.<Version>empty());
+ public static ApplicationPackageXmlFilesValidator createDefaultXMLValidator(File appDir, Optional<Version> vespaVersion) {
+ return new ApplicationPackageXmlFilesValidator(new AppSubDirs(appDir), vespaVersion);
}
- // Verify that files a and b does not coexist.
- private void checkConflicts(String a, String b) throws IllegalArgumentException {
- if (appDirs.file(a).exists() && appDirs.file(b).exists())
- throw new IllegalArgumentException("Application package in " + appDirs.root() + " contains both " + a + " and " + b +
- ", please use just one of them");
+ public static ApplicationPackageXmlFilesValidator createTestXmlValidator(File appDir) {
+ return new ApplicationPackageXmlFilesValidator(new AppSubDirs(appDir), Optional.<Version>empty());
}
@SuppressWarnings("deprecation")
public void checkApplication() throws IOException {
validateHostsFile(SchemaValidator.hostsXmlSchemaName);
validateServicesFile(SchemaValidator.servicesXmlSchemaName);
+ // TODO: Temporarily disabled; need to get out a feature to support ignoring validation errors first
+ //validateDeploymentFile(SchemaValidator.deploymentXmlSchemaName);
if (appDirs.searchdefinitions().exists()) {
if (FilesApplicationPackage.getSearchDefinitionFiles(appDirs.root()).isEmpty()) {
@@ -85,7 +83,6 @@ public class ApplicationPackageXmlFilesValidator {
if (appDirs.file(FilesApplicationPackage.HOSTS).exists()) {
validate(hostsXmlSchemaName, FilesApplicationPackage.HOSTS);
}
-
}
private void validateServicesFile(String servicesXmlSchemaName) throws IOException {
@@ -93,6 +90,12 @@ public class ApplicationPackageXmlFilesValidator {
validate(servicesXmlSchemaName, servicesFileName());
}
+ private void validateDeploymentFile(String deploymentXmlSchemaName) throws IOException {
+ if (appDirs.file(FilesApplicationPackage.DEPLOYMENT_FILE.getName()).exists()) {
+ validate(deploymentXmlSchemaName, FilesApplicationPackage.DEPLOYMENT_FILE.getName());
+ }
+ }
+
private void validate(String schemaName, String xmlFileName) throws IOException {
createSchemaValidator(schemaName, vespaVersion).validate(appDirs.file(xmlFileName));
}
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java
index 3b85e617f87..002c31d5910 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/FilesApplicationPackage.java
@@ -44,10 +44,8 @@ import java.net.URL;
import java.security.MessageDigest;
import java.util.*;
import java.util.jar.JarFile;
-import java.util.logging.Level;
import java.util.logging.Logger;
-import static com.yahoo.io.IOUtils.readAll;
import static com.yahoo.text.Lowercase.toLowerCase;
@@ -626,13 +624,27 @@ public class FilesApplicationPackage implements ApplicationPackage {
}
@Override
+ public void validateXML() throws IOException {
+ validateXML(Optional.empty());
+ }
+
+ // TODO: Remove when no version older than 6.33 is used
+ @Override
public void validateXML(DeployLogger logger) throws IOException {
- validateXML(logger, Optional.empty());
+ validateXML(Optional.empty());
+ }
+
+ @Override
+ public void validateXML(Optional<Version> vespaVersion) throws IOException {
+ ApplicationPackageXmlFilesValidator xmlFilesValidator = ApplicationPackageXmlFilesValidator.createDefaultXMLValidator(appDir, vespaVersion);
+ xmlFilesValidator.checkApplication();
+ ApplicationPackageXmlFilesValidator.checkIncludedDirs(this);
}
+ // TODO: Remove when no version older than 6.33 is used
@Override
public void validateXML(DeployLogger logger, Optional<Version> vespaVersion) throws IOException {
- ApplicationPackageXmlFilesValidator xmlFilesValidator = ApplicationPackageXmlFilesValidator.createDefaultXMLValidator(appDir, logger, vespaVersion);
+ ApplicationPackageXmlFilesValidator xmlFilesValidator = ApplicationPackageXmlFilesValidator.createDefaultXMLValidator(appDir, vespaVersion);
xmlFilesValidator.checkApplication();
ApplicationPackageXmlFilesValidator.checkIncludedDirs(this);
}
@@ -659,10 +671,10 @@ public class FilesApplicationPackage implements ApplicationPackage {
@Override
public ApplicationPackage preprocess(Zone zone, RuleConfigDeriver ignored, DeployLogger logger) throws IOException, TransformerException, ParserConfigurationException, SAXException {
IOUtils.recursiveDeleteDir(preprocessedDir);
- IOUtils.copyDirectory(appDir, preprocessedDir, -1, (dir, name) -> !name.equals(".preprocessed") &&
- !name.equals(SERVICES) &&
- !name.equals(HOSTS) &&
- !name.equals(CONFIG_DEFINITIONS_DIR));
+ IOUtils.copyDirectory(appDir, preprocessedDir, -1, (dir, name) -> ! name.equals(".preprocessed") &&
+ ! name.equals(SERVICES) &&
+ ! name.equals(HOSTS) &&
+ ! name.equals(CONFIG_DEFINITIONS_DIR));
preprocessXML(new File(preprocessedDir, SERVICES), getServicesFile(), zone);
if (getHostsFile().exists()) {
preprocessXML(new File(preprocessedDir, HOSTS), getHostsFile(), zone);
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java
index 334fda6e6eb..ce63ad23852 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/MockFileRegistry.java
@@ -35,8 +35,4 @@ public class MockFileRegistry implements FileRegistry {
return result;
}
- @Override
- public Set<String> allRelativePaths() {
- return Collections.emptySet();
- }
}
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java
index 67a24e0159b..ed4ccf51ff7 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/PreGeneratedFileRegistry.java
@@ -83,11 +83,6 @@ public class PreGeneratedFileRegistry implements FileRegistry {
}
@Override
- public Set<String> allRelativePaths() {
- return path2Hash.keySet();
- }
-
- @Override
public List<Entry> export() {
List<Entry> entries = new ArrayList<>();
for (Map.Entry<String, String> entry : path2Hash.entrySet()) {
diff --git a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java
index a28a17dc831..698fa8fdce7 100644
--- a/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java
+++ b/config-application-package/src/main/java/com/yahoo/config/model/application/provider/SchemaValidator.java
@@ -43,6 +43,7 @@ public class SchemaValidator {
public static final String schemaDirBase = System.getProperty("java.io.tmpdir", File.separator + "tmp" + File.separator + "vespa");
static final String servicesXmlSchemaName = "services.rnc";
static final String hostsXmlSchemaName = "hosts.rnc";
+ static final String deploymentXmlSchemaName = "deployment.rnc";
private final CustomErrorHandler errorHandler = new CustomErrorHandler();
private final ValidationDriver driver;
private DeployLogger deployLogger;
@@ -91,6 +92,15 @@ public class SchemaValidator {
return new SchemaValidator(hostsXmlSchemaName);
}
+ /**
+ * Create a validator for deployment.xml for tests
+ *
+ * @throws IOException if it is not possible to read schema files
+ */
+ public static SchemaValidator createTestValidatorDeployment() throws IOException {
+ return new SchemaValidator(deploymentXmlSchemaName);
+ }
+
private class CustomErrorHandler implements ErrorHandler {
volatile String fileName;
diff --git a/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorTest.java b/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorTest.java
new file mode 100644
index 00000000000..338302e9e57
--- /dev/null
+++ b/config-application-package/src/test/java/com/yahoo/config/application/HostedOverrideProcessorTest.java
@@ -0,0 +1,128 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.config.application;
+
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.RegionName;
+import org.custommonkey.xmlunit.XMLUnit;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.xml.sax.SAXException;
+
+import javax.xml.parsers.ParserConfigurationException;
+import javax.xml.stream.XMLStreamException;
+import javax.xml.transform.TransformerException;
+import java.io.IOException;
+import java.io.StringReader;
+
+/**
+ * @author bratseth
+ */
+public class HostedOverrideProcessorTest {
+
+ static {
+ XMLUnit.setIgnoreWhitespace(true);
+ }
+
+ private static final String input =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
+ "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
+ " <container id=\"foo\" version=\"1.0\">" +
+ " <nodes count='1'/>" +
+ " <nodes deploy:environment=\"staging\" count='2'/>" +
+ " <nodes deploy:environment=\"prod\" count='3'/>" +
+ " <nodes deploy:environment=\"prod\" deploy:region=\"us-west\" count='4'/>" +
+ " </container>" +
+ "</services>";
+
+
+ @Test
+ public void testParsingDefault() throws IOException, SAXException, XMLStreamException, ParserConfigurationException, TransformerException {
+ String expected =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
+ "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
+ " <container id=\"foo\" version=\"1.0\">" +
+ " <nodes count='1'/>" +
+ " </container>" +
+ "</services>";
+ assertOverride(Environment.test, RegionName.defaultName(), expected);
+ }
+
+ @Test
+ public void testParsingEnvironmentAndRegion() throws ParserConfigurationException, IOException, SAXException, TransformerException {
+ String expected =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
+ "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
+ " <container id=\"foo\" version=\"1.0\">" +
+ " <nodes count='4' required='true'/>" +
+ " </container>" +
+ "</services>";
+ assertOverride(Environment.from("prod"), RegionName.from("us-west"), expected);
+ }
+
+ @Test
+ public void testParsingEnvironmentUnknownRegion() throws ParserConfigurationException, IOException, SAXException, TransformerException {
+ String expected =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
+ "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
+ " <container id=\"foo\" version=\"1.0\">" +
+ " <nodes count='3' required='true'/>" +
+ " </container>" +
+ "</services>";
+ assertOverride(Environment.valueOf("prod"), RegionName.from("us-east"), expected);
+ }
+
+ @Test
+ public void testParsingEnvironmentNoRegion() throws ParserConfigurationException, IOException, SAXException, TransformerException {
+ String expected =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
+ "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
+ " <container id=\"foo\" version=\"1.0\">" +
+ " <nodes count='3' required='true'/>" +
+ " </container>" +
+ "</services>";
+ assertOverride(Environment.from("prod"), RegionName.defaultName(), expected);
+ }
+
+ @Test
+ public void testParsingUnknownEnvironment() throws ParserConfigurationException, IOException, SAXException, TransformerException {
+ String expected =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
+ "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
+ " <container id=\"foo\" version=\"1.0\">" +
+ " <nodes count='1'/>" +
+ " </container>" +
+ "</services>";
+ assertOverride(Environment.from("dev"), RegionName.defaultName(), expected);
+ }
+
+ @Test
+ public void testParsingUnknownEnvironmentUnknownRegion() throws ParserConfigurationException, IOException, SAXException, TransformerException {
+ String expected =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
+ "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
+ " <container id=\"foo\" version=\"1.0\">" +
+ " <nodes count='1'/>" +
+ " </container>" +
+ "</services>";
+ assertOverride(Environment.from("test"), RegionName.from("us-west"), expected);
+ }
+
+ @Test
+ public void testParsingInheritEnvironment() throws ParserConfigurationException, IOException, SAXException, TransformerException {
+ String expected =
+ "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?>" +
+ "<services xmlns:deploy=\"vespa\" xmlns:preprocess=\"?\" version=\"1.0\">" +
+ " <container id=\"foo\" version=\"1.0\">" +
+ " <nodes count='2' required='true'/>" +
+ " </container>" +
+ "</services>";
+ assertOverride(Environment.from("staging"), RegionName.from("us-west"), expected);
+ }
+
+ private void assertOverride(Environment environment, RegionName region, String expected) throws TransformerException {
+ Document inputDoc = Xml.getDocument(new StringReader(input));
+ Document newDoc = new OverrideProcessor(environment, region).process(inputDoc);
+ TestBase.assertDocument(expected, newDoc);
+ }
+
+}
diff --git a/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java b/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java
index 07068e236cd..6d9bf2cbfa5 100644
--- a/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java
+++ b/config-application-package/src/test/java/com/yahoo/config/application/IncludeProcessorTest.java
@@ -17,6 +17,7 @@ import java.nio.file.NoSuchFileException;
* @since 5.22
*/
public class IncludeProcessorTest {
+
@Test
public void testInclude() throws IOException, SAXException, XMLStreamException, ParserConfigurationException, TransformerException {
File app = new File("src/test/resources/multienvapp");
@@ -68,7 +69,7 @@ public class IncludeProcessorTest {
"</jdisc></services>";
Document doc = (new IncludeProcessor(app)).process(docBuilder.parse(Xml.getServices(app)));
- System.out.println(Xml.documentAsString(doc));
+ // System.out.println(Xml.documentAsString(doc));
TestBase.assertDocument(expected, doc);
}
@@ -78,4 +79,5 @@ public class IncludeProcessorTest {
DocumentBuilder docBuilder = Xml.getPreprocessDocumentBuilder();
(new IncludeProcessor(app)).process(docBuilder.parse(Xml.getServices(app)));
}
+
}
diff --git a/config-application-package/src/test/java/com/yahoo/config/application/XmlPreprocessorTest.java b/config-application-package/src/test/java/com/yahoo/config/application/XmlPreprocessorTest.java
index eecbb1e7313..f6528e84368 100644
--- a/config-application-package/src/test/java/com/yahoo/config/application/XmlPreprocessorTest.java
+++ b/config-application-package/src/test/java/com/yahoo/config/application/XmlPreprocessorTest.java
@@ -83,7 +83,7 @@ public class XmlPreprocessorTest {
"</services>";
Document docUsWest = (new XmlPreProcessor(appDir, services, Environment.prod, RegionName.from("us-west"))).run();
- System.out.println(Xml.documentAsString(docUsWest));
+ // System.out.println(Xml.documentAsString(docUsWest));
TestBase.assertDocument(expectedUsWest, docUsWest);
String expectedUsEast = "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\"?><services xmlns:deploy=\"vespa\" xmlns:preprocess=\"properties\" version=\"1.0\">\n" +
@@ -162,4 +162,5 @@ public class XmlPreprocessorTest {
Document docDev = (new XmlPreProcessor(appDir, new StringReader(input), Environment.prod, RegionName.from("default")).run());
TestBase.assertDocument(expectedProd, docDev);
}
+
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java
index 2a4751af083..a5fb7a152d8 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ApplicationPackage.java
@@ -217,14 +217,24 @@ public interface ApplicationPackage {
throw new UnsupportedOperationException("This application package cannot return file references");
}
+ // TODO: Remove when no version older than 6.33 is in use
default void validateXML(DeployLogger logger) throws IOException {
throw new UnsupportedOperationException("This application package cannot validate XML");
}
+ default void validateXML() throws IOException {
+ throw new UnsupportedOperationException("This application package cannot validate XML");
+ }
+
+ // TODO: Remove when no version older than 6.33 is in use
default void validateXML(DeployLogger logger, Optional<Version> vespaVersion) throws IOException {
throw new UnsupportedOperationException("This application package cannot validate XML");
}
+ default void validateXML(Optional<Version> vespaVersion) throws IOException {
+ throw new UnsupportedOperationException("This application package cannot validate XML");
+ }
+
default void writeMetaData() throws IOException {
throw new UnsupportedOperationException("This application package cannot write its metadata");
}
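
The hunk above adds no-argument validateXML overloads next to the existing logger-taking ones, so both old and new callers keep compiling while versions older than 6.33 are still in use. A minimal, self-contained sketch of that overload-transition pattern, using hypothetical stand-in types rather than the real ApplicationPackage interface:

    import java.io.IOException;

    // Stand-in for the interface; both overloads default to "unsupported" so existing
    // implementations keep compiling and only override what they actually support.
    interface Pkg {
        // old signature, kept only during the transition window
        default void validateXML(Object logger) throws IOException {
            throw new UnsupportedOperationException("This package cannot validate XML");
        }

        // new signature without the logger
        default void validateXML() throws IOException {
            throw new UnsupportedOperationException("This package cannot validate XML");
        }
    }

    class FilePkg implements Pkg {
        @Override
        public void validateXML() { /* real validation would go here */ }
    }

    public class OverloadTransitionSketch {
        public static void main(String[] args) throws IOException {
            new FilePkg().validateXML();         // new callers use this
            try {
                new FilePkg().validateXML(null); // old callers still compile; default throws
            } catch (UnsupportedOperationException expected) {
                System.out.println("old overload intentionally unsupported");
            }
        }
    }
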
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/ComponentInfo.java b/config-model-api/src/main/java/com/yahoo/config/application/api/ComponentInfo.java
index 42732ddfc47..52aaa148c1b 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/ComponentInfo.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/ComponentInfo.java
@@ -8,6 +8,7 @@ package com.yahoo.config.application.api;
* @author tonytv
*/
public class ComponentInfo {
+
final String pathRelativeToAppDir;
public ComponentInfo(String pathRelativeToAppDir) {
@@ -18,4 +19,8 @@ public class ComponentInfo {
public String getPathRelativeToAppDir() {
return pathRelativeToAppDir;
}
+
+ @Override
+ public String toString() { return "component at '" + pathRelativeToAppDir + "'"; }
+
}
diff --git a/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java b/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java
index fe4aab72cb0..8b211d1d400 100644
--- a/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java
+++ b/config-model-api/src/main/java/com/yahoo/config/application/api/FileRegistry.java
@@ -18,8 +18,6 @@ public interface FileRegistry {
*/
String fileSourceHost();
- Set<String> allRelativePaths();
-
List<Entry> export();
class Entry {
diff --git a/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigServerSpec.java b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigServerSpec.java
index 0a43f190675..499c43906e2 100644
--- a/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigServerSpec.java
+++ b/config-model-api/src/main/java/com/yahoo/config/model/api/ConfigServerSpec.java
@@ -7,8 +7,10 @@ package com.yahoo.config.model.api;
* @author tonytv
*/
public interface ConfigServerSpec {
- public String getHostName();
- public int getConfigServerPort();
- public int getHttpPort();
- public int getZooKeeperPort();
+
+ String getHostName();
+ int getConfigServerPort();
+ int getHttpPort();
+ int getZooKeeperPort();
+
}
diff --git a/config-model/src/main/Makefile b/config-model/src/main/Makefile
index 5e7024ccff9..c3dfd0c2e3e 100644
--- a/config-model/src/main/Makefile
+++ b/config-model/src/main/Makefile
@@ -3,7 +3,7 @@
trangjar=../../target/trang.jar
-all: resources/schema/services.rng resources/schema/hosts.rng resources/schema/container-include.rng resources/schema/services.xsd resources/schema/hosts.xsd resources/schema/container-include.xsd
+all: resources/schema/services.rng resources/schema/hosts.rng resources/schema/container-include.rng resources/schema/services.xsd resources/schema/hosts.xsd resources/schema/container-include.xsd resources/schema/deployment.xsd
resources/schema/services.rng: resources/schema/services.rnc resources/schema/common.rnc resources/schema/admin.rnc resources/schema/clients.rnc resources/schema/docproc.rnc resources/schema/routing.rnc resources/schema/clients-v2.rnc resources/schema/content.rnc resources/schema/genericmodule.rnc resources/schema/legacygenericcluster.rnc resources/schema/genericcluster.rnc resources/schema/legacygenericmodule.rnc resources/schema/containercluster.rnc
java -jar $(trangjar) -I rnc -O rng resources/schema/services.rnc resources/schema/services.rng
@@ -25,6 +25,12 @@ resources/schema/hosts.rng: resources/schema/hosts.rnc
resources/schema/hosts.xsd: resources/schema/hosts.rng
java -jar $(trangjar) -I rng -O xsd resources/schema/hosts.rng resources/schema/hosts.xsd
+resources/schema/deployment.rng: resources/schema/deployment.rnc
+ java -jar $(trangjar) -I rnc -O rng resources/schema/deployment.rnc resources/schema/deployment.rng
+
+resources/schema/deployment.xsd: resources/schema/deployment.rng
+ java -jar $(trangjar) -I rng -O xsd resources/schema/deployment.rng resources/schema/deployment.xsd
+
clean:
rm -f resources/schema/*.rng
rm -f resources/schema/*.xsd
diff --git a/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java b/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java
index e6df94c8855..0b0ac77443c 100644
--- a/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java
+++ b/config-model/src/main/java/com/yahoo/config/model/ApplicationConfigProducerRoot.java
@@ -265,6 +265,7 @@ public class ApplicationConfigProducerRoot extends AbstractConfigProducer<Abstra
}
public FileDistributionConfigProducer getFileDistributionConfigProducer() {
+ if (admin == null) return null; // no admin if standalone
return admin.getFileDistributionConfigProducer();
}
diff --git a/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java b/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java
index 8e1097907f1..8778107cd8a 100644
--- a/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java
+++ b/config-model/src/main/java/com/yahoo/config/model/producer/AbstractConfigProducer.java
@@ -105,10 +105,10 @@ public abstract class AbstractConfigProducer<CHILD extends AbstractConfigProduce
child.setParent(this);
if (childrenBySubId.get(child.getSubId()) != null) {
throw new IllegalArgumentException("Multiple services/instances of the id '" + child.getSubId() + "' under the service/instance " +
- errorMsgClassName() + " '" + subId + "'. (This is commonly caused by service/node index " +
- "collisions in the config.)." +
- "\nExisting instance: " + childrenBySubId.get(child.getSubId()) +
- "\nAttempted to add: " + child);
+ errorMsgClassName() + " '" + subId + "'. (This is commonly caused by service/node index " +
+ "collisions in the config.)." +
+ "\nExisting instance: " + childrenBySubId.get(child.getSubId()) +
+ "\nAttempted to add: " + child);
}
childrenBySubId.put(child.getSubId(), child);
diff --git a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
index 5c9d03b434f..c4ac4d91001 100644
--- a/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
+++ b/config-model/src/main/java/com/yahoo/config/model/provision/InMemoryProvisioner.java
@@ -101,8 +101,9 @@ public class InMemoryProvisioner implements HostProvisioner {
throw new IllegalArgumentException("Requested " + requestedCapacity.nodeCount() + " nodes in " +
groups + " groups, but the node count is not divisible into this number of groups");
- int capacity = failOnOutOfCapacity ? requestedCapacity.nodeCount() :
- Math.min(requestedCapacity.nodeCount(), freeNodes.get("default").size() + totalAllocatedTo(cluster));
+ int capacity = failOnOutOfCapacity || requestedCapacity.isRequired()
+ ? requestedCapacity.nodeCount()
+ : Math.min(requestedCapacity.nodeCount(), freeNodes.get("default").size() + totalAllocatedTo(cluster));
if (groups > capacity)
groups = capacity;
@@ -138,7 +139,7 @@ public class InMemoryProvisioner implements HostProvisioner {
int nextIndex = nextIndexInCluster.getOrDefault(new Pair<>(clusterGroup.type(), clusterGroup.id()), startIndex);
while (allocation.size() < nodesInGroup) {
- if (freeNodes.get(flavor).isEmpty()) throw new IllegalArgumentException("No nodes of flavor '" + flavor + "' available");
+ if (freeNodes.get(flavor).isEmpty()) throw new IllegalArgumentException("Insufficient capacity of flavor '" + flavor + "'");
Host newHost = freeNodes.removeValue(flavor, 0);
ClusterMembership membership = ClusterMembership.from(clusterGroup, nextIndex++);
allocation.add(new HostSpec(newHost.hostname(), newHost.aliases(), membership));
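
The change above makes the in-memory provisioner honor a required capacity request: the node count is only relaxed down to what is actually free when failure on out-of-capacity is off and the request is not marked required. A tiny illustrative sketch of that decision (the names below are made up, not the real Capacity API):

    // Illustrative only; mirrors the capacity decision in the hunk above.
    public class CapacitySketch {

        static int capacityToUse(int requested, boolean failOnOutOfCapacity, boolean required,
                                 int freeNodes, int alreadyAllocated) {
            return failOnOutOfCapacity || required
                    ? requested
                    : Math.min(requested, freeNodes + alreadyAllocated);
        }

        public static void main(String[] args) {
            System.out.println(capacityToUse(4, false, false, 2, 1)); // 3: relaxed to what is available
            System.out.println(capacityToUse(4, false, true,  2, 1)); // 4: required requests are never relaxed
            System.out.println(capacityToUse(4, true,  false, 2, 1)); // 4: will fail later if unavailable
        }
    }
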
diff --git a/config-model/src/main/java/com/yahoo/config/model/provision/SingleNodeProvisioner.java b/config-model/src/main/java/com/yahoo/config/model/provision/SingleNodeProvisioner.java
index 1d5544873d9..fe8b3935fcf 100644
--- a/config-model/src/main/java/com/yahoo/config/model/provision/SingleNodeProvisioner.java
+++ b/config-model/src/main/java/com/yahoo/config/model/provision/SingleNodeProvisioner.java
@@ -27,10 +27,11 @@ public class SingleNodeProvisioner implements HostProvisioner {
public SingleNodeProvisioner() {
try {
host = new Host(HostSystem.lookupCanonicalHostname(HostName.getLocalhost()));
- } catch (UnknownHostException e) {
+ this.hostSpec = new HostSpec(host.hostname(), host.aliases());
+ }
+ catch (UnknownHostException e) {
throw new RuntimeException(e);
}
- this.hostSpec = new HostSpec(host.hostname(), host.aliases());
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
index 731410c9bf3..c30c62b44bc 100644
--- a/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
+++ b/config-model/src/main/java/com/yahoo/config/model/test/MockApplicationPackage.java
@@ -232,6 +232,16 @@ public class MockApplicationPackage implements ApplicationPackage {
" </host>" +
"</hosts>";
+
+ @Override
+ public void validateXML() throws IOException {
+ if (failOnValidateXml) {
+ throw new IllegalArgumentException("Error in application package");
+ } else {
+ throw new UnsupportedOperationException("This application package cannot validate XML");
+ }
+ }
+
@Override
public void validateXML(DeployLogger logger) throws IOException {
if (failOnValidateXml) {
diff --git a/config-model/src/main/java/com/yahoo/config/model/test/MockRoot.java b/config-model/src/main/java/com/yahoo/config/model/test/MockRoot.java
index fa84cf1c7eb..314060e7543 100644
--- a/config-model/src/main/java/com/yahoo/config/model/test/MockRoot.java
+++ b/config-model/src/main/java/com/yahoo/config/model/test/MockRoot.java
@@ -35,7 +35,9 @@ import java.util.Set;
*
* @author gjoranv
*/
+// TODO: mockRoot instances can probably be replaced by VespaModel.createIncomplete
public class MockRoot extends AbstractConfigProducerRoot {
+
private static final long serialVersionUID = 1L;
public static final String MOCKHOST = "mockhost";
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
index 2ab634801c2..9032f913d0b 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/Search.java
@@ -51,6 +51,7 @@ public class Search implements Serializable {
private boolean documentsOnly = false;
// The stemming setting of this search definition. Default is SHORTEST.
+ // TODO: Change to Stemming.BEST on Vespa 7
private Stemming stemming = Stemming.SHORTEST;
// Documents contained in this definition.
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java
index e98ee662b3a..0d8d21400aa 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/derived/IndexInfo.java
@@ -420,7 +420,7 @@ public class IndexInfo extends Derived implements IndexInfoConfig.Producer {
if (active != null) {
return active;
}
- // assume default
+ // assume default. TODO: Change to Stemming.BEST on Vespa 7
return Stemming.SHORTEST;
}
diff --git a/config-model/src/main/java/com/yahoo/searchdefinition/document/Stemming.java b/config-model/src/main/java/com/yahoo/searchdefinition/document/Stemming.java
index f471201f55e..5b145051de5 100644
--- a/config-model/src/main/java/com/yahoo/searchdefinition/document/Stemming.java
+++ b/config-model/src/main/java/com/yahoo/searchdefinition/document/Stemming.java
@@ -17,13 +17,17 @@ public enum Stemming {
/** No stemming */
NONE("none"),
- /** Stem as much as possible */
+ /** @deprecated incorrectly does not stem at all */
+ @Deprecated
ALL("all"),
/** select shortest possible stem */
SHORTEST("shortest"),
- /** index (and query?) multiple stems */
+ /** select the "best" stem alternative */
+ BEST("best"),
+
+ /** index multiple stems */
MULTIPLE("multiple");
private static Logger log=Logger.getLogger(Stemming.class.getName());
@@ -36,6 +40,7 @@ public enum Stemming {
*
* @throws IllegalArgumentException if there is no stemming type with the given name
*/
+ @SuppressWarnings("deprecation")
public static Stemming get(String stemmingName) {
try {
Stemming stemming = Stemming.valueOf(stemmingName.toUpperCase());
@@ -49,7 +54,7 @@ public enum Stemming {
}
}
- private Stemming(String name) {
+ Stemming(String name) {
this.name = name;
}
@@ -59,14 +64,16 @@ public enum Stemming {
return "stemming " + name;
}
+ @SuppressWarnings("deprecation")
public StemMode toStemMode() {
- if (this == Stemming.SHORTEST) {
- return StemMode.SHORTEST;
- }
- if (this == Stemming.MULTIPLE) {
- return StemMode.ALL;
+ switch(this) {
+ case SHORTEST: return StemMode.SHORTEST;
+ case MULTIPLE: return StemMode.ALL;
+ case BEST : return StemMode.BEST;
+ case NONE: return StemMode.NONE;
+ case ALL: return StemMode.SHORTEST; // Intentional; preserve historic behavior
+ default: throw new IllegalStateException("Inconvertible stem mode " + this);
}
- return StemMode.NONE;
}
}
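
For readers unfamiliar with the pattern used above: the deprecated ALL constant is kept so existing search definitions still parse, but it is deliberately mapped to the historic (incorrect) behavior rather than its literal meaning. A self-contained sketch with stand-in enums (not the real Stemming/StemMode classes):

    // Stand-ins only; illustrates keeping a deprecated enum value mapped to its historic behavior.
    enum Mode { NONE, SHORTEST, BEST, ALL }

    enum StemSetting {
        NONE, SHORTEST, BEST, MULTIPLE,
        /** @deprecated incorrectly does not stem at all */
        @Deprecated ALL;

        @SuppressWarnings("deprecation")
        Mode toMode() {
            switch (this) {
                case SHORTEST: return Mode.SHORTEST;
                case MULTIPLE: return Mode.ALL;
                case BEST:     return Mode.BEST;
                case NONE:     return Mode.NONE;
                case ALL:      return Mode.SHORTEST; // intentional: preserve historic behavior
                default:       throw new IllegalStateException("Inconvertible stem setting " + this);
            }
        }
    }

    public class StemmingMappingSketch {
        public static void main(String[] args) {
            System.out.println(StemSetting.BEST.toMode()); // BEST
            System.out.println(StemSetting.ALL.toMode());  // SHORTEST, by design
        }
    }
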
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/Client.java b/config-model/src/main/java/com/yahoo/vespa/model/Client.java
index 15685f5f669..2a2498cc310 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/Client.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/Client.java
@@ -8,7 +8,7 @@ import com.yahoo.config.model.producer.AbstractConfigProducer;
* This is a placeholder config producer that makes global configuration available through a single identifier. This
* is added directly to the {@link ApplicationConfigProducerRoot} producer, and so can be accessed by the simple "client" identifier.
*
- * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a>
+ * @author Simon Thoresen
*/
public class Client extends AbstractConfigProducer {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/ConfigProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/ConfigProducer.java
index 852e4e73331..aaeedf10bc8 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/ConfigProducer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/ConfigProducer.java
@@ -19,44 +19,38 @@ import com.yahoo.config.model.producer.UserConfigRepo;
*/
public interface ConfigProducer extends com.yahoo.config.ConfigInstance.Producer {
- /**
- * @return the configId of this ConfigProducer.
- */
- public String getConfigId();
+ /** Returns the configId of this ConfigProducer. */
+ String getConfigId();
- /**
- * @return The one and only HostSystem of the root node
- */
- public HostSystem getHostSystem();
+ /** Returns the one and only HostSystem of the root node */
+ HostSystem getHostSystem();
/** Returns the user configs of this */
- public UserConfigRepo getUserConfigs();
+ UserConfigRepo getUserConfigs();
- /**
- * @return this ConfigProducer's children (only 1st level)
- */
- public Map<String,? extends ConfigProducer> getChildren();
+ /** Returns this ConfigProducer's children (only 1st level) */
+ Map<String,? extends ConfigProducer> getChildren();
- /**
- * @return a List of all Services that are descendants to this ConfigProducer
- */
- public List<Service> getDescendantServices();
+ /** Returns a List of all Services that are descendants to this ConfigProducer */
+ List<Service> getDescendantServices();
/**
* Writes files that need to be written. The files will usually
* only be written when the Vespa model is generated through the
* deploy-application script.
- * gv: This is primarily intended for debugging.
+ * This is primarily intended for debugging.
+ *
* @param directory directory to write files to
* @throws java.io.IOException if writing fails
*/
- public void writeFiles(File directory) throws IOException;
+ void writeFiles(File directory) throws IOException;
/**
     * Dump the tree of config producers to the specified stream.
+ *
* @param out The stream to print to, e.g. System.out
*/
- public void dump(PrintStream out);
+ void dump(PrintStream out);
/**
* Build config from this and all parent ConfigProducers,
@@ -74,11 +68,12 @@ public interface ConfigProducer extends com.yahoo.config.ConfigInstance.Producer
* @param builder The ConfigBuilder to add user config overrides.
* @return true if overrides were added, false if not.
*/
- public boolean addUserConfig(ConfigInstance.Builder builder);
+ boolean addUserConfig(ConfigInstance.Builder builder);
/**
* check constraints depending on the state of the vespamodel graph.
* When overriding, you must invoke super.
*/
- public void validate() throws Exception;
+ void validate() throws Exception;
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
index d3e922c69dc..2d825e3332d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/HostSystem.java
@@ -5,12 +5,21 @@ import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.api.HostProvisioner;
import com.yahoo.config.model.producer.AbstractConfigProducer;
import com.yahoo.config.model.test.MockRoot;
-import com.yahoo.config.provision.*;
+import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterMembership;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.HostSpec;
+import com.yahoo.config.provision.ProvisionLogger;
import com.yahoo.net.HostName;
import java.net.InetAddress;
import java.net.UnknownHostException;
-import java.util.*;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
import java.util.logging.Level;
import java.util.stream.Collectors;
@@ -73,6 +82,7 @@ public class HostSystem extends AbstractConfigProducer<Host> {
* @return The canonical hostname, or null if unable to resolve.
* @throws UnknownHostException if the hostname cannot be resolved
*/
+ // public - This is used by amenders outside this repo
public static String lookupCanonicalHostname(String hostname) throws UnknownHostException {
return java.net.InetAddress.getByName(hostname).getCanonicalHostName();
}
@@ -87,7 +97,7 @@ public class HostSystem extends AbstractConfigProducer<Host> {
if (ipAddresses.containsKey(hostname)) return ipAddresses.get(hostname);
String ipAddress;
- if (hostname.startsWith(MockRoot.MOCKHOST)) {
+ if (hostname.startsWith(MockRoot.MOCKHOST)) { // TODO: Remove
ipAddress = "0.0.0.0";
} else {
try {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/PlainFormatter.java b/config-model/src/main/java/com/yahoo/vespa/model/PlainFormatter.java
deleted file mode 100644
index d424f4fa31b..00000000000
--- a/config-model/src/main/java/com/yahoo/vespa/model/PlainFormatter.java
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.model;
-
-import java.util.logging.Formatter;
-import java.util.logging.LogRecord;
-
-/**
- * A log formatter that returns a plain log message only with level, not
- * including timestamp and method (as java.util.logging.SimpleFormatter).
- * See bug #1789867.
- *
- * @author gjoranv
- */
-public class PlainFormatter extends Formatter {
-
- public PlainFormatter() {
- super();
- }
-
- public String format(LogRecord record) {
- StringBuffer sb = new StringBuffer();
-
- sb.append(record.getLevel().getName()).append(": ");
- sb.append(formatMessage(record)).append("\n");
-
- return sb.toString();
- }
-}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/PortsMeta.java b/config-model/src/main/java/com/yahoo/vespa/model/PortsMeta.java
index ea2151f9976..a0b3cc7294b 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/PortsMeta.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/PortsMeta.java
@@ -7,11 +7,12 @@ import java.util.LinkedList;
import java.util.List;
/**
- * Track metainformation about the ports of a service.
+ * Track meta information about the ports of a service.
*
* @author Vidar Larsen
*/
public class PortsMeta implements Serializable {
+
/** A list of all ports. The list elements are lists of strings. */
private List<LinkedList<String>> ports;
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
index 9a23be1f5c5..bdba3549033 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModel.java
@@ -100,6 +100,8 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
/** The validation overrides of this. This is never null. */
private final ValidationOverrides validationOverrides;
+
+ private final FileDistributor fileDistributor;
/** Creates a Vespa Model from internal model types only */
public VespaModel(ApplicationPackage app) throws IOException, SAXException {
@@ -130,23 +132,38 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
* @param deployState the global deploy state to use for this model.
*/
public VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState) throws IOException, SAXException {
+ this(configModelRegistry, deployState, true, null);
+ }
+
+ private VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState, boolean complete, FileDistributor fileDistributor) throws IOException, SAXException {
super("vespamodel");
this.deployState = deployState;
this.validationOverrides = deployState.validationOverrides();
configModelRegistry = new VespaConfigModelRegistry(configModelRegistry);
VespaModelBuilder builder = new VespaDomBuilder();
root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this);
- configModelRepo.readConfigModels(deployState, builder, root, configModelRegistry);
- addServiceClusters(deployState.getApplicationPackage(), builder);
- setupRouting();
- log.log(LogLevel.DEBUG, "hostsystem=" + getHostSystem());
- this.info = Optional.of(createProvisionInfo());
- getAdmin().addPerHostServices(getHostSystem().getHosts(), deployState.getProperties());
- freezeModelTopology();
- root.prepare(configModelRepo);
- configModelRepo.prepareConfigModels();
- validateWrapExceptions();
- this.deployState = null;
+ if (complete) { // create a completed, frozen model
+ configModelRepo.readConfigModels(deployState, builder, root, configModelRegistry);
+ addServiceClusters(deployState.getApplicationPackage(), builder);
+ this.info = Optional.of(createProvisionInfo()); // must happen after the two lines above
+ setupRouting();
+ this.fileDistributor = root.getFileDistributionConfigProducer().getFileDistributor();
+ getAdmin().addPerHostServices(getHostSystem().getHosts(), deployState.getProperties());
+ freezeModelTopology();
+ root.prepare(configModelRepo);
+ configModelRepo.prepareConfigModels();
+ validateWrapExceptions();
+ this.deployState = null;
+ }
+ else { // create a model with no services instantiated and the given file distributor
+ this.info = Optional.of(createProvisionInfo());
+ this.fileDistributor = fileDistributor;
+ }
+ }
+
+ /** Creates a mutable model with no services instantiated */
+ public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException {
+ return new VespaModel(new NullConfigModelRegistry(), deployState, false, new FileDistributor(deployState.getFileRegistry()));
}
private ProvisionInfo createProvisionInfo() {
@@ -192,7 +209,8 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
}
public FileDistributor getFileDistributor() {
- return root.getFileDistributionConfigProducer().getFileDistributor();
+ // return root.getFileDistributionConfigProducer().getFileDistributor();
+ return fileDistributor;
}
/** Returns this models Vespa instance */
@@ -437,9 +455,8 @@ public final class VespaModel extends AbstractConfigProducerRoot implements Seri
@Override
public DeployState getDeployState() {
- if (deployState == null) {
+ if (deployState == null)
throw new IllegalStateException("Cannot call getDeployState() once model has been built");
- }
return deployState;
}
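
The constructor split above lets VespaModel.createIncomplete build a model with no services and an externally supplied FileDistributor, while the normal path still derives the distributor from the built topology. A simplified, hypothetical sketch of that construction split (strings stand in for the real collaborators):

    // Hypothetical sketch of the complete/incomplete construction split; not the real VespaModel.
    final class ModelSketch {

        private final String fileDistributor; // stands in for FileDistributor

        private ModelSketch(boolean complete, String suppliedDistributor) {
            if (complete) {
                // build the full service topology, then derive the distributor from it
                this.fileDistributor = "distributor derived from built services";
            }
            else {
                // no services instantiated; use the distributor handed in by the caller
                this.fileDistributor = suppliedDistributor;
            }
        }

        static ModelSketch create() { return new ModelSketch(true, null); }

        /** Mirrors createIncomplete: a mutable model with no services instantiated. */
        static ModelSketch createIncomplete(String suppliedDistributor) {
            return new ModelSketch(false, suppliedDistributor);
        }

        String fileDistributor() { return fileDistributor; }

        public static void main(String[] args) {
            System.out.println(ModelSketch.create().fileDistributor());
            System.out.println(ModelSketch.createIncomplete("externally supplied distributor").fileDistributor());
        }
    }
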
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
index 9b234435ce2..adbd4d7bae1 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/VespaModelFactory.java
@@ -4,7 +4,6 @@ package com.yahoo.vespa.model;
import com.google.inject.Inject;
import com.yahoo.component.provider.ComponentRegistry;
import com.yahoo.config.application.api.ApplicationPackage;
-import com.yahoo.config.application.api.DeployLogger;
import com.yahoo.config.model.ConfigModelRegistry;
import com.yahoo.config.model.MapConfigModelRegistry;
import com.yahoo.config.model.NullConfigModelRegistry;
@@ -89,7 +88,6 @@ public class VespaModelFactory implements ModelFactory {
if (modelContext.appDir().isPresent()) {
ApplicationPackageXmlFilesValidator validator =
ApplicationPackageXmlFilesValidator.createDefaultXMLValidator(modelContext.appDir().get(),
- modelContext.deployLogger(),
modelContext.vespaVersion());
try {
validator.checkApplication();
@@ -101,7 +99,7 @@ public class VespaModelFactory implements ModelFactory {
}
} else {
- validateXML(modelContext.applicationPackage(), modelContext.deployLogger(), ignoreValidationErrors);
+ validateXML(modelContext.applicationPackage(), ignoreValidationErrors);
}
DeployState deployState = createDeployState(modelContext);
VespaModel model = buildModel(deployState);
@@ -173,9 +171,9 @@ public class VespaModelFactory implements ModelFactory {
return modelContext.properties().hostedVespa() && id.isHostedVespaRoutingApplication();
}
- private void validateXML(ApplicationPackage applicationPackage, DeployLogger deployLogger, boolean ignoreValidationErrors) {
+ private void validateXML(ApplicationPackage applicationPackage, boolean ignoreValidationErrors) {
try {
- applicationPackage.validateXML(deployLogger);
+ applicationPackage.validateXML();
} catch (IllegalArgumentException e) {
rethrowUnlessIgnoreErrors(e, ignoreValidationErrors);
} catch (Exception e) {
@@ -185,7 +183,7 @@ public class VespaModelFactory implements ModelFactory {
private List<ConfigChangeAction> validateModel(VespaModel model, DeployState deployState, boolean ignoreValidationErrors) {
try {
- deployState.getApplicationPackage().validateXML(deployState.getDeployLogger());
+ deployState.getApplicationPackage().validateXML();
return Validation.validate(model, ignoreValidationErrors, deployState);
} catch (IllegalArgumentException e) {
rethrowUnlessIgnoreErrors(e, ignoreValidationErrors);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
index 38a1e59433f..67281b7816d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/Admin.java
@@ -34,7 +34,7 @@ public class Admin extends AbstractConfigProducer implements Serializable {
private static final long serialVersionUID = 1L;
private final Yamas yamas;
- private final Map<String,MetricsConsumer> metricsConsumers;
+ private final Map<String, MetricsConsumer> metricsConsumers;
private final List<Configserver> configservers = new ArrayList<>();
private final List<Slobrok> slobroks = new ArrayList<>();
@@ -200,7 +200,7 @@ public class Admin extends AbstractConfigProducer implements Serializable {
HostResource deployHost = getHostSystem().getHostByHostname(fileDistributor.fileSourceHost());
if (deployHostIsMissing(deployHost)) {
throw new RuntimeException("Could not find host in the application's host system: '" +
- fileDistributor.fileSourceHost() + "'. Hostsystem=" + getHostSystem());
+ fileDistributor.fileSourceHost() + "'. Hostsystem=" + getHostSystem());
}
FileDistributorService fds = new FileDistributorService(fileDistribution, host.getHost().getHostName(),
@@ -245,4 +245,5 @@ public class Admin extends AbstractConfigProducer implements Serializable {
public boolean multitenant() {
return multitenant;
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/admin/Configserver.java b/config-model/src/main/java/com/yahoo/vespa/model/admin/Configserver.java
index 11508ba91ed..47332b064da 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/admin/Configserver.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/admin/Configserver.java
@@ -96,10 +96,12 @@ public class Configserver extends AbstractService {
// TODO: Remove this implementation when we are on Hosted Vespa.
public static class Spec implements ConfigServerSpec {
+
private final String hostName;
private final int configServerPort;
private final int httpPort;
private final int zooKeeperPort;
+
public String getHostName() {
return hostName;
}
@@ -142,4 +144,5 @@ public class Configserver extends AbstractService {
this.zooKeeperPort = zooKeeperPort;
}
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/VespaModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/VespaModelBuilder.java
index 75e9caefbd5..bcf523e1c99 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/VespaModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/VespaModelBuilder.java
@@ -27,4 +27,5 @@ public abstract class VespaModelBuilder {
* @param configModelRepo a {@link com.yahoo.config.model.ConfigModelRepo instance}
*/
public abstract void postProc(AbstractConfigProducer producerRoot, ConfigModelRepo configModelRepo);
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomClientsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomClientsBuilder.java
index 876017e16bc..f1829a1d718 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomClientsBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomClientsBuilder.java
@@ -35,4 +35,5 @@ public class DomClientsBuilder extends LegacyConfigModelBuilder<Clients> {
throw new IllegalArgumentException("Version '" + version + "' of 'clients' not supported.");
}
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomV20ClientsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomV20ClientsBuilder.java
index cea325b785f..b4070c67ae1 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomV20ClientsBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/DomV20ClientsBuilder.java
@@ -1,40 +1,17 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.model.builder.xml.dom;
-import com.yahoo.component.ComponentId;
-import com.yahoo.component.ComponentSpecification;
-import com.yahoo.component.chain.Phase;
-import com.yahoo.component.chain.dependencies.Dependencies;
-import com.yahoo.component.chain.model.ChainSpecification;
-import com.yahoo.component.chain.model.ChainedComponentModel;
-import com.yahoo.config.model.ConfigModelUtils;
import com.yahoo.vespa.config.content.spooler.SpoolerConfig;
import com.yahoo.config.model.producer.AbstractConfigProducer;
-import com.yahoo.container.bundle.BundleInstantiationSpecification;
-import com.yahoo.osgi.provider.model.ComponentModel;
import com.yahoo.text.XML;
import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.model.SimpleConfigProducer;
import com.yahoo.vespa.model.builder.xml.dom.VespaDomBuilder.DomConfigProducerBuilder;
-import com.yahoo.vespa.model.builder.xml.dom.chains.docproc.DomDocprocChainsBuilder;
import com.yahoo.vespa.model.clients.Clients;
-import com.yahoo.vespa.model.clients.HttpGatewayOwner;
import com.yahoo.vespa.model.clients.VespaSpoolMaster;
import com.yahoo.vespa.model.clients.VespaSpooler;
import com.yahoo.vespa.model.clients.VespaSpoolerProducer;
import com.yahoo.vespa.model.clients.VespaSpoolerService;
-import com.yahoo.vespa.model.container.Container;
-import com.yahoo.vespa.model.container.ContainerCluster;
-import com.yahoo.vespa.model.container.component.Handler;
-import com.yahoo.vespa.model.container.component.chain.ProcessingHandler;
-import com.yahoo.vespa.model.container.docproc.ContainerDocproc;
-import com.yahoo.vespa.model.container.docproc.DocprocChains;
-import com.yahoo.vespa.model.container.search.ContainerHttpGateway;
-import com.yahoo.vespa.model.container.search.ContainerSearch;
-import com.yahoo.vespa.model.container.search.searchchain.SearchChain;
-import com.yahoo.vespa.model.container.search.searchchain.SearchChains;
-import com.yahoo.vespa.model.container.search.searchchain.Searcher;
-import com.yahoo.vespa.model.container.xml.ContainerModelBuilder;
import com.yahoo.vespaclient.config.FeederConfig;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
@@ -43,9 +20,6 @@ import org.w3c.dom.NodeList;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.logging.Level;
/**
* Builds the Clients plugin
@@ -54,29 +28,17 @@ import java.util.logging.Level;
*/
public class DomV20ClientsBuilder {
- public static final String vespaClientBundleSpecification = "vespaclient-container-plugin";
-
// The parent docproc plugin to register data with.
private final Clients clients;
DomV20ClientsBuilder(Clients clients, String version) {
- this.clients = clients;
- if (!version.equals("2.0")) {
+ if ( ! version.equals("2.0"))
throw new IllegalArgumentException("Version '" + version + "' of 'clients' not supported.");
- }
+ this.clients = clients;
}
public void build(Element spec) {
- NodeList children = spec.getElementsByTagName("gateways");
- if (children.getLength() > 0 && clients.getConfigProducer()!=null)
- clients.getConfigProducer().deployLogger().log(Level.WARNING, "The 'gateways' element is deprecated, and will be disallowed in a " +
- "later version of Vespa. Use 'document-api' under 'jdisc' instead, see: " +
- ConfigModelUtils.createDocLink("reference/services-jdisc.html"));
- for (int i = 0; i < children.getLength(); i++) {
- createGateways(clients.getConfigProducer(), (Element) children.item(i), clients);
- }
-
- children = spec.getElementsByTagName("spoolers");
+ NodeList children = spec.getElementsByTagName("spoolers");
for (int i = 0; i < children.getLength(); i++) {
createSpoolers(clients.getConfigProducer(), (Element) children.item(i), clients);
}
@@ -87,29 +49,6 @@ public class DomV20ClientsBuilder {
}
}
- static Boolean getBooleanNodeValue(Node node) {
- return Boolean.valueOf(node.getFirstChild().getNodeValue());
- }
-
- static boolean getHttpFileServerEnabled(Element parentHttpFileServer, Element httpFileServer) {
- boolean ret=false;
- if (parentHttpFileServer != null) {
- for (Element child : XML.getChildren(parentHttpFileServer)) {
- if ("enabled".equals(child.getNodeName())) {
- ret = getBooleanNodeValue(child);
- }
- }
- }
- if (httpFileServer != null) {
- for (Element child : XML.getChildren(httpFileServer)) {
- if ("enabled".equals(child.getNodeName())) {
- ret = getBooleanNodeValue(child);
- }
- }
- }
- return ret;
- }
-
private void createLoadTypes(Element element, Clients clients) {
for (Element e : XML.getChildren(element, "type")) {
String priority = e.getAttribute("default-priority");
@@ -118,31 +57,6 @@ public class DomV20ClientsBuilder {
}
/**
- * Creates HttpGateway objects using the given xml Element.
- *
- * @param pcp AbstractConfigProducer
- * @param element The xml Element
- */
- private void createGateways(AbstractConfigProducer pcp, Element element, Clients clients) {
- String jvmArgs = null;
- if (element.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) jvmArgs=element.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME);
-
- Element gatewaysFeederOptions = findFeederOptions(element);
-
- HttpGatewayOwner owner = new HttpGatewayOwner(pcp, getFeederConfig(null, gatewaysFeederOptions));
- ContainerCluster cluster = new ContainerHttpGatewayClusterBuilder().build(owner, element);
-
- int index = 0;
- for (Element e : XML.getChildren(element, "gateway")) {
- ContainerHttpGateway qrs = new ContainerHttpGatewayBuilder(cluster, index).build(cluster, e);
-
- if ("".equals(qrs.getJvmArgs()) && jvmArgs!=null) qrs.setJvmArgs(jvmArgs);
- index++;
- }
- clients.setContainerHttpGateways(cluster);
- }
-
- /**
* Creates VespaSpooler objects using the given xml Element.
*/
private void createSpoolers(AbstractConfigProducer pcp, Element element, Clients clients) {
@@ -170,13 +84,10 @@ public class DomV20ClientsBuilder {
}
}
- private void createSpoolMasters(SimpleConfigProducer producer,
- Element element) {
+ private void createSpoolMasters(SimpleConfigProducer producer, Element element) {
int i=0;
- for (Element e : XML.getChildren(element, "spoolmaster")) {
- VespaSpoolMaster master = new VespaSpoolMasterBuilder(i).build(producer, e);
- i++;
- }
+ for (Element e : XML.getChildren(element, "spoolmaster"))
+ new VespaSpoolMasterBuilder(i++).build(producer, e);
}
private SpoolerConfig.Builder getSpoolConfig(Element conf) {
@@ -313,133 +224,6 @@ public class DomV20ClientsBuilder {
}
}
- public static class ContainerHttpGatewayClusterBuilder extends DomConfigProducerBuilder<ContainerCluster> {
- @Override
- protected ContainerCluster doBuild(AbstractConfigProducer parent,
- Element spec) {
-
- ContainerCluster cluster = new ContainerCluster(parent, "gateway", "gateway");
-
- SearchChains searchChains = new SearchChains(cluster, "searchchain");
- Set<ComponentSpecification> inherited = new TreeSet<>();
- //inherited.add(new ComponentSpecification("vespa", null, null));
- {
- SearchChain mySearchChain = new SearchChain(new ChainSpecification(new ComponentId("vespaget"),
- new ChainSpecification.Inheritance(inherited, null), new ArrayList<>(), new TreeSet<>()));
- Searcher getComponent = newVespaClientSearcher("com.yahoo.storage.searcher.GetSearcher");
- mySearchChain.addInnerComponent(getComponent);
- searchChains.add(mySearchChain);
- }
- {
- SearchChain mySearchChain = new SearchChain(new ChainSpecification(new ComponentId("vespavisit"),
- new ChainSpecification.Inheritance(inherited, null), new ArrayList<>(), new TreeSet<>()));
- Searcher getComponent = newVespaClientSearcher("com.yahoo.storage.searcher.VisitSearcher");
- mySearchChain.addInnerComponent(getComponent);
- searchChains.add(mySearchChain);
- }
-
- ContainerSearch containerSearch = new ContainerSearch(cluster, searchChains, new ContainerSearch.Options());
- cluster.setSearch(containerSearch);
-
- cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandler", "http://*/feed"));
- cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerRemove", "http://*/remove"));
- cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerRemoveLocation", "http://*/removelocation"));
- cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerGet", "http://*/get"));
- cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerVisit", "http://*/visit"));
- cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerCompatibility", "http://*/document"));
- cluster.addComponent(newVespaClientHandler("com.yahoo.feedhandler.VespaFeedHandlerStatus", "http://*/feedstatus"));
- final ProcessingHandler<SearchChains> searchHandler = new ProcessingHandler<>(
- cluster.getSearch().getChains(), "com.yahoo.search.handler.SearchHandler");
- searchHandler.addServerBindings("http://*/search/*");
- cluster.addComponent(searchHandler);
-
- ContainerModelBuilder.addDefaultHandler_legacyBuilder(cluster);
-
- //BEGIN HACK for docproc chains:
- DocprocChains docprocChains = getDocprocChains(cluster, spec);
- if (docprocChains != null) {
- ContainerDocproc containerDocproc = new ContainerDocproc(cluster, docprocChains);
- cluster.setDocproc(containerDocproc);
- }
- //END HACK
-
- return cluster;
- }
-
- private Handler newVespaClientHandler(String componentId, String binding) {
- Handler<AbstractConfigProducer<?>> handler = new Handler<>(new ComponentModel(
- BundleInstantiationSpecification.getFromStrings(componentId, null, vespaClientBundleSpecification), ""));
- handler.addServerBindings(binding);
- handler.addServerBindings(binding + '/');
- return handler;
- }
-
- private Searcher newVespaClientSearcher(String componentSpec) {
- return new Searcher<>(new ChainedComponentModel(
- BundleInstantiationSpecification.getFromStrings(componentSpec, null, vespaClientBundleSpecification),
- new Dependencies(null, null, null)));
- }
-
- //BEGIN HACK for docproc chains:
- private DocprocChains getDocprocChains(AbstractConfigProducer qrs, Element gateways) {
- Element clients = (Element) gateways.getParentNode();
- Element services = (Element) clients.getParentNode();
- if (services == null) {
- return null;
- }
-
- Element docproc = XML.getChild(services, "docproc");
- if (docproc == null) {
- return null;
- }
-
- String version = docproc.getAttribute("version");
- if (version.startsWith("1.")) {
- return null;
- } else if (version.startsWith("2.")) {
- return null;
- } else if (version.startsWith("3.")) {
- return getDocprocChainsV3(qrs, docproc);
- } else {
- throw new IllegalArgumentException("Docproc version " + version + " unknown.");
- }
- }
-
- private DocprocChains getDocprocChainsV3(AbstractConfigProducer qrs, Element docproc) {
- Element docprocChainsElem = XML.getChild(docproc, "docprocchains");
- if (docprocChainsElem == null) {
- return null;
- }
- return new DomDocprocChainsBuilder(null, true).build(qrs, docprocChainsElem);
- }
- //END HACK
- }
-
- public static class ContainerHttpGatewayBuilder extends DomConfigProducerBuilder<ContainerHttpGateway> {
- int index;
- ContainerCluster cluster;
-
- public ContainerHttpGatewayBuilder(ContainerCluster cluster, int index) {
- this.index = index;
- this.cluster = cluster;
- }
-
- @Override
- protected ContainerHttpGateway doBuild(AbstractConfigProducer parent, Element spec) {
- // TODO: remove port handling
- int port = 19020;
- if (spec != null && spec.hasAttribute("baseport")) {
- port = Integer.parseInt(spec.getAttribute("baseport"));
- }
- ContainerHttpGateway httpGateway = new ContainerHttpGateway(cluster, "" + index, port, index);
- List<Container> containers = new ArrayList<>();
- containers.add(httpGateway);
-
- cluster.addContainers(containers);
- return httpGateway;
- }
- }
-
/**
* This class parses the feederoptions xml tag and produces Vespa config output.
*
@@ -553,4 +337,5 @@ public class DomV20ClientsBuilder {
return builder;
}
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
index 7dabfdc600b..c83f6098a0f 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/builder/xml/dom/NodesSpecification.java
@@ -25,14 +25,22 @@ public class NodesSpecification {
private final int groups;
+ /**
+ * Whether the capacity amount specified is required, or whether it can be relaxed
+ * at the discretion of the component fulfilling it
+ */
+ private final boolean required;
+
private final Optional<String> flavor;
private final Optional<String> dockerImage;
- private NodesSpecification(boolean dedicated, int count, int groups, Optional<String> flavor, Optional<String> dockerImage) {
+ private NodesSpecification(boolean dedicated, int count, int groups, boolean required,
+ Optional<String> flavor, Optional<String> dockerImage) {
this.dedicated = dedicated;
this.count = count;
this.groups = groups;
+ this.required = required;
this.flavor = flavor;
this.dockerImage = dockerImage;
}
@@ -41,6 +49,7 @@ public class NodesSpecification {
this(dedicated,
nodesElement.requiredIntegerAttribute("count"),
nodesElement.getIntegerAttribute("groups", 1),
+ nodesElement.getBooleanAttribute("required", false),
Optional.ofNullable(nodesElement.getStringAttribute("flavor")),
Optional.ofNullable(nodesElement.getStringAttribute("docker-image")));
}
@@ -78,7 +87,7 @@ public class NodesSpecification {
/** Returns a requirement from <code>count</code> nondedicated nodes in one group */
public static NodesSpecification nonDedicated(int count) {
- return new NodesSpecification(false, count, 1, Optional.empty(), Optional.empty());
+ return new NodesSpecification(false, count, 1, false, Optional.empty(), Optional.empty());
}
/**
@@ -95,7 +104,7 @@ public class NodesSpecification {
public Map<HostResource, ClusterMembership> provision(HostSystem hostSystem, ClusterSpec.Type clusterType, ClusterSpec.Id clusterId, DeployLogger logger) {
ClusterSpec cluster = ClusterSpec.request(clusterType, clusterId, dockerImage);
- return hostSystem.allocateHosts(cluster, Capacity.fromNodeCount(count, flavor), groups, logger);
+ return hostSystem.allocateHosts(cluster, Capacity.fromNodeCount(count, flavor, required), groups, logger);
}
@Override
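
The new required flag above is read as an optional boolean attribute on the nodes element, defaulting to false, and then passed on to Capacity.fromNodeCount. A self-contained sketch of that attribute handling with plain org.w3c.dom (the helper below is illustrative, not Vespa's ModelElement API):

    import org.w3c.dom.Document;
    import org.w3c.dom.Element;
    import org.xml.sax.InputSource;
    import javax.xml.parsers.DocumentBuilderFactory;
    import java.io.StringReader;

    public class NodesAttributeSketch {

        // Returns the attribute as a boolean, falling back to the default when it is absent.
        static boolean booleanAttribute(Element element, String name, boolean defaultValue) {
            String value = element.getAttribute(name); // "" when the attribute is absent
            return value.isEmpty() ? defaultValue : Boolean.parseBoolean(value);
        }

        public static void main(String[] args) throws Exception {
            String xml = "<nodes count='3' required='true'/>";
            Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
                    .parse(new InputSource(new StringReader(xml)));
            Element nodes = doc.getDocumentElement();
            System.out.println(booleanAttribute(nodes, "required", false));  // true
            System.out.println(booleanAttribute(nodes, "exclusive", false)); // false (defaulted)
        }
    }
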
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java b/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java
index 55cfc8b2fba..c1ad6eead47 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/clients/ContainerDocumentApi.java
@@ -26,7 +26,7 @@ import java.util.Set;
import java.util.TreeSet;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
* @since 5.1.11
*/
public class ContainerDocumentApi implements FeederConfig.Producer {
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
index e92bffe2542..1351933fbc8 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/ContainerModelBuilder.java
@@ -16,6 +16,7 @@ import com.yahoo.config.provision.ClusterSpec;
import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.NodeType;
import com.yahoo.container.jdisc.config.MetricDefaultsConfig;
+import com.yahoo.path.Path;
import com.yahoo.search.rendering.RendererRegistry;
import com.yahoo.text.XML;
import com.yahoo.vespa.defaults.Defaults;
@@ -104,7 +105,6 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
checkVersion(spec);
this.log = modelContext.getDeployLogger();
-
ContainerCluster cluster = createContainerCluster(spec, modelContext);
addClusterContent(cluster, spec, modelContext);
addBundlesForPlatformComponents(cluster);
@@ -587,9 +587,7 @@ public class ContainerModelBuilder extends ConfigModelBuilder<ContainerModel> {
private ContainerDocumentApi buildDocumentApi(ContainerCluster cluster, Element spec) {
Element documentApiElement = XML.getChild(spec, "document-api");
- if (documentApiElement == null) {
- return null;
- }
+ if (documentApiElement == null) return null;
ContainerDocumentApi.Options documentApiOptions = DocumentApiOptionsBuilder.build(documentApiElement);
return new ContainerDocumentApi(cluster, documentApiOptions);
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java
index df2090db166..b1ffd55b0f0 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/container/xml/DocumentApiOptionsBuilder.java
@@ -12,10 +12,11 @@ import java.util.List;
import java.util.logging.Logger;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
* @since 5.1.11
*/
public class DocumentApiOptionsBuilder {
+
private static final Logger log = Logger.getLogger(DocumentApiOptionsBuilder.class.getName());
private static final String[] DEFAULT_BINDINGS = {"http://*/", "https://*/"};
@@ -116,4 +117,5 @@ public class DocumentApiOptionsBuilder {
String value = getCleanValue(spec, "abortondocumenterror");
return value == null ? null : Boolean.parseBoolean(value);
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
index 56e275e74ac..7e24285c6fb 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/ContentSearchCluster.java
@@ -31,6 +31,7 @@ public class ContentSearchCluster extends AbstractConfigProducer implements Prot
/** The single, indexed search cluster this sets up (supporting multiple document types), or null if none */
private IndexedSearchCluster indexedCluster;
+ private Redundancy redundancy;
private final String clusterName;
Map<String, NewDocumentType> documentDefinitions;
@@ -254,6 +255,7 @@ public class ContentSearchCluster extends AbstractConfigProducer implements Prot
if (usesHierarchicDistribution()) {
indexedCluster.setMaxNodesDownPerFixedRow((redundancy.effectiveFinalRedundancy() / groupToSpecMap.size()) - 1);
}
+ this.redundancy = redundancy;
}
@Override
@@ -287,6 +289,9 @@ public class ContentSearchCluster extends AbstractConfigProducer implements Prot
if (tuning != null) {
tuning.getConfig(builder);
}
+ if (redundancy != null) {
+ redundancy.getConfig(builder);
+ }
}
@Override
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java b/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java
index 262c985e733..918bdcb8cb7 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/Redundancy.java
@@ -2,19 +2,21 @@
package com.yahoo.vespa.model.content;
import com.yahoo.vespa.config.content.StorDistributionConfig;
+import com.yahoo.vespa.config.search.core.ProtonConfig;
/**
* Configuration of the redundancy of a content cluster.
*
* @author bratseth
*/
-public class Redundancy implements StorDistributionConfig.Producer {
+public class Redundancy implements StorDistributionConfig.Producer, ProtonConfig.Producer {
private final int initialRedundancy ;
private final int finalRedundancy;
private final int readyCopies;
private int implicitGroups = 1;
+ private int explicitGroups = 1;
/** The total number of nodes available in this cluster (assigned when this becomes known) */
private int totalNodes = 0;
@@ -39,6 +41,7 @@ public class Redundancy implements StorDistributionConfig.Producer {
* values returned in the config.
*/
public void setImplicitGroups(int implicitGroups) { this.implicitGroups = implicitGroups; }
+ public void setExplicitGroups(int explicitGroups) { this.explicitGroups = explicitGroups; }
public int initialRedundancy() { return initialRedundancy; }
public int finalRedundancy() { return finalRedundancy; }
@@ -54,4 +57,11 @@ public class Redundancy implements StorDistributionConfig.Producer {
builder.redundancy(effectiveFinalRedundancy());
builder.ready_copies(effectiveReadyCopies());
}
+ @Override
+ public void getConfig(ProtonConfig.Builder builder) {
+ ProtonConfig.Distribution.Builder distBuilder = new ProtonConfig.Distribution.Builder();
+ distBuilder.redundancy(finalRedundancy/explicitGroups);
+ distBuilder.searchablecopies(readyCopies/(explicitGroups*implicitGroups));
+ builder.distribution(distBuilder);
+ }
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
index fa417b34844..ef05a3d6ff5 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/content/cluster/ContentCluster.java
@@ -104,8 +104,7 @@ public class ContentCluster extends AbstractConfigProducer implements StorDistri
String routingSelection = new DocumentSelectionBuilder().build(contentElement.getChild("documents"));
Redundancy redundancy = new RedundancyBuilder().build(contentElement);
- ContentCluster c = new ContentCluster(ancestor, getClusterName(contentElement), documentDefinitions,
- routingSelection, redundancy);
+ ContentCluster c = new ContentCluster(ancestor, getClusterName(contentElement), documentDefinitions, routingSelection, redundancy);
c.clusterControllerConfig = new ClusterControllerConfig.Builder(getClusterName(contentElement), contentElement).build(c, contentElement.getXml());
c.search = new ContentSearchCluster.Builder(documentDefinitions).build(c, contentElement.getXml());
c.persistenceFactory = new EngineFactoryBuilder().build(contentElement, c);
@@ -113,6 +112,7 @@ public class ContentCluster extends AbstractConfigProducer implements StorDistri
c.distributorNodes = new DistributorCluster.Builder(c).build(c, w3cContentElement);
c.rootGroup = new StorageGroup.Builder(contentElement, c, deployLogger).buildRootGroup();
validateThatGroupSiblingsAreUnique(c.clusterName, c.rootGroup);
+ redundancy.setExplicitGroups(c.getRootGroup().getNumberOfLeafGroups());
c.search.handleRedundancy(redundancy);
IndexedSearchCluster index = c.search.getIndexed();
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributionConfigProducer.java b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributionConfigProducer.java
index 630118cc60c..095a5e29450 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributionConfigProducer.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributionConfigProducer.java
@@ -13,6 +13,7 @@ import java.util.Map;
* @author tonytv
*/
public class FileDistributionConfigProducer extends AbstractConfigProducer {
+
private final Map<Host, FileDistributorService> fileDistributorServices = new IdentityHashMap<>();
private final FileDistributor fileDistributor;
private final FileDistributionOptions options;
@@ -56,4 +57,5 @@ public class FileDistributionConfigProducer extends AbstractConfigProducer {
return new FileDistributionConfigProducer(ancestor, fileDistributor, options);
}
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
index 4dc24618a61..df7b4f58ab5 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/filedistribution/FileDistributor.java
@@ -117,4 +117,5 @@ public class FileDistributor {
result.addAll(asList(additionalHosts));
return result;
}
+
}
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
index 4ab9ed3af85..84685ecef3d 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/utils/FileSender.java
@@ -32,8 +32,8 @@ public class FileSender implements Serializable {
public static FileReference sendFileToServices(String relativePath,
Collection<? extends AbstractService> services) {
if (services.isEmpty()) {
- throw new IllegalStateException("'sendFileToServices called for empty services!" +
- " - This should never happen!");
+ throw new IllegalStateException("No service instances. Probably a standalone cluster setting up <nodes> " +
+ "using 'count' instead of <node> tags.");
}
FileReference fileref = null;
for (AbstractService service : services) {
@@ -146,4 +146,5 @@ public class FileSender implements Serializable {
}
builder.setValue(reference.value());
}
+
} \ No newline at end of file
diff --git a/config-model/src/main/java/com/yahoo/vespa/model/utils/FreezableMap.java b/config-model/src/main/java/com/yahoo/vespa/model/utils/FreezableMap.java
index a05008cc9a0..211413f9bff 100644
--- a/config-model/src/main/java/com/yahoo/vespa/model/utils/FreezableMap.java
+++ b/config-model/src/main/java/com/yahoo/vespa/model/utils/FreezableMap.java
@@ -4,8 +4,9 @@ package com.yahoo.vespa.model.utils;
import java.util.*;
/**
- * Delegates to a map that can be froozen.
+ * Delegates to a map that can be frozen.
* Not thread safe.
+ *
* @author tonytv
*/
public class FreezableMap<K, V> implements Map<K, V> {
@@ -88,4 +89,5 @@ public class FreezableMap<K, V> implements Map<K, V> {
public boolean isFrozen() {
return frozen;
}
+
}
diff --git a/config-model/src/main/resources/schema/common.rnc b/config-model/src/main/resources/schema/common.rnc
index 06e7b945c18..b89fe0d7fcb 100644
--- a/config-model/src/main/resources/schema/common.rnc
+++ b/config-model/src/main/resources/schema/common.rnc
@@ -23,6 +23,7 @@ Nodes = element nodes {
OptionalDedicatedNodes = element nodes {
attribute count { xsd:positiveInteger } &
attribute flavor { xsd:string }? &
+ attribute required { xsd:boolean }? &
attribute docker-image { xsd:string }? &
attribute dedicated { xsd:boolean }?
}
diff --git a/config-model/src/main/resources/schema/containercluster.rnc b/config-model/src/main/resources/schema/containercluster.rnc
index 8f1ed0d874e..17e38883755 100644
--- a/config-model/src/main/resources/schema/containercluster.rnc
+++ b/config-model/src/main/resources/schema/containercluster.rnc
@@ -161,7 +161,7 @@ ProcessingInContainer = element processing {
-# DOCUMENT API/GATEWAY:
+# DOCUMENT API:
DocumentApi = element document-api {
ServerBindings &
@@ -194,6 +194,7 @@ NodesOfContainerCluster = element nodes {
(
attribute count { xsd:positiveInteger } &
attribute flavor { xsd:string }? &
+ attribute required { xsd:boolean }? &
attribute docker-image { xsd:string }?
)
|
diff --git a/config-model/src/main/resources/schema/content.rnc b/config-model/src/main/resources/schema/content.rnc
index 30b931053d5..c3a8386ac5e 100644
--- a/config-model/src/main/resources/schema/content.rnc
+++ b/config-model/src/main/resources/schema/content.rnc
@@ -216,6 +216,7 @@ ContentNodes = element nodes {
(
attribute count { xsd:positiveInteger } &
attribute flavor { xsd:string }? &
+ attribute required { xsd:boolean }? &
attribute docker-image { xsd:string }? &
attribute groups { xsd:positiveInteger }?
)
@@ -260,6 +261,7 @@ Group = element group {
element nodes {
attribute count { xsd:positiveInteger } &
attribute flavor { xsd:string }? &
+ attribute required { xsd:boolean }? &
attribute docker-image { xsd:string }? &
attribute groups { xsd:positiveInteger }?
}
diff --git a/config-model/src/main/resources/schema/deployment.rnc b/config-model/src/main/resources/schema/deployment.rnc
new file mode 100644
index 00000000000..22ceab4efa5
--- /dev/null
+++ b/config-model/src/main/resources/schema/deployment.rnc
@@ -0,0 +1,26 @@
+# RELAX NG Compact Syntax
+# Vespa Deployment file
+
+start = element deployment {
+ attribute version { "1.0" } &
+ Test? &
+ Staging? &
+ Prod*
+}
+
+Test = element test {
+ text
+}
+
+Staging = element staging {
+ text
+}
+
+Prod =
+ element prod {
+ attribute global-service-id { text }?,
+ element region {
+ attribute active { xsd:boolean },
+ text
+ }*
+ }
diff --git a/config-model/src/main/resources/schema/schemas.xml b/config-model/src/main/resources/schema/schemas.xml
index 728754e3a5f..ed39af3d490 100644
--- a/config-model/src/main/resources/schema/schemas.xml
+++ b/config-model/src/main/resources/schema/schemas.xml
@@ -3,4 +3,5 @@
<locatingRules xmlns="http://thaiopensource.com/ns/locating-rules/1.0">
<documentElement localName="hosts" uri="hosts.rnc"/>
<documentElement localName="services" uri="services.rnc"/>
+ <documentElement localName="deployment" uri="deployment.rnc"/>
</locatingRules>
diff --git a/config-model/src/test/cfg/application/app1/deployment.xml b/config-model/src/test/cfg/application/app1/deployment.xml
new file mode 100644
index 00000000000..34d2036c1a5
--- /dev/null
+++ b/config-model/src/test/cfg/application/app1/deployment.xml
@@ -0,0 +1,8 @@
+<deployment version="1.0">
+ <test/>
+ <staging/>
+ <prod global-service-id="query">
+ <region active="true">us-east-3</region>
+ <region active="false">us-west-1</region>
+ </prod>
+</deployment>
diff --git a/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml b/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml
new file mode 100644
index 00000000000..7a1089a6c1e
--- /dev/null
+++ b/config-model/src/test/cfg/application/app_invalid_deployment_xml/deployment.xml
@@ -0,0 +1,8 @@
+<deployment version="1.0">
+ <test/>
+ <staging/>
+ <prod global-service-id="query">
+ <region>us-east-3</region>
+ <region active="false">us-west-1</region>
+ </prod>
+</deployment>
diff --git a/config-model/src/test/cfg/application/app_invalid_deployment_xml/hosts.xml b/config-model/src/test/cfg/application/app_invalid_deployment_xml/hosts.xml
new file mode 100644
index 00000000000..132169097cf
--- /dev/null
+++ b/config-model/src/test/cfg/application/app_invalid_deployment_xml/hosts.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<hosts>
+ <host name="localhost">
+ <alias>node1</alias>
+ </host>
+ <host name="schmocalhost">
+ <alias>node2</alias>
+ </host>
+</hosts>
diff --git a/config-model/src/test/cfg/application/app_invalid_deployment_xml/services.xml b/config-model/src/test/cfg/application/app_invalid_deployment_xml/services.xml
new file mode 100644
index 00000000000..a1702af234f
--- /dev/null
+++ b/config-model/src/test/cfg/application/app_invalid_deployment_xml/services.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<services version="1.0">
+
+ <admin version="2.0">
+ <adminserver hostalias="node1"/>
+ </admin>
+
+ <container version="1.0">
+ <nodes>
+ <node hostalias="node1" />
+ </nodes>
+ <search/>
+ </container>
+
+</services>
diff --git a/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/deployment.xml b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/deployment.xml
new file mode 100644
index 00000000000..d04cc5dfd65
--- /dev/null
+++ b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/deployment.xml
@@ -0,0 +1,5 @@
+<deployment version="1.0">
+ <test/>
+ <staging/>
+ <prod />
+</deployment>
diff --git a/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/hosts.xml b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/hosts.xml
new file mode 100644
index 00000000000..132169097cf
--- /dev/null
+++ b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/hosts.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<hosts>
+ <host name="localhost">
+ <alias>node1</alias>
+ </host>
+ <host name="schmocalhost">
+ <alias>node2</alias>
+ </host>
+</hosts>
diff --git a/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/services.xml b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/services.xml
new file mode 100644
index 00000000000..a1702af234f
--- /dev/null
+++ b/config-model/src/test/cfg/application/empty_prod_region_in_deployment_xml/services.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<services version="1.0">
+
+ <admin version="2.0">
+ <adminserver hostalias="node1"/>
+ </admin>
+
+ <container version="1.0">
+ <nodes>
+ <node hostalias="node1" />
+ </nodes>
+ <search/>
+ </container>
+
+</services>
diff --git a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
index d4e521ebd13..0334b3c867b 100644
--- a/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/ApplicationDeployTest.java
@@ -21,6 +21,7 @@ import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.search.SearchDefinition;
import org.json.JSONException;
import org.junit.After;
+import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
@@ -203,6 +204,28 @@ public class ApplicationDeployTest {
assertThat(getSearchDefinitions(app).size(), is(6));
}
+ @Test
+ public void testThatAppWithDeploymentXmlIsValid() throws IOException {
+ File tmpDir = Files.createTempDir();
+ IOUtils.copyDirectory(new File(TESTDIR, "app1"), tmpDir);
+ createAppPkg(tmpDir.getAbsolutePath());
+ }
+
+ @Ignore // TODO: Enable when code in ApplicationPackageXmlFilesValidator does validation of deployment.xml
+ @Test(expected = IllegalArgumentException.class)
+ public void testThatAppWithIllegalDeploymentXmlIsNotValid() throws IOException {
+ File tmpDir = Files.createTempDir();
+ IOUtils.copyDirectory(new File(TESTDIR, "app_invalid_deployment_xml"), tmpDir);
+ createAppPkg(tmpDir.getAbsolutePath());
+ }
+
+ @Test
+ public void testThatAppWithIllegalEmptyProdRegion() throws IOException {
+ File tmpDir = Files.createTempDir();
+ IOUtils.copyDirectory(new File(TESTDIR, "empty_prod_region_in_deployment_xml"), tmpDir);
+ createAppPkg(tmpDir.getAbsolutePath());
+ }
+
private List<SearchDefinition> getSearchDefinitions(FilesApplicationPackage app) {
return new DeployState.Builder().applicationPackage(app).build().getSearchDefinitions();
}
diff --git a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
index 86fabdf26bc..8ed4539456a 100644
--- a/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
+++ b/config-model/src/test/java/com/yahoo/config/model/provision/ModelProvisioningTest.java
@@ -522,6 +522,7 @@ public class ModelProvisioningTest {
assertEquals(1, clusterControllers.getContainers().size()); // TODO: Expected 5 with this feature reactivated
}
+ @Test
public void testClusterControllersAreNotPlacedOnRetiredNodes() {
String services =
"<?xml version='1.0' encoding='utf-8' ?>\n" +
@@ -907,6 +908,26 @@ public class ModelProvisioningTest {
assertThat(cluster.getRootGroup().getNodes().get(0).getConfigId(), is("bar/storage/0"));
}
+ @Test(expected = IllegalArgumentException.class)
+ public void testRequiringMoreNodesThanAreAvailable() throws ParseException {
+ String services =
+ "<?xml version='1.0' encoding='utf-8' ?>\n" +
+ "<services>" +
+ " <content version='1.0' id='bar'>" +
+ " <redundancy>1</redundancy>" +
+ " <documents>" +
+ " <document type='type1' mode='index'/>" +
+ " </documents>" +
+ " <nodes count='3' required='true'/>" +
+ " </content>" +
+ "</services>";
+
+ int numberOfHosts = 2;
+ VespaModelTester tester = new VespaModelTester();
+ tester.addHosts(numberOfHosts);
+ tester.createModel(services, false);
+ }
+
@Test
public void testUsingNodesCountAttributesAndGettingJustOneNode() {
String services =
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContainerRestartValidatorTest.java b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContainerRestartValidatorTest.java
index 88ba6d885b8..3bec45279d9 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContainerRestartValidatorTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/application/validation/change/ContainerRestartValidatorTest.java
@@ -35,6 +35,14 @@ public class ContainerRestartValidatorTest {
assertTrue(result.isEmpty());
}
+ @Test
+ public void validator_returns_empty_list_for_containers_with_restart_on_deploy_disabled_where_previously_enabled() {
+ VespaModel current = createModel(true);
+ VespaModel next = createModel(false);
+ List<ConfigChangeAction> result = validateModel(current, next);
+ assertTrue(result.isEmpty());
+ }
+
private static List<ConfigChangeAction> validateModel(VespaModel current, VespaModel next) {
return new ContainerRestartValidator()
.validate(current, next, new ValidationOverrides(Collections.emptyList()));
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java
index bcb113687ec..126fcf7a583 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/content/ClusterTest.java
@@ -9,6 +9,7 @@ import com.yahoo.vespa.config.content.core.StorServerConfig;
import com.yahoo.vespa.config.content.FleetcontrollerConfig;
import com.yahoo.vespa.config.content.StorDistributionConfig;
import com.yahoo.metrics.MetricsmanagerConfig;
+import com.yahoo.vespa.config.search.core.ProtonConfig;
import com.yahoo.vespa.model.VespaModel;
import com.yahoo.vespa.model.container.ContainerCluster;
import com.yahoo.vespa.model.content.cluster.ContentCluster;
@@ -34,44 +35,99 @@ public class ClusterTest extends ContentBaseTest {
}
@Test
- public void testRedundancy() {
- StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
- parse("" +
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <engine>" +
- " <proton>" +
- " <searchable-copies>3</searchable-copies>" +
- " </proton>" +
- " </engine>" +
- " <redundancy reply-after=\"4\">5</redundancy>\n" +
- " <group>" +
- " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" +
- " <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" +
- " <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" +
- " <node hostalias=\"mockhost\" distribution-key=\"3\"/>\"" +
- " <node hostalias=\"mockhost\" distribution-key=\"4\"/>\"" +
- " </group>" +
- "</content>"
- ).getConfig(builder);
+ public void testHierarchicRedundancy() {
+ ContentCluster cc = parse("" +
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <engine>" +
+ " <proton>" +
+ " <searchable-copies>3</searchable-copies>" +
+ " </proton>" +
+ " </engine>" +
+ " <redundancy>15</redundancy>\n" +
+ " <group name='root' distribution-key='0'>" +
+ " <distribution partitions='1|1|*'/>" +
+ " <group name='g-1' distribution-key='0'>" +
+ " <node hostalias='mockhost' distribution-key='0'/>" +
+ " <node hostalias='mockhost' distribution-key='1'/>" +
+ " <node hostalias='mockhost' distribution-key='2'/>" +
+ " <node hostalias='mockhost' distribution-key='3'/>" +
+ " <node hostalias='mockhost' distribution-key='4'/>" +
+ " </group>" +
+ " <group name='g-2' distribution-key='1'>" +
+ " <node hostalias='mockhost' distribution-key='5'/>" +
+ " <node hostalias='mockhost' distribution-key='6'/>" +
+ " <node hostalias='mockhost' distribution-key='7'/>" +
+ " <node hostalias='mockhost' distribution-key='8'/>" +
+ " <node hostalias='mockhost' distribution-key='9'/>" +
+ " </group>" +
+ " <group name='g-3' distribution-key='1'>" +
+ " <node hostalias='mockhost' distribution-key='10'/>" +
+ " <node hostalias='mockhost' distribution-key='11'/>" +
+ " <node hostalias='mockhost' distribution-key='12'/>" +
+ " <node hostalias='mockhost' distribution-key='13'/>" +
+ " <node hostalias='mockhost' distribution-key='14'/>" +
+ " </group>" +
+ " </group>" +
+ "</content>"
+ );
+ StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder();
+ cc.getConfig(storBuilder);
+ StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder);
+ assertEquals(15, storConfig.initial_redundancy());
+ assertEquals(15, storConfig.redundancy());
+ assertEquals(3, storConfig.ready_copies());
+ ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
+ cc.getSearch().getConfig(protonBuilder);
+ ProtonConfig protonConfig = new ProtonConfig(protonBuilder);
+ assertEquals(1, protonConfig.distribution().searchablecopies());
+ assertEquals(5, protonConfig.distribution().redundancy());
+ }
- StorDistributionConfig config = new StorDistributionConfig(builder);
- assertEquals(4, config.initial_redundancy());
- assertEquals(5, config.redundancy());
- assertEquals(3, config.ready_copies());
+ @Test
+ public void testRedundancy() {
+ ContentCluster cc = parse("" +
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <engine>" +
+ " <proton>" +
+ " <searchable-copies>3</searchable-copies>" +
+ " </proton>" +
+ " </engine>" +
+ " <redundancy reply-after='4'>5</redundancy>\n" +
+ " <group>" +
+ " <node hostalias='mockhost' distribution-key='0'/>" +
+ " <node hostalias='mockhost' distribution-key='1'/>" +
+ " <node hostalias='mockhost' distribution-key='2'/>" +
+ " <node hostalias='mockhost' distribution-key='3'/>" +
+ " <node hostalias='mockhost' distribution-key='4'/>" +
+ " </group>" +
+ "</content>"
+ );
+ StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder();
+ cc.getConfig(storBuilder);
+ StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder);
+ assertEquals(4, storConfig.initial_redundancy());
+ assertEquals(5, storConfig.redundancy());
+ assertEquals(3, storConfig.ready_copies());
+ ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
+ cc.getSearch().getConfig(protonBuilder);
+ ProtonConfig protonConfig = new ProtonConfig(protonBuilder);
+ assertEquals(3, protonConfig.distribution().searchablecopies());
+ assertEquals(5, protonConfig.distribution().redundancy());
}
@Test
public void testNoId() {
ContentCluster c = parse(
- "<content version=\"1.0\">\n" +
- " <redundancy>1</redundancy>\n" +
- " <documents/>" +
- " <redundancy reply-after=\"4\">5</redundancy>\n" +
- " <group>" +
- " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" +
- " </group>" +
- "</content>"
+ "<content version=\"1.0\">\n" +
+ " <redundancy>1</redundancy>\n" +
+ " <documents/>" +
+ " <redundancy reply-after=\"4\">5</redundancy>\n" +
+ " <group>" +
+ " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" +
+ " </group>" +
+ "</content>"
);
assertEquals("content", c.getName());
@@ -81,14 +137,14 @@ public class ClusterTest extends ContentBaseTest {
public void testRedundancyDefaults() {
StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <group>" +
- " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" +
- " <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" +
- " <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" +
- " </group>" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <group>" +
+ " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" +
+ " <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" +
+ " <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" +
+ " </group>" +
+ "</content>"
).getConfig(builder);
StorDistributionConfig config = new StorDistributionConfig(builder);
@@ -99,39 +155,40 @@ public class ClusterTest extends ContentBaseTest {
@Test
public void testEndToEnd() throws Exception {
- String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
- "<services>\n" +
- "\n" +
- " <admin version=\"2.0\">\n" +
- " <adminserver hostalias=\"configserver\" />\n" +
- " <logserver hostalias=\"logserver\" />\n" +
- " <slobroks>\n" +
- " <slobrok hostalias=\"configserver\" />\n" +
- " <slobrok hostalias=\"logserver\" />\n" +
- " </slobroks>\n" +
- " <cluster-controllers>\n" +
- " <cluster-controller hostalias=\"configserver\"/>" +
- " <cluster-controller hostalias=\"configserver2\"/>" +
- " <cluster-controller hostalias=\"configserver3\"/>" +
- " </cluster-controllers>\n" +
- " </admin>\n" +
- " <content version='1.0' id='bar'>" +
- " <redundancy>1</redundancy>\n" +
- " <documents>" +
- " <document type=\"type1\" mode=\"index\"/>\n" +
- " <document type=\"type2\" mode=\"index\"/>\n" +
- " </documents>\n" +
- " <group>" +
- " <node hostalias='node0' distribution-key='0' />" +
- " </group>" +
- " <tuning>" +
- " <cluster-controller>\n" +
- " <init-progress-time>34567</init-progress-time>" +
- " </cluster-controller>" +
- " </tuning>" +
- " </content>" +
- "\n" +
- "</services>";
+ String xml =
+ "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
+ "<services>\n" +
+ "\n" +
+ " <admin version=\"2.0\">\n" +
+ " <adminserver hostalias=\"configserver\" />\n" +
+ " <logserver hostalias=\"logserver\" />\n" +
+ " <slobroks>\n" +
+ " <slobrok hostalias=\"configserver\" />\n" +
+ " <slobrok hostalias=\"logserver\" />\n" +
+ " </slobroks>\n" +
+ " <cluster-controllers>\n" +
+ " <cluster-controller hostalias=\"configserver\"/>" +
+ " <cluster-controller hostalias=\"configserver2\"/>" +
+ " <cluster-controller hostalias=\"configserver3\"/>" +
+ " </cluster-controllers>\n" +
+ " </admin>\n" +
+ " <content version='1.0' id='bar'>" +
+ " <redundancy>1</redundancy>\n" +
+ " <documents>" +
+ " <document type=\"type1\" mode=\"index\"/>\n" +
+ " <document type=\"type2\" mode=\"index\"/>\n" +
+ " </documents>\n" +
+ " <group>" +
+ " <node hostalias='node0' distribution-key='0' />" +
+ " </group>" +
+ " <tuning>" +
+ " <cluster-controller>\n" +
+ " <init-progress-time>34567</init-progress-time>" +
+ " </cluster-controller>" +
+ " </tuning>" +
+ " </content>" +
+ "\n" +
+ "</services>";
List<String> sds = ApplicationPackageUtils.generateSearchDefinitions("type1", "type2");
VespaModel model = (new VespaModelCreatorWithMockPkg(null, xml, sds)).create();
@@ -183,32 +240,33 @@ public class ClusterTest extends ContentBaseTest {
@Test
public void testSearchTuning() throws Exception {
- String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
- "<services>\n" +
- "\n" +
- " <admin version=\"2.0\">\n" +
- " <adminserver hostalias=\"node0\" />\n" +
- " <cluster-controllers>\n" +
- " <cluster-controller hostalias=\"node0\"/>" +
- " </cluster-controllers>\n" +
- " </admin>\n" +
- " <content version='1.0' id='bar'>" +
- " <redundancy>1</redundancy>\n" +
- " <documents>" +
- " <document type=\"type1\" mode='index'/>\n" +
- " <document type=\"type2\" mode='index'/>\n" +
- " </documents>\n" +
- " <group>" +
- " <node hostalias='node0' distribution-key='0'/>" +
- " </group>" +
- " <tuning>\n" +
- " <cluster-controller>" +
- " <init-progress-time>34567</init-progress-time>" +
- " </cluster-controller>" +
- " </tuning>" +
- " </content>" +
- "\n" +
- "</services>";
+ String xml =
+ "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
+ "<services>\n" +
+ "\n" +
+ " <admin version=\"2.0\">\n" +
+ " <adminserver hostalias=\"node0\" />\n" +
+ " <cluster-controllers>\n" +
+ " <cluster-controller hostalias=\"node0\"/>" +
+ " </cluster-controllers>\n" +
+ " </admin>\n" +
+ " <content version='1.0' id='bar'>" +
+ " <redundancy>1</redundancy>\n" +
+ " <documents>" +
+ " <document type=\"type1\" mode='index'/>\n" +
+ " <document type=\"type2\" mode='index'/>\n" +
+ " </documents>\n" +
+ " <group>" +
+ " <node hostalias='node0' distribution-key='0'/>" +
+ " </group>" +
+ " <tuning>\n" +
+ " <cluster-controller>" +
+ " <init-progress-time>34567</init-progress-time>" +
+ " </cluster-controller>" +
+ " </tuning>" +
+ " </content>" +
+ "\n" +
+ "</services>";
List<String> sds = ApplicationPackageUtils.generateSearchDefinitions("type1", "type2");
VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create();
@@ -232,21 +290,22 @@ public class ClusterTest extends ContentBaseTest {
@Test
public void testRedundancyRequired() throws Exception {
- String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
- "<services>\n" +
- "\n" +
- " <admin version=\"2.0\">\n" +
- " <adminserver hostalias=\"node0\" />\n" +
- " </admin>\n" +
- " <content version='1.0' id='bar'>" +
- " <documents>" +
- " <document type=\"type1\" mode='index'/>\n" +
- " </documents>\n" +
- " <group>\n" +
- " <node hostalias='node0' distribution-key='0'/>\n" +
- " </group>\n" +
- " </content>\n" +
- "</services>\n";
+ String xml =
+ "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
+ "<services>\n" +
+ "\n" +
+ " <admin version=\"2.0\">\n" +
+ " <adminserver hostalias=\"node0\" />\n" +
+ " </admin>\n" +
+ " <content version='1.0' id='bar'>" +
+ " <documents>" +
+ " <document type=\"type1\" mode='index'/>\n" +
+ " </documents>\n" +
+ " <group>\n" +
+ " <node hostalias='node0' distribution-key='0'/>\n" +
+ " </group>\n" +
+ " </content>\n" +
+ "</services>\n";
List<String> sds = ApplicationPackageUtils.generateSearchDefinitions("type1", "type2");
try{
@@ -261,12 +320,12 @@ public class ClusterTest extends ContentBaseTest {
public void testRedundancyFinalLessThanInitial() {
try {
parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <redundancy reply-after=\"4\">2</redundancy>\n" +
- " <group>" +
- " <node hostalias='node0' distribution-key='0' />" +
- " </group>" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <redundancy reply-after=\"4\">2</redundancy>\n" +
+ " <group>" +
+ " <node hostalias='node0' distribution-key='0' />" +
+ " </group>" +
+ "</content>"
);
fail("no exception thrown");
} catch (Exception e) {
@@ -277,17 +336,17 @@ public class ClusterTest extends ContentBaseTest {
public void testReadyTooHigh() {
try {
parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <engine>" +
- " <proton>" +
- " <searchable-copies>3</searchable-copies>" +
- " </proton>" +
- " </engine>" +
- " <redundancy>2</redundancy>\n" +
- " <group>" +
- " <node hostalias='node0' distribution-key='0' />" +
- " </group>" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <engine>" +
+ " <proton>" +
+ " <searchable-copies>3</searchable-copies>" +
+ " </proton>" +
+ " </engine>" +
+ " <redundancy>2</redundancy>\n" +
+ " <group>" +
+ " <node hostalias='node0' distribution-key='0' />" +
+ " </group>" +
+ "</content>"
);
fail("no exception thrown");
} catch (Exception e) {
@@ -308,12 +367,12 @@ public class ClusterTest extends ContentBaseTest {
{
{
FleetcontrollerConfig config = getFleetControllerConfig(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
+ " </group>\n" +
+ "</content>"
);
assertEquals(0, config.min_storage_up_ratio(), 0.01);
@@ -324,17 +383,17 @@ public class ClusterTest extends ContentBaseTest {
{
FleetcontrollerConfig config = getFleetControllerConfig(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " <node distribution-key=\"2\" hostalias=\"mockhost\"/>\n" +
- " <node distribution-key=\"3\" hostalias=\"mockhost\"/>\n" +
- " <node distribution-key=\"4\" hostalias=\"mockhost\"/>\n" +
- " <node distribution-key=\"5\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
+ " <node distribution-key=\"2\" hostalias=\"mockhost\"/>\n" +
+ " <node distribution-key=\"3\" hostalias=\"mockhost\"/>\n" +
+ " <node distribution-key=\"4\" hostalias=\"mockhost\"/>\n" +
+ " <node distribution-key=\"5\" hostalias=\"mockhost\"/>\n" +
+ " </group>\n" +
+ "</content>"
);
assertNotSame(0, config.min_storage_up_ratio());
@@ -345,12 +404,12 @@ public class ClusterTest extends ContentBaseTest {
public void testImplicitDistributionBits()
{
ContentCluster cluster = parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
+ " </group>\n" +
+ "</content>"
);
{
@@ -368,15 +427,15 @@ public class ClusterTest extends ContentBaseTest {
assertEquals(8, config.minsplitcount());
}
cluster = parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <engine>" +
- " <vds/>" +
- " </engine>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <engine>" +
+ " <vds/>" +
+ " </engine>" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
+ " </group>\n" +
+ "</content>"
);
{
@@ -399,15 +458,15 @@ public class ClusterTest extends ContentBaseTest {
public void testExplicitDistributionBits()
{
ContentCluster cluster = parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " <tuning>\n" +
- " <distribution type=\"strict\"/>\n" +
- " </tuning>\n" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
+ " </group>\n" +
+ " <tuning>\n" +
+ " <distribution type=\"strict\"/>\n" +
+ " </tuning>\n" +
+ "</content>"
);
{
@@ -425,18 +484,18 @@ public class ClusterTest extends ContentBaseTest {
assertEquals(8, config.minsplitcount());
}
cluster = parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <engine>" +
- " <vds/>" +
- " </engine>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- " <tuning>\n" +
- " <distribution type=\"loose\"/>\n" +
- " </tuning>\n" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <engine>" +
+ " <vds/>" +
+ " </engine>" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
+ " </group>\n" +
+ " <tuning>\n" +
+ " <distribution type=\"loose\"/>\n" +
+ " </tuning>\n" +
+ "</content>"
);
{
@@ -459,16 +518,16 @@ public class ClusterTest extends ContentBaseTest {
public void testGenerateSearchNodes()
{
ContentCluster cluster = parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <engine>" +
- " <proton/>" +
- " </engine>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <engine>" +
+ " <proton/>" +
+ " </engine>" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
+ " </group>\n" +
+ "</content>"
);
{
@@ -492,16 +551,16 @@ public class ClusterTest extends ContentBaseTest {
public void testAlternativeNodeSyntax()
{
ContentCluster cluster = parse(
- "<content version=\"1.0\" id=\"test\">\n" +
- " <documents/>" +
- " <engine>" +
- " <proton/>" +
- " </engine>" +
- " <nodes>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
- " </nodes>\n" +
- "</content>"
+ "<content version=\"1.0\" id=\"test\">\n" +
+ " <documents/>" +
+ " <engine>" +
+ " <proton/>" +
+ " </engine>" +
+ " <nodes>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
+ " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
+ " </nodes>\n" +
+ "</content>"
);
StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
@@ -519,13 +578,13 @@ public class ClusterTest extends ContentBaseTest {
public void testReadyWhenInitialOne() {
StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <redundancy>1</redundancy>\n" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <redundancy>1</redundancy>\n" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
+ " </group>" +
+ "</content>"
).getConfig(builder);
StorDistributionConfig config = new StorDistributionConfig(builder);
@@ -536,16 +595,16 @@ public class ClusterTest extends ContentBaseTest {
public void testProvider(String tagName, StorServerConfig.Persistence_provider.Type.Enum type) {
ContentCluster cluster = parse(
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <redundancy>3</redundancy>" +
- " <engine>\n" +
- " <" + tagName + "/>\n" +
- " </engine>\n" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>"
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <documents/>" +
+ " <redundancy>3</redundancy>" +
+ " <engine>\n" +
+ " <" + tagName + "/>\n" +
+ " </engine>\n" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
+ " </group>" +
+ "</content>"
);
{
@@ -582,11 +641,11 @@ public class ClusterTest extends ContentBaseTest {
MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder();
ContentCluster cluster = parse("<content version=\"1.0\" id=\"storage\">\n" +
- " <documents/>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
- " </group>\n" +
- "</content>"
+ " <documents/>" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
+ " </group>\n" +
+ "</content>"
);
cluster.getConfig(builder);
@@ -642,34 +701,34 @@ public class ClusterTest extends ContentBaseTest {
@Test
public void testConfiguredMetrics() throws Exception {
String xml = "" +
- "<services>" +
- "<content version=\"1.0\" id=\"storage\">\n" +
- " <redundancy>1</redundancy>\n" +
- " <documents>" +
- " <document type=\"type1\" mode='index'/>\n" +
- " <document type=\"type2\" mode='index'/>\n" +
- " </documents>" +
- " <group>\n" +
- " <node distribution-key=\"0\" hostalias=\"node0\"/>\n" +
- " </group>\n" +
- "</content>" +
- "<admin version=\"2.0\">" +
- " <logserver hostalias=\"node0\"/>" +
- " <adminserver hostalias=\"node0\"/>" +
- " <metric-consumers>" +
- " <consumer name=\"foobar\">" +
- " <metric name=\"storage.foo.bar\"/>" +
- " </consumer>" +
- " <consumer name=\"log\">" +
- " <metric name=\"extralogmetric\"/>" +
- " <metric name=\"extralogmetric3\"/>" +
- " </consumer>" +
- " <consumer name=\"fleetcontroller\">" +
- " <metric name=\"extraextra\"/>" +
- " </consumer>" +
- " </metric-consumers>" +
- "</admin>" +
- "</services>";
+ "<services>" +
+ "<content version=\"1.0\" id=\"storage\">\n" +
+ " <redundancy>1</redundancy>\n" +
+ " <documents>" +
+ " <document type=\"type1\" mode='index'/>\n" +
+ " <document type=\"type2\" mode='index'/>\n" +
+ " </documents>" +
+ " <group>\n" +
+ " <node distribution-key=\"0\" hostalias=\"node0\"/>\n" +
+ " </group>\n" +
+ "</content>" +
+ "<admin version=\"2.0\">" +
+ " <logserver hostalias=\"node0\"/>" +
+ " <adminserver hostalias=\"node0\"/>" +
+ " <metric-consumers>" +
+ " <consumer name=\"foobar\">" +
+ " <metric name=\"storage.foo.bar\"/>" +
+ " </consumer>" +
+ " <consumer name=\"log\">" +
+ " <metric name=\"extralogmetric\"/>" +
+ " <metric name=\"extralogmetric3\"/>" +
+ " </consumer>" +
+ " <consumer name=\"fleetcontroller\">" +
+ " <metric name=\"extraextra\"/>" +
+ " </consumer>" +
+ " </metric-consumers>" +
+ "</admin>" +
+ "</services>";
List<String> sds = ApplicationPackageUtils.generateSearchDefinitions("type1", "type2");
@@ -729,33 +788,33 @@ public class ClusterTest extends ContentBaseTest {
@Test
public void requireThatPreShutdownCommandIsSet() {
ContentCluster cluster = parse(
- "<content version=\"1.0\" id=\"storage\">" +
- " <documents/>" +
- " <engine>" +
- " <proton>" +
- " <flush-on-shutdown>true</flush-on-shutdown>" +
- " </proton>" +
- " </engine>" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>");
+ "<content version=\"1.0\" id=\"storage\">" +
+ " <documents/>" +
+ " <engine>" +
+ " <proton>" +
+ " <flush-on-shutdown>true</flush-on-shutdown>" +
+ " </proton>" +
+ " </engine>" +
+ " <group>" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
+ " </group>" +
+ "</content>");
assertThat(cluster.getSearch().getSearchNodes().size(), is(1));
assertTrue(cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand().isPresent());
cluster = parse(
- "<content version=\"1.0\" id=\"storage\">" +
- " <documents/>" +
- " <engine>" +
- " <proton>" +
- " <flush-on-shutdown> \n " +
- " true </flush-on-shutdown>" +
- " </proton>" +
- " </engine>" +
- " <group>" +
- " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
- " </group>" +
- "</content>");
+ "<content version=\"1.0\" id=\"storage\">" +
+ " <documents/>" +
+ " <engine>" +
+ " <proton>" +
+ " <flush-on-shutdown> \n " +
+ " true </flush-on-shutdown>" +
+ " </proton>" +
+ " </engine>" +
+ " <group>" +
+ " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
+ " </group>" +
+ "</content>");
assertThat(cluster.getSearch().getSearchNodes().size(), is(1));
assertTrue(cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand().isPresent());
}
diff --git a/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithMockPkg.java b/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithMockPkg.java
index 6f4effe0319..2f83d3bc394 100644
--- a/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithMockPkg.java
+++ b/config-model/src/test/java/com/yahoo/vespa/model/test/utils/VespaModelCreatorWithMockPkg.java
@@ -55,12 +55,13 @@ public class VespaModelCreatorWithMockPkg {
VespaModel model = new VespaModel(configModelRegistry, deployState);
if (validate) {
try {
- SchemaValidator validator = SchemaValidator.createTestValidatorHosts();
if (appPkg.getHosts() != null) {
- validator.validate(appPkg.getHosts());
+ SchemaValidator.createTestValidatorHosts().validate(appPkg.getHosts());
}
- validator = SchemaValidator.createTestValidatorServices();
- validator.validate(appPkg.getServices());
+ if (appPkg.getDeployment().isPresent()) {
+ SchemaValidator.createTestValidatorDeployment().validate(appPkg.getDeployment().get());
+ }
+ SchemaValidator.createTestValidatorServices().validate(appPkg.getServices());
} catch (Exception e) {
System.err.println(e.getClass());
throw e instanceof RuntimeException ? (RuntimeException) e : new RuntimeException(e);
diff --git a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java
index 7c13204c1e7..7894b722b58 100644
--- a/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java
+++ b/config-provisioning/src/main/java/com/yahoo/config/provision/Capacity.java
@@ -75,7 +75,11 @@ public final class Capacity {
public static Capacity fromRequiredNodeCount(int nodeCount, Optional<String> flavor) {
return new Capacity(nodeCount, true, flavor, NodeType.tenant);
}
-
+
+ public static Capacity fromNodeCount(int nodeCount, Optional<String> flavor, boolean required) {
+ return new Capacity(nodeCount, required, flavor, NodeType.tenant);
+ }
+
/** Creates this from a node type */
public static Capacity fromRequiredNodeType(NodeType type) {
return new Capacity(0, true, Optional.empty(), type);
diff --git a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java
index 8775d92417e..30c85efe93b 100644
--- a/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java
+++ b/config-proxy/src/main/java/com/yahoo/vespa/config/proxy/RpcConfigSourceClient.java
@@ -91,7 +91,7 @@ public class RpcConfigSourceClient extends ConfigSourceClient {
target.close();
}
String extra = "";
- log.log(LogLevel.ERROR, "Could not connect to any config source in set " + configSourceSet.toString() +
+ log.log(LogLevel.WARNING, "Could not connect to any config source in set " + configSourceSet.toString() +
", please make sure config server(s) are running. " + extra);
}
return false;
diff --git a/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java b/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java
index 2322726057e..619dd333b51 100644
--- a/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java
+++ b/config/src/main/java/com/yahoo/config/subscription/ConfigSubscriber.java
@@ -27,6 +27,7 @@ import com.yahoo.vespa.config.TimingValues;
* @since 5.1
*/
public class ConfigSubscriber {
+
private Logger log = Logger.getLogger(getClass().getName());
private State state = State.OPEN;
protected List<ConfigHandle<? extends ConfigInstance>> subscriptionHandles = new ArrayList<>();
diff --git a/configdefinitions/src/vespa/zookeeper-server.def b/configdefinitions/src/vespa/zookeeper-server.def
index b2f697a1488..b460b417105 100644
--- a/configdefinitions/src/vespa/zookeeper-server.def
+++ b/configdefinitions/src/vespa/zookeeper-server.def
@@ -4,6 +4,9 @@ namespace=cloud.config
# Vespa home is prepended if the file is relative
zooKeeperConfigFile string default="conf/zookeeper/zookeeper.cfg"
+# For more info about the values below, see ZooKeeper documentation
+
+# tick time in milliseconds
tickTime int default=2000
initLimit int default=20
syncLimit int default=15
@@ -19,6 +22,7 @@ clientPort int default=2181
# normal zone, a snapRetainCount of 15 gives 3-4 hours of logs before they're
# purged.
snapshotCount int default=50000
+# Purge interval in hours
autopurge.purgeInterval int default=1
autopurge.snapRetainCount int default=15
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java
index 58d651ae33a..37cea22e420 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/filedistribution/FileDBRegistry.java
@@ -43,12 +43,4 @@ public class FileDBRegistry implements FileRegistry {
return entries;
}
- @Override
- public Set<String> allRelativePaths() {
- Set<String> ret = new HashSet<>();
- for (Entry entry : entries) {
- ret.add(entry.relativePath);
- }
- return ret;
- }
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
index 99036ee0027..1b32d6bde22 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/ModelsBuilder.java
@@ -120,13 +120,12 @@ public abstract class ModelsBuilder<MODELRESULT extends ModelResult> {
ConfigserverConfig configserverConfig,
Zone zone,
Set<Rotation> rotations) {
- return new ModelContextImpl.Properties(
- applicationId,
- configserverConfig.multitenant(),
- ConfigServerSpec.fromConfig(configserverConfig),
- configserverConfig.hostedVespa(),
- zone,
- rotations);
+ return new ModelContextImpl.Properties(applicationId,
+ configserverConfig.multitenant(),
+ ConfigServerSpec.fromConfig(configserverConfig),
+ configserverConfig.hostedVespa(),
+ zone,
+ rotations);
}
}
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java
index cacd53cf945..9c1b2b4681e 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/modelfactory/PreparedModelsBuilder.java
@@ -91,11 +91,7 @@ public class PreparedModelsBuilder extends ModelsBuilder<PreparedModelsBuilder.P
this.applicationId = params.getApplicationId();
this.rotations = new Rotations(curator, tenantPath);
this.rotationsSet = getRotations(params.rotations());
- this.properties = createModelContextProperties(
- params.getApplicationId(),
- configserverConfig,
- zone,
- rotationsSet);
+ this.properties = createModelContextProperties(params.getApplicationId(), configserverConfig, zone, rotationsSet);
}
/** Construct with all dependencies passed separately */
diff --git a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
index b10865f257b..d2ded8ee226 100644
--- a/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
+++ b/configserver/src/main/java/com/yahoo/vespa/config/server/session/SessionPreparer.java
@@ -163,10 +163,7 @@ public class SessionPreparer {
void preprocess() {
try {
- this.applicationPackage = context.getApplicationPackage().preprocess(
- properties.zone(),
- null,
- logger);
+ this.applicationPackage = context.getApplicationPackage().preprocess(properties.zone(), null, logger);
} catch (IOException | TransformerException | ParserConfigurationException | SAXException e) {
throw new RuntimeException("Error deploying application package", e);
}
diff --git a/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java b/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java
index 3831f94a77d..b7af5e09f47 100644
--- a/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java
+++ b/configserver/src/test/java/com/yahoo/vespa/config/server/provision/StaticProvisionerTest.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
* @author lulf
*/
public class StaticProvisionerTest {
+
@Test
public void sameHostsAreProvisioned() throws IOException, SAXException {
ApplicationPackage app = FilesApplicationPackage.fromFile(new File("src/test/apps/hosted"));
diff --git a/container-core/src/main/java/com/yahoo/container/handler/VipStatusHandler.java b/container-core/src/main/java/com/yahoo/container/handler/VipStatusHandler.java
index 3be1d08c5dc..f36cd77c0b7 100644
--- a/container-core/src/main/java/com/yahoo/container/handler/VipStatusHandler.java
+++ b/container-core/src/main/java/com/yahoo/container/handler/VipStatusHandler.java
@@ -14,7 +14,6 @@ import com.yahoo.container.core.VipStatusConfig;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.ThreadedHttpRequestHandler;
-import com.yahoo.container.logging.AccessLog;
import com.yahoo.jdisc.Metric;
import com.yahoo.log.LogLevel;
import com.yahoo.text.Utf8;
diff --git a/container-disc/src/main/sh/vespa-start-container-daemon.sh b/container-disc/src/main/sh/vespa-start-container-daemon.sh
index 3afefd6f86d..ec632a9c0c4 100755
--- a/container-disc/src/main/sh/vespa-start-container-daemon.sh
+++ b/container-disc/src/main/sh/vespa-start-container-daemon.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#set -x
@@ -65,14 +65,21 @@ configure_memory() {
consider_fallback jvm_baseMaxDirectMemorySize 75
consider_fallback jvm_directMemorySizeCache 0
- if (( jvm_heapSizeAsPercentageOfPhysicalMemory > 0 && jvm_heapSizeAsPercentageOfPhysicalMemory < 100 )); then
- available=`free -m | grep Mem | tr -s ' ' | cut -f2 -d' '`
- jvm_heapsize=$[available * jvm_heapSizeAsPercentageOfPhysicalMemory / 100]
+ # Update jvm_heapsize only if percentage is explicitly set (default is 0).
+ if ((jvm_heapSizeAsPercentageOfPhysicalMemory > 0)); then
+ if ((TOTAL_MEMORY_MB > 0)); then
+ available="$TOTAL_MEMORY_MB"
+ else
+ available=`free -m | grep Mem | tr -s ' ' | cut -f2 -d' '`
+ fi
+
+ jvm_heapsize=$((available * jvm_heapSizeAsPercentageOfPhysicalMemory / 100))
if (( jvm_heapsize < 1024 )); then
jvm_heapsize=1024
fi
fi
- maxDirectMemorySize=$(( ${jvm_baseMaxDirectMemorySize} + ${jvm_heapsize}/8 + ${jvm_directMemorySizeCache} ))
+
+ maxDirectMemorySize=$(( jvm_baseMaxDirectMemorySize + jvm_heapsize / 8 + jvm_directMemorySizeCache ))
memory_options="-Xms${jvm_heapsize}m -Xmx${jvm_heapsize}m"
memory_options="${memory_options} -XX:ThreadStackSize=${jvm_stacksize}"
diff --git a/container-search/src/main/java/com/yahoo/prelude/VespaSVersionRetriever.java b/container-search/src/main/java/com/yahoo/prelude/VespaSVersionRetriever.java
deleted file mode 100644
index 9d6d4f55fb3..00000000000
--- a/container-search/src/main/java/com/yahoo/prelude/VespaSVersionRetriever.java
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.prelude;
-
-import java.io.IOException;
-import java.util.jar.Manifest;
-
-/**
- * Retrieves Vespa-Version from the manifest file.
- *
- * @author tonytv
- */
-public class VespaSVersionRetriever {
-
- public static String getVersion() {
- return version;
- }
-
- private static String version = retrieveVersion();
-
- private static String retrieveVersion() {
- try {
- Manifest manifest = new Manifest(VespaSVersionRetriever.class.getResourceAsStream("/META-INF/MANIFEST.MF"));
- manifest.getMainAttributes().entrySet();
- return manifest.getMainAttributes().getValue("Vespa-Version");
- } catch (IOException e) {
- return "not available.";
- }
- }
-}
diff --git a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterMonitor.java b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterMonitor.java
index adbae459e5d..ff77680172d 100644
--- a/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterMonitor.java
+++ b/container-search/src/main/java/com/yahoo/prelude/cluster/ClusterMonitor.java
@@ -20,6 +20,10 @@ import com.yahoo.search.result.ErrorMessage;
* @author Steinar Knutsen
*/
public class ClusterMonitor implements Runnable, Freezable {
+ // The ping thread will start using the system, but we cannot be guaranteed that all components
+ // in the system are up. As a workaround for not being able to find out when the system
+ // is ready to be used, we wait some time before starting the ping thread.
+ private static final int pingThreadInitialDelayMs = 3000;
private final MonitorConfiguration configuration;
@@ -53,7 +57,7 @@ public class ClusterMonitor implements Runnable, Freezable {
"Do not start the monitoring thread before the set of"
+" nodes to monitor is complete/the ClusterMonitor is frozen.");
}
- future = nodeManager.getScheduledExecutor().scheduleAtFixedRate(this, 30 * 1000, configuration.getCheckInterval(), TimeUnit.MILLISECONDS);
+ future = nodeManager.getScheduledExecutor().scheduleAtFixedRate(this, pingThreadInitialDelayMs, configuration.getCheckInterval(), TimeUnit.MILLISECONDS);
}
/**
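
The change above only replaces the hard-coded 30 s initial delay with the named 3 s constant; the scheduling pattern itself is unchanged. A minimal sketch of that pattern with a plain ScheduledExecutorService (the executor, the task, and the 1000 ms check interval here are assumptions for illustration only):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class PingSchedulerSketch {

        private static final int pingThreadInitialDelayMs = 3000;

        public static void main(String[] args) {
            ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
            Runnable ping = () -> System.out.println("pinging nodes");
            // The first run is postponed by the initial delay, then the task repeats at the check interval.
            executor.scheduleAtFixedRate(ping, pingThreadInitialDelayMs, 1000, TimeUnit.MILLISECONDS);
        }
    }
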
diff --git a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastHit.java b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastHit.java
index ee3f9ac0583..fe0c4a35d1e 100644
--- a/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastHit.java
+++ b/container-search/src/main/java/com/yahoo/prelude/fastsearch/FastHit.java
@@ -19,7 +19,7 @@ import com.yahoo.data.access.simple.Value.StringValue;
*/
public class FastHit extends Hit {
- public static final String SUMMARY = "summary";
+ public static final String SUMMARY = "summary"; // TODO: Remove on Vespa 7
private static final long serialVersionUID = 298098891191029589L;
@@ -62,16 +62,18 @@ public class FastHit extends Hit {
*/
public FastHit() { }
+ // Note: This constructor is only used for tests; production code always uses the empty constructor
public FastHit(String uri, double relevancy) {
this(uri, relevancy, null);
}
+ // Note: This constructor is only used for tests; production code always uses the empty constructor
public FastHit(String uri, double relevance, String source) {
setId(uri);
- super.setField("uri", uri);
+ super.setField("uri", uri); // TODO: Remove on Vespa 7
setRelevance(new Relevance(relevance));
setSource(source);
- types().add(SUMMARY);
+ types().add(SUMMARY); // TODO: Remove on Vespa 7
setPartId(0, 0);
}
diff --git a/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java b/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java
index c02824420d5..ca8214f35d6 100644
--- a/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java
+++ b/container-search/src/main/java/com/yahoo/prelude/querytransform/StemmingSearcher.java
@@ -108,11 +108,8 @@ public class StemmingSearcher extends Searcher {
return reverseConnectivity;
}
- private Item scan(Item item,
- boolean isCJK,
- Language l,
- IndexFacts.Session indexFacts,
- Map<Item, TaggableItem> reverseConnectivity) {
+ private Item scan(Item item, boolean isCJK, Language l, IndexFacts.Session indexFacts,
+ Map<Item, TaggableItem> reverseConnectivity) {
if (item == null) {
return null;
} else if (item instanceof BlockItem) {
@@ -153,9 +150,8 @@ public class StemmingSearcher extends Searcher {
if (i instanceof TermItem) {
return ((TermItem) i).getOrigin(); // this should always be the case
} else {
- getLogger().log(LogLevel.WARNING,
- "Weird, BlockItem '" + b + "' was a composite containing " + i.getClass().getName()
- + ", expected TermItem.");
+ getLogger().log(LogLevel.WARNING, "Weird, BlockItem '" + b + "' was a composite containing " +
+ i.getClass().getName() + ", expected TermItem.");
}
}
return null;
@@ -217,8 +213,8 @@ public class StemmingSearcher extends Searcher {
setConnectivity(current, reverseConnectivity, replacement);
}
- private void andSegmentConnectivity(BlockItem current,
- Map<Item, TaggableItem> reverseConnectivity, CompositeItem composite) {
+ private void andSegmentConnectivity(BlockItem current, Map<Item, TaggableItem> reverseConnectivity,
+ CompositeItem composite) {
// if the original has connectivity to something, add to last word
Connectivity connectivity = getConnectivity(current);
if (connectivity != null) {
@@ -269,8 +265,7 @@ public class StemmingSearcher extends Searcher {
private TaggableItem singleWordSegment(BlockItem current,
StemList segment,
Index index,
- Substring substring)
- {
+ Substring substring) {
String indexName = current.getIndexName();
if (index.getLiteralBoost() || index.getStemMode() == StemMode.ALL) {
// Yes, this will create a new WordAlternativesItem even if stemmed
@@ -301,8 +296,7 @@ public class StemmingSearcher extends Searcher {
}
private WordItem singleStemSegment(Item blockAsItem, String stem, String indexName,
- Substring substring)
- {
+ Substring substring) {
WordItem replacement = new WordItem(stem, indexName, true, substring);
replacement.setStemmed(true);
copyAttributes(blockAsItem, replacement);
@@ -311,8 +305,7 @@ public class StemmingSearcher extends Searcher {
private void setConnectivity(BlockItem current,
Map<Item, TaggableItem> reverseConnectivity,
- Item replacement)
- {
+ Item replacement) {
if (reverseConnectivity != null && !reverseConnectivity.isEmpty()) {
// This Map<Item, TaggableItem>.get(BlockItem) is technically wrong, but the Item API ensures its correctness
TaggableItem connectedTo = reverseConnectivity.get(current);
@@ -425,4 +418,5 @@ public class StemmingSearcher extends Searcher {
}
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
index 77b759973b8..aa5726bd52c 100644
--- a/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
+++ b/container-search/src/main/java/com/yahoo/search/handler/SearchHandler.java
@@ -4,6 +4,7 @@ package com.yahoo.search.handler;
import com.google.inject.Inject;
import com.yahoo.collections.Tuple2;
import com.yahoo.component.ComponentSpecification;
+import com.yahoo.component.Vtag;
import com.yahoo.component.chain.Chain;
import com.yahoo.component.chain.ChainsConfigurer;
import com.yahoo.component.chain.model.ChainsModel;
@@ -25,7 +26,6 @@ import com.yahoo.log.LogLevel;
import com.yahoo.net.UriTools;
import com.yahoo.prelude.IndexFacts;
import com.yahoo.prelude.IndexModel;
-import com.yahoo.prelude.VespaSVersionRetriever;
import com.yahoo.prelude.query.QueryException;
import com.yahoo.prelude.query.parser.ParseException;
import com.yahoo.prelude.query.parser.SpecialTokenRegistry;
@@ -363,7 +363,7 @@ public class SearchHandler extends LoggingRequestHandler {
execution.fill(result, result.getQuery().getPresentation().getSummary());
traceExecutionTimes(query, result);
- traceVespaSVersion(query);
+ traceVespaVersion(query);
traceRequestAttributes(query);
return result;
}
@@ -519,8 +519,8 @@ public class SearchHandler extends LoggingRequestHandler {
}
}
- private void traceVespaSVersion(Query query) {
- query.trace("Vespa version: " + VespaSVersionRetriever.getVersion(), false, 4);
+ private void traceVespaVersion(Query query) {
+ query.trace("Vespa version: " + Vtag.currentVersion.toString(), false, 4);
}
public SearchChainRegistry getSearchChainRegistry() {
diff --git a/container-search/src/main/java/com/yahoo/search/querytransform/SortingDegrader.java b/container-search/src/main/java/com/yahoo/search/querytransform/SortingDegrader.java
index 0c7871eb6e6..2597e440d17 100644
--- a/container-search/src/main/java/com/yahoo/search/querytransform/SortingDegrader.java
+++ b/container-search/src/main/java/com/yahoo/search/querytransform/SortingDegrader.java
@@ -69,6 +69,8 @@ public class SortingDegrader extends Searcher {
}
private void setDegradation(Query query) {
+ query.trace("Using sorting degrading for performance - totalHits will be wrong. " +
+ "Turn off with sorting.degrading=false.", 2);
Sorting.FieldOrder primarySort = query.getRanking().getSorting().fieldOrders().get(0); // ensured above
MatchPhase matchPhase = query.getRanking().getMatchPhase();
diff --git a/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java b/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java
index 1d400056d52..7df859c6070 100644
--- a/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java
+++ b/container-search/src/main/java/com/yahoo/search/rendering/JsonRenderer.java
@@ -22,7 +22,6 @@ import org.json.JSONObject;
import com.fasterxml.jackson.core.JsonEncoding;
import com.fasterxml.jackson.core.JsonFactory;
-import com.fasterxml.jackson.core.JsonGenerationException;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.TreeNode;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -77,15 +76,12 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
boolean booleanValue() {
switch (this) {
- case YES:
- return true;
- case NO:
- return false;
- default:
- throw new IllegalStateException();
+ case YES: return true;
+ case NO: return false;
+ default: throw new IllegalStateException();
}
}
- };
+ }
// if this must be optimized, simply use com.fasterxml.jackson.core.SerializableString
private static final String BUCKET_LIMITS = "limits";
@@ -173,8 +169,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
}
- private void doVisit(final long timestamp, final Object payload, final boolean hasChildren)
- throws IOException, JsonGenerationException {
+ private void doVisit(final long timestamp, final Object payload, final boolean hasChildren) throws IOException {
boolean dirty = false;
if (timestamp != 0L) {
header();
@@ -216,7 +211,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
}
- private void conditionalStartObject() throws IOException, JsonGenerationException {
+ private void conditionalStartObject() throws IOException {
if (!isInsideOpenObject()) {
generator.writeStartObject();
} else {
@@ -303,7 +298,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
generator = null;
renderedChildren = null;
debugRendering = false;
- timeSource = () -> System.currentTimeMillis();
+ timeSource = System::currentTimeMillis;
stream = null;
}
@@ -320,21 +315,19 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
private void renderTiming() throws IOException {
- if (!getResult().getQuery().getPresentation().getTiming()) {
- return;
- }
+ if (!getResult().getQuery().getPresentation().getTiming()) return;
- final double milli = .001d;
- final long now = timeSource.getAsLong();
- final long searchTime = now - getResult().getElapsedTime().first();
- final double searchSeconds = searchTime * milli;
+ double milli = .001d;
+ long now = timeSource.getAsLong();
+ long searchTime = now - getResult().getElapsedTime().first();
+ double searchSeconds = searchTime * milli;
generator.writeObjectFieldStart(TIMING);
if (getResult().getElapsedTime().firstFill() != 0L) {
- final long queryTime = getResult().getElapsedTime().weightedSearchTime();
- final long summaryFetchTime = getResult().getElapsedTime().weightedFillTime();
- final double querySeconds = queryTime * milli;
- final double summarySeconds = summaryFetchTime * milli;
+ long queryTime = getResult().getElapsedTime().weightedSearchTime();
+ long summaryFetchTime = getResult().getElapsedTime().weightedFillTime();
+ double querySeconds = queryTime * milli;
+ double summarySeconds = summaryFetchTime * milli;
generator.writeNumberField(QUERY_TIME, querySeconds);
generator.writeNumberField(SUMMARY_FETCH_TIME, summarySeconds);
}
@@ -344,18 +337,16 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
private boolean getDebugRendering(Query q) {
- return q == null ? false : q.properties().getBoolean(DEBUG_RENDERING_KEY, false);
+ return q != null && q.properties().getBoolean(DEBUG_RENDERING_KEY, false);
}
- private void renderTrace(Trace trace) throws JsonGenerationException, IOException {
- if (!trace.traceNode().children().iterator().hasNext()) {
- return;
- }
+ private void renderTrace(Trace trace) throws IOException {
+ if (!trace.traceNode().children().iterator().hasNext()) return;
+
try {
long basetime = trace.traceNode().timestamp();
- if (basetime == 0L) {
+ if (basetime == 0L)
basetime = getResult().getElapsedTime().first();
- }
trace.accept(new TraceRenderer(basetime));
} catch (TraceRenderWrapper e) {
throw new IOException(e);
@@ -365,53 +356,49 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
@Override
public void beginList(DataList<?> list) throws IOException {
Preconditions.checkArgument(list instanceof HitGroup,
- "Expected subclass of com.yahoo.search.result.HitGroup, got %s.",
- list.getClass());
+ "Expected subclass of com.yahoo.search.result.HitGroup, got %s.",
+ list.getClass());
moreChildren();
-
renderHitGroupHead((HitGroup) list);
}
- protected void moreChildren() throws IOException, JsonGenerationException {
- if (!renderedChildren.isEmpty()) {
+ protected void moreChildren() throws IOException {
+ if (!renderedChildren.isEmpty())
childrenArray();
- }
+
renderedChildren.push(0);
}
- private void childrenArray() throws IOException, JsonGenerationException {
- if (renderedChildren.peek() == 0) {
+ private void childrenArray() throws IOException {
+ if (renderedChildren.peek() == 0)
generator.writeArrayFieldStart(CHILDREN);
- }
renderedChildren.push(renderedChildren.pop() + 1);
}
- private void lessChildren() throws IOException, JsonGenerationException {
+ private void lessChildren() throws IOException {
int lastRenderedChildren = renderedChildren.pop();
if (lastRenderedChildren > 0) {
generator.writeEndArray();
}
}
- private void renderHitGroupHead(HitGroup hitGroup) throws JsonGenerationException, IOException {
- final ErrorHit errorHit = hitGroup.getErrorHit();
-
+ private void renderHitGroupHead(HitGroup hitGroup) throws IOException {
generator.writeStartObject();
+
renderHitContents(hitGroup);
- if (getRecursionLevel() == 1) {
+ if (getRecursionLevel() == 1)
renderCoverage();
- }
- if (errorHit != null) {
+
+ ErrorHit errorHit = hitGroup.getErrorHit();
+ if (errorHit != null)
renderErrors(errorHit.errors());
- }
// the framework will invoke begin methods as needed from here
}
- private void renderErrors(Set<ErrorMessage> errors) throws JsonGenerationException, IOException {
- if (errors.isEmpty()) {
- return;
- }
+ private void renderErrors(Set<ErrorMessage> errors) throws IOException {
+ if (errors.isEmpty()) return;
+
generator.writeArrayFieldStart(ERRORS);
for (ErrorMessage e : errors) {
String summary = e.getMessage();
@@ -441,11 +428,10 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
- private void renderCoverage() throws JsonGenerationException, IOException {
+ private void renderCoverage() throws IOException {
Coverage c = getResult().getCoverage(false);
- if (c == null) {
- return;
- }
+ if (c == null) return;
+
generator.writeObjectFieldStart(COVERAGE);
generator.writeNumberField(COVERAGE_COVERAGE, c.getResultPercentage());
generator.writeNumberField(COVERAGE_DOCUMENTS, c.getDocs());
@@ -456,10 +442,8 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
generator.writeEndObject();
}
- private void renderHit(Hit hit) throws JsonGenerationException, IOException {
- if (!shouldRender(hit)) {
- return;
- }
+ private void renderHit(Hit hit) throws IOException {
+ if (!shouldRender(hit)) return;
childrenArray();
generator.writeStartObject();
@@ -468,54 +452,45 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
private boolean shouldRender(Hit hit) {
- if (hit instanceof DefaultErrorHit) {
- return false;
- }
-
- return true;
+ return ! (hit instanceof DefaultErrorHit);
}
- private boolean fieldsStart(boolean hasFieldsField) throws JsonGenerationException, IOException {
- if (hasFieldsField) {
- return true;
- }
+ private boolean fieldsStart(boolean hasFieldsField) throws IOException {
+ if (hasFieldsField) return true;
generator.writeObjectFieldStart(FIELDS);
return true;
}
- private void fieldsEnd(boolean hasFieldsField) throws JsonGenerationException, IOException {
- if (!hasFieldsField) {
- return;
- }
+ private void fieldsEnd(boolean hasFieldsField) throws IOException {
+ if (!hasFieldsField) return;
generator.writeEndObject();
}
- private void renderHitContents(Hit hit) throws JsonGenerationException, IOException {
+ private void renderHitContents(Hit hit) throws IOException {
String id = hit.getDisplayId();
- Set<String> types = hit.types();
- String source = hit.getSource();
-
- if (id != null) {
+ if (id != null)
generator.writeStringField(ID, id);
- }
+
generator.writeNumberField(RELEVANCE, hit.getRelevance().getScore());
- if (types.size() > 0) {
+
+ if (hit.types().size() > 0) { // TODO: Remove types rendering on Vespa 7
generator.writeArrayFieldStart(TYPES);
- for (String t : types) {
+ for (String t : hit.types()) {
generator.writeString(t);
}
generator.writeEndArray();
}
- if (source != null) {
+
+ String source = hit.getSource();
+ if (source != null)
generator.writeStringField(SOURCE, hit.getSource());
- }
+
renderSpecialCasesForGrouping(hit);
renderAllFields(hit);
}
- private void renderAllFields(Hit hit) throws JsonGenerationException,
- IOException {
+ private void renderAllFields(Hit hit) throws IOException {
boolean hasFieldsField = false;
hasFieldsField |= renderTotalHitCount(hit, hasFieldsField);
@@ -523,8 +498,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
fieldsEnd(hasFieldsField);
}
- private boolean renderStandardFields(Hit hit, boolean initialHasFieldsField)
- throws JsonGenerationException, IOException {
+ private boolean renderStandardFields(Hit hit, boolean initialHasFieldsField) throws IOException {
boolean hasFieldsField = initialHasFieldsField;
for (String fieldName : hit.fieldKeys()) {
if (!shouldRender(fieldName, hit)) continue;
@@ -538,55 +512,39 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
private boolean shouldRender(String fieldName, Hit hit) {
- if (debugRendering) {
- return true;
- }
- if (fieldName.startsWith(VESPA_HIDDEN_FIELD_PREFIX)) {
- return false;
- }
+ if (debugRendering) return true;
+
+ if (fieldName.startsWith(VESPA_HIDDEN_FIELD_PREFIX)) return false;
RenderDecision r = lazyRenderAwareCheck(fieldName, hit);
- if (r != RenderDecision.DO_NOT_KNOW) {
- return r.booleanValue();
- }
+ if (r != RenderDecision.DO_NOT_KNOW) return r.booleanValue();
// this will trigger field decoding, so it is important the lazy decoding magic is done first
Object field = hit.getField(fieldName);
- if (field instanceof CharSequence && ((CharSequence) field).length() == 0) {
- return false;
- }
- if (field instanceof StringFieldValue && ((StringFieldValue) field).getString().isEmpty()) {
- // StringFieldValue cannot hold a null, so checking length directly is OK
- return false;
- }
- if (field instanceof NanNumber) {
- return false;
- }
+ if (field instanceof CharSequence && ((CharSequence) field).length() == 0) return false;
+
+ // StringFieldValue cannot hold a null, so checking length directly is OK:
+ if (field instanceof StringFieldValue && ((StringFieldValue) field).getString().isEmpty()) return false;
+
+ if (field instanceof NanNumber) return false;
return true;
}
private RenderDecision lazyRenderAwareCheck(String fieldName, Hit hit) {
- if (!(hit instanceof FastHit)) return RenderDecision.DO_NOT_KNOW;
+ if ( ! (hit instanceof FastHit)) return RenderDecision.DO_NOT_KNOW;
FastHit asFastHit = (FastHit) hit;
if (asFastHit.fieldIsNotDecoded(fieldName)) {
- FastHit.RawField r = asFastHit.fetchFieldAsUtf8(fieldName);
- if (r != null) {
- byte[] utf8 = r.getUtf8();
- if (utf8.length == 0) {
- return RenderDecision.NO;
- } else {
- return RenderDecision.YES;
- }
- }
+ FastHit.RawField rawField = asFastHit.fetchFieldAsUtf8(fieldName);
+ if (rawField != null)
+ return rawField.getUtf8().length == 0 ? RenderDecision.NO : RenderDecision.YES;
}
return RenderDecision.DO_NOT_KNOW;
}
- private void renderSpecialCasesForGrouping(Hit hit)
- throws JsonGenerationException, IOException {
+ private void renderSpecialCasesForGrouping(Hit hit) throws IOException {
if (hit instanceof AbstractList) {
renderGroupingListSyntheticFields((AbstractList) hit);
} else if (hit instanceof Group) {
@@ -594,8 +552,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
}
- private void renderGroupingGroupSyntheticFields(Hit hit)
- throws JsonGenerationException, IOException {
+ private void renderGroupingGroupSyntheticFields(Hit hit) throws IOException {
renderGroupMetadata(((Group) hit).getGroupId());
if (hit instanceof RootGroup) {
renderContinuations(Collections.singletonMap(
@@ -603,22 +560,18 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
}
- private void renderGroupingListSyntheticFields(AbstractList a)
- throws JsonGenerationException, IOException {
+ private void renderGroupingListSyntheticFields(AbstractList a) throws IOException {
writeGroupingLabel(a);
renderContinuations(a.continuations());
}
- private void writeGroupingLabel(AbstractList a)
- throws JsonGenerationException, IOException {
+ private void writeGroupingLabel(AbstractList a) throws IOException {
generator.writeStringField(LABEL, a.getLabel());
}
- private void renderContinuations(Map<String, Continuation> continuations)
- throws JsonGenerationException, IOException {
- if (continuations.isEmpty()) {
- return;
- }
+ private void renderContinuations(Map<String, Continuation> continuations) throws IOException {
+ if (continuations.isEmpty()) return;
+
generator.writeObjectFieldStart(CONTINUATION);
for (Map.Entry<String, Continuation> e : continuations.entrySet()) {
generator.writeStringField(e.getKey(), e.getValue().toString());
@@ -626,17 +579,14 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
generator.writeEndObject();
}
- private void renderGroupMetadata(GroupId id) throws JsonGenerationException,
- IOException {
- if (!(id instanceof ValueGroupId || id instanceof BucketGroupId)) {
- return;
- }
+ private void renderGroupMetadata(GroupId id) throws IOException {
+ if (!(id instanceof ValueGroupId || id instanceof BucketGroupId)) return;
if (id instanceof ValueGroupId) {
- final ValueGroupId<?> valueId = (ValueGroupId<?>) id;
+ ValueGroupId<?> valueId = (ValueGroupId<?>) id;
generator.writeStringField(GROUPING_VALUE, getIdValue(valueId));
- } else if (id instanceof BucketGroupId) {
- final BucketGroupId<?> bucketId = (BucketGroupId<?>) id;
+ } else {
+ BucketGroupId<?> bucketId = (BucketGroupId<?>) id;
generator.writeObjectFieldStart(BUCKET_LIMITS);
generator.writeStringField(BUCKET_FROM, getBucketFrom(bucketId));
generator.writeStringField(BUCKET_TO, getBucketTo(bucketId));
@@ -645,40 +595,33 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
private static String getIdValue(ValueGroupId<?> id) {
- return (id instanceof RawId ? Arrays.toString(((RawId) id).getValue())
- : id.getValue()).toString();
+ return (id instanceof RawId ? Arrays.toString(((RawId) id).getValue()) : id.getValue()).toString();
}
private static String getBucketFrom(BucketGroupId<?> id) {
- return (id instanceof RawBucketId ? Arrays.toString(((RawBucketId) id)
- .getFrom()) : id.getFrom()).toString();
+ return (id instanceof RawBucketId ? Arrays.toString(((RawBucketId) id).getFrom()) : id.getFrom()).toString();
}
private static String getBucketTo(BucketGroupId<?> id) {
- return (id instanceof RawBucketId ? Arrays.toString(((RawBucketId) id)
- .getTo()) : id.getTo()).toString();
+ return (id instanceof RawBucketId ? Arrays.toString(((RawBucketId) id).getTo()) : id.getTo()).toString();
}
- private boolean renderTotalHitCount(Hit hit, boolean hasFieldsField)
- throws JsonGenerationException, IOException {
- if (getRecursionLevel() == 1 && hit instanceof HitGroup) {
- fieldsStart(hasFieldsField);
- generator.writeNumberField(TOTAL_COUNT, getResult()
- .getTotalHitCount());
- return true;
- } else {
- return false;
- }
+ private boolean renderTotalHitCount(Hit hit, boolean hasFieldsField) throws IOException {
+ if ( ! (getRecursionLevel() == 1 && hit instanceof HitGroup)) return false;
+
+ fieldsStart(hasFieldsField);
+ generator.writeNumberField(TOTAL_COUNT, getResult().getTotalHitCount());
+ return true;
}
- private void renderField(String fieldName, Hit hit) throws JsonGenerationException, IOException {
+ private void renderField(String fieldName, Hit hit) throws IOException {
generator.writeFieldName(fieldName);
- if (!tryDirectRendering(fieldName, hit)) {
+ if ( ! tryDirectRendering(fieldName, hit)) {
renderFieldContents(hit.getField(fieldName));
}
}
- private void renderFieldContents(Object field) throws JsonGenerationException, IOException {
+ private void renderFieldContents(Object field) throws IOException {
if (field == null) {
generator.writeNull();
} else if (field instanceof Number) {
@@ -711,7 +654,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
}
}
- private void renderNumberField(Number field) throws JsonGenerationException, IOException {
+ private void renderNumberField(Number field) throws IOException {
if (field instanceof Integer) {
generator.writeNumber(field.intValue());
} else if (field instanceof Float) {
@@ -734,8 +677,7 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
/**
* Really a private method, but package access for testability.
*/
- boolean tryDirectRendering(String fieldName, Hit hit)
- throws IOException, JsonGenerationException {
+ boolean tryDirectRendering(String fieldName, Hit hit) throws IOException {
boolean renderedAsUtf8 = false;
if (hit instanceof FastHit) {
FastHit f = (FastHit) hit;
@@ -755,8 +697,8 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
@Override
public void data(Data data) throws IOException {
Preconditions.checkArgument(data instanceof Hit,
- "Expected subclass of com.yahoo.search.result.Hit, got %s.",
- data.getClass());
+ "Expected subclass of com.yahoo.search.result.Hit, got %s.",
+ data.getClass());
renderHit((Hit) data);
}
@@ -785,8 +727,8 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
private Result getResult() {
Response r = getResponse();
Preconditions.checkArgument(r instanceof Result,
- "JsonRenderer can only render instances of com.yahoo.search.Result, got instance of %s.",
- r.getClass());
+ "JsonRenderer can only render instances of com.yahoo.search.Result, got instance of %s.",
+ r.getClass());
return (Result) r;
}
@@ -841,4 +783,5 @@ public class JsonRenderer extends AsynchronousSectionedRenderer<Result> {
void setTimeSource(LongSupplier timeSource) {
this.timeSource = timeSource;
}
+
}
diff --git a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java
index 3917d353630..e27893a2b20 100644
--- a/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java
+++ b/container-search/src/main/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcher.java
@@ -219,12 +219,8 @@ public class VdsStreamingSearcher extends VespaBackEndSearcher {
private FastHit buildSummaryHit(Query query, SearchResult.Hit hit) {
FastHit fastHit = new FastHit();
fastHit.setQuery(query);
- fastHit.setSource("VdsStreamingSearcher");
+ fastHit.setSource(getName());
fastHit.setId(hit.getDocId());
- // TODO: remove seField("uri", ...), just a helper for Velocity templates
- fastHit.setField("uri", hit.getDocId());
- fastHit.types().add("summary");
-
fastHit.setRelevance(new Relevance(hit.getRank()));
fastHit.setFillable();
diff --git a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java
index 1931dd2179e..e3797b1f63e 100644
--- a/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java
+++ b/container-search/src/test/java/com/yahoo/vespa/streamingvisitors/VdsStreamingSearcherTestCase.java
@@ -175,7 +175,7 @@ public class VdsStreamingSearcherTestCase {
for (int i=0; i<result.hits().size(); ++i) {
Hit hit = result.hits().get(i);
if (idPrefix != null) {
- assertEquals("VdsStreamingSearcher", hit.getSource());
+ assertEquals("clusterName", hit.getSource());
assertEquals(idPrefix + i, hit.getId().toString());
} else {
assertNull(hit.getSource());
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
index bed10d0a242..7960abc8b64 100644
--- a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/DockerImpl.java
@@ -21,6 +21,9 @@ import com.yahoo.collections.Pair;
import com.yahoo.log.LogLevel;
import com.yahoo.system.ProcessExecuter;
import com.yahoo.vespa.defaults.Defaults;
+import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper;
+import com.yahoo.vespa.hosted.dockerapi.metrics.GaugeWrapper;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import javax.annotation.concurrent.GuardedBy;
import java.io.ByteArrayOutputStream;
@@ -66,12 +69,15 @@ public class DockerImpl implements Docker {
final DockerClient dockerClient;
+ private GaugeWrapper numberOfRunningContainersGauge;
+ private CounterWrapper numberOfDockerDaemonFails;
+
DockerImpl(final DockerClient dockerClient) {
this.dockerClient = dockerClient;
}
@Inject
- public DockerImpl(final DockerConfig config) {
+ public DockerImpl(final DockerConfig config, MetricReceiverWrapper metricReceiver) {
JerseyDockerCmdExecFactory dockerFactory = new JerseyDockerCmdExecFactory()
.withMaxPerRouteConnections(DOCKER_MAX_PER_ROUTE_CONNECTIONS)
.withMaxTotalConnections(DOCKER_MAX_TOTAL_CONNECTIONS)
@@ -80,9 +86,9 @@ public class DockerImpl implements Docker {
RemoteApiVersion remoteApiVersion;
try {
- remoteApiVersion = RemoteApiVersion.parseConfig(DockerClientImpl.getInstance(
- buildDockerClientConfig(config).build())
- .withDockerCmdExecFactory(dockerFactory).versionCmd().exec().getApiVersion());
+ remoteApiVersion = RemoteApiVersion.parseConfig(DockerClientImpl.getInstance(
+ buildDockerClientConfig(config).build())
+ .withDockerCmdExecFactory(dockerFactory).versionCmd().exec().getApiVersion());
logger.info("Found version of remote docker API: "+ remoteApiVersion);
// From version 1.24 a field was removed which causes trouble with the current docker java code.
// When this is fixed, we can remove this and do not specify version.
@@ -97,8 +103,8 @@ public class DockerImpl implements Docker {
this.dockerClient = DockerClientImpl.getInstance(
buildDockerClientConfig(config)
- .withApiVersion(remoteApiVersion)
- .build())
+ .withApiVersion(remoteApiVersion)
+ .build())
.withDockerCmdExecFactory(dockerFactory);
try {
@@ -106,6 +112,12 @@ public class DockerImpl implements Docker {
} catch (Exception e) {
throw new RuntimeException("Could not setup docker network", e);
}
+
+ numberOfRunningContainersGauge = metricReceiver.declareGauge("containers.running");
+ numberOfDockerDaemonFails = metricReceiver.declareCounter("daemon.api_fails");
+
+ // Some containers could already be running; count them and initialize the gauge to that value
+ numberOfRunningContainersGauge.sample(getAllManagedContainers().size());
}
static DefaultDockerClientConfig.Builder buildDockerClientConfig(DockerConfig config) {
@@ -214,6 +226,7 @@ public class DockerImpl implements Docker {
flatMap(image -> Arrays.stream(image.getRepoTags())).
anyMatch(tag -> tag.equals(dockerImage.asString()));
} catch (DockerException e) {
+ numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to list image name: '" + dockerImage + "'", e);
}
}
@@ -226,9 +239,14 @@ public class DockerImpl implements Docker {
@Override
public void connectContainerToNetwork(ContainerName containerName, String networkName) {
- dockerClient.connectToNetworkCmd()
- .withContainerId(containerName.asString())
- .withNetworkId(networkName).exec();
+ try {
+ dockerClient.connectToNetworkCmd()
+ .withContainerId(containerName.asString())
+ .withNetworkId(networkName).exec();
+ } catch (DockerException e) {
+ numberOfDockerDaemonFails.add();
+ throw new RuntimeException("Failed to connect container to network", e);
+ }
}
@Override
@@ -253,6 +271,7 @@ public class DockerImpl implements Docker {
return new ProcessResult(exitCode, new String(output.toByteArray()), new String(errors.toByteArray()));
} catch (DockerException | InterruptedException e) {
+ numberOfDockerDaemonFails.add();
throw new RuntimeException("Container " + containerName.asString()
+ " failed to execute " + Arrays.toString(args), e);
}
@@ -260,8 +279,13 @@ public class DockerImpl implements Docker {
@Override
public ContainerInfo inspectContainer(ContainerName containerName) {
- InspectContainerResponse containerInfo = dockerClient.inspectContainerCmd(containerName.asString()).exec();
- return new ContainerInfoImpl(containerName, containerInfo);
+ try {
+ InspectContainerResponse containerInfo = dockerClient.inspectContainerCmd(containerName.asString()).exec();
+ return new ContainerInfoImpl(containerName, containerInfo);
+ } catch (DockerException e) {
+ numberOfDockerDaemonFails.add();
+ throw new RuntimeException("Failed to get container info", e);
+ }
}
@Override
@@ -270,7 +294,9 @@ public class DockerImpl implements Docker {
if (dockerContainer.isPresent()) {
try {
dockerClient.startContainerCmd(dockerContainer.get().getId()).exec();
+ numberOfRunningContainersGauge.sample(getAllManagedContainers().size());
} catch (DockerException e) {
+ numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to start container", e);
}
}
@@ -284,6 +310,7 @@ public class DockerImpl implements Docker {
try {
dockerClient.stopContainerCmd(dockerContainer.get().getId()).withTimeout(SECONDS_TO_WAIT_BEFORE_KILLING).exec();
} catch (DockerException e) {
+ numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to stop container", e);
}
}
@@ -295,7 +322,9 @@ public class DockerImpl implements Docker {
if (dockerContainer.isPresent()) {
try {
dockerClient.removeContainerCmd(dockerContainer.get().getId()).exec();
+ numberOfRunningContainersGauge.sample(getAllManagedContainers().size());
} catch (DockerException e) {
+ numberOfDockerDaemonFails.add();
throw new RuntimeException("Failed to delete container", e);
}
}
@@ -309,6 +338,7 @@ public class DockerImpl implements Docker {
.flatMap(this::asContainer)
.collect(Collectors.toList());
} catch (DockerException e) {
+ numberOfDockerDaemonFails.add();
throw new RuntimeException("Could not retrieve all container", e);
}
}
@@ -330,6 +360,7 @@ public class DockerImpl implements Docker {
new ContainerName(decode(response.getName())),
response.getState().getRunning()));
} catch (DockerException e) {
+ numberOfDockerDaemonFails.add();
//TODO: do proper exception handling
throw new RuntimeException("Failed talking to docker daemon", e);
}
@@ -367,7 +398,12 @@ public class DockerImpl implements Docker {
@Override
public void deleteImage(final DockerImage dockerImage) {
- dockerClient.removeImageCmd(dockerImage.asString()).exec();
+ try {
+ dockerClient.removeImageCmd(dockerImage.asString()).exec();
+ } catch (DockerException e) {
+ numberOfDockerDaemonFails.add();
+ throw new RuntimeException("Failed to delete docker image " + dockerImage.asString(), e);
+ }
}
private Map<String, Image> filterOutImagesUsedByContainers(
@@ -449,11 +485,7 @@ public class DockerImpl implements Docker {
@Override
public void deleteUnusedDockerImages(Set<DockerImage> except) {
- try {
- getUnusedDockerImages(except).stream().forEach(this::deleteImage);
- } catch (DockerException e) {
- throw new RuntimeException("Unexpected exception", e);
- }
+ getUnusedDockerImages(except).stream().forEach(this::deleteImage);
}
private class ImagePullCallback extends PullImageResultCallback {
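
Every Docker daemon call above now follows the same pattern: catch the exception, bump the daemon.api_fails counter, rethrow with context. A sketch of that pattern factored into a helper; withDaemonFailureCounting is a hypothetical name used here only, and catching RuntimeException stands in for the DockerException hierarchy:

    import java.util.function.Supplier;

    import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper;
    import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;

    class DaemonCallSketch {

        private final CounterWrapper numberOfDockerDaemonFails;

        DaemonCallSketch(MetricReceiverWrapper metricReceiver) {
            this.numberOfDockerDaemonFails = metricReceiver.declareCounter("daemon.api_fails");
        }

        // Runs a docker daemon call, counting and rethrowing any failure.
        <T> T withDaemonFailureCounting(Supplier<T> dockerCall, String errorMessage) {
            try {
                return dockerCall.get();
            } catch (RuntimeException e) {
                numberOfDockerDaemonFails.add();
                throw new RuntimeException(errorMessage, e);
            }
        }
    }
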
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java
new file mode 100644
index 00000000000..f6b398e83a2
--- /dev/null
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/CounterWrapper.java
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.dockerapi.metrics;
+
+import com.yahoo.metrics.simple.Counter;
+
+/**
+ * Forwards each sample to {@link com.yahoo.metrics.simple.Counter} to be displayed in /state/v1/metrics,
+ * while also saving the value so it can be accessed programmatically later.
+ *
+ * @author valerijf
+ */
+public class CounterWrapper implements MetricValue {
+ private final Object lock = new Object();
+
+ private final Counter counter;
+ private long value = 0;
+
+ CounterWrapper(Counter counter) {
+ this.counter = counter;
+ }
+
+ public void add() {
+ add(1L);
+ }
+
+ public void add(long n) {
+ synchronized (lock) {
+ counter.add(n);
+ value += n;
+ }
+ }
+
+ @Override
+ public Number getValue() {
+ synchronized (lock) {
+ return value;
+ }
+ }
+}
\ No newline at end of file
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java
new file mode 100644
index 00000000000..db0670c2f87
--- /dev/null
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/GaugeWrapper.java
@@ -0,0 +1,35 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.dockerapi.metrics;
+
+import com.yahoo.metrics.simple.Gauge;
+
+/**
+ * Forwards each sample to {@link com.yahoo.metrics.simple.Gauge} to be displayed in /state/v1/metrics,
+ * while also saving the value so it can be accessed programmatically later.
+ *
+ * @author valerijf
+ */
+public class GaugeWrapper implements MetricValue {
+ private final Object lock = new Object();
+
+ private final Gauge gauge;
+ private double value;
+
+ GaugeWrapper(Gauge gauge) {
+ this.gauge = gauge;
+ }
+
+ public void sample(double x) {
+ synchronized (lock) {
+ gauge.sample(x);
+ this.value = x;
+ }
+ }
+
+ @Override
+ public Number getValue() {
+ synchronized (lock) {
+ return value;
+ }
+ }
+}
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java
new file mode 100644
index 00000000000..c4a64845062
--- /dev/null
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapper.java
@@ -0,0 +1,41 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.dockerapi.metrics;
+
+import com.google.inject.Inject;
+import com.yahoo.metrics.simple.MetricReceiver;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+
+/**
+ * Exports metrics to /state/v1/metrics and makes them available programmatically.
+ *
+ * @author valerijf
+ */
+public class MetricReceiverWrapper {
+ private final Map<String, MetricValue> metrics = new ConcurrentHashMap<>();
+ private final MetricReceiver metricReceiver;
+
+ @Inject
+ public MetricReceiverWrapper(MetricReceiver metricReceiver) {
+ this.metricReceiver = metricReceiver;
+ }
+
+ public CounterWrapper declareCounter(String name) {
+ CounterWrapper counter = new CounterWrapper(metricReceiver.declareCounter(name));
+ metrics.put(name, counter);
+ return counter;
+ }
+
+ public GaugeWrapper declareGauge(String name) {
+ GaugeWrapper gauge = new GaugeWrapper(metricReceiver.declareGauge(name));
+ metrics.put(name, gauge);
+ return gauge;
+ }
+
+ public Map<String, Number> getLatestMetrics() {
+ return metrics.entrySet().stream().collect(Collectors.toMap(
+ Map.Entry::getKey, entry -> entry.getValue().getValue()));
+ }
+}
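
A short usage sketch of the new metrics package, tying it to the two metrics DockerImpl registers; MetricReceiver.nullImplementation is the same stand-in the tests further down use, and the sampled values are invented for illustration:

    import com.yahoo.metrics.simple.MetricReceiver;
    import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper;
    import com.yahoo.vespa.hosted.dockerapi.metrics.GaugeWrapper;
    import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;

    public class DockerMetricsSketch {

        public static void main(String[] args) {
            MetricReceiverWrapper metrics = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
            GaugeWrapper running = metrics.declareGauge("containers.running");
            CounterWrapper apiFails = metrics.declareCounter("daemon.api_fails");

            running.sample(3);   // e.g. after counting the managed containers
            apiFails.add();      // e.g. after a failed daemon call

            // The same values are now available programmatically:
            System.out.println(metrics.getLatestMetrics()); // {containers.running=3.0, daemon.api_fails=1}
        }
    }
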
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java
new file mode 100644
index 00000000000..f9e04694cb5
--- /dev/null
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricValue.java
@@ -0,0 +1,9 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.dockerapi.metrics;
+
+/**
+ * @author valerijf
+ */
+public interface MetricValue {
+ Number getValue();
+}
diff --git a/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/package-info.java b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/package-info.java
new file mode 100644
index 00000000000..d7818e3b8ee
--- /dev/null
+++ b/docker-api/src/main/java/com/yahoo/vespa/hosted/dockerapi/metrics/package-info.java
@@ -0,0 +1,5 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+@ExportPackage
+package com.yahoo.vespa.hosted.dockerapi.metrics;
+
+import com.yahoo.osgi.annotation.ExportPackage;
diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerTest.java
index 093a2f88a8e..6c5d6b1f3bb 100644
--- a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerTest.java
+++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/DockerTest.java
@@ -3,6 +3,8 @@ package com.yahoo.vespa.hosted.dockerapi;
import com.github.dockerjava.api.model.Network;
import com.github.dockerjava.core.command.BuildImageResultCallback;
+import com.yahoo.metrics.simple.MetricReceiver;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import org.junit.After;
import org.junit.Before;
import org.junit.Ignore;
@@ -42,7 +44,7 @@ public class DockerTest {
.clientKeyPath("")
.uri("unix:///var/run/docker.sock"));
- private static final DockerImpl docker = new DockerImpl(dockerConfig);
+ private static final DockerImpl docker = new DockerImpl(dockerConfig, new MetricReceiverWrapper(MetricReceiver.nullImplementation));
private static final DockerImage dockerImage = new DockerImage("simple-ipv6-server:Dockerfile");
diff --git a/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapperTest.java b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapperTest.java
new file mode 100644
index 00000000000..ef74580479b
--- /dev/null
+++ b/docker-api/src/test/java/com/yahoo/vespa/hosted/dockerapi/metrics/MetricReceiverWrapperTest.java
@@ -0,0 +1,49 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.dockerapi.metrics;
+
+import com.yahoo.metrics.simple.MetricReceiver;
+import org.junit.Test;
+
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * @author valerijf
+ */
+public class MetricReceiverWrapperTest {
+ @Test
+ public void testDefaultValue() {
+ MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
+ metricReceiver.declareCounter("some.name");
+
+ assertEquals(metricReceiver.getLatestMetrics().get("some.name"), 0L);
+ }
+
+ @Test
+ public void testSimpleIncrementMetric() {
+ MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
+ CounterWrapper counter = metricReceiver.declareCounter("a_counter.value");
+
+ counter.add(5);
+ counter.add(8);
+
+ Map<String, Number> latestMetrics = metricReceiver.getLatestMetrics();
+ assertTrue("Expected only 1 metric value to be set", latestMetrics.size() == 1);
+ assertEquals(latestMetrics.get("a_counter.value"), 13L); // 5 + 8
+ }
+
+ @Test
+ public void testSimpleGauge() {
+ MetricReceiverWrapper metricReceiver = new MetricReceiverWrapper(MetricReceiver.nullImplementation);
+ GaugeWrapper gauge = metricReceiver.declareGauge("test.gauge");
+
+ gauge.sample(42);
+ gauge.sample(-342.23);
+
+ Map<String, Number> latestMetrics = metricReceiver.getLatestMetrics();
+ assertTrue("Expected only 1 metric value to be set", latestMetrics.size() == 1);
+ assertEquals(latestMetrics.get("test.gauge"), -342.23);
+ }
+}
diff --git a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java
index a575fbfba2a..9d7139e6226 100644
--- a/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java
+++ b/document/src/main/java/com/yahoo/document/DocumentTypeManagerConfigurer.java
@@ -11,7 +11,7 @@ import java.util.ArrayList;
import java.util.logging.Logger;
/**
- * Configures the Vepsa document manager from a document id.
+ * Configures the Vespa document manager from a config id.
*
* @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
*/
diff --git a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer42.java b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer42.java
index 49e61f64e3d..9e764aae798 100644
--- a/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer42.java
+++ b/document/src/main/java/com/yahoo/document/serialization/VespaDocumentDeserializer42.java
@@ -78,8 +78,8 @@ import static com.yahoo.text.Utf8.calculateStringPositions;
* @deprecated Please use {@link com.yahoo.document.serialization.VespaDocumentDeserializerHead} instead for new code.
* @author baldersheim
*/
-@Deprecated // OK: Don't remove on Vespa 6: Mail may have documents on this format still
-// When removing: Move content of this class into VespaDocumentDeserializerHead (and subclass VespaDocumentSerializerHead in that)
+@Deprecated // Remove on Vespa 7
+// When removing: Move content of this class into VespaDocumentDeserializerHead
public class VespaDocumentDeserializer42 extends VespaDocumentSerializer42 implements DocumentDeserializer {
private final Compressor compressor = new Compressor();
@@ -597,8 +597,8 @@ public class VespaDocumentDeserializer42 extends VespaDocumentSerializer42 imple
DocumentType docType = manager.getDocumentType(new DataTypeName(docTypeName));
if (docType == null) {
- throw new DeserializationException(
- "No known document type with name " + new Utf8String(docTypeName).toString());
+ throw new DeserializationException("No known document type with name " +
+ new Utf8String(docTypeName).toString());
}
return docType;
}
diff --git a/document/src/test/java/com/yahoo/document/json/DocumentUpdateJsonSerializerTest.java b/document/src/test/java/com/yahoo/document/json/DocumentUpdateJsonSerializerTest.java
index 9a44ebc96d4..de483186d6c 100644
--- a/document/src/test/java/com/yahoo/document/json/DocumentUpdateJsonSerializerTest.java
+++ b/document/src/test/java/com/yahoo/document/json/DocumentUpdateJsonSerializerTest.java
@@ -26,6 +26,7 @@ import static com.yahoo.test.json.JsonTestHelper.inputJson;
* @author Vegard Sjonfjell
*/
public class DocumentUpdateJsonSerializerTest {
+
final static DocumentTypeManager types = new DocumentTypeManager();
final static JsonFactory parserFactory = new JsonFactory();
final static DocumentType docType = new DocumentType("doctype");
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccess.java b/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccess.java
index 0d781e4ca95..bad692f0a0d 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccess.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccess.java
@@ -41,13 +41,13 @@ import com.yahoo.config.subscription.ConfigSubscriber;
* <p>Access to this class is thread-safe.</p>
*
* @author bratseth
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar Rosenvinge</a>
- * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a>
+ * @author Einar Rosenvinge
+ * @author Simon Thoresen
*/
public abstract class DocumentAccess {
- protected DocumentTypeManager documentMgr;
- protected ConfigSubscriber documentTypeManagerConfig;
+ private final DocumentTypeManager documentTypeManager;
+ private final ConfigSubscriber documentTypeConfigSubscriber;
/**
* <p>This is a convenience method to return a document access object with
@@ -69,8 +69,14 @@ public abstract class DocumentAccess {
*/
protected DocumentAccess(DocumentAccessParams params) {
super();
- documentMgr = new DocumentTypeManager();
- documentTypeManagerConfig = DocumentTypeManagerConfigurer.configure(documentMgr, params.getDocumentManagerConfigId());
+ if (params.documentmanagerConfig().isPresent()) { // our config has been injected into the creator
+ documentTypeManager = new DocumentTypeManager(params.documentmanagerConfig().get());
+ documentTypeConfigSubscriber = null;
+ }
+ else { // fallback to old style subscription
+ documentTypeManager = new DocumentTypeManager();
+ documentTypeConfigSubscriber = DocumentTypeManagerConfigurer.configure(documentTypeManager, params.getDocumentManagerConfigId());
+ }
}
/**
@@ -154,11 +160,15 @@ public abstract class DocumentAccess {
public abstract SubscriptionSession openSubscription(SubscriptionParameters parameters);
/**
- * <p>Shuts down the underlying sessions used by this DocumentAccess;
+ * Shuts down the underlying sessions used by this DocumentAccess;
* subsequent use of this DocumentAccess will throw unspecified exceptions,
- * depending on implementation.</p>
+ * depending on implementation.
+ * Classes overriding this must call super.shutdown().
*/
- public abstract void shutdown();
+ public void shutdown() {
+ if (documentTypeConfigSubscriber != null)
+ documentTypeConfigSubscriber.close();
+ }
/**
* <p>Returns the {@link DocumentTypeManager} used by this
@@ -167,6 +177,6 @@ public abstract class DocumentAccess {
* @return The document type manager.
*/
public DocumentTypeManager getDocumentTypeManager() {
- return documentMgr;
+ return documentTypeManager;
}
}
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccessParams.java b/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccessParams.java
index 57cfdbd32e1..701fafbab06 100755
--- a/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccessParams.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/DocumentAccessParams.java
@@ -1,6 +1,10 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.documentapi;
+import com.yahoo.document.config.DocumentmanagerConfig;
+
+import java.util.Optional;
+
/**
* Superclass of the classes which contains the parameters for creating or opening a document access.
*
@@ -8,26 +12,27 @@ package com.yahoo.documentapi;
*/
public class DocumentAccessParams {
- // The id to resolve to document manager config.
+ /** The id to resolve to document manager config. Not needed if the config is passed here */
private String documentManagerConfigId = "client";
- /**
- * Returns the config id that the document manager should subscribe to.
- *
- * @return The config id.
- */
- public String getDocumentManagerConfigId() {
- return documentManagerConfigId;
- }
+ /** The document manager config, or empty if not provided (in which case a subscription must be created) */
+ private Optional<DocumentmanagerConfig> documentmanagerConfig = Optional.empty();
+
+ /** Returns the config id that the document manager should subscribe to. */
+ public String getDocumentManagerConfigId() { return documentManagerConfigId; }
+
+ /** Returns the document manager config to use, or empty if it is necessary to subscribe to get it */
+ public Optional<DocumentmanagerConfig> documentmanagerConfig() { return documentmanagerConfig; }
- /**
- * Sets the config id that the document manager should subscribe to.
- *
- * @param configId The config id.
- * @return This, to allow chaining.
- */
+ /** Sets the config id that the document manager should subscribe to. */
public DocumentAccessParams setDocumentManagerConfigId(String configId) {
documentManagerConfigId = configId;
return this;
}
+
+ public DocumentAccessParams setDocumentmanagerConfig(DocumentmanagerConfig documentmanagerConfig) {
+ this.documentmanagerConfig = Optional.of(documentmanagerConfig);
+ return this;
+ }
+
} \ No newline at end of file
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/local/LocalDocumentAccess.java b/documentapi/src/main/java/com/yahoo/documentapi/local/LocalDocumentAccess.java
index edcefe9447d..ab1b5e7cdd6 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/local/LocalDocumentAccess.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/local/LocalDocumentAccess.java
@@ -22,13 +22,6 @@ public class LocalDocumentAccess extends DocumentAccess {
}
@Override
- public void shutdown() {
- if (documentTypeManagerConfig != null) {
- documentTypeManagerConfig.close();
- }
- }
-
- @Override
public SyncSession createSyncSession(SyncParameters parameters) {
return new LocalSyncSession(this);
}
@@ -57,4 +50,5 @@ public class LocalDocumentAccess extends DocumentAccess {
public SubscriptionSession openSubscription(SubscriptionParameters parameters) {
throw new UnsupportedOperationException("Not supported yet");
}
+
}
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusDocumentAccess.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusDocumentAccess.java
index 818bc204784..0a57a700276 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusDocumentAccess.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/MessageBusDocumentAccess.java
@@ -7,8 +7,11 @@ import com.yahoo.document.select.parser.ParseException;
import com.yahoo.documentapi.*;
import com.yahoo.documentapi.messagebus.protocol.DocumentProtocol;
import com.yahoo.messagebus.MessageBus;
+import com.yahoo.messagebus.NetworkMessageBus;
import com.yahoo.messagebus.RPCMessageBus;
import com.yahoo.messagebus.network.Network;
+import com.yahoo.messagebus.network.local.LocalNetwork;
+import com.yahoo.messagebus.network.local.LocalWire;
import com.yahoo.messagebus.routing.RoutingTable;
import java.util.concurrent.Executors;
@@ -17,16 +20,18 @@ import java.util.concurrent.ScheduledExecutorService;
/**
* This class implements the {@link DocumentAccess} interface using message bus for communication.
*
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar Rosenvinge</a>
+ * @author Einar Rosenvinge
* @author bratseth
*/
public class MessageBusDocumentAccess extends DocumentAccess {
- private final RPCMessageBus bus;
+ private final NetworkMessageBus bus;
+
private final MessageBusParams params;
// TODO: make pool size configurable? ScheduledExecutorService is not dynamic
- private final ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(
- Runtime.getRuntime().availableProcessors(), ThreadFactoryFactory.getDaemonThreadFactory("mbus.access.scheduler"));
+ private final ScheduledExecutorService scheduledExecutorService =
+ Executors.newScheduledThreadPool(Runtime.getRuntime().availableProcessors(),
+ ThreadFactoryFactory.getDaemonThreadFactory("mbus.access.scheduler"));
/**
* Creates a new document access using default values for all parameters.
@@ -46,32 +51,38 @@ public class MessageBusDocumentAccess extends DocumentAccess {
try {
com.yahoo.messagebus.MessageBusParams mbusParams = new com.yahoo.messagebus.MessageBusParams(params.getMessageBusParams());
mbusParams.addProtocol(new DocumentProtocol(getDocumentTypeManager(), params.getProtocolConfigId(), params.getLoadTypes()));
- bus = new RPCMessageBus(mbusParams,
- params.getRPCNetworkParams(),
- params.getRoutingConfigId());
+ if (System.getProperty("vespa.local", "false").equals("true")) { // set by Application when running locally
+ LocalNetwork network = new LocalNetwork();
+ bus = new NetworkMessageBus(network, new MessageBus(network, mbusParams));
+ }
+ else {
+ bus = new RPCMessageBus(mbusParams, params.getRPCNetworkParams(), params.getRoutingConfigId());
+ }
}
catch (Exception e) {
throw new DocumentAccessException(e);
}
}
+
+ private MessageBus messageBus() {
+ return bus.getMessageBus();
+ }
@Override
public void shutdown() {
+ super.shutdown();
bus.destroy();
- if (documentTypeManagerConfig != null) {
- documentTypeManagerConfig.close();
- }
scheduledExecutorService.shutdownNow();
}
@Override
public MessageBusSyncSession createSyncSession(SyncParameters parameters) {
- return new MessageBusSyncSession(parameters, bus.getMessageBus(), this.params);
+ return new MessageBusSyncSession(parameters, messageBus(), this.params);
}
@Override
public MessageBusAsyncSession createAsyncSession(AsyncParameters parameters) {
- return new MessageBusAsyncSession(parameters, bus.getMessageBus(), this.params);
+ return new MessageBusAsyncSession(parameters, messageBus(), this.params);
}
@Override
@@ -101,34 +112,20 @@ public class MessageBusDocumentAccess extends DocumentAccess {
throw new UnsupportedOperationException("Subscriptions not supported.");
}
- /**
- * Returns the internal message bus object so that clients can use it directly.
- *
- * @return The internal message bus.
- */
- public MessageBus getMessageBus() {
- return bus.getMessageBus();
- }
+ /** Returns the internal message bus object so that clients can use it directly. */
+ public MessageBus getMessageBus() { return messageBus(); }
/**
* Returns the network layer of the internal message bus object so that clients can use it directly. This may seem
* a bit arbitrary, but the fact is that the RPCNetwork actually implements the IMirror API as well as exposing the
* SystemState object.
- *
- * @return The network layer.
*/
- public Network getNetwork() {
- return bus.getRPCNetwork();
- }
+ public Network getNetwork() { return bus.getNetwork(); }
/**
* Returns the parameter object that controls the underlying message bus. Changes to these parameters do not affect
* previously created sessions.
- *
- * @return The parameter object.
*/
- public MessageBusParams getParams() {
- return params;
- }
+ public MessageBusParams getParams() { return params; }
}
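With the constructor now branching on the vespa.local system property, a client can opt into the in-VM LocalNetwork wiring the same way Application does when running locally. A rough sketch, assuming the params-taking constructor and that document type and routing config are reachable in the environment:

    import com.yahoo.documentapi.messagebus.MessageBusDocumentAccess;
    import com.yahoo.documentapi.messagebus.MessageBusParams;

    public class LocalModeSketch {
        public static void main(String[] args) {
            // The property is read once in the constructor, so it must be set first.
            System.setProperty("vespa.local", "true");

            MessageBusDocumentAccess access = new MessageBusDocumentAccess(new MessageBusParams());
            try {
                // Sessions created from this access now ride on the in-VM LocalNetwork
                // instead of an RPCNetwork.
            } finally {
                access.shutdown(); // super.shutdown() also closes the config subscriber
            }
        }
    }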
diff --git a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/loadtypes/LoadTypeSet.java b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/loadtypes/LoadTypeSet.java
index cb453559ab1..b9129bf3b85 100644
--- a/documentapi/src/main/java/com/yahoo/documentapi/messagebus/loadtypes/LoadTypeSet.java
+++ b/documentapi/src/main/java/com/yahoo/documentapi/messagebus/loadtypes/LoadTypeSet.java
@@ -23,6 +23,7 @@ import java.util.TreeMap;
* load types yourself with addType().
*/
public class LoadTypeSet {
+
class DualMap {
Map<String, LoadType> nameMap = new TreeMap<String, LoadType>();
Map<Integer, LoadType> idMap = new HashMap<Integer, LoadType>();
@@ -49,6 +50,10 @@ public class LoadTypeSet {
configure(new ConfigGetter<>(LoadTypeConfig.class).getConfig(configId));
}
+ public LoadTypeSet(LoadTypeConfig loadTypeConfig) {
+ configure(loadTypeConfig);
+ }
+
public Map<String, LoadType> getNameMap() {
return map.nameMap;
}
diff --git a/fastlib/src/vespa/fastlib/io/bufferedinputstream.cpp b/fastlib/src/vespa/fastlib/io/bufferedinputstream.cpp
index 9b761d2e272..7b1da7b3293 100644
--- a/fastlib/src/vespa/fastlib/io/bufferedinputstream.cpp
+++ b/fastlib/src/vespa/fastlib/io/bufferedinputstream.cpp
@@ -1,24 +1,8 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-//************************************************************************
-/**
- * Implmentation of Fast_BufferedInputStream
- *
- * @author Markus Bjartveit Krüger
- * @version $Id$
- */
- /*
- * Creation date : 2001-10-29
- * Copyright (c) : 1997-2002 Fast Search & Transfer ASA
- * ALL RIGHTS RESERVED
- *************************************************************************/
#include <vespa/fastos/fastos.h>
#include "bufferedinputstream.h"
-
-
-
-Fast_BufferedInputStream::Fast_BufferedInputStream(Fast_InputStream &in,
- size_t bufferSize)
+Fast_BufferedInputStream::Fast_BufferedInputStream(Fast_InputStream &in, size_t bufferSize)
: Fast_FilterInputStream(in),
_buffer(new char[bufferSize]),
_bufferSize((_buffer != NULL) ? bufferSize : 0),
@@ -28,89 +12,70 @@ Fast_BufferedInputStream::Fast_BufferedInputStream(Fast_InputStream &in,
{
}
-
-
-Fast_BufferedInputStream::~Fast_BufferedInputStream(void)
+Fast_BufferedInputStream::~Fast_BufferedInputStream()
{
delete [] _buffer;
-};
-
-
+}
-ssize_t Fast_BufferedInputStream::Available(void)
+ssize_t
+Fast_BufferedInputStream::Available()
{
return _in->Available() + _bufferUsed - _bufferRead;
}
-
-
-bool Fast_BufferedInputStream::Close(void)
+bool
+Fast_BufferedInputStream::Close()
{
return _in->Close();
}
-
-
-ssize_t Fast_BufferedInputStream::Skip(size_t skipNBytes)
+ssize_t
+Fast_BufferedInputStream::Skip(size_t skipNBytes)
{
ssize_t numBytesSkipped = 0;
- if (_nextWillFail)
- {
+ if (_nextWillFail) {
_nextWillFail = false;
return -1;
}
- if (skipNBytes > _bufferUsed - _bufferRead)
- {
+ if (skipNBytes > _bufferUsed - _bufferRead) {
// First, skip all bytes in buffer
numBytesSkipped = _bufferUsed - _bufferRead;
_bufferUsed = _bufferRead = 0;
// Skip rest of bytes in slave stream
ssize_t slaveSkipped = _in->Skip(skipNBytes - numBytesSkipped);
- if (slaveSkipped < 0)
- {
- if (numBytesSkipped > 0)
- {
+ if (slaveSkipped < 0) {
+ if (numBytesSkipped > 0) {
_nextWillFail = true;
- }
- else
- {
+ } else {
numBytesSkipped = slaveSkipped;
}
- }
- else
- {
+ } else {
numBytesSkipped += slaveSkipped;
}
- }
- else
- {
+ } else {
// Skip all skipNBytes in buffer
_bufferRead += skipNBytes;
- if (_bufferRead == _bufferUsed)
- {
+ if (_bufferRead == _bufferUsed) {
_bufferUsed = _bufferRead = 0;
}
numBytesSkipped = skipNBytes;
}
-
return numBytesSkipped;
}
-
-
-ssize_t Fast_BufferedInputStream::Read(void *targetBuffer, size_t length)
+ssize_t
+Fast_BufferedInputStream::Read(void *targetBuffer, size_t length)
{
// This function will under no circumstance read more than once from
// its slave stream, in order to prevent blocking on input.
- if (_nextWillFail)
- {
+ if (_nextWillFail) {
_nextWillFail = false;
return -1;
}
@@ -119,22 +84,17 @@ ssize_t Fast_BufferedInputStream::Read(void *targetBuffer, size_t length)
char* to = static_cast<char*>(targetBuffer);
size_t bufferRemain = _bufferUsed - _bufferRead;
- if (length <= bufferRemain)
- {
+ if (length <= bufferRemain) {
memcpy(to, &_buffer[_bufferRead], length);
numBytesRead += length;
_bufferRead += length;
- if (_bufferRead == _bufferUsed)
- {
+ if (_bufferRead == _bufferUsed) {
_bufferRead = _bufferUsed = 0;
}
- }
- else
- {
+ } else {
// Use the data currently in the buffer, then read from slave stream.
- if (bufferRemain > 0)
- {
+ if (bufferRemain > 0) {
memcpy(to, &_buffer[_bufferRead], bufferRemain);
numBytesRead += bufferRemain;
length -= bufferRemain;
@@ -146,54 +106,37 @@ ssize_t Fast_BufferedInputStream::Read(void *targetBuffer, size_t length)
// If remaining data to be read can fit in the buffer, put it
// there, otherwise read directly to receiver and empty the buffer.
- if (length < _bufferSize)
- {
+ if (length < _bufferSize) {
slaveRead = Fast_FilterInputStream::Read(_buffer, _bufferSize);
- }
- else
- {
+ } else {
slaveRead = Fast_FilterInputStream::Read(to, length);
}
- if (slaveRead > 0)
- {
- if (length < _bufferSize)
- {
+ if (slaveRead > 0) {
+ if (length < _bufferSize) {
// We read to buffer, so copy from buffer to receiver.
- if (length < static_cast<size_t>(slaveRead))
- {
+ if (length < static_cast<size_t>(slaveRead)) {
memcpy(to, _buffer, length);
numBytesRead += length;
_bufferUsed = slaveRead;
_bufferRead = length;
- }
- else
- {
+ } else {
memcpy(to, _buffer, slaveRead);
numBytesRead += slaveRead;
}
- }
- else
- {
+ } else {
// We read directly to receiver, no need to copy.
numBytesRead += slaveRead;
}
- }
- else if (slaveRead == 0)
- {
+ } else if (slaveRead == 0) {
// Do nothing
- }
- else
- {
+ } else {
// slaveRead < 0, so an error occurred while reading from the
// slave. If there was data in the buffer, report success and
// fail on next operation instead.
- if (numBytesRead > 0)
- {
+ if (numBytesRead > 0) {
_nextWillFail = true;
- }
- else
- {
+ } else {
numBytesRead = slaveRead;
}
}
@@ -203,10 +146,8 @@ ssize_t Fast_BufferedInputStream::Read(void *targetBuffer, size_t length)
return numBytesRead;
}
-
-ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer,
- size_t maxlength,
- char stopChar)
+ssize_t
+Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer, size_t maxlength, char stopChar)
{
if (maxlength > _bufferSize)
@@ -215,8 +156,7 @@ ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer,
// This function will under no circumstance read more than once from
// its slave stream, in order to prevent blocking on input.
- if (_nextWillFail)
- {
+ if (_nextWillFail) {
_nextWillFail = false;
return -1;
}
@@ -239,22 +179,17 @@ ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer,
}
}
- if (maxlength <= bufferRemain)
- {
+ if (maxlength <= bufferRemain) {
memcpy(to, &_buffer[_bufferRead], maxlength);
numBytesRead += maxlength;
_bufferRead += maxlength;
- if (_bufferRead == _bufferUsed)
- {
+ if (_bufferRead == _bufferUsed) {
_bufferRead = _bufferUsed = 0;
}
- }
- else
- {
+ } else {
// Use the data currently in the buffer, then read from slave stream.
- if (bufferRemain > 0)
- {
+ if (bufferRemain > 0) {
memcpy(to, &_buffer[_bufferRead], bufferRemain);
numBytesRead += bufferRemain;
maxlength -= bufferRemain;
@@ -265,8 +200,7 @@ ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer,
ssize_t slaveRead;
slaveRead = Fast_FilterInputStream::Read(_buffer, _bufferSize);
- if (slaveRead > 0)
- {
+ if (slaveRead > 0) {
for (offset = 0; offset < static_cast<uint32_t>(slaveRead); offset++) {
if(_buffer[offset] == stopChar) {
break;
@@ -276,42 +210,31 @@ ssize_t Fast_BufferedInputStream::ReadBufferFullUntil(void *targetBuffer,
if (offset >= maxlength) {
// Discard data if character was not present
numBytesRead = -1;
- }
- else {
+ } else {
// Found character in buffer
if (offset < static_cast<uint32_t>(slaveRead)) {
maxlength = offset + 1;
}
// We read to buffer, so copy from buffer to receiver.
- if (maxlength < static_cast<size_t>(slaveRead))
- {
+ if (maxlength < static_cast<size_t>(slaveRead)) {
memcpy(to, _buffer, maxlength);
numBytesRead += maxlength;
_bufferUsed = slaveRead;
_bufferRead = maxlength;
- }
- else
- {
+ } else {
memcpy(to, _buffer, slaveRead);
numBytesRead += slaveRead;
}
}
- }
- else if (slaveRead == 0)
- {
+ } else if (slaveRead == 0) {
// Do nothing
- }
- else
- {
+ } else {
// slaveRead < 0, so an error occurred while reading from the
// slave. If there was data in the buffer, report success and
// fail on next operation instead.
- if (numBytesRead > 0)
- {
+ if (numBytesRead > 0) {
_nextWillFail = true;
- }
- else
- {
+ } else {
numBytesRead = slaveRead;
}
}
diff --git a/fastlib/src/vespa/fastlib/io/bufferedinputstream.h b/fastlib/src/vespa/fastlib/io/bufferedinputstream.h
index 183e3494215..b102bd98abc 100644
--- a/fastlib/src/vespa/fastlib/io/bufferedinputstream.h
+++ b/fastlib/src/vespa/fastlib/io/bufferedinputstream.h
@@ -1,61 +1,37 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-//************************************************************************
-/**
- * Class definitions for Fast_BufferedInputStream
- *
- * @author Markus Bjartveit Krüger
- * @version $Id$
- */
- /*
- * Creation date : 2001-10-29
- * Copyright (c) : 1997-2002 Fast Search & Transfer ASA
- * ALL RIGHTS RESERVED
- *************************************************************************/
#pragma once
#include <vespa/fastlib/io/filterinputstream.h>
-
-
-
-
class Fast_BufferedInputStream : public Fast_FilterInputStream
{
- private:
-
- // Prevent use of:
- Fast_BufferedInputStream(const Fast_BufferedInputStream &);
- Fast_BufferedInputStream & operator=(const Fast_BufferedInputStream &);
-
-
protected:
-
// Buffer attributes
- char *_buffer;
- size_t _bufferSize;
- size_t _bufferUsed; // Amount of buffer currently holding data
- size_t _bufferRead; // How far buffer has been read
- bool _nextWillFail;
+ char *_buffer;
+ const size_t _bufferSize;
+ size_t _bufferUsed; // Amount of buffer currently holding data
+ size_t _bufferRead; // How far buffer has been read
+ bool _nextWillFail;
public:
+ Fast_BufferedInputStream(const Fast_BufferedInputStream &) = delete;
+ Fast_BufferedInputStream & operator = (const Fast_BufferedInputStream &) = delete;
// Constructor
Fast_BufferedInputStream(Fast_InputStream &in, size_t bufferSize = 1024);
// Destructor
- virtual ~Fast_BufferedInputStream(void);
+ virtual ~Fast_BufferedInputStream();
// Subclassed methods
- virtual ssize_t Available(void);
- virtual bool Close(void);
- virtual ssize_t Skip(size_t skipNBytes);
- virtual ssize_t Read(void *targetBuffer, size_t length);
+ ssize_t Available() override;
+ bool Close() override;
+ ssize_t Skip(size_t skipNBytes) override;
+ ssize_t Read(void *targetBuffer, size_t length) override;
// Additional methods
- ssize_t ReadBufferFullUntil(void *targetBuffer,
- size_t maxlength,
- char stopChar);
+ ssize_t ReadBufferFullUntil(void *targetBuffer, size_t maxlength, char stopChar);
};
diff --git a/fastlib/src/vespa/fastlib/net/httpheaderparser.cpp b/fastlib/src/vespa/fastlib/net/httpheaderparser.cpp
index 22174acfec9..aa56372277c 100644
--- a/fastlib/src/vespa/fastlib/net/httpheaderparser.cpp
+++ b/fastlib/src/vespa/fastlib/net/httpheaderparser.cpp
@@ -3,50 +3,40 @@
#include <vespa/fastlib/io/bufferedinputstream.h>
#include <vespa/fastlib/net/httpheaderparser.h>
-
-
Fast_HTTPHeaderParser::Fast_HTTPHeaderParser(Fast_BufferedInputStream &in)
: _pushBack(0),
_isPushBacked(false),
+ _bufferSize(16384),
+ _lineBuffer(new char[_bufferSize]),
_input(&in)
{
}
-
-
Fast_HTTPHeaderParser::~Fast_HTTPHeaderParser(void)
{
+ delete [] _lineBuffer;
}
-
-
-bool Fast_HTTPHeaderParser::ReadRequestLine(const char *&method,
- const char *&url,
- int &versionMajor,
- int &versionMinor)
+bool
+Fast_HTTPHeaderParser::ReadRequestLine(const char *&method, const char *&url, int &versionMajor, int &versionMinor)
{
// Read a single line from input. Repeat if line is blank, to cope
// with buggy HTTP/1.1 clients that print extra empty lines at the
// end of requests.
- do
- {
- int idx = 0;
- size_t readLen =
- _input->ReadBufferFullUntil(_lineBuffer,
- static_cast<size_t>
- (HTTPHEADERPARSER_LINE_BUFFER_SIZE),
- '\n');
+ do {
+ size_t idx = 0;
+ ssize_t readLen = _input->ReadBufferFullUntil(_lineBuffer, _bufferSize, '\n');
if (readLen <= 0) {
return false;
}
idx = readLen-1;
- if (idx == 0 || _lineBuffer[idx] != '\n')
+ if (idx == 0 || _lineBuffer[idx] != '\n') {
return false;
+ }
_lineBuffer[idx--] = '\0';
- if (_lineBuffer[idx] == '\r')
- {
+ if (_lineBuffer[idx] == '\r') {
_lineBuffer[idx] = '\0';
}
} while (_lineBuffer[0] == '\0');
@@ -58,20 +48,17 @@ bool Fast_HTTPHeaderParser::ReadRequestLine(const char *&method,
method = p;
p = strchr(p, ' ');
- if (p != NULL)
- {
+ if (p != NULL) {
*p++ = '\0';
url = p;
p = strchr(p, ' ');
- if (p != NULL)
- {
+ if (p != NULL) {
*p++ = '\0';
version = p;
}
}
- if (sscanf(version, "HTTP/%d.%d", &versionMajor, &versionMinor) != 2)
- {
+ if (sscanf(version, "HTTP/%d.%d", &versionMajor, &versionMinor) != 2) {
versionMajor = versionMinor = -1;
return false;
}
@@ -79,37 +66,31 @@ bool Fast_HTTPHeaderParser::ReadRequestLine(const char *&method,
return true;
}
-bool Fast_HTTPHeaderParser::ReadHeader(const char *&name, const char *&value)
+bool
+Fast_HTTPHeaderParser::ReadHeader(const char *&name, const char *&value)
{
- int idx = 0;
+ size_t idx = 0;
name = NULL;
value = NULL;
- if (_isPushBacked)
- {
+ if (_isPushBacked) {
idx = 0;
_lineBuffer[idx] = _pushBack;
_isPushBacked = false;
idx++;
}
- while (idx<HTTPHEADERPARSER_LINE_BUFFER_SIZE-1)
- {
-
- size_t readLen =
- _input->ReadBufferFullUntil(&_lineBuffer[idx],
- static_cast<size_t>
- (HTTPHEADERPARSER_LINE_BUFFER_SIZE),
- '\n');
+ constexpr size_t ROOM_FOR_PUSH_BACK = 1u;
+ while ((idx + ROOM_FOR_PUSH_BACK) < _bufferSize) {
+ ssize_t readLen = _input->ReadBufferFullUntil(&_lineBuffer[idx], _bufferSize - idx - ROOM_FOR_PUSH_BACK, '\n');
if (readLen <= 0) {
return false;
}
idx += readLen - 1;
// Empty line == end of headers.
// handle case with \r\n as \n
- if (idx == 0 || (_lineBuffer[0] == '\r' && idx == 1))
- {
+ if (idx == 0 || (_lineBuffer[0] == '\r' && idx == 1)) {
idx = 0;
break;
}
@@ -123,52 +104,44 @@ bool Fast_HTTPHeaderParser::ReadHeader(const char *&name, const char *&value)
}
// Check if header continues on next line.
- if (_input->Read(&_pushBack, 1) != 1)
+ if (_input->Read(&_pushBack, 1) != 1) {
break;
- if (_pushBack == ' ' || _pushBack == '\t')
- {
+ }
+ if (_pushBack == ' ' || _pushBack == '\t') {
// Header does continue on next line.
// Replace newline with horizontal whitespace.
_lineBuffer[idx] = _pushBack;
idx++;
- }
- else
- {
+ } else {
_isPushBacked = true;
// break out of while loop
break;
}
-
}
- if (idx != 0)
- {
+ if (idx != 0) {
_lineBuffer[idx] = '\0';
char *p = _lineBuffer;
name = p;
// Find end of header name.
- while (*p != ':' && *p != '\0')
- {
+ while (*p != ':' && *p != '\0') {
p++;
}
// If end of header name is not end of header, parse header value.
- if (*p != '\0')
- {
+ if (*p != '\0') {
// Terminate header name.
*p++ = '\0';
// Skip leading whitespace before header value.
- while (*p == ' ' || *p == '\t')
- {
+ while (*p == ' ' || *p == '\t') {
p++;
}
value = p;
// Strip trailing whitespace.
p += strlen(p);
- while (p > value && (*(p-1) == ' ' || *(p-1) == '\t'))
- {
+ while (p > value && (*(p-1) == ' ' || *(p-1) == '\t')) {
p--;
}
*p = '\0';
diff --git a/fastlib/src/vespa/fastlib/net/httpheaderparser.h b/fastlib/src/vespa/fastlib/net/httpheaderparser.h
index 5bb41e787a7..59c5cb01f8a 100644
--- a/fastlib/src/vespa/fastlib/net/httpheaderparser.h
+++ b/fastlib/src/vespa/fastlib/net/httpheaderparser.h
@@ -1,53 +1,24 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-/**
-*******************************************************************************
-*
-* @author Markus Bjartveit Krüger
-* @date Creation date: 2000-11-22
-* @version $Id$
-*
-* @file
-*
-* HTTP header parser.
-*
-* Copyright (c) : 2001 Fast Search & Transfer ASA
-* ALL RIGHTS RESERVED
-*
-******************************************************************************/
-
#pragma once
-
-
class Fast_BufferedInputStream;
-
-#define HTTPHEADERPARSER_LINE_BUFFER_SIZE 4096
-
-
class Fast_HTTPHeaderParser
{
- private:
- // Prevent use of:
- Fast_HTTPHeaderParser(const Fast_HTTPHeaderParser &);
- Fast_HTTPHeaderParser & operator=(const Fast_HTTPHeaderParser &);
- protected:
- char _pushBack;
- bool _isPushBacked;
- char _lineBuffer[HTTPHEADERPARSER_LINE_BUFFER_SIZE];
- Fast_BufferedInputStream *_input;
-
public:
+ Fast_HTTPHeaderParser(const Fast_HTTPHeaderParser &) = delete;
+ Fast_HTTPHeaderParser & operator = (const Fast_HTTPHeaderParser &) = delete;
Fast_HTTPHeaderParser(Fast_BufferedInputStream &in);
- virtual ~Fast_HTTPHeaderParser(void);
-
+ ~Fast_HTTPHeaderParser();
// Methods
- bool ReadRequestLine(const char *&method, const char *&url,
- int &versionMajor, int &versionMinor);
+ bool ReadRequestLine(const char *&method, const char *&url, int &versionMajor, int &versionMinor);
bool ReadHeader(const char *&name, const char *&value);
+ private:
+ char _pushBack;
+ bool _isPushBacked;
+ const size_t _bufferSize;
+ char *_lineBuffer;
+ Fast_BufferedInputStream *_input;
};
-
-
-
diff --git a/filedistribution/src/apps/filedistributor/filedistributor.cpp b/filedistribution/src/apps/filedistributor/filedistributor.cpp
index 37f8d259258..8625549dc19 100644
--- a/filedistribution/src/apps/filedistributor/filedistributor.cpp
+++ b/filedistribution/src/apps/filedistributor/filedistributor.cpp
@@ -5,7 +5,6 @@
#include <cstdlib>
#include <boost/program_options.hpp>
-#include <boost/exception/diagnostic_information.hpp>
#include <vespa/fastos/app.h>
#include <vespa/config-zookeepers.h>
@@ -288,32 +287,28 @@ FileDistributorApplication::Main() {
EV_STOPPING(programName, "Clean exit");
return 0;
} catch(const FileDoesNotExistException & e) {
- std::string s = boost::diagnostic_information(e);
- EV_STOPPING(programName, s.c_str());
+ EV_STOPPING(programName, e.what());
return 1;
} catch(const ZKNodeDoesNotExistsException & e) {
- std::string s = boost::diagnostic_information(e);
- EV_STOPPING(programName, s.c_str());
+ EV_STOPPING(programName, e.what());
return 2;
} catch(const ZKSessionExpired & e) {
- std::string s = boost::diagnostic_information(e);
- EV_STOPPING(programName, s.c_str());
+ EV_STOPPING(programName, e.what());
return 3;
} catch(const config::ConfigTimeoutException & e) {
- std::string s = boost::diagnostic_information(e);
- EV_STOPPING(programName, s.c_str());
+ EV_STOPPING(programName, e.what());
return 4;
} catch(const vespalib::PortListenException & e) {
- std::string s = boost::diagnostic_information(e);
- EV_STOPPING(programName, s.c_str());
+ EV_STOPPING(programName, e.what());
return 5;
} catch(const ZKConnectionLossException & e) {
- std::string s = boost::diagnostic_information(e);
- EV_STOPPING(programName, s.c_str());
+ EV_STOPPING(programName, e.what());
return 6;
+ } catch(const ZKFailedConnecting & e) {
+ EV_STOPPING(programName, e.what());
+ return 7;
} catch(const ZKGenericException & e) {
- std::string s = boost::diagnostic_information(e);
- EV_STOPPING(programName, s.c_str());
+ EV_STOPPING(programName, e.what());
return 99;
}
}
diff --git a/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp b/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp
index 8c2bda6321c..38080e5239c 100644
--- a/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp
+++ b/filedistribution/src/vespa/filedistribution/distributor/filedownloader.cpp
@@ -399,6 +399,9 @@ void FileDownloader::runEventLoop() {
}
} catch (const ZKConnectionLossException & e) {
LOG(info, "Connection loss in downloader event loop, resuming. %s", e.what());
+ } catch (const vespalib::PortListenException & e) {
+ LOG(error, "Failed listening to torrent port : %s", e.what());
+ std::quick_exit(21);
}
}
drain();
diff --git a/jdisc_core/src/main/java/com/yahoo/jdisc/application/OsgiFramework.java b/jdisc_core/src/main/java/com/yahoo/jdisc/application/OsgiFramework.java
index 615b36fef1f..78b97caf57b 100644
--- a/jdisc_core/src/main/java/com/yahoo/jdisc/application/OsgiFramework.java
+++ b/jdisc_core/src/main/java/com/yahoo/jdisc/application/OsgiFramework.java
@@ -38,7 +38,7 @@ public interface OsgiFramework {
* or the caller does not have the appropriate permissions, or the system {@link
* BundleContext} is no longer valid.
*/
- public List<Bundle> installBundle(String bundleLocation) throws BundleException;
+ List<Bundle> installBundle(String bundleLocation) throws BundleException;
/**
* <p>Starts the given {@link Bundle}s. The parameter <tt>privileged</tt> tells the framework whether or not
@@ -53,13 +53,13 @@ public interface OsgiFramework {
* @throws SecurityException If the caller does not have the appropriate permissions.
* @throws IllegalStateException If this bundle has been uninstalled or this bundle tries to change its own state.
*/
- public void startBundles(List<Bundle> bundles, boolean privileged) throws BundleException;
+ void startBundles(List<Bundle> bundles, boolean privileged) throws BundleException;
/**
* <p>This method <em>synchronously</em> refreshes all bundles currently loaded. Once this method returns, the
* class loaders of all bundles will reflect on the current set of loaded bundles.</p>
*/
- public void refreshPackages();
+ void refreshPackages();
/**
* <p>Returns the BundleContext of this framework's system bundle. The returned BundleContext can be used by the
@@ -70,7 +70,7 @@ public interface OsgiFramework {
* @throws SecurityException If the caller does not have the appropriate permissions.
* @since 2.0
*/
- public BundleContext bundleContext();
+ BundleContext bundleContext();
/**
* <p>Returns an iterable collection of all installed bundles. This method returns a list of all bundles installed
@@ -79,7 +79,7 @@ public interface OsgiFramework {
*
* @return An iterable collection of Bundle objects, one object per installed bundle.
*/
- public List<Bundle> bundles();
+ List<Bundle> bundles();
/**
* <p>This method starts the framework instance. Before this method is called, any call to {@link
@@ -87,7 +87,7 @@ public interface OsgiFramework {
*
* @throws BundleException If any error occurs.
*/
- public void start() throws BundleException;
+ void start() throws BundleException;
/**
* <p>This method <em>synchronously</em> shuts down the framework. It must be called at the end of a session in
@@ -95,5 +95,6 @@ public interface OsgiFramework {
*
* @throws BundleException If any error occurs.
*/
- public void stop() throws BundleException;
+ void stop() throws BundleException;
+
}
diff --git a/jrt/src/com/yahoo/jrt/Acceptor.java b/jrt/src/com/yahoo/jrt/Acceptor.java
index 05a7591ab74..7316f8c620b 100644
--- a/jrt/src/com/yahoo/jrt/Acceptor.java
+++ b/jrt/src/com/yahoo/jrt/Acceptor.java
@@ -13,7 +13,7 @@ import java.util.logging.Logger;
* transport thread. To create an acceptor you need to invoke the
* {@link Supervisor#listen listen} method in the {@link Supervisor}
* class.
- **/
+ */
public class Acceptor {
private class Run implements Runnable {
@@ -34,15 +34,12 @@ public class Acceptor {
private ServerSocketChannel serverChannel;
- Acceptor(Transport parent, Supervisor owner,
- Spec spec) throws ListenFailedException {
-
+ Acceptor(Transport parent, Supervisor owner, Spec spec) throws ListenFailedException {
this.parent = parent;
this.owner = owner;
- if (spec.malformed()) {
- throw new ListenFailedException("Malformed spec");
- }
+ if (spec.malformed())
+ throw new ListenFailedException("Malformed spec '" + spec + "'");
try {
serverChannel = ServerSocketChannel.open();
@@ -55,7 +52,7 @@ public class Acceptor {
if (serverChannel != null) {
try { serverChannel.socket().close(); } catch (Exception x) {}
}
- throw new ListenFailedException("Listen failed", e);
+ throw new ListenFailedException("Failed to listen to " + spec, e);
}
thread.setDaemon(true);
@@ -84,7 +81,7 @@ public class Acceptor {
* @return listening spec, or null if not listening.
**/
public Spec spec() {
- if (!serverChannel.isOpen()) {
+ if ( ! serverChannel.isOpen()) {
return null;
}
return new Spec(serverChannel.socket().getInetAddress().getHostName(),
@@ -94,8 +91,7 @@ public class Acceptor {
private void run() {
while (serverChannel.isOpen()) {
try {
- parent.addConnection(new Connection(parent, owner,
- serverChannel.accept()));
+ parent.addConnection(new Connection(parent, owner, serverChannel.accept()));
parent.sync();
} catch (java.nio.channels.ClosedChannelException x) {
} catch (Exception e) {
diff --git a/jrt/src/com/yahoo/jrt/Connection.java b/jrt/src/com/yahoo/jrt/Connection.java
index 7affa875cd6..52964726eb7 100644
--- a/jrt/src/com/yahoo/jrt/Connection.java
+++ b/jrt/src/com/yahoo/jrt/Connection.java
@@ -1,7 +1,6 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jrt;
-
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
@@ -35,10 +34,8 @@ class Connection extends Target {
private Buffer output = new Buffer(WRITE_SIZE * 2);
private int maxInputSize = 64*1024;
private int maxOutputSize = 64*1024;
- private Map<Integer, ReplyHandler> replyMap
- = new HashMap<Integer, ReplyHandler>();
- private Map<TargetWatcher, TargetWatcher> watchers
- = new IdentityHashMap<TargetWatcher, TargetWatcher>();
+ private Map<Integer, ReplyHandler> replyMap = new HashMap<>();
+ private Map<TargetWatcher, TargetWatcher> watchers = new IdentityHashMap<>();
private int activeReqs = 0;
private int writeWork = 0;
private Transport parent;
@@ -52,8 +49,7 @@ class Connection extends Target {
private void setState(int state) {
if (state <= this.state) {
- log.log(Level.WARNING, "Bogus state transition: "
- + this.state + "->" + state);
+ log.log(Level.WARNING, "Bogus state transition: " + this.state + "->" + state);
return;
}
boolean live = (this.state == INITIAL && state == CONNECTED);
@@ -95,8 +91,7 @@ class Connection extends Target {
owner.sessionInit(this);
}
- public Connection(Transport parent, Supervisor owner,
- Spec spec, Object context) {
+ public Connection(Transport parent, Supervisor owner, Spec spec, Object context) {
super(context);
this.parent = parent;
this.owner = owner;
@@ -400,6 +395,6 @@ class Connection extends Target {
if (channel != null) {
return "Connection { " + channel.socket() + " }";
}
- return "Connection { no socket }";
+ return "Connection { no socket, spec " + spec + " }";
}
}
diff --git a/jrt/src/com/yahoo/jrt/Connector.java b/jrt/src/com/yahoo/jrt/Connector.java
index fa43710b1f6..6778e047a8b 100644
--- a/jrt/src/com/yahoo/jrt/Connector.java
+++ b/jrt/src/com/yahoo/jrt/Connector.java
@@ -27,7 +27,7 @@ class Connector {
}
public void connectLater(Connection c) {
- if (!connectQueue.enqueue(c)) {
+ if ( ! connectQueue.enqueue(c)) {
parent.addConnection(c);
}
}
diff --git a/jrt/src/com/yahoo/jrt/Request.java b/jrt/src/com/yahoo/jrt/Request.java
index 99d7df8657e..4786124e56b 100644
--- a/jrt/src/com/yahoo/jrt/Request.java
+++ b/jrt/src/com/yahoo/jrt/Request.java
@@ -14,7 +14,7 @@ package com.yahoo.jrt;
* client/server roles are independent of connection client/server
* roles, since invocations can be performed both ways across a {@link
* Target}.
- **/
+ */
public class Request
{
private String methodName;
@@ -242,8 +242,7 @@ public class Request
if (returnValues.satisfies(returnTypes)) {
return true;
}
- setError(ErrorCode.WRONG_RETURN,
- "checkReturnValues: Wrong return values");
+ setError(ErrorCode.WRONG_RETURN, "checkReturnValues: Wrong return values");
return false;
}
diff --git a/jrt/src/com/yahoo/jrt/Spec.java b/jrt/src/com/yahoo/jrt/Spec.java
index 7ed0aa69920..4c1f07b98a2 100644
--- a/jrt/src/com/yahoo/jrt/Spec.java
+++ b/jrt/src/com/yahoo/jrt/Spec.java
@@ -2,6 +2,8 @@
package com.yahoo.jrt;
+import com.yahoo.net.HostName;
+
import java.net.SocketAddress;
import java.net.InetSocketAddress;
@@ -9,9 +11,9 @@ import java.net.InetSocketAddress;
/**
* A Spec is a network address used for either listening or
* connecting.
- **/
-public class Spec
-{
+ */
+public class Spec {
+
private SocketAddress address;
private String host;
private int port;
@@ -24,11 +26,11 @@ public class Spec
*
* @param spec input string to be parsed
* @see #malformed
- **/
+ */
public Spec(String spec) {
if (spec.startsWith("tcp/")) {
int sep = spec.indexOf(':');
- String portStr = null;
+ String portStr;
if (sep == -1) {
portStr = spec.substring(4);
} else {
@@ -52,7 +54,7 @@ public class Spec
*
* @param host host name
* @param port port number
- **/
+ */
public Spec(String host, int port) {
this.host = host;
this.port = port;
@@ -62,7 +64,7 @@ public class Spec
* Create a Spec from a port number.
*
* @param port port number
- **/
+ */
public Spec(int port) {
this.port = port;
}
@@ -71,7 +73,7 @@ public class Spec
* Obtain the host name of this address
*
* @return host name
- **/
+ */
public String host() {
return host;
}
@@ -80,7 +82,7 @@ public class Spec
* Obtain the port number of this address
*
* @return port number
- **/
+ */
public int port() {
return port;
}
@@ -90,7 +92,7 @@ public class Spec
* you whether that string was malformed.
*
* @return true if this address is malformed
- **/
+ */
public boolean malformed() {
return malformed;
}
@@ -100,7 +102,7 @@ public class Spec
* malformed, this method will return null.
*
* @return socket address
- **/
+ */
SocketAddress address() {
if (malformed) {
return null;
@@ -114,13 +116,13 @@ public class Spec
}
return address;
}
-
+
/**
* Obtain a string representation of this address. The return
* value from this method may be used to create a new Spec.
*
* @return string representation of this address
- **/
+ */
public String toString() {
if (malformed) {
return "MALFORMED";
@@ -130,4 +132,5 @@ public class Spec
}
return "tcp/" + host + ":" + port;
}
+
}
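For reference, the spec string format accepted by the parsing constructor, with the expected results noted as comments (a sketch; behaviour for strings without the tcp/ prefix follows from the malformed flag):

    import com.yahoo.jrt.Spec;

    public class SpecSketch {
        public static void main(String[] args) {
            Spec hostAndPort = new Spec("tcp/localhost:8080");
            System.out.println(hostAndPort.host());      // localhost
            System.out.println(hostAndPort.port());      // 8080
            System.out.println(hostAndPort.malformed()); // false

            Spec portOnly = new Spec("tcp/8080");        // no ':' separator, host stays null
            System.out.println(portOnly.port());         // 8080

            Spec broken = new Spec("localhost:8080");    // missing "tcp/" prefix
            System.out.println(broken.malformed());      // expected: true
            System.out.println(broken);                  // expected: MALFORMED
        }
    }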
diff --git a/jrt/src/com/yahoo/jrt/Transport.java b/jrt/src/com/yahoo/jrt/Transport.java
index 85bfed79732..6a9a978fb77 100644
--- a/jrt/src/com/yahoo/jrt/Transport.java
+++ b/jrt/src/com/yahoo/jrt/Transport.java
@@ -229,9 +229,8 @@ public class Transport {
* @param context application context for the new connection
* @param sync perform a synchronous connect in the calling thread
* if this flag is set
- **/
- Connection connect(Supervisor owner, Spec spec,
- Object context, boolean sync) {
+ */
+ Connection connect(Supervisor owner, Spec spec, Object context, boolean sync) {
Connection conn = new Connection(this, owner, spec, context);
if (sync) {
addConnection(conn.connect());
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/IMirror.java b/jrt/src/com/yahoo/jrt/slobrok/api/IMirror.java
index 3662e6ad5b9..421590e72ce 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/IMirror.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/IMirror.java
@@ -4,8 +4,8 @@ package com.yahoo.jrt.slobrok.api;
/**
* Defines an interface for the name server lookup.
*
- * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a>
- **/
+ * @author Simon Thoresen
+ */
public interface IMirror {
/**
@@ -21,7 +21,7 @@ public interface IMirror {
* @return a list of all matching services, with corresponding connect specs
* @param pattern The pattern used for matching
**/
- public Mirror.Entry[] lookup(String pattern);
+ Mirror.Entry[] lookup(String pattern);
/**
* Obtain the number of updates seen by this mirror. The value may wrap, but will never become 0 again. This can be
@@ -30,5 +30,6 @@ public interface IMirror {
*
* @return number of slobrok updates seen
**/
- public int updates();
+ int updates();
+
}
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
index 5e62cb61b76..81ec51e2b9e 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/Mirror.java
@@ -1,16 +1,14 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jrt.slobrok.api;
-
import com.yahoo.jrt.*;
-import java.util.Arrays;
-import java.util.Random;
import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
import java.util.logging.Logger;
import java.util.logging.Level;
-
/**
* A Mirror object is used to keep track of the services registered
* with a slobrok cluster.
@@ -18,57 +16,19 @@ import java.util.logging.Level;
* Updates to the service repository are fetched in the
* background. Lookups against this object is done using an internal
* mirror of the service repository.
- **/
+ */
public class Mirror implements IMirror {
private static Logger log = Logger.getLogger(Mirror.class.getName());
- /**
- * An Entry contains the name and connection spec for a single
- * service.
- **/
- public static final class Entry implements Comparable<Entry> {
- private final String name;
- private final String spec;
- private final char [] nameArray;
-
- public Entry(String name, String spec) {
- this.name = name;
- this.spec = spec;
- this.nameArray = name.toCharArray();
- }
-
- public boolean equals(Object rhs) {
- if (rhs == null || !(rhs instanceof Entry)) {
- return false;
- }
- Entry e = (Entry) rhs;
- return (name.equals(e.name) && spec.equals(e.spec));
- }
-
- public int hashCode() {
- return (name.hashCode() + spec.hashCode());
- }
-
- public int compareTo(Entry b) {
- int diff = name.compareTo(b.name);
- return diff != 0
- ? diff
- : spec.compareTo(b.spec);
- }
- char [] getNameArray() { return nameArray; }
- public String getName() { return name; }
- public String getSpec() { return spec; }
- }
-
private Supervisor orb;
private SlobrokList slobroks;
private String currSlobrok;
private BackOffPolicy backOff;
private volatile int updates = 0;
- private boolean reqDone = false;
+ private boolean requestDone = false;
private volatile Entry[] specs = new Entry[0];
- private int specsGen = 0;
+ private int specsGeneration = 0;
private Task updateTask = null;
private RequestWaiter reqWait = null;
private Target target = null;
@@ -87,11 +47,11 @@ public class Mirror implements IMirror {
this.slobroks = slobroks;
this.backOff = bop;
updateTask = orb.transport().createTask(new Runnable() {
- public void run() { handleUpdate(); }
+ public void run() { checkForUpdate(); }
});
reqWait = new RequestWaiter() {
public void handleRequestDone(Request req) {
- reqDone = true;
+ requestDone = true;
updateTask.scheduleNow();
}
};
@@ -104,7 +64,7 @@ public class Mirror implements IMirror {
*
* @param orb the Supervisor to use
* @param slobroks slobrok connect spec list
- **/
+ */
public Mirror(Supervisor orb, SlobrokList slobroks) {
this(orb, slobroks, new BackOff());
}
@@ -112,7 +72,7 @@ public class Mirror implements IMirror {
/**
* Shut down the Mirror. This will close any open connections and
* stop the regular mirror updates.
- **/
+ */
public void shutdown() {
updateTask.kill();
orb.transport().perform(new Runnable() {
@@ -122,12 +82,11 @@ public class Mirror implements IMirror {
@Override
public Entry[] lookup(String pattern) {
- ArrayList<Entry> found = new ArrayList<Entry>();
- Entry [] e = specs;
- char [] p = pattern.toCharArray();
- for (int i = 0; i < e.length; i++) {
- if (match(e[i].getNameArray(), p)) {
- found.add(e[i]);
+ ArrayList<Entry> found = new ArrayList<>();
+ char[] p = pattern.toCharArray();
+ for (Entry specEntry : specs) {
+ if (match(specEntry.getNameArray(), p)) {
+ found.add(specEntry);
}
}
return found.toArray(new Entry[found.size()]);
@@ -145,7 +104,7 @@ public class Mirror implements IMirror {
* (or if it never does, time out and tell the user there was no answer from any Service Location Broker).
*
* @return true if the MirrorAPI object has asked for updates from a Slobrok and got any answer back
- **/
+ */
public boolean ready() {
return (updates != 0);
}
@@ -167,7 +126,7 @@ public class Mirror implements IMirror {
* @return true if the name matches the pattern
* @param name the name
* @param pattern the pattern
- **/
+ */
static boolean match(char [] name, char [] pattern) {
int ni = 0;
int pi = 0;
@@ -197,95 +156,58 @@ public class Mirror implements IMirror {
/**
* Invoked by the update task.
- **/
- private void handleUpdate() {
- if (reqDone) {
- reqDone = false;
-
- if (req.errorCode() == ErrorCode.NONE &&
- req.returnValues().satisfies("SSi") &&
- req.returnValues().get(0).count() == req.returnValues().get(1).count())
- {
- Values answer = req.returnValues();
-
- if (specsGen != answer.get(2).asInt32()) {
-
- int numNames = answer.get(0).count();
- String[] n = answer.get(0).asStringArray();
- String[] s = answer.get(1).asStringArray();
- Entry[] newSpecs = new Entry[numNames];
-
- for (int idx = 0; idx < numNames; idx++) {
- newSpecs[idx] = new Entry(n[idx], s[idx]);
- }
-
- specs = newSpecs;
-
- specsGen = answer.get(2).asInt32();
- int u = (updates + 1);
- if (u == 0) {
- u++;
- }
- updates = u;
+ */
+ private void checkForUpdate() {
+ if (requestDone) {
+ handleUpdate();
+ requestDone = false;
+ return;
+ }
+
+ if (target != null && ! slobroks.contains(currSlobrok)) {
+ target.close();
+ target = null;
+ }
+ if (target == null) {
+ currSlobrok = slobroks.nextSlobrokSpec();
+ if (currSlobrok == null) {
+ double delay = backOff.get();
+ updateTask.schedule(delay);
+ if (backOff.shouldWarn(delay)) {
+ log.log(Level.INFO, "no location brokers available "
+ + "(retry in " + delay + " seconds) for: " + slobroks);
}
- backOff.reset();
- updateTask.schedule(0.1); // be nice
- return;
- }
- if (!req.checkReturnTypes("iSSSi")
- || (req.returnValues().get(2).count() !=
- req.returnValues().get(3).count()))
- {
- target.close();
- target = null;
- updateTask.scheduleNow(); // try next slobrok
return;
}
+ target = orb.connect(new Spec(currSlobrok));
+ specsGeneration = 0;
+ }
+ req = new Request("slobrok.incremental.fetch");
+ req.parameters().add(new Int32Value(specsGeneration)); // gencnt
+ req.parameters().add(new Int32Value(5000)); // mstimeout
+ target.invokeAsync(req, 40.0, reqWait);
+ }
+
+ private void handleUpdate() {
+ if (req.errorCode() == ErrorCode.NONE &&
+ req.returnValues().satisfies("SSi") &&
+ req.returnValues().get(0).count() == req.returnValues().get(1).count())
+ {
+ Values answer = req.returnValues();
+ if (specsGeneration != answer.get(2).asInt32()) {
- Values answer = req.returnValues();
+ int numNames = answer.get(0).count();
+ String[] n = answer.get(0).asStringArray();
+ String[] s = answer.get(1).asStringArray();
+ Entry[] newSpecs = new Entry[numNames];
- int diffFrom = answer.get(0).asInt32();
- int diffTo = answer.get(4).asInt32();
-
- if (specsGen != diffTo) {
-
- int nRemoves = answer.get(1).count();
- String[] r = answer.get(1).asStringArray();
-
- int numNames = answer.get(2).count();
- String[] n = answer.get(2).asStringArray();
- String[] s = answer.get(3).asStringArray();
-
-
- Entry[] newSpecs;
- if (diffFrom == 0) {
- newSpecs = new Entry[numNames];
-
- for (int idx = 0; idx < numNames; idx++) {
- newSpecs[idx] = new Entry(n[idx], s[idx]);
- }
- } else {
- java.util.HashMap<String, Entry> map = new java.util.HashMap<String, Entry>();
- for (Entry e : specs) {
- map.put(e.getName(), e);
- }
- for (String rem : r) {
- map.remove(rem);
- }
- for (int idx = 0; idx < numNames; idx++) {
- map.put(n[idx], new Entry(n[idx], s[idx]));
- }
- newSpecs = new Entry[map.size()];
- int idx = 0;
- for (Entry e : map.values()) {
- newSpecs[idx++] = e;
- }
+ for (int idx = 0; idx < numNames; idx++) {
+ newSpecs[idx] = new Entry(n[idx], s[idx]);
}
-
specs = newSpecs;
- specsGen = diffTo;
+ specsGeneration = answer.get(2).asInt32();
int u = (updates + 1);
if (u == 0) {
u++;
@@ -296,34 +218,72 @@ public class Mirror implements IMirror {
updateTask.schedule(0.1); // be nice
return;
}
- if (target != null && ! slobroks.contains(currSlobrok)) {
+ if (!req.checkReturnTypes("iSSSi")
+ || (req.returnValues().get(2).count() !=
+ req.returnValues().get(3).count()))
+ {
target.close();
target = null;
+ updateTask.scheduleNow(); // try next slobrok
+ return;
}
- if (target == null) {
- currSlobrok = slobroks.nextSlobrokSpec();
- if (currSlobrok == null) {
- double delay = backOff.get();
- updateTask.schedule(delay);
- if (backOff.shouldWarn(delay)) {
- log.log(Level.INFO, "no location brokers available "
- + "(retry in " + delay + " seconds) for: " + slobroks);
+
+
+ Values answer = req.returnValues();
+
+ int diffFromGeneration = answer.get(0).asInt32();
+ int diffToGeneration = answer.get(4).asInt32();
+ if (specsGeneration != diffToGeneration) {
+
+ int nRemoves = answer.get(1).count();
+ String[] r = answer.get(1).asStringArray();
+
+ int numNames = answer.get(2).count();
+ String[] n = answer.get(2).asStringArray();
+ String[] s = answer.get(3).asStringArray();
+
+ Entry[] newSpecs;
+ if (diffFromGeneration == 0) {
+ newSpecs = new Entry[numNames];
+
+ for (int idx = 0; idx < numNames; idx++) {
+ newSpecs[idx] = new Entry(n[idx], s[idx]);
+ }
+ } else {
+ Map<String, Entry> map = new HashMap<>();
+ for (Entry e : specs) {
+ map.put(e.getName(), e);
+ }
+ for (String rem : r) {
+ map.remove(rem);
+ }
+ for (int idx = 0; idx < numNames; idx++) {
+ map.put(n[idx], new Entry(n[idx], s[idx]));
+ }
+ newSpecs = new Entry[map.size()];
+ int idx = 0;
+ for (Entry e : map.values()) {
+ newSpecs[idx++] = e;
}
- return;
}
- target = orb.connect(new Spec(currSlobrok));
- specsGen = 0;
+
+ specs = newSpecs;
+
+ specsGeneration = diffToGeneration;
+ int u = (updates + 1);
+ if (u == 0) {
+ u++;
+ }
+ updates = u;
}
- req = new Request("slobrok.incremental.fetch");
- req.parameters().add(new Int32Value(specsGen)); // gencnt
- req.parameters().add(new Int32Value(5000)); // mstimeout
- target.invokeAsync(req, 40.0, reqWait);
+ backOff.reset();
+ updateTask.schedule(0.1); // be nice
}
/**
* Invoked from the transport thread, requested by the shutdown
* method.
- **/
+ */
private void handleShutdown() {
if (req != null) {
req.abort();
@@ -334,4 +294,44 @@ public class Mirror implements IMirror {
target = null;
}
}
+
+ /**
+ * An Entry contains the name and connection spec for a single
+ * service.
+ */
+ public static final class Entry implements Comparable<Entry> {
+
+ private final String name;
+ private final String spec;
+ private final char [] nameArray;
+
+ public Entry(String name, String spec) {
+ this.name = name;
+ this.spec = spec;
+ this.nameArray = name.toCharArray();
+ }
+
+ public boolean equals(Object rhs) {
+ if (rhs == null || !(rhs instanceof Entry)) {
+ return false;
+ }
+ Entry e = (Entry) rhs;
+ return (name.equals(e.name) && spec.equals(e.spec));
+ }
+
+ public int hashCode() {
+ return (name.hashCode() + spec.hashCode());
+ }
+
+ public int compareTo(Entry b) {
+ int diff = name.compareTo(b.name);
+ return diff != 0 ? diff : spec.compareTo(b.spec);
+ }
+
+ char [] getNameArray() { return nameArray; }
+ public String getName() { return name; }
+ public String getSpec() { return spec; }
+
+ }
+
}
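The update loop is now split into checkForUpdate(), which manages the connection and schedules slobrok.incremental.fetch requests, and handleUpdate(), which applies full ("SSi") or incremental ("iSSSi") answers; the public API is unchanged. A hedged usage sketch, assuming SlobrokList.setup(String[]) for supplying connect specs; the address and lookup pattern are illustrative only:

    import com.yahoo.jrt.Supervisor;
    import com.yahoo.jrt.Transport;
    import com.yahoo.jrt.slobrok.api.Mirror;
    import com.yahoo.jrt.slobrok.api.SlobrokList;

    public class MirrorSketch {
        public static void main(String[] args) throws InterruptedException {
            Supervisor supervisor = new Supervisor(new Transport());
            SlobrokList slobroks = new SlobrokList();
            slobroks.setup(new String[] { "tcp/localhost:2773" }); // hypothetical slobrok address

            Mirror mirror = new Mirror(supervisor, slobroks);
            while ( ! mirror.ready()) {   // becomes true once one fetch has been answered
                Thread.sleep(100);
            }
            for (Mirror.Entry entry : mirror.lookup("*/*")) {
                System.out.println(entry.getName() + " -> " + entry.getSpec());
            }
            mirror.shutdown();
            supervisor.transport().shutdown().join();
        }
    }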
diff --git a/jrt/src/com/yahoo/jrt/slobrok/api/Register.java b/jrt/src/com/yahoo/jrt/slobrok/api/Register.java
index 84720501ff8..d1ea7a7f1fa 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/api/Register.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/api/Register.java
@@ -1,22 +1,21 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.jrt.slobrok.api;
-
import com.yahoo.jrt.*;
import java.util.ArrayList;
+import java.util.List;
import java.util.Random;
import java.util.logging.Logger;
import java.util.logging.Level;
-
/**
* A Register object is used to register and unregister services with
* a slobrok cluster.
*
* The register/unregister operations performed against this object
- * are stored in a todo list that will be performed asynchronously
+ * are stored in a to-do list that will be performed asynchronously
* against the slobrok cluster as soon as possible.
- **/
+ */
public class Register {
private static Logger log = Logger.getLogger(Register.class.getName());
@@ -27,9 +26,9 @@ public class Register {
private String mySpec;
private BackOffPolicy backOff;
private boolean reqDone = false;
- private ArrayList<String> names = new ArrayList<String>();
- private ArrayList<String> pending = new ArrayList<String>();
- private ArrayList<String> unreg = new ArrayList<String>();
+ private List<String> names = new ArrayList<>();
+ private List<String> pending = new ArrayList<>();
+ private List<String> unreg = new ArrayList<>();
private Task updateTask = null;
private RequestWaiter reqWait = null;
private Target target = null;
@@ -39,9 +38,9 @@ public class Register {
/**
* Remove all instances of name from list.
- **/
- private void discard(ArrayList<String> list, String name) {
- ArrayList<String> tmp = new ArrayList<String>();
+ */
+ private void discard(List<String> list, String name) {
+ List<String> tmp = new ArrayList<>();
tmp.add(name);
list.removeAll(tmp);
}
@@ -54,7 +53,7 @@ public class Register {
* @param slobroks slobrok connect spec list
* @param spec the Spec representing hostname and port for this host
* @param bop custom backoff policy, mostly useful for testing
- **/
+ */
public Register(Supervisor orb, SlobrokList slobroks, Spec spec, BackOffPolicy bop) {
this.orb = orb;
this.slobroks = slobroks;
@@ -98,7 +97,7 @@ public class Register {
* @param orb the Supervisor to use
* @param slobroks slobrok connect spec list
* @param spec the Spec representing hostname and port for this host
- **/
+ */
public Register(Supervisor orb, SlobrokList slobroks, Spec spec) {
this(orb, slobroks, spec, new BackOff());
}
@@ -111,9 +110,8 @@ public class Register {
* @param slobroks slobrok connect spec list
* @param myHost the hostname of this host
* @param myPort the port number we are listening to
- **/
- public Register(Supervisor orb, SlobrokList slobroks,
- String myHost, int myPort) {
+ */
+ public Register(Supervisor orb, SlobrokList slobroks, String myHost, int myPort) {
this(orb, slobroks, new Spec(myHost, myPort));
}
@@ -121,7 +119,7 @@ public class Register {
/**
* Shut down the Register. This will close any open connections
* and stop the regular re-registration.
- **/
+ */
public void shutdown() {
updateTask.kill();
orb.transport().perform(new Runnable() {
@@ -133,7 +131,7 @@ public class Register {
* Register a service with the slobrok cluster.
*
* @param name service name
- **/
+ */
public synchronized void registerName(String name) {
if (names.indexOf(name) >= 0) {
return;
@@ -148,7 +146,7 @@ public class Register {
* Unregister a service with the slobrok cluster
*
* @param name service name
- **/
+ */
public synchronized void unregisterName(String name) {
discard(names, name);
discard(pending, name);
@@ -164,15 +162,11 @@ public class Register {
reqDone = false;
if (req.isError()) {
if (req.errorCode() != ErrorCode.METHOD_FAILED) {
- log.log(Level.FINE, "register failed: "
- + req.errorMessage()
- + " (code " + req.errorCode() + ")");
+ log.log(Level.FINE, "register failed: " + req.errorMessage() + " (code " + req.errorCode() + ")");
target.close();
target = null;
} else {
- log.log(Level.WARNING, "register failed: "
- + req.errorMessage()
- + " (code " + req.errorCode() + ")");
+ log.log(Level.WARNING, "register failed: " + req.errorMessage() + " (code " + req.errorCode() + ")");
}
} else {
backOff.reset();
@@ -192,13 +186,10 @@ public class Register {
if (currSlobrok == null) {
double delay = backOff.get();
updateTask.schedule(delay);
- if (backOff.shouldWarn(delay)) {
- log.log(Level.WARNING, "slobrok connection problems "
- + "(retry in " + delay + " seconds) to: " + slobroks);
- } else {
- log.log(Level.FINE, "slobrok retry in " + delay
- + " seconds");
- }
+ if (backOff.shouldWarn(delay))
+ log.log(Level.WARNING, "slobrok connection problems (retry in " + delay + " seconds) to: " + slobroks);
+ else
+ log.log(Level.FINE, "slobrok retry in " + delay + " seconds");
return;
}
target = orb.connect(new Spec(currSlobrok));
@@ -207,16 +198,14 @@ public class Register {
pending.addAll(names);
}
}
- boolean rem = false;
- boolean reg = false;
+ boolean unregister = false;
String name;
synchronized (this) {
if (unreg.size() > 0) {
name = unreg.remove(unreg.size() - 1);
- rem = true;
+ unregister = true;
} else if (pending.size() > 0) {
name = pending.remove(pending.size() - 1);
- reg = true;
} else {
pending.addAll(names);
log.log(Level.FINE, "done, reschedule in 30s");
@@ -225,13 +214,13 @@ public class Register {
}
}
- if (rem) {
+ if (unregister) {
req = new Request("slobrok.unregisterRpcServer");
req.parameters().add(new StringValue(name));
log.log(Level.FINE, "unregister [" + name + "]");
req.parameters().add(new StringValue(mySpec));
target.invokeAsync(req, 35.0, reqWait);
- } else if (reg) {
+ } else { // register
req = new Request("slobrok.registerRpcServer");
req.parameters().add(new StringValue(name));
log.log(Level.FINE, "register [" + name + "]");
@@ -246,8 +235,7 @@ public class Register {
}
private void handleRpcUnreg(Request req) {
- log.log(Level.WARNING, "unregistered name "
- + req.parameters().get(0).asString());
+ log.log(Level.WARNING, "unregistered name " + req.parameters().get(0).asString());
}
/**
@@ -266,4 +254,5 @@ public class Register {
target = null;
}
}
+
}
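Register keeps the same asynchronous to-do-list behaviour after this cleanup. A simplified sketch of typical registration and teardown (slobrok address, host, port and service name are illustrative only, and SlobrokList.setup(String[]) is assumed as above):

    import com.yahoo.jrt.Supervisor;
    import com.yahoo.jrt.Transport;
    import com.yahoo.jrt.slobrok.api.Register;
    import com.yahoo.jrt.slobrok.api.SlobrokList;

    public class RegisterSketch {
        public static void main(String[] args) {
            Supervisor supervisor = new Supervisor(new Transport());
            SlobrokList slobroks = new SlobrokList();
            slobroks.setup(new String[] { "tcp/localhost:2773" }); // hypothetical slobrok address

            Register register = new Register(supervisor, slobroks, "localhost", 4080);
            register.registerName("example/service");   // queued and performed asynchronously

            // ... serve requests while registered ...

            register.unregisterName("example/service");
            register.shutdown();
            supervisor.transport().shutdown().join();
        }
    }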
diff --git a/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java b/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java
index 085489897b5..3c81f9618f8 100644
--- a/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java
+++ b/jrt/src/com/yahoo/jrt/slobrok/server/Slobrok.java
@@ -5,79 +5,16 @@ import com.yahoo.jrt.*;
import java.util.ArrayList;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
public class Slobrok {
- private class RegisterCallback implements RequestWaiter {
-
- Request registerReq;
- String name;
- String spec;
- Target target;
-
- public RegisterCallback(Request req, String name, String spec) {
- req.detach();
- registerReq = req;
- this.name = name;
- this.spec = spec;
- target = orb.connect(new Spec(spec));
- Request cbReq = new Request("slobrok.callback.listNamesServed");
- target.invokeAsync(cbReq, 5.0, this);
- }
-
- public void handleRequestDone(Request req) {
- if (!req.checkReturnTypes("S")) {
- registerReq.setError(ErrorCode.METHOD_FAILED, "error during register callback: "
- + req.errorMessage());
- registerReq.returnRequest();
- target.close();
- return;
- }
- String[] names = req.returnValues().get(0).asStringArray();
- boolean found = false;
- for (String n : names) {
- if (n.equals(name)) {
- found = true;
- }
- }
- if (!found) {
- registerReq.setError(ErrorCode.METHOD_FAILED, "register failed: "
- + "served names does not contain name");
- registerReq.returnRequest();
- target.close();
- return;
- }
- handleRegisterCallbackDone(registerReq, name, spec, target);
- }
- }
-
- private class FetchMirror implements Runnable {
- public final Request req;
- public final Task task;
-
- public FetchMirror(Request req, int timeout) {
- req.detach();
- this.req = req;
- task = orb.transport().createTask(this);
- task.schedule(((double)timeout)/1000.0);
- }
- public void run() { // timeout
- handleFetchMirrorTimeout(this);
- }
- }
-
- private class TargetMonitor implements TargetWatcher {
- public void notifyTargetInvalid(Target target) {
- handleTargetDown(target);
- }
- }
-
Supervisor orb;
Acceptor listener;
- HashMap<String,String> services = new HashMap<String,String>();
- ArrayList<FetchMirror> pendingFetch = new ArrayList<FetchMirror>();
- HashMap<String,Target> targets = new HashMap<String,Target>();
+ private Map<String,String> services = new HashMap<>();
+ List<FetchMirror> pendingFetch = new ArrayList<>();
+ Map<String,Target> targets = new HashMap<>();
TargetMonitor monitor = new TargetMonitor();
int gencnt = 1;
@@ -123,15 +60,11 @@ public class Slobrok {
handleFetchMirrorFlush();
}
- private void handleRegisterCallbackDone(Request req,
- String name, String spec,
- Target target)
- {
+ private void handleRegisterCallbackDone(Request req, String name, String spec, Target target) {
String stored = services.get(name);
if (stored != null) { // too late
- if (!stored.equals(spec)) {
- req.setError(ErrorCode.METHOD_FAILED,
- "service '" + name + "' registered with another spec");
+ if ( ! stored.equals(spec)) {
+ req.setError(ErrorCode.METHOD_FAILED, "service '" + name + "' registered with another spec");
}
req.returnRequest();
target.close();
@@ -153,8 +86,8 @@ public class Slobrok {
}
private void dumpServices(Request req) {
- ArrayList<String> names = new ArrayList<String>();
- ArrayList<String> specs = new ArrayList<String>();
+ List<String> names = new ArrayList<>();
+ List<String> specs = new ArrayList<>();
for (Map.Entry<String,String> entry : services.entrySet()) {
names.add(entry.getKey());
specs.add(entry.getValue());
@@ -225,12 +158,8 @@ public class Slobrok {
if (stored == null) {
new RegisterCallback(req, name, spec);
} else {
- if (stored.equals(spec)) {
- // ok, already stored
- } else {
- req.setError(ErrorCode.METHOD_FAILED,
- "service '" + name + "' registered with another spec");
- }
+ if ( ! stored.equals(spec))
+ req.setError(ErrorCode.METHOD_FAILED, "service '" + name + "' registered with another spec");
}
}
@@ -267,4 +196,67 @@ public class Slobrok {
}
}
+ private class RegisterCallback implements RequestWaiter {
+
+ Request registerReq;
+ String name;
+ String spec;
+ Target target;
+
+ public RegisterCallback(Request req, String name, String spec) {
+ req.detach();
+ registerReq = req;
+ this.name = name;
+ this.spec = spec;
+ target = orb.connect(new Spec(spec));
+ Request cbReq = new Request("slobrok.callback.listNamesServed");
+ target.invokeAsync(cbReq, 5.0, this);
+ }
+
+ @Override
+ public void handleRequestDone(Request req) {
+ if ( ! req.checkReturnTypes("S")) {
+ registerReq.setError(ErrorCode.METHOD_FAILED, "error during register callback: " + req.errorMessage());
+ registerReq.returnRequest();
+ target.close();
+ return;
+ }
+ String[] names = req.returnValues().get(0).asStringArray();
+ boolean found = false;
+ for (String n : names) {
+ if (n.equals(name)) {
+ found = true;
+ }
+ }
+ if (!found) {
+ registerReq.setError(ErrorCode.METHOD_FAILED, "register failed: served names does not contain name");
+ registerReq.returnRequest();
+ target.close();
+ return;
+ }
+ handleRegisterCallbackDone(registerReq, name, spec, target);
+ }
+ }
+
+ private class FetchMirror implements Runnable {
+ public final Request req;
+ public final Task task;
+
+ public FetchMirror(Request req, int timeout) {
+ req.detach();
+ this.req = req;
+ task = orb.transport().createTask(this);
+ task.schedule(((double)timeout)/1000.0);
+ }
+ public void run() { // timeout
+ handleFetchMirrorTimeout(this);
+ }
+ }
+
+ private class TargetMonitor implements TargetWatcher {
+ public void notifyTargetInvalid(Target target) {
+ handleTargetDown(target);
+ }
+ }
+
}
diff --git a/linguistics/src/main/java/com/yahoo/language/process/StemMode.java b/linguistics/src/main/java/com/yahoo/language/process/StemMode.java
index 269b08dcdf7..ebacb307a85 100644
--- a/linguistics/src/main/java/com/yahoo/language/process/StemMode.java
+++ b/linguistics/src/main/java/com/yahoo/language/process/StemMode.java
@@ -6,7 +6,7 @@ package com.yahoo.language.process;
* Stemming implementation may support a smaller number of modes by mapping a mode to a more
* inclusive alternative.
*
- * @author <a href="mailto:mathiasm@yahoo-inc.com">Mathias Mølster Lidal</a>
+ * @author Mathias Mølster Lidal
*/
public enum StemMode {
diff --git a/logserver/src/main/java/com/yahoo/logserver/net/LogConnection.java b/logserver/src/main/java/com/yahoo/logserver/net/LogConnection.java
index 1a48c5fb0f4..192e0769b6f 100644
--- a/logserver/src/main/java/com/yahoo/logserver/net/LogConnection.java
+++ b/logserver/src/main/java/com/yahoo/logserver/net/LogConnection.java
@@ -206,7 +206,7 @@ public class LogConnection implements Connection {
return;
}
int count = 200;
- log.log(LogLevel.WARNING, "Log message too long. Message from "
+ log.log(LogLevel.DEBUG, "Log message too long. Message from "
+ socket.socket().getInetAddress() + " exceeds "
+ readBuffer.capacity()
+ ". Skipping buffer (might be part of same long message). Printing first " + count + " characters of line: " +
diff --git a/logserver/src/test/java/com/yahoo/logserver/net/test/LogConnectionTestCase.java b/logserver/src/test/java/com/yahoo/logserver/net/test/LogConnectionTestCase.java
index 06f35567e9f..90835930fc0 100644
--- a/logserver/src/test/java/com/yahoo/logserver/net/test/LogConnectionTestCase.java
+++ b/logserver/src/test/java/com/yahoo/logserver/net/test/LogConnectionTestCase.java
@@ -32,7 +32,6 @@ import static org.junit.Assert.*;
public class LogConnectionTestCase {
private static final Logger log = Logger.getLogger(LogConnectionTestCase.class.getName());
- private static final int PROBING_RANGE_START = 41352;
private static final Charset charset = Charset.forName("utf-8");
private static final ByteBuffer bigBuffer;
private int port;
@@ -48,7 +47,7 @@ public class LogConnectionTestCase {
prefix = prefix.substring(0, prefix.length() - 1);
sb.append(prefix);
- // fill up the remainding buffer with rubbish to make
+ // fill up the remaining buffer with rubbish to make
// it too long
for (int i = 0; i < (LogConnection.READBUFFER_SIZE * 3); i++) {
sb.append("a");
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java b/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java
index 729bef7985f..cf5beb4a903 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/MessageBus.java
@@ -22,33 +22,34 @@ import java.util.logging.Logger;
* and forward messages.</p>
*
* <p>There are three types of sessions:</p>
- * <ul><li>{@link SourceSession Source sessions} sends messages and receives
- * replies</li>
- * <li>{@link IntermediateSession Intermediate sessions} receives messages on
- * their way to their final destination, and may decide to forward the messages
- * or reply directly.</li>
- * <li>{@link DestinationSession Destination sessions} are the final recipient
- * of messages, and are expected to reply to every one of them, but may not
- * forward messages.</li></ul>
+ * <ul>
+ * <li>{@link SourceSession Source sessions} send messages and receive replies</li>
+ * <li>{@link IntermediateSession Intermediate sessions} receive messages on
+ * their way to their final destination, and may decide to forward the messages or reply directly.
+ * <li>{@link DestinationSession Destination sessions} are the final recipients
+ * of messages, and are expected to reply to every one of them, but may not forward messages.
+ * </ul>
*
* <p>A message bus is configured with a {@link Protocol protocol}. This table
* enumerates the permissible routes from intermediates to destinations and the
* messaging semantics of each hop.</p>
*
- * <p>The responsibilities of a message bus are:</p>
- * <ul> <li>Assign a route to every send message from its routing table</li>
- * <li>Deliver every message it <i>accepts</i> to the next hop on its route on a
- * best effort basis, <i>or</i> deliver a <i>failure reply</i>.</li>
- * <li>Deliver replies back to message sources through all the intermediate
- * hops.</li></ul>
+ * The responsibilities of a message bus are:
+ * <ul>
+ * <li>Assign a route to every sent message from its routing table
+ * <li>Deliver every message it <i>accepts</i> to the next hop on its route
+ * <i>or</i> deliver a <i>failure reply</i>.
+ * <li>Deliver replies back to message sources through all the intermediate hops.
+ * </ul>
*
- * <p>A runtime will typically</p>
- * <ul><li>Create a message bus implementation and set properties on this
- * implementation once.</li>
- * <li>Create sessions using that message bus many places.</li></ul>
+ * A runtime will typically
+ * <ul>
+ * <li>Create a message bus implementation and set properties on this implementation once.
+ * <li>Create sessions using that message bus in many places.</li>
+ * </ul>
*
- * @author btratseth
- * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a>
+ * @author bratseth
+ * @author Simon Thoresen
*/
public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler, ReplyHandler {
@@ -101,9 +102,8 @@ public class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler,
// Attach and start network.
this.net = net;
net.attach(this);
- if (!net.waitUntilReady(120)) {
+ if ( ! net.waitUntilReady(120))
throw new IllegalStateException("Network failed to become ready in time.");
- }
// Start messenger.
msn = new Messenger();
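
The reworked class comment above describes the intended usage pattern: one MessageBus per runtime, configured once, with sessions created from it in many places. The following is a minimal sketch of that pattern, assuming the MessageBus(Network, MessageBusParams) constructor and the no-argument LocalNetwork constructor introduced later in this patch; session creation is only indicated in comments because the factory signatures are not shown here.

```
import com.yahoo.messagebus.MessageBus;
import com.yahoo.messagebus.MessageBusParams;
import com.yahoo.messagebus.network.local.LocalNetwork;
import com.yahoo.messagebus.test.SimpleProtocol;

public class MessageBusUsageSketch {

    public static void main(String[] args) {
        // One message bus instance for the runtime, configured with a protocol once.
        MessageBus messageBus = new MessageBus(new LocalNetwork(),
                                               new MessageBusParams().addProtocol(new SimpleProtocol()));

        // Source, intermediate and destination sessions would be created from this single
        // instance wherever they are needed (factory methods omitted; not shown in this patch).

        messageBus.destroy(); // tear down the bus when the runtime shuts down
    }

}
```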
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/NetworkMessageBus.java b/messagebus/src/main/java/com/yahoo/messagebus/NetworkMessageBus.java
new file mode 100644
index 00000000000..24e177f1fbf
--- /dev/null
+++ b/messagebus/src/main/java/com/yahoo/messagebus/NetworkMessageBus.java
@@ -0,0 +1,43 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.messagebus;
+
+import com.yahoo.messagebus.network.Network;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * The combination of a message bus and a network over which it may send data.
+ *
+ * @author bratseth
+ */
+public class NetworkMessageBus {
+
+ private final Network network;
+ private final MessageBus messageBus;
+
+ private final AtomicBoolean destroyed = new AtomicBoolean(false);
+
+ public NetworkMessageBus(Network network, MessageBus messageBus) {
+ this.network = network;
+ this.messageBus = messageBus;
+ }
+
+ /** Returns the contained message bus object */
+ public MessageBus getMessageBus() { return messageBus; }
+
+ /** Returns the network of this as a Network */
+ public Network getNetwork() { return network; }
+
+ /**
+ * Irreversibly destroys the content of this.
+ *
+ * @return true if this call destroyed the content, false if it was already destroyed
+ */
+ public boolean destroy() {
+ if ( destroyed.getAndSet(true)) return false;
+
+ getMessageBus().destroy();
+ return true;
+ }
+
+}
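
A small usage sketch of the class above, illustrating the idempotent destroy contract enforced by the AtomicBoolean: only the first call tears the message bus down. It assumes the same MessageBus and LocalNetwork constructors used elsewhere in this patch.

```
import com.yahoo.messagebus.MessageBus;
import com.yahoo.messagebus.MessageBusParams;
import com.yahoo.messagebus.NetworkMessageBus;
import com.yahoo.messagebus.network.local.LocalNetwork;

public class NetworkMessageBusSketch {

    public static void main(String[] args) {
        LocalNetwork network = new LocalNetwork();
        NetworkMessageBus bus = new NetworkMessageBus(network, new MessageBus(network, new MessageBusParams()));

        System.out.println(bus.destroy()); // true: the content existed and was destroyed
        System.out.println(bus.destroy()); // false: already destroyed, the call is a no-op
    }

}
```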
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/RPCMessageBus.java b/messagebus/src/main/java/com/yahoo/messagebus/RPCMessageBus.java
index d767e197b11..cfa50a35549 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/RPCMessageBus.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/RPCMessageBus.java
@@ -3,6 +3,7 @@ package com.yahoo.messagebus;
import com.yahoo.log.LogLevel;
import com.yahoo.messagebus.network.Identity;
+import com.yahoo.messagebus.network.Network;
import com.yahoo.messagebus.network.rpc.RPCNetwork;
import com.yahoo.messagebus.network.rpc.RPCNetworkParams;
@@ -17,12 +18,9 @@ import java.util.logging.Logger;
*
* @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a>
*/
-public class RPCMessageBus {
+public class RPCMessageBus extends NetworkMessageBus {
private static final Logger log = Logger.getLogger(RPCMessageBus.class.getName());
- private final AtomicBoolean destroyed = new AtomicBoolean(false);
- private final MessageBus mbus;
- private final RPCNetwork net;
private final ConfigAgent configAgent;
/**
@@ -33,9 +31,16 @@ public class RPCMessageBus {
* @param routingCfgId The config id for message bus routing specs.
*/
public RPCMessageBus(MessageBusParams mbusParams, RPCNetworkParams rpcParams, String routingCfgId) {
- net = new RPCNetwork(rpcParams);
- mbus = new MessageBus(net, mbusParams);
- configAgent = new ConfigAgent(routingCfgId != null ? routingCfgId : "client", mbus);
+ this(mbusParams, new RPCNetwork(rpcParams), routingCfgId);
+ }
+
+ private RPCMessageBus(MessageBusParams mbusParams, RPCNetwork network, String routingCfgId) {
+ this(new MessageBus(network, mbusParams), network, routingCfgId);
+ }
+
+ private RPCMessageBus(MessageBus messageBus, RPCNetwork network, String routingCfgId) {
+ super(network, messageBus);
+ configAgent = new ConfigAgent(routingCfgId != null ? routingCfgId : "client", messageBus);
configAgent.subscribe();
}
@@ -80,33 +85,17 @@ public class RPCMessageBus {
* Sets the destroyed flag to true. The very first time this method is called, it cleans up all its dependencies.
* Even if you retain a reference to this object, all of its content is allowed to be garbage collected.
*
- * @return True if content existed and was destroyed.
+ * @return true if content existed and was destroyed.
*/
+ @Override
public boolean destroy() {
- if (!destroyed.getAndSet(true)) {
+ boolean destroyed = super.destroy();
+ if (destroyed)
configAgent.shutdown();
- mbus.destroy();
- return true;
- }
- return false;
+ return destroyed;
}
- /**
- * Returns the contained message bus object.
- *
- * @return Message bus.
- */
- public MessageBus getMessageBus() {
- return mbus;
- }
-
- /**
- * Returns the contained rpc network object.
- *
- * @return RPC network.
- */
- public RPCNetwork getRPCNetwork() {
- return net;
- }
+ /** Returns the network of this as an RPCNetwork */
+ public RPCNetwork getRPCNetwork() { return (RPCNetwork)getNetwork(); }
}
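
The constructor chain above exists because Java does not allow statements before super(...): the RPCNetwork must be created exactly once and then handed both to the MessageBus constructor and to super(network, messageBus). Below is a generic sketch of that delegation pattern, with made-up Engine/Vehicle/Car types standing in for RPCNetwork/NetworkMessageBus/RPCMessageBus.

```
public class ConstructorChainingSketch {

    static class Engine { }

    static class Vehicle {
        Vehicle(Engine engine) { }
    }

    static class Car extends Vehicle {
        private final Engine engine; // the same instance super() received

        // Public constructor: create the shared Engine exactly once...
        public Car() {
            this(new Engine());
        }

        // ...private constructor: pass that one instance both to super() and to the field.
        private Car(Engine engine) {
            super(engine);
            this.engine = engine;
        }
    }

    public static void main(String[] args) {
        System.out.println(new Car().engine != null); // prints true
    }

}
```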
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/Identity.java b/messagebus/src/main/java/com/yahoo/messagebus/network/Identity.java
index 37d42169a1a..45887b072ab 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/network/Identity.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/network/Identity.java
@@ -1,7 +1,12 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.messagebus.network;
+import com.yahoo.log.LogLevel;
import com.yahoo.net.HostName;
+import com.yahoo.net.LinuxInetAddress;
+
+import java.net.Inet6Address;
+import java.net.InetAddress;
/**
* This class encapsulates the identity of the application that uses this instance of message bus. This identity
@@ -11,6 +16,7 @@ import com.yahoo.net.HostName;
* @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a>
*/
public class Identity {
+
private final String hostname;
private final String servicePrefix;
@@ -22,7 +28,11 @@ public class Identity {
* @param configId The config identifier for the application.
*/
public Identity(String configId) {
- hostname = HostName.getLocalhost();
+ InetAddress addr = LinuxInetAddress.getLocalHost(); // try hard to get a resolvable address
+ if (addr instanceof Inet6Address) //
+ hostname = HostName.getLocalhost(); // ... but fallback to hostname if we get an IPv6 address
+ else
+ hostname = addr.getCanonicalHostName();
servicePrefix = configId;
}
@@ -55,4 +65,5 @@ public class Identity {
public String getServicePrefix() {
return servicePrefix;
}
+
}
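
The Identity change above prefers the canonical name of a resolvable local address but falls back to the plain hostname when that address is IPv6. The sketch below mirrors that selection with plain JDK calls; InetAddress.getLocalHost() and getHostName() are stand-ins for the internal LinuxInetAddress.getLocalHost() and HostName.getLocalhost() helpers used in the patch.

```
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.UnknownHostException;

public class HostnameSelectionSketch {

    static String selectHostname() throws UnknownHostException {
        InetAddress addr = InetAddress.getLocalHost();  // stand-in for LinuxInetAddress.getLocalHost()
        if (addr instanceof Inet6Address)
            return addr.getHostName();                  // stand-in for HostName.getLocalhost()
        return addr.getCanonicalHostName();             // resolvable non-IPv6 address: use its canonical name
    }

    public static void main(String[] args) throws UnknownHostException {
        System.out.println(selectHostname());
    }

}
```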
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/Network.java b/messagebus/src/main/java/com/yahoo/messagebus/network/Network.java
index cd3b3286778..b0bbe4266c4 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/network/Network.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/network/Network.java
@@ -21,28 +21,28 @@ public interface Network {
* @param seconds The timeout.
* @return True if ready.
*/
- public boolean waitUntilReady(double seconds);
+ boolean waitUntilReady(double seconds);
/**
* Attach the network layer to the given owner
*
* @param owner owner of the network
*/
- public void attach(NetworkOwner owner);
+ void attach(NetworkOwner owner);
/**
* Register a session name with the network layer. This will make the session visible to other nodes.
*
* @param session the session name
*/
- public void registerSession(String session);
+ void registerSession(String session);
/**
* Unregister a session name with the network layer. This will make the session unavailable for other nodes.
*
* @param session session name
*/
- public void unregisterSession(String session);
+ void unregisterSession(String session);
/**
* Resolves the service address of the recipient referenced by the given routing node. If a recipient can not be
@@ -52,7 +52,7 @@ public interface Network {
* @param recipient The node whose service address to allocate.
* @return True if a service address was allocated.
*/
- public boolean allocServiceAddress(RoutingNode recipient);
+ boolean allocServiceAddress(RoutingNode recipient);
/**
* Frees the service address from the given routing node. This allows the network layer to track and close
@@ -60,7 +60,7 @@ public interface Network {
*
* @param recipient The node whose service address to free.
*/
- public void freeServiceAddress(RoutingNode recipient);
+ void freeServiceAddress(RoutingNode recipient);
/**
* Send a message to the given recipients. A {@link RoutingNode} contains all the necessary context for sending.
@@ -68,7 +68,7 @@ public interface Network {
* @param msg The message to send.
* @param recipients A list of routing leaf nodes resolved for the message.
*/
- public void send(Message msg, List<RoutingNode> recipients);
+ void send(Message msg, List<RoutingNode> recipients);
/**
* Synchronize with internal threads. This method will handshake with all internal threads. This has the implicit
@@ -76,12 +76,12 @@ public interface Network {
* that would make the thread wait for itself... forever. This method is typically used to untangle during session
* shutdown.
*/
- public void sync();
+ void sync();
/**
* Shuts down the network. This is a blocking call that waits for all scheduled tasks to complete.
*/
- public void shutdown();
+ void shutdown();
/**
* Returns a string that represents the connection specs of this network. It is in not a complete address since it
@@ -89,12 +89,13 @@ public interface Network {
*
* @return The connection string.
*/
- public String getConnectionSpec();
+ String getConnectionSpec();
/**
* Returns a reference to a name server mirror.
*
* @return The mirror object.
*/
- public IMirror getMirror();
+ IMirror getMirror();
+
}
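
The modifier removals in Network.java above are purely cosmetic: interface methods are implicitly public and abstract, so dropping the explicit public keyword does not change the contract. A self-contained illustration with generic names, not part of the patch:

```
public class InterfaceModifierSketch {

    // No explicit modifiers, yet greet() is still public and abstract,
    // exactly like the trimmed methods in Network.java.
    interface Greeter {
        String greet(String name);
    }

    public static void main(String[] args) {
        Greeter greeter = name -> "hello " + name; // single abstract method, so a lambda works
        System.out.println(greeter.greet("mbus"));
    }

}
```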
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalNetwork.java b/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalNetwork.java
index ffcb853a0a7..78cf352cfbf 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalNetwork.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalNetwork.java
@@ -23,7 +23,7 @@ import java.util.concurrent.Executors;
import static com.yahoo.messagebus.ErrorCode.NO_ADDRESS_FOR_SERVICE;
/**
- * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen Hult</a>
+ * @author Simon Thoresen Hult
*/
public class LocalNetwork implements Network {
@@ -32,35 +32,39 @@ public class LocalNetwork implements Network {
private final String hostId;
private volatile NetworkOwner owner;
- public LocalNetwork(final LocalWire wire) {
+ public LocalNetwork() {
+ this(new LocalWire());
+ }
+
+ public LocalNetwork(LocalWire wire) {
this.wire = wire;
this.hostId = wire.newHostId();
}
@Override
- public boolean waitUntilReady(final double seconds) {
+ public boolean waitUntilReady(double seconds) {
return true;
}
@Override
- public void attach(final NetworkOwner owner) {
+ public void attach(NetworkOwner owner) {
this.owner = owner;
}
@Override
- public void registerSession(final String session) {
+ public void registerSession(String session) {
wire.registerService(hostId + "/" + session, this);
}
@Override
- public void unregisterSession(final String session) {
+ public void unregisterSession(String session) {
wire.unregisterService(hostId + "/" + session);
}
@Override
- public boolean allocServiceAddress(final RoutingNode recipient) {
- final String service = recipient.getRoute().getHop(0).getServiceName();
- final ServiceAddress address = wire.resolveServiceAddress(service);
+ public boolean allocServiceAddress(RoutingNode recipient) {
+ String service = recipient.getRoute().getHop(0).getServiceName();
+ ServiceAddress address = wire.resolveServiceAddress(service);
if (address == null) {
recipient.setError(new Error(NO_ADDRESS_FOR_SERVICE, "No address for service '" + service + "'."));
return false;
@@ -70,24 +74,24 @@ public class LocalNetwork implements Network {
}
@Override
- public void freeServiceAddress(final RoutingNode recipient) {
+ public void freeServiceAddress(RoutingNode recipient) {
recipient.setServiceAddress(null);
}
@Override
- public void send(final Message msg, final List<RoutingNode> recipients) {
- for (final RoutingNode recipient : recipients) {
+ public void send(Message msg, List<RoutingNode> recipients) {
+ for (RoutingNode recipient : recipients) {
new MessageEnvelope(this, msg, recipient).send();
}
}
- private void receiveLater(final MessageEnvelope envelope) {
- final byte[] payload = envelope.sender.encode(envelope.msg.getProtocol(), envelope.msg);
+ private void receiveLater(MessageEnvelope envelope) {
+ byte[] payload = envelope.sender.encode(envelope.msg.getProtocol(), envelope.msg);
executor.execute(new Runnable() {
@Override
public void run() {
- final Message msg = decode(envelope.msg.getProtocol(), payload, Message.class);
+ Message msg = decode(envelope.msg.getProtocol(), payload, Message.class);
msg.getTrace().setLevel(envelope.msg.getTrace().getLevel());
msg.setRoute(envelope.msg.getRoute()).getRoute().removeHop(0);
msg.setRetryEnabled(envelope.msg.getRetryEnabled());
@@ -96,7 +100,7 @@ public class LocalNetwork implements Network {
msg.pushHandler(new ReplyHandler() {
@Override
- public void handleReply(final Reply reply) {
+ public void handleReply(Reply reply) {
new ReplyEnvelope(LocalNetwork.this, envelope, reply).send();
}
});
@@ -106,17 +110,17 @@ public class LocalNetwork implements Network {
});
}
- private void receiveLater(final ReplyEnvelope envelope) {
- final byte[] payload = envelope.sender.encode(envelope.reply.getProtocol(), envelope.reply);
+ private void receiveLater(ReplyEnvelope envelope) {
+ byte[] payload = envelope.sender.encode(envelope.reply.getProtocol(), envelope.reply);
executor.execute(new Runnable() {
@Override
public void run() {
- final Reply reply = decode(envelope.reply.getProtocol(), payload, Reply.class);
+ Reply reply = decode(envelope.reply.getProtocol(), payload, Reply.class);
reply.setRetryDelay(envelope.reply.getRetryDelay());
reply.getTrace().getRoot().addChild(TraceNode.decode(envelope.reply.getTrace().getRoot().encode()));
for (int i = 0, len = envelope.reply.getNumErrors(); i < len; ++i) {
- final Error error = envelope.reply.getError(i);
+ Error error = envelope.reply.getError(i);
reply.addError(new Error(error.getCode(),
error.getMessage(),
error.getService() != null ? error.getService() : envelope.sender.hostId));
@@ -126,7 +130,7 @@ public class LocalNetwork implements Network {
});
}
- private byte[] encode(final Utf8String protocolName, final Routable toEncode) {
+ private byte[] encode(Utf8String protocolName, Routable toEncode) {
if (toEncode.getType() == 0) {
return new byte[0];
}
@@ -134,7 +138,7 @@ public class LocalNetwork implements Network {
}
@SuppressWarnings("unchecked")
- private <T extends Routable> T decode(final Utf8String protocolName, final byte[] toDecode, final Class<T> clazz) {
+ private <T extends Routable> T decode(Utf8String protocolName, byte[] toDecode, Class<T> clazz) {
if (toDecode.length == 0) {
return clazz.cast(new EmptyReply());
}
@@ -167,15 +171,14 @@ public class LocalNetwork implements Network {
final Message msg;
final RoutingNode recipient;
- MessageEnvelope(final LocalNetwork sender, final Message msg, final RoutingNode recipient) {
+ MessageEnvelope(LocalNetwork sender, Message msg, RoutingNode recipient) {
this.sender = sender;
this.msg = msg;
this.recipient = recipient;
}
void send() {
- LocalServiceAddress.class.cast(recipient.getServiceAddress())
- .getNetwork().receiveLater(this);
+ LocalServiceAddress.class.cast(recipient.getServiceAddress()).getNetwork().receiveLater(this);
}
}
@@ -185,7 +188,7 @@ public class LocalNetwork implements Network {
final MessageEnvelope parent;
final Reply reply;
- ReplyEnvelope(final LocalNetwork sender, final MessageEnvelope parent, final Reply reply) {
+ ReplyEnvelope(LocalNetwork sender, MessageEnvelope parent, Reply reply) {
this.sender = sender;
this.parent = parent;
this.reply = reply;
@@ -195,4 +198,5 @@ public class LocalNetwork implements Network {
parent.sender.receiveLater(this);
}
}
+
}
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalWire.java b/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalWire.java
index 84ca8c64bc0..5c9035a5f99 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalWire.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/network/local/LocalWire.java
@@ -11,7 +11,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
/**
- * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen Hult</a>
+ * @author Simon Thoresen Hult
*/
public class LocalWire implements IMirror {
@@ -19,19 +19,19 @@ public class LocalWire implements IMirror {
private final AtomicInteger updateCnt = new AtomicInteger();
private final ConcurrentHashMap<String, LocalNetwork> services = new ConcurrentHashMap<>();
- public void registerService(final String serviceName, final LocalNetwork owner) {
+ public void registerService(String serviceName, LocalNetwork owner) {
if (services.putIfAbsent(serviceName, owner) != null) {
throw new IllegalStateException();
}
updateCnt.incrementAndGet();
}
- public void unregisterService(final String serviceName) {
+ public void unregisterService(String serviceName) {
services.remove(serviceName);
updateCnt.incrementAndGet();
}
- public LocalServiceAddress resolveServiceAddress(final String serviceName) {
+ public LocalServiceAddress resolveServiceAddress(String serviceName) {
final LocalNetwork owner = services.get(serviceName);
return owner != null ? new LocalServiceAddress(serviceName, owner) : null;
}
@@ -41,10 +41,10 @@ public class LocalWire implements IMirror {
}
@Override
- public Mirror.Entry[] lookup(final String pattern) {
- final List<Mirror.Entry> out = new ArrayList<>();
- final Pattern regex = Pattern.compile(pattern.replace("*", "[a-zA-Z0-9_-]+"));
- for (final String key : services.keySet()) {
+ public Mirror.Entry[] lookup(String pattern) {
+ List<Mirror.Entry> out = new ArrayList<>();
+ Pattern regex = Pattern.compile(pattern.replace("*", "[a-zA-Z0-9_-]+"));
+ for (String key : services.keySet()) {
if (regex.matcher(key).matches()) {
out.add(new Mirror.Entry(key, key));
}
@@ -56,4 +56,5 @@ public class LocalWire implements IMirror {
public int updates() {
return updateCnt.get();
}
+
}
diff --git a/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java b/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java
index 9ab24d662bd..f5f8dd56991 100644
--- a/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java
+++ b/messagebus/src/main/java/com/yahoo/messagebus/network/rpc/RPCNetwork.java
@@ -51,7 +51,7 @@ public class RPCNetwork implements Network, MethodHandler {
private final ExecutorService sendService =
new ThreadPoolExecutor(Runtime.getRuntime().availableProcessors(), Runtime.getRuntime().availableProcessors(),
0L, TimeUnit.SECONDS,
- new SynchronousQueue<Runnable>(false),
+ new SynchronousQueue<>(false),
ThreadFactoryFactory.getDaemonThreadFactory("mbus.net"), new ThreadPoolExecutor.CallerRunsPolicy());
/**
diff --git a/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java b/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java
index cde801d81f2..5e0df7068b0 100755
--- a/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java
+++ b/messagebus/src/test/java/com/yahoo/messagebus/ChokeTestCase.java
@@ -11,28 +11,30 @@ import com.yahoo.messagebus.test.Receptor;
import com.yahoo.messagebus.test.SimpleMessage;
import com.yahoo.messagebus.test.SimpleProtocol;
import junit.framework.TestCase;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertFalse;
/**
- * @author <a href="mailto:simon@yahoo-inc.com">Simon Thoresen</a>
+ * @author Simon Thoresen
*/
-public class ChokeTestCase extends TestCase {
-
- ////////////////////////////////////////////////////////////////////////////////
- //
- // Setup
- //
- ////////////////////////////////////////////////////////////////////////////////
+public class ChokeTestCase {
Slobrok slobrok;
TestServer srcServer, dstServer;
SourceSession srcSession;
DestinationSession dstSession;
- @Override
+ @Before
public void setUp() throws ListenFailedException, UnknownHostException {
slobrok = new Slobrok();
dstServer = new TestServer(new MessageBusParams().addProtocol(new SimpleProtocol()),
@@ -45,7 +47,7 @@ public class ChokeTestCase extends TestCase {
assertTrue(srcServer.waitSlobrok("dst/session", 1));
}
- @Override
+ @After
public void tearDown() {
slobrok.stop();
dstSession.destroy();
@@ -54,12 +56,7 @@ public class ChokeTestCase extends TestCase {
srcServer.destroy();
}
- ////////////////////////////////////////////////////////////////////////////////
- //
- // Tests
- //
- ////////////////////////////////////////////////////////////////////////////////
-
+ @Test
public void testMaxCount() {
int max = 10;
dstServer.mb.setMaxPendingCount(max);
@@ -107,6 +104,7 @@ public class ChokeTestCase extends TestCase {
assertEquals(0, dstServer.mb.getPendingCount());
}
+ @Test
public void testMaxSize() {
int size = createMessage("msg").getApproxSize();
int max = size * 10;
@@ -155,15 +153,10 @@ public class ChokeTestCase extends TestCase {
assertEquals(0, dstServer.mb.getPendingSize());
}
- ////////////////////////////////////////////////////////////////////////////////
- //
- // Utilities
- //
- ////////////////////////////////////////////////////////////////////////////////
-
private static Message createMessage(String msg) {
Message ret = new SimpleMessage(msg);
ret.getTrace().setLevel(9);
return ret;
}
+
}
diff --git a/node-admin/README_MAC.md b/node-admin/README_MAC.md
index 75a67f6a29c..71f0cae0d7d 100644
--- a/node-admin/README_MAC.md
+++ b/node-admin/README_MAC.md
@@ -43,7 +43,7 @@ scripts/setup-route-and-hosts-osx.sh
The script will prompt you to continue as this will alter your routing table and /etc/hosts file. If your local zone is up and running, the config server should respond to this:
```
-curl config-server:19071
+curl config-server:4080
```
If you don't want your `/etc/hosts` file to be changed, the
diff --git a/node-admin/include/nodectl-instance.sh b/node-admin/include/nodectl-instance.sh
index 5a6665dbdc7..a8d872b314e 100755
--- a/node-admin/include/nodectl-instance.sh
+++ b/node-admin/include/nodectl-instance.sh
@@ -103,7 +103,7 @@ stop() {
$echo $VESPA_HOME/bin/vespa-routing vip -u chef out
if has_searchnode; then
- $echo $VESPA_HOME/bin/vespa-proton-cmd --local triggerFlush
+ $echo $VESPA_HOME/bin/vespa-proton-cmd --local prepareRestart
fi
if has_container; then
diff --git a/node-admin/scripts/app.sh b/node-admin/scripts/app.sh
index 83754413508..2757d637dc8 100755
--- a/node-admin/scripts/app.sh
+++ b/node-admin/scripts/app.sh
@@ -101,7 +101,7 @@ function DeployApp {
# Create tenant
echo -n "Creating tenant... "
local create_tenant_response
- if create_tenant_response=$(curl --silent --show-error -X PUT "http://$CONFIG_SERVER_HOSTNAME:$CONFIG_SERVER_PORT/application/v2/tenant/$TENANT_NAME" 2>&1)
+ if create_tenant_response=$(curl --silent --show-error -X PUT "http://$CONFIG_SERVER_HOSTNAME:$VESPA_WEB_SERVICE_PORT/application/v2/tenant/$TENANT_NAME" 2>&1)
then
if ! [[ "$create_tenant_response" =~ "Tenant $TENANT_NAME created" ]] &&
! [[ "$create_tenant_response" =~ "already exists" ]]
@@ -131,7 +131,7 @@ function UndeployApp {
local app_name=default
local output
echo -n "Removing application $TENANT_NAME:$app_name... "
- if ! output=$(curl --silent --show-error -X DELETE "http://$CONFIG_SERVER_HOSTNAME:$CONFIG_SERVER_PORT/application/v2/tenant/$TENANT_NAME/application/$app_name")
+ if ! output=$(curl --silent --show-error -X DELETE "http://$CONFIG_SERVER_HOSTNAME:$VESPA_WEB_SERVICE_PORT/application/v2/tenant/$TENANT_NAME/application/$app_name")
then
echo
Fail "Failed to remove application: $output"
diff --git a/node-admin/scripts/common.sh b/node-admin/scripts/common.sh
index d07b4adcc5a..6a10fb71a99 100644
--- a/node-admin/scripts/common.sh
+++ b/node-admin/scripts/common.sh
@@ -28,7 +28,7 @@ declare -r NODE_ADMIN_CONTAINER_NAME=node-admin
declare -r CONFIG_SERVER_CONTAINER_NAME=config-server
declare -r CONFIG_SERVER_HOSTNAME="$CONFIG_SERVER_CONTAINER_NAME"
declare -r CONFIG_SERVER_IP="$NETWORK_PREFIX.1.1"
-declare -r CONFIG_SERVER_PORT=19071
+declare -r VESPA_WEB_SERVICE_PORT=4080 # E.g. config server port
declare -r DEFAULT_HOSTED_VESPA_REGION=local-region
declare -r DEFAULT_HOSTED_VESPA_ENVIRONMENT=prod
diff --git a/node-admin/scripts/config-server.sh b/node-admin/scripts/config-server.sh
index 60b05d4b3cd..f8e0f1a60e4 100755
--- a/node-admin/scripts/config-server.sh
+++ b/node-admin/scripts/config-server.sh
@@ -124,7 +124,7 @@ function Start {
then
# Wait for config server to come up
echo -n "Waiting for healthy Config Server (~30s)"
- local url="http://$CONFIG_SERVER_HOSTNAME:19071/state/v1/health"
+ local url="http://$CONFIG_SERVER_HOSTNAME:$VESPA_WEB_SERVICE_PORT/state/v1/health"
while ! curl --silent --fail --max-time 1 "$url" >/dev/null
do
echo -n .
diff --git a/node-admin/scripts/node-repo.sh b/node-admin/scripts/node-repo.sh
index 94173a6726b..2e113843916 100755
--- a/node-admin/scripts/node-repo.sh
+++ b/node-admin/scripts/node-repo.sh
@@ -3,6 +3,8 @@
set -e
+declare -r VESPA_WEB_SERVICE_PORT=4080
+
# Output from InnerCurlNodeRepo, see there for details.
declare CURL_RESPONSE
@@ -162,7 +164,7 @@ function ProvisionNode {
local config_server_hostname="$1"
local json="$2"
- local url="http://$config_server_hostname:19071/nodes/v2/node"
+ local url="http://$config_server_hostname:$VESPA_WEB_SERVICE_PORT/nodes/v2/node"
CurlOrFail -H "Content-Type: application/json" -X POST -d "$json" "$url"
}
@@ -172,7 +174,7 @@ function SetNodeState {
local hostname="$2"
local state="$3"
- local url="http://$config_server_hostname:19071/nodes/v2/state/$state/$hostname"
+ local url="http://$config_server_hostname:$VESPA_WEB_SERVICE_PORT/nodes/v2/state/$state/$hostname"
CurlOrFail -X PUT "$url"
}
@@ -284,7 +286,7 @@ function RemoveCommand {
local hostname
for hostname in "$@"
do
- local url="http://$config_server_hostname:19071/nodes/v2/node/$hostname"
+ local url="http://$config_server_hostname:$VESPA_WEB_SERVICE_PORT/nodes/v2/node/$hostname"
CurlOrFail -X DELETE "$url"
echo -n .
done
diff --git a/node-admin/src/main/application/services.xml b/node-admin/src/main/application/services.xml
index a5dea070285..c746afa0e85 100644
--- a/node-admin/src/main/application/services.xml
+++ b/node-admin/src/main/application/services.xml
@@ -8,11 +8,13 @@
</handler>
<component id="node-admin" class="com.yahoo.vespa.hosted.node.admin.provider.ComponentsProviderImpl" bundle="node-admin"/>
<component id="docker-api" class="com.yahoo.vespa.hosted.dockerapi.DockerImpl" bundle="docker-api"/>
+ <component id="metrics-wrapper" class="com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper" bundle="docker-api"/>
<config name='vespa.hosted.dockerapi.docker'>
- <caCertPath>/host/docker/certs/ca_cert.pem</caCertPath>
- <clientCertPath>/host/docker/certs/client_cert.pem</clientCertPath>
- <clientKeyPath>/host/docker/certs/client_key.pem</clientKeyPath>
+ <uri>tcp://localhost:2376</uri>
+ <caCertPath></caCertPath>
+ <clientCertPath></clientCertPath>
+ <clientKeyPath></clientKeyPath>
</config>
</jdisc>
</services>
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
index bfd46e1453e..0877275f93d 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/docker/DockerOperationsImpl.java
@@ -27,8 +27,10 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
+import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.regex.Matcher;
@@ -50,24 +52,29 @@ public class DockerOperationsImpl implements DockerOperations {
private static final Pattern VESPA_VERSION_PATTERN = Pattern.compile("^(\\S*)$", Pattern.MULTILINE);
- private static final List<String> DIRECTORIES_TO_MOUNT = Arrays.asList(
- getDefaults().underVespaHome("logs"),
- getDefaults().underVespaHome("var/cache"),
- getDefaults().underVespaHome("var/crash"),
- getDefaults().underVespaHome("var/db/jdisc"),
- getDefaults().underVespaHome("var/db/vespa"),
- getDefaults().underVespaHome("var/jdisc_container"),
- getDefaults().underVespaHome("var/jdisc_core"),
- getDefaults().underVespaHome("var/maven"),
- getDefaults().underVespaHome("var/run"),
- getDefaults().underVespaHome("var/scoreboards"),
- getDefaults().underVespaHome("var/service"),
- getDefaults().underVespaHome("var/share"),
- getDefaults().underVespaHome("var/spool"),
- getDefaults().underVespaHome("var/vespa"),
- getDefaults().underVespaHome("var/yca"),
- getDefaults().underVespaHome("var/ycore++"),
- getDefaults().underVespaHome("var/zookeeper"));
+ // Map of directories to mount and whether they should be writeable by everyone
+ private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new HashMap<>();
+ static {
+ DIRECTORIES_TO_MOUNT.put("/metrics-share", true);
+ DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("logs"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/cache"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/crash"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/jdisc"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/db/vespa"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_container"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/jdisc_core"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/maven"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/run"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/scoreboards"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/service"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/share"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/spool"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/vespa"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/yca"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/ycore++"), false);
+ DIRECTORIES_TO_MOUNT.put(getDefaults().underVespaHome("var/zookeeper"), false);
+ }
private final Docker docker;
private final Environment environment;
@@ -119,34 +126,32 @@ public class DockerOperationsImpl implements DockerOperations {
}
private void configureContainer(ContainerNodeSpec nodeSpec) {
- Path yamasAgentTempFolder = Paths.get("/tmp/yamas_schedule_" + System.currentTimeMillis() + "/yamas-agent/");
- yamasAgentTempFolder.toFile().mkdirs();
+ final Path yamasAgentFolder = Paths.get("/etc/yamas-agent/");
- // Path to the executeable that the secret-agent will run to gather metrics
- Path systemCheckPath = Paths.get("/usr/bin/yms_check_system");
- // Path to the secret-agent schedule file
- Path systemCheckSchedulePath = yamasAgentTempFolder.resolve("system-checks.yaml");
- // Contents of the secret-agent schedule file
- String systemCheckSchedule = generateSecretAgentSchedule(nodeSpec, "system-checks", 60, systemCheckPath,
- "-l", "/var/secret-agent/custom/");
+ Path diskUsageCheckPath = Paths.get("/bin/cat");
+ Path diskUsageCheckSchedulePath = yamasAgentFolder.resolve("disk-usage.yaml");
+ String diskUsageCheckSchedule = generateSecretAgentSchedule(nodeSpec, "disk-usage", 60, diskUsageCheckPath,
+ "/metrics-share/disk.usage");
Path vespaCheckPath = Paths.get("/home/y/libexec/yms/yms_check_vespa");
- Path vespaCheckSchedulePath = yamasAgentTempFolder.resolve("vespa.yaml");
+ Path vespaCheckSchedulePath = yamasAgentFolder.resolve("vespa.yaml");
String vespaCheckSchedule = generateSecretAgentSchedule(nodeSpec, "vespa", 60, vespaCheckPath, "all");
try {
- Files.write(systemCheckSchedulePath, systemCheckSchedule.getBytes());
- systemCheckSchedulePath.toFile().setReadable(true, false); // Give everyone read access to the schedule file
-
- Files.write(vespaCheckSchedulePath, vespaCheckSchedule.getBytes());
- vespaCheckSchedulePath.toFile().setReadable(true, false);
+ writeSecretAgentSchedule(nodeSpec.containerName, diskUsageCheckSchedulePath, diskUsageCheckSchedule);
+ writeSecretAgentSchedule(nodeSpec.containerName, vespaCheckSchedulePath, vespaCheckSchedule);
} catch (IOException e) {
e.printStackTrace();
}
- docker.copyArchiveToContainer(yamasAgentTempFolder.toString(), nodeSpec.containerName, "/etc/");
docker.executeInContainer(nodeSpec.containerName, "service", "yamas-agent", "restart");
}
+ private void writeSecretAgentSchedule(ContainerName containerName, Path schedulePath, String secretAgentSchedule) throws IOException {
+ Path scheduleFilePath = Maintainer.pathInNodeAdminFromPathInNode(containerName, schedulePath.toString());
+ Files.write(scheduleFilePath, secretAgentSchedule.getBytes());
+ scheduleFilePath.toFile().setReadable(true, false); // Give everyone read access to the schedule file
+ }
+
String generateSecretAgentSchedule(ContainerNodeSpec nodeSpec, String id, int interval, Path pathToCheck,
String... args) {
StringBuilder stringBuilder = new StringBuilder()
@@ -292,7 +297,7 @@ public class DockerOperationsImpl implements DockerOperations {
.withEnvironment("CONFIG_SERVER_ADDRESS", configServers);
command.withVolume("/etc/hosts", "/etc/hosts");
- for (String pathInNode : DIRECTORIES_TO_MOUNT) {
+ for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet()) {
String pathInHost = Maintainer.pathInHostFromPathInNode(nodeSpec.containerName, pathInNode).toString();
command = command.withVolume(pathInHost, pathInNode);
}
@@ -304,6 +309,9 @@ public class DockerOperationsImpl implements DockerOperations {
long minMainMemoryAvailableMb = (long) (nodeSpec.minMainMemoryAvailableGb.get() * 1024);
if (minMainMemoryAvailableMb > 0) {
command.withMemoryInMb(minMainMemoryAvailableMb);
+ // TOTAL_MEMORY_MB is used to make any jdisc container think the machine
+ // only has this much physical memory (overrides total memory reported by `free -m`).
+ command.withEnvironment("TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb));
}
}
@@ -317,6 +325,9 @@ public class DockerOperationsImpl implements DockerOperations {
} else {
docker.startContainer(nodeSpec.containerName);
}
+
+ DIRECTORIES_TO_MOUNT.entrySet().stream().filter(Map.Entry::getValue).forEach(entry ->
+ docker.executeInContainer(nodeSpec.containerName, "sudo", "chmod", "-R", "a+w", entry.getKey()));
} catch (UnknownHostException e) {
throw new RuntimeException("Failed to create container " + nodeSpec.containerName.asString(), e);
}
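
The list-to-map change above lets one data structure drive two steps: every key becomes a volume mount, and only the entries flagged true are chmod'd world-writeable after the container starts. Here is a simplified, self-contained sketch of that pattern, using an illustrative subset of directories and printouts in place of the Docker calls.

```
import java.util.LinkedHashMap;
import java.util.Map;

public class MountMapSketch {

    // Path in the node -> should it be writeable by everyone? (illustrative subset)
    private static final Map<String, Boolean> DIRECTORIES_TO_MOUNT = new LinkedHashMap<>();
    static {
        DIRECTORIES_TO_MOUNT.put("/metrics-share", true);
        DIRECTORIES_TO_MOUNT.put("/etc/yamas-agent", true);
        DIRECTORIES_TO_MOUNT.put("/home/y/logs", false);
        DIRECTORIES_TO_MOUNT.put("/home/y/var/db/vespa", false);
    }

    public static void main(String[] args) {
        // Step 1: every entry becomes a volume mount when the container is created.
        for (String pathInNode : DIRECTORIES_TO_MOUNT.keySet())
            System.out.println("mount " + pathInNode);

        // Step 2: after start, only the writeable entries are opened up.
        DIRECTORIES_TO_MOUNT.entrySet().stream()
                .filter(Map.Entry::getValue)
                .forEach(entry -> System.out.println("chmod -R a+w " + entry.getKey()));
    }

}
```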
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceScheduler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceScheduler.java
deleted file mode 100644
index 711edf4544d..00000000000
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceScheduler.java
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.node.admin.maintenance;
-
-import com.yahoo.vespa.hosted.dockerapi.ContainerName;
-
-import java.io.IOException;
-
-/**
- * @author valerijf
- */
-public interface MaintenanceScheduler {
- void removeOldFilesFromNode(ContainerName containerName);
-
- void cleanNodeAdmin();
-
- void deleteContainerStorage(ContainerName containerName) throws IOException;
-}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceSchedulerImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceSchedulerImpl.java
deleted file mode 100644
index db6b40f4c73..00000000000
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/MaintenanceSchedulerImpl.java
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-package com.yahoo.vespa.hosted.node.admin.maintenance;
-
-import com.yahoo.io.IOUtils;
-import com.yahoo.vespa.hosted.dockerapi.ContainerName;
-import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger;
-import com.yahoo.vespa.hosted.node.maintenance.DeleteOldAppData;
-import com.yahoo.vespa.hosted.node.maintenance.Maintainer;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.time.Duration;
-
-/**
- * @author valerijf
- */
-public class MaintenanceSchedulerImpl implements MaintenanceScheduler {
- private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(MaintenanceSchedulerImpl.class);
-
- private static final String[] baseArguments = {"sudo", "/home/y/libexec/vespa/node-admin/maintenance.sh"};
-
- @Override
- public void removeOldFilesFromNode(ContainerName containerName) {
- PrefixLogger logger = PrefixLogger.getNodeAgentLogger(MaintenanceSchedulerImpl.class, containerName);
-
- String[] pathsToClean = {"/home/y/logs/elasticsearch2", "/home/y/logs/logstash2",
- "/home/y/logs/daemontools_y", "/home/y/logs/nginx", "/home/y/logs/vespa"};
- for (String pathToClean : pathsToClean) {
- File path = Maintainer.pathInNodeAdminFromPathInNode(containerName, pathToClean).toFile();
- if (path.exists()) {
- DeleteOldAppData.deleteFiles(path.getAbsolutePath(), Duration.ofDays(3).getSeconds(), ".*\\.log\\..+", false);
- DeleteOldAppData.deleteFiles(path.getAbsolutePath(), Duration.ofDays(3).getSeconds(), ".*QueryAccessLog.*", false);
- }
- }
-
- File logArchiveDir = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/home/y/logs/vespa/logarchive").toFile();
- if (logArchiveDir.exists()) {
- DeleteOldAppData.deleteFiles(logArchiveDir.getAbsolutePath(), Duration.ofDays(31).getSeconds(), null, false);
- }
-
- File fileDistrDir = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/home/y/var/db/vespa/filedistribution").toFile();
- if (fileDistrDir.exists()) {
- DeleteOldAppData.deleteFiles(fileDistrDir.getAbsolutePath(), Duration.ofDays(31).getSeconds(), null, false);
- }
-
- execute(logger, Maintainer.JOB_CLEAN_CORE_DUMPS);
- }
-
- @Override
- public void cleanNodeAdmin() {
- execute(NODE_ADMIN_LOGGER, Maintainer.JOB_DELETE_OLD_APP_DATA);
- execute(NODE_ADMIN_LOGGER, Maintainer.JOB_CLEAN_HOME);
-
- File nodeAdminJDiskLogsPath = Maintainer.pathInNodeAdminFromPathInNode(new ContainerName("node-admin"),
- "/home/y/logs/jdisc_core/").toFile();
- DeleteOldAppData.deleteFiles(nodeAdminJDiskLogsPath.getAbsolutePath(), Duration.ofDays(31).getSeconds(), null, false);
- }
-
- @Override
- public void deleteContainerStorage(ContainerName containerName) throws IOException {
- PrefixLogger logger = PrefixLogger.getNodeAgentLogger(MaintenanceSchedulerImpl.class, containerName);
-
- File yVarDir = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/home/y/var").toFile();
- if (yVarDir.exists()) {
- DeleteOldAppData.deleteDirectories(yVarDir.getAbsolutePath(), 0, null);
- }
-
- Path from = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/");
- if (!Files.exists(from)) {
- logger.info("The application storage at " + from + " doesn't exist");
- return;
- }
-
- Path to = Maintainer.pathInNodeAdminToNodeCleanup(containerName);
- logger.info("Deleting application storage by moving it from " + from + " to " + to);
- //TODO: move to maintenance JVM
- Files.move(from, to);
- }
-
- private void execute(PrefixLogger logger, String... params) {
- try {
- Process p = Runtime.getRuntime().exec(concatenateArrays(baseArguments, params));
- String output = IOUtils.readAll(new InputStreamReader(p.getInputStream()));
- String errors = IOUtils.readAll(new InputStreamReader(p.getErrorStream()));
-
- if (! output.isEmpty()) logger.info(output);
- if (! errors.isEmpty()) logger.error(errors);
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
- private static String[] concatenateArrays(String[] ar1, String[] ar2) {
- String[] concatenated = new String[ar1.length + ar2.length];
- System.arraycopy(ar1, 0, concatenated, 0, ar1.length);
- System.arraycopy(ar2, 0, concatenated, ar1.length, ar2.length);
- return concatenated;
- }
-} \ No newline at end of file
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
new file mode 100644
index 00000000000..0dd1a24d93e
--- /dev/null
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainer.java
@@ -0,0 +1,195 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.maintenance;
+
+import com.yahoo.io.IOUtils;
+import com.yahoo.vespa.hosted.dockerapi.ContainerName;
+import com.yahoo.vespa.hosted.node.admin.restapi.SecretAgentHandler;
+import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger;
+import com.yahoo.vespa.hosted.node.maintenance.DeleteOldAppData;
+import com.yahoo.vespa.hosted.node.maintenance.Maintainer;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * @author valerijf
+ */
+public class StorageMaintainer {
+ private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(StorageMaintainer.class);
+ private static final String[] baseArguments = {"sudo", "/home/y/libexec/vespa/node-admin/maintenance.sh"};
+ private static final long intervalSec = 1000;
+
+ private final Object monitor = new Object();
+
+ private Map<ContainerName, MetricsCache> metricsCacheByContainerName = new ConcurrentHashMap<>();
+ private Random random = new Random();
+
+ public void updateDiskUsage(String hostname, ContainerName containerName) {
+ updateMetricsCacheForContainerIfNeeded(containerName);
+
+ try {
+ PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
+ SecretAgentHandler secretAgentHandler = new SecretAgentHandler();
+ secretAgentHandler.withDimension("host", hostname);
+ metricsCacheByContainerName.get(containerName).metrics.forEach(secretAgentHandler::withMetric);
+
+ // First write to temp file, then move temp file to main file to achieve atomic write
+ Path metricsSharePath = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/metrics-share/disk.usage");
+ Path metricsSharePathTemp = Paths.get(metricsSharePath.toString() + "_temp");
+ Files.write(metricsSharePathTemp, secretAgentHandler.toJson().getBytes(StandardCharsets.UTF_8.name()));
+
+ // Files.move() fails if the target already exists; we could delete the target first, but then the write is no longer atomic
+ execute(logger, "mv", metricsSharePathTemp.toString(), metricsSharePath.toString());
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void updateMetricsCacheForContainerIfNeeded(ContainerName containerName) {
+ // Calculating disk usage is an IO-expensive operation and its value changes relatively slowly, so we want to
+ // perform that calculation rarely. Additionally, we spread out the calculation for different containers by
+ // adding a random deviation.
+ if (metricsCacheByContainerName.containsKey(containerName) &&
+ metricsCacheByContainerName.get(containerName).nextUpdateAt.isAfter(Instant.now())) return;
+
+ long distributedSecs = (long) (intervalSec * (0.5 + random.nextDouble()));
+ MetricsCache metricsCache = new MetricsCache(Instant.now().plusSeconds(distributedSecs));
+
+ // Throttle to one disk usage calculation at a time.
+ synchronized (monitor) {
+ PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
+ File containerDir = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/home/").toFile();
+
+ try {
+ long used = getDiscUsedInBytes(containerDir);
+ metricsCache.metrics.put("node.disk.used", used);
+ } catch (Throwable e) {
+ logger.error("Problems during disk usage calculations: " + e.getMessage());
+ }
+ }
+
+ metricsCacheByContainerName.put(containerName, metricsCache);
+ }
+
+ // Public for testing
+ long getDiscUsedInBytes(File path) throws IOException, InterruptedException {
+ final String[] command = {"du", "-xsk", path.toString()};
+
+ Process duCommand = new ProcessBuilder().command(command).start();
+ if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
+ duCommand.destroy();
+ throw new RuntimeException("Disk usage command timedout, aborting.");
+ }
+ String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
+ String error = IOUtils.readAll(new InputStreamReader(duCommand.getErrorStream()));
+
+ if (! error.isEmpty()) {
+ throw new RuntimeException("Disk usage wrote to error log: " + error);
+ }
+
+ String[] results = output.split("\t");
+ if (results.length != 2) {
+ throw new RuntimeException("Result from disk usage command not as expected: " + output);
+ }
+ long diskUsageKB = Long.valueOf(results[0]);
+
+ return diskUsageKB * 1024;
+ }
+
+
+ public void removeOldFilesFromNode(ContainerName containerName) {
+ PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
+
+ String[] pathsToClean = {"/home/y/logs/elasticsearch2", "/home/y/logs/logstash2",
+ "/home/y/logs/daemontools_y", "/home/y/logs/nginx", "/home/y/logs/vespa"};
+ for (String pathToClean : pathsToClean) {
+ File path = Maintainer.pathInNodeAdminFromPathInNode(containerName, pathToClean).toFile();
+ if (path.exists()) {
+ DeleteOldAppData.deleteFiles(path.getAbsolutePath(), Duration.ofDays(3).getSeconds(), ".*\\.log\\..+", false);
+ DeleteOldAppData.deleteFiles(path.getAbsolutePath(), Duration.ofDays(3).getSeconds(), ".*QueryAccessLog.*", false);
+ }
+ }
+
+ File logArchiveDir = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/home/y/logs/vespa/logarchive").toFile();
+ if (logArchiveDir.exists()) {
+ DeleteOldAppData.deleteFiles(logArchiveDir.getAbsolutePath(), Duration.ofDays(31).getSeconds(), null, false);
+ }
+
+ File fileDistrDir = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/home/y/var/db/vespa/filedistribution").toFile();
+ if (fileDistrDir.exists()) {
+ DeleteOldAppData.deleteFiles(fileDistrDir.getAbsolutePath(), Duration.ofDays(31).getSeconds(), null, false);
+ }
+
+ execute(logger, concatenateArrays(baseArguments, Maintainer.JOB_CLEAN_CORE_DUMPS));
+ }
+
+ public void cleanNodeAdmin() {
+ execute(NODE_ADMIN_LOGGER, concatenateArrays(baseArguments, Maintainer.JOB_DELETE_OLD_APP_DATA));
+ execute(NODE_ADMIN_LOGGER, concatenateArrays(baseArguments, Maintainer.JOB_CLEAN_HOME));
+
+ File nodeAdminJDiskLogsPath = Maintainer.pathInNodeAdminFromPathInNode(new ContainerName("node-admin"),
+ "/home/y/logs/jdisc_core/").toFile();
+ DeleteOldAppData.deleteFiles(nodeAdminJDiskLogsPath.getAbsolutePath(), Duration.ofDays(31).getSeconds(), null, false);
+ }
+
+ public void deleteContainerStorage(ContainerName containerName) throws IOException {
+ PrefixLogger logger = PrefixLogger.getNodeAgentLogger(StorageMaintainer.class, containerName);
+
+ File yVarDir = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/home/y/var").toFile();
+ if (yVarDir.exists()) {
+ DeleteOldAppData.deleteDirectories(yVarDir.getAbsolutePath(), 0, null);
+ }
+
+ Path from = Maintainer.pathInNodeAdminFromPathInNode(containerName, "/");
+ if (!Files.exists(from)) {
+ logger.info("The application storage at " + from + " doesn't exist");
+ return;
+ }
+
+ Path to = Maintainer.pathInNodeAdminToNodeCleanup(containerName);
+ logger.info("Deleting application storage by moving it from " + from + " to " + to);
+ //TODO: move to maintenance JVM
+ Files.move(from, to);
+ }
+
+ private void execute(PrefixLogger logger, String... params) {
+ try {
+ Process p = Runtime.getRuntime().exec(params);
+ String output = IOUtils.readAll(new InputStreamReader(p.getInputStream()));
+ String errors = IOUtils.readAll(new InputStreamReader(p.getErrorStream()));
+
+ if (! output.isEmpty()) logger.info(output);
+ if (! errors.isEmpty()) logger.error(errors);
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private static String[] concatenateArrays(String[] ar1, String... ar2) {
+ String[] concatenated = new String[ar1.length + ar2.length];
+ System.arraycopy(ar1, 0, concatenated, 0, ar1.length);
+ System.arraycopy(ar2, 0, concatenated, ar1.length, ar2.length);
+ return concatenated;
+ }
+
+ private static class MetricsCache {
+ private final Instant nextUpdateAt;
+ private final Map<String, Object> metrics = new HashMap<>();
+
+ MetricsCache(Instant nextUpdateAt) {
+ this.nextUpdateAt = nextUpdateAt;
+ }
+ }
+} \ No newline at end of file
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
index 600f4b16931..aaebe1b3784 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImpl.java
@@ -2,13 +2,17 @@
package com.yahoo.vespa.hosted.node.admin.nodeadmin;
import com.yahoo.collections.Pair;
+import com.yahoo.vespa.hosted.dockerapi.metrics.CounterWrapper;
+import com.yahoo.vespa.hosted.dockerapi.metrics.GaugeWrapper;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import com.yahoo.vespa.hosted.dockerapi.Container;
import com.yahoo.vespa.hosted.dockerapi.Docker;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
-import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler;
+import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent;
import com.yahoo.vespa.hosted.node.admin.util.PrefixLogger;
+import com.yahoo.vespa.hosted.provision.Node;
import java.io.IOException;
import java.time.Duration;
@@ -37,7 +41,7 @@ public class NodeAdminImpl implements NodeAdmin {
private final Docker docker;
private final Function<String, NodeAgent> nodeAgentFactory;
- private final MaintenanceScheduler maintenanceScheduler;
+ private final StorageMaintainer storageMaintainer;
private AtomicBoolean frozen = new AtomicBoolean(false);
private final Map<String, NodeAgent> nodeAgents = new HashMap<>();
@@ -46,24 +50,52 @@ public class NodeAdminImpl implements NodeAdmin {
private final int nodeAgentScanIntervalMillis;
+ private GaugeWrapper numberOfContainersInActiveState;
+ private GaugeWrapper numberOfContainersInLoadImageState;
+ private CounterWrapper numberOfUnhandledExceptionsInNodeAgent;
+
/**
* @param docker interface to docker daemon and docker-related tasks
* @param nodeAgentFactory factory for {@link NodeAgent} objects
*/
public NodeAdminImpl(final Docker docker, final Function<String, NodeAgent> nodeAgentFactory,
- final MaintenanceScheduler maintenanceScheduler, int nodeAgentScanIntervalMillis) {
+ final StorageMaintainer storageMaintainer, int nodeAgentScanIntervalMillis,
+ final MetricReceiverWrapper metricReceiver) {
this.docker = docker;
this.nodeAgentFactory = nodeAgentFactory;
- this.maintenanceScheduler = maintenanceScheduler;
+ this.storageMaintainer = storageMaintainer;
this.nodeAgentScanIntervalMillis = nodeAgentScanIntervalMillis;
+
+ this.numberOfContainersInActiveState = metricReceiver.declareGauge("nodes.state.active");
+ this.numberOfContainersInLoadImageState = metricReceiver.declareGauge("nodes.image.loading");
+ this.numberOfUnhandledExceptionsInNodeAgent = metricReceiver.declareCounter("nodes.unhandled_exceptions");
}
public void refreshContainersToRun(final List<ContainerNodeSpec> containersToRun) {
final List<Container> existingContainers = docker.getAllManagedContainers();
- maintenanceScheduler.cleanNodeAdmin();
+ storageMaintainer.cleanNodeAdmin();
synchronizeNodeSpecsToNodeAgents(containersToRun, existingContainers);
garbageCollectDockerImages(containersToRun);
+
+ updateNodeAgentMetrics();
+ }
+
+ private void updateNodeAgentMetrics() {
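+ // Aggregate per-agent state into the declared metrics: containers in active state, pending image downloads, and unhandled exceptions since the previous pass.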
+ int numberContainersInActive = 0;
+ int numberContainersWaitingImage = 0;
+ int numberOfNewUnhandledExceptions = 0;
+
+ for (NodeAgent nodeAgent : nodeAgents.values()) {
+ Optional<ContainerNodeSpec> nodeSpec = nodeAgent.getContainerNodeSpec();
+ if (nodeSpec.isPresent() && nodeSpec.get().nodeState == Node.State.active) numberContainersInActive++;
+ if (nodeAgent.isDownloadingImage()) numberContainersWaitingImage++;
+ numberOfNewUnhandledExceptions += nodeAgent.getAndResetNumberOfUnhandledExceptions();
+ }
+
+ numberOfContainersInActiveState.sample(numberContainersInActive);
+ numberOfContainersInLoadImageState.sample(numberContainersWaitingImage);
+ numberOfUnhandledExceptionsInNodeAgent.add(numberOfNewUnhandledExceptions);
}
public boolean freezeNodeAgentsAndCheckIfAllFrozen() {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java
index fe6af0fdeec..50b29991527 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgent.java
@@ -4,6 +4,7 @@ package com.yahoo.vespa.hosted.node.admin.nodeagent;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import java.util.Map;
+import java.util.Optional;
/**
* Responsible for management of a single node over its lifecycle.
@@ -55,5 +56,15 @@ public interface NodeAgent {
/**
* Returns the {@link ContainerNodeSpec} for this node agent.
*/
- ContainerNodeSpec getContainerNodeSpec();
+ Optional<ContainerNodeSpec> getContainerNodeSpec();
+
+ /**
+ * Returns true if NodeAgent is waiting for an image download to finish
+ */
+ boolean isDownloadingImage();
+
+ /**
+ * Returns and resets number of unhandled exceptions
+ */
+ int getAndResetNumberOfUnhandledExceptions();
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
index fd59002edcf..d00068c2e58 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImpl.java
@@ -4,7 +4,7 @@ package com.yahoo.vespa.hosted.node.admin.nodeagent;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations;
-import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler;
+import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepositoryImpl;
import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator;
@@ -18,6 +18,7 @@ import java.util.Date;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.Map;
+import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import static com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl.ContainerState.ABSENT;
@@ -45,7 +46,7 @@ public class NodeAgentImpl implements NodeAgent {
private final NodeRepository nodeRepository;
private final Orchestrator orchestrator;
private final DockerOperations dockerOperations;
- private final MaintenanceScheduler maintenanceScheduler;
+ private final StorageMaintainer storageMaintainer;
private final Object monitor = new Object();
@@ -53,6 +54,7 @@ public class NodeAgentImpl implements NodeAgent {
private final SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
private long delaysBetweenEachTickMillis;
+ private int numberOfUnhandledException = 0;
private Thread loopThread;
@@ -72,12 +74,12 @@ public class NodeAgentImpl implements NodeAgent {
final NodeRepository nodeRepository,
final Orchestrator orchestrator,
final DockerOperations dockerOperations,
- final MaintenanceScheduler maintenanceScheduler) {
+ final StorageMaintainer storageMaintainer) {
this.nodeRepository = nodeRepository;
this.orchestrator = orchestrator;
this.hostname = hostName;
this.dockerOperations = dockerOperations;
- this.maintenanceScheduler = maintenanceScheduler;
+ this.storageMaintainer = storageMaintainer;
this.logger = PrefixLogger.getNodeAgentLogger(NodeAgentImpl.class,
NodeRepositoryImpl.containerNameFromHostName(hostName));
}
@@ -138,7 +140,7 @@ public class NodeAgentImpl implements NodeAgent {
throw new RuntimeException("Can not restart a node agent.");
}
loopThread = new Thread(this::loop);
- loopThread.setName("loop-" + hostname.toString());
+ loopThread.setName("loop-" + hostname);
loopThread.start();
}
@@ -232,7 +234,7 @@ public class NodeAgentImpl implements NodeAgent {
imageBeingDownloaded = nodeSpec.wantedDockerImage.get();
// Invoke signalWorkToBeDone when the download finishes.
dockerOperations.scheduleDownloadOfImage(nodeSpec, this::signalWorkToBeDone);
- } else {
+ } else if (imageBeingDownloaded != null) { // Image was downloading, but now it is ready
imageBeingDownloaded = null;
}
}
@@ -272,6 +274,7 @@ public class NodeAgentImpl implements NodeAgent {
try {
tick();
} catch (Exception e) {
+ numberOfUnhandledException++;
logger.error("Unhandled exception, ignoring.", e);
addDebugMessage(e.getMessage());
} catch (Throwable t) {
@@ -303,7 +306,7 @@ public class NodeAgentImpl implements NodeAgent {
removeContainerIfNeededUpdateContainerState(nodeSpec);
break;
case active:
- maintenanceScheduler.removeOldFilesFromNode(nodeSpec.containerName);
+ storageMaintainer.removeOldFilesFromNode(nodeSpec.containerName);
scheduleDownLoadIfNeeded(nodeSpec);
if (imageBeingDownloaded != null) {
addDebugMessage("Waiting for image to download " + imageBeingDownloaded.asString());
@@ -326,17 +329,18 @@ public class NodeAgentImpl implements NodeAgent {
updateNodeRepoWithCurrentAttributes(nodeSpec);
logger.info("Call resume against Orchestrator");
orchestrator.resume(nodeSpec.hostname);
+ storageMaintainer.updateDiskUsage(nodeSpec.hostname, nodeSpec.containerName);
break;
case inactive:
- maintenanceScheduler.removeOldFilesFromNode(nodeSpec.containerName);
+ storageMaintainer.removeOldFilesFromNode(nodeSpec.containerName);
removeContainerIfNeededUpdateContainerState(nodeSpec);
break;
case provisioned:
case dirty:
- maintenanceScheduler.removeOldFilesFromNode(nodeSpec.containerName);
+ storageMaintainer.removeOldFilesFromNode(nodeSpec.containerName);
removeContainerIfNeededUpdateContainerState(nodeSpec);
logger.info("State is " + nodeSpec.nodeState + ", will delete application storage and mark node as ready");
- maintenanceScheduler.deleteContainerStorage(nodeSpec.containerName);
+ storageMaintainer.deleteContainerStorage(nodeSpec.containerName);
updateNodeRepoAndMarkNodeAsReady(nodeSpec);
break;
case parked:
@@ -348,9 +352,21 @@ public class NodeAgentImpl implements NodeAgent {
}
}
- public ContainerNodeSpec getContainerNodeSpec() {
+ public Optional<ContainerNodeSpec> getContainerNodeSpec() {
synchronized (monitor) {
- return lastNodeSpec;
+ return Optional.ofNullable(lastNodeSpec);
}
}
+
+ @Override
+ public boolean isDownloadingImage() {
+ return imageBeingDownloaded != null;
+ }
+
+ @Override
+ public int getAndResetNumberOfUnhandledExceptions() {
+ int temp = numberOfUnhandledException;
+ numberOfUnhandledException = 0;
+ return temp;
+ }
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java
index a8439bfc8cc..b91c2411d95 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImpl.java
@@ -1,6 +1,7 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.orchestrator;
+import com.yahoo.vespa.defaults.Defaults;
import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepositoryImpl;
import com.yahoo.vespa.hosted.node.admin.util.ConfigServerHttpRequestExecutor;
@@ -22,8 +23,7 @@ import java.util.Set;
*/
public class OrchestratorImpl implements Orchestrator {
private static final PrefixLogger NODE_ADMIN_LOGGER = PrefixLogger.getNodeAdminLogger(OrchestratorImpl.class);
- // TODO: Figure out the port dynamically.
- static final int HARDCODED_ORCHESTRATOR_PORT = 19071;
+ static final int WEB_SERVICE_PORT = Defaults.getDefaults().vespaWebServicePort();
// TODO: Find a way to avoid duplicating this (present in orchestrator's services.xml also).
private static final String ORCHESTRATOR_PATH_PREFIX = "/orchestrator";
static final String ORCHESTRATOR_PATH_PREFIX_HOST_API
@@ -53,7 +53,7 @@ public class OrchestratorImpl implements Orchestrator {
try {
final UpdateHostResponse updateHostResponse = requestExecutor.put(
ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended",
- HARDCODED_ORCHESTRATOR_PORT,
+ WEB_SERVICE_PORT,
Optional.empty(), /* body */
UpdateHostResponse.class);
return updateHostResponse.reason() == null;
@@ -72,7 +72,7 @@ public class OrchestratorImpl implements Orchestrator {
try {
final BatchOperationResult batchOperationResult = requestExecutor.put(
ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API,
- HARDCODED_ORCHESTRATOR_PORT,
+ WEB_SERVICE_PORT,
Optional.of(new BatchHostSuspendRequest(parentHostName, hostNames)),
BatchOperationResult.class);
return batchOperationResult.getFailureReason();
@@ -89,7 +89,7 @@ public class OrchestratorImpl implements Orchestrator {
try {
final UpdateHostResponse batchOperationResult = requestExecutor.delete(
ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName + "/suspended",
- HARDCODED_ORCHESTRATOR_PORT,
+ WEB_SERVICE_PORT,
UpdateHostResponse.class);
return batchOperationResult.reason() == null;
} catch (ConfigServerHttpRequestExecutor.NotFoundException n) {
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java
index b0e07e03eea..f00b6bb828c 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProvider.java
@@ -1,6 +1,7 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.provider;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater;
/**
@@ -10,4 +11,6 @@ import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater;
*/
public interface ComponentsProvider {
NodeAdminStateUpdater getNodeAdminStateUpdater();
+
+ MetricReceiverWrapper getMetricReceiverWrapper();
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java
index 19988959691..aa01fd602c4 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/provider/ComponentsProviderImpl.java
@@ -1,8 +1,9 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.provider;
-import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler;
-import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceSchedulerImpl;
+import com.yahoo.vespa.defaults.Defaults;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
+import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater;
@@ -26,18 +27,18 @@ import java.util.function.Function;
*/
public class ComponentsProviderImpl implements ComponentsProvider {
- private final Docker docker;
private final NodeAdminStateUpdater nodeAdminStateUpdater;
+ private final MetricReceiverWrapper metricReceiverWrapper;
private static final long INITIAL_SCHEDULER_DELAY_MILLIS = 1;
- private static final int NODE_AGENT_SCAN_INTERVAL_MILLIS = 60000;
- private static final int HARDCODED_NODEREPOSITORY_PORT = 19071;
+ private static final int NODE_AGENT_SCAN_INTERVAL_MILLIS = 30000;
+ private static final int WEB_SERVICE_PORT = Defaults.getDefaults().vespaWebServicePort();
private static final String ENV_HOSTNAME = "HOSTNAME";
// We only scan for new nodes within a host every 5 minutes. This is only if new nodes are added or removed
// which happens rarely. Changes to which apps are running etc. are detected by the NodeAgent.
private static final int NODE_ADMIN_STATE_INTERVAL_MILLIS = 5 * 60000;
- public ComponentsProviderImpl(final Docker docker) {
- this.docker = docker;
+
+ public ComponentsProviderImpl(final Docker docker, final MetricReceiverWrapper metricReceiver) {
String baseHostName = java.util.Optional.ofNullable(System.getenv(ENV_HOSTNAME))
.orElseThrow(() -> new IllegalStateException("Environment variable " + ENV_HOSTNAME + " unset"));
@@ -45,18 +46,26 @@ public class ComponentsProviderImpl implements ComponentsProvider {
Set<String> configServerHosts = environment.getConfigServerHosts();
Orchestrator orchestrator = new OrchestratorImpl(configServerHosts);
- NodeRepository nodeRepository = new NodeRepositoryImpl(configServerHosts, HARDCODED_NODEREPOSITORY_PORT, baseHostName);
- MaintenanceScheduler maintenanceScheduler = new MaintenanceSchedulerImpl();
+ NodeRepository nodeRepository = new NodeRepositoryImpl(configServerHosts, WEB_SERVICE_PORT, baseHostName);
+ StorageMaintainer storageMaintainer = new StorageMaintainer();
final Function<String, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName, nodeRepository,
- orchestrator, new DockerOperationsImpl(docker, environment), maintenanceScheduler);
- final NodeAdmin nodeAdmin = new NodeAdminImpl(docker, nodeAgentFactory, maintenanceScheduler, NODE_AGENT_SCAN_INTERVAL_MILLIS);
+ orchestrator, new DockerOperationsImpl(docker, environment), storageMaintainer);
+ final NodeAdmin nodeAdmin = new NodeAdminImpl(docker, nodeAgentFactory, storageMaintainer,
+ NODE_AGENT_SCAN_INTERVAL_MILLIS, metricReceiver);
nodeAdminStateUpdater = new NodeAdminStateUpdater(
nodeRepository, nodeAdmin, INITIAL_SCHEDULER_DELAY_MILLIS, NODE_ADMIN_STATE_INTERVAL_MILLIS, orchestrator, baseHostName);
+
+ metricReceiverWrapper = metricReceiver;
}
@Override
public NodeAdminStateUpdater getNodeAdminStateUpdater() {
return nodeAdminStateUpdater;
}
+
+ @Override
+ public MetricReceiverWrapper getMetricReceiverWrapper() {
+ return metricReceiverWrapper;
+ }
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java
index 2ce4151f497..ba083b5e593 100644
--- a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/RestApiHandler.java
@@ -7,12 +7,15 @@ import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.LoggingRequestHandler;
import com.yahoo.container.logging.AccessLog;
+import com.yahoo.net.HostName;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater;
import com.yahoo.vespa.hosted.node.admin.provider.ComponentsProvider;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.OutputStream;
+import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import java.util.Optional;
import java.util.concurrent.Executor;
@@ -30,12 +33,14 @@ import static com.yahoo.jdisc.http.HttpRequest.Method.PUT;
*/
public class RestApiHandler extends LoggingRequestHandler{
- private final NodeAdminStateUpdater refresher;
private final static ObjectMapper objectMapper = new ObjectMapper();
+ private final NodeAdminStateUpdater refresher;
+ private final MetricReceiverWrapper metricReceiverWrapper;
public RestApiHandler(Executor executor, AccessLog accessLog, ComponentsProvider componentsProvider) {
super(executor, accessLog);
this.refresher = componentsProvider.getNodeAdminStateUpdater();
+ this.metricReceiverWrapper = componentsProvider.getMetricReceiverWrapper();
}
@Override
@@ -47,16 +52,30 @@ public class RestApiHandler extends LoggingRequestHandler{
return handlePut(request);
}
return new SimpleResponse(400, "Only PUT and GET are implemented.");
-
}
private HttpResponse handleGet(HttpRequest request) {
String path = request.getUri().getPath();
if (path.endsWith("/info")) {
+ return new SimpleObjectResponse(200, refresher.getDebugPage());
+ }
+
+ if (path.endsWith("/metrics")) {
+ SecretAgentHandler secretAgentHandler = new SecretAgentHandler();
+ secretAgentHandler.withDimension("host", HostName.getLocalhost());
+ metricReceiverWrapper.getLatestMetrics().forEach(secretAgentHandler::withMetric);
+
return new HttpResponse(200) {
@Override
+ public String getContentType() {
+ return MediaType.APPLICATION_JSON;
+ }
+
+ @Override
public void render(OutputStream outputStream) throws IOException {
- objectMapper.writeValue(outputStream, refresher.getDebugPage());
+ try (PrintStream printStream = new PrintStream(outputStream)) {
+ printStream.write(secretAgentHandler.toJson().getBytes(StandardCharsets.UTF_8.name()));
+ }
}
};
}
@@ -84,10 +103,9 @@ public class RestApiHandler extends LoggingRequestHandler{
}
private static class SimpleResponse extends HttpResponse {
-
private final String jsonMessage;
- public SimpleResponse(int code, String message) {
+ SimpleResponse(int code, String message) {
super(code);
ObjectNode objectNode = objectMapper.createObjectNode();
objectNode.put("jsonMessage", message);
@@ -105,4 +123,22 @@ public class RestApiHandler extends LoggingRequestHandler{
}
}
+ private static class SimpleObjectResponse extends HttpResponse {
+ private final Object response;
+
+ SimpleObjectResponse(int status, Object response) {
+ super(status);
+ this.response = response;
+ }
+
+ @Override
+ public String getContentType() {
+ return MediaType.APPLICATION_JSON;
+ }
+
+ @Override
+ public void render(OutputStream outputStream) throws IOException {
+ objectMapper.writeValue(outputStream, response);
+ }
+ }
}
diff --git a/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/SecretAgentHandler.java b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/SecretAgentHandler.java
new file mode 100644
index 00000000000..e266d18eece
--- /dev/null
+++ b/node-admin/src/main/java/com/yahoo/vespa/hosted/node/admin/restapi/SecretAgentHandler.java
@@ -0,0 +1,43 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.restapi;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+/**
+ * Collects the last value from all previously declared counters/gauges and generates a map
+ * structure that can be converted to a secret-agent JSON message.
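+ *
+ * <p>Illustrative usage (names and values are hypothetical):
+ * {@code new SecretAgentHandler().withDimension("host", hostname).withMetric("node.disk.used", bytesUsed).toJson()}</p>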
+ *
+ * @author valerijf
+ */
+public class SecretAgentHandler {
+ private static final ObjectMapper objectMapper = new ObjectMapper();
+
+ private static final String applicationName = "docker";
+ private final Map<String, Object> dimensions = new HashMap<>();
+ private final Map<String, Object> metrics = new HashMap<>();
+
+ public SecretAgentHandler withDimension(String name, Object value) {
+ dimensions.put(name, value);
+ return this;
+ }
+
+ public SecretAgentHandler withMetric(String name, Object value) {
+ metrics.put(name, value);
+ return this;
+ }
+
+ public String toJson() throws JsonProcessingException {
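+ // LinkedHashMap preserves insertion order, so the serialized fields appear as application, timestamp, dimensions, metrics.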
+ Map<String, Object> report = new LinkedHashMap<>();
+ report.put("application", applicationName);
+ report.put("timestamp", System.currentTimeMillis() / 1000);
+ report.put("dimensions", dimensions);
+ report.put("metrics", metrics);
+
+ return objectMapper.writeValueAsString(report);
+ }
+}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java
index 2984bbd563b..67e0e0c0552 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ComponentsProviderWithMocks.java
@@ -1,6 +1,8 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.integrationTests;
+import com.yahoo.metrics.simple.MetricReceiver;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminStateUpdater;
@@ -21,18 +23,24 @@ import java.util.function.Function;
public class ComponentsProviderWithMocks implements ComponentsProvider {
static final CallOrderVerifier callOrder = new CallOrderVerifier();
static final NodeRepoMock nodeRepositoryMock = new NodeRepoMock(callOrder);
- static final MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder);
+ static final StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrder);
static final OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder);
static final Docker dockerMock = new DockerMock(callOrder);
private Environment environment = new Environment();
private final Function<String, NodeAgent> nodeAgentFactory = (hostName) -> new NodeAgentImpl(hostName,
nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock);
- private NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100);
+ private NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100,
+ new MetricReceiverWrapper(MetricReceiver.nullImplementation));
@Override
public NodeAdminStateUpdater getNodeAdminStateUpdater() {
return new NodeAdminStateUpdater(nodeRepositoryMock, nodeAdmin, 1, 5, orchestratorMock, "localhost");
}
+
+ @Override
+ public MetricReceiverWrapper getMetricReceiverWrapper() {
+ return null;
+ }
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java
index 6948448e336..7ccbeb23166 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/DockerFailTest.java
@@ -1,9 +1,11 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.integrationTests;
+import com.yahoo.metrics.simple.MetricReceiver;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.Docker;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import com.yahoo.vespa.hosted.node.admin.docker.DockerOperationsImpl;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin;
@@ -39,7 +41,7 @@ public class DockerFailTest {
@Before
public void before() throws InterruptedException, UnknownHostException {
callOrder = new CallOrderVerifier();
- MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder);
+ StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrder);
OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder);
NodeRepoMock nodeRepositoryMock = new NodeRepoMock(callOrder);
dockerMock = new DockerMock(callOrder);
@@ -50,7 +52,8 @@ public class DockerFailTest {
Function<String, NodeAgent> nodeAgentFactory = (hostName) ->
new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock);
- NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100);
+ NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100,
+ new MetricReceiverWrapper(MetricReceiver.nullImplementation));
initialContainerNodeSpec = new ContainerNodeSpec(
"hostName",
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java
index f57f6df422e..81c8965a55c 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MultiDockerTest.java
@@ -1,8 +1,10 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.integrationTests;
+import com.yahoo.metrics.simple.MetricReceiver;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl;
@@ -41,7 +43,7 @@ public class MultiDockerTest {
@Before
public void before() throws InterruptedException, UnknownHostException {
callOrder = new CallOrderVerifier();
- MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder);
+ StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrder);
OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder);
nodeRepositoryMock = new NodeRepoMock(callOrder);
dockerMock = new DockerMock(callOrder);
@@ -52,7 +54,8 @@ public class MultiDockerTest {
Function<String, NodeAgent> nodeAgentFactory = (hostName) ->
new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock);
- nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100);
+ nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100,
+ new MetricReceiverWrapper(MetricReceiver.nullImplementation));
updater = new NodeAdminStateUpdater(nodeRepositoryMock, nodeAdmin, 1, 1, orchestratorMock, "basehostname");
}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeStateTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeStateTest.java
index ad45f3ef2f1..e89ebafab8f 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeStateTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/NodeStateTest.java
@@ -1,6 +1,8 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.integrationTests;
+import com.yahoo.metrics.simple.MetricReceiver;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
@@ -46,7 +48,7 @@ public class NodeStateTest {
@Before
public void before() throws InterruptedException, UnknownHostException {
callOrder = new CallOrderVerifier();
- MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder);
+ StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrder);
OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder);
nodeRepositoryMock = new NodeRepoMock(callOrder);
dockerMock = new DockerMock(callOrder);
@@ -57,7 +59,8 @@ public class NodeStateTest {
Function<String, NodeAgent> nodeAgentFactory = (hostName) ->
new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock);
- NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100);
+ NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100,
+ new MetricReceiverWrapper(MetricReceiver.nullImplementation));
initialContainerNodeSpec = new ContainerNodeSpec(
"host1",
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ResumeTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ResumeTest.java
index 9af0d7a56a4..a1db7eb9b97 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ResumeTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/ResumeTest.java
@@ -1,6 +1,8 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.admin.integrationTests;
+import com.yahoo.metrics.simple.MetricReceiver;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdmin;
import com.yahoo.vespa.hosted.node.admin.nodeadmin.NodeAdminImpl;
@@ -37,7 +39,7 @@ public class ResumeTest {
public void test() throws InterruptedException, UnknownHostException {
CallOrderVerifier callOrder = new CallOrderVerifier();
NodeRepoMock nodeRepositoryMock = new NodeRepoMock(callOrder);
- MaintenanceSchedulerMock maintenanceSchedulerMock = new MaintenanceSchedulerMock(callOrder);
+ StorageMaintainerMock maintenanceSchedulerMock = new StorageMaintainerMock(callOrder);
OrchestratorMock orchestratorMock = new OrchestratorMock(callOrder);
DockerMock dockerMock = new DockerMock(callOrder);
@@ -47,7 +49,8 @@ public class ResumeTest {
Function<String, NodeAgent> nodeAgentFactory = (hostName) ->
new NodeAgentImpl(hostName, nodeRepositoryMock, orchestratorMock, new DockerOperationsImpl(dockerMock, environment), maintenanceSchedulerMock);
- NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100);
+ NodeAdmin nodeAdmin = new NodeAdminImpl(dockerMock, nodeAgentFactory, maintenanceSchedulerMock, 100,
+ new MetricReceiverWrapper(MetricReceiver.nullImplementation));
nodeRepositoryMock.addContainerNodeSpec(new ContainerNodeSpec(
"host1",
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MaintenanceSchedulerMock.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java
index 30ddc71f546..7356c2c34b9 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/MaintenanceSchedulerMock.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/integrationTests/StorageMaintainerMock.java
@@ -2,28 +2,30 @@
package com.yahoo.vespa.hosted.node.admin.integrationTests;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
-import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler;
+import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import java.io.IOException;
/**
* @author valerijf
*/
-public class MaintenanceSchedulerMock implements MaintenanceScheduler {
+public class StorageMaintainerMock extends StorageMaintainer {
private final CallOrderVerifier callOrder;
- public MaintenanceSchedulerMock(CallOrderVerifier callOrder) {
+ public StorageMaintainerMock(CallOrderVerifier callOrder) {
this.callOrder = callOrder;
}
@Override
- public void removeOldFilesFromNode(ContainerName containerName) {
+ public void updateDiskUsage(String hostname, ContainerName containerName) {
+ }
+ @Override
+ public void removeOldFilesFromNode(ContainerName containerName) {
}
@Override
public void cleanNodeAdmin() {
-
}
@Override
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
new file mode 100644
index 00000000000..c1603a7535e
--- /dev/null
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/maintenance/StorageMaintainerTest.java
@@ -0,0 +1,30 @@
+package com.yahoo.vespa.hosted.node.admin.maintenance;
+
+import com.yahoo.vespa.hosted.node.maintenance.DeleteOldAppDataTest;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.io.IOException;
+
+import static org.junit.Assert.*;
+
+/**
+ * @author dybis
+ */
+public class StorageMaintainerTest {
+
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
+
+ @Test
+ public void testDiskUsed() throws IOException, InterruptedException {
+ int writeSize = 10000;
+ DeleteOldAppDataTest.writeNBytesToFile(folder.newFile(), writeSize);
+
+ StorageMaintainer storageMaintainer = new StorageMaintainer();
+ long usedBytes = storageMaintainer.getDiscUsedInBytes(folder.getRoot());
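+ // du reports usage in whole filesystem blocks, so allow a generous factor-of-4 tolerance in either direction.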
+ if (usedBytes * 4 < writeSize || usedBytes > writeSize * 4)
+ fail("Used bytes is " + usedBytes + ", but wrote " + writeSize + " bytes, not even close.");
+ }
+} \ No newline at end of file
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java
index e1dac0844c3..5af24e71c3d 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeadmin/NodeAdminImplTest.java
@@ -2,12 +2,14 @@
package com.yahoo.vespa.hosted.node.admin.nodeadmin;
import com.yahoo.collections.Pair;
+import com.yahoo.metrics.simple.MetricReceiver;
+import com.yahoo.vespa.hosted.dockerapi.metrics.MetricReceiverWrapper;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import com.yahoo.vespa.hosted.dockerapi.Container;
import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.Docker;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
-import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler;
+import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgent;
import com.yahoo.vespa.hosted.node.admin.nodeagent.NodeAgentImpl;
import com.yahoo.vespa.hosted.provision.Node;
@@ -48,9 +50,10 @@ public class NodeAdminImplTest {
public void nodeAgentsAreProperlyLifeCycleManaged() throws Exception {
final Docker docker = mock(Docker.class);
final Function<String, NodeAgent> nodeAgentFactory = mock(NodeAgentFactory.class);
- final MaintenanceScheduler maintenanceScheduler = mock(MaintenanceScheduler.class);
+ final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
- final NodeAdminImpl nodeAdmin = new NodeAdminImpl(docker, nodeAgentFactory, maintenanceScheduler, 100);
+ final NodeAdminImpl nodeAdmin = new NodeAdminImpl(docker, nodeAgentFactory, storageMaintainer, 100,
+ new MetricReceiverWrapper(MetricReceiver.nullImplementation));
final NodeAgent nodeAgent1 = mock(NodeAgentImpl.class);
final NodeAgent nodeAgent2 = mock(NodeAgentImpl.class);
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
index 871032005aa..1e748b74ea9 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/nodeagent/NodeAgentImplTest.java
@@ -5,7 +5,7 @@ import com.yahoo.vespa.hosted.dockerapi.ContainerName;
import com.yahoo.vespa.hosted.dockerapi.DockerImage;
import com.yahoo.vespa.hosted.node.admin.ContainerNodeSpec;
import com.yahoo.vespa.hosted.node.admin.docker.DockerOperations;
-import com.yahoo.vespa.hosted.node.admin.maintenance.MaintenanceScheduler;
+import com.yahoo.vespa.hosted.node.admin.maintenance.StorageMaintainer;
import com.yahoo.vespa.hosted.node.admin.noderepository.NodeRepository;
import com.yahoo.vespa.hosted.node.admin.orchestrator.Orchestrator;
import com.yahoo.vespa.hosted.node.admin.orchestrator.OrchestratorException;
@@ -38,9 +38,9 @@ public class NodeAgentImplTest {
private final DockerOperations dockerOperations = mock(DockerOperations.class);
private final NodeRepository nodeRepository = mock(NodeRepository.class);
private final Orchestrator orchestrator = mock(Orchestrator.class);
- private final MaintenanceScheduler maintenanceScheduler = mock(MaintenanceScheduler.class);
+ private final StorageMaintainer storageMaintainer = mock(StorageMaintainer.class);
- private final NodeAgentImpl nodeAgent = new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations, maintenanceScheduler);
+ private final NodeAgentImpl nodeAgent = new NodeAgentImpl(hostName, nodeRepository, orchestrator, dockerOperations, storageMaintainer);
@Test
public void upToDateContainerIsUntouched() throws Exception {
@@ -262,8 +262,8 @@ public class NodeAgentImplTest {
nodeAgent.tick();
- final InOrder inOrder = inOrder(maintenanceScheduler, dockerOperations);
- inOrder.verify(maintenanceScheduler, times(1)).removeOldFilesFromNode(eq(containerName));
+ final InOrder inOrder = inOrder(storageMaintainer, dockerOperations);
+ inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, times(1)).removeContainerIfNeeded(eq(nodeSpec), eq(hostName), any());
verify(orchestrator, never()).resume(any(String.class));
@@ -294,10 +294,10 @@ public class NodeAgentImplTest {
nodeAgent.tick();
- final InOrder inOrder = inOrder(maintenanceScheduler, dockerOperations, nodeRepository);
- inOrder.verify(maintenanceScheduler, times(1)).removeOldFilesFromNode(eq(containerName));
+ final InOrder inOrder = inOrder(storageMaintainer, dockerOperations, nodeRepository);
+ inOrder.verify(storageMaintainer, times(1)).removeOldFilesFromNode(eq(containerName));
inOrder.verify(dockerOperations, times(1)).removeContainerIfNeeded(eq(nodeSpec), eq(hostName), any());
- inOrder.verify(maintenanceScheduler, times(1)).deleteContainerStorage(eq(containerName));
+ inOrder.verify(storageMaintainer, times(1)).deleteContainerStorage(eq(containerName));
inOrder.verify(nodeRepository, times(1)).markAsReady(eq(hostName));
verify(dockerOperations, never()).startContainerIfNeeded(any());
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java
index 761aa1fad53..39af637a45a 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/orchestrator/OrchestratorImplTest.java
@@ -34,7 +34,7 @@ public class OrchestratorImplTest {
public void testSuspendCall() {
when(requestExecutor.put(
OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName+ "/suspended",
- OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT,
+ OrchestratorImpl.WEB_SERVICE_PORT,
Optional.empty(),
UpdateHostResponse.class
)).thenReturn(new UpdateHostResponse(hostName, null));
@@ -47,7 +47,7 @@ public class OrchestratorImplTest {
public void testSuspendCallWithFailureReason() {
when(requestExecutor.put(
OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName+ "/suspended",
- OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT,
+ OrchestratorImpl.WEB_SERVICE_PORT,
Optional.empty(),
UpdateHostResponse.class
)).thenReturn(new UpdateHostResponse(hostName, new HostStateChangeDenialReason("hostname", "service", "fail")));
@@ -87,7 +87,7 @@ public class OrchestratorImplTest {
public void testResumeCall() {
when(requestExecutor.delete(
OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName+ "/suspended",
- OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT,
+ OrchestratorImpl.WEB_SERVICE_PORT,
UpdateHostResponse.class
)).thenReturn(new UpdateHostResponse(hostName, null));
@@ -99,7 +99,7 @@ public class OrchestratorImplTest {
public void testResumeCallWithFailureReason() {
when(requestExecutor.delete(
OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_API + "/" + hostName+ "/suspended",
- OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT,
+ OrchestratorImpl.WEB_SERVICE_PORT,
UpdateHostResponse.class
)).thenReturn(new UpdateHostResponse(hostName, new HostStateChangeDenialReason("hostname", "service", "fail")));
@@ -140,7 +140,7 @@ public class OrchestratorImplTest {
when(requestExecutor.put(
OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API,
- OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT,
+ OrchestratorImpl.WEB_SERVICE_PORT,
Optional.of(new BatchHostSuspendRequest(parentHostName, hostNames)),
BatchOperationResult.class
)).thenReturn(BatchOperationResult.successResult());
@@ -157,7 +157,7 @@ public class OrchestratorImplTest {
when(requestExecutor.put(
OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API,
- OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT,
+ OrchestratorImpl.WEB_SERVICE_PORT,
Optional.of(new BatchHostSuspendRequest(parentHostName, hostNames)),
BatchOperationResult.class
)).thenReturn(new BatchOperationResult(failureReason));
@@ -174,7 +174,7 @@ public class OrchestratorImplTest {
when(requestExecutor.put(
OrchestratorImpl.ORCHESTRATOR_PATH_PREFIX_HOST_SUSPENSION_API,
- OrchestratorImpl.HARDCODED_ORCHESTRATOR_PORT,
+ OrchestratorImpl.WEB_SERVICE_PORT,
Optional.of(new BatchHostSuspendRequest(parentHostName, hostNames)),
BatchOperationResult.class
)).thenThrow(new RuntimeException(exceptionMessage));
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/restapi/SecretAgentHandlerTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/restapi/SecretAgentHandlerTest.java
new file mode 100644
index 00000000000..bd9b81bb17a
--- /dev/null
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/admin/restapi/SecretAgentHandlerTest.java
@@ -0,0 +1,31 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+package com.yahoo.vespa.hosted.node.admin.restapi;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import org.junit.Test;
+
+import java.util.regex.Pattern;
+
+import static org.hamcrest.Matchers.matchesPattern;
+import static org.junit.Assert.*;
+
+/**
+ * @author valerijf
+ */
+public class SecretAgentHandlerTest {
+ @Test
+ public void testSecretAgentFormat() throws JsonProcessingException {
+ SecretAgentHandler secretAgentHandler = new SecretAgentHandler();
+ secretAgentHandler
+ .withDimension("host", "host.name.test.yahoo.com")
+ .withDimension("dimention", 6)
+ .withMetric("runtime", 0.0254)
+ .withMetric("memory", 321415L);
+
+ String expectedJson = Pattern.quote("{\"application\":\"docker\",\"timestamp\":") +
+ "[0-9]{10}" + // The timestamp is (currently) 10 digit long numbe, update to 11 on 20/11/2286
+ Pattern.quote(",\"dimensions\":{\"host\":\"host.name.test.yahoo.com\",\"dimention\":6},\"metrics\":{\"memory\":321415,\"runtime\":0.0254}}");
+
+ assertThat(secretAgentHandler.toJson(), matchesPattern(expectedJson));
+ }
+}
diff --git a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/maintenance/DeleteOldAppDataTest.java b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/maintenance/DeleteOldAppDataTest.java
index 84922852365..462216ea827 100644
--- a/node-admin/src/test/java/com/yahoo/vespa/hosted/node/maintenance/DeleteOldAppDataTest.java
+++ b/node-admin/src/test/java/com/yahoo/vespa/hosted/node/maintenance/DeleteOldAppDataTest.java
@@ -1,15 +1,14 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.node.maintenance;
-import org.apache.commons.lang3.StringUtils;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.File;
-import java.io.FileWriter;
import java.io.IOException;
+import java.nio.file.Files;
import java.time.Duration;
import java.util.Arrays;
@@ -157,13 +156,13 @@ public class DeleteOldAppDataTest {
initSubDirectories();
File temp1 = new File(folder.getRoot(), "small_file");
- writeNBytesToFiles(temp1, 50);
+ writeNBytesToFile(temp1, 50);
File temp2 = new File(folder.getRoot(), "some_file");
- writeNBytesToFiles(temp2, 20);
+ writeNBytesToFile(temp2, 20);
File temp3 = new File(folder.getRoot(), "test_folder1/some_other_file");
- writeNBytesToFiles(temp3, 75);
+ writeNBytesToFile(temp3, 75);
DeleteOldAppData.deleteFilesLargerThan(folder.getRoot(), 10);
@@ -238,9 +237,7 @@ public class DeleteOldAppDataTest {
return total;
}
- private static void writeNBytesToFiles(File file, int nBytes) throws IOException {
- try (FileWriter writer = new FileWriter(file)) {
- writer.write(StringUtils.repeat("0", nBytes));
- }
+ public static void writeNBytesToFile(File file, int nBytes) throws IOException {
+ Files.write(file.toPath(), new byte[nBytes]);
}
}
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
index f2191b7bbb1..f194d9d53fd 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailer.java
@@ -87,7 +87,7 @@ public class NodeFailer extends Maintainer {
// Active nodes
for (Node node : determineActiveNodeDownStatus()) {
Instant graceTimeEnd = node.history().event(History.Event.Type.down).get().at().plus(downTimeLimit);
- if (graceTimeEnd.isBefore(clock.instant()) && ! applicationSuspended(node))
+ if (graceTimeEnd.isBefore(clock.instant()) && ! applicationSuspended(node) && failAllowedFor(node.type()))
failActive(node);
}
}
@@ -154,6 +154,17 @@ public class NodeFailer extends Maintainer {
}
/**
+ * We can attempt to fail any number of *tenant* nodes because the operation takes effect only if
+ * the node is replaced.
+ * However, nodes of other types are not replaced (because all nodes of the type are used by a single application),
+ * so we allow only one to be in the failed state at any time to protect against runaway failing.
+ */
+ private boolean failAllowedFor(NodeType nodeType) {
+ if (nodeType == NodeType.tenant) return true;
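+ // Non-tenant node types: allow a new failure only when no node of this type is already in the failed state.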
+ return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
+ }
+
+ /**
* If the node is positively DOWN, and there is no "down" history record, we add it.
* If the node is positively UP we remove any "down" history record.
*
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Flavor.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Flavor.java
index 0893ca75f92..1039beea7c0 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Flavor.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/node/Flavor.java
@@ -17,6 +17,7 @@ public class Flavor {
private final String name;
private final int cost;
+ private final boolean isStock;
private final Type type;
private final double minCpuCores;
private final double minMainMemoryAvailableGb;
@@ -32,6 +33,7 @@ public class Flavor {
this.name = flavorConfig.name();
this.replacesFlavors = new ArrayList<>();
this.cost = flavorConfig.cost();
+ this.isStock = flavorConfig.stock();
this.type = Type.valueOf(flavorConfig.environment());
this.minCpuCores = flavorConfig.minCpuCores();
this.minMainMemoryAvailableGb = flavorConfig.minMainMemoryAvailableGb();
@@ -39,6 +41,7 @@ public class Flavor {
this.description = flavorConfig.description();
}
+ /** Returns the unique identity of this flavor */
public String name() { return name; }
/**
@@ -47,26 +50,18 @@ public class Flavor {
* @return Monthly cost in USD
*/
public int cost() { return cost; }
+
+ public boolean isStock() { return isStock; }
- public double getMinMainMemoryAvailableGb() {
- return minMainMemoryAvailableGb;
- }
+ public double getMinMainMemoryAvailableGb() { return minMainMemoryAvailableGb; }
- public double getMinDiskAvailableGb() {
- return minDiskAvailableGb;
- }
+ public double getMinDiskAvailableGb() { return minDiskAvailableGb; }
- public double getMinCpuCores() {
- return minCpuCores;
- }
+ public double getMinCpuCores() { return minCpuCores; }
- public String getDescription() {
- return description;
- }
+ public String getDescription() { return description; }
- public Type getType() {
- return type;
- }
+ public Type getType() { return type; }
/**
* Returns the canonical name of this flavor - which is the name which should be used as an interface to users.
@@ -78,11 +73,16 @@ public class Flavor {
*
* The logic is that we can use this to capture the gritty details of configurations in exact flavor names
* but also encourage users to refer to them by a common name by letting such flavor variants declare that they
- * replace the canonical name we want. However, if a node replaces multiple names, it means that a former
- * flavor distinction has become obsolete so this name becomes one of the canonical names users should refer to.
+ * replace the canonical name we want. However, if a flavor replaces multiple names, we have no basis for choosing one
+ * of them as the canonical name, so we return this flavor's own name as the canonical one.
*/
public String canonicalName() {
- return replacesFlavors.size() == 1 ? replacesFlavors.get(0).canonicalName() : name;
+ return isCanonical() ? name : replacesFlavors.get(0).canonicalName();
+ }
+
+ /** Returns whether this is a canonical flavor */
+ public boolean isCanonical() {
+ return replacesFlavors.size() != 1;
}
/**
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
index 6665833c1a2..a759a8fca37 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/CapacityPolicies.java
@@ -31,7 +31,7 @@ public class CapacityPolicies {
switch(zone.environment()) {
case dev : case test : return 1;
- case perf : return Math.min(requestedCapacity.nodeCount(), 10); // TODO: Decrease to 3 when isRequired is implemented
+ case perf : return Math.min(requestedCapacity.nodeCount(), 3);
case staging: return requestedNodes <= 1 ? requestedNodes : Math.max(2, requestedNodes / 10);
case prod : return ensureRedundancy(requestedCapacity.nodeCount());
default : throw new IllegalArgumentException("Unsupported environment " + zone.environment());
@@ -53,7 +53,7 @@ public class CapacityPolicies {
/**
* Throw if the node count is 1
-
+ *
* @return the argument node count
* @throws IllegalArgumentException if only one node is requested
*/
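
The effect of the perf cap above can be sketched with a small hypothetical helper; the real CapacityPolicies also applies the staging ratio and prod redundancy shown in the hunk, which are collapsed into the default case here:

    class PerfCapSketch {
        /** Caps the node count the way the switch above now does for dev/test and perf */
        static int decideSize(String environment, int requestedNodes) {
            switch (environment) {
                case "dev": case "test": return 1;
                case "perf": return Math.min(requestedNodes, 3); // previously capped at 10
                default: return requestedNodes;
            }
        }

        public static void main(String[] args) {
            System.out.println(decideSize("perf", 10)); // prints 3: a 10-node perf request is now trimmed to 3
            System.out.println(decideSize("dev", 10));  // prints 1
        }
    }
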
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
index 9ac8204789c..ea205a15040 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/GroupPreparer.java
@@ -5,13 +5,11 @@ import com.google.common.collect.ComparisonChain;
import com.yahoo.config.provision.ApplicationId;
import com.yahoo.config.provision.ClusterMembership;
import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.NodeType;
import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.lang.MutableInteger;
import com.yahoo.transaction.Mutex;
import com.yahoo.vespa.hosted.provision.Node;
import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.node.Flavor;
import java.time.Clock;
import java.util.ArrayList;
@@ -67,7 +65,7 @@ class GroupPreparer {
if (nodeList.saturated()) return nodeList.finalNodes(surplusActiveNodes);
// Use active nodes from other groups that will otherwise be retired
- List<Node> accepted = nodeList.offer(sortNodeListByCost(surplusActiveNodes), canChangeGroup);
+ List<Node> accepted = nodeList.offer(prioritizeNodes(surplusActiveNodes, requestedNodes), canChangeGroup);
surplusActiveNodes.removeAll(accepted);
if (nodeList.saturated()) return nodeList.finalNodes(surplusActiveNodes);
@@ -76,14 +74,14 @@ class GroupPreparer {
if (nodeList.saturated()) return nodeList.finalNodes(surplusActiveNodes);
// Use inactive nodes
- accepted = nodeList.offer(sortNodeListByCost(nodeRepository.getNodes(application, Node.State.inactive)), !canChangeGroup);
+ accepted = nodeList.offer(prioritizeNodes(nodeRepository.getNodes(application, Node.State.inactive), requestedNodes), !canChangeGroup);
nodeList.update(nodeRepository.reserve(accepted));
if (nodeList.saturated()) return nodeList.finalNodes(surplusActiveNodes);
// Use new, ready nodes. Lock ready pool to ensure that nodes are not grabbed by others.
try (Mutex readyLock = nodeRepository.lockUnallocated()) {
List<Node> readyNodes = nodeRepository.getNodes(requestedNodes.type(), Node.State.ready);
- accepted = nodeList.offer(stripeOverHosts(sortNodeListByCost(readyNodes)), !canChangeGroup);
+ accepted = nodeList.offer(stripeOverHosts(prioritizeNodes(readyNodes, requestedNodes)), !canChangeGroup);
nodeList.update(nodeRepository.reserve(accepted));
}
@@ -101,13 +99,27 @@ class GroupPreparer {
}
}
- /** Sort nodes according to their cost, and if the cost is equal, sort by hostname (to get stable tests) */
- private List<Node> sortNodeListByCost(List<Node> nodeList) {
- Collections.sort(nodeList, (n1, n2) -> ComparisonChain.start()
- .compare(n1.flavor().cost(), n2.flavor().cost())
- .compare(n1.hostname(), n2.hostname())
- .result()
- );
+ /**
+ * Returns the node list in prioritized order, where the nodes we would most prefer the application
+ * to use come first
+ */
+ private List<Node> prioritizeNodes(List<Node> nodeList, NodeSpec nodeSpec) {
+ if ( nodeSpec.specifiesNonStockFlavor()) { // sort by exact before inexact flavor match, increasing cost, hostname
+ Collections.sort(nodeList, (n1, n2) -> ComparisonChain.start()
+ .compareTrueFirst(nodeSpec.matchesExactly(n1.flavor()), nodeSpec.matchesExactly(n2.flavor()))
+ .compare(n1.flavor().cost(), n2.flavor().cost())
+ .compare(n1.hostname(), n2.hostname())
+ .result()
+ );
+ }
+ else { // sort by increasing cost, hostname
+ Collections.sort(nodeList, (n1, n2) -> ComparisonChain.start()
+ .compare(n1.flavor().cost(), n2.flavor().cost())
+ .compare(n1.hostname(), n2.hostname())
+ .result()
+ );
+ }
return nodeList;
}
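
The ordering that prioritizeNodes establishes can be exercised in isolation. The sketch below is hypothetical test scaffolding: Candidate stands in for Node and carries only the three fields the comparator reads, while the ComparisonChain calls are the same ones used above:

    import com.google.common.collect.ComparisonChain;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    class PrioritizeSketch {
        static class Candidate {
            final String hostname; final int cost; final boolean exactMatch;
            Candidate(String hostname, int cost, boolean exactMatch) {
                this.hostname = hostname; this.cost = cost; this.exactMatch = exactMatch;
            }
            @Override public String toString() { return hostname; }
        }

        public static void main(String[] args) {
            List<Candidate> nodes = new ArrayList<>(Arrays.asList(
                    new Candidate("host3", 5, false),
                    new Candidate("host1", 9, true),
                    new Candidate("host2", 5, false)));
            // Exact flavor matches first, then increasing cost, then hostname for a stable order
            Collections.sort(nodes, (n1, n2) -> ComparisonChain.start()
                    .compareTrueFirst(n1.exactMatch, n2.exactMatch)
                    .compare(n1.cost, n2.cost)
                    .compare(n1.hostname, n2.hostname)
                    .result());
            System.out.println(nodes); // [host1, host2, host3]: the exact match wins even though it is the most expensive
        }
    }
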
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
index 9bdce8d5921..2ce364daa07 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/provisioning/NodeSpec.java
@@ -19,7 +19,13 @@ public interface NodeSpec {
/** Returns whether the given flavor is compatible with this spec */
boolean isCompatible(Flavor flavor);
-
+
+ /** Returns whether the given flavor is exactly specified by this node spec */
+ boolean matchesExactly(Flavor flavor);
+
+ /** Returns whether this requests a non-stock flavor */
+ boolean specifiesNonStockFlavor();
+
/** Returns whether the given node count is sufficient to consider this spec fulfilled to the maximum amount */
boolean saturatedBy(int count);
@@ -59,6 +65,12 @@ public interface NodeSpec {
public boolean isCompatible(Flavor flavor) { return flavor.satisfies(this.flavor); }
@Override
+ public boolean matchesExactly(Flavor flavor) { return flavor.equals(this.flavor); }
+
+ @Override
+ public boolean specifiesNonStockFlavor() { return ! flavor.isStock(); }
+
+ @Override
public boolean fulfilledBy(int count) { return count >= this.count; }
@Override
@@ -91,6 +103,12 @@ public interface NodeSpec {
public boolean isCompatible(Flavor flavor) { return true; }
@Override
+ public boolean matchesExactly(Flavor flavor) { return false; }
+
+ @Override
+ public boolean specifiesNonStockFlavor() { return false; }
+
+ @Override
public boolean fulfilledBy(int count) { return true; }
@Override
diff --git a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/FlavorConfigBuilder.java b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/FlavorConfigBuilder.java
index b312e7c85ca..748a5b6c558 100644
--- a/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/FlavorConfigBuilder.java
+++ b/node-repository/src/main/java/com/yahoo/vespa/hosted/provision/testutils/FlavorConfigBuilder.java
@@ -31,6 +31,19 @@ public class FlavorConfigBuilder {
return flavor;
}
+ public NodeRepositoryConfig.Flavor.Builder addNonStockFlavor(String flavorName, double cpu, double mem, double disk, Flavor.Type type) {
+ NodeRepositoryConfig.Flavor.Builder flavor = new NodeRepositoryConfig.Flavor.Builder();
+ flavor.name(flavorName);
+ flavor.description("Flavor-name-is-" + flavorName);
+ flavor.minDiskAvailableGb(disk);
+ flavor.minCpuCores(cpu);
+ flavor.minMainMemoryAvailableGb(mem);
+ flavor.stock(false);
+ flavor.environment(type.name());
+ builder.flavor(flavor);
+ return flavor;
+ }
+
public void addReplaces(String replaces, NodeRepositoryConfig.Flavor.Builder flavor) {
NodeRepositoryConfig.Flavor.Replaces.Builder flavorReplaces = new NodeRepositoryConfig.Flavor.Replaces.Builder();
flavorReplaces.name(replaces);
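
A brief usage sketch of the new helper. Only addNonStockFlavor and addReplaces come from the code above; the no-argument constructor, the flavor name, the resource numbers and the BARE_METAL type are illustrative assumptions:

    import com.yahoo.vespa.config.nodes.NodeRepositoryConfig;
    import com.yahoo.vespa.hosted.provision.node.Flavor;
    import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder;

    class NonStockFlavorSketch {
        public static void main(String[] args) {
            FlavorConfigBuilder builder = new FlavorConfigBuilder();
            // Declare a flavor we own a fixed amount of; stock=false means it is only handed out on an exact flavor match
            NodeRepositoryConfig.Flavor.Builder legacy =
                    builder.addNonStockFlavor("legacy-hw", 4.0, 32.0, 400.0, Flavor.Type.BARE_METAL);
            // Let it still answer to the common name "default" for users who do not care about the exact hardware
            builder.addReplaces("default", legacy);
        }
    }
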
diff --git a/node-repository/src/main/resources/configdefinitions/node-repository.def b/node-repository/src/main/resources/configdefinitions/node-repository.def
index cd053adca61..f9b500594bd 100644
--- a/node-repository/src/main/resources/configdefinitions/node-repository.def
+++ b/node-repository/src/main/resources/configdefinitions/node-repository.def
@@ -18,6 +18,13 @@ flavor[].replaces[].name string
# the expected lifetime of the node (usually three years).
flavor[].cost int default=0
+# A stock flavor is any flavor which we expect to buy more of in the future.
+# Stock flavors are assigned to applications by cost priority.
+#
+# Non-stock flavors are used for nodes for which a fixed amount has already been purchased
+# for some historical reason. These nodes are assigned to applications by exact flavor match, ignoring cost.
+flavor[].stock bool default=true
+
# The type of node (e.g. bare metal, docker..).
flavor[].environment string default="undefined"
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainerTest.java
index 670c763ba16..bc902a4d910 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/ApplicationMaintainerTest.java
@@ -143,8 +143,10 @@ public class ApplicationMaintainerTest {
void runApplicationMaintainer() {
Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
- apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, wantedNodesApp1, Optional.of("default"), 1));
- apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, wantedNodesApp2, Optional.of("default"), 1));
+ apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1,
+ Capacity.fromNodeCount(wantedNodesApp1, Optional.of("default")), 1));
+ apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2,
+ Capacity.fromNodeCount(wantedNodesApp2, Optional.of("default")), 1));
MockDeployer deployer = new MockDeployer(provisioner, apps);
new ApplicationMaintainer(deployer, nodeRepository, Duration.ofMinutes(30)).run();
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MockDeployer.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MockDeployer.java
index c12b65f046d..ea7ed099b4e 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MockDeployer.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/MockDeployer.java
@@ -76,17 +76,15 @@ public class MockDeployer implements Deployer {
/** An application context which substitutes for an application repository */
public static class ApplicationContext {
- private ApplicationId id;
- private ClusterSpec cluster;
- private int wantedNodes;
- private Optional<String> flavor;
- private int groups;
+ private final ApplicationId id;
+ private final ClusterSpec cluster;
+ private final Capacity capacity;
+ private final int groups;
- public ApplicationContext(ApplicationId id, ClusterSpec cluster, int wantedNodes, Optional<String> flavor, int groups) {
+ public ApplicationContext(ApplicationId id, ClusterSpec cluster, Capacity capacity, int groups) {
this.id = id;
this.cluster = cluster;
- this.wantedNodes = wantedNodes;
- this.flavor = flavor;
+ this.capacity = capacity;
this.groups = groups;
}
@@ -96,7 +94,7 @@ public class MockDeployer implements Deployer {
public ClusterSpec cluster() { return cluster; }
private List<HostSpec> prepare(NodeRepositoryProvisioner provisioner) {
- return provisioner.prepare(id, cluster, Capacity.fromNodeCount(wantedNodes, flavor), groups, null);
+ return provisioner.prepare(id, cluster, capacity, groups, null);
}
}
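
With the constructor now taking a Capacity, the two construction patterns used by the tests further down look like this; the sketch merely gathers them in one place, with the application id and cluster spec passed in as parameters:

    import java.util.Optional;
    import com.yahoo.config.provision.ApplicationId;
    import com.yahoo.config.provision.Capacity;
    import com.yahoo.config.provision.ClusterSpec;
    import com.yahoo.config.provision.NodeType;
    import com.yahoo.vespa.hosted.provision.maintenance.MockDeployer;

    class ApplicationContextSketch {
        void examples(ApplicationId app, ClusterSpec cluster) {
            // Regular tenant application: node count plus a named flavor
            new MockDeployer.ApplicationContext(app, cluster, Capacity.fromNodeCount(5, Optional.of("default")), 1);
            // Proxy application: capacity expressed as a required node type instead of a count
            new MockDeployer.ApplicationContext(app, cluster, Capacity.fromRequiredNodeType(NodeType.proxy), 1);
        }
    }
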
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
new file mode 100644
index 00000000000..4e63e7a6203
--- /dev/null
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailTester.java
@@ -0,0 +1,334 @@
+package com.yahoo.vespa.hosted.provision.maintenance;
+
+import com.yahoo.config.provision.ApplicationId;
+import com.yahoo.config.provision.ApplicationName;
+import com.yahoo.config.provision.Capacity;
+import com.yahoo.config.provision.ClusterSpec;
+import com.yahoo.config.provision.Environment;
+import com.yahoo.config.provision.HostLivenessTracker;
+import com.yahoo.config.provision.HostSpec;
+import com.yahoo.config.provision.InstanceName;
+import com.yahoo.config.provision.NodeType;
+import com.yahoo.config.provision.RegionName;
+import com.yahoo.config.provision.TenantName;
+import com.yahoo.config.provision.Zone;
+import com.yahoo.test.ManualClock;
+import com.yahoo.transaction.NestedTransaction;
+import com.yahoo.vespa.applicationmodel.ApplicationInstance;
+import com.yahoo.vespa.applicationmodel.ApplicationInstanceId;
+import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
+import com.yahoo.vespa.applicationmodel.ClusterId;
+import com.yahoo.vespa.applicationmodel.ConfigId;
+import com.yahoo.vespa.applicationmodel.HostName;
+import com.yahoo.vespa.applicationmodel.ServiceCluster;
+import com.yahoo.vespa.applicationmodel.ServiceInstance;
+import com.yahoo.vespa.applicationmodel.ServiceType;
+import com.yahoo.vespa.applicationmodel.TenantId;
+import com.yahoo.vespa.curator.Curator;
+import com.yahoo.vespa.curator.mock.MockCurator;
+import com.yahoo.vespa.curator.transaction.CuratorTransaction;
+import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.NodeRepository;
+import com.yahoo.vespa.hosted.provision.node.Flavor;
+import com.yahoo.vespa.hosted.provision.node.NodeFlavors;
+import com.yahoo.vespa.hosted.provision.provisioning.NodeRepositoryProvisioner;
+import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder;
+import com.yahoo.vespa.orchestrator.ApplicationIdNotFoundException;
+import com.yahoo.vespa.orchestrator.ApplicationStateChangeDeniedException;
+import com.yahoo.vespa.orchestrator.BatchHostNameNotFoundException;
+import com.yahoo.vespa.orchestrator.BatchInternalErrorException;
+import com.yahoo.vespa.orchestrator.HostNameNotFoundException;
+import com.yahoo.vespa.orchestrator.Orchestrator;
+import com.yahoo.vespa.orchestrator.policy.BatchHostStateChangeDeniedException;
+import com.yahoo.vespa.orchestrator.policy.HostStateChangeDeniedException;
+import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
+import com.yahoo.vespa.orchestrator.status.HostStatus;
+import com.yahoo.vespa.service.monitor.ServiceMonitor;
+import com.yahoo.vespa.service.monitor.ServiceMonitorStatus;
+
+import java.time.Clock;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * @author bratseth
+ */
+public class NodeFailTester {
+
+ // Immutable components
+ public static final ApplicationId app1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz"));
+ public static final ApplicationId app2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz"));
+ public static final NodeFlavors nodeFlavors = FlavorConfigBuilder.createDummies("default", "docker");
+ private static final Zone zone = new Zone(Environment.prod, RegionName.from("us-east"));
+ private static final Duration downtimeLimitOneHour = Duration.ofMinutes(60);
+
+ // Components with state
+ public final ManualClock clock;
+ public final NodeRepository nodeRepository;
+ public NodeFailer failer;
+ public ServiceMonitorStub serviceMonitor;
+ public MockDeployer deployer;
+ private final TestHostLivenessTracker hostLivenessTracker;
+ private final Orchestrator orchestrator;
+ private final NodeRepositoryProvisioner provisioner;
+ private final Curator curator;
+
+ public NodeFailTester() {
+ clock = new ManualClock();
+ curator = new MockCurator();
+ nodeRepository = new NodeRepository(nodeFlavors, curator, clock);
+ provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone);
+ hostLivenessTracker = new TestHostLivenessTracker(clock);
+ orchestrator = new OrchestratorMock();
+ }
+
+ public static NodeFailTester withTwoApplications() {
+ NodeFailTester tester = new NodeFailTester();
+
+ tester.createReadyNodes(16);
+ tester.createHostNodes(3);
+
+ // Create applications
+ ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Optional.empty());
+ ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Optional.empty());
+ int wantedNodesApp1 = 5;
+ int wantedNodesApp2 = 7;
+ tester.activate(app1, clusterApp1, wantedNodesApp1);
+ tester.activate(app2, clusterApp2, wantedNodesApp2);
+ assertEquals(wantedNodesApp1, tester.nodeRepository.getNodes(app1, Node.State.active).size());
+ assertEquals(wantedNodesApp2, tester.nodeRepository.getNodes(app2, Node.State.active).size());
+
+ Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
+ apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, Capacity.fromNodeCount(wantedNodesApp1, Optional.of("default")), 1));
+ apps.put(app2, new MockDeployer.ApplicationContext(app2, clusterApp2, Capacity.fromNodeCount(wantedNodesApp2, Optional.of("default")), 1));
+ tester.deployer = new MockDeployer(tester.provisioner, apps);
+ tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository);
+ tester.failer = tester.createFailer();
+ return tester;
+ }
+
+ public static NodeFailTester withProxyApplication() {
+ NodeFailTester tester = new NodeFailTester();
+
+ tester.createReadyNodes(16, NodeType.proxy);
+
+ // Create application
+ Capacity allProxies = Capacity.fromRequiredNodeType(NodeType.proxy);
+ ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Optional.empty());
+ tester.activate(app1, clusterApp1, allProxies);
+ assertEquals(16, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size());
+
+ Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
+ apps.put(app1, new MockDeployer.ApplicationContext(app1, clusterApp1, allProxies, 1));
+ tester.deployer = new MockDeployer(tester.provisioner, apps);
+ tester.serviceMonitor = new ServiceMonitorStub(apps, tester.nodeRepository);
+ tester.failer = tester.createFailer();
+ return tester;
+ }
+
+ public void suspend(ApplicationId app) {
+ try {
+ orchestrator.suspend(app);
+ }
+ catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public NodeFailer createFailer() {
+ return new NodeFailer(deployer, hostLivenessTracker, serviceMonitor, nodeRepository, downtimeLimitOneHour, clock, orchestrator);
+ }
+
+ public void allNodesMakeAConfigRequestExcept(Node ... deadNodeArray) {
+ Set<Node> deadNodes = new HashSet<>(Arrays.asList(deadNodeArray));
+ for (Node node : nodeRepository.getNodes(NodeType.tenant)) {
+ if ( ! deadNodes.contains(node) && node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
+ hostLivenessTracker.receivedRequestFrom(node.hostname());
+ }
+ }
+
+ public void createReadyNodes(int count) {
+ createReadyNodes(count, 0);
+ }
+
+ public void createReadyNodes(int count, NodeType nodeType) {
+ createReadyNodes(count, 0, nodeFlavors.getFlavorOrThrow("default"), nodeType);
+ }
+
+ public void createReadyNodes(int count, int startIndex) {
+ createReadyNodes(count, startIndex, "default");
+ }
+
+ public void createReadyNodes(int count, int startIndex, String flavor) {
+ createReadyNodes(count, startIndex, nodeFlavors.getFlavorOrThrow(flavor), NodeType.tenant);
+ }
+
+ private void createReadyNodes(int count, int startIndex, Flavor flavor, NodeType nodeType) {
+ List<Node> nodes = new ArrayList<>(count);
+ for (int i = startIndex; i < startIndex + count; i++)
+ nodes.add(nodeRepository.createNode("node" + i, "host" + i, Optional.empty(), flavor, nodeType));
+ nodes = nodeRepository.addNodes(nodes);
+ nodeRepository.setReady(nodes);
+ }
+
+ private void createHostNodes(int count) {
+ List<Node> nodes = new ArrayList<>(count);
+ for (int i = 0; i < count; i++)
+ nodes.add(nodeRepository.createNode("parent" + i, "parent" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host));
+ nodes = nodeRepository.addNodes(nodes);
+ nodeRepository.setReady(nodes);
+ }
+
+ private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount) {
+ activate(applicationId, cluster, Capacity.fromNodeCount(nodeCount));
+ }
+ private void activate(ApplicationId applicationId, ClusterSpec cluster, Capacity capacity) {
+ List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, capacity, 1, null);
+ NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
+ provisioner.activate(transaction, applicationId, hosts);
+ transaction.commit();
+ }
+
+ /** Returns the node with the highest membership index from the given set of allocated nodes */
+ public Node highestIndex(List<Node> nodes) {
+ Node highestIndex = null;
+ for (Node node : nodes) {
+ if (highestIndex == null || node.allocation().get().membership().index() >
+ highestIndex.allocation().get().membership().index())
+ highestIndex = node;
+ }
+ return highestIndex;
+ }
+
+ /** This is a fully functional implementation */
+ private static class TestHostLivenessTracker implements HostLivenessTracker {
+
+ private final Clock clock;
+ private final Map<String, Instant> lastRequestFromHost = new HashMap<>();
+
+ public TestHostLivenessTracker(Clock clock) {
+ this.clock = clock;
+ }
+
+ @Override
+ public void receivedRequestFrom(String hostname) {
+ lastRequestFromHost.put(hostname, clock.instant());
+ }
+
+ @Override
+ public Optional<Instant> lastRequestFrom(String hostname) {
+ return Optional.ofNullable(lastRequestFromHost.get(hostname));
+ }
+
+ }
+
+ public static class ServiceMonitorStub implements ServiceMonitor {
+
+ private final Map<ApplicationId, MockDeployer.ApplicationContext> apps;
+ private final NodeRepository nodeRepository;
+
+ private Set<String> downHosts = new HashSet<>();
+ private boolean statusIsKnown = true;
+
+ /** Create a service monitor where all nodes are initially up */
+ public ServiceMonitorStub(Map<ApplicationId, MockDeployer.ApplicationContext> apps, NodeRepository nodeRepository) {
+ this.apps = apps;
+ this.nodeRepository = nodeRepository;
+ }
+
+ public void setHostDown(String hostname) {
+ downHosts.add(hostname);
+ }
+
+ public void setHostUp(String hostname) {
+ downHosts.remove(hostname);
+ }
+
+ public void setStatusIsKnown(boolean statusIsKnown) {
+ this.statusIsKnown = statusIsKnown;
+ }
+
+ private ServiceMonitorStatus getHostStatus(String hostname) {
+ if ( ! statusIsKnown) return ServiceMonitorStatus.NOT_CHECKED;
+ if (downHosts.contains(hostname)) return ServiceMonitorStatus.DOWN;
+ return ServiceMonitorStatus.UP;
+ }
+
+ @Override
+ public Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> queryStatusOfAllApplicationInstances() {
+ // Convert apps information to the response payload to return
+ Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> status = new HashMap<>();
+ for (Map.Entry<ApplicationId, MockDeployer.ApplicationContext> app : apps.entrySet()) {
+ Set<ServiceInstance<ServiceMonitorStatus>> serviceInstances = new HashSet<>();
+ for (Node node : nodeRepository.getNodes(app.getValue().id(), Node.State.active)) {
+ serviceInstances.add(new ServiceInstance<>(new ConfigId("configid"),
+ new HostName(node.hostname()),
+ getHostStatus(node.hostname())));
+ }
+ Set<ServiceCluster<ServiceMonitorStatus>> serviceClusters = new HashSet<>();
+ serviceClusters.add(new ServiceCluster<>(new ClusterId(app.getValue().cluster().id().value()),
+ new ServiceType("serviceType"),
+ serviceInstances));
+ TenantId tenantId = new TenantId(app.getKey().tenant().value());
+ ApplicationInstanceId applicationInstanceId = new ApplicationInstanceId(app.getKey().application().value());
+ status.put(new ApplicationInstanceReference(tenantId, applicationInstanceId),
+ new ApplicationInstance<>(tenantId, applicationInstanceId, serviceClusters));
+ }
+ return status;
+ }
+
+ }
+
+ class OrchestratorMock implements Orchestrator {
+
+ Set<ApplicationId> suspendedApplications = new HashSet<>();
+
+ @Override
+ public HostStatus getNodeStatus(HostName hostName) throws HostNameNotFoundException {
+ return null;
+ }
+
+ @Override
+ public void resume(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {}
+
+ @Override
+ public void suspend(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {}
+
+ @Override
+ public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationId appId) throws ApplicationIdNotFoundException {
+ return suspendedApplications.contains(appId)
+ ? ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN : ApplicationInstanceStatus.NO_REMARKS;
+ }
+
+ @Override
+ public Set<ApplicationId> getAllSuspendedApplications() {
+ return null;
+ }
+
+ @Override
+ public void resume(ApplicationId appId) throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException {
+ suspendedApplications.remove(appId);
+ }
+
+ @Override
+ public void suspend(ApplicationId appId) throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException {
+ suspendedApplications.add(appId);
+ }
+
+ @Override
+ public void suspendAll(HostName parentHostname, List<HostName> hostNames) throws BatchInternalErrorException, BatchHostStateChangeDeniedException, BatchHostNameNotFoundException {
+ throw new RuntimeException("Not implemented");
+ }
+ }
+
+}
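
A usage sketch for the new fixture, assuming it is placed in the same test package as NodeFailTester; the rewritten NodeFailerTest below follows this same pattern, and the choice of which active node to take down is arbitrary:

    import java.time.Duration;
    import com.yahoo.vespa.hosted.provision.Node;

    class NodeFailTesterUsageSketch {
        void failOneDownNode() {
            NodeFailTester tester = NodeFailTester.withTwoApplications();
            String downHost = tester.nodeRepository.getNodes(NodeFailTester.app1, Node.State.active).get(0).hostname();
            tester.serviceMonitor.setHostDown(downHost);
            tester.failer.run();                          // registers that the node is down
            tester.clock.advance(Duration.ofMinutes(65)); // move past the one-hour downtime limit
            tester.failer.run();                          // the node is now failed and a replacement is provisioned
        }
    }
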
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
index 2a0bfe9c662..7e611e3224a 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/NodeFailerTest.java
@@ -1,70 +1,23 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vespa.hosted.provision.maintenance;
-import com.yahoo.config.provision.ApplicationId;
-import com.yahoo.config.provision.ApplicationName;
-import com.yahoo.config.provision.Capacity;
-import com.yahoo.config.provision.ClusterSpec;
-import com.yahoo.config.provision.Environment;
-import com.yahoo.config.provision.HostLivenessTracker;
-import com.yahoo.config.provision.HostSpec;
-import com.yahoo.config.provision.InstanceName;
import com.yahoo.config.provision.NodeType;
-import com.yahoo.config.provision.RegionName;
-import com.yahoo.config.provision.TenantName;
-import com.yahoo.config.provision.Zone;
-import com.yahoo.test.ManualClock;
-import com.yahoo.transaction.NestedTransaction;
-import com.yahoo.vespa.applicationmodel.ApplicationInstance;
-import com.yahoo.vespa.applicationmodel.ApplicationInstanceId;
-import com.yahoo.vespa.applicationmodel.ApplicationInstanceReference;
-import com.yahoo.vespa.applicationmodel.ClusterId;
-import com.yahoo.vespa.applicationmodel.ConfigId;
-import com.yahoo.vespa.applicationmodel.HostName;
-import com.yahoo.vespa.applicationmodel.ServiceCluster;
-import com.yahoo.vespa.applicationmodel.ServiceInstance;
-import com.yahoo.vespa.applicationmodel.ServiceType;
-import com.yahoo.vespa.applicationmodel.TenantId;
-import com.yahoo.vespa.curator.Curator;
-import com.yahoo.vespa.curator.mock.MockCurator;
-import com.yahoo.vespa.curator.transaction.CuratorTransaction;
import com.yahoo.vespa.hosted.provision.Node;
-import com.yahoo.vespa.hosted.provision.NodeRepository;
-import com.yahoo.vespa.hosted.provision.node.Flavor;
-import com.yahoo.vespa.hosted.provision.node.NodeFlavors;
import com.yahoo.vespa.hosted.provision.node.Status;
-import com.yahoo.vespa.hosted.provision.provisioning.NodeRepositoryProvisioner;
-import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder;
import com.yahoo.vespa.orchestrator.ApplicationIdNotFoundException;
import com.yahoo.vespa.orchestrator.ApplicationStateChangeDeniedException;
-import com.yahoo.vespa.orchestrator.BatchHostNameNotFoundException;
-import com.yahoo.vespa.orchestrator.BatchInternalErrorException;
-import com.yahoo.vespa.orchestrator.HostNameNotFoundException;
-import com.yahoo.vespa.orchestrator.Orchestrator;
-import com.yahoo.vespa.orchestrator.policy.BatchHostStateChangeDeniedException;
-import com.yahoo.vespa.orchestrator.policy.HostStateChangeDeniedException;
-import com.yahoo.vespa.orchestrator.status.ApplicationInstanceStatus;
-import com.yahoo.vespa.orchestrator.status.HostStatus;
-import com.yahoo.vespa.service.monitor.ServiceMonitor;
-import com.yahoo.vespa.service.monitor.ServiceMonitorStatus;
-import org.junit.Before;
import org.junit.Test;
-import java.time.Clock;
import java.time.Duration;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
-import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
/**
* Tests automatic failing of nodes.
@@ -73,388 +26,223 @@ import static org.junit.Assert.assertTrue;
*/
public class NodeFailerTest {
- // Immutable components
- private static final Zone ZONE = new Zone(Environment.prod, RegionName.from("us-east"));
- private static final NodeFlavors NODE_FLAVORS = FlavorConfigBuilder.createDummies("default", "docker");
- private static final ApplicationId APP_1 = ApplicationId.from(TenantName.from("foo1"), ApplicationName.from("bar"), InstanceName.from("fuz"));
- private static final ApplicationId APP_2 = ApplicationId.from(TenantName.from("foo2"), ApplicationName.from("bar"), InstanceName.from("fuz"));
- private static final Duration DOWNTIME_LIMIT_ONE_HOUR = Duration.ofMinutes(60);
-
- // Components with state
- private ManualClock clock;
- private Curator curator;
- private TestHostLivenessTracker hostLivenessTracker;
- private ServiceMonitorStub serviceMonitor;
- private MockDeployer deployer;
- private NodeRepository nodeRepository;
- private Orchestrator orchestrator;
- private NodeFailer failer;
-
- @Before
- public void setup() {
- clock = new ManualClock();
- curator = new MockCurator();
- nodeRepository = new NodeRepository(NODE_FLAVORS, curator, clock);
- NodeRepositoryProvisioner provisioner = new NodeRepositoryProvisioner(nodeRepository, NODE_FLAVORS, ZONE);
-
- createReadyNodes(16, nodeRepository, NODE_FLAVORS);
- createHostNodes(3, nodeRepository, NODE_FLAVORS);
-
- // Create applications
- ClusterSpec clusterApp1 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("test"), Optional.empty());
- ClusterSpec clusterApp2 = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("test"), Optional.empty());
- int wantedNodesApp1 = 5;
- int wantedNodesApp2 = 7;
- activate(APP_1, clusterApp1, wantedNodesApp1, provisioner);
- activate(APP_2, clusterApp2, wantedNodesApp2, provisioner);
- assertEquals(wantedNodesApp1, nodeRepository.getNodes(APP_1, Node.State.active).size());
- assertEquals(wantedNodesApp2, nodeRepository.getNodes(APP_2, Node.State.active).size());
-
- // Create a deployer ...
- Map<ApplicationId, MockDeployer.ApplicationContext> apps = new HashMap<>();
- apps.put(APP_1, new MockDeployer.ApplicationContext(APP_1, clusterApp1, wantedNodesApp1, Optional.of("default"), 1));
- apps.put(APP_2, new MockDeployer.ApplicationContext(APP_2, clusterApp2, wantedNodesApp2, Optional.of("default"), 1));
- deployer = new MockDeployer(provisioner, apps);
- // ... and the other services
- hostLivenessTracker = new TestHostLivenessTracker(clock);
- serviceMonitor = new ServiceMonitorStub(apps, nodeRepository);
- orchestrator = new OrchestratorMock();
-
- failer = createFailer();
- }
-
- private NodeFailer createFailer() {
- return new NodeFailer(deployer, hostLivenessTracker, serviceMonitor, nodeRepository, DOWNTIME_LIMIT_ONE_HOUR, clock, orchestrator);
- }
-
@Test
public void nodes_for_suspended_applications_are_not_failed() throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException {
- orchestrator.suspend(APP_1);
+ NodeFailTester tester = NodeFailTester.withTwoApplications();
+ tester.suspend(tester.app1);
// Set two nodes down (one for each application) and wait 65 minutes
- String host_from_suspended_app = nodeRepository.getNodes(APP_1, Node.State.active).get(1).hostname();
- String host_from_normal_app = nodeRepository.getNodes(APP_2, Node.State.active).get(3).hostname();
- serviceMonitor.setHostDown(host_from_suspended_app);
- serviceMonitor.setHostDown(host_from_normal_app);
- failer.run();
- clock.advance(Duration.ofMinutes(65));
- failer.run();
-
- assertEquals(Node.State.failed, nodeRepository.getNode(host_from_normal_app).get().state());
- assertEquals(Node.State.active, nodeRepository.getNode(host_from_suspended_app).get().state());
+ String host_from_suspended_app = tester.nodeRepository.getNodes(tester.app1, Node.State.active).get(1).hostname();
+ String host_from_normal_app = tester.nodeRepository.getNodes(tester.app2, Node.State.active).get(3).hostname();
+ tester.serviceMonitor.setHostDown(host_from_suspended_app);
+ tester.serviceMonitor.setHostDown(host_from_normal_app);
+ tester.failer.run();
+ tester.clock.advance(Duration.ofMinutes(65));
+ tester.failer.run();
+
+ assertEquals(Node.State.failed, tester.nodeRepository.getNode(host_from_normal_app).get().state());
+ assertEquals(Node.State.active, tester.nodeRepository.getNode(host_from_suspended_app).get().state());
}
@Test
public void test_node_failing() throws InterruptedException {
+ NodeFailTester tester = NodeFailTester.withTwoApplications();
+
// For a day all nodes work so nothing happens
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
- failer.run();
- clock.advance(Duration.ofMinutes(5));
- allNodesMakeAConfigRequestExcept();
+ tester.failer.run();
+ tester.clock.advance(Duration.ofMinutes(5));
+ tester.allNodesMakeAConfigRequestExcept();
- assertEquals( 0, deployer.redeployments);
- assertEquals(12, nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 0, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 4, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals( 0, tester.deployer.redeployments);
+ assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
+ assertEquals( 0, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals( 4, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
}
// Failures are detected on two ready nodes, which are then failed
- Node readyFail1 = nodeRepository.getNodes(NodeType.tenant, Node.State.ready).get(2);
- Node readyFail2 = nodeRepository.getNodes(NodeType.tenant, Node.State.ready).get(3);
- nodeRepository.write(readyFail1.with(readyFail1.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.memory_mcelog))));
- nodeRepository.write(readyFail2.with(readyFail2.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.disk_smart))));
- assertEquals(4, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
- failer.run();
- assertEquals(2, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(Node.State.failed, nodeRepository.getNode(readyFail1.hostname()).get().state());
- assertEquals(Node.State.failed, nodeRepository.getNode(readyFail2.hostname()).get().state());
+ Node readyFail1 = tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).get(2);
+ Node readyFail2 = tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).get(3);
+ tester.nodeRepository.write(readyFail1.with(readyFail1.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.memory_mcelog))));
+ tester.nodeRepository.write(readyFail2.with(readyFail2.status().withHardwareFailure(Optional.of(Status.HardwareFailureType.disk_smart))));
+ assertEquals(4, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ tester.failer.run();
+ assertEquals(2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(Node.State.failed, tester.nodeRepository.getNode(readyFail1.hostname()).get().state());
+ assertEquals(Node.State.failed, tester.nodeRepository.getNode(readyFail2.hostname()).get().state());
- String downHost1 = nodeRepository.getNodes(APP_1, Node.State.active).get(1).hostname();
- String downHost2 = nodeRepository.getNodes(APP_2, Node.State.active).get(3).hostname();
- serviceMonitor.setHostDown(downHost1);
- serviceMonitor.setHostDown(downHost2);
+ String downHost1 = tester.nodeRepository.getNodes(tester.app1, Node.State.active).get(1).hostname();
+ String downHost2 = tester.nodeRepository.getNodes(tester.app2, Node.State.active).get(3).hostname();
+ tester.serviceMonitor.setHostDown(downHost1);
+ tester.serviceMonitor.setHostDown(downHost2);
// nothing happens the first 45 minutes
for (int minutes = 0; minutes < 45; minutes +=5 ) {
- failer.run();
- clock.advance(Duration.ofMinutes(5));
- allNodesMakeAConfigRequestExcept();
- assertEquals( 0, deployer.redeployments);
- assertEquals(12, nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 2, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 2, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
- }
- serviceMonitor.setHostUp(downHost1);
+ tester.failer.run();
+ tester.clock.advance(Duration.ofMinutes(5));
+ tester.allNodesMakeAConfigRequestExcept();
+ assertEquals( 0, tester.deployer.redeployments);
+ assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
+ assertEquals( 2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals( 2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ }
+ tester.serviceMonitor.setHostUp(downHost1);
for (int minutes = 0; minutes < 30; minutes +=5 ) {
- failer.run();
- clock.advance(Duration.ofMinutes(5));
- allNodesMakeAConfigRequestExcept();
+ tester.failer.run();
+ tester.clock.advance(Duration.ofMinutes(5));
+ tester.allNodesMakeAConfigRequestExcept();
}
// downHost2 should now be failed and replaced, but not downHost1
- assertEquals( 1, deployer.redeployments);
- assertEquals(12, nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 3, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 1, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(downHost2, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).get(0).hostname());
+ assertEquals( 1, tester.deployer.redeployments);
+ assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
+ assertEquals( 3, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals( 1, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(downHost2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).get(0).hostname());
// downHost1 fails again
- serviceMonitor.setHostDown(downHost1);
- failer.run();
- clock.advance(Duration.ofMinutes(5));
- allNodesMakeAConfigRequestExcept();
+ tester.serviceMonitor.setHostDown(downHost1);
+ tester.failer.run();
+ tester.clock.advance(Duration.ofMinutes(5));
+ tester.allNodesMakeAConfigRequestExcept();
// the system goes down and does not have updated information when coming back
- clock.advance(Duration.ofMinutes(120));
- failer = createFailer();
- serviceMonitor.setStatusIsKnown(false);
- failer.run();
+ tester.clock.advance(Duration.ofMinutes(120));
+ tester.failer = tester.createFailer();
+ tester.serviceMonitor.setStatusIsKnown(false);
+ tester.failer.run();
// due to this, nothing is failed
- assertEquals( 1, deployer.redeployments);
- assertEquals(12, nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 3, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 1, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals( 1, tester.deployer.redeployments);
+ assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
+ assertEquals( 3, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals( 1, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
// when status becomes known, and the host is still down, it is failed
- clock.advance(Duration.ofMinutes(5));
- allNodesMakeAConfigRequestExcept();
- serviceMonitor.setStatusIsKnown(true);
- failer.run();
- assertEquals( 2, deployer.redeployments);
- assertEquals(12, nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 4, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 0, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ tester.clock.advance(Duration.ofMinutes(5));
+ tester.allNodesMakeAConfigRequestExcept();
+ tester.serviceMonitor.setStatusIsKnown(true);
+ tester.failer.run();
+ assertEquals( 2, tester.deployer.redeployments);
+ assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
+ assertEquals( 4, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals( 0, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
// the last host goes down
- Node lastNode = highestIndex(nodeRepository.getNodes(APP_1, Node.State.active));
- serviceMonitor.setHostDown(lastNode.hostname());
+ Node lastNode = tester.highestIndex(tester.nodeRepository.getNodes(tester.app1, Node.State.active));
+ tester.serviceMonitor.setHostDown(lastNode.hostname());
// it is not failed because there are no ready nodes to replace it
for (int minutes = 0; minutes < 75; minutes +=5 ) {
- failer.run();
- clock.advance(Duration.ofMinutes(5));
- allNodesMakeAConfigRequestExcept();
- assertEquals( 2, deployer.redeployments);
- assertEquals(12, nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 4, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 0, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ tester.failer.run();
+ tester.clock.advance(Duration.ofMinutes(5));
+ tester.allNodesMakeAConfigRequestExcept();
+ assertEquals( 2, tester.deployer.redeployments);
+ assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
+ assertEquals( 4, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals( 0, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
}
// A new node is available
- createReadyNodes(1, 16, nodeRepository, NODE_FLAVORS);
- failer.run();
+ tester.createReadyNodes(1, 16);
+ tester.failer.run();
// The node is now failed
- assertEquals( 3, deployer.redeployments);
- assertEquals(12, nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
- assertEquals( 5, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
- assertEquals( 0, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals( 3, tester.deployer.redeployments);
+ assertEquals(12, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.active).size());
+ assertEquals( 5, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
+ assertEquals( 0, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
assertTrue("The index of the last failed node is not reused",
- highestIndex(nodeRepository.getNodes(APP_1, Node.State.active)).allocation().get().membership().index()
+ tester.highestIndex(tester.nodeRepository.getNodes(tester.app1, Node.State.active)).allocation().get().membership().index()
>
lastNode.allocation().get().membership().index());
}
@Test
public void testFailingReadyNodes() {
+ NodeFailTester tester = NodeFailTester.withTwoApplications();
+
// Add ready docker node
- createReadyNodes(1, 16, nodeRepository, NODE_FLAVORS.getFlavorOrThrow("docker"));
+ tester.createReadyNodes(1, 16, "docker");
// For a day all nodes work so nothing happens
for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
- clock.advance(Duration.ofMinutes(5));
- allNodesMakeAConfigRequestExcept();
- failer.run();
- assertEquals( 5, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ tester.clock.advance(Duration.ofMinutes(5));
+ tester.allNodesMakeAConfigRequestExcept();
+ tester.failer.run();
+ assertEquals( 5, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
}
- List<Node> ready = nodeRepository.getNodes(NodeType.tenant, Node.State.ready);
+ List<Node> ready = tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready);
- // Two ready nodes die and a ready docker node "dies" (Vespa does not run when in ready state for docker node, so
- // it does not mae config requests)
- clock.advance(Duration.ofMinutes(180));
- Node dockerNode = ready.stream().filter(node -> node.flavor() == NODE_FLAVORS.getFlavorOrThrow("docker")).findFirst().get();
+ // Two ready nodes die and a ready docker node "dies"
+ // (Vespa does not run when in ready state for docker node, so it does not make config requests)
+ tester.clock.advance(Duration.ofMinutes(180));
+ Node dockerNode = ready.stream().filter(node -> node.flavor() == tester.nodeFlavors.getFlavorOrThrow("docker")).findFirst().get();
List<Node> otherNodes = ready.stream()
- .filter(node -> node.flavor() != NODE_FLAVORS.getFlavorOrThrow("docker"))
+ .filter(node -> node.flavor() != tester.nodeFlavors.getFlavorOrThrow("docker"))
.collect(Collectors.toList());
- allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode);
- failer.run();
- assertEquals( 3, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals( 2, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
+ tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode);
+ tester.failer.run();
+ assertEquals( 3, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals( 2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
// Another ready node dies
- clock.advance(Duration.ofMinutes(180));
- allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode, otherNodes.get(3));
- failer.run();
- assertEquals( 2, nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
- assertEquals(ready.get(1), nodeRepository.getNodes(NodeType.tenant, Node.State.ready).get(0));
- assertEquals( 3, nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
- }
-
- private void allNodesMakeAConfigRequestExcept(Node ... deadNodeArray) {
- Set<Node> deadNodes = new HashSet<>(Arrays.asList(deadNodeArray));
- for (Node node : nodeRepository.getNodes(NodeType.tenant)) {
- if ( ! deadNodes.contains(node) && node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
- hostLivenessTracker.receivedRequestFrom(node.hostname());
- }
- }
-
- private void createReadyNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) {
- createReadyNodes(count, 0, nodeRepository, nodeFlavors);
- }
-
- private void createReadyNodes(int count, int startIndex, NodeRepository nodeRepository, NodeFlavors nodeFlavors) {
- createReadyNodes(count, startIndex, nodeRepository, nodeFlavors.getFlavorOrThrow("default"));
- }
-
- private void createReadyNodes(int count, int startIndex, NodeRepository nodeRepository, Flavor flavor) {
- List<Node> nodes = new ArrayList<>(count);
- for (int i = startIndex; i < startIndex + count; i++)
- nodes.add(nodeRepository.createNode("node" + i, "host" + i, Optional.empty(), flavor, NodeType.tenant));
- nodes = nodeRepository.addNodes(nodes);
- nodeRepository.setReady(nodes);
- }
-
- private void createHostNodes(int count, NodeRepository nodeRepository, NodeFlavors nodeFlavors) {
- List<Node> nodes = new ArrayList<>(count);
- for (int i = 0; i < count; i++)
- nodes.add(nodeRepository.createNode("parent" + i, "parent" + i, Optional.empty(), nodeFlavors.getFlavorOrThrow("default"), NodeType.host));
- nodes = nodeRepository.addNodes(nodes);
- nodeRepository.setReady(nodes);
- }
-
- private void activate(ApplicationId applicationId, ClusterSpec cluster, int nodeCount, NodeRepositoryProvisioner provisioner) {
- List<HostSpec> hosts = provisioner.prepare(applicationId, cluster, Capacity.fromNodeCount(nodeCount), 1, null);
- NestedTransaction transaction = new NestedTransaction().add(new CuratorTransaction(curator));
- provisioner.activate(transaction, applicationId, hosts);
- transaction.commit();
- }
-
- /** Returns the node with the highest membership index from the given set of allocated nodes */
- private Node highestIndex(List<Node> nodes) {
- Node highestIndex = null;
- for (Node node : nodes) {
- if (highestIndex == null || node.allocation().get().membership().index() >
- highestIndex.allocation().get().membership().index())
- highestIndex = node;
- }
- return highestIndex;
- }
-
- /** This is a fully functional implementation */
- private static class TestHostLivenessTracker implements HostLivenessTracker {
-
- private final Clock clock;
- private final Map<String, Instant> lastRequestFromHost = new HashMap<>();
-
- public TestHostLivenessTracker(Clock clock) {
- this.clock = clock;
- }
-
- @Override
- public void receivedRequestFrom(String hostname) {
- lastRequestFromHost.put(hostname, clock.instant());
- }
-
- @Override
- public Optional<Instant> lastRequestFrom(String hostname) {
- return Optional.ofNullable(lastRequestFromHost.get(hostname));
- }
-
+ tester.clock.advance(Duration.ofMinutes(180));
+ tester.allNodesMakeAConfigRequestExcept(otherNodes.get(0), otherNodes.get(2), dockerNode, otherNodes.get(3));
+ tester.failer.run();
+ assertEquals( 2, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).size());
+ assertEquals(ready.get(1), tester.nodeRepository.getNodes(NodeType.tenant, Node.State.ready).get(0));
+ assertEquals( 3, tester.nodeRepository.getNodes(NodeType.tenant, Node.State.failed).size());
}
- private static class ServiceMonitorStub implements ServiceMonitor {
-
- private final Map<ApplicationId, MockDeployer.ApplicationContext> apps;
- private final NodeRepository nodeRepository;
-
- private Set<String> downHosts = new HashSet<>();
- private boolean statusIsKnown = true;
-
- /** Create a service monitor where all nodes are initially up */
- public ServiceMonitorStub(Map<ApplicationId, MockDeployer.ApplicationContext> apps, NodeRepository nodeRepository) {
- this.apps = apps;
- this.nodeRepository = nodeRepository;
- }
-
- public void setHostDown(String hostname) {
- downHosts.add(hostname);
- }
-
- public void setHostUp(String hostname) {
- downHosts.remove(hostname);
- }
-
- public void setStatusIsKnown(boolean statusIsKnown) {
- this.statusIsKnown = statusIsKnown;
- }
-
- private ServiceMonitorStatus getHostStatus(String hostname) {
- if ( ! statusIsKnown) return ServiceMonitorStatus.NOT_CHECKED;
- if (downHosts.contains(hostname)) return ServiceMonitorStatus.DOWN;
- return ServiceMonitorStatus.UP;
- }
-
- @Override
- public Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> queryStatusOfAllApplicationInstances() {
- // Convert apps information to the response payload to return
- Map<ApplicationInstanceReference, ApplicationInstance<ServiceMonitorStatus>> status = new HashMap<>();
- for (Map.Entry<ApplicationId, MockDeployer.ApplicationContext> app : apps.entrySet()) {
- Set<ServiceInstance<ServiceMonitorStatus>> serviceInstances = new HashSet<>();
- for (Node node : nodeRepository.getNodes(app.getValue().id(), Node.State.active)) {
- serviceInstances.add(new ServiceInstance<>(new ConfigId("configid"),
- new HostName(node.hostname()),
- getHostStatus(node.hostname())));
- }
- Set<ServiceCluster<ServiceMonitorStatus>> serviceClusters = new HashSet<>();
- serviceClusters.add(new ServiceCluster<>(new ClusterId(app.getValue().cluster().id().value()),
- new ServiceType("serviceType"),
- serviceInstances));
- TenantId tenantId = new TenantId(app.getKey().tenant().value());
- ApplicationInstanceId applicationInstanceId = new ApplicationInstanceId(app.getKey().application().value());
- status.put(new ApplicationInstanceReference(tenantId, applicationInstanceId),
- new ApplicationInstance<>(tenantId, applicationInstanceId, serviceClusters));
- }
- return status;
- }
-
- }
-
- class OrchestratorMock implements Orchestrator {
-
- Set<ApplicationId> suspendedApplications = new HashSet<>();
-
- @Override
- public HostStatus getNodeStatus(HostName hostName) throws HostNameNotFoundException {
- return null;
- }
-
- @Override
- public void resume(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {}
-
- @Override
- public void suspend(HostName hostName) throws HostStateChangeDeniedException, HostNameNotFoundException {}
-
- @Override
- public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationId appId) throws ApplicationIdNotFoundException {
- return suspendedApplications.contains(appId)
- ? ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN : ApplicationInstanceStatus.NO_REMARKS;
- }
+ @Test
+ public void testFailingProxyNodes() {
+ NodeFailTester tester = NodeFailTester.withProxyApplication();
- @Override
- public Set<ApplicationId> getAllSuspendedApplications() {
- return null;
- }
+ // For a day all nodes work so nothing happens
+ for (int minutes = 0; minutes < 24 * 60; minutes +=5 ) {
+ tester.failer.run();
+ tester.clock.advance(Duration.ofMinutes(5));
+ tester.allNodesMakeAConfigRequestExcept();
- @Override
- public void resume(ApplicationId appId) throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException {
- suspendedApplications.remove(appId);
+ assertEquals(16, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size());
}
- @Override
- public void suspend(ApplicationId appId) throws ApplicationStateChangeDeniedException, ApplicationIdNotFoundException {
- suspendedApplications.add(appId);
- }
+ Set<String> downHosts = new HashSet<>();
+ downHosts.add("host4");
+ downHosts.add("host5");
- @Override
- public void suspendAll(HostName parentHostname, List<HostName> hostNames) throws BatchInternalErrorException, BatchHostStateChangeDeniedException, BatchHostNameNotFoundException {
- throw new RuntimeException("Not implemented");
- }
+ for (String downHost : downHosts)
+ tester.serviceMonitor.setHostDown(downHost);
+ // nothing happens the first 45 minutes
+ for (int minutes = 0; minutes < 45; minutes +=5 ) {
+ tester.failer.run();
+ tester.clock.advance(Duration.ofMinutes(5));
+ tester.allNodesMakeAConfigRequestExcept();
+ assertEquals( 0, tester.deployer.redeployments);
+ assertEquals(16, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size());
+ assertEquals( 0, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).size());
+ }
+
+ tester.clock.advance(Duration.ofMinutes(60));
+ tester.failer.run();
+
+ // one down host should now be failed, but not two as we are only allowed to fail one proxy
+ assertEquals( 1, tester.deployer.redeployments);
+ assertEquals(15, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size());
+ assertEquals( 1, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).size());
+ String failedHost1 = tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).get(0).hostname();
+ assertTrue(downHosts.contains(failedHost1));
+
+ // trying to fail again will still not fail the other down host
+ tester.clock.advance(Duration.ofMinutes(60));
+ tester.failer.run();
+ assertEquals(15, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size());
+
+ // The first down host is removed, which causes the second one to be moved to failed
+ tester.nodeRepository.remove(failedHost1);
+ tester.failer.run();
+ assertEquals( 2, tester.deployer.redeployments);
+ assertEquals(14, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.active).size());
+ assertEquals( 1, tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).size());
+ String failedHost2 = tester.nodeRepository.getNodes(NodeType.proxy, Node.State.failed).get(0).hostname();
+ assertFalse(failedHost1.equals(failedHost2));
+ assertTrue(downHosts.contains(failedHost2));
}
-
+
}
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
index b37b6985713..3b9f4469b01 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/maintenance/RetiredExpirerTest.java
@@ -67,7 +67,7 @@ public class RetiredExpirerTest {
clock.advance(Duration.ofHours(30)); // Retire period spent
MockDeployer deployer =
new MockDeployer(provisioner,
- Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId, cluster, wantedNodes, Optional.of("default"), 1)));
+ Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId, cluster, Capacity.fromNodeCount(wantedNodes, Optional.of("default")), 1)));
new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofHours(12)).run();
assertEquals(3, nodeRepository.getNodes(applicationId, Node.State.active).size());
assertEquals(4, nodeRepository.getNodes(applicationId, Node.State.inactive).size());
@@ -101,7 +101,7 @@ public class RetiredExpirerTest {
clock.advance(Duration.ofHours(30)); // Retire period spent
MockDeployer deployer =
new MockDeployer(provisioner,
- Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId, cluster, 1, Optional.of("default"), 1)));
+ Collections.singletonMap(applicationId, new MockDeployer.ApplicationContext(applicationId, cluster, Capacity.fromNodeCount(1, Optional.of("default")), 1)));
new RetiredExpirer(nodeRepository, deployer, clock, Duration.ofHours(12)).run();
assertEquals(1, nodeRepository.getNodes(applicationId, Node.State.active).size());
assertEquals(7, nodeRepository.getNodes(applicationId, Node.State.inactive).size());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java
index acd687e197e..fa00b7d60b8 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/MultigroupProvisioningTest.java
@@ -131,7 +131,9 @@ public class MultigroupProvisioningTest {
tester.advanceTime(Duration.ofDays(7));
MockDeployer deployer =
new MockDeployer(tester.provisioner(),
- Collections.singletonMap(application1, new MockDeployer.ApplicationContext(application1, cluster(), 8, Optional.of("large"), 1)));
+ Collections.singletonMap(application1,
+ new MockDeployer.ApplicationContext(application1, cluster(),
+ Capacity.fromNodeCount(8, Optional.of("large")), 1)));
new RetiredExpirer(tester.nodeRepository(), deployer, tester.clock(), Duration.ofHours(12)).run();
assertEquals(8, tester.getNodes(application1, Node.State.inactive).flavor("small").size());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
index 86ab77faa61..2256c8d8645 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTest.java
@@ -13,7 +13,10 @@ import com.yahoo.config.provision.OutOfCapacityException;
import com.yahoo.config.provision.RegionName;
import com.yahoo.config.provision.Zone;
import com.yahoo.transaction.NestedTransaction;
+import com.yahoo.vespa.config.nodes.NodeRepositoryConfig;
import com.yahoo.vespa.hosted.provision.Node;
+import com.yahoo.vespa.hosted.provision.node.Flavor;
+import com.yahoo.vespa.hosted.provision.testutils.FlavorConfigBuilder;
import org.junit.Ignore;
import org.junit.Test;
@@ -467,8 +470,24 @@ public class ProvisioningTest {
}
@Test
- public void application_deployment_allocates_cheapest_available() {
- ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")));
+ public void application_deployment_prefers_cheapest_stock_nodes() {
+ assertCorrectFlavorPreferences(true);
+ }
+
+ @Test
+ public void application_deployment_prefers_exact_nonstock_nodes() {
+ assertCorrectFlavorPreferences(false);
+ }
+
+ private void assertCorrectFlavorPreferences(boolean largeIsStock) {
+ FlavorConfigBuilder b = new FlavorConfigBuilder();
+ b.addFlavor("large", 4., 8., 100, Flavor.Type.BARE_METAL).cost(10).stock(largeIsStock);
+ NodeRepositoryConfig.Flavor.Builder largeVariant = b.addFlavor("large-variant", 3., 9., 101, Flavor.Type.BARE_METAL).cost(9);
+ b.addReplaces("large", largeVariant);
+ NodeRepositoryConfig.Flavor.Builder largeVariantVariant = b.addFlavor("large-variant-variant", 4., 9., 101, Flavor.Type.BARE_METAL).cost(11);
+ b.addReplaces("large-variant", largeVariantVariant);
+
+ ProvisioningTester tester = new ProvisioningTester(new Zone(Environment.prod, RegionName.from("us-east")), b.build());
tester.makeReadyNodes(6, "large"); //cost = 10
tester.makeReadyNodes(6, "large-variant"); //cost = 9
tester.makeReadyNodes(6, "large-variant-variant"); //cost = 11
@@ -477,17 +496,23 @@ public class ProvisioningTest {
ClusterSpec contentClusterSpec = ClusterSpec.request(ClusterSpec.Type.content, ClusterSpec.Id.from("myContent"), Optional.empty());
ClusterSpec containerClusterSpec = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("myContainer"), Optional.empty());
+ List<HostSpec> containerNodes = tester.prepare(applicationId, containerClusterSpec, 5, 1, "large");
+ List<HostSpec> contentNodes = tester.prepare(applicationId, contentClusterSpec, 10, 1, "large");
- List<HostSpec> containerNodes = tester.prepare(applicationId, containerClusterSpec, 5, 1, "large"); //should be replaced by 5 large-variant
- List<HostSpec> contentNodes = tester.prepare(applicationId, contentClusterSpec, 10, 1, "large"); // should give 1 large-variant, 6 large and 3 large-variant-variant
-
- tester.assertNumberOfNodesWithFlavor(containerNodes, "large-variant", 5);
- tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant", 1);
- tester.assertNumberOfNodesWithFlavor(contentNodes, "large", 6);
+ if (largeIsStock) { // 'large' is replaced by 'large-variant' when possible, as it is cheaper
+ tester.assertNumberOfNodesWithFlavor(containerNodes, "large-variant", 5);
+ tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant", 1);
+ tester.assertNumberOfNodesWithFlavor(contentNodes, "large", 6);
+ }
+ else { // 'large' is preferred when available, as it is what is exactly specified
+ tester.assertNumberOfNodesWithFlavor(containerNodes, "large", 5);
+ tester.assertNumberOfNodesWithFlavor(contentNodes, "large", 1);
+ tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant", 6);
+ }
+ // in both cases the most expensive, never exactly specified is least preferred
tester.assertNumberOfNodesWithFlavor(contentNodes, "large-variant-variant", 3);
}
-
private SystemState prepare(ApplicationId application, int container0Size, int container1Size, int content0Size, int content1Size, String flavor, ProvisioningTester tester) {
// "deploy prepare" with two container clusters and a storage cluster having two groups
ClusterSpec containerCluster0 = ClusterSpec.request(ClusterSpec.Type.container, ClusterSpec.Id.from("container0"), Optional.empty());
diff --git a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
index 27e4b11ab2d..6aea7ae4d61 100644
--- a/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
+++ b/node-repository/src/test/java/com/yahoo/vespa/hosted/provision/provisioning/ProvisioningTester.java
@@ -72,6 +72,20 @@ public class ProvisioningTester implements AutoCloseable {
}
}
+ public ProvisioningTester(Zone zone, NodeRepositoryConfig config) {
+ try {
+ nodeFlavors = new NodeFlavors(config);
+ clock = new ManualClock();
+ nodeRepository = new NodeRepository(nodeFlavors, curator, clock);
+ provisioner = new NodeRepositoryProvisioner(nodeRepository, nodeFlavors, zone, clock);
+ capacityPolicies = new CapacityPolicies(zone, nodeFlavors);
+ provisionLogger = new NullProvisionLogger();
+ }
+ catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
private NodeRepositoryConfig createConfig() {
FlavorConfigBuilder b = new FlavorConfigBuilder();
b.addFlavor("default", 2., 4., 100, Flavor.Type.BARE_METAL).cost(3);
@@ -79,7 +93,7 @@ public class ProvisioningTester implements AutoCloseable {
b.addFlavor("docker1", 1., 1., 10, Flavor.Type.DOCKER_CONTAINER).cost(1);
b.addFlavor("v-4-8-100", 4., 8., 100, Flavor.Type.VIRTUAL_MACHINE).cost(4);
b.addFlavor("old-large1", 2., 4., 100, Flavor.Type.BARE_METAL).cost(6);
- b.addFlavor("old-large2", 2., 5., 100, Flavor.Type.BARE_METAL).cost(8);
+ b.addFlavor("old-large2", 2., 5., 100, Flavor.Type.BARE_METAL).cost(14);
NodeRepositoryConfig.Flavor.Builder large = b.addFlavor("large", 4., 8., 100, Flavor.Type.BARE_METAL).cost(10);
b.addReplaces("old-large1", large);
b.addReplaces("old-large2", large);
@@ -114,6 +128,7 @@ public class ProvisioningTester implements AutoCloseable {
public NodeRepositoryProvisioner provisioner() { return provisioner; }
public CapacityPolicies capacityPolicies() { return capacityPolicies; }
public NodeList getNodes(ApplicationId id, Node.State ... inState) { return new NodeList(nodeRepository.getNodes(id, inState)); }
+ public NodeFlavors flavors() { return nodeFlavors; }
public void patchNode(Node node) { nodeRepository.write(node); }
diff --git a/pom.xml b/pom.xml
index 5410b10f1ba..1d0dfd83465 100644
--- a/pom.xml
+++ b/pom.xml
@@ -660,36 +660,6 @@
<version>1.7.17</version>
</dependency>
<dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-client</artifactId>
- <version>1.13</version>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-core</artifactId>
- <version>1.13</version>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-json</artifactId>
- <version>1.13</version>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey</groupId>
- <artifactId>jersey-server</artifactId>
- <version>1.13</version>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey.contribs</groupId>
- <artifactId>jersey-guice</artifactId>
- <version>1.13</version>
- </dependency>
- <dependency>
- <groupId>com.sun.jersey.contribs</groupId>
- <artifactId>jersey-multipart</artifactId>
- <version>1.13</version>
- </dependency>
- <dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
<version>1.3.1</version>
@@ -1111,7 +1081,7 @@
<aries.util.version>1.0.0</aries.util.version>
<asm-debug-all.version>5.0.3</asm-debug-all.version>
<curator.version>2.9.1</curator.version>
- <jackson2.version>2.5.3</jackson2.version>
+ <jackson2.version>2.8.3</jackson2.version>
<jersey2.version>2.10.1</jersey2.version>
<jetty.version>9.3.12.v20160915</jetty.version>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
diff --git a/sample-apps/blog-tutorial-shared/README.md b/sample-apps/blog-tutorial-shared/README.md
index 09ac61e6b56..846156908c3 100644
--- a/sample-apps/blog-tutorial-shared/README.md
+++ b/sample-apps/blog-tutorial-shared/README.md
@@ -1,6 +1,8 @@
# Vespa tutorial utility scripts
-## From raw JSON to Vespa Feeding format
+## Vespa Tutorial pt. 1
+
+### From raw JSON to Vespa Feeding format
$ python parse.py trainPosts.json > somefile.json
@@ -10,10 +12,85 @@ Parses JSON from the file trainPosts.json downloaded from Kaggle during the [blo
Give it the flag "-p" or "--popularity", and the script also calculates and adds the field `popularity`, as introduced [in the tutorial](https://git.corp.yahoo.com/pages/vespa/documentation/documentation/tutorials/blog-search.html#blog-popularity-signal).
-## Building and running the Spark script for calculating latent factors
+## Vespa Tutorial pt. 2
+
+### Building and running the Spark script for calculating latent factors
1. Install the latest version of [Apache Spark](http://spark.apache.org/) and [sbt](http://www.scala-sbt.org/download.html).
2. Clone this repository and build the Spark script with `sbt package` (in the root directory of this repo).
-3. Use the resulting jar file when running spark jobs included in the tutorials.
\ No newline at end of file
+3. Use the resulting jar file when running spark jobs included in the tutorials.
+
+## Vespa Tutorial pt. 3
+
+Pre-computed data used throughout the tutorial can be found [here](http://trdstorage.trondheim.corp.yahoo.com/~tmartins/vespa_tutorial_data/).
+
+You can download ```vespa_tutorial_data.tar.gz``` (144MB) and decompress it with
+
+ $ wget http://trdstorage.trondheim.corp.yahoo.com/~tmartins/vespa_tutorial_data.tar.gz
+ $ tar -xvzf vespa_tutorial_data.tar.gz
+
+### Create Training Dataset
+
+ $ ./generateDataset.R -d vespa_tutorial_data/user_item_cf_cv/product.json \
+ -u vespa_tutorial_data/user_item_cf_cv/user.json \
+ -t vespa_tutorial_data/training_and_test_indices/train.txt \
+ -o vespa_tutorial_data/nn_model/training_set.txt
+
+### Train model with TensorFlow
+
+Train the model with
+
+ $ python vespaModel.py --product_features_file_path vespa_tutorial_data/user_item_cf_cv/product.json \
+ --user_features_file_path vespa_tutorial_data/user_item_cf_cv/user.json \
+ --dataset_file_path vespa_tutorial_data/nn_model/training_set.txt
+
+Model parameters and summary statistics will be saved in the folder ```runs/${start_time}```, where ```${start_time}``` is the time at which you started training the model.
+
+Visualize the accuracy and loss metrics with
+
+ $ tensorboard --logdir runs/1473845959/summaries/
+
+**Note**: The folder name ```1473845959``` depends on when you started training the model and will be different in your case.
+
+### Export model parameters to Tensor Vespa format
+
+```checkpoint_dir``` holds the folder to which TensorFlow writes the learned model parameters (stored using protobuf), and ```output_dir``` is the folder
+to which we will write the model parameters in Vespa tensor format.
+
+ import vespaModel
+
+ checkpoint_dir = "./runs/1473845959/checkpoints"
+ output_dir = "application_package/constants"
+
+ serializer = serializeVespaModel(checkpoint_dir, output_dir)
+ serializer.serialize_to_disk(variable_name = "W_hidden", dimension_names = ['input', 'hidden'])
+ serializer.serialize_to_disk(variable_name = "b_hidden", dimension_names = ['hidden'])
+ serializer.serialize_to_disk(variable_name = "W_final", dimension_names = ['hidden', 'final'])
+ serializer.serialize_to_disk(variable_name = "b_final", dimension_names = ['final'])
+
+The Python code containing the class ```serializeVespaModel``` can be found at ```src/python/vespaModel.py```.
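
For a rough idea of what ```serialize_to_disk``` does, here is a minimal sketch, assuming the TensorFlow 1.x checkpoint reader API and an illustrative cell-based JSON output (the real implementation is in ```src/python/vespaModel.py```, and the exact constant tensor file syntax should be taken from the Vespa documentation):

    import json
    import numpy
    import tensorflow as tf

    def serialize_variable(checkpoint_prefix, output_dir, variable_name, dimension_names):
        # Read the learned parameter tensor back from the protobuf checkpoint
        # (checkpoint_prefix is the path prefix of one checkpoint, e.g. ".../checkpoints/model-1000").
        reader = tf.train.NewCheckpointReader(checkpoint_prefix)
        weights = reader.get_tensor(variable_name)
        # Emit one cell per entry, addressed by the given dimension names.
        cells = []
        for index, value in numpy.ndenumerate(weights):
            address = {dim: str(i) for dim, i in zip(dimension_names, index)}
            cells.append({"address": address, "value": float(value)})
        with open("%s/%s.json" % (output_dir, variable_name), "w") as out:
            json.dump({"cells": cells}, out)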
+
+### Offline evaluation
+
+Query Vespa using the rank-profile ```tensor``` for users in the test set and return 100 blog post recommendations. Use those recommendations together with the
+information contained in the test set to compute the metrics defined in Tutorial pt. 2.
+
+    pig -x local -f tutorial_compute_metric.pig \
+      -param VESPA_HADOOP_JAR=vespa-hadoop.jar \
+      -param TEST_INDICES=blog-job/training_and_test_indices/testing_set_ids \
+      -param ENDPOINT=$(hostname):8080 \
+      -param NUMBER_RECOMMENDATIONS=100 \
+      -param RANKING_NAME=tensor \
+      -param OUTPUT=blog-job/cf-metric
+
+Repeat the process, but now using the rank-profile ```nn_tensor```.
+
+    pig -x local -f tutorial_compute_metric.pig \
+      -param VESPA_HADOOP_JAR=vespa-hadoop.jar \
+      -param TEST_INDICES=blog-job/training_and_test_indices/testing_set_ids \
+      -param ENDPOINT=$(hostname):8080 \
+      -param NUMBER_RECOMMENDATIONS=100 \
+      -param RANKING_NAME=nn_tensor \
+      -param OUTPUT=blog-job/cf-metric
\ No newline at end of file
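
The metric itself is defined in Tutorial pt. 2; assuming it is the fraction of test-set reads that appear among the top-N recommendations, a small Python sketch of the computation the pig job performs could look like:

    def recall_at_n(test_pairs, recommendations):
        # test_pairs: iterable of (user_id, post_id) pairs from the test set.
        # recommendations: dict mapping user_id to its list of top-N recommended post_ids.
        hits = 0
        total = 0
        for user_id, post_id in test_pairs:
            if user_id not in recommendations:
                continue
            total += 1
            if post_id in recommendations[user_id]:
                hits += 1
        return float(hits) / total if total else 0.0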
diff --git a/sample-apps/blog-tutorial-shared/src/R/generateDataset.R b/sample-apps/blog-tutorial-shared/src/R/generateDataset.R
new file mode 100644
index 00000000000..b410ad4094c
--- /dev/null
+++ b/sample-apps/blog-tutorial-shared/src/R/generateDataset.R
@@ -0,0 +1,56 @@
+library(jsonlite)
+library(dplyr)
+
+file_path_document <- '/Users/tmartins/projects/yahoo/sw/vespa-examples/blog-recommendation-support/data/blog-job/user_item_cf_cv/product.json'
+file_path_user <- '/Users/tmartins/projects/yahoo/sw/vespa-examples/blog-recommendation-support/data/blog-job/user_item_cf_cv/user.json'
+file_path_train <- '/Users/tmartins/projects/yahoo/sw/vespa-examples/blog-recommendation-support/data/blog-job/training_and_test_indices/train.txt'
+output_file <- '/Users/tmartins/projects/yahoo/sw/vespa-examples/blog-recommendation-support/data/blog-job/nn_model/training_set.txt'
+
+# get ids from documents that have a latent vector
+lines <- readLines(file_path_document)
+product_ids <- NULL
+for (line in lines){
+ product_ids <- c(product_ids, fromJSON(txt=line)$post_id)
+}
+
+# get ids from users that have a latent vector
+lines <- readLines(file_path_user)
+user_ids <- NULL
+for (line in lines){
+ user_ids <- c(user_ids, fromJSON(txt=line)$user_id)
+}
+
+# read (product, user) ids used for training
+train_ids <- read.delim(file = file_path_train, header = FALSE, stringsAsFactors = FALSE)
+colnames(train_ids) <- c("product_id", "user_id")
+
+# filter out product id and user id that does not have latent vectors
+temp <- merge(x = train_ids, y = data.frame(product_id = product_ids))
+final_positive_train_ids <- merge(x = temp, y = data.frame(user_id = user_ids))
+
+# add positive labels
+final_positive_train_ids <- data.frame(final_positive_train_ids, label = 1)
+
+# add noise to the data
+clicks_per_user <- final_positive_train_ids %>% group_by(user_id) %>% summarise(number_clicks = sum(label))
+
+unread_proportion <- 10
+unread_products <- matrix(NA, unread_proportion*sum(clicks_per_user$number_clicks), 3)
+colnames(unread_products) <- c("user_id", "product_id", "label")
+count <- 0
+for (i in 1:nrow(clicks_per_user)){
+ print(paste(i, "/ ", nrow(clicks_per_user)))
+ number_itens <- unread_proportion * as.numeric(clicks_per_user[i, "number_clicks"])
+ row_index <- count + 1:number_itens
+ count <- count + number_itens
+ user_id <- clicks_per_user[i, "user_id"]
+ new_samples <- sample(x = product_ids, size = unread_proportion * as.numeric(clicks_per_user[i, "number_clicks"]), replace = FALSE)
+ unread_products[row_index, ] <- matrix(c(rep(as.numeric(user_id), number_itens), new_samples, rep(0, number_itens)), ncol = 3)
+}
+
+# create final dataset
+final_train_ids <- rbind(final_positive_train_ids, data.frame(unread_products))
+duplicated_rows <- duplicated(x = final_train_ids[, c("user_id", "product_id")])
+final_train_ids <- final_train_ids[!duplicated_rows, ]
+
+write.table(x = final_train_ids, file = output_file, sep = "\t", quote = FALSE, row.names = FALSE)
diff --git a/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_get_recommendation_list.pig b/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_get_recommendation_list.pig
new file mode 100644
index 00000000000..ab4245eaa25
--- /dev/null
+++ b/sample-apps/blog-tutorial-shared/src/main/pig/tutorial_get_recommendation_list.pig
@@ -0,0 +1,21 @@
+REGISTER $VESPA_HADOOP_JAR
+
+DEFINE BlogPostRecommendations
+ com.yahoo.vespa.hadoop.pig.VespaQuery(
+ 'query=http://$ENDPOINT/search/?user_id=<user_id>&hits=$NUMBER_RECOMMENDATIONS&ranking=$RANKING_NAME',
+ 'schema=rank:int,id:chararray,relevance:double,fields/post_id:chararray'
+ );
+
+-- Load test_set data from a local file
+test_indices = LOAD '$TEST_INDICES' AS (post_id:chararray, user_id:chararray);
+users = FOREACH test_indices GENERATE user_id;
+users = FILTER users BY user_id IS NOT null;
+users = DISTINCT users;
+
+-- Run a set of queries against Vespa
+recommendations = FOREACH users GENERATE user_id,
+ FLATTEN(BlogPostRecommendations(*)) AS (rank, id, relevance, post_id);
+recommendations = FOREACH recommendations GENERATE user_id, rank, post_id;
+recommendations = FILTER recommendations BY rank IS NOT NULL AND post_id IS NOT NULL;
+
+STORE recommendations INTO '$OUTPUT';
diff --git a/sample-apps/blog-tutorial-shared/src/main/python/__init__.py b/sample-apps/blog-tutorial-shared/src/python/__init__.py
index e69de29bb2d..e69de29bb2d 100644
--- a/sample-apps/blog-tutorial-shared/src/main/python/__init__.py
+++ b/sample-apps/blog-tutorial-shared/src/python/__init__.py
diff --git a/sample-apps/blog-tutorial-shared/src/main/python/parse.py b/sample-apps/blog-tutorial-shared/src/python/parse.py
index 0d5f892eebc..0d5f892eebc 100644
--- a/sample-apps/blog-tutorial-shared/src/main/python/parse.py
+++ b/sample-apps/blog-tutorial-shared/src/python/parse.py
diff --git a/sample-apps/blog-recommendation/src/main/python/vespaModel.py b/sample-apps/blog-tutorial-shared/src/python/vespaModel.py
index fd0718721eb..fd0718721eb 100755
--- a/sample-apps/blog-recommendation/src/main/python/vespaModel.py
+++ b/sample-apps/blog-tutorial-shared/src/python/vespaModel.py
diff --git a/searchcore/src/apps/fdispatch/fdispatch.cpp b/searchcore/src/apps/fdispatch/fdispatch.cpp
index 58af00accf1..f4771cf3c14 100644
--- a/searchcore/src/apps/fdispatch/fdispatch.cpp
+++ b/searchcore/src/apps/fdispatch/fdispatch.cpp
@@ -22,6 +22,7 @@ LOG_SETUP("fdispatch");
using fdispatch::Fdispatch;
using vespa::config::search::core::FdispatchrcConfig;
+using namespace std::literals;
extern char FastS_VersionTag[];
@@ -34,8 +35,7 @@ private:
protected:
vespalib::string _configId;
- bool CheckShutdownFlags ()
- {
+ bool CheckShutdownFlags () const {
return (vespalib::SignalHandler::INT.check() || vespalib::SignalHandler::TERM.check());
}
@@ -66,8 +66,7 @@ FastS_FDispatchApp::Main()
forcelink_searchlib_aggregation();
if (!GetOptions(&exitCode)) {
- EV_STOPPING("fdispatch",
- (exitCode == 0) ? "clean shutdown" : "error");
+ EV_STOPPING("fdispatch", (exitCode == 0) ? "clean shutdown" : "error");
return exitCode;
}
@@ -93,15 +92,14 @@ FastS_FDispatchApp::Main()
#ifdef RLIMIT_NOFILE
struct rlimit curlim;
getrlimit(RLIMIT_NOFILE, &curlim);
- if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY)
+ if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY) {
LOG(debug, "Max number of open files = %d", (int) curlim.rlim_cur);
- else
+ } else {
LOG(debug, "Max number of open files = unlimited");
+ }
if (curlim.rlim_cur >= 64) {
} else {
- LOG(error,
- "CRITICAL: Too few file descriptors available: %d",
- (int)curlim.rlim_cur);
+ LOG(error, "CRITICAL: Too few file descriptors available: %d", (int)curlim.rlim_cur);
throw std::runtime_error("CRITICAL: Too few file descriptors available");
}
#endif
@@ -109,19 +107,21 @@ FastS_FDispatchApp::Main()
getrlimit(RLIMIT_DATA, &curlim);
if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY &&
curlim.rlim_cur < (rlim_t) (400 * 1024 * 1024)) {
- if (curlim.rlim_max == (rlim_t)RLIM_INFINITY)
+ if (curlim.rlim_max == (rlim_t)RLIM_INFINITY) {
curlim.rlim_cur = (rlim_t) (400 * 1024 * 1024);
- else
+ } else {
curlim.rlim_cur = curlim.rlim_max;
+ }
setrlimit(RLIMIT_DATA, &curlim);
getrlimit(RLIMIT_DATA, &curlim);
}
- if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY)
+ if (curlim.rlim_cur != (rlim_t)RLIM_INFINITY) {
LOG(debug,
"VERBOSE: Max data segment size = %dM",
(int) ((curlim.rlim_cur + 512 * 1024) / (1024 * 1024)));
- else
+ } else {
LOG(debug, "VERBOSE: Max data segment size = unlimited");
+ }
#endif
if (!myfdispatch->Init()) {
@@ -139,7 +139,7 @@ FastS_FDispatchApp::Main()
if (myfdispatch->Failed()) {
throw std::runtime_error("myfdispatch->Failed()");
}
- FastOS_Thread::Sleep(1000);
+ std::this_thread::sleep_for(100ms);
#ifndef NO_MONITOR_LATENCY_CHECK
if (!myfdispatch->CheckTempFail())
break;
@@ -149,6 +149,9 @@ FastS_FDispatchApp::Main()
if (myfdispatch->Failed()) {
throw std::runtime_error("myfdispatch->Failed()");
}
+ } catch (std::runtime_error &e) {
+ LOG(warning, "got std::runtime_error during init: %s", e.what());
+ exitCode = 1;
} catch (std::exception &e) {
LOG(error, "got exception during init: %s", e.what());
exitCode = 1;
@@ -160,8 +163,7 @@ FastS_FDispatchApp::Main()
LOG(debug, "Deleting fdispatch");
myfdispatch.reset();
LOG(debug, "COMPLETION: Exiting");
- EV_STOPPING("fdispatch",
- (exitCode == 0) ? "clean shutdown" : "error");
+ EV_STOPPING("fdispatch", (exitCode == 0) ? "clean shutdown" : "error");
return exitCode;
}
@@ -181,25 +183,18 @@ FastS_FDispatchApp::GetOptions(int *exitCode)
LONGOPT_CONFIGID
};
int optIndex = 1; // Start with argument 1
- while ((c = GetOptLong("c:",
- optArgument,
- optIndex,
- longopts,
- &longopt_index)) != -1) {
+ while ((c = GetOptLong("c:", optArgument, optIndex, longopts, &longopt_index)) != -1) {
switch (c) {
case 0:
switch (longopt_index) {
case LONGOPT_CONFIGID:
break;
default:
- if (optArgument != NULL)
- LOG(info,
- "longopt %s with arg %s",
- longopts[longopt_index].name, optArgument);
- else
- LOG(info,
- "longopt %s",
- longopts[longopt_index].name);
+ if (optArgument != NULL) {
+ LOG(info, "longopt %s with arg %s", longopts[longopt_index].name, optArgument);
+ } else {
+ LOG(info, "longopt %s", longopts[longopt_index].name);
+ }
break;
}
break;
diff --git a/searchcore/src/vespa/searchcore/config/proton.def b/searchcore/src/vespa/searchcore/config/proton.def
index 45052c3f865..ffc69a18f17 100644
--- a/searchcore/src/vespa/searchcore/config/proton.def
+++ b/searchcore/src/vespa/searchcore/config/proton.def
@@ -171,12 +171,21 @@ grouping.sessionmanager.maxentries int default=500 restart
## Control of pruning interval to remove sessions that have timed out
grouping.sessionmanager.pruning.interval double default=1.0
+## Redundancy of documents.
+distribution.redundancy long default=1 restart
+
+## Searchable copies of the documents.
+distribution.searchablecopies long default=1 restart
+
## Minimum initial size for any per document tables.
-grow.initial int default=1024 restart
+grow.initial long default=1024 restart
+
## Grow factor in percent for any per document tables.
grow.factor int default=50 restart
+
## Constant added when growing any per document tables.
grow.add int default=1 restart
+
## The number of documents to amortize memory spike cost over
grow.numdocs int default=10000 restart
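
The two new distribution settings scale the initial sizes of the per-document tables in the three document sub-databases, as the documentsubdbcollection.cpp change below shows. A sketch of that arithmetic only, using the defaults above (not Vespa code):

    def initial_grow_sizes(grow_initial, redundancy, searchable_copies):
        # Mirrors the GrowStrategy sizing below: the "ready" sub-db holds the
        # searchable copies, "notready" holds the remaining redundant copies,
        # and "removed" is kept small but at least 1024 entries.
        ready = grow_initial * searchable_copies
        removed = max(1024, grow_initial // 100)
        not_ready = grow_initial * (redundancy - searchable_copies)
        return ready, removed, not_ready

    # Defaults (grow.initial=1024, redundancy=1, searchablecopies=1) give
    # ready=1024, removed=1024, not_ready=0.
    print(initial_grow_sizes(1024, 1, 1))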
diff --git a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp
index 541f7a107ca..f6fda5c0df7 100644
--- a/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp
+++ b/searchcore/src/vespa/searchcore/proton/server/documentsubdbcollection.cpp
@@ -49,13 +49,14 @@ DocumentSubDBCollection::DocumentSubDBCollection(
_bucketDB(),
_bucketDBHandler()
{
-
+ const ProtonConfig::Grow & growCfg = protonCfg.grow;
+ const ProtonConfig::Distribution & distCfg = protonCfg.distribution;
_bucketDB = std::make_shared<BucketDBOwner>();
_bucketDBHandler.reset(new bucketdb::BucketDBHandler(*_bucketDB));
- search::GrowStrategy attributeGrow(protonCfg.grow.initial,
- protonCfg.grow.factor,
- protonCfg.grow.add);
- size_t attributeGrowNumDocs(protonCfg.grow.numdocs);
+ search::GrowStrategy searchableGrowth(growCfg.initial * distCfg.searchablecopies, growCfg.factor, growCfg.add);
+ search::GrowStrategy removedGrowth(std::max(1024l, growCfg.initial/100), growCfg.factor, growCfg.add);
+ search::GrowStrategy notReadyGrowth(growCfg.initial * (distCfg.redundancy - distCfg.searchablecopies), growCfg.factor, growCfg.add);
+ size_t attributeGrowNumDocs(growCfg.numdocs);
size_t numSearcherThreads = protonCfg.numsearcherthreads;
StoreOnlyDocSubDB::Context context(owner,
@@ -74,7 +75,7 @@ DocumentSubDBCollection::DocumentSubDBCollection(
(StoreOnlyDocSubDB::Config(docTypeName,
"0.ready",
baseDir,
- attributeGrow,
+ searchableGrowth,
attributeGrowNumDocs,
_readySubDbId,
SubDbType::READY),
@@ -94,7 +95,7 @@ DocumentSubDBCollection::DocumentSubDBCollection(
(new StoreOnlyDocSubDB(StoreOnlyDocSubDB::Config(docTypeName,
"1.removed",
baseDir,
- attributeGrow,
+ removedGrowth,
attributeGrowNumDocs,
_remSubDbId,
SubDbType::REMOVED),
@@ -104,7 +105,7 @@ DocumentSubDBCollection::DocumentSubDBCollection(
(StoreOnlyDocSubDB::Config(docTypeName,
"2.notready",
baseDir,
- attributeGrow,
+ notReadyGrowth,
attributeGrowNumDocs,
_notReadySubDbId,
SubDbType::NOTREADY),
@@ -201,7 +202,7 @@ DocumentSubDBCollection::initViews(const DocumentDBConfig &configSnapshot,
void
-DocumentSubDBCollection::clearViews(void)
+DocumentSubDBCollection::clearViews()
{
for (auto subDb : _subDBs) {
subDb->clearViews();
@@ -210,7 +211,7 @@ DocumentSubDBCollection::clearViews(void)
void
-DocumentSubDBCollection::onReplayDone(void)
+DocumentSubDBCollection::onReplayDone()
{
for (auto subDb : _subDBs) {
subDb->onReplayDone();
@@ -228,7 +229,7 @@ DocumentSubDBCollection::onReprocessDone(SerialNum serialNum)
SerialNum
-DocumentSubDBCollection::getOldestFlushedSerial(void)
+DocumentSubDBCollection::getOldestFlushedSerial()
{
SerialNum lowest = -1;
for (auto subDb : _subDBs) {
@@ -239,7 +240,7 @@ DocumentSubDBCollection::getOldestFlushedSerial(void)
SerialNum
-DocumentSubDBCollection::getNewestFlushedSerial(void)
+DocumentSubDBCollection::getNewestFlushedSerial()
{
SerialNum highest = 0;
for (auto subDb : _subDBs) {
diff --git a/searchlib/src/tests/attribute/multivaluemapping/multivaluemapping_test.cpp b/searchlib/src/tests/attribute/multivaluemapping/multivaluemapping_test.cpp
index e78e180856b..48768da32c5 100644
--- a/searchlib/src/tests/attribute/multivaluemapping/multivaluemapping_test.cpp
+++ b/searchlib/src/tests/attribute/multivaluemapping/multivaluemapping_test.cpp
@@ -3,8 +3,6 @@
#include <vespa/log/log.h>
LOG_SETUP("multivaluemapping_test");
#include <vespa/vespalib/testkit/testapp.h>
-//#define DEBUG_MULTIVALUE_MAPPING
-//#define LOG_MULTIVALUE_MAPPING
#include <vespa/searchlib/attribute/multivaluemapping.h>
#include <algorithm>
#include <limits>
@@ -117,7 +115,7 @@ MultiValueMappingTest::testIndex64()
EXPECT_EQUAL(idx.alternative(), 0u);
EXPECT_EQUAL(idx.vectorIdx(), 6u);
EXPECT_EQUAL(idx.offset(), 1000u);
- EXPECT_EQUAL(idx.idx(), 0x3000003e8ull);
+ EXPECT_EQUAL(idx.idx(), 0x6000003e8ul);
}
{
Index64 idx(15, 1, 134217727);
@@ -125,11 +123,20 @@ MultiValueMappingTest::testIndex64()
EXPECT_EQUAL(idx.alternative(), 1u);
EXPECT_EQUAL(idx.vectorIdx(), 31u);
EXPECT_EQUAL(idx.offset(), 134217727u);
- EXPECT_EQUAL(idx.idx(), 0xf87ffffffull);
+ EXPECT_EQUAL(idx.idx(), 0x1f07fffffful);
}
{
- EXPECT_EQUAL(Index64::maxValues(), 1023u);
+ Index64 idx(3087, 1, 0xfffffffful);
+ EXPECT_EQUAL(idx.values(), 3087u);
+ EXPECT_EQUAL(idx.alternative(), 1u);
+ EXPECT_EQUAL(idx.vectorIdx(), (3087u << 1) + 1);
+ EXPECT_EQUAL(idx.offset(), 0xfffffffful);
+ EXPECT_EQUAL(idx.idx(), 0x181ffffffffful);
+ }
+ {
+ EXPECT_EQUAL(Index64::maxValues(), 4095u);
EXPECT_EQUAL(Index64::alternativeSize(), 2u);
+ EXPECT_EQUAL(Index64::offsetSize(), 0x1ul << 32);
}
}
@@ -160,9 +167,6 @@ MultiValueMappingTest::testSimpleSetAndGet()
} else {
EXPECT_EQUAL(idx.values(), Index::maxValues());
}
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info, "------------------------------------------------------------");
-#endif
}
EXPECT_TRUE(!mvm.hasKey(numKeys));
@@ -221,9 +225,6 @@ MultiValueMappingTest::testChangingValueCount()
// Increasing the value count for some keys
for (uint32_t valueCount = 1; valueCount <= maxCount; ++valueCount) {
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info, "########################### %u ##############################", valueCount);
-#endif
uint32_t lastValueCount = valueCount - 1;
// set values
for (uint32_t key = 0; key < numKeys; ++key) {
@@ -271,10 +272,6 @@ MultiValueMappingTest::checkReaders(MvMapping &mvm,
for (ReaderVector::iterator iter = readers.begin();
iter != readers.end(); ) {
if (iter->_endGen <= mvmGen) {
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info, "check and remove reader: start = %u, end = %u",
- iter->_startGen, iter->_endGen);
-#endif
for (uint32_t key = 0; key < iter->numKeys(); ++key) {
Index idx = iter->_indices[key];
uint32_t valueCount = iter->_expected[key].size();
@@ -321,11 +318,6 @@ MultiValueMappingTest::testHoldListAndGeneration()
generation_t mvmGen = 0u;
for (uint32_t valueCount = 1; valueCount < maxCount; ++valueCount) {
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info, "#################### count(%u) - gen(%u) ####################",
- valueCount, mvm.getGeneration());
-#endif
-
// check and remove readers
checkReaders(mvm, mvmGen, readers);
diff --git a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.cpp b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.cpp
index e8e21073323..6c031116e5e 100644
--- a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.cpp
+++ b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.cpp
@@ -2,12 +2,13 @@
#include <vespa/fastos/fastos.h>
#include <vespa/log/log.h>
-LOG_SETUP(".searchlib.attribute.multivaluemapping");
#include "multivaluemapping.h"
#include "multivaluemapping.hpp"
#include "attributevector.h"
#include "loadedenumvalue.h"
+LOG_SETUP(".searchlib.attribute.multivaluemapping");
+
namespace search {
using vespalib::GenerationHeldBase;
@@ -43,8 +44,7 @@ MultiValueMappingBaseBase::
computeNewSize(size_t used, size_t dead, size_t needed, size_t maxSize)
{
float growRatio = 1.5f;
- size_t newSize = static_cast<size_t>
- ((used - dead + needed) * growRatio);
+ size_t newSize = static_cast<size_t>((used - dead + needed) * growRatio);
if (newSize <= maxSize)
return newSize;
newSize = (used - dead + needed) + 1000000;
@@ -54,14 +54,14 @@ computeNewSize(size_t used, size_t dead, size_t needed, size_t maxSize)
return 0;
}
-MultiValueMappingBaseBase::Histogram::Histogram(size_t maxValues) :
+MultiValueMappingBaseBase::Histogram::Histogram(uint32_t maxValues) :
_maxValues(maxValues),
_histogram()
{
}
MultiValueMappingBaseBase::Histogram
-MultiValueMappingBaseBase::getEmptyHistogram(size_t maxValues) const
+MultiValueMappingBaseBase::getEmptyHistogram(uint32_t maxValues) const
{
return Histogram(maxValues);
}
@@ -81,7 +81,7 @@ MultiValueMappingBaseBase::getHistogram(AttributeVector::ReaderBase &reader)
void
-MultiValueMappingBaseBase::clearPendingCompact(void)
+MultiValueMappingBaseBase::clearPendingCompact()
{
if (!_pendingCompact || _pendingCompactVectorVector ||
!_pendingCompactSingleVector.empty())
@@ -109,7 +109,7 @@ public:
}
virtual
- ~MultiValueMappingHeldVector(void)
+ ~MultiValueMappingHeldVector()
{
_mvmb.doneHoldVector(_idx);
}
@@ -119,11 +119,6 @@ public:
template <typename I>
void MultiValueMappingBase<I>::doneHoldVector(Index idx)
{
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info,
- "free vector: idx.values() = %u, idx.alternative() = %u",
- idx.values(), idx.alternative());
-#endif
clearVector(idx);
if (idx.values() < Index::maxValues()) {
_singleVectorsStatus[idx.vectorIdx()] = FREE;
@@ -139,13 +134,13 @@ MultiValueMappingBase<I>::getMemoryUsage() const
{
MemoryUsage retval = _indices.getMemoryUsage();
- for (uint32_t i = 0; i < _singleVectorsStatus.size(); ++i) {
+ for (size_t i = 0; i < _singleVectorsStatus.size(); ++i) {
if (_singleVectorsStatus[i] == HOLD)
continue;
const MemoryUsage & memUsage(getSingleVectorUsage(i));
retval.merge(memUsage);
}
- for (uint32_t i = 0; i < _vectorVectorsStatus.size(); ++i) {
+ for (size_t i = 0; i < _vectorVectorsStatus.size(); ++i) {
if (_vectorVectorsStatus[i] == HOLD)
continue;
const MemoryUsage & memUsage(getVectorVectorUsage(i));
@@ -160,12 +155,12 @@ AddressSpace
MultiValueMappingBase<I>::getAddressSpaceUsage() const
{
size_t addressSpaceUsed = 0;
- for (uint32_t i = 0; i < _singleVectorsStatus.size(); ++i) {
+ for (size_t i = 0; i < _singleVectorsStatus.size(); ++i) {
if (_singleVectorsStatus[i] == ACTIVE) {
addressSpaceUsed = std::max(addressSpaceUsed, getSingleVectorAddressSpaceUsed(i));
}
}
- for (uint32_t i = 0; i < _vectorVectorsStatus.size(); ++i) {
+ for (size_t i = 0; i < _vectorVectorsStatus.size(); ++i) {
if (_vectorVectorsStatus[i] == ACTIVE) {
addressSpaceUsed = std::max(addressSpaceUsed, getVectorVectorAddressSpaceUsed(i));
}
diff --git a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.h b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.h
index 3134f826774..a191e40f59f 100644
--- a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.h
+++ b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.h
@@ -34,58 +34,25 @@ public:
: _idx(0)
{
_idx += static_cast<T>(values_) << (NUM_ALT_BITS+NUM_OFFSET_BITS);
- _idx += static_cast<T>((alternative_) &
- ((1<<NUM_ALT_BITS) - 1)) << NUM_OFFSET_BITS;
+ _idx += static_cast<T>((alternative_) & ((1<<NUM_ALT_BITS) - 1)) << NUM_OFFSET_BITS;
_idx += offset_;
}
- uint32_t
- values(void) const
- {
- return _idx >> (NUM_ALT_BITS+NUM_OFFSET_BITS);
- }
-
- uint32_t
- alternative(void) const
- {
- return (_idx >> NUM_OFFSET_BITS) & ((1<<NUM_ALT_BITS) - 1);
- }
+ uint32_t values() const { return _idx >> (NUM_ALT_BITS+NUM_OFFSET_BITS); }
+ uint32_t alternative() const { return (_idx >> NUM_OFFSET_BITS) & ((1<<NUM_ALT_BITS) - 1); }
// values and alternative combined
- uint32_t
- vectorIdx(void) const
- {
- return _idx >> NUM_OFFSET_BITS;
- }
-
- uint32_t offset(void) const
- {
- return (_idx & ((1u << NUM_OFFSET_BITS) - 1));
- }
-
- T idx() const { return _idx; }
-
- static uint32_t
- maxValues(void)
- {
- return (1 << NUM_VALUE_BITS) - 1;
- }
+ uint32_t vectorIdx() const { return _idx >> NUM_OFFSET_BITS; }
+ uint64_t offset() const { return (_idx & ((1ul << NUM_OFFSET_BITS) - 1)); }
+ T idx() const { return _idx; }
- static uint32_t
- alternativeSize(void)
- {
- return 1 << NUM_ALT_BITS;
- }
-
- static T
- offsetSize(void)
- {
- return 1 << (NUM_OFFSET_BITS);
- }
+ static uint32_t maxValues() { return (1 << NUM_VALUE_BITS) - 1; }
+ static uint32_t alternativeSize() { return 1 << NUM_ALT_BITS; }
+ static uint64_t offsetSize() { return 1ul << (NUM_OFFSET_BITS); }
};
typedef Index<uint32_t, 27,4,1> Index32;
-typedef Index<uint64_t, 31,10,1> Index64;
+typedef Index<uint64_t, 32,12,1> Index64;
template <typename T, typename I>
struct MVMTemplateArg {
@@ -103,33 +70,21 @@ public:
_dead(0),
_wantCompact(false),
_usage()
- {
- }
+ { }
- uint32_t used() const { return _used; }
- uint32_t dead() const { return _dead; }
+ size_t used() const { return _used; }
+ size_t dead() const { return _dead; }
void incUsed(uint32_t inc) { _used += inc; }
void incDead(uint32_t inc) { _dead += inc; }
-
- void
- setWantCompact(void)
- {
- _wantCompact = true;
- }
-
- bool
- getWantCompact(void) const
- {
- return _wantCompact;
- }
-
+ void setWantCompact() { _wantCompact = true; }
+ bool getWantCompact() const { return _wantCompact; }
MemoryUsage & getUsage() { return _usage; }
const MemoryUsage & getUsage() const { return _usage; }
protected:
void reset() { _used = 0; _dead = 0; }
private:
- uint32_t _used;
- uint32_t _dead;
+ size_t _used;
+ size_t _dead;
bool _wantCompact;
MemoryUsage _usage;
};
@@ -141,13 +96,13 @@ public:
class Histogram
{
private:
- typedef vespalib::hash_map<uint32_t, uint32_t> HistogramM;
+ typedef vespalib::hash_map<uint32_t, size_t> HistogramM;
public:
typedef HistogramM::const_iterator const_iterator;
- Histogram(size_t maxValues);
- uint32_t & operator [] (uint32_t i) { return _histogram[std::min(i, _maxValues)]; }
+ Histogram(uint32_t maxValues);
+ size_t & operator [] (uint32_t i) { return _histogram[std::min(i, _maxValues)]; }
const_iterator begin() const { return _histogram.begin(); }
- const_iterator end() const { return _histogram.end(); }
+ const_iterator end() const { return _histogram.end(); }
private:
uint32_t _maxValues;
HistogramM _histogram;
@@ -164,7 +119,7 @@ protected:
};
typedef AttributeVector::generation_t generation_t;
- typedef vespalib::Array<VectorStatus> StatusVector;
+ typedef std::vector<VectorStatus> StatusVector;
typedef vespalib::GenerationHolder GenerationHolder;
// active -> hold
@@ -177,7 +132,7 @@ protected:
std::set<uint32_t> _pendingCompactSingleVector;
bool _pendingCompactVectorVector;
bool _pendingCompact;
- Histogram getEmptyHistogram(size_t maxValues) const;
+ Histogram getEmptyHistogram(uint32_t maxValues) const;
virtual const MemoryUsage & getSingleVectorUsage(size_t i) const = 0;
virtual const MemoryUsage & getVectorVectorUsage(size_t i) const = 0;
virtual size_t getSingleVectorAddressSpaceUsed(size_t i) const = 0;
@@ -192,22 +147,15 @@ public:
Histogram getHistogram(AttributeVector::ReaderBase & reader) const;
size_t getTotalValueCnt() const { return _totalValueCnt; }
static void failNewSize(uint64_t minNewSize, uint64_t maxSize);
+ void clearPendingCompact();
- void
- clearPendingCompact(void);
+ static size_t computeNewSize(size_t used, size_t dead, size_t needed, size_t maxSize);
- static size_t
- computeNewSize(size_t used, size_t dead, size_t needed, size_t maxSize);
-
- void
- transferHoldLists(generation_t generation)
- {
+ void transferHoldLists(generation_t generation) {
_genHolder.transferHoldLists(generation);
}
- void
- trimHoldLists(generation_t firstUsed)
- {
+ void trimHoldLists(generation_t firstUsed) {
_genHolder.trimHoldLists(firstUsed);
}
};
@@ -238,71 +186,36 @@ private:
public:
using IndexCopyVector = vespalib::Array<Index, vespalib::DefaultAlloc>;
- void
- doneHoldVector(Index idx);
+ void doneHoldVector(Index idx);
- virtual Histogram getEmptyHistogram() const override {
+ Histogram getEmptyHistogram() const override {
return MultiValueMappingBaseBase::getEmptyHistogram(Index::maxValues());
}
- virtual MemoryUsage getMemoryUsage() const override;
-
+ MemoryUsage getMemoryUsage() const override;
AddressSpace getAddressSpaceUsage() const;
+ size_t getNumKeys() const { return _indices.size(); }
+ size_t getCapacityKeys() const { return _indices.capacity(); }
- size_t getNumKeys(void) const
- {
- return _indices.size();
- }
-
- size_t getCapacityKeys(void) const
- {
- return _indices.capacity();
- }
-
- IndexCopyVector
- getIndicesCopy() const
- {
+ IndexCopyVector getIndicesCopy() const {
uint32_t size = _committedDocIdLimit;
assert(size <= _indices.size());
- return std::move(IndexCopyVector(&_indices[0], &_indices[0] + size));
+ return IndexCopyVector(&_indices[0], &_indices[0] + size);
}
- bool
- hasKey(uint32_t key) const
- {
- return key < _indices.size();
- }
-
- bool
- hasReaderKey(uint32_t key) const
- {
+ bool hasReaderKey(uint32_t key) const {
return key < _committedDocIdLimit && key < _indices.size();
}
- bool
- isFull(void) const
- {
- return _indices.isFull();
- }
-
- static size_t
- maxValues(void)
- {
- return Index::maxValues();
- }
-
- void
- addKey(uint32_t & key);
-
- void
- shrinkKeys(uint32_t newSize);
-
- void
- clearDocs(uint32_t lidLow, uint32_t lidLimit, AttributeVector &v);
-
+ bool hasKey(uint32_t key) const { return key < _indices.size(); }
+ bool isFull() const { return _indices.isFull(); }
+ void addKey(uint32_t & key);
+ void shrinkKeys(uint32_t newSize);
+ void clearDocs(uint32_t lidLow, uint32_t lidLimit, AttributeVector &v);
void holdElem(Index idx, size_t size);
-
virtual void doneHoldElem(Index idx) = 0;
+
+ static size_t maxValues() { return Index::maxValues(); }
};
extern template class MultiValueMappingBase<multivalue::Index32>;
@@ -314,18 +227,14 @@ class MultiValueMappingFallbackVectorHold
{
V _hold;
public:
- MultiValueMappingFallbackVectorHold(size_t size,
- V &rhs)
+ MultiValueMappingFallbackVectorHold(size_t size, V &rhs)
: vespalib::GenerationHeldBase(size),
_hold()
{
_hold.swap(rhs);
}
- virtual
- ~MultiValueMappingFallbackVectorHold(void)
- {
- }
+ virtual ~MultiValueMappingFallbackVectorHold() { }
};
@@ -337,14 +246,14 @@ public:
typedef vespalib::Array<VT, vespalib::DefaultAlloc> VectorBase;
typedef MultiValueMappingFallbackVectorHold<VectorBase> FallBackHold;
MultiValueMappingVector();
- MultiValueMappingVector(uint32_t n);
+ MultiValueMappingVector(size_t n);
MultiValueMappingVector(const MultiValueMappingVector & rhs);
MultiValueMappingVector &
operator=(const MultiValueMappingVector & rhs);
~MultiValueMappingVector();
- void reset(uint32_t n);
- uint32_t remaining() const { return this->size() - used(); }
+ void reset(size_t n);
+ size_t remaining() const { return this->size() - used(); }
void swapVector(MultiValueMappingVector & rhs);
vespalib::GenerationHeldBase::UP
@@ -391,7 +300,7 @@ private:
virtual const MemoryUsage & getVectorVectorUsage(size_t i) const override;
virtual size_t getSingleVectorAddressSpaceUsed(size_t i) const override;
virtual size_t getVectorVectorAddressSpaceUsed(size_t i) const override;
- void initVectors(uint32_t initSize);
+ void initVectors(size_t initSize);
void initVectors(const Histogram & initCapacity);
bool getValidIndex(Index & newIdx, uint32_t numValues);
@@ -420,7 +329,7 @@ private:
vec.incDead(numValues);
vec.getUsage().incDeadBytes(numValues * sizeof(T));
}
- void swapVector(SingleVector & vec, uint32_t initSize) {
+ void swapVector(SingleVector & vec, size_t initSize) {
SingleVector(initSize).swapVector(vec);
vec.getUsage().setAllocatedBytes(initSize * sizeof(T));
}
@@ -433,7 +342,7 @@ private:
void incDead(VectorVector & vec) {
vec.incDead(1);
}
- void swapVector(VectorVector & vec, uint32_t initSize) {
+ void swapVector(VectorVector & vec, size_t initSize) {
VectorVector(initSize).swapVector(vec);
vec.getUsage().setAllocatedBytes(initSize * sizeof(VectorBase));
}
@@ -443,13 +352,13 @@ public:
MultiValueMappingT(uint32_t &committedDocIdLimit,
const GrowStrategy & gs = GrowStrategy());
MultiValueMappingT(uint32_t &committedDocIdLimit,
- uint32_t numKeys, uint32_t initSize = 0,
+ uint32_t numKeys, size_t initSize = 0,
const GrowStrategy & gs = GrowStrategy());
MultiValueMappingT(uint32_t &committedDocIdLimit,
uint32_t numKeys, const Histogram & initCapacity,
const GrowStrategy & gs = GrowStrategy());
~MultiValueMappingT();
- void reset(uint32_t numKeys, uint32_t initSize = 0);
+ void reset(uint32_t numKeys, size_t initSize = 0);
void reset(uint32_t numKeys, const Histogram & initCapacity);
uint32_t get(uint32_t key, std::vector<T> & buffer) const;
template <typename BufferType>
@@ -498,11 +407,6 @@ public:
bool hasWeights);
virtual void doneHoldElem(Index idx) override;
-
-#ifdef DEBUG_MULTIVALUE_MAPPING
- void printContent() const;
- void printVectorVectors() const;
-#endif
};
//-----------------------------------------------------------------------------
@@ -521,7 +425,7 @@ MultiValueMappingVector<VT>::~MultiValueMappingVector()
}
template <typename VT>
-MultiValueMappingVector<VT>::MultiValueMappingVector(uint32_t n)
+MultiValueMappingVector<VT>::MultiValueMappingVector(size_t n)
: VectorBase(),
MultiValueMappingVectorBaseBase()
{
@@ -549,7 +453,7 @@ MultiValueMappingVector<VT>::operator=(const MultiValueMappingVector & rhs)
template <typename VT>
void
-MultiValueMappingVector<VT>::reset(uint32_t n)
+MultiValueMappingVector<VT>::reset(size_t n)
{
this->resize(n);
MultiValueMappingVectorBaseBase::reset();
@@ -586,9 +490,9 @@ MultiValueMappingVector<VT>::fallbackResize(uint64_t newSize)
template <typename T, typename I>
void
-MultiValueMappingT<T, I>::initVectors(uint32_t initSize)
+MultiValueMappingT<T, I>::initVectors(size_t initSize)
{
- for (uint32_t i = 0; i < this->_singleVectorsStatus.size(); ++i) {
+ for (size_t i = 0; i < this->_singleVectorsStatus.size(); ++i) {
if (i % Index::alternativeSize() == 0) {
swapVector(_singleVectors[i], initSize);
this->_singleVectorsStatus[i] = MultiValueMappingBaseBase::ACTIVE;
@@ -597,7 +501,7 @@ MultiValueMappingT<T, I>::initVectors(uint32_t initSize)
this->_singleVectorsStatus[i] = MultiValueMappingBaseBase::FREE;
}
}
- for (uint32_t i = 0; i < this->_vectorVectorsStatus.size(); ++i) {
+ for (size_t i = 0; i < this->_vectorVectorsStatus.size(); ++i) {
if (i % Index::alternativeSize() == 0) {
swapVector(_vectorVectors[i], initSize);
this->_vectorVectorsStatus[i] = MultiValueMappingBaseBase::ACTIVE;
@@ -612,23 +516,17 @@ template <typename T, typename I>
void
MultiValueMappingT<T, I>::initVectors(const Histogram &initCapacity)
{
- for (typename Histogram::const_iterator it(initCapacity.begin()), mt(initCapacity.end()); it != mt; ++it) {
- uint32_t valueCnt = it->first;
- uint64_t numEntries = it->second;
+ for (const auto & entry : initCapacity) {
+ uint32_t valueCnt = entry.first;
+ uint64_t numEntries = entry.second;
if (valueCnt != 0 && valueCnt < Index::maxValues()) {
uint64_t maxSize = Index::offsetSize() * valueCnt;
- if (maxSize > std::numeric_limits<uint32_t>::max()) {
- maxSize = std::numeric_limits<uint32_t>::max();
- maxSize -= (maxSize % valueCnt);
- }
if (numEntries * valueCnt > maxSize) {
failNewSize(numEntries * valueCnt, maxSize);
}
swapVector(_singleVectors[valueCnt * 2], valueCnt * numEntries);
} else if (valueCnt == Index::maxValues()) {
uint64_t maxSize = Index::offsetSize();
- if (maxSize > std::numeric_limits<uint32_t>::max())
- maxSize = std::numeric_limits<uint32_t>::max();
if (numEntries > maxSize) {
failNewSize(numEntries, maxSize);
}
@@ -651,7 +549,7 @@ MultiValueMappingT<T, I>::getValidIndex(Index &newIdx, uint32_t numValues)
return false;
}
- uint32_t used = active.first->used();
+ size_t used = active.first->used();
assert(used % numValues == 0);
incUsed(*active.first, numValues);
newIdx = Index(active.second.values(), active.second.alternative(),
@@ -664,7 +562,7 @@ MultiValueMappingT<T, I>::getValidIndex(Index &newIdx, uint32_t numValues)
return false;
}
- uint32_t used = active.first->used();
+ size_t used = active.first->used();
incUsed(*active.first, numValues);
(*active.first)[used].resize(numValues);
newIdx = Index(active.second.values(), active.second.alternative(),
@@ -687,9 +585,6 @@ compactSingleVector(SingleVectorPtr &activeVector,
SingleVectorPtr freeVector =
getSingleVector(valueCnt, MultiValueMappingBaseBase::FREE);
if (freeVector.first == NULL) {
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(warning, "did not find any free '%u-vector'", valueCnt);
-#endif
uint64_t dead = activeVector.first->dead();
uint64_t fallbackNewSize = newSize + dead * valueCnt + 1024 * valueCnt;
if (fallbackNewSize > maxSize)
@@ -709,21 +604,11 @@ compactSingleVector(SingleVectorPtr &activeVector,
return;
}
swapVector(*freeVector.first, newSize);
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info,
- "compacting from '%u-vector(%u)' "
- "(s = %u, u = %u, d = %u) to "
- "'%u-vector(%u)' (s = %u)",
- valueCnt, activeVector.second.alternative(),
- activeVector.first->size(),
- activeVector.first->used() , activeVector.first->dead(),
- valueCnt, freeVector.second.alternative(), newSize);
-#endif
uint32_t activeVectorIdx = activeVector.second.vectorIdx();
- for (uint32_t i = 0; i < this->_indices.size(); ++i) {
+ for (size_t i = 0; i < this->_indices.size(); ++i) {
Index & idx = this->_indices[i];
if (activeVectorIdx == idx.vectorIdx()) {
- for (uint32_t j = idx.offset() * idx.values(),
+ for (uint64_t j = idx.offset() * idx.values(),
k = freeVector.first->used();
j < (idx.offset() + 1) * idx.values() &&
k < freeVector.first->used() + valueCnt; ++j, ++k)
@@ -759,9 +644,6 @@ compactVectorVector(VectorVectorPtr &activeVector,
VectorVectorPtr freeVector =
getVectorVector(MultiValueMappingBaseBase::FREE);
if (freeVector.first == NULL) {
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(error, "did not find any free vectorvector");
-#endif
uint64_t dead = activeVector.first->dead();
uint64_t fallbackNewSize = newSize + dead + 1024;
if (fallbackNewSize > maxSize)
@@ -780,24 +662,15 @@ compactVectorVector(VectorVectorPtr &activeVector,
return;
}
swapVector(*freeVector.first, newSize);
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info,
- "compacting from 'vectorvector(%u)' "
- "(s = %u, u = %u, d = %u) to "
- "'vectorvector(%u)' (s = %u)",
- activeVector.second.alternative(), activeVector.first->size(),
- activeVector.first->used(), activeVector.first->dead(),
- freeVector.second.alternative(), newSize);
-#endif
uint32_t activeVectorIdx = activeVector.second.vectorIdx();
- for (uint32_t i = 0; i < this->_indices.size(); ++i) {
+ for (size_t i = 0; i < this->_indices.size(); ++i) {
Index & idx = this->_indices[i];
if (activeVectorIdx == idx.vectorIdx()) {
- uint32_t activeOffset = idx.offset();
- uint32_t vecSize = (*activeVector.first)[activeOffset].size();
- uint32_t freeOffset = freeVector.first->used();
+ uint64_t activeOffset = idx.offset();
+ uint64_t vecSize = (*activeVector.first)[activeOffset].size();
+ uint64_t freeOffset = freeVector.first->used();
(*freeVector.first)[freeOffset].resize(vecSize);
- for (uint32_t j = 0; j < vecSize; ++j) {
+ for (uint64_t j = 0; j < vecSize; ++j) {
(*freeVector.first)[freeOffset][j] =
(*activeVector.first)[activeOffset][j];
}
@@ -820,7 +693,7 @@ typename MultiValueMappingT<T, I>::SingleVectorPtr
MultiValueMappingT<T, I>::getSingleVector(uint32_t numValues,
VectorStatus status)
{
- for (uint32_t i = numValues * Index::alternativeSize();
+ for (size_t i = numValues * Index::alternativeSize();
i < (numValues + 1) * Index::alternativeSize(); ++i)
{
if (this->_singleVectorsStatus[i] == status) {
@@ -837,7 +710,7 @@ template <typename T, typename I>
typename MultiValueMappingT<T, I>::VectorVectorPtr
MultiValueMappingT<T, I>::getVectorVector(VectorStatus status)
{
- for (uint32_t i = 0; i < _vectorVectors.size(); ++i) {
+ for (size_t i = 0; i < _vectorVectors.size(); ++i) {
if (this->_vectorVectorsStatus[i] == status) {
return VectorVectorPtr(&_vectorVectors[i],
Index(Index::maxValues(), i, 0));
@@ -875,7 +748,7 @@ MultiValueMappingT<T, I>::MultiValueMappingT(uint32_t &committedDocIdLimit,
template <typename T, typename I>
MultiValueMappingT<T, I>::MultiValueMappingT(uint32_t &committedDocIdLimit,
uint32_t numKeys,
- uint32_t initSize,
+ size_t initSize,
const GrowStrategy & gs)
: MultiValueMappingBase<I>(committedDocIdLimit, numKeys, gs),
_singleVectors((Index::maxValues()) * Index::alternativeSize()),
@@ -905,7 +778,7 @@ MultiValueMappingT<T, I>::~MultiValueMappingT()
template <typename T, typename I>
void
-MultiValueMappingT<T, I>::reset(uint32_t numKeys, uint32_t initSize)
+MultiValueMappingT<T, I>::reset(uint32_t numKeys, size_t initSize)
{
MultiValueMappingBase<I>::reset(numKeys);
initVectors(initSize);
@@ -941,7 +814,7 @@ MultiValueMappingT<T, I>::get(uint32_t key,
uint32_t available = idx.values();
uint32_t num2Read = std::min(available, sz);
const SingleVector & vec = _singleVectors[idx.vectorIdx()];
- for (uint32_t i = 0, j = idx.offset() * idx.values();
+ for (uint64_t i = 0, j = idx.offset() * idx.values();
i < num2Read && j < (idx.offset() + 1) * idx.values(); ++i, ++j) {
buffer[i] = static_cast<BufferType>(vec[j]);
}
@@ -970,7 +843,7 @@ MultiValueMappingT<T, I>::get(uint32_t key, uint32_t index, T & value) const
if (index >= idx.values()) {
return false;
}
- uint32_t offset = idx.offset() * idx.values() + index;
+ uint64_t offset = idx.offset() * idx.values() + index;
value = _singleVectors[idx.vectorIdx()][offset];
return true;
} else {
@@ -1020,40 +893,20 @@ MultiValueMappingT<T, I>::set(uint32_t key,
if (!getValidIndex(newIdx, numValues)) {
abort();
}
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info,
- "newIdx: values = %u, alternative = %u, offset = %u",
- newIdx.values(), newIdx.alternative(), newIdx.offset());
-#endif
if (newIdx.values() != 0 && newIdx.values() < Index::maxValues()) {
SingleVector & vec = _singleVectors[newIdx.vectorIdx()];
- for (uint32_t i = newIdx.offset() * newIdx.values(), j = 0;
+ for (uint64_t i = newIdx.offset() * newIdx.values(), j = 0;
i < (newIdx.offset() + 1) * newIdx.values() && j < numValues;
++i, ++j)
{
vec[i] = values[j];
}
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info,
- "inserted in '%u-vector(%u)': "
- "key = %u, size = %u, used = %u, dead = %u, offset = %u",
- newIdx.values(), newIdx.alternative(),
- key, vec.size(),
- vec.used(), vec.dead(), newIdx.offset() * newIdx.values());
-#endif
} else if (newIdx.values() == Index::maxValues()) {
VectorVector & vec = _vectorVectors[newIdx.alternative()];
for (uint32_t i = 0; i < numValues; ++i) {
vec[newIdx.offset()][i] = values[i];
}
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info,
- "inserted %u values in 'vector-vector(%u)': "
- "key = %u, size = %u, used = %u, dead = %u, offset = %u",
- numValues, newIdx.alternative(),
- key, vec.size(), vec.used(), vec.dead(), newIdx.offset());
-#endif
}
std::atomic_thread_fence(std::memory_order_release);
@@ -1065,25 +918,12 @@ MultiValueMappingT<T, I>::set(uint32_t key,
SingleVector & vec = _singleVectors[oldIdx.vectorIdx()];
incDead(vec, oldIdx.values());
this->decValueCnt(oldIdx.values());
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info,
- "mark space dead in '%u-vector(%u)': "
- "size = %u, used = %u, dead = %u",
- oldIdx.values(), oldIdx.alternative(),
- vec.size(), vec.used(), vec.dead());
-#endif
} else if (oldIdx.values() == Index::maxValues()) {
VectorVector & vec = _vectorVectors[oldIdx.alternative()];
uint32_t oldNumValues = vec[oldIdx.offset()].size();
incDead(vec);
this->decValueCnt(oldNumValues);
holdElem(oldIdx, sizeof(VectorBase) + sizeof(T) * oldNumValues);
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info,
- "mark space dead in 'vector-vector(%u)': "
- "size = %u, used = %u, dead = %u",
- oldIdx.alternative(), vec.size(), vec.used(), vec.dead());
-#endif
}
}
@@ -1109,7 +949,7 @@ MultiValueMappingT<T, I>::replace(uint32_t key,
if (currIdx.values() != 0 && currIdx.values() < Index::maxValues()) {
SingleVector & vec = _singleVectors[currIdx.vectorIdx()];
- for (uint32_t i = currIdx.offset() * currIdx.values(), j = 0;
+ for (uint64_t i = currIdx.offset() * currIdx.values(), j = 0;
i < (currIdx.offset() + 1) * currIdx.values() && j < numValues;
++i, ++j)
{
@@ -1216,9 +1056,9 @@ MultiValueMappingT<T, I>::enoughCapacity(const Histogram & capacityNeeded)
{
if (_pendingCompact)
return false;
- for (typename Histogram::const_iterator it(capacityNeeded.begin()), mt(capacityNeeded.end()); it != mt; ++it) {
- uint32_t valueCnt = it->first;
- uint64_t numEntries = it->second;
+ for (const auto & entry : capacityNeeded) {
+ uint32_t valueCnt = entry.first;
+ uint64_t numEntries = entry.second;
if (valueCnt < Index::maxValues()) {
SingleVectorPtr active =
getSingleVector(valueCnt, MultiValueMappingBaseBase::ACTIVE);
@@ -1240,24 +1080,18 @@ template <typename T, typename I>
void
MultiValueMappingT<T, I>::performCompaction(Histogram & capacityNeeded)
{
-#ifdef LOG_MULTIVALUE_MAPPING
- LOG(info, "performCompaction()");
-#endif
if (_pendingCompact) {
// Further populate histogram to ensure pending compaction being done.
- for (std::set<uint32_t>::const_iterator
- pit(_pendingCompactSingleVector.begin()),
- pmt(_pendingCompactSingleVector.end());
- pit != pmt; ++pit) {
- (void) capacityNeeded[*pit];
+ for (uint32_t value : _pendingCompactSingleVector) {
+ (void) capacityNeeded[value];
}
if (_pendingCompactVectorVector) {
(void) capacityNeeded[Index::maxValues()];
}
}
- for (typename Histogram::const_iterator it(capacityNeeded.begin()), mt(capacityNeeded.end()); it != mt; ++it) {
- uint32_t valueCnt = it->first;
- uint64_t numEntries = it->second;
+ for (const auto & entry : capacityNeeded) {
+ uint32_t valueCnt = entry.first;
+ uint64_t numEntries = entry.second;
if (valueCnt != 0 && valueCnt < Index::maxValues()) {
SingleVectorPtr active =
getSingleVector(valueCnt, MultiValueMappingBaseBase::ACTIVE);
@@ -1266,10 +1100,6 @@ MultiValueMappingT<T, I>::performCompaction(Histogram & capacityNeeded)
_pendingCompactSingleVector.find(valueCnt) !=
_pendingCompactSingleVector.end()) {
uint64_t maxSize = Index::offsetSize() * valueCnt;
- if (maxSize > std::numeric_limits<uint32_t>::max()) {
- maxSize = std::numeric_limits<uint32_t>::max();
- maxSize -= (maxSize % valueCnt);
- }
uint64_t newSize = this->computeNewSize(active.first->used(),
active.first->dead(),
valueCnt * numEntries,
@@ -1284,8 +1114,6 @@ MultiValueMappingT<T, I>::performCompaction(Histogram & capacityNeeded)
if (active.first->remaining() < numEntries ||
_pendingCompactVectorVector) {
uint64_t maxSize = Index::offsetSize();
- if (maxSize > std::numeric_limits<uint32_t>::max())
- maxSize = std::numeric_limits<uint32_t>::max();
uint64_t newSize = this->computeNewSize(active.first->used(),
active.first->dead(),
numEntries,
@@ -1298,41 +1126,6 @@ MultiValueMappingT<T, I>::performCompaction(Histogram & capacityNeeded)
assert(!_pendingCompact);
}
-#ifdef DEBUG_MULTIVALUE_MAPPING
-template <typename T, typename I>
-void
-MultiValueMappingT<T, I>::printContent() const
-{
- for (uint32_t key = 0; key < this->_indices.size(); ++key) {
- std::vector<T> buffer(getValueCount(key));
- get(key, buffer);
- std::cout << "key = " << key << ", count = " <<
- getValueCount(key) << ": ";
- for (uint32_t i = 0; i < buffer.size(); ++i) {
- std::cout << buffer[i] << ", ";
- }
- std::cout << '\n';
- }
-}
-
-template <typename T, typename I>
-void
-MultiValueMappingT<T, I>::printVectorVectors() const
-{
- for (uint32_t i = 0; i < _vectorVectors.size(); ++i) {
- std::cout << "Alternative " << i << '\n';
- for (uint32_t j = 0; j < _vectorVectors[i].size(); ++j) {
- std::cout << "Vector " << j << ": [";
- uint32_t size = _vectorVectors[i][j].size();
- for (uint32_t k = 0; k < size; ++k) {
- std::cout << _vectorVectors[i][j][k] << ", ";
- }
- std::cout << "]\n";
- }
- }
-}
-#endif
-
extern template class MultiValueMappingFallbackVectorHold<
MultiValueMappingVector<multivalue::Value<int8_t> >::VectorBase >;
extern template class MultiValueMappingFallbackVectorHold<
@@ -1495,4 +1288,3 @@ extern template class MultiValueMappingT<
multivalue::Index64>;
} // namespace search
-
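
The uint32_t-to-uint64_t/size_t widenings in the hunks above (loop indices, offsets, initSize) and the removal of the std::numeric_limits<uint32_t>::max() clamps all guard against the same pitfall: idx.offset() * idx.values() can exceed 32 bits once the vectors grow large, so the product must be formed in a 64-bit type. The Java sketch below reproduces the pitfall with made-up numbers; it is an illustration only and not part of this patch.

    // Hypothetical numbers, chosen only to show why the product must be widened.
    public class OffsetOverflowDemo {
        public static void main(String[] args) {
            int offset = 70_000;   // made-up entry offset
            int values = 65_536;   // made-up values per entry

            long wrong = offset * values;         // 32-bit product wraps to 292552704
            long right = (long) offset * values;  // widen first, as the patch does: 4587520000

            System.out.println(wrong + " vs " + right);
        }
    }
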
diff --git a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.hpp b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.hpp
index a1e06ee4759..9a42a708b5a 100644
--- a/searchlib/src/vespa/searchlib/attribute/multivaluemapping.hpp
+++ b/searchlib/src/vespa/searchlib/attribute/multivaluemapping.hpp
@@ -5,7 +5,6 @@
namespace search
{
-
template <typename T, typename I>
template <typename V, class Saver>
uint32_t
@@ -36,8 +35,9 @@ MultiValueMappingT<T, I>::fillMapped(AttributeVector::ReaderBase &attrReader,
indices.push_back(T(map[e], weight));
saver.save(e, doc, vci, weight);
}
- if (maxvc < indices.size())
+ if (maxvc < indices.size()) {
maxvc = indices.size();
+ }
set(doc, indices);
}
assert(di == numValues);
@@ -45,6 +45,5 @@ MultiValueMappingT<T, I>::fillMapped(AttributeVector::ReaderBase &attrReader,
return maxvc;
}
-
} // namespace search
diff --git a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
index f55dd06f576..732e844c757 100644
--- a/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
+++ b/searchlib/src/vespa/searchlib/docstore/logdatastore.cpp
@@ -465,8 +465,9 @@ void LogDataStore::compactFile(FileId fileId)
flushFileAndWait(guard, compactTo, 0);
compactTo.freeze();
}
+ compacter.reset();
- std::this_thread::sleep_for(10s);
+ std::this_thread::sleep_for(1s);
uint64_t currentGeneration;
{
LockGuard guard(_updateLock);
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java b/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java
index 87ea7ffdaf0..edfa198416e 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java
+++ b/simplemetrics/src/main/java/com/yahoo/metrics/simple/Gauge.java
@@ -14,7 +14,7 @@ import edu.umd.cs.findbugs.annotations.Nullable;
* @author steinar
*/
@Beta
-public final class Gauge {
+public class Gauge {
@Nullable
private final Point defaultPosition;
private final String name;
diff --git a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java b/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java
index a0b94f1e571..c45d50db065 100644
--- a/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java
+++ b/simplemetrics/src/main/java/com/yahoo/metrics/simple/MetricReceiver.java
@@ -55,6 +55,25 @@ public class MetricReceiver {
}
}
+ private static final class NullGauge extends Gauge {
+ NullGauge() {
+ super(null, null, null);
+ }
+
+ @Override
+ public void sample(double x) {
+ }
+
+ @Override
+ public void sample(double x, Point p) {
+ }
+
+ @Override
+ public PointBuilder builder() {
+ return super.builder();
+ }
+ }
+
private static final class NullReceiver extends MetricReceiver {
NullReceiver() {
super(null, null);
@@ -76,12 +95,12 @@ public class MetricReceiver {
@Override
public Gauge declareGauge(String name) {
- return null;
+ return new NullGauge();
}
@Override
public Gauge declareGauge(String name, Point boundDimensions) {
- return null;
+ return new NullGauge();
}
@Override
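
Returning a no-op NullGauge from the NullReceiver instead of null (which is why Gauge is un-finalized above, so it can be subclassed) is the null-object pattern: callers may declare and sample gauges unconditionally, without null checks, even when metrics are effectively disabled. The self-contained sketch below shows the pattern in isolation; Meter, NullMeter and Registry are made-up names, not part of the simplemetrics API.

    // Hypothetical types illustrating the null-object pattern used above.
    interface Meter {
        void sample(double value);
    }

    final class NullMeter implements Meter {
        @Override
        public void sample(double value) { /* deliberately does nothing */ }
    }

    class Registry {
        private final boolean enabled;

        Registry(boolean enabled) { this.enabled = enabled; }

        Meter declare(String name) {
            if (!enabled) {
                return new NullMeter();  // harmless object instead of null keeps call sites branch-free
            }
            return value -> System.out.println(name + "=" + value);
        }
    }

    class NullObjectDemo {
        public static void main(String[] args) {
            // Safe even when metrics are disabled; no null check at the call site.
            new Registry(false).declare("qps").sample(42.0);
        }
    }
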
diff --git a/simplemetrics/src/main/resources/configdefinitions/manager.def b/simplemetrics/src/main/resources/configdefinitions/manager.def
index 6f6bef75fd7..11077b87177 100644
--- a/simplemetrics/src/main/resources/configdefinitions/manager.def
+++ b/simplemetrics/src/main/resources/configdefinitions/manager.def
@@ -2,5 +2,5 @@
version=1
namespace=metrics
-reportPeriodSeconds int default=300
+reportPeriodSeconds int default=60
pointsToKeepPerMetric int default=100
diff --git a/standalone-container/src/main/scala/com/yahoo/application/container/impl/ClassLoaderOsgiFramework.scala b/standalone-container/src/main/scala/com/yahoo/application/container/impl/ClassLoaderOsgiFramework.scala
index 6d45e6fa8a1..5a41462cb48 100644
--- a/standalone-container/src/main/scala/com/yahoo/application/container/impl/ClassLoaderOsgiFramework.scala
+++ b/standalone-container/src/main/scala/com/yahoo/application/container/impl/ClassLoaderOsgiFramework.scala
@@ -17,6 +17,8 @@ import org.osgi.framework.wiring._
import org.osgi.resource.{Wire, Capability, Requirement}
/**
+ * A (mock) OSGI implementation which loads classes from the system classpath
+ *
* @author tonytv
*/
final class ClassLoaderOsgiFramework extends OsgiFramework {
@@ -197,4 +199,5 @@ final class ClassLoaderOsgiFramework extends OsgiFramework {
override def createFilter(filter: String) = throw new UnsupportedOperationException
}
+
}
diff --git a/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneContainerApplication.scala b/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneContainerApplication.scala
index 8157170e6d1..fc57353c194 100644
--- a/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneContainerApplication.scala
+++ b/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneContainerApplication.scala
@@ -2,7 +2,7 @@
package com.yahoo.container.standalone
import com.google.inject.{Key, AbstractModule, Injector, Inject}
-import com.yahoo.config.application.api.{DeployLogger, RuleConfigDeriver, FileRegistry, ApplicationPackage}
+import com.yahoo.config.application.api.{RuleConfigDeriver, FileRegistry, ApplicationPackage}
import com.yahoo.config.provision.Zone
import com.yahoo.jdisc.application.Application
import com.yahoo.container.jdisc.ConfiguredApplication
@@ -10,6 +10,7 @@ import java.io.{IOException, File}
import com.yahoo.config.model.test.MockRoot
import com.yahoo.config.model.application.provider._
import com.yahoo.vespa.defaults.Defaults
+import com.yahoo.vespa.model.VespaModel
import com.yahoo.vespa.model.container.xml.{ConfigServerContainerModelBuilder, ManhattanContainerModelBuilder, ContainerModelBuilder}
import org.w3c.dom.Element
import com.yahoo.config.model.builder.xml.XmlHelper
@@ -134,9 +135,9 @@ object StandaloneContainerApplication {
tmpDir.toFile
}
- private def validateApplication(applicationPackage: ApplicationPackage, logger: DeployLogger) = {
+ private def validateApplication(applicationPackage: ApplicationPackage) = {
try {
- applicationPackage.validateXML(logger)
+ applicationPackage.validateXML()
} catch {
case e: IOException => throw new IllegalArgumentException(e)
}
@@ -164,14 +165,14 @@ object StandaloneContainerApplication {
fileRegistry: FileRegistry,
preprocessedApplicationDir: File,
networkingOption: Networking,
- configModelRepo: ConfigModelRepo = new ConfigModelRepo): (MockRoot, Container) = {
+ configModelRepo: ConfigModelRepo = new ConfigModelRepo): (VespaModel, Container) = {
val logger = new BaseDeployLogger
val rawApplicationPackage = new FilesApplicationPackage.Builder(applicationPath.toFile).includeSourceFiles(true).preprocessedDir(preprocessedApplicationDir).build()
// TODO: Needed until we get rid of semantic rules
val applicationPackage = rawApplicationPackage.preprocess(Zone.defaultZone(), new RuleConfigDeriver {
override def derive(ruleBaseDir: String, outputDir: String): Unit = {}
}, logger)
- validateApplication(applicationPackage, logger)
+ validateApplication(applicationPackage)
val deployState = new DeployState.Builder().
applicationPackage(applicationPackage).
fileRegistry(fileRegistry).
@@ -179,12 +180,13 @@ object StandaloneContainerApplication {
configDefinitionRepo(configDefinitionRepo).
build()
- val root = new MockRoot("", deployState)
+ val root = VespaModel.createIncomplete(deployState)
val vespaRoot = new ApplicationConfigProducerRoot(root,
"vespa",
deployState.getDocumentModel,
deployState.getProperties.vespaVersion(),
deployState.getProperties.applicationId())
+
val spec = containerRootElement(applicationPackage)
val containerModel = newContainerModelBuilder(networkingOption).build(deployState, configModelRepo, vespaRoot, spec)
@@ -192,11 +194,16 @@ object StandaloneContainerApplication {
containerModel.initialize(configModelRepo)
val container = first(containerModel.getCluster().getContainers)
+ // TODO: If we can do the mutations below on the builder, we can separate out model finalization from the
+ // VespaModel constructor, such that the above and below code to finalize the container can be
+ // replaced by root.finalize();
+
// Always disable rpc server for standalone container. This server will soon be removed anyway.
container.setRpcServerEnabled(false)
container.setHttpServerEnabled(networkingOption == Networking.enable)
initializeContainer(container, spec)
+
root.freezeModelTopology()
(root, container)
}
diff --git a/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneSubscriberFactory.scala b/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneSubscriberFactory.scala
index 432e5b82946..4ac88eaafae 100644
--- a/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneSubscriberFactory.scala
+++ b/standalone-container/src/main/scala/com/yahoo/container/standalone/StandaloneSubscriberFactory.scala
@@ -4,6 +4,7 @@ package com.yahoo.container.standalone
import com.yahoo.config.model.test.MockRoot
import com.yahoo.config.{ConfigBuilder, ConfigInstance}
import com.yahoo.container.di.ConfigKeyT
+import com.yahoo.vespa.model.VespaModel
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import com.yahoo.vespa.config.ConfigKey
@@ -14,7 +15,7 @@ import StandaloneSubscriberFactory._
* @author tonytv
* @author gjoranv
*/
-class StandaloneSubscriberFactory(root: MockRoot) extends SubscriberFactory {
+class StandaloneSubscriberFactory(root: VespaModel) extends SubscriberFactory {
class StandaloneSubscriber(configKeys: Set[ConfigKeyT]) extends Subscriber {
override def configChanged =
generation == 0
diff --git a/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainerActivatorTest.java b/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainerActivatorTest.java
index 484d4c4d50e..f847f05108e 100644
--- a/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainerActivatorTest.java
+++ b/standalone-container/src/test/java/com/yahoo/container/standalone/StandaloneContainerActivatorTest.java
@@ -24,7 +24,7 @@ import static org.hamcrest.collection.IsEmptyCollection.empty;
import static org.junit.Assert.assertThat;
/**
- * @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
+ * @author Einar M R Rosenvinge
* @since 5.22.0
*/
public class StandaloneContainerActivatorTest {
diff --git a/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainer.scala b/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainer.scala
index 7152c0c0af1..f0c2ce6fa0d 100644
--- a/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainer.scala
+++ b/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainer.scala
@@ -32,7 +32,7 @@ object StandaloneContainer {
}
}
- def withContainerModel[T](containerNode: Node)(f: MockRoot => T) {
+ def withContainerModel[T](containerNode: Node)(f: VespaModel => T) {
withTempDirectory { applicationPath =>
createServicesXml(applicationPath, containerNode)
diff --git a/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainerTest.scala b/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainerTest.scala
index 41026e1c263..2705322ab32 100644
--- a/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainerTest.scala
+++ b/standalone-container/src/test/scala/com/yahoo/container/standalone/StandaloneContainerTest.scala
@@ -58,7 +58,7 @@ class StandaloneContainerTest {
</services>
StandaloneContainer.withContainerModel(servicesXml) {
root =>
- assertNotNull(root.getProducer("container-1/standalone"))
+ assertTrue(root.getConfigProducer("container-1/standalone").isPresent)
}
}
@@ -72,10 +72,10 @@ class StandaloneContainerTest {
</jdisc>
StandaloneContainer.withContainerModel(xml) { root =>
- val container = root.getProducer("jdisc/standalone").asInstanceOf[AbstractService]
+ val container = root.getConfigProducer("jdisc/standalone").get().asInstanceOf[AbstractService]
println("portCnt: " + container.getPortCount)
println("numPorts: " + container.getNumPortsAllocated)
- assertThat(container.getNumPortsAllocated, is(1))
+ assertEquals(1, container.getNumPortsAllocated)
}
}
diff --git a/testutil/src/main/java/com/yahoo/test/json/JsonTestHelper.java b/testutil/src/main/java/com/yahoo/test/json/JsonTestHelper.java
index fee6cc2fef0..a8e8e562b2d 100644
--- a/testutil/src/main/java/com/yahoo/test/json/JsonTestHelper.java
+++ b/testutil/src/main/java/com/yahoo/test/json/JsonTestHelper.java
@@ -10,6 +10,7 @@ import static uk.co.datumedge.hamcrest.json.SameJSONAs.sameJSONAs;
* @author Vegard Sjonfjell
*/
public class JsonTestHelper {
+
/**
* Convenience method to input JSON without escaping double quotes and newlines
* Each parameter represents a line of JSON encoded data
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java b/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java
index b3d572e48ae..d70b55c66a2 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/state/ClusterState.java
@@ -11,6 +11,9 @@ import java.util.*;
*/
public class ClusterState implements Cloneable {
+ private static final NodeState DEFAULT_STORAGE_UP_NODE_STATE = new NodeState(NodeType.STORAGE, State.UP);
+ private static final NodeState DEFAULT_DISTRIBUTOR_UP_NODE_STATE = new NodeState(NodeType.DISTRIBUTOR, State.UP);
+
private int version = 0;
private State state = State.DOWN;
// nodeStates maps each of the non-up nodes that have an index <= the node count for its type.
@@ -30,6 +33,22 @@ public class ClusterState implements Cloneable {
deserialize(serialized);
}
+ /**
+ * Parse a given cluster state string into a returned ClusterState instance, wrapping any
+ * parse exceptions in a RuntimeException.
+ */
+ public static ClusterState stateFromString(final String stateStr) {
+ try {
+ return new ClusterState(stateStr);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static ClusterState emptyState() {
+ return stateFromString("");
+ }
+
public ClusterState clone() {
try{
ClusterState state = (ClusterState) super.clone();
@@ -61,22 +80,81 @@ public class ClusterState implements Cloneable {
return true;
}
+ @FunctionalInterface
+ private interface NodeStateCmp {
+ boolean similar(NodeType nodeType, NodeState lhs, NodeState rhs);
+ }
+
public boolean similarTo(Object o) {
if (!(o instanceof ClusterState)) { return false; }
- ClusterState other = (ClusterState) o;
+ final ClusterState other = (ClusterState) o;
- if (state.equals(State.DOWN) && other.state.equals(State.DOWN)) return true; // both down, means equal (why??)
- if (version != other.version || !state.equals(other.state)) return false;
- if (distributionBits != other.distributionBits) return false;
- if ( ! nodeCount.equals(other.nodeCount)) return false;
+ return similarToImpl(other, this::normalizedNodeStateSimilarTo);
+ }
+
+ public boolean similarToIgnoringInitProgress(final ClusterState other) {
+ return similarToImpl(other, this::normalizedNodeStateSimilarToIgnoringInitProgress);
+ }
- for (Map.Entry<Node, NodeState> nodeStateEntry : nodeStates.entrySet()) {
- NodeState otherNodeState = other.nodeStates.get(nodeStateEntry.getKey());
- if (otherNodeState == null || ! otherNodeState.similarTo(nodeStateEntry.getValue())) return false;
+ private boolean similarToImpl(final ClusterState other, final NodeStateCmp nodeStateCmp) {
+ // Two cluster states are considered similar if they are both down. When clusters
+ // are down, their individual node states do not matter to ideal state computations
+ // and content nodes therefore do not need to observe them.
+ if (state.equals(State.DOWN) && other.state.equals(State.DOWN)) {
+ return true;
+ }
+ if (!metaInformationSimilarTo(other)) {
+ return false;
+ }
+ // TODO verify behavior of C++ impl against this
+ for (Node node : unionNodeSetWith(other.nodeStates.keySet())) {
+ final NodeState lhs = nodeStates.get(node);
+ final NodeState rhs = other.nodeStates.get(node);
+ if (!nodeStateCmp.similar(node.getType(), lhs, rhs)) {
+ return false;
+ }
}
return true;
}
+ private Set<Node> unionNodeSetWith(final Set<Node> otherNodes) {
+ final Set<Node> unionNodeSet = new TreeSet<Node>(nodeStates.keySet());
+ unionNodeSet.addAll(otherNodes);
+ return unionNodeSet;
+ }
+
+ private boolean metaInformationSimilarTo(final ClusterState other) {
+ if (version != other.version || !state.equals(other.state)) {
+ return false;
+ }
+ if (distributionBits != other.distributionBits) {
+ return false;
+ }
+ return nodeCount.equals(other.nodeCount);
+ }
+
+ private boolean normalizedNodeStateSimilarTo(final NodeType nodeType, final NodeState lhs, final NodeState rhs) {
+ final NodeState lhsNormalized = (lhs != null ? lhs : defaultUpNodeState(nodeType));
+ final NodeState rhsNormalized = (rhs != null ? rhs : defaultUpNodeState(nodeType));
+
+ return lhsNormalized.similarTo(rhsNormalized);
+ }
+
+ private boolean normalizedNodeStateSimilarToIgnoringInitProgress(
+ final NodeType nodeType, final NodeState lhs, final NodeState rhs)
+ {
+ final NodeState lhsNormalized = (lhs != null ? lhs : defaultUpNodeState(nodeType));
+ final NodeState rhsNormalized = (rhs != null ? rhs : defaultUpNodeState(nodeType));
+
+ return lhsNormalized.similarToIgnoringInitProgress(rhsNormalized);
+ }
+
+ private static NodeState defaultUpNodeState(final NodeType nodeType) {
+ return nodeType == NodeType.STORAGE
+ ? DEFAULT_STORAGE_UP_NODE_STATE
+ : DEFAULT_DISTRIBUTOR_UP_NODE_STATE;
+ }
+
/**
* Fleet controller marks states that are actually sent out to nodes as official states. Only fleetcontroller
* should set this to official, and only just before sending it out. This state is currently not serialized with
@@ -97,7 +175,7 @@ public class ClusterState implements Cloneable {
public void addNodeState() throws ParseException {
if (!empty) {
NodeState ns = NodeState.deserialize(node.getType(), sb.toString());
- if (!ns.equals(new NodeState(node.getType(), State.UP))) {
+ if (!ns.equals(defaultUpNodeState(node.getType()))) {
nodeStates.put(node, ns);
}
if (nodeCount.get(node.getType().ordinal()) <= node.getIndex()) {
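
The rewritten similarity check iterates over the union of both states' node sets and normalizes a missing entry to the default up state for its node type, so a node that is explicitly down in one state but merely absent (implicitly up) in the other is no longer reported as similar; similarToIgnoringInitProgress applies the same normalization but skips the init-progress comparison. The snippet below is a usage illustration mirroring the strings used in the new test cases further down, not part of the patch.

    // Usage illustration; the state strings are taken from the accompanying tests.
    import com.yahoo.vdslib.state.ClusterState;

    public class SimilarityDemo {
        public static void main(String[] args) {
            ClusterState withDownStorage = ClusterState.stateFromString("distributor:3 storage:3 .0.s:d");
            ClusterState allUp           = ClusterState.stateFromString("distributor:3 storage:3");

            // Storage node 0 is down on one side and implicitly up on the other,
            // so the union-based comparison reports the states as not similar.
            System.out.println(withDownStorage.similarTo(allUp));   // false
            System.out.println(allUp.similarTo(withDownStorage));   // false

            // Differing init progress is ignored by the relaxed variant.
            ClusterState a = ClusterState.stateFromString("distributor:3 storage:3 .0.i:0.01 .1.i:0.1 .2.i:0.9");
            ClusterState b = ClusterState.stateFromString("distributor:3 storage:3 .0.i:0.2 .1.i:0.5 .2.i:0.99");
            System.out.println(a.similarToIgnoringInitProgress(b)); // true
        }
    }
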
diff --git a/vdslib/src/main/java/com/yahoo/vdslib/state/NodeState.java b/vdslib/src/main/java/com/yahoo/vdslib/state/NodeState.java
index 8c31938dfaf..15c929fe49d 100644
--- a/vdslib/src/main/java/com/yahoo/vdslib/state/NodeState.java
+++ b/vdslib/src/main/java/com/yahoo/vdslib/state/NodeState.java
@@ -112,17 +112,27 @@ public class NodeState implements Cloneable {
* Cluster state will check for that.
*/
public boolean similarTo(Object o) {
- if (!(o instanceof NodeState)) { return false; }
- NodeState other = (NodeState) o;
+ if (!(o instanceof NodeState)) {
+ return false;
+ }
+ return similarToImpl((NodeState)o, true);
+ }
+
+ public boolean similarToIgnoringInitProgress(final NodeState other) {
+ return similarToImpl(other, false);
+ }
+ private boolean similarToImpl(final NodeState other, boolean considerInitProgress) {
if (state != other.state) return false;
if (Math.abs(capacity - other.capacity) > 0.0000000001) return false;
if (Math.abs(reliability - other.reliability) > 0.0000000001) return false;
if (startTimestamp != other.startTimestamp) return false;
// Init progress on different sides of the init progress limit boundary is not similar.
- if (type.equals(NodeType.STORAGE)
- && initProgress < getListingBucketsInitProgressLimit() ^ other.initProgress < getListingBucketsInitProgressLimit())
+ if (considerInitProgress
+ && type.equals(NodeType.STORAGE)
+ && (initProgress < getListingBucketsInitProgressLimit()
+ ^ other.initProgress < getListingBucketsInitProgressLimit()))
{
return false;
}
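
In similarToImpl above, differing init progress only breaks similarity when the two values fall on opposite sides of the listing-buckets limit, and only for storage nodes when considerInitProgress is true; the XOR reads as "exactly one of the two is below the limit". The sketch below illustrates just that predicate; the 0.01 limit is an assumed value for illustration, the real one comes from getListingBucketsInitProgressLimit().

    // Illustration of the XOR boundary test; the limit value is an assumption.
    public class InitProgressBoundaryDemo {
        static boolean crossesLimit(double a, double b, double limit) {
            // True exactly when one progress value is below the limit and the other is not.
            return (a < limit) ^ (b < limit);
        }

        public static void main(String[] args) {
            double limit = 0.01;
            System.out.println(crossesLimit(0.002, 0.009, limit)); // false: same side, still similar
            System.out.println(crossesLimit(0.002, 0.500, limit)); // true: opposite sides, not similar
        }
    }
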
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java
index c058a7c9919..0d06fcc6faa 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/state/ClusterStateTestCase.java
@@ -1,10 +1,18 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.vdslib.state;
+import org.junit.Test;
+
import java.text.ParseException;
+import java.util.function.BiFunction;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
-public class ClusterStateTestCase extends junit.framework.TestCase {
+public class ClusterStateTestCase{
+ @Test
public void testSetNodeState() throws ParseException {
ClusterState state = new ClusterState("");
assertEquals("", state.toString());
@@ -22,6 +30,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
assertEquals("distributor:5 .0.s:d .2.s:d .3.s:d storage:1 .0.d:4 .0.d.1.s:d", state.toString());
}
+ @Test
public void testClone() throws ParseException {
ClusterState state = new ClusterState("");
state.setNodeState(new Node(NodeType.DISTRIBUTOR, 1), new NodeState(NodeType.DISTRIBUTOR, State.UP).setDescription("available"));
@@ -31,8 +40,9 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
assertEquals(state.toString(true), other.toString(true));
assertEquals(state.toString(false), other.toString(false));
assertEquals(state, other);
- }
+ }
+ @Test
public void testEquals() throws ParseException {
ClusterState state = new ClusterState("");
@@ -55,6 +65,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
ClusterState state2 = new ClusterState("distributor:3 .1.s:d .2.s:m storage:3 .1.s:i .2.s:m");
assertFalse(state1.equals(state2));
assertFalse(state1.similarTo(state2));
+ assertFalse(state1.similarToIgnoringInitProgress(state2));
}
{
@@ -62,6 +73,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
ClusterState state2 = new ClusterState("cluster:d version:1 bits:20 distributor:1 storage:1 .0.s:d");
assertFalse(state1.equals(state2));
assertTrue(state1.similarTo(state2));
+ assertTrue(state1.similarToIgnoringInitProgress(state2));
}
{
@@ -69,6 +81,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
ClusterState state2 = new ClusterState("distributor:3 storage:3");
assertFalse(state1.equals(state2));
assertFalse(state1.similarTo(state2));
+ assertFalse(state1.similarToIgnoringInitProgress(state2));
}
assertFalse(state.equals("class not instance of ClusterState"));
@@ -78,6 +91,92 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
assertTrue(state.similarTo(state));
}
+ private static ClusterState stateFromString(final String stateStr) {
+ try {
+ return new ClusterState(stateStr);
+ } catch (ParseException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ private void do_test_differing_storage_node_sets(BiFunction<ClusterState, ClusterState, Boolean> cmp) {
+ final ClusterState a = stateFromString("distributor:3 storage:3 .0.s:d");
+ final ClusterState b = stateFromString("distributor:3 storage:3");
+ assertFalse(cmp.apply(a, b));
+ assertFalse(cmp.apply(b, a));
+ assertTrue(cmp.apply(a, a));
+ assertTrue(cmp.apply(b, b));
+ }
+
+ private void do_test_differing_distributor_node_sets(BiFunction<ClusterState, ClusterState, Boolean> cmp) {
+ final ClusterState a = stateFromString("distributor:3 .0.s:d storage:3");
+ final ClusterState b = stateFromString("distributor:3 storage:3");
+ assertFalse(cmp.apply(a, b));
+ assertFalse(cmp.apply(b, a));
+ assertTrue(cmp.apply(a, a));
+ assertTrue(cmp.apply(b, b));
+ }
+
+ @Test
+ public void similarity_check_considers_differing_distributor_node_state_sets() {
+ do_test_differing_distributor_node_sets((a, b) -> a.similarTo(b));
+ }
+
+ @Test
+ public void similarity_check_considers_differing_storage_node_state_sets() {
+ do_test_differing_storage_node_sets((a, b) -> a.similarTo(b));
+ }
+
+ @Test
+ public void structural_similarity_check_considers_differing_distributor_node_state_sets() {
+ do_test_differing_distributor_node_sets((a, b) -> a.similarToIgnoringInitProgress(b));
+ }
+
+ @Test
+ public void init_progress_ignoring_similarity_check_considers_differing_storage_node_state_sets() {
+ do_test_differing_storage_node_sets((a, b) -> a.similarToIgnoringInitProgress(b));
+ }
+
+ private void do_test_similarity_for_down_cluster_state(BiFunction<ClusterState, ClusterState, Boolean> cmp) {
+ final ClusterState a = stateFromString("cluster:d distributor:3 .0.s:d storage:3 .2:s:d");
+ final ClusterState b = stateFromString("cluster:d distributor:3 storage:3 .1:s:d");
+ assertTrue(cmp.apply(a, b));
+ assertTrue(cmp.apply(b, a));
+ }
+
+ @Test
+ public void similarity_check_considers_differing_down_cluster_states_similar() {
+ do_test_similarity_for_down_cluster_state((a, b) -> a.similarTo(b));
+ }
+
+ @Test
+ public void init_progress_ignoring__similarity_check_considers_differing_down_cluster_states_similar() {
+ do_test_similarity_for_down_cluster_state((a, b) -> a.similarToIgnoringInitProgress(b));
+ }
+
+ // If we naively only look at the NodeState sets in the ClusterState instances to be
+ // compared, we might get false positives. If state A has a NodeState(Up, minBits 15)
+ // while state B has NodeState(Up, minBits 16), the latter will be pruned away from the
+ // NodeState set because it's got a "default" Up state. The two states are still semantically
+ // similar, and should be returned as such. But their state sets technically differ.
+ @Test
+ public void similarity_check_does_not_consider_per_storage_node_min_bits() {
+ final ClusterState a = stateFromString("distributor:4 storage:4");
+ final ClusterState b = stateFromString("distributor:4 storage:4");
+ b.setNodeState(new Node(NodeType.STORAGE, 1), new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(15));
+ assertTrue(a.similarTo(b));
+ assertTrue(b.similarTo(a));
+ }
+
+ @Test
+ public void init_progress_ignoring_similarity_check_does_in_fact_ignore_init_progress() {
+ final ClusterState a = stateFromString("distributor:3 storage:3 .0.i:0.01 .1.i:0.1 .2.i:0.9");
+ final ClusterState b = stateFromString("distributor:3 storage:3 .0.i:0.2 .1.i:0.5 .2.i:0.99");
+ assertTrue(a.similarToIgnoringInitProgress(b));
+ assertTrue(b.similarToIgnoringInitProgress(a));
+ }
+
+ @Test
public void testTextDiff() throws ParseException {
ClusterState state1 = new ClusterState("distributor:9 storage:4");
ClusterState state2 = new ClusterState("distributor:7 storage:6");
@@ -94,6 +193,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
assertEquals("version: 123 => 0, bits: 16 => 21, official: false => true, storage: [2: [Initializing => Up, disks: 2 => 0, description: Booting => ], 4: Down => Up, 5: Down => Up], distributor: [7: Up => Down, 8: Up => Down]", state1.getTextualDifference(state2));
}
+ @Test
public void testHtmlDiff() throws ParseException {
ClusterState state1 = new ClusterState("distributor:9 storage:4");
ClusterState state2 = new ClusterState("distributor:7 storage:6");
@@ -133,7 +233,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
"]", state1.getHtmlDifference(state2));
}
-
+ @Test
public void testParser() throws ParseException {
ClusterState state = new ClusterState("distributor:2 storage:17 .2.s:d .13.s:r m:cluster\\x20message");
assertEquals("cluster message", state.getDescription());
@@ -191,17 +291,20 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
} catch (Exception e) {}
}
+ @Test
public void testCapacityExponential() throws ParseException {
ClusterState state = new ClusterState("distributor:27 storage:170 .2.s:d .13.c:3E-8 .13.s:r");
- assertEquals(3E-8, state.getNodeState(new Node(NodeType.STORAGE, 13)).getCapacity());
+ assertEquals(3E-8, state.getNodeState(new Node(NodeType.STORAGE, 13)).getCapacity(), 1E-8);
}
+ @Test
public void testCapacityExponentialCpp() throws ParseException {
ClusterState state = new ClusterState("distributor:27 storage:170 .2.s:d .13.c:3e-08 .13.s:r");
- assertEquals(3E-8, state.getNodeState(new Node(NodeType.STORAGE, 13)).getCapacity());
+ assertEquals(3E-8, state.getNodeState(new Node(NodeType.STORAGE, 13)).getCapacity(), 1E-8);
}
+ @Test
public void testSetState() throws ParseException {
ClusterState state = new ClusterState("distributor:2 storage:2");
state.setNodeState(new Node(NodeType.DISTRIBUTOR, 0), new NodeState(NodeType.DISTRIBUTOR, State.DOWN));
@@ -209,6 +312,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
assertEquals("distributor:2 .0.s:d storage:2", state.toString());
}
+ @Test
public void testVersionAndClusterStates() throws ParseException {
ClusterState state = new ClusterState("version:4 cluster:i distributor:2 .1.s:i storage:2 .0.s:i .0.i:0.345");
assertEquals(4, state.getVersion());
@@ -220,6 +324,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
assertEquals("version:5 cluster:d bits:12 distributor:2 .1.s:i .1.i:1.0 storage:2 .0.s:i .0.i:0.345", state.toString());
}
+ @Test
public void testNotRemovingCommentedDownNodesAtEnd() throws ParseException {
ClusterState state = new ClusterState("");
state.setNodeState(new Node(NodeType.DISTRIBUTOR, 0), new NodeState(NodeType.DISTRIBUTOR, State.UP));
@@ -234,6 +339,7 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
assertEquals("distributor:1 storage:2", state.toString(false));
}
+ @Test
public void testWhitespace() throws ParseException {
ClusterState state = new ClusterState("distributor:2\n .1.t:3\nstorage:2\n\t.0.s:i \r\f.1.s:m");
assertEquals(2, state.getNodeCount(NodeType.DISTRIBUTOR));
@@ -243,4 +349,22 @@ public class ClusterStateTestCase extends junit.framework.TestCase {
assertEquals(new NodeState(NodeType.STORAGE, State.INITIALIZING), state.getNodeState(new Node(NodeType.STORAGE, 0)));
assertEquals(new NodeState(NodeType.STORAGE, State.MAINTENANCE), state.getNodeState(new Node(NodeType.STORAGE, 1)));
}
+
+ @Test
+ public void empty_state_factory_method_returns_empty_state() {
+ final ClusterState state = ClusterState.emptyState();
+ assertEquals("", state.toString());
+ }
+
+ @Test
+ public void state_from_string_factory_method_returns_cluster_state_constructed_from_input() {
+ final String stateStr = "version:123 distributor:2 storage:2";
+ final ClusterState state = ClusterState.stateFromString(stateStr);
+ assertEquals(stateStr, state.toString());
+ }
+
+ @Test(expected=RuntimeException.class)
+ public void state_from_string_factory_method_throws_runtime_exception_on_parse_failure() {
+ ClusterState.stateFromString("fraggle rock");
+ }
}
diff --git a/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java b/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java
index 63137a92c7b..9362838b63c 100644
--- a/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java
+++ b/vdslib/src/test/java/com/yahoo/vdslib/state/NodeStateTestCase.java
@@ -165,6 +165,12 @@ public class NodeStateTestCase extends junit.framework.TestCase {
assertFalse(ns2.similarTo(ns3));
assertTrue(ns3.similarTo(ns4));
+ assertTrue(ns1.similarToIgnoringInitProgress(ns2));
+ assertTrue(ns1.similarToIgnoringInitProgress(ns3));
+ assertTrue(ns3.similarToIgnoringInitProgress(ns1));
+ assertTrue(ns1.similarToIgnoringInitProgress(ns4));
+ assertTrue(ns2.similarToIgnoringInitProgress(ns4));
+
assertFalse(ns1.equals(ns2));
assertFalse(ns2.equals(ns3));
assertFalse(ns3.equals(ns4));
@@ -176,6 +182,7 @@ public class NodeStateTestCase extends junit.framework.TestCase {
NodeState ns1 = new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(16);
NodeState ns2 = new NodeState(NodeType.STORAGE, State.UP).setMinUsedBits(18);
assertTrue(ns1.similarTo(ns2));
+ assertTrue(ns1.similarToIgnoringInitProgress(ns2));
assertFalse(ns1.equals(ns2));
}
{
@@ -184,12 +191,14 @@ public class NodeStateTestCase extends junit.framework.TestCase {
assertEquals(ns, ns2Disks);
assertEquals(ns2Disks, ns);
assertTrue(ns.similarTo(ns2Disks));
+ assertTrue(ns.similarToIgnoringInitProgress(ns2Disks));
assertTrue(ns2Disks.similarTo(ns));
ns2Disks.getDiskState(0).setState(State.DOWN);
assertFalse(ns.equals(ns2Disks));
assertFalse(ns2Disks.equals(ns));
assertFalse(ns.similarTo(ns2Disks));
+ assertFalse(ns.similarToIgnoringInitProgress(ns2Disks));
assertFalse(ns2Disks.similarTo(ns));
}
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java
index e6ff8593e71..d7f001eff31 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/api/FeedClientImpl.java
@@ -27,8 +27,9 @@ public class FeedClientImpl implements FeedClient {
public FeedClientImpl(
SessionParams sessionParams, ResultCallback resultCallback, ScheduledThreadPoolExecutor timeoutExecutor) {
- this.closeTimeoutMs = sessionParams.getFeedParams().getServerTimeout(TimeUnit.MILLISECONDS) +
- sessionParams.getFeedParams().getClientTimeout(TimeUnit.MILLISECONDS);
+ this.closeTimeoutMs = sessionParams.getConnectionParams().getMaxRetries() * (
+ sessionParams.getFeedParams().getServerTimeout(TimeUnit.MILLISECONDS) +
+ sessionParams.getFeedParams().getClientTimeout(TimeUnit.MILLISECONDS));
this.operationProcessor = new OperationProcessor(
new IncompleteResultsThrottler(
sessionParams.getThrottlerMinSize(),
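
The close timeout above now covers the whole retry budget rather than a single attempt: closeTimeoutMs = maxRetries * (serverTimeoutMs + clientTimeoutMs). A worked example with made-up parameter values (not taken from any real configuration) is shown below.

    // Made-up values, purely to illustrate the formula used above.
    public class CloseTimeoutDemo {
        public static void main(String[] args) {
            long serverTimeoutMs = 180_000;  // hypothetical server timeout
            long clientTimeoutMs = 20_000;   // hypothetical client timeout
            int maxRetries       = 3;        // hypothetical retry limit

            long closeTimeoutMs = maxRetries * (serverTimeoutMs + clientTimeoutMs);
            System.out.println(closeTimeoutMs);  // 600000 ms = 10 minutes
        }
    }
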
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java
index 199f3dcbaa8..414ae90dd27 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/ClusterConnection.java
@@ -114,8 +114,8 @@ public class ClusterConnection implements AutoCloseable {
IOThread ioThread = ioThreads.get(hash % ioThreads.size());
try {
ioThread.post(document);
- } catch (InterruptedException e) {
- throw new EndpointIOException(ioThread.getEndpoint(), "While sending", e);
+ } catch (Throwable t) {
+ throw new EndpointIOException(ioThread.getEndpoint(), "While sending", t);
}
}
diff --git a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java
index 505039cd2d4..60324eda47a 100644
--- a/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java
+++ b/vespa-http-client/src/main/java/com/yahoo/vespa/http/client/core/communication/IOThread.java
@@ -136,33 +136,18 @@ class IOThread implements Runnable, AutoCloseable {
stopSignal.countDown();
log.finer("Closed called.");
- try {
- if (! running.await(2 * localQueueTimeOut, TimeUnit.MILLISECONDS)) {
- log.info("Waited " + 2 * localQueueTimeOut
- + " ms for queue to be empty, did not happen, interrupting thread.");
- }
- } catch (InterruptedException e) {
- log.log(Level.INFO, "Interrupted while waiting for threads to finish sending.", e);
- }
-
- // Make 5 attempts the next 30 secs to get results from previous operations.
- for (int i = 0 ; i < 5; i++) {
- int size = resultQueue.getPendingSize();
- if (size == 0) break;
- log.info("We have outstanding operations (" + size +") , waiting for responses, iteraton: " + i + ".");
+ // Make a last attempt to get results from previous operations, we have already waited quite a bit before getting here.
+ int size = resultQueue.getPendingSize();
+ if (size > 0) {
+ log.info("We have outstanding operations (" + size + ") , trying to fetch responses.");
try {
processResponse(client.drain());
} catch (Throwable e) {
log.log(Level.SEVERE, "Some failures while trying to get latest responses from vespa.", e);
- break;
- }
- try {
- Thread.sleep(6000);
- } catch (InterruptedException e) {
- break;
}
}
+
try {
client.close();
} finally {
diff --git a/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessorTest.java b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessorTest.java
index c87385ec2ce..0eb3fc12405 100644
--- a/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessorTest.java
+++ b/vespa-http-client/src/test/java/com/yahoo/vespa/http/client/core/operationProcessor/OperationProcessorTest.java
@@ -13,11 +13,16 @@ import org.junit.Test;
import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
/**
* @author <a href="mailto:einarmr@yahoo-inc.com">Einar M R Rosenvinge</a>
@@ -367,4 +372,32 @@ public class OperationProcessorTest {
assertThat(done.await(120, TimeUnit.SECONDS), is(true));
}
+
+ @Test
+ public void testSendsResponseToQueuedDocumentOnClose() throws InterruptedException {
+ SessionParams sessionParams = new SessionParams.Builder()
+ .addCluster(new Cluster.Builder().addEndpoint(Endpoint.create("#$#")).build())
+ .build();
+
+ ScheduledThreadPoolExecutor executor = mock(ScheduledThreadPoolExecutor.class);
+ when(executor.awaitTermination(anyLong(), any())).thenReturn(true);
+
+ CountDownLatch countDownLatch = new CountDownLatch(3);
+
+ OperationProcessor operationProcessor = new OperationProcessor(
+ new IncompleteResultsThrottler(19, 19, null, null),
+ (docId, documentResult) -> {
+ countDownLatch.countDown();
+ },
+ sessionParams, executor);
+
+ // Will fail due to bogus host name, but will be retried.
+ operationProcessor.sendDocument(doc1);
+ operationProcessor.sendDocument(doc2);
+ operationProcessor.sendDocument(doc3);
+
+ // Will create fail results.
+ operationProcessor.close();
+ countDownLatch.await();
+ }
}
diff --git a/vespabase/src/start-cbinaries.sh b/vespabase/src/start-cbinaries.sh
index 91ce8edede4..1809f889244 100755
--- a/vespabase/src/start-cbinaries.sh
+++ b/vespabase/src/start-cbinaries.sh
@@ -83,6 +83,8 @@ if [ "$VESPA_USE_VALGRIND" = "all" ]; then
no_valgrind=false
fi
+export STD_THREAD_PREVENT_TRY_CATCH=true
+
# special malloc setup; we should make some better mechanism for this
#
export GLIBCXX_FORCE_NEW=1
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java
index 4889d064387..6a7797c20a7 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/document/restapi/resource/RestApi.java
@@ -23,6 +23,7 @@ import com.yahoo.document.restapi.RestUri;
import com.yahoo.documentapi.messagebus.MessageBusDocumentAccess;
import com.yahoo.documentapi.messagebus.MessageBusParams;
import com.yahoo.documentapi.messagebus.loadtypes.LoadTypeSet;
+import com.yahoo.vespa.config.content.LoadTypeConfig;
import com.yahoo.vespaxmlparser.VespaXMLFeedReader;
import java.io.IOException;
@@ -55,10 +56,12 @@ public class RestApi extends LoggingRequestHandler {
private AtomicInteger threadsAvailableForApi = new AtomicInteger(20 /*max concurrent requests */);
@Inject
- public RestApi(Executor executor, AccessLog accessLog, DocumentmanagerConfig documentManagerConfig) {
+ public RestApi(Executor executor, AccessLog accessLog, DocumentmanagerConfig documentManagerConfig,
+ LoadTypeConfig loadTypeConfig) {
super(executor, accessLog);
- final LoadTypeSet loadTypes = new LoadTypeSet("client");
- this.operationHandler = new OperationHandlerImpl(new MessageBusDocumentAccess(new MessageBusParams(loadTypes)));
+ MessageBusParams params = new MessageBusParams(new LoadTypeSet(loadTypeConfig));
+ params.setDocumentmanagerConfig(documentManagerConfig);
+ this.operationHandler = new OperationHandlerImpl(new MessageBusDocumentAccess(params));
this.singleDocumentParser = new SingleDocumentParser(new DocumentTypeManager(documentManagerConfig));
}
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemove.java b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemove.java
index 14b2d86ae75..87a7ebe9e49 100755
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemove.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemove.java
@@ -3,9 +3,12 @@ package com.yahoo.feedhandler;
import com.google.inject.Inject;
import com.yahoo.clientmetrics.RouteMetricSet;
+import com.yahoo.cloud.config.ClusterListConfig;
+import com.yahoo.cloud.config.SlobroksConfig;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.document.DocumentId;
+import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.feedapi.FeedContext;
import com.yahoo.feedapi.MessagePropertyProcessor;
import com.yahoo.feedapi.SingleSender;
@@ -20,9 +23,14 @@ import java.util.concurrent.Executor;
public class VespaFeedHandlerRemove extends VespaFeedHandlerBase {
@Inject
- public VespaFeedHandlerRemove(FeederConfig feederConfig,
- LoadTypeConfig loadTypeConfig, Executor executor, Metric metric) throws Exception {
- super(feederConfig, loadTypeConfig, executor, metric);
+ public VespaFeedHandlerRemove(FeederConfig feederConfig,
+ LoadTypeConfig loadTypeConfig,
+ DocumentmanagerConfig documentmanagerConfig,
+ SlobroksConfig slobroksConfig,
+ ClusterListConfig clusterListConfig,
+ Executor executor,
+ Metric metric) throws Exception {
+ super(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig, clusterListConfig, executor, metric);
}
VespaFeedHandlerRemove(FeedContext context, Executor executor) throws Exception {
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemoveLocation.java b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemoveLocation.java
index 3b2f82c865e..04d22386bfb 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemoveLocation.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerRemoveLocation.java
@@ -3,8 +3,11 @@ package com.yahoo.feedhandler;
import com.google.inject.Inject;
import com.yahoo.clientmetrics.RouteMetricSet;
+import com.yahoo.cloud.config.ClusterListConfig;
+import com.yahoo.cloud.config.SlobroksConfig;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.documentapi.messagebus.protocol.RemoveLocationMessage;
import com.yahoo.feedapi.FeedContext;
import com.yahoo.feedapi.MessagePropertyProcessor;
@@ -19,9 +22,13 @@ import java.util.concurrent.Executor;
public class VespaFeedHandlerRemoveLocation extends VespaFeedHandlerBase {
@Inject
- public VespaFeedHandlerRemoveLocation(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig, Executor executor,
- Metric metric) throws Exception {
- super(feederConfig, loadTypeConfig, executor, metric);
+ public VespaFeedHandlerRemoveLocation(FeederConfig feederConfig,
+ LoadTypeConfig loadTypeConfig,
+ DocumentmanagerConfig documentmanagerConfig,
+ SlobroksConfig slobroksConfig,
+ ClusterListConfig clusterListConfig,
+ Executor executor, Metric metric) throws Exception {
+ super(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig, clusterListConfig, executor, metric);
}
VespaFeedHandlerRemoveLocation(FeedContext context, Executor executor) throws Exception {
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerStatus.java b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerStatus.java
index 77930ae5a94..ed80443f970 100755
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerStatus.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerStatus.java
@@ -3,9 +3,12 @@ package com.yahoo.feedhandler;
import java.util.concurrent.Executor;
+import com.yahoo.cloud.config.ClusterListConfig;
+import com.yahoo.cloud.config.SlobroksConfig;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
import com.yahoo.container.jdisc.ThreadedHttpRequestHandler;
+import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.vespa.config.content.LoadTypeConfig;
import com.yahoo.feedapi.FeedContext;
import com.yahoo.metrics.MetricManager;
@@ -16,8 +19,14 @@ public class VespaFeedHandlerStatus extends ThreadedHttpRequestHandler {
private MetricManager manager;
- public VespaFeedHandlerStatus(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig, Executor executor) {
- this(FeedContext.getInstance(feederConfig, loadTypeConfig, new NullFeedMetric()), true, true, executor);
+ public VespaFeedHandlerStatus(FeederConfig feederConfig,
+ LoadTypeConfig loadTypeConfig,
+ DocumentmanagerConfig documentmanagerConfig,
+ SlobroksConfig slobroksConfig,
+ ClusterListConfig clusterListConfig,
+ Executor executor) {
+ this(FeedContext.getInstance(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig,
+ clusterListConfig, new NullFeedMetric()), true, true, executor);
}
VespaFeedHandlerStatus(FeedContext context, boolean doLog, boolean makeSnapshots, Executor executor) {
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/GetSearcher.java b/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/GetSearcher.java
index 661fcac6a64..cf42bce9c1c 100755
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/GetSearcher.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/GetSearcher.java
@@ -2,7 +2,10 @@
package com.yahoo.storage.searcher;
import com.google.inject.Inject;
+import com.yahoo.cloud.config.ClusterListConfig;
+import com.yahoo.cloud.config.SlobroksConfig;
import com.yahoo.container.jdisc.HttpRequest;
+import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.feedhandler.NullFeedMetric;
import com.yahoo.processing.request.CompoundName;
import com.yahoo.vespa.config.content.LoadTypeConfig;
@@ -169,9 +172,13 @@ public class GetSearcher extends Searcher {
}
@Inject
- public GetSearcher(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig) throws Exception {
- this(FeedContext.getInstance(feederConfig, loadTypeConfig, new NullFeedMetric()),
- (long)(feederConfig.timeout() * 1000));
+ public GetSearcher(FeederConfig feederConfig,
+ LoadTypeConfig loadTypeConfig,
+ DocumentmanagerConfig documentmanagerConfig,
+ SlobroksConfig slobroksConfig,
+ ClusterListConfig clusterListConfig) throws Exception {
+ this(FeedContext.getInstance(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig,
+ clusterListConfig, new NullFeedMetric()), (long)(feederConfig.timeout() * 1000));
}
GetSearcher(FeedContext context) throws Exception {
diff --git a/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/VisitSearcher.java b/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/VisitSearcher.java
index 621ffcefbe1..2d7e5fbc338 100644
--- a/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/VisitSearcher.java
+++ b/vespaclient-container-plugin/src/main/java/com/yahoo/storage/searcher/VisitSearcher.java
@@ -1,6 +1,9 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.storage.searcher;
+import com.yahoo.cloud.config.ClusterListConfig;
+import com.yahoo.cloud.config.SlobroksConfig;
+import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.feedhandler.NullFeedMetric;
import com.yahoo.vespa.config.content.LoadTypeConfig;
import com.yahoo.component.ComponentId;
@@ -30,8 +33,13 @@ public class VisitSearcher extends Searcher {
public static final String VISITOR_CONTINUATION_TOKEN_FIELDNAME = "visitorContinuationToken";
FeedContext context;
- public VisitSearcher(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig) throws Exception {
- this(FeedContext.getInstance(feederConfig, loadTypeConfig, new NullFeedMetric()));
+ public VisitSearcher(FeederConfig feederConfig,
+ LoadTypeConfig loadTypeConfig,
+ DocumentmanagerConfig documentmanagerConfig,
+ SlobroksConfig slobroksConfig,
+ ClusterListConfig clusterListConfig) throws Exception {
+ this(FeedContext.getInstance(feederConfig, loadTypeConfig, documentmanagerConfig,
+ slobroksConfig, clusterListConfig, new NullFeedMetric()));
}
VisitSearcher(FeedContext context) throws Exception {
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java
new file mode 100644
index 00000000000..42ec41bd107
--- /dev/null
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/DocumentApiApplicationTest.java
@@ -0,0 +1,23 @@
+package com.yahoo.document.restapi;
+
+import com.yahoo.application.Application;
+import com.yahoo.application.Networking;
+import org.junit.Test;
+
+/**
+ * @author bratseth
+ */
+public class DocumentApiApplicationTest {
+
+ /** Test that it is possible to instantiate an Application with a document-api */
+ @Test
+ public void application_with_document_api() {
+ String services =
+ "<container version='1.0'>" +
+ " <document-api/>" +
+ "</container>";
+ try (Application application = Application.fromServicesXml(services, Networking.enable)) {
+ }
+ }
+
+}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java
index c4e9e27ca75..95a48ab41fe 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/document/restapi/resource/RestApiTest.java
@@ -33,11 +33,12 @@ import static org.hamcrest.core.StringStartsWith.startsWith;
import static org.junit.Assert.assertThat;
public class RestApiTest {
+
Application application;
@Before
public void setup() throws Exception {
- application = Application.fromApplicationPackage(Paths.get("src/test/application"), Networking.enable);
+ application = Application.fromApplicationPackage(Paths.get("src/test/rest-api-application"), Networking.enable);
}
@After
@@ -249,7 +250,7 @@ public class RestApiTest {
public void testbasicEncodingV2() throws Exception {
Request request = new Request("http://localhost:" + getFirstListenPort() + get_enc_test_uri_v2);
HttpGet get = new HttpGet(request.getUri());
- final String rest = doRest(get);
+ String rest = doRest(get);
assertThat(rest, containsString(get_enc_response_part1_v2));
assertThat(rest, containsString(get_enc_response_part2));
}
@@ -264,7 +265,7 @@ public class RestApiTest {
public void testbasicVisit() throws Exception {
Request request = new Request("http://localhost:" + getFirstListenPort() + visit_test_uri);
HttpGet get = new HttpGet(request.getUri());
- final String rest = doRest(get);
+ String rest = doRest(get);
assertThat(rest, containsString(visit_response_part1));
assertThat(rest, containsString(visit_response_part2));
assertThat(rest, containsString(visit_response_part3));
@@ -276,9 +277,9 @@ public class RestApiTest {
@Test
public void testBadVisit() throws Exception {
- final Request request = new Request("http://localhost:" + getFirstListenPort() + visit_test_bad_uri);
+ Request request = new Request("http://localhost:" + getFirstListenPort() + visit_test_bad_uri);
HttpGet get = new HttpGet(request.getUri());
- final String rest = doRest(get);
+ String rest = doRest(get);
assertThat(rest, containsString(visit_test_bad_response));
}
diff --git a/vespaclient-container-plugin/src/test/java/com/yahoo/storage/searcher/VisitorSearcherTestCase.java b/vespaclient-container-plugin/src/test/java/com/yahoo/storage/searcher/VisitorSearcherTestCase.java
index 820f7f56e2f..4b1c69c73e7 100644
--- a/vespaclient-container-plugin/src/test/java/com/yahoo/storage/searcher/VisitorSearcherTestCase.java
+++ b/vespaclient-container-plugin/src/test/java/com/yahoo/storage/searcher/VisitorSearcherTestCase.java
@@ -60,8 +60,7 @@ public class VisitorSearcherTestCase {
public VisitSearcher create() throws Exception {
ClusterListConfig.Storage.Builder storageCluster = new ClusterListConfig.Storage.Builder().configid("storage/cluster.foobar").name("foobar");
ClusterListConfig clusterListCfg = new ClusterListConfig(new ClusterListConfig.Builder().storage(storageCluster));
- ClusterList clusterList = new ClusterList();
- clusterList.configure(clusterListCfg);
+ ClusterList clusterList = new ClusterList(clusterListCfg);
return new VisitSearcher(new FeedContext(
new MessagePropertyProcessor(new FeederConfig(new FeederConfig.Builder().timeout(458).route("riksveg18").retryenabled(true)),
new LoadTypeConfig(new LoadTypeConfig.Builder())),
@@ -139,15 +138,13 @@ public class VisitorSearcherTestCase {
ClusterListConfig.Storage.Builder storageCluster1 = new ClusterListConfig.Storage.Builder().configid("storage/cluster.foo").name("foo");
ClusterListConfig.Storage.Builder storageCluster2 = new ClusterListConfig.Storage.Builder().configid("storage/cluster.bar").name("bar");
ClusterListConfig clusterListCfg = new ClusterListConfig(new ClusterListConfig.Builder().storage(Arrays.asList(storageCluster1, storageCluster2)));
- ClusterList clusterList = new ClusterList();
- clusterList.configure(clusterListCfg);
+ ClusterList clusterList = new ClusterList(clusterListCfg);
VisitSearcher searcher = new VisitSearcher(new FeedContext(
new MessagePropertyProcessor(new FeederConfig(new FeederConfig.Builder().timeout(100).route("whatever").retryenabled(true)),
new LoadTypeConfig(new LoadTypeConfig.Builder())),
factory, docMan, clusterList, new NullFeedMetric()));
- searcher.getVisitorParameters(
- newQuery("visit?visit.selection=id.user=1234"), null);
+ searcher.getVisitorParameters(newQuery("visit?visit.selection=id.user=1234"), null);
}
@Test
diff --git a/vespaclient-container-plugin/src/test/application/services.xml b/vespaclient-container-plugin/src/test/rest-api-application/services.xml
index df178e109c3..df178e109c3 100644
--- a/vespaclient-container-plugin/src/test/application/services.xml
+++ b/vespaclient-container-plugin/src/test/rest-api-application/services.xml
diff --git a/vespaclient-core/src/main/java/com/yahoo/feedapi/FeedContext.java b/vespaclient-core/src/main/java/com/yahoo/feedapi/FeedContext.java
index a26064cd98b..885e28b63a5 100755
--- a/vespaclient-core/src/main/java/com/yahoo/feedapi/FeedContext.java
+++ b/vespaclient-core/src/main/java/com/yahoo/feedapi/FeedContext.java
@@ -1,6 +1,9 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.feedapi;
+import com.yahoo.cloud.config.ClusterListConfig;
+import com.yahoo.cloud.config.SlobroksConfig;
+import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.jdisc.Metric;
import com.yahoo.vespa.config.content.LoadTypeConfig;
import com.yahoo.document.DocumentTypeManager;
@@ -8,6 +11,7 @@ import com.yahoo.clientmetrics.ClientMetrics;
import com.yahoo.vespaclient.ClusterList;
import com.yahoo.vespaclient.config.FeederConfig;
+import javax.naming.OperationNotSupportedException;
import java.util.Map;
import java.util.TreeMap;
@@ -87,16 +91,35 @@ public class FeedContext {
return docTypeManager;
}
- public static FeedContext getInstance(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig, Metric metric) {
+ public static FeedContext getInstance(FeederConfig feederConfig,
+ LoadTypeConfig loadTypeConfig,
+ DocumentmanagerConfig documentmanagerConfig,
+ SlobroksConfig slobroksConfig,
+ ClusterListConfig clusterListConfig,
+ Metric metric) {
synchronized (sync) {
try {
if (instance == null) {
MessagePropertyProcessor proc = new MessagePropertyProcessor(feederConfig, loadTypeConfig);
- MessageBusSessionFactory mbusFactory = new MessageBusSessionFactory(proc);
- instance = new FeedContext(proc,
- mbusFactory,
- mbusFactory.getAccess().getDocumentTypeManager(),
- new ClusterList("client"), metric);
+
+ if (System.getProperty("vespa.local", "false").equals("true")) {
+ // Use injected configs when running from Application. This means we cannot reconfigure
+ MessageBusSessionFactory mbusFactory = new MessageBusSessionFactory(proc, documentmanagerConfig, slobroksConfig);
+ instance = new FeedContext(proc,
+ mbusFactory,
+ mbusFactory.getAccess().getDocumentTypeManager(),
+ new ClusterList(clusterListConfig), metric);
+ }
+ else {
+ // Don't send configs to messagebus, so that it self-subscribes instead, as this instance
+ // survives reconfig :-/
+ // This code will die soon ...
+ MessageBusSessionFactory mbusFactory = new MessageBusSessionFactory(proc, null, null);
+ instance = new FeedContext(proc,
+ mbusFactory,
+ mbusFactory.getAccess().getDocumentTypeManager(),
+ new ClusterList("client"), metric);
+ }
} else {
instance.getPropertyProcessor().configure(feederConfig, loadTypeConfig);
}
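The comments in the hunk above describe two modes: configs injected when running inside an Application, and self-subscription otherwise. A rough standalone sketch of that gate, assuming it lives next to FeedContext in com.yahoo.feedapi with the imports added above (the helper name is illustrative, not part of this patch):

    // Sketch only: mirrors the "vespa.local" branch added to FeedContext.getInstance.
    // Passing nulls keeps the old behavior where messagebus self-subscribes to config.
    static MessageBusSessionFactory newSessionFactory(MessagePropertyProcessor proc,
                                                      DocumentmanagerConfig documentmanagerConfig,
                                                      SlobroksConfig slobroksConfig) {
        boolean local = "true".equals(System.getProperty("vespa.local", "false"));
        return local
                ? new MessageBusSessionFactory(proc, documentmanagerConfig, slobroksConfig)
                : new MessageBusSessionFactory(proc, null, null);
    }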
diff --git a/vespaclient-core/src/main/java/com/yahoo/feedapi/FeederOptions.java b/vespaclient-core/src/main/java/com/yahoo/feedapi/FeederOptions.java
index 2894993b983..1546d605f02 100755
--- a/vespaclient-core/src/main/java/com/yahoo/feedapi/FeederOptions.java
+++ b/vespaclient-core/src/main/java/com/yahoo/feedapi/FeederOptions.java
@@ -252,18 +252,6 @@ public class FeederOptions {
return params;
}
- public MessageBusParams toMessageBusParams() {
- MessageBusParams mbusParams = new MessageBusParams();
- if (retryEnabled) {
- RetryTransientErrorsPolicy retryPolicy = new RetryTransientErrorsPolicy();
- retryPolicy.setBaseDelay(retryDelay);
- mbusParams.setRetryPolicy(retryPolicy);
- } else {
- mbusParams.setRetryPolicy(null);
- }
- return mbusParams;
- }
-
public RPCNetworkParams getNetworkParams() {
try {
RPCNetworkParams networkParams = new RPCNetworkParams();
diff --git a/vespaclient-core/src/main/java/com/yahoo/feedapi/MessageBusSessionFactory.java b/vespaclient-core/src/main/java/com/yahoo/feedapi/MessageBusSessionFactory.java
index 8021ea86783..d670ceb4e77 100755
--- a/vespaclient-core/src/main/java/com/yahoo/feedapi/MessageBusSessionFactory.java
+++ b/vespaclient-core/src/main/java/com/yahoo/feedapi/MessageBusSessionFactory.java
@@ -1,6 +1,8 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
package com.yahoo.feedapi;
+import com.yahoo.cloud.config.SlobroksConfig;
+import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.documentapi.VisitorParameters;
import com.yahoo.documentapi.VisitorSession;
import com.yahoo.documentapi.messagebus.MessageBusDocumentAccess;
@@ -12,6 +14,7 @@ import com.yahoo.jdisc.Metric;
import com.yahoo.messagebus.Message;
import com.yahoo.messagebus.ReplyHandler;
import com.yahoo.messagebus.SourceSession;
+import com.yahoo.messagebus.network.rpc.RPCNetworkParams;
import java.util.Collections;
@@ -27,12 +30,24 @@ public class MessageBusSessionFactory implements SessionFactory {
String NUM_UPDATES = "num_updates";
}
+ @SuppressWarnings("unused") // used from extensions
public MessageBusSessionFactory(MessagePropertyProcessor processor) {
+ this(processor, null, null);
+ }
+
+ public MessageBusSessionFactory(MessagePropertyProcessor processor,
+ DocumentmanagerConfig documentmanagerConfig,
+ SlobroksConfig slobroksConfig) {
this.processor = processor;
MessageBusParams params = new MessageBusParams(processor.getLoadTypes());
params.setTraceLevel(processor.getFeederOptions().getTraceLevel());
- params.setRPCNetworkParams(processor.getFeederOptions().getNetworkParams());
+ RPCNetworkParams rpcNetworkParams = processor.getFeederOptions().getNetworkParams();
+ if (slobroksConfig != null) // not set: will subscribe
+ rpcNetworkParams.setSlobroksConfig(slobroksConfig);
+ params.setRPCNetworkParams(rpcNetworkParams);
params.setDocumentManagerConfigId("client");
+ if (documentmanagerConfig != null) // not set: will subscribe
+ params.setDocumentmanagerConfig(documentmanagerConfig);
access = new MessageBusDocumentAccess(params);
}
diff --git a/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandler.java b/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandler.java
index 6e3facbdc98..08e1ca0482f 100755
--- a/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandler.java
+++ b/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandler.java
@@ -3,8 +3,11 @@ package com.yahoo.feedhandler;
import com.google.inject.Inject;
import com.yahoo.clientmetrics.RouteMetricSet;
+import com.yahoo.cloud.config.ClusterListConfig;
+import com.yahoo.cloud.config.SlobroksConfig;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.HttpResponse;
+import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.feedapi.DocprocMessageProcessor;
import com.yahoo.feedapi.FeedContext;
import com.yahoo.feedapi.Feeder;
@@ -30,9 +33,14 @@ public final class VespaFeedHandler extends VespaFeedHandlerBase {
public static final String JSON_INPUT = "jsonInput";
@Inject
- public VespaFeedHandler(FeederConfig feederConfig, LoadTypeConfig loadTypeConfig, Executor executor,
+ public VespaFeedHandler(FeederConfig feederConfig,
+ LoadTypeConfig loadTypeConfig,
+ DocumentmanagerConfig documentmanagerConfig,
+ SlobroksConfig slobroksConfig,
+ ClusterListConfig clusterListConfig,
+ Executor executor,
Metric metric) throws Exception {
- super(feederConfig, loadTypeConfig, executor, metric);
+ super(feederConfig, loadTypeConfig, documentmanagerConfig, slobroksConfig, clusterListConfig, executor, metric);
}
VespaFeedHandler(FeedContext context, Executor executor) throws Exception {
diff --git a/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerBase.java b/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerBase.java
index fa1e6854593..6b4810f1ac4 100755
--- a/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerBase.java
+++ b/vespaclient-core/src/main/java/com/yahoo/feedhandler/VespaFeedHandlerBase.java
@@ -3,11 +3,14 @@ package com.yahoo.feedhandler;
import com.google.inject.Inject;
import com.yahoo.clientmetrics.ClientMetrics;
+import com.yahoo.cloud.config.ClusterListConfig;
+import com.yahoo.cloud.config.SlobroksConfig;
import com.yahoo.component.provider.ComponentRegistry;
import com.yahoo.container.jdisc.HttpRequest;
import com.yahoo.container.jdisc.ThreadedHttpRequestHandler;
import com.yahoo.docproc.DocprocService;
import com.yahoo.document.DocumentTypeManager;
+import com.yahoo.document.config.DocumentmanagerConfig;
import com.yahoo.feedapi.FeedContext;
import com.yahoo.feedapi.MessagePropertyProcessor;
import com.yahoo.feedapi.SharedSender;
@@ -29,9 +32,14 @@ public abstract class VespaFeedHandlerBase extends ThreadedHttpRequestHandler {
@Inject
public VespaFeedHandlerBase(FeederConfig feederConfig,
LoadTypeConfig loadTypeConfig,
+ DocumentmanagerConfig documentmanagerConfig,
+ SlobroksConfig slobroksConfig,
+ ClusterListConfig clusterListConfig,
Executor executor,
Metric metric) throws Exception {
- this(FeedContext.getInstance(feederConfig, loadTypeConfig, metric), executor, (long)feederConfig.timeout() * 1000);
+ this(FeedContext.getInstance(feederConfig, loadTypeConfig, documentmanagerConfig,
+ slobroksConfig, clusterListConfig, metric),
+ executor, (long)feederConfig.timeout() * 1000);
}
public VespaFeedHandlerBase(FeedContext context, Executor executor) throws Exception {
diff --git a/vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterList.java b/vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterList.java
index 3ea3bb5cb9d..7587630a985 100644
--- a/vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterList.java
+++ b/vespaclient-core/src/main/java/com/yahoo/vespaclient/ClusterList.java
@@ -5,36 +5,39 @@ import com.yahoo.cloud.config.ClusterListConfig;
import com.yahoo.config.subscription.ConfigGetter;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
+/** A list of content clusters, obtained either from a given list, from config, or by self-subscribing */
public class ClusterList {
- List<ClusterDef> storageClusters = new ArrayList<ClusterDef>();
+
+ List<ClusterDef> contentClusters = new ArrayList<>();
public ClusterList() {
- this(null);
+ this(new ArrayList<>());
+ }
+
+ public ClusterList(List<ClusterDef> contentClusters) {
+ this.contentClusters = contentClusters;
}
public ClusterList(String configId) {
- if (configId != null) {
- configure(new ConfigGetter<>(ClusterListConfig.class).getConfig(configId));
- }
+ configure(new ConfigGetter<>(ClusterListConfig.class).getConfig(configId));
}
-
- public List<ClusterDef> getStorageClusters() {
- return storageClusters;
+
+ public ClusterList(ClusterListConfig config) {
+ configure(config);
}
- public void configure(ClusterListConfig cfg) {
- storageClusters.clear();
- for (int i = 0; i < cfg.storage().size(); i++) {
- storageClusters.add(new ClusterDef(cfg.storage(i).name(),
- cfg.storage(i).configid()));
- }
+ private void configure(ClusterListConfig config) {
+ contentClusters.clear(); // TODO: Create a new list instead of clearing
+ for (int i = 0; i < config.storage().size(); i++)
+ contentClusters.add(new ClusterDef(config.storage(i).name(), config.storage(i).configid()));
}
- public static ClusterList createMockedList(List<ClusterDef> clusters) {
- ClusterList list = new ClusterList(null);
- list.storageClusters = clusters;
- return list;
+ /** Returns a reference to the mutable list */
+ public List<ClusterDef> getStorageClusters() {
+ return contentClusters; // TODO: Use immutable list
}
+
}
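The configure(ClusterListConfig) call is now private and reached through the new constructors, which is what the updated visitor searcher tests above rely on. A brief usage sketch under that change (the "music" cluster name and config id are illustrative only):

    // Sketch: building a ClusterList directly from config instead of the removed configure(...) call.
    ClusterListConfig.Storage.Builder storage =
            new ClusterListConfig.Storage.Builder().configid("storage/cluster.music").name("music");
    ClusterListConfig config = new ClusterListConfig(new ClusterListConfig.Builder().storage(storage));
    ClusterList clusters = new ClusterList(config);
    // clusters.getStorageClusters() then returns one ClusterDef for "music".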
diff --git a/vespajlib/src/main/java/com/yahoo/net/HostName.java b/vespajlib/src/main/java/com/yahoo/net/HostName.java
index 9dff33e1f5f..4e791ca117a 100644
--- a/vespajlib/src/main/java/com/yahoo/net/HostName.java
+++ b/vespajlib/src/main/java/com/yahoo/net/HostName.java
@@ -15,12 +15,11 @@ public class HostName {
private static String myHost = null;
/**
- * Static method that returns the name of localhost using shell
- * command "hostname".
+ * Static method that returns the name of localhost using shell command "hostname".
+ * If you need a guaranteed resolvable name, see LinuxInetAddress.
*
* @return the name of localhost.
* @throws RuntimeException if executing the command 'hostname' fails.
- * @see LinuxInetAddress if you need a host name/address which is reachable
*/
public static synchronized String getLocalhost() {
if (myHost == null) {
@@ -38,4 +37,5 @@ public class HostName {
}
return myHost;
}
+
}
diff --git a/vespajlib/src/main/java/com/yahoo/net/LinuxInetAddress.java b/vespajlib/src/main/java/com/yahoo/net/LinuxInetAddress.java
index 1b7658b3a11..9d50c99d77c 100644
--- a/vespajlib/src/main/java/com/yahoo/net/LinuxInetAddress.java
+++ b/vespajlib/src/main/java/com/yahoo/net/LinuxInetAddress.java
@@ -12,28 +12,23 @@ import java.util.stream.Collectors;
/**
* Utilities for returning localhost addresses on Linux.
- * See
- * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4665037
- * on why this is necessary.
+ * See http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4665037 on why this is necessary.
*
* @author bratseth
*/
-// TODO: Remove on vespa 7
public class LinuxInetAddress {
/**
- * Returns an InetAddress representing the address of the localhost.
+ * Returns an InetAddress representing a resolvable localhost address.
* A non-loopback address is preferred if available.
* An address that resolves to a hostname is preferred among non-loopback addresses.
* IPv4 is preferred over IPv6 among resolving addresses.
*
* @return a localhost address
- * @deprecated use {@link HostName} instead
*/
// Note: Checking resolvability of ipV6 addresses takes a long time on some systems (over 5 seconds
// for some addresses on my mac). This method is written to minimize the number of resolution checks done
// and to defer ip6 checks until necessary.
- @Deprecated
public static InetAddress getLocalHost() {
InetAddress fallback = InetAddress.getLoopbackAddress();
try {
@@ -70,9 +65,7 @@ public class LinuxInetAddress {
*
* @return an array of the addresses of this
* @throws UnknownHostException if we cannot access the network
- * @deprecated do not use
*/
- @Deprecated
public static InetAddress[] getAllLocal() throws UnknownHostException {
InetAddress[] localInetAddresses = InetAddress.getAllByName("127.0.0.1");
if ( ! localInetAddresses[0].isLoopbackAddress()) return localInetAddresses;
diff --git a/vespalib/CMakeLists.txt b/vespalib/CMakeLists.txt
index 2ca768bc483..907cab7dbc6 100644
--- a/vespalib/CMakeLists.txt
+++ b/vespalib/CMakeLists.txt
@@ -75,9 +75,7 @@ vespa_define_module(
src/tests/tensor/sparse_tensor_builder
src/tests/tensor/dense_tensor_builder
src/tests/tensor/dense_tensor_operations
- src/tests/tensor/join_tensor_addresses
src/tests/tensor/tensor_address
- src/tests/tensor/tensor_address_element_iterator
src/tests/tensor/tensor_conformance
src/tests/tensor/tensor_function
src/tests/tensor/tensor_mapper
diff --git a/vespalib/src/testlist.txt b/vespalib/src/testlist.txt
index e3dd8414e2e..67982805df7 100644
--- a/vespalib/src/testlist.txt
+++ b/vespalib/src/testlist.txt
@@ -65,11 +65,9 @@ tests/sync
tests/tensor/sparse_tensor_builder
tests/tensor/dense_tensor_builder
tests/tensor/dense_tensor_operations
-tests/tensor/join_tensor_addresses
tests/tensor/simple_tensor_builder
tests/tensor/tensor
tests/tensor/tensor_address
-tests/tensor/tensor_address_element_iterator
tests/tensor/tensor_function
tests/tensor/tensor_mapper
tests/tensor/tensor_operations
diff --git a/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp b/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp
index 775c2b72e0a..33812779a30 100644
--- a/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp
+++ b/vespalib/src/tests/eval/simple_tensor/simple_tensor_test.cpp
@@ -16,101 +16,68 @@ using Stash = vespalib::Stash;
// need to specify numbers explicitly as size_t to avoid ambiguous behavior for 0
constexpr size_t operator "" _z (unsigned long long int n) { return n; }
-void dump(const Cells &cells, std::ostream &out) {
- out << std::endl;
- for (const auto &cell: cells) {
- size_t n = 0;
- out << " [";
- for (const auto &label: cell.address) {
- if (n++) {
- out << ",";
- }
- if (label.is_mapped()) {
- out << label.name;
- } else {
- out << label.index;
- }
- }
- out << "]: " << cell.value << std::endl;
- }
+const Tensor &unwrap(const Value &value) {
+ ASSERT_TRUE(value.is_tensor());
+ return *value.as_tensor();
}
-struct Check {
+struct CellBuilder {
Cells cells;
- Check() : cells() {}
- explicit Check(const SimpleTensor &tensor) : cells() {
- for (const auto &cell: tensor.cells()) {
- add(cell.address, cell.value);
- }
- }
- explicit Check(const TensorSpec &spec)
- : Check(*SimpleTensor::create(spec)) {}
- Check &add(const Address &address, double value) {
- cells.emplace_back(address, value);
- std::sort(cells.begin(), cells.end(),
- [](const auto &a, const auto &b){ return (a.address < b.address); });
+ CellBuilder &add(const Address &addr, double value) {
+ cells.emplace_back(addr, value);
return *this;
}
- bool operator==(const Check &rhs) const {
- if (cells.size() != rhs.cells.size()) {
- return false;
- }
- for (size_t i = 0; i < cells.size(); ++i) {
- if ((cells[i].address != rhs.cells[i].address) ||
- (cells[i].value != rhs.cells[i].value))
- {
- return false;
- }
- }
- return true;
- }
+ Cells build() { return cells; }
};
-std::ostream &operator<<(std::ostream &out, const Check &value) {
- dump(value.cells, out);
- return out;
-}
-
-const SimpleTensor &unwrap(const Tensor &tensor) {
- ASSERT_EQUAL(&tensor.engine(), &SimpleTensorEngine::ref());
- return static_cast<const SimpleTensor &>(tensor);
-}
-
-const SimpleTensor &unwrap(const Value &value) {
- ASSERT_TRUE(value.is_tensor());
- return unwrap(*value.as_tensor());
-}
-
TEST("require that simple tensors can be built using tensor spec") {
TensorSpec spec("tensor(w{},x[2],y{},z[2])");
spec.add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0)
.add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0)
.add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
- std::unique_ptr<SimpleTensor> tensor = SimpleTensor::create(spec);
- Check expect = Check()
- .add({{"xxx"}, {0_z}, {"xxx"}, {0_z}}, 1.0)
- .add({{"xxx"}, {0_z}, {"xxx"}, {1_z}}, 0.0)
- .add({{"xxx"}, {1_z}, {"xxx"}, {0_z}}, 0.0)
- .add({{"xxx"}, {1_z}, {"xxx"}, {1_z}}, 0.0)
- //-----------------------------------------
- .add({{"xxx"}, {0_z}, {"yyy"}, {0_z}}, 0.0)
- .add({{"xxx"}, {0_z}, {"yyy"}, {1_z}}, 2.0)
- .add({{"xxx"}, {1_z}, {"yyy"}, {0_z}}, 0.0)
- .add({{"xxx"}, {1_z}, {"yyy"}, {1_z}}, 0.0)
- //-----------------------------------------
- .add({{"yyy"}, {0_z}, {"xxx"}, {0_z}}, 0.0)
- .add({{"yyy"}, {0_z}, {"xxx"}, {1_z}}, 0.0)
- .add({{"yyy"}, {1_z}, {"xxx"}, {0_z}}, 3.0)
- .add({{"yyy"}, {1_z}, {"xxx"}, {1_z}}, 0.0)
- //-----------------------------------------
- .add({{"yyy"}, {0_z}, {"yyy"}, {0_z}}, 0.0)
- .add({{"yyy"}, {0_z}, {"yyy"}, {1_z}}, 0.0)
- .add({{"yyy"}, {1_z}, {"yyy"}, {0_z}}, 0.0)
- .add({{"yyy"}, {1_z}, {"yyy"}, {1_z}}, 4.0);
- EXPECT_EQUAL(expect, Check(*tensor));
- std::unique_ptr<Tensor> tensor2 = SimpleTensorEngine::ref().create(spec);
- EXPECT_EQUAL(expect, Check(unwrap(*tensor2)));
+ auto tensor = SimpleTensorEngine::ref().create(spec);
+ TensorSpec full_spec("tensor(w{},x[2],y{},z[2])");
+ full_spec
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 1.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 2.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "xxx"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "xxx"}, {"z", 0}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 0}, {"y", "yyy"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 0}}, 3.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "xxx"}, {"z", 1}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 0}}, 0.0)
+ .add({{"w", "yyy"}, {"x", 1}, {"y", "yyy"}, {"z", 1}}, 4.0);
+ auto full_tensor = SimpleTensorEngine::ref().create(full_spec);
+ SimpleTensor expect_tensor(ValueType::from_spec("tensor(w{},x[2],y{},z[2])"),
+ CellBuilder()
+ .add({{"xxx"}, {0_z}, {"xxx"}, {0_z}}, 1.0)
+ .add({{"xxx"}, {0_z}, {"xxx"}, {1_z}}, 0.0)
+ .add({{"xxx"}, {0_z}, {"yyy"}, {0_z}}, 0.0)
+ .add({{"xxx"}, {0_z}, {"yyy"}, {1_z}}, 2.0)
+ .add({{"xxx"}, {1_z}, {"xxx"}, {0_z}}, 0.0)
+ .add({{"xxx"}, {1_z}, {"xxx"}, {1_z}}, 0.0)
+ .add({{"xxx"}, {1_z}, {"yyy"}, {0_z}}, 0.0)
+ .add({{"xxx"}, {1_z}, {"yyy"}, {1_z}}, 0.0)
+ .add({{"yyy"}, {0_z}, {"xxx"}, {0_z}}, 0.0)
+ .add({{"yyy"}, {0_z}, {"xxx"}, {1_z}}, 0.0)
+ .add({{"yyy"}, {0_z}, {"yyy"}, {0_z}}, 0.0)
+ .add({{"yyy"}, {0_z}, {"yyy"}, {1_z}}, 0.0)
+ .add({{"yyy"}, {1_z}, {"xxx"}, {0_z}}, 3.0)
+ .add({{"yyy"}, {1_z}, {"xxx"}, {1_z}}, 0.0)
+ .add({{"yyy"}, {1_z}, {"yyy"}, {0_z}}, 0.0)
+ .add({{"yyy"}, {1_z}, {"yyy"}, {1_z}}, 4.0)
+ .build());
+ EXPECT_EQUAL(expect_tensor, *tensor);
+ EXPECT_EQUAL(expect_tensor, *full_tensor);
+ EXPECT_EQUAL(full_spec, tensor->engine().to_spec(*tensor));
};
TEST("require that simple tensors can have their values negated") {
@@ -125,10 +92,10 @@ TEST("require that simple tensors can have their values negated") {
.add({{"x","2"},{"y","1"}}, 3)
.add({{"x","1"},{"y","2"}}, -5));
auto result = SimpleTensor::perform(operation::Neg(), *tensor);
- EXPECT_EQUAL(Check(*expect), Check(*result));
+ EXPECT_EQUAL(*expect, *result);
Stash stash;
const Value &result2 = SimpleTensorEngine::ref().map(operation::Neg(), *tensor, stash);
- EXPECT_EQUAL(Check(*expect), Check(unwrap(result2)));
+ EXPECT_EQUAL(*expect, unwrap(result2));
}
TEST("require that simple tensors can be multiplied with each other") {
@@ -150,10 +117,10 @@ TEST("require that simple tensors can be multiplied with each other") {
.add({{"x","2"},{"y","1"},{"z","2"}}, 39)
.add({{"x","1"},{"y","2"},{"z","1"}}, 55));
auto result = SimpleTensor::perform(operation::Mul(), *lhs, *rhs);
- EXPECT_EQUAL(Check(*expect), Check(*result));
+ EXPECT_EQUAL(*expect, *result);
Stash stash;
const Value &result2 = SimpleTensorEngine::ref().apply(operation::Mul(), *lhs, *rhs, stash);
- EXPECT_EQUAL(Check(*expect), Check(unwrap(result2)));
+ EXPECT_EQUAL(*expect, unwrap(result2));
}
TEST("require that simple tensors support dimension reduction") {
@@ -178,22 +145,22 @@ TEST("require that simple tensors support dimension reduction") {
auto result_sum_y = tensor->reduce(operation::Add(), {"y"});
auto result_sum_x = tensor->reduce(operation::Add(), {"x"});
auto result_sum_all = tensor->reduce(operation::Add(), {"x", "y"});
- EXPECT_EQUAL(Check(*expect_sum_y), Check(*result_sum_y));
- EXPECT_EQUAL(Check(*expect_sum_x), Check(*result_sum_x));
- EXPECT_EQUAL(Check(*expect_sum_all), Check(*result_sum_all));
+ EXPECT_EQUAL(*expect_sum_y, *result_sum_y);
+ EXPECT_EQUAL(*expect_sum_x, *result_sum_x);
+ EXPECT_EQUAL(*expect_sum_all, *result_sum_all);
Stash stash;
const Value &result_sum_y_2 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {"y"}, stash);
const Value &result_sum_x_2 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {"x"}, stash);
const Value &result_sum_all_2 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {"x", "y"}, stash);
const Value &result_sum_all_3 = SimpleTensorEngine::ref().reduce(*tensor, operation::Add(), {}, stash);
- EXPECT_EQUAL(Check(*expect_sum_y), Check(unwrap(result_sum_y_2)));
- EXPECT_EQUAL(Check(*expect_sum_x), Check(unwrap(result_sum_x_2)));
+ EXPECT_EQUAL(*expect_sum_y, unwrap(result_sum_y_2));
+ EXPECT_EQUAL(*expect_sum_x, unwrap(result_sum_x_2));
EXPECT_TRUE(result_sum_all_2.is_double());
EXPECT_TRUE(result_sum_all_3.is_double());
EXPECT_EQUAL(21, result_sum_all_2.as_double());
EXPECT_EQUAL(21, result_sum_all_3.as_double());
- EXPECT_TRUE(SimpleTensorEngine::ref().equal(*result_sum_y, *result_sum_y));
- EXPECT_TRUE(!SimpleTensorEngine::ref().equal(*result_sum_y, *result_sum_x));
+ EXPECT_EQUAL(*result_sum_y, *result_sum_y);
+ EXPECT_NOT_EQUAL(*result_sum_y, *result_sum_x);
}
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp b/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp
index e381ae88cbe..9a656ad2697 100644
--- a/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp
+++ b/vespalib/src/tests/eval/tensor/eval_tensor_test.cpp
@@ -64,13 +64,13 @@ TEST("require that tensor sum over dimension works") {
}
TEST("require that tensor add works") {
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:5,{x:3}:3}"), Eval("{{x:1}:1,{x:2}:2} + {{x:2}:3,{x:3}:3}"));
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:5,{x:3}:3}"), Eval("{{x:2}:3,{x:3}:3} + {{x:1}:1,{x:2}:2}"));
+ EXPECT_EQUAL(Eval("{{x:2}:5}"), Eval("{{x:1}:1,{x:2}:2} + {{x:2}:3,{x:3}:3}"));
+ EXPECT_EQUAL(Eval("{{x:2}:5}"), Eval("{{x:2}:3,{x:3}:3} + {{x:1}:1,{x:2}:2}"));
}
TEST("require that tensor sub works") {
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:-1,{x:3}:-3}"), Eval("{{x:1}:1,{x:2}:2} - {{x:2}:3,{x:3}:3}"));
- EXPECT_EQUAL(Eval("{{x:1}:-1,{x:2}:1,{x:3}:3}"), Eval("{{x:2}:3,{x:3}:3} - {{x:1}:1,{x:2}:2}"));
+ EXPECT_EQUAL(Eval("{{x:2}:-1}"), Eval("{{x:1}:1,{x:2}:2} - {{x:2}:3,{x:3}:3}"));
+ EXPECT_EQUAL(Eval("{{x:2}:1}"), Eval("{{x:2}:3,{x:3}:3} - {{x:1}:1,{x:2}:2}"));
}
TEST("require that tensor multiply works") {
@@ -78,13 +78,13 @@ TEST("require that tensor multiply works") {
}
TEST("require that tensor min works") {
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:2,{x:3}:3}"), Eval("min({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})"));
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:2,{x:3}:3}"), Eval("min({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})"));
+ EXPECT_EQUAL(Eval("{{x:2}:2}"), Eval("min({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})"));
+ EXPECT_EQUAL(Eval("{{x:2}:2}"), Eval("min({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})"));
}
TEST("require that tensor max works") {
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:3,{x:3}:3}"), Eval("max({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})"));
- EXPECT_EQUAL(Eval("{{x:1}:1,{x:2}:3,{x:3}:3}"), Eval("max({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})"));
+ EXPECT_EQUAL(Eval("{{x:2}:3}"), Eval("max({{x:1}:1,{x:2}:2}, {{x:2}:3,{x:3}:3})"));
+ EXPECT_EQUAL(Eval("{{x:2}:3}"), Eval("max({{x:2}:3,{x:3}:3}, {{x:1}:1,{x:2}:2})"));
}
TEST("require that tensor match works") {
diff --git a/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp b/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
index 8478d46e1f4..595b3743625 100644
--- a/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
+++ b/vespalib/src/tests/tensor/dense_tensor_builder/dense_tensor_builder_test.cpp
@@ -4,11 +4,11 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/vespalib/tensor/dense/dense_tensor_builder.h>
#include <vespa/vespalib/util/exceptions.h>
-#include <algorithm>
using namespace vespalib::tensor;
using vespalib::IllegalArgumentException;
using Builder = DenseTensorBuilder;
+using vespalib::eval::TensorSpec;
void
assertTensor(const DenseTensor::DimensionsMeta &expDims,
@@ -20,33 +20,71 @@ assertTensor(const DenseTensor::DimensionsMeta &expDims,
EXPECT_EQUAL(expCells, realTensor.cells());
}
+void
+assertTensorSpec(const TensorSpec &expSpec, const Tensor &tensor)
+{
+ TensorSpec actSpec = tensor.toSpec();
+ EXPECT_EQUAL(expSpec, actSpec);
+}
+
struct Fixture
{
Builder builder;
};
+Tensor::UP
+build1DTensor(Builder &builder)
+{
+ Builder::Dimension dimX = builder.defineDimension("x", 3);
+ builder.addLabel(dimX, 0).addCell(10).
+ addLabel(dimX, 1).addCell(11).
+ addLabel(dimX, 2).addCell(12);
+ return builder.build();
+}
+
TEST_F("require that 1d tensor can be constructed", Fixture)
{
- Builder::Dimension dimX = f.builder.defineDimension("x", 3);
- f.builder.addLabel(dimX, 0).addCell(10).
- addLabel(dimX, 1).addCell(11).
- addLabel(dimX, 2).addCell(12);
- assertTensor({{"x",3}}, {10,11,12},
- *f.builder.build());
+ assertTensor({{"x",3}}, {10,11,12}, *build1DTensor(f.builder));
+}
+
+TEST_F("require that 1d tensor can be converted to tensor spec", Fixture)
+{
+ assertTensorSpec(TensorSpec("tensor(x[3])").
+ add({{"x", 0}}, 10).
+ add({{"x", 1}}, 11).
+ add({{"x", 2}}, 12),
+ *build1DTensor(f.builder));
+}
+
+Tensor::UP
+build2DTensor(Builder &builder)
+{
+ Builder::Dimension dimX = builder.defineDimension("x", 3);
+ Builder::Dimension dimY = builder.defineDimension("y", 2);
+ builder.addLabel(dimX, 0).addLabel(dimY, 0).addCell(10).
+ addLabel(dimX, 0).addLabel(dimY, 1).addCell(11).
+ addLabel(dimX, 1).addLabel(dimY, 0).addCell(12).
+ addLabel(dimX, 1).addLabel(dimY, 1).addCell(13).
+ addLabel(dimX, 2).addLabel(dimY, 0).addCell(14).
+ addLabel(dimX, 2).addLabel(dimY, 1).addCell(15);
+ return builder.build();
}
TEST_F("require that 2d tensor can be constructed", Fixture)
{
- Builder::Dimension dimX = f.builder.defineDimension("x", 3);
- Builder::Dimension dimY = f.builder.defineDimension("y", 2);
- f.builder.addLabel(dimX, 0).addLabel(dimY, 0).addCell(10).
- addLabel(dimX, 0).addLabel(dimY, 1).addCell(11).
- addLabel(dimX, 1).addLabel(dimY, 0).addCell(12).
- addLabel(dimX, 1).addLabel(dimY, 1).addCell(13).
- addLabel(dimX, 2).addLabel(dimY, 0).addCell(14).
- addLabel(dimX, 2).addLabel(dimY, 1).addCell(15);
- assertTensor({{"x",3},{"y",2}}, {10,11,12,13,14,15},
- *f.builder.build());
+ assertTensor({{"x",3},{"y",2}}, {10,11,12,13,14,15}, *build2DTensor(f.builder));
+}
+
+TEST_F("require that 2d tensor can be converted to tensor spec", Fixture)
+{
+ assertTensorSpec(TensorSpec("tensor(x[3],y[2])").
+ add({{"x", 0},{"y", 0}}, 10).
+ add({{"x", 0},{"y", 1}}, 11).
+ add({{"x", 1},{"y", 0}}, 12).
+ add({{"x", 1},{"y", 1}}, 13).
+ add({{"x", 2},{"y", 0}}, 14).
+ add({{"x", 2},{"y", 1}}, 15),
+ *build2DTensor(f.builder));
}
TEST_F("require that 3d tensor can be constructed", Fixture)
@@ -189,7 +227,6 @@ TEST_F("require that already specified label throws exception", Fixture)
"Label for dimension 'x' is already specified with value '0'");
}
-
TEST_F("require that dimensions are sorted", Fixture)
{
Builder::Dimension dimY = f.builder.defineDimension("y", 3);
@@ -205,4 +242,9 @@ TEST_F("require that dimensions are sorted", Fixture)
EXPECT_EQUAL("tensor(x[5],y[3])", denseTensor.getType().to_spec());
}
+
+
+
+
+
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/.gitignore b/vespalib/src/tests/tensor/join_tensor_addresses/.gitignore
deleted file mode 100644
index bcf856a9f59..00000000000
--- a/vespalib/src/tests/tensor/join_tensor_addresses/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vespalib_join_tensor_addresses_test_app
diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt b/vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt
deleted file mode 100644
index 6923cbc1133..00000000000
--- a/vespalib/src/tests/tensor/join_tensor_addresses/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespalib_join_tensor_addresses_test_app TEST
- SOURCES
- join_tensor_addresses_test.cpp
- DEPENDS
- vespalib
- vespalib_vespalib_tensor
-)
-vespa_add_test(NAME vespalib_join_tensor_addresses_test_app COMMAND vespalib_join_tensor_addresses_test_app)
diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/FILES b/vespalib/src/tests/tensor/join_tensor_addresses/FILES
deleted file mode 100644
index ad4ab2f6d87..00000000000
--- a/vespalib/src/tests/tensor/join_tensor_addresses/FILES
+++ /dev/null
@@ -1 +0,0 @@
-join_tensor_addresses_test.cpp
diff --git a/vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp b/vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp
deleted file mode 100644
index db1e04d792a..00000000000
--- a/vespalib/src/tests/tensor/join_tensor_addresses/join_tensor_addresses_test.cpp
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/vespalib/stllike/hash_set.h>
-#include <vespa/vespalib/tensor/tensor_address.h>
-#include <vespa/vespalib/tensor/tensor_address_builder.h>
-#include <vespa/vespalib/tensor/sparse/compact_tensor_address.h>
-#include <vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h>
-#include <vespa/vespalib/tensor/tensor_address_element_iterator.h>
-#include <vespa/vespalib/tensor/dimensions_vector_iterator.h>
-#include <vespa/vespalib/tensor/join_tensor_addresses.h>
-
-using namespace vespalib::tensor;
-
-using TensorAddressMap = std::map<std::string, std::string>;
-using TensorAddressElementVec =
- std::vector<std::pair<std::string, std::string>>;
-
-namespace vespalib
-{
-
-std::ostream &
-operator<<(std::ostream &out, const TensorAddressElementVec &vec)
-{
- out << "{";
- bool first = true;
- for (const auto &elem : vec) {
- if (!first) {
- out << ",";
- }
- out << "{\"" << elem.first << "\",\"" << elem.second << "\"}";
- first = false;
- }
- out << "}";
- return out;
-};
-
-}
-
-
-class DummyAddressBuilder
-{
- TensorAddressElementVec _elements;
-public:
- void add(vespalib::stringref dimension, vespalib::stringref label)
- {
- _elements.emplace_back(dimension, label);
- }
-
- const TensorAddressElementVec &elements() const { return _elements; }
- void clear() { }
-};
-
-
-template <class TensorAddressT> struct FixtureBase;
-
-template <> struct FixtureBase<TensorAddress>
-{
- using AddressType = TensorAddress;
- using AddressBuilderType = TensorAddressBuilder;
-
- static TensorAddress create(TensorAddressBuilder &builder) {
- return builder.build();
- }
-};
-
-
-template <> struct FixtureBase<CompactTensorAddress>
-{
- using AddressType = CompactTensorAddress;
- using AddressBuilderType = CompactTensorAddressBuilder;
-
- vespalib::Stash _stash;
-
- CompactTensorAddress
- create(CompactTensorAddressBuilder &builder)
- {
- CompactTensorAddressRef oldRef = builder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
- CompactTensorAddress ret;
- ret.deserializeFromSparseAddressRef(newRef);
- return ret;
- }
-};
-
-template <> struct FixtureBase<CompactTensorAddressRef>
-{
- using AddressType = CompactTensorAddressRef;
- using AddressBuilderType = CompactTensorAddressBuilder;
-
- vespalib::Stash _stash;
-
- CompactTensorAddressRef
- create(CompactTensorAddressBuilder &builder)
- {
- CompactTensorAddressRef oldRef = builder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
- return newRef;
- }
-};
-
-template <class TensorAddressT> struct Fixture
- : public FixtureBase<TensorAddressT>
-{
- using Parent = FixtureBase<TensorAddressT>;
- using AddressType = typename Parent::AddressType;
- using AddressBuilderType = typename Parent::AddressBuilderType;
- using Parent::create;
-
- AddressType
- create(const TensorAddressMap &address_in) {
- AddressBuilderType builder;
- for (auto &element : address_in) {
- builder.add(element.first, element.second);
- }
- return create(builder);
- }
-
- void
- verifyJoin3Way(bool exp,
- const TensorAddressElementVec &expVec,
- const DimensionsVector &commonDimensions,
- const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in)
- {
- AddressType expAddress = create(lhsAddress_in);
- AddressType lhsAddress = create(lhsAddress_in);
- AddressType rhsAddress = create(rhsAddress_in);
- DummyAddressBuilder builder;
- bool act = joinTensorAddresses<DummyAddressBuilder,
- AddressType, AddressType>
- (builder, commonDimensions, lhsAddress, rhsAddress);
- EXPECT_EQUAL(exp, act);
- if (exp) {
- EXPECT_EQUAL(expVec, builder.elements());
- }
- }
-
- void
- verifyJoin2Way(bool exp,
- const TensorAddressElementVec &expVec,
- const DimensionsSet &commonDimensions,
- const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in)
- {
- AddressType expAddress = create(lhsAddress_in);
- AddressType lhsAddress = create(lhsAddress_in);
- AddressType rhsAddress = create(rhsAddress_in);
- DummyAddressBuilder builder;
- bool act = joinTensorAddresses<DummyAddressBuilder,
- AddressType, AddressType>
- (builder, commonDimensions, lhsAddress, rhsAddress);
- EXPECT_EQUAL(exp, act);
- if (exp) {
- EXPECT_EQUAL(expVec, builder.elements());
- }
- }
-
- void
- verifyJoin(bool exp,
- const TensorAddressElementVec &expVec,
- const DimensionsVector &commonDimensions,
- const TensorAddressMap &lhsAddress,
- const TensorAddressMap &rhsAddress)
- {
- TEST_DO(verifyJoin3Way(exp, expVec, commonDimensions,
- lhsAddress, rhsAddress));
- DimensionsSet commonDimensionsSet(commonDimensions.begin(),
- commonDimensions.end());
- TEST_DO(verifyJoin2Way(exp, expVec, commonDimensionsSet,
- lhsAddress, rhsAddress));
- }
-
- void
- verifyJoin(const TensorAddressElementVec &expVec,
- const DimensionsVector &commonDimensions,
- const TensorAddressMap &lhsAddress,
- const TensorAddressMap &rhsAddress)
- {
- verifyJoin(true, expVec, commonDimensions, lhsAddress, rhsAddress);
- }
-
- void
- verifyJoinFailure(const DimensionsVector &commonDimensions,
- const TensorAddressMap &lhsAddress,
- const TensorAddressMap &rhsAddress)
- {
- verifyJoin(false, {}, commonDimensions, lhsAddress, rhsAddress);
- }
-
- void
- verifyJoinFailureOnLabelMisMatch()
- {
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"x", "1"}, {"y", "3"}}));
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"x", "2"}, {"y", "2"}}));
- TEST_DO(verifyJoinFailure({"y"},
- {{"x", "1"}, {"y", "2"}},
- {{"y", "1"}, {"z", "3"}}));
- TEST_DO(verifyJoinFailure({"y"},
- {{"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"y", "1"}}));
- }
-
- void
- verityJoinFailureOnMissingDimension()
- {
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"y", "2"}},
- {{"x", "2"}, {"y", "2"}}));
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"y", "2"}}));
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}},
- {{"x", "2"}, {"y", "2"}}));
- TEST_DO(verifyJoinFailure({"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"x", "2"}}));
- TEST_DO(verifyJoinFailure({"x", "y", "z"},
- {{"x", "1"}, {"z", "3"}},
- {{"x", "2"}, {"y", "2"}, {"z", "3"}}));
- TEST_DO(verifyJoinFailure({"x", "y", "z"},
- {{"x", "2"}, {"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"z", "3"}}));
- }
-
- void
- verifyJoinSuccessOnDisjunctDimensions()
- {
- TEST_DO(verifyJoin({}, {}, {}, {}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}, {"zz", "4"}},
- {},
- {{"x", "1"}, {"y", "2"}},
- {{"z", "3"}, {"zz", "4"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}, {"zz", "4"}},
- {},
- {{"z", "3"}, {"zz", "4"}},
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}, {"zz", "4"}},
- {},
- {{"x", "1"}, {"z", "3"}},
- {{"y", "2"}, {"zz", "4"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}},
- {},
- {{"x", "1"}, {"y", "2"}},
- {}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}},
- {},
- {},
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"z", "3"}}, {"y"},
- {{"x", "1"}},
- {{"z", "3"}}));
- TEST_DO(verifyJoin( {{"x", "1"}, {"z", "3"}}, {"y"},
- {{"z", "3"}},
- {{"x", "1"}}));
- }
-
- void
- verifyJoinSuccessOnOverlappingDimensions()
- {
- TEST_DO(verifyJoin({{"x", "1"}}, {"x"},
- {{"x", "1"}}, {{"x", "1"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}},
- {"x", "z"},
- {{"x", "1"}, {"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"z", "3"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}},
- {"x", "z"},
- {{"x", "1"}, {"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"z", "3"}}));
- TEST_DO(verifyJoin( {{"x", "1"}, {"y", "2"}}, {"x", "y"},
- {{"x", "1"}, {"y", "2"}},
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}}, {"y"},
- {{"x", "1"}, {"y", "2"}},
- {{"y", "2"}, {"z", "3"}}));
- TEST_DO(verifyJoin({{"x", "1"}, {"y", "2"}, {"z", "3"}}, {"y"},
- {{"y", "2"}, {"z", "3"}},
- {{"x", "1"}, {"y", "2"}}));
- }
-
- void
- verifyJoin()
- {
- verifyJoinSuccessOnDisjunctDimensions();
- verifyJoinSuccessOnOverlappingDimensions();
- verifyJoinFailureOnLabelMisMatch();
- verityJoinFailureOnMissingDimension();
- }
-
-};
-
-
-TEST_F("Test that Tensor address can be joined", Fixture<TensorAddress>)
-{
- f.verifyJoin();
-}
-
-TEST_F("Test that compact Tensor address can be joined",
- Fixture<CompactTensorAddress>)
-{
- f.verifyJoin();
-}
-
-
-TEST_F("Test that compact Tensor address ref can be joined",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyJoin();
-}
-
-TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp b/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
index 69d8a7c3062..39e82abec7d 100644
--- a/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
+++ b/vespalib/src/tests/tensor/sparse_tensor_builder/sparse_tensor_builder_test.cpp
@@ -2,9 +2,10 @@
#include <vespa/vespalib/testkit/test_kit.h>
#include <vespa/vespalib/tensor/sparse/sparse_tensor_builder.h>
+#include <vespa/vespalib/test/insertion_operators.h>
using namespace vespalib::tensor;
-
+using vespalib::eval::TensorSpec;
void
assertCellValue(double expValue, const TensorAddress &address,
@@ -27,13 +28,14 @@ assertCellValue(double expValue, const TensorAddress &address,
addressBuilder.add("");
++dimsItr;
}
- CompactTensorAddressRef addressRef(addressBuilder.getAddressRef());
+ SparseTensorAddressRef addressRef(addressBuilder.getAddressRef());
auto itr = cells.find(addressRef);
EXPECT_FALSE(itr == cells.end());
EXPECT_EQUAL(expValue, itr->second);
}
-TEST("require that tensor can be constructed")
+Tensor::UP
+buildTensor()
{
SparseTensorBuilder builder;
builder.define_dimension("c");
@@ -44,7 +46,12 @@ TEST("require that tensor can be constructed")
add_label(builder.define_dimension("b"), "2").add_cell(10).
add_label(builder.define_dimension("c"), "3").
add_label(builder.define_dimension("d"), "4").add_cell(20);
- Tensor::UP tensor = builder.build();
+ return builder.build();
+}
+
+TEST("require that tensor can be constructed")
+{
+ Tensor::UP tensor = buildTensor();
const SparseTensor &sparseTensor = dynamic_cast<const SparseTensor &>(*tensor);
const TensorDimensions &dimensions = sparseTensor.dimensions();
const SparseTensor::Cells &cells = sparseTensor.cells();
@@ -55,6 +62,16 @@ TEST("require that tensor can be constructed")
dimensions, cells);
}
+TEST("require that tensor can be converted to tensor spec")
+{
+ Tensor::UP tensor = buildTensor();
+ TensorSpec expSpec("tensor(a{},b{},c{},d{})");
+ expSpec.add({{"a", "1"}, {"b", "2"}}, 10).
+ add({{"c", "3"}, {"d", "4"}}, 20);
+ TensorSpec actSpec = tensor->toSpec();
+ EXPECT_EQUAL(expSpec, actSpec);
+}
+
TEST("require that dimensions are extracted")
{
SparseTensorBuilder builder;
diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore b/vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore
deleted file mode 100644
index c28cf0c86f2..00000000000
--- a/vespalib/src/tests/tensor/tensor_address_element_iterator/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-vespalib_tensor_address_element_iterator_test_app
diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt b/vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt
deleted file mode 100644
index dad69af7ba3..00000000000
--- a/vespalib/src/tests/tensor/tensor_address_element_iterator/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-vespa_add_executable(vespalib_tensor_address_element_iterator_test_app TEST
- SOURCES
- tensor_address_element_iterator_test.cpp
- DEPENDS
- vespalib
- vespalib_vespalib_tensor
-)
-vespa_add_test(NAME vespalib_tensor_address_element_iterator_test_app COMMAND vespalib_tensor_address_element_iterator_test_app)
diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/FILES b/vespalib/src/tests/tensor/tensor_address_element_iterator/FILES
deleted file mode 100644
index b185a25973e..00000000000
--- a/vespalib/src/tests/tensor/tensor_address_element_iterator/FILES
+++ /dev/null
@@ -1 +0,0 @@
-tensor_address_element_iterator_test.cpp
diff --git a/vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp b/vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp
deleted file mode 100644
index 4e953048f67..00000000000
--- a/vespalib/src/tests/tensor/tensor_address_element_iterator/tensor_address_element_iterator_test.cpp
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/vespalib/testkit/test_kit.h>
-#include <vespa/vespalib/stllike/hash_set.h>
-#include <vespa/vespalib/tensor/tensor_address.h>
-#include <vespa/vespalib/tensor/tensor_address_builder.h>
-#include <vespa/vespalib/tensor/sparse/compact_tensor_address.h>
-#include <vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h>
-#include <vespa/vespalib/tensor/tensor_address_element_iterator.h>
-
-using namespace vespalib::tensor;
-
-using TensorAddressMap = std::map<std::string, std::string>;
-using TensorAddressElementVec =
- std::vector<std::pair<std::string, std::string>>;
-
-namespace vespalib
-{
-
-std::ostream &
-operator<<(std::ostream &out, const TensorAddressElementVec &vec)
-{
- out << "{";
- bool first = true;
- for (const auto &elem : vec) {
- if (!first) {
- out << ",";
- }
- out << "{\"" << elem.first << "\",\"" << elem.second << "\"}";
- first = false;
- }
- out << "}";
- return out;
-};
-
-}
-
-
-class DummyAddressBuilder
-{
- TensorAddressElementVec _elements;
-public:
- void add(vespalib::stringref dimension, vespalib::stringref label)
- {
- _elements.emplace_back(dimension, label);
- }
-
- const TensorAddressElementVec &elements() const { return _elements; }
-};
-
-
-template <class TensorAddressT> struct FixtureBase;
-
-template <> struct FixtureBase<TensorAddress>
-{
- using AddressType = TensorAddress;
- using AddressBuilderType = TensorAddressBuilder;
-
- static TensorAddress create(TensorAddressBuilder &builder) {
- return builder.build();
- }
-};
-
-
-template <> struct FixtureBase<CompactTensorAddress>
-{
- using AddressType = CompactTensorAddress;
- using AddressBuilderType = CompactTensorAddressBuilder;
-
- vespalib::Stash _stash;
-
- CompactTensorAddress
- create(CompactTensorAddressBuilder &builder)
- {
- CompactTensorAddressRef oldRef = builder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
- CompactTensorAddress ret;
- ret.deserializeFromSparseAddressRef(newRef);
- return ret;
- }
-};
-
-template <> struct FixtureBase<CompactTensorAddressRef>
-{
- using AddressType = CompactTensorAddressRef;
- using AddressBuilderType = CompactTensorAddressBuilder;
-
- vespalib::Stash _stash;
-
- CompactTensorAddressRef
- create(CompactTensorAddressBuilder &builder)
- {
- CompactTensorAddressRef oldRef = builder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
- return newRef;
- }
-};
-
-template <class TensorAddressT> struct Fixture
- : public FixtureBase<TensorAddressT>
-{
- using Parent = FixtureBase<TensorAddressT>;
- using AddressType = typename Parent::AddressType;
- using AddressBuilderType = typename Parent::AddressBuilderType;
- using Parent::create;
-
- AddressType
- create(const TensorAddressMap &address_in) {
- AddressBuilderType builder;
- for (auto &element : address_in) {
- builder.add(element.first, element.second);
- }
- return create(builder);
- }
-
- void
- verifyPlainIterate(const TensorAddressMap &address_in)
- {
- AddressType address = create(address_in);
- TensorAddressElementIterator<AddressType> itr(address);
- for (auto &element : address_in) {
- EXPECT_TRUE(itr.valid());
- EXPECT_EQUAL(element.first, itr.dimension());
- EXPECT_EQUAL(element.second, itr.label());
- itr.next();
- }
- EXPECT_FALSE(itr.valid());
- }
-
-
- void
- verifyPlainIterate()
- {
- TEST_DO(verifyPlainIterate({}));
- TEST_DO(verifyPlainIterate({{"a", "1"}}));
- TEST_DO(verifyPlainIterate({{"a", "1"}, {"b", "2"}}));
- }
-
- void
- verifyBeforeDimension(const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in,
- bool exp)
- {
- AddressType lhsAddress = create(lhsAddress_in);
- TensorAddressElementIterator<AddressType> lhsItr(lhsAddress);
- AddressType rhsAddress = create(rhsAddress_in);
- TensorAddressElementIterator<AddressType> rhsItr(rhsAddress);
- EXPECT_EQUAL(exp, lhsItr.beforeDimension(rhsItr));
- }
-
- void
- verifyBeforeDimension() {
- TEST_DO(verifyBeforeDimension({}, {}, false));
- TEST_DO(verifyBeforeDimension({}, {{"x", "1"}}, false));
- TEST_DO(verifyBeforeDimension({{"x", "1"}}, {}, true));
- TEST_DO(verifyBeforeDimension({{"x", "1"}}, {{"x", "2"}}, false));
- TEST_DO(verifyBeforeDimension({{"x", "1"}}, {{"y", "2"}}, true));
- TEST_DO(verifyBeforeDimension({{"y", "1"}}, {{"x", "2"}}, false));
- }
-
- void
- verifyAtDimension(const TensorAddressMap &address_in,
- vespalib::stringref dimension,
- bool exp)
- {
- AddressType address = create(address_in);
- TensorAddressElementIterator<AddressType> itr(address);
- EXPECT_EQUAL(exp, itr.atDimension(dimension));
- }
-
- void
- verifyAtDimension()
- {
- TEST_DO(verifyAtDimension({}, "x", false));
- TEST_DO(verifyAtDimension({{"x", "1"}}, "x", true));
- TEST_DO(verifyAtDimension({{"x", "1"}}, "y", false));
- TEST_DO(verifyAtDimension({{"y", "1"}}, "x", false));
- TEST_DO(verifyAtDimension({{"y", "1"}}, "y", true));
- }
-
- void
- verifyAddElements(const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in,
- const TensorAddressElementVec &exp)
- {
- AddressType lhsAddress = create(lhsAddress_in);
- TensorAddressElementIterator<AddressType> lhsItr(lhsAddress);
- AddressType rhsAddress = create(rhsAddress_in);
- TensorAddressElementIterator<AddressType> rhsItr(rhsAddress);
- DummyAddressBuilder builder;
- lhsItr.addElements(builder, rhsItr);
- EXPECT_EQUAL(exp, builder.elements());
- }
-
- void verifyAddElements(const TensorAddressMap &address_in,
- const TensorAddressElementVec &exp)
- {
- AddressType address = create(address_in);
- TensorAddressElementIterator<AddressType> itr(address);
- DummyAddressBuilder builder;
- itr.addElements(builder);
- EXPECT_EQUAL(exp, builder.elements());
- }
-
- void verifyAddElements(const TensorAddressMap &address_in,
- const DimensionsSet &dimensions,
- bool exp,
- const TensorAddressElementVec &expVec)
- {
- AddressType address = create(address_in);
- TensorAddressElementIterator<AddressType> itr(address);
- DummyAddressBuilder builder;
- EXPECT_EQUAL(exp, itr.addElements(builder, dimensions));
- EXPECT_EQUAL(expVec, builder.elements());
- }
-
- void verifyAddElements(const TensorAddressMap &lhsAddress_in,
- const TensorAddressMap &rhsAddress_in,
- const DimensionsSet &dimensions,
- bool exp,
- const TensorAddressElementVec &expVec)
- {
- AddressType lhsAddress = create(lhsAddress_in);
- TensorAddressElementIterator<AddressType> lhsItr(lhsAddress);
- AddressType rhsAddress = create(rhsAddress_in);
- TensorAddressElementIterator<AddressType> rhsItr(rhsAddress);
- DummyAddressBuilder builder;
- ASSERT_TRUE(lhsItr.beforeDimension(rhsItr));
- EXPECT_EQUAL(exp, lhsItr.addElements(builder, dimensions, rhsItr));
- EXPECT_EQUAL(expVec, builder.elements());
- }
-
- void
- verifyAddElements()
- {
- // Stop according to rhs iterator
- TEST_DO(verifyAddElements({}, {}, {}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {}, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {{"x", "1"}}, {}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {{"y", "1"}}, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"y", "1"}}, {{"x", "1"}}, {}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"z", "1"}},
- {{"x", "1"}, {"y", "2"}}));
- // Pass through everything
- TEST_DO(verifyAddElements({}, {}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}},
- {{"x", "1"}, {"y", "2"}}));
- // Filter on dimension set
- TEST_DO(verifyAddElements({}, {}, true, {}));
- TEST_DO(verifyAddElements({{"x", "1"}}, {}, true, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {}, true,
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {"y"}, false,
- {{"x", "1"}}));
- // Filter on dimension set and stop according to rhs iterator
- TEST_DO(verifyAddElements({{"x", "1"}}, {}, {}, true, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {}, {}, true,
- {{"x", "1"}, {"y", "2"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"y", "2"}}, {},
- true, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"y", "2"}}, {"y"},
- true, {{"x", "1"}}));
- TEST_DO(verifyAddElements({{"x", "1"}, {"y", "2"}}, {{"y", "2"}}, {"x"},
- false, {}));
- }
-};
-
-
-TEST_F("Test that Tensor address can be iterated", Fixture<TensorAddress>)
-{
- f.verifyPlainIterate();
-}
-
-TEST_F("Test that compact Tensor address can be iterated",
- Fixture<CompactTensorAddress>)
-{
- f.verifyPlainIterate();
-}
-
-
-TEST_F("Test that compact Tensor address ref can be iterated",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyPlainIterate();
-}
-
-TEST_F("Test that Tensor address works with beforeDimension",
- Fixture<TensorAddress>)
-{
- f.verifyBeforeDimension();
-}
-
-TEST_F("Test that compact Tensor address works with beforeDimension",
- Fixture<CompactTensorAddress>)
-{
- f.verifyBeforeDimension();
-}
-
-TEST_F("Test that compat Tensor address ref works with beforeDimension",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyBeforeDimension();
-}
-
-TEST_F("Test that Tensor address works with atDimension",
- Fixture<TensorAddress>)
-{
- f.verifyAtDimension();
-}
-
-TEST_F("Test that compact Tensor address works with atDimension",
- Fixture<CompactTensorAddress>)
-{
- f.verifyAtDimension();
-}
-
-TEST_F("Test that compat Tensor address ref works with atDimension",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyAtDimension();
-}
-
-TEST_F("Test that Tensor address works with addElements",
- Fixture<TensorAddress>)
-{
- f.verifyAddElements();
-}
-
-TEST_F("Test that compact Tensor address works with addElements",
- Fixture<CompactTensorAddress>)
-{
- f.verifyAddElements();
-}
-
-TEST_F("Test that compat Tensor address ref works with addElements",
- Fixture<CompactTensorAddressRef>)
-{
- f.verifyAddElements();
-}
-
-
-TEST_MAIN() { TEST_RUN_ALL(); }
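
The deleted fixture above exercised TensorAddressElementIterator::addElements in three modes: pass every element through, stop at the dimension the rhs iterator stands on, and drop dimensions listed in a filter set. A standalone sketch of the filter rule the deleted cases assert (illustration only, with hypothetical names; the rhs-iterator stop rule is not shown):

    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    using Element = std::pair<std::string, std::string>;   // (dimension, label)
    using Address = std::vector<Element>;

    // Copy elements to 'out', skipping dimensions present in 'filter'.
    // Returns false when anything was filtered away, mirroring the expected
    // results of the "Filter on dimension set" cases above, e.g.
    // {{x,1},{y,2}} with filter {y} -> false, out == {{x,1}}.
    bool addElements(const Address &address,
                     const std::set<std::string> &filter,
                     Address &out) {
        bool kept_all = true;
        for (const Element &elem : address) {
            if (filter.count(elem.first) != 0) {
                kept_all = false;
            } else {
                out.push_back(elem);
            }
        }
        return kept_all;
    }
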
diff --git a/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp b/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
index 1f96b64d170..238d0604ee7 100644
--- a/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
+++ b/vespalib/src/tests/tensor/tensor_conformance/tensor_conformance_test.cpp
@@ -16,8 +16,4 @@ IGNORE_TEST("require that production tensor implementation passes non-mixed conf
TEST_DO(TensorConformance::run_tests(DefaultTensorEngine::ref(), false));
}
-IGNORE_TEST("require that production tensor implementation passes all conformance tests") {
- TEST_DO(TensorConformance::run_tests(DefaultTensorEngine::ref(), true));
-}
-
TEST_MAIN() { TEST_RUN_ALL(); }
diff --git a/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp b/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp
index a87c6555e84..5ad26e979c5 100644
--- a/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp
+++ b/vespalib/src/tests/tensor/tensor_operations/tensor_operations_test.cpp
@@ -120,6 +120,10 @@ struct Fixture
void assertAdd(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
assertAddImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
}
+ void assertAdd(const TensorCells &exp, const TensorDimensions &expDimensions,
+ const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
+ assertAddImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
+ }
void assertSubtractImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
MyInput input;
function::Node_UP ir = function::subtract(function::input(lhs.getType(), input.add(lhs)),
@@ -129,6 +133,9 @@ struct Fixture
void assertSubtract(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
assertSubtractImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
}
+ void assertSubtract(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
+ assertSubtractImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
+ }
void assertMinImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
MyInput input;
function::Node_UP ir = function::min(function::input(lhs.getType(), input.add(lhs)),
@@ -138,6 +145,9 @@ struct Fixture
void assertMin(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
assertMinImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
}
+ void assertMin(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
+ assertMinImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
+ }
void assertMaxImpl(const Tensor &exp, const Tensor &lhs, const Tensor &rhs, bool check_types) {
MyInput input;
function::Node_UP ir = function::max(function::input(lhs.getType(), input.add(lhs)),
@@ -147,6 +157,9 @@ struct Fixture
void assertMax(const TensorCells &exp, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
assertMaxImpl(*createTensor(exp), *createTensor(lhs), *createTensor(rhs), check_types);
}
+ void assertMax(const TensorCells &exp, const TensorDimensions &expDimensions, const TensorCells &lhs, const TensorCells &rhs, bool check_types = true) {
+ assertMaxImpl(*createTensor(exp, expDimensions), *createTensor(lhs), *createTensor(rhs), check_types);
+ }
void assertSumImpl(double exp, const Tensor &tensor) {
MyInput input;
function::Node_UP ir = function::sum(function::input(tensor.getType(), input.add(tensor)));
@@ -252,42 +265,42 @@ void
testTensorAdd(FixtureType &f)
{
f.assertAdd({},{},{}, false);
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"x","2"}}, 5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} });
- f.assertAdd({ {{{"x","1"}}, 8} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} });
- f.assertAdd({ {{{"x","1"}}, -2} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} });
- f.assertAdd({ {{{"x","1"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -3} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertAdd({ {{{"y","2"}}, 12}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertAdd({ {{{"y","2"}}, 12}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"y","2"}}, 12} },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} });
- f.assertAdd({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} });
+ TEST_DO(f.assertAdd({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"}}, 8} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"}}, -2} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -5} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"}}, 0} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -3} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14}, {{{"y","2"}}, 12} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14}, {{{"y","2"}}, 12} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "y", "z" },
+ { {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "y", "z" },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "x", "y" },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7} }));
+ TEST_DO(f.assertAdd({ {{{"y","2"}}, 12} }, { "x", "y" },
+ { {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14} },
+ { {{{"x","1"}}, 3} },
+ { {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertAdd({ {{{"x","1"},{"z","3"}}, 14} },
+ { {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} }));
}
template <typename FixtureType>
@@ -295,42 +308,42 @@ void
testTensorSubtract(FixtureType &f)
{
f.assertSubtract({},{},{}, false);
- f.assertSubtract({ {{{"x","1"}}, 3}, {{{"x","2"}}, -5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} });
- f.assertSubtract({ {{{"x","1"}}, -2} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} });
- f.assertSubtract({ {{{"x","1"}}, 8} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} });
- f.assertSubtract({ {{{"x","1"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 3} });
- f.assertSubtract({ {{{"x","1"}}, 3}, {{{"y","2"}},-2}, {{{"z","3"}},-11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertSubtract({ {{{"x","1"}},-3}, {{{"y","2"}}, 2}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertSubtract({ {{{"y","2"}},-2}, {{{"z","3"}},-11} },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertSubtract({ {{{"y","2"}}, 2}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} });
- f.assertSubtract({ {{{"x","1"}}, 3}, {{{"y","2"}},-2} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} });
- f.assertSubtract({ {{{"x","1"}},-3}, {{{"y","2"}}, 2} },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertSubtract({ {{{"x","1"}}, 3}, {{{"z","3"}},-11} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} });
- f.assertSubtract({ {{{"x","1"}},-3}, {{{"z","3"}}, 11} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} });
+ TEST_DO(f.assertSubtract({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"}}, -2} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"}}, 8} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -5} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"}}, 0} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 3} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, -8}, {{{"y","2"}},-2} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, 8}, {{{"y","2"}}, 2} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"y","2"}},-2} }, { "y", "z" },
+ { {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertSubtract({ {{{"y","2"}}, 2} }, { "y", "z" },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"y","2"}},-2} }, { "x", "y" },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7} }));
+ TEST_DO(f.assertSubtract({ {{{"y","2"}}, 2} }, { "x", "y" },
+ { {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, -8} },
+ { {{{"x","1"}}, 3} },
+ { {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertSubtract({ {{{"x","1"},{"z","3"}}, 8} },
+ { {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} }));
}
template <typename FixtureType>
@@ -338,42 +351,42 @@ void
testTensorMin(FixtureType &f)
{
f.assertMin({},{},{}, false);
- f.assertMin({ {{{"x","1"}}, 3}, {{{"x","2"}}, 5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} });
- f.assertMin({ {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} });
- f.assertMin({ {{{"x","1"}}, -5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"x","2"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 0} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertMin({ {{{"y","2"}}, 5}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertMin({ {{{"y","2"}}, 5}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} });
- f.assertMin({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} });
+ TEST_DO(f.assertMin({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"x","1"}}, -5} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -5} }));
+ TEST_DO(f.assertMin({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 0} }));
+ TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "y", "z" },
+ { {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "y", "z" },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "x", "y" },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7} }));
+ TEST_DO(f.assertMin({ {{{"y","2"}}, 5} }, { "x", "y" },
+ { {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3} },
+ { {{{"x","1"}}, 3} },
+ { {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMin({ {{{"x","1"},{"z","3"}}, 3} },
+ { {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} }));
}
template <typename FixtureType>
@@ -381,45 +394,45 @@ void
testTensorMax(FixtureType &f)
{
f.assertMax({},{},{}, false);
- f.assertMax({ {{{"x","1"}}, 3}, {{{"x","2"}}, 5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 5} });
- f.assertMax({ {{{"x","1"}}, 5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 5} });
- f.assertMax({ {{{"x","1"}}, 3} },
- { {{{"x","1"}}, 3} },
- { {{{"x","1"}}, -5} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"x","2"}}, 0} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, 0} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"x","2"}}, -5} },
- { {{{"x","1"}}, 3} },
- { {{{"x","2"}}, -5} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertMax({ {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} });
- f.assertMax({ {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
- { {{{"y","2"}}, 5} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
- { {{{"y","2"}}, 7} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"y","2"}}, 7} },
- { {{{"y","2"}}, 7} },
- { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} },
- { {{{"z","3"}}, 11} });
- f.assertMax({ {{{"x","1"}}, 3}, {{{"z","3"}}, 11} },
- { {{{"z","3"}}, 11} },
- { {{{"x","1"}}, 3} });
+ TEST_DO(f.assertMax({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"x","1"}}, 5} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","1"}}, -5} }));
+ TEST_DO(f.assertMax({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, 0} }));
+ TEST_DO(f.assertMax({}, { "x" },
+ { {{{"x","1"}}, 3} },
+ { {{{"x","2"}}, -5} }));
+ TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11}, {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11}, {{{"y","2"}}, 7} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "y", "z" },
+ { {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "y", "z" },
+ { {{{"y","2"}}, 7}, {{{"z","3"}}, 11} },
+ { {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "x", "y" },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} },
+ { {{{"y","2"}}, 7} }));
+ TEST_DO(f.assertMax({ {{{"y","2"}}, 7} }, { "x", "y" },
+ { {{{"y","2"}}, 7} },
+ { {{{"x","1"}}, 3}, {{{"y","2"}}, 5} }));
+ TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} },
+ { {{{"z","3"}}, 11} }));
+ TEST_DO(f.assertMax({ {{{"x","1"},{"z","3"}}, 11} },
+ { {{{"z","3"}}, 11} },
+ { {{{"x","1"}}, 3} }));
}
template <typename FixtureType>
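
As the rewritten expectations above read, binary operations on sparse tensors now follow a join semantic: two cells combine only when they agree on every dimension shared by the two operand types (a label missing on both sides also counts as agreement), the result cell's address is the union of the two addresses, and the result type's dimensions are the union of the operand dimensions. A worked example matching the assertAdd cases above; the final snippet echoes the new overload from this diff:

    // lhs type tensor(x{},y{}), rhs type tensor(y{},z{}); shared dimension: y
    //   lhs { {x:1}:3, {y:2}:5 }   rhs { {y:2}:7, {z:3}:11 }
    //   {x:1} (no y) joins {z:3} (no y)  -> {x:1,z:3}: 3 + 11 = 14
    //   {y:2}:5 joins {y:2}:7            -> {y:2}:     5 + 7  = 12
    //   {x:1} vs {y:2} disagree on y     -> no cell
    // When the join is empty, the expected cells alone no longer determine the
    // result type, hence the new overload taking the expected dimensions:
    TEST_DO(f.assertAdd({}, { "x" },
                        { {{{"x","1"}}, 3} },
                        { {{{"x","2"}}, 5} }));
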
diff --git a/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp b/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
index ab8acb9e296..2cac4cfa18c 100644
--- a/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
+++ b/vespalib/src/tests/tensor/tensor_performance/tensor_performance_test.cpp
@@ -113,8 +113,8 @@ TEST("SMOKETEST - require that model match benchmark expression produces expecte
TEST("SMOKETEST - require that matrix product benchmark expression produces expected result") {
Params params;
- params.add("query", parse_tensor("{{x:0}:1.0}"));
- params.add("document", parse_tensor("{{x:1}:2.0}"));
+ params.add("query", parse_tensor("{{x:0}:1.0,{x:1}:0.0}"));
+ params.add("document", parse_tensor("{{x:0}:0.0,{x:1}:2.0}"));
params.add("model", parse_tensor("{{x:0,y:0}:1.0,{x:0,y:1}:2.0,"
" {x:1,y:0}:3.0,{x:1,y:1}:4.0}"));
EXPECT_EQUAL(calculate_expression(matrix_product_expr, params), 17.0);
@@ -339,12 +339,8 @@ TEST("benchmark matrix product") {
size_t matrix_size = vector_size * 2;
for (auto type: {SPARSE, DENSE}) {
Params params;
- size_t document_size = vector_size;
- if (type == DENSE) {
- document_size = matrix_size;
- }
- params.add("query", make_tensor(type, {DimensionSpec("x", vector_size, vector_size)}));
- params.add("document", make_tensor(type, {DimensionSpec("x", document_size)}));
+ params.add("query", make_tensor(type, {DimensionSpec("x", matrix_size)}));
+ params.add("document", make_tensor(type, {DimensionSpec("x", matrix_size)}));
params.add("model", make_tensor(type, {DimensionSpec("x", matrix_size), DimensionSpec("y", matrix_size)}));
double time_us = benchmark_expression_us(matrix_product_expr, params);
fprintf(stderr, "-- matrix product (%s) %zu + %zu vs %zux%zu: %g us\n", name(type), vector_size, vector_size, matrix_size, matrix_size, time_us);
diff --git a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp
index 6e2e7778bc7..06e514e51ba 100644
--- a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp
+++ b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.cpp
@@ -54,6 +54,30 @@ SimpleTensorEngine::to_string(const Tensor &tensor) const
return out;
}
+TensorSpec
+SimpleTensorEngine::to_spec(const Tensor &tensor) const
+{
+ assert(&tensor.engine() == this);
+ const SimpleTensor &simple_tensor = static_cast<const SimpleTensor&>(tensor);
+ ValueType type = simple_tensor.type();
+ const auto &dimensions = type.dimensions();
+ TensorSpec spec(type.to_spec());
+ for (const auto &cell: simple_tensor.cells()) {
+ TensorSpec::Address addr;
+ assert(cell.address.size() == dimensions.size());
+ for (size_t i = 0; i < cell.address.size(); ++i) {
+ const auto &label = cell.address[i];
+ if (label.is_mapped()) {
+ addr.emplace(dimensions[i].name, TensorSpec::Label(label.name));
+ } else {
+ addr.emplace(dimensions[i].name, TensorSpec::Label(label.index));
+ }
+ }
+ spec.add(addr, cell.value);
+ }
+ return spec;
+}
+
std::unique_ptr<eval::Tensor>
SimpleTensorEngine::create(const TensorSpec &spec) const
{
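
The new to_spec() converts an engine-specific tensor back into an engine-neutral TensorSpec by pairing each cell's labels with the dimension names from the tensor's type. A minimal round-trip sketch using only calls visible in this patch (a usage illustration, not part of the patch; assumes the vespalib test kit):

    #include <vespa/vespalib/eval/simple_tensor_engine.h>
    #include <vespa/vespalib/eval/tensor_spec.h>
    #include <vespa/vespalib/testkit/test_kit.h>

    using namespace vespalib::eval;

    TEST("to_spec round trip (sketch)") {
        const TensorEngine &engine = SimpleTensorEngine::ref();
        TensorSpec original("tensor(x{})");
        original.add({{"x", "a"}}, 1.0);
        original.add({{"x", "b"}}, 2.0);
        auto tensor = engine.create(original);            // spec -> engine tensor
        TensorSpec round_trip = engine.to_spec(*tensor);  // engine tensor -> spec
        EXPECT_EQUAL(original, round_trip);  // uses the TensorSpec operator== added below
    }

    TEST_MAIN() { TEST_RUN_ALL(); }
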
diff --git a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h
index 4013aa9de5b..c3207c440fb 100644
--- a/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h
+++ b/vespalib/src/vespa/vespalib/eval/simple_tensor_engine.h
@@ -22,6 +22,7 @@ public:
ValueType type_of(const Tensor &tensor) const override;
bool equal(const Tensor &a, const Tensor &b) const override;
vespalib::string to_string(const Tensor &tensor) const override;
+ TensorSpec to_spec(const Tensor &tensor) const override;
std::unique_ptr<Tensor> create(const TensorSpec &spec) const override;
const Value &reduce(const Tensor &tensor, const BinaryOperation &op, const std::vector<vespalib::string> &dimensions, Stash &stash) const override;
diff --git a/vespalib/src/vespa/vespalib/eval/tensor_engine.h b/vespalib/src/vespa/vespalib/eval/tensor_engine.h
index 637d549a55d..2458da7ff8b 100644
--- a/vespalib/src/vespa/vespalib/eval/tensor_engine.h
+++ b/vespalib/src/vespa/vespalib/eval/tensor_engine.h
@@ -41,6 +41,7 @@ struct TensorEngine
virtual ValueType type_of(const Tensor &tensor) const = 0;
virtual bool equal(const Tensor &a, const Tensor &b) const = 0;
virtual vespalib::string to_string(const Tensor &tensor) const = 0;
+ virtual TensorSpec to_spec(const Tensor &tensor) const = 0;
virtual TensorFunction::UP compile(tensor_function::Node_UP expr) const { return std::move(expr); }
diff --git a/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp b/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp
index 28cda1b2962..eec930b8da4 100644
--- a/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp
+++ b/vespalib/src/vespa/vespalib/eval/tensor_spec.cpp
@@ -1,10 +1,49 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/fastos/fastos.h>
+#include <vespa/vespalib/util/stringfmt.h>
#include "tensor_spec.h"
+#include <iostream>
namespace vespalib {
namespace eval {
+vespalib::string
+TensorSpec::to_string() const
+{
+ vespalib::string out = vespalib::make_string("spec(%s) {\n", _type.c_str());
+ for (const auto &cell: _cells) {
+ size_t n = 0;
+ out.append(" [");
+ for (const auto &label: cell.first) {
+ if (n++) {
+ out.append(",");
+ }
+ if (label.second.is_mapped()) {
+ out.append(label.second.name);
+ } else {
+ out.append(vespalib::make_string("%zu", label.second.index));
+ }
+ }
+ out.append(vespalib::make_string("]: %g\n", cell.second.value));
+ }
+ out.append("}");
+ return out;
+}
+
+bool
+operator==(const TensorSpec &lhs, const TensorSpec &rhs)
+{
+ return ((lhs.type() == rhs.type()) &&
+ (lhs.cells() == rhs.cells()));
+}
+
+std::ostream &
+operator<<(std::ostream &out, const TensorSpec &spec)
+{
+ out << spec.to_string();
+ return out;
+}
+
} // namespace vespalib::eval
} // namespace vespalib
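
For reference, to_string() added above renders one cell per line under a "spec(<type>)" header, and operator<< delegates to it so failing EXPECT_EQUAL checks on TensorSpec produce readable output. A small illustration of the resulting shape (assumes tensor_spec.h and <iostream> are included; exact whitespace per the code above):

    TensorSpec spec("tensor(x{})");
    spec.add({{"x", "a"}}, 1.0);
    spec.add({{"x", "b"}}, 2.5);
    std::cerr << spec << "\n";   // operator<< delegates to to_string()
    // prints roughly (cells iterate in Address map order, values with "%g",
    // mapped labels by name, indexed labels by numeric index):
    //   spec(tensor(x{})) {
    //    [a]: 1
    //    [b]: 2.5
    //   }
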
diff --git a/vespalib/src/vespa/vespalib/eval/tensor_spec.h b/vespalib/src/vespa/vespalib/eval/tensor_spec.h
index aff23a42832..41c1f8d4f3c 100644
--- a/vespalib/src/vespa/vespalib/eval/tensor_spec.h
+++ b/vespalib/src/vespa/vespalib/eval/tensor_spec.h
@@ -3,6 +3,7 @@
#pragma once
#include <vespa/vespalib/stllike/string.h>
+#include <vespa/vespalib/util/approx.h>
#include <memory>
#include <map>
@@ -25,6 +26,10 @@ public:
Label(const char *name_in) : index(npos), name(name_in) {}
bool is_mapped() const { return (index == npos); }
bool is_indexed() const { return (index != npos); }
+ bool operator==(const Label &rhs) const {
+ return ((index == rhs.index) &&
+ (name == rhs.name));
+ }
bool operator<(const Label &rhs) const {
if (index != rhs.index) {
return (index < rhs.index);
@@ -32,8 +37,14 @@ public:
return (name < rhs.name);
}
};
+ struct Value {
+ double value;
+ Value(double value_in) : value(value_in) {}
+ operator double() const { return value; }
+ bool operator==(const Value &rhs) const { return approx_equal(value, rhs.value); }
+ };
using Address = std::map<vespalib::string,Label>;
- using Cells = std::map<Address,double>;
+ using Cells = std::map<Address,Value>;
private:
vespalib::string _type;
Cells _cells;
@@ -45,7 +56,11 @@ public:
}
const vespalib::string &type() const { return _type; }
const Cells &cells() const { return _cells; }
+ vespalib::string to_string() const;
};
+bool operator==(const TensorSpec &lhs, const TensorSpec &rhs);
+std::ostream &operator<<(std::ostream &out, const TensorSpec &tensor);
+
} // namespace vespalib::eval
} // namespace vespalib
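
Note that Cells now maps addresses to a small Value wrapper rather than a raw double; its operator== goes through approx_equal, so two specs whose cell values differ only by floating-point noise still compare equal. A short sketch (assumes the test-kit macros are in scope):

    TensorSpec a("tensor(x{})");
    a.add({{"x", "foo"}}, 1.0 / 3.0);
    TensorSpec b("tensor(x{})");
    b.add({{"x", "foo"}}, (1.0 / 3.0) * 3.0 / 3.0);   // may differ by an ulp
    EXPECT_EQUAL(a, b);   // still equal: Value::operator== uses approx_equal
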
diff --git a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp
index 4a4fc8dc555..362bb8c5561 100644
--- a/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp
+++ b/vespalib/src/vespa/vespalib/eval/test/tensor_conformance.cpp
@@ -3,6 +3,7 @@
#include <vespa/fastos/fastos.h>
#include <vespa/vespalib/testkit/test_kit.h>
#include "tensor_conformance.h"
+#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/eval/simple_tensor_engine.h>
#include <vespa/vespalib/eval/tensor_spec.h>
#include <vespa/vespalib/eval/function.h>
@@ -14,17 +15,6 @@ namespace eval {
namespace test {
namespace {
-// virtual ValueType type_of(const Tensor &tensor) const = 0;
-// virtual bool equal(const Tensor &a, const Tensor &b) const = 0;
-
-// virtual TensorFunction::UP compile(tensor_function::Node_UP expr) const { return std::move(expr); }
-
-// virtual std::unique_ptr<Tensor> create(const TensorSpec &spec) const = 0;
-
-// virtual const Value &reduce(const Tensor &tensor, const BinaryOperation &op, const std::vector<vespalib::string> &dimensions, Stash &stash) const = 0;
-// virtual const Value &map(const UnaryOperation &op, const Tensor &a, Stash &stash) const = 0;
-// virtual const Value &apply(const BinaryOperation &op, const Tensor &a, const Tensor &b, Stash &stash) const = 0;
-
// Random access sequence of numbers
struct Sequence {
virtual double operator[](size_t i) const = 0;
@@ -43,6 +33,13 @@ struct Div10 : Sequence {
double operator[](size_t i) const override { return (seq[i] / 10.0); }
};
+// Sequence of another sequence minus 2
+struct Sub2 : Sequence {
+ const Sequence &seq;
+ Sub2(const Sequence &seq_in) : seq(seq_in) {}
+ double operator[](size_t i) const override { return (seq[i] - 2.0); }
+};
+
// Sequence of a unary operator applied to a sequence
struct OpSeq : Sequence {
const Sequence &seq;
@@ -51,6 +48,13 @@ struct OpSeq : Sequence {
double operator[](size_t i) const override { return op.eval(seq[i]); }
};
+// Sequence of applying sigmoid to another sequence
+struct Sigmoid : Sequence {
+ const Sequence &seq;
+ Sigmoid(const Sequence &seq_in) : seq(seq_in) {}
+ double operator[](size_t i) const override { return operation::Sigmoid().eval(seq[i]); }
+};
+
// pre-defined sequence of numbers
struct Seq : Sequence {
std::vector<double> seq;
@@ -78,6 +82,13 @@ struct None : Mask {
bool operator[](size_t) const override { return false; }
};
+// Mask with false for each Nth index
+struct SkipNth : Mask {
+ size_t n;
+ SkipNth(size_t n_in) : n(n_in) {}
+ bool operator[](size_t i) const override { return (i % n) != 0; }
+};
+
// pre-defined mask
struct Bits : Mask {
std::vector<bool> bits;
@@ -88,6 +99,16 @@ struct Bits : Mask {
}
};
+// A mask converted to a sequence of two unique values (mapped from true and false)
+struct Mask2Seq : Sequence {
+ const Mask &mask;
+ double true_value;
+ double false_value;
+ Mask2Seq(const Mask &mask_in, double true_value_in = 1.0, double false_value_in = 0.0)
+ : mask(mask_in), true_value(true_value_in), false_value(false_value_in) {}
+ double operator[](size_t i) const override { return mask[i] ? true_value : false_value; }
+};
+
// custom op1
struct MyOp : CustomUnaryOperation {
double eval(double a) const override { return ((a + 1) * 2); }
@@ -132,28 +153,37 @@ vespalib::string infer_type(const Layout &layout) {
return ValueType::tensor_type(dimensions).to_spec();
}
-// Mix spaces with a number sequence to make a tensor spec
+// Wrapper for the things needed to generate a tensor
+struct Source {
+ using Address = TensorSpec::Address;
+
+ const Layout &layout;
+ const Sequence &seq;
+ const Mask &mask;
+ Source(const Layout &layout_in, const Sequence &seq_in, const Mask &mask_in)
+ : layout(layout_in), seq(seq_in), mask(mask_in) {}
+};
+
+// Mix layout with a number sequence to make a tensor spec
class TensorSpecBuilder
{
private:
using Label = TensorSpec::Label;
using Address = TensorSpec::Address;
- const Layout &_layout;
- const Sequence &_seq;
- const Mask &_mask;
- TensorSpec _spec;
- Address _addr;
- size_t _idx;
+ Source _source;
+ TensorSpec _spec;
+ Address _addr;
+ size_t _idx;
void generate(size_t layout_idx) {
- if (layout_idx == _layout.size()) {
- if (_mask[_idx]) {
- _spec.add(_addr, _seq[_idx]);
+ if (layout_idx == _source.layout.size()) {
+ if (_source.mask[_idx]) {
+ _spec.add(_addr, _source.seq[_idx]);
}
++_idx;
} else {
- const Domain &domain = _layout[layout_idx];
+ const Domain &domain = _source.layout[layout_idx];
if (domain.size > 0) { // indexed
for (size_t i = 0; i < domain.size; ++i) {
_addr.emplace(domain.dimension, Label(i)).first->second = Label(i);
@@ -170,67 +200,168 @@ private:
public:
TensorSpecBuilder(const Layout &layout, const Sequence &seq, const Mask &mask)
- : _layout(layout), _seq(seq), _mask(mask), _spec(infer_type(layout)), _addr(), _idx(0) {}
+ : _source(layout, seq, mask), _spec(infer_type(layout)), _addr(), _idx(0) {}
TensorSpec build() {
generate(0);
return _spec;
}
};
+TensorSpec spec(const Layout &layout, const Sequence &seq, const Mask &mask) {
+ return TensorSpecBuilder(layout, seq, mask).build();
+}
+TensorSpec spec(const Layout &layout, const Sequence &seq) {
+ return spec(layout, seq, All());
+}
+TensorSpec spec(const Layout &layout) {
+ return spec(layout, Seq(), None());
+}
+TensorSpec spec(const Domain &domain, const Sequence &seq, const Mask &mask) {
+ return spec(Layout({domain}), seq, mask);
+}
+TensorSpec spec(const Domain &domain, const Sequence &seq) {
+ return spec(Layout({domain}), seq);
+}
+TensorSpec spec(const Domain &domain) {
+ return spec(Layout({domain}));
+}
+TensorSpec spec(double value) {
+ return spec(Layout({}), Seq({value}));
+}
+TensorSpec spec() {
+ return spec(Layout({}));
+}
-using Tensor_UP = std::unique_ptr<Tensor>;
-
-// small utility used to capture passed tensor references for uniform handling
-struct TensorRef {
- const Tensor &ref;
- TensorRef(const Tensor &ref_in) : ref(ref_in) {}
- TensorRef(const Tensor_UP &up_ref) : ref(*(up_ref.get())) {}
-};
-
-// abstract evaluation verification wrapper
+// abstract evaluation wrapper
struct Eval {
- virtual void verify(const TensorEngine &engine, TensorRef expect) const {
- (void) engine;
- (void) expect;
+ // typed result wrapper
+ class Result {
+ private:
+ enum class Type { ERROR, NUMBER, TENSOR };
+ Type _type;
+ double _number;
+ TensorSpec _tensor;
+ public:
+ Result(const Value &value) : _type(Type::ERROR), _number(error_value), _tensor("error") {
+ if (value.is_double()) {
+ _type = Type::NUMBER;
+ _number = value.as_double();
+ _tensor = TensorSpec("double").add({}, _number);
+ } else if (value.is_tensor()) {
+ _type = Type::TENSOR;
+ _tensor = value.as_tensor()->engine().to_spec(*value.as_tensor());
+ if (_tensor.type() == "double") {
+ _number = _tensor.cells().empty() ? 0.0 : _tensor.cells().begin()->second.value;
+ }
+ }
+ }
+ bool is_error() const { return (_type == Type::ERROR); }
+ bool is_number() const { return (_type == Type::NUMBER); }
+ bool is_tensor() const { return (_type == Type::TENSOR); }
+ double number() const {
+ EXPECT_TRUE(is_number());
+ return _number;
+ }
+ const TensorSpec &tensor() const {
+ EXPECT_TRUE(is_tensor());
+ return _tensor;
+ }
+ };
+ virtual Result eval(const TensorEngine &) const {
+ TEST_ERROR("wrong signature");
+ return Result(ErrorValue());
+ }
+ virtual Result eval(const TensorEngine &, const TensorSpec &) const {
TEST_ERROR("wrong signature");
+ return Result(ErrorValue());
}
- virtual void verify(const TensorEngine &engine, TensorRef a, TensorRef expect) const {
- (void) engine;
- (void) a;
- (void) expect;
+ virtual Result eval(const TensorEngine &, const TensorSpec &, const TensorSpec &) const {
TEST_ERROR("wrong signature");
+ return Result(ErrorValue());
}
virtual ~Eval() {}
};
-// expression(void) -> tensor
-struct Expr_V_T : Eval {
+// catches exceptions trying to keep the test itself safe from eval side-effects
+struct SafeEval : Eval {
+ const Eval &unsafe;
+ SafeEval(const Eval &unsafe_in) : unsafe(unsafe_in) {}
+ Result eval(const TensorEngine &engine) const override {
+ try {
+ return unsafe.eval(engine);
+ } catch (std::exception &e) {
+ TEST_ERROR(e.what());
+ return Result(ErrorValue());
+ }
+ }
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ try {
+ return unsafe.eval(engine, a);
+ } catch (std::exception &e) {
+ TEST_ERROR(e.what());
+ return Result(ErrorValue());
+ }
+
+ }
+ Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+ try {
+ return unsafe.eval(engine, a, b);
+ } catch (std::exception &e) {
+ TEST_ERROR(e.what());
+ return Result(ErrorValue());
+ }
+ }
+};
+SafeEval safe(const Eval &eval) { return SafeEval(eval); }
+
+// expression(void)
+struct Expr_V : Eval {
const vespalib::string &expr;
- Expr_V_T(const vespalib::string &expr_in) : expr(expr_in) {}
- void verify(const TensorEngine &engine, TensorRef expect) const override {
+ Expr_V(const vespalib::string &expr_in) : expr(expr_in) {}
+ Result eval(const TensorEngine &engine) const override {
InterpretedFunction::Context ctx;
InterpretedFunction ifun(engine, Function::parse(expr));
- const Value &result = ifun.eval(ctx);
- if (EXPECT_TRUE(result.is_tensor())) {
- const Tensor *actual = result.as_tensor();
- EXPECT_EQUAL(*actual, expect.ref);
- }
+ return Result(ifun.eval(ctx));
}
};
-// expression(tensor) -> tensor
-struct Expr_T_T : Eval {
+// expression(tensor)
+struct Expr_T : Eval {
const vespalib::string &expr;
- Expr_T_T(const vespalib::string &expr_in) : expr(expr_in) {}
- void verify(const TensorEngine &engine, TensorRef a, TensorRef expect) const override {
- TensorValue va(a.ref);
+ Expr_T(const vespalib::string &expr_in) : expr(expr_in) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ TensorValue va(engine.create(a));
InterpretedFunction::Context ctx;
InterpretedFunction ifun(engine, Function::parse(expr));
ctx.add_param(va);
- const Value &result = ifun.eval(ctx);
- if (EXPECT_TRUE(result.is_tensor())) {
- const Tensor *actual = result.as_tensor();
- EXPECT_EQUAL(*actual, expect.ref);
- }
+ return Result(ifun.eval(ctx));
+ }
+};
+
+// expression(tensor,tensor)
+struct Expr_TT : Eval {
+ const vespalib::string &expr;
+ Expr_TT(const vespalib::string &expr_in) : expr(expr_in) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+ TensorValue va(engine.create(a));
+ TensorValue vb(engine.create(b));
+ InterpretedFunction::Context ctx;
+ InterpretedFunction ifun(engine, Function::parse(expr));
+ ctx.add_param(va);
+ ctx.add_param(vb);
+ return Result(ifun.eval(ctx));
+ }
+};
+
+// evaluate tensor reduce operation using tensor engine immediate api
+struct ImmediateReduce : Eval {
+ const BinaryOperation &op;
+ std::vector<vespalib::string> dimensions;
+ ImmediateReduce(const BinaryOperation &op_in) : op(op_in), dimensions() {}
+ ImmediateReduce(const BinaryOperation &op_in, const vespalib::string &dimension)
+ : op(op_in), dimensions({dimension}) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ Stash stash;
+ return Result(engine.reduce(*engine.create(a), op, dimensions, stash));
}
};
@@ -238,28 +369,66 @@ struct Expr_T_T : Eval {
struct ImmediateMap : Eval {
const UnaryOperation &op;
ImmediateMap(const UnaryOperation &op_in) : op(op_in) {}
- void verify(const TensorEngine &engine, TensorRef a, TensorRef expect) const override {
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
Stash stash;
- const Value &result = engine.map(op, a.ref, stash);
- if (EXPECT_TRUE(result.is_tensor())) {
- const Tensor *actual = result.as_tensor();
- EXPECT_EQUAL(*actual, expect.ref);
- }
+ return Result(engine.map(op, *engine.create(a), stash));
+ }
+};
+
+// evaluate tensor apply operation using tensor engine immediate api
+struct ImmediateApply : Eval {
+ const BinaryOperation &op;
+ ImmediateApply(const BinaryOperation &op_in) : op(op_in) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+ Stash stash;
+ return Result(engine.apply(op, *engine.create(a), *engine.create(b), stash));
}
};
-// input needed to evaluate a map operation in retained mode
-struct TensorMapInput : TensorFunction::Input {
- TensorValue tensor;
- const UnaryOperation &map_op;
- TensorMapInput(TensorRef in, const UnaryOperation &op) : tensor(in.ref), map_op(op) {}
+const size_t tensor_id_a = 11;
+const size_t tensor_id_b = 12;
+const size_t map_operation_id = 22;
+
+// input used when evaluating in retained mode
+struct Input : TensorFunction::Input {
+ std::vector<TensorValue> tensors;
+ const UnaryOperation *map_op;
+ Input(std::unique_ptr<Tensor> a) : tensors(), map_op(nullptr) {
+ tensors.emplace_back(std::move(a));
+ }
+ Input(std::unique_ptr<Tensor> a, const UnaryOperation &op) : tensors(), map_op(&op) {
+ tensors.emplace_back(std::move(a));
+ }
+ Input(std::unique_ptr<Tensor> a, std::unique_ptr<Tensor> b) : tensors(), map_op(nullptr) {
+ tensors.emplace_back(std::move(a));
+ tensors.emplace_back(std::move(b));
+ }
const Value &get_tensor(size_t id) const override {
- ASSERT_EQUAL(id, 11u);
- return tensor;
+ size_t offset = (id - tensor_id_a);
+ ASSERT_GREATER(tensors.size(), offset);
+ return tensors[offset];
}
const UnaryOperation &get_map_operation(size_t id) const {
- ASSERT_EQUAL(id, 22u);
- return map_op;
+ ASSERT_TRUE(map_op != nullptr);
+ ASSERT_EQUAL(id, map_operation_id);
+ return *map_op;
+ }
+};
+
+// evaluate tensor reduce operation using tensor engine retained api
+struct RetainedReduce : Eval {
+ const BinaryOperation &op;
+ std::vector<vespalib::string> dimensions;
+ RetainedReduce(const BinaryOperation &op_in) : op(op_in), dimensions() {}
+ RetainedReduce(const BinaryOperation &op_in, const vespalib::string &dimension)
+ : op(op_in), dimensions({dimension}) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ auto a_type = ValueType::from_spec(a.type());
+ auto ir = tensor_function::reduce(tensor_function::inject(a_type, tensor_id_a), op, dimensions);
+ auto fun = engine.compile(std::move(ir));
+ Input input(engine.create(a));
+ Stash stash;
+ return Result(fun->eval(input, stash));
}
};
@@ -267,65 +436,65 @@ struct TensorMapInput : TensorFunction::Input {
struct RetainedMap : Eval {
const UnaryOperation &op;
RetainedMap(const UnaryOperation &op_in) : op(op_in) {}
- void verify(const TensorEngine &engine, TensorRef a, TensorRef expect) const override {
- auto a_type = a.ref.engine().type_of(a.ref);
- auto ir = tensor_function::map(22, tensor_function::inject(a_type, 11));
+ Result eval(const TensorEngine &engine, const TensorSpec &a) const override {
+ auto a_type = ValueType::from_spec(a.type());
+ auto ir = tensor_function::map(map_operation_id, tensor_function::inject(a_type, tensor_id_a));
auto fun = engine.compile(std::move(ir));
- TensorMapInput input(a, op);
+ Input input(engine.create(a), op);
Stash stash;
- const Value &result = fun->eval(input, stash);
- if (EXPECT_TRUE(result.is_tensor())) {
- const Tensor *actual = result.as_tensor();
- EXPECT_EQUAL(*actual, expect.ref);
- }
+ return Result(fun->eval(input, stash));
+ }
+};
+
+// evaluate tensor apply operation using tensor engine retained api
+struct RetainedApply : Eval {
+ const BinaryOperation &op;
+ RetainedApply(const BinaryOperation &op_in) : op(op_in) {}
+ Result eval(const TensorEngine &engine, const TensorSpec &a, const TensorSpec &b) const override {
+ auto a_type = ValueType::from_spec(a.type());
+ auto b_type = ValueType::from_spec(b.type());
+ auto ir = tensor_function::apply(op, tensor_function::inject(a_type, tensor_id_a),
+                                         tensor_function::inject(b_type, tensor_id_b));
+ auto fun = engine.compile(std::move(ir));
+ Input input(engine.create(a), engine.create(b));
+ Stash stash;
+ return Result(fun->eval(input, stash));
}
};
// placeholder used for unused values in a sequence
-const double X = 31212.0;
+const double X = error_value;
+
+// NaN value
+const double my_nan = std::numeric_limits<double>::quiet_NaN();
+
// Test wrapper to avoid passing global test parameters around
struct TestContext {
+ const TensorEngine &ref_engine;
const TensorEngine &engine;
bool test_mixed_cases;
+ size_t skip_count;
+
TestContext(const TensorEngine &engine_in, bool test_mixed_cases_in)
- : engine(engine_in), test_mixed_cases(test_mixed_cases_in) {}
+ : ref_engine(SimpleTensorEngine::ref()), engine(engine_in),
+ test_mixed_cases(test_mixed_cases_in), skip_count(0) {}
+
+ std::unique_ptr<Tensor> tensor(const TensorSpec &spec) {
+ auto result = engine.create(spec);
+ EXPECT_EQUAL(spec.type(), engine.type_of(*result).to_spec());
+ return result;
+ }
- bool mixed() {
+ bool mixed(size_t n) {
if (!test_mixed_cases) {
- fprintf(stderr, "skipping some tests since mixed testing is disabled\n");
+ skip_count += n;
}
return test_mixed_cases;
}
- Tensor_UP tensor(const Layout &layout, const Sequence &seq, const Mask &mask) {
- TensorSpec spec = TensorSpecBuilder(layout, seq, mask).build();
- Tensor_UP result = engine.create(spec);
- EXPECT_EQUAL(spec.type(), engine.type_of(*result).to_spec());
- return result;
- }
- Tensor_UP tensor(const Layout &layout, const Sequence &seq) {
- return tensor(layout, seq, All());
- }
- Tensor_UP tensor(const Layout &layout) {
- return tensor(layout, Seq(), None());
- }
- Tensor_UP tensor(const Domain &domain, const Sequence &seq, const Mask &mask) {
- return tensor(Layout({domain}), seq, mask);
- }
- Tensor_UP tensor(const Domain &domain, const Sequence &seq) {
- return tensor(Layout({domain}), seq);
- }
- Tensor_UP tensor(const Domain &domain) {
- return tensor(Layout({domain}));
- }
- Tensor_UP tensor(double value) {
- return tensor(Layout({}), Seq({value}));
- }
- Tensor_UP tensor() {
- return tensor(Layout({}));
- }
+ //-------------------------------------------------------------------------
void verify_create_type(const vespalib::string &type_spec) {
auto tensor = engine.create(TensorSpec(type_spec));
@@ -333,59 +502,154 @@ struct TestContext {
EXPECT_EQUAL(type_spec, engine.type_of(*tensor).to_spec());
}
- void verify_not_equal(TensorRef a, TensorRef b) {
- EXPECT_FALSE(a.ref == b.ref);
- EXPECT_FALSE(b.ref == a.ref);
- }
-
- void verify_verbatim_tensor(const vespalib::string &tensor_expr, TensorRef expect) {
- Expr_V_T(tensor_expr).verify(engine, expect);
- }
-
void test_tensor_create_type() {
TEST_DO(verify_create_type("double"));
TEST_DO(verify_create_type("tensor(x{})"));
TEST_DO(verify_create_type("tensor(x{},y{})"));
TEST_DO(verify_create_type("tensor(x[5])"));
TEST_DO(verify_create_type("tensor(x[5],y[10])"));
- if (mixed()) {
+ if (mixed(2)) {
TEST_DO(verify_create_type("tensor(x{},y[10])"));
TEST_DO(verify_create_type("tensor(x[5],y{})"));
}
}
+ //-------------------------------------------------------------------------
+
+ void verify_equal(const TensorSpec &a, const TensorSpec &b) {
+ auto ta = tensor(a);
+ auto tb = tensor(b);
+ EXPECT_EQUAL(a, b);
+ EXPECT_EQUAL(*ta, *tb);
+ TensorSpec spec = engine.to_spec(*ta);
+ TensorSpec ref_spec = ref_engine.to_spec(*ref_engine.create(a));
+ EXPECT_EQUAL(spec, ref_spec);
+ }
+
+ void test_tensor_equality() {
+ TEST_DO(verify_equal(spec(), spec()));
+ TEST_DO(verify_equal(spec(10.0), spec(10.0)));
+ TEST_DO(verify_equal(spec(x()), spec(x())));
+ TEST_DO(verify_equal(spec(x({"a"}), Seq({1})), spec(x({"a"}), Seq({1}))));
+ TEST_DO(verify_equal(spec({x({"a"}),y({"a"})}, Seq({1})), spec({y({"a"}),x({"a"})}, Seq({1}))));
+ TEST_DO(verify_equal(spec(x(3)), spec(x(3))));
+ TEST_DO(verify_equal(spec({x(1),y(1)}, Seq({1})), spec({y(1),x(1)}, Seq({1}))));
+ if (mixed(2)) {
+ TEST_DO(verify_equal(spec({x({"a"}),y(1)}, Seq({1})), spec({y(1),x({"a"})}, Seq({1}))));
+ TEST_DO(verify_equal(spec({y({"a"}),x(1)}, Seq({1})), spec({x(1),y({"a"})}, Seq({1}))));
+ }
+ }
+
+ //-------------------------------------------------------------------------
+
+ void verify_not_equal(const TensorSpec &a, const TensorSpec &b) {
+ auto ta = tensor(a);
+ auto tb = tensor(b);
+ EXPECT_NOT_EQUAL(a, b);
+ EXPECT_NOT_EQUAL(b, a);
+ EXPECT_NOT_EQUAL(*ta, *tb);
+ EXPECT_NOT_EQUAL(*tb, *ta);
+ }
+
void test_tensor_inequality() {
- TEST_DO(verify_not_equal(tensor(1.0), tensor(2.0)));
- TEST_DO(verify_not_equal(tensor(), tensor(x())));
- TEST_DO(verify_not_equal(tensor(), tensor(x(1))));
- TEST_DO(verify_not_equal(tensor(x()), tensor(x(1))));
- TEST_DO(verify_not_equal(tensor(x()), tensor(y())));
- TEST_DO(verify_not_equal(tensor(x(1)), tensor(x(2))));
- TEST_DO(verify_not_equal(tensor(x(1)), tensor(y(1))));
- TEST_DO(verify_not_equal(tensor(x({"a"}), Seq({1})), tensor(x({"a"}), Seq({2}))));
- TEST_DO(verify_not_equal(tensor(x({"a"}), Seq({1})), tensor(x({"b"}), Seq({1}))));
- TEST_DO(verify_not_equal(tensor(x({"a"}), Seq({1})), tensor({x({"a"}),y({"a"})}, Seq({1}))));
- TEST_DO(verify_not_equal(tensor(x(1), Seq({1})), tensor(x(1), Seq({2}))));
- TEST_DO(verify_not_equal(tensor(x(1), Seq({1})), tensor(x(2), Seq({1}), Bits({1,0}))));
- TEST_DO(verify_not_equal(tensor(x(2), Seq({1,1}), Bits({1,0})),
- tensor(x(2), Seq({1,1}), Bits({0,1}))));
- TEST_DO(verify_not_equal(tensor(x(1), Seq({1})), tensor({x(1),y(1)}, Seq({1}))));
- if (mixed()) {
- TEST_DO(verify_not_equal(tensor({x({"a"}),y(1)}, Seq({1})), tensor({x({"a"}),y(1)}, Seq({2}))));
- TEST_DO(verify_not_equal(tensor({x({"a"}),y(1)}, Seq({1})), tensor({x({"b"}),y(1)}, Seq({1}))));
- TEST_DO(verify_not_equal(tensor({x(2),y({"a"})}, Seq({1}), Bits({1,0})),
- tensor({x(2),y({"a"})}, Seq({X,1}), Bits({0,1}))));
+ TEST_DO(verify_not_equal(spec(1.0), spec(2.0)));
+ TEST_DO(verify_not_equal(spec(), spec(x())));
+ TEST_DO(verify_not_equal(spec(), spec(x(1))));
+ TEST_DO(verify_not_equal(spec(x()), spec(x(1))));
+ TEST_DO(verify_not_equal(spec(x()), spec(y())));
+ TEST_DO(verify_not_equal(spec(x(1)), spec(x(2))));
+ TEST_DO(verify_not_equal(spec(x(1)), spec(y(1))));
+ TEST_DO(verify_not_equal(spec(x({"a"}), Seq({1})), spec(x({"a"}), Seq({2}))));
+ TEST_DO(verify_not_equal(spec(x({"a"}), Seq({1})), spec(x({"b"}), Seq({1}))));
+ TEST_DO(verify_not_equal(spec(x({"a"}), Seq({1})), spec({x({"a"}),y({"a"})}, Seq({1}))));
+ TEST_DO(verify_not_equal(spec(x(1), Seq({1})), spec(x(1), Seq({2}))));
+ TEST_DO(verify_not_equal(spec(x(1), Seq({1})), spec(x(2), Seq({1}), Bits({1,0}))));
+ TEST_DO(verify_not_equal(spec(x(2), Seq({1,1}), Bits({1,0})),
+ spec(x(2), Seq({1,1}), Bits({0,1}))));
+ TEST_DO(verify_not_equal(spec(x(1), Seq({1})), spec({x(1),y(1)}, Seq({1}))));
+ if (mixed(3)) {
+ TEST_DO(verify_not_equal(spec({x({"a"}),y(1)}, Seq({1})), spec({x({"a"}),y(1)}, Seq({2}))));
+ TEST_DO(verify_not_equal(spec({x({"a"}),y(1)}, Seq({1})), spec({x({"b"}),y(1)}, Seq({1}))));
+ TEST_DO(verify_not_equal(spec({x(2),y({"a"})}, Seq({1}), Bits({1,0})),
+ spec({x(2),y({"a"})}, Seq({X,1}), Bits({0,1}))));
}
}
+ //-------------------------------------------------------------------------
+
+ void verify_verbatim_tensor(const vespalib::string &tensor_expr, const TensorSpec &expect) {
+ EXPECT_EQUAL(Expr_V(tensor_expr).eval(engine).tensor(), expect);
+ }
+
void test_verbatim_tensors() {
- TEST_DO(verify_verbatim_tensor("{}", tensor()));
- TEST_DO(verify_verbatim_tensor("{{}:5}", tensor(5.0)));
- TEST_DO(verify_verbatim_tensor("{{x:foo}:1,{x:bar}:2,{x:baz}:3}", tensor(x({"foo","bar","baz"}), Seq({1,2,3}))));
+ TEST_DO(verify_verbatim_tensor("{}", spec(0.0)));
+ TEST_DO(verify_verbatim_tensor("{{}:5}", spec(5.0)));
+ TEST_DO(verify_verbatim_tensor("{{x:foo}:1,{x:bar}:2,{x:baz}:3}", spec(x({"foo","bar","baz"}), Seq({1,2,3}))));
TEST_DO(verify_verbatim_tensor("{{x:foo,y:a}:1,{y:b,x:bar}:2}",
- tensor({x({"foo","bar"}),y({"a","b"})}, Seq({1,X,X,2}), Bits({1,0,0,1}))));
+ spec({x({"foo","bar"}),y({"a","b"})}, Seq({1,X,X,2}), Bits({1,0,0,1}))));
+ }
+
+ //-------------------------------------------------------------------------
+
+ void verify_reduce_result(const Eval &eval, const TensorSpec &a, const Eval::Result &expect) {
+ if (expect.is_tensor()) {
+ EXPECT_EQUAL(eval.eval(engine, a).tensor(), expect.tensor());
+ } else if (expect.is_number()) {
+ EXPECT_EQUAL(eval.eval(engine, a).number(), expect.number());
+ } else {
+ TEST_FATAL("expected result should be valid");
+ }
+ }
+
+ void test_reduce_op(const vespalib::string &name, const BinaryOperation &op, const Sequence &seq) {
+ std::vector<Layout> layouts = {
+ {x(3)},
+ {x(3),y(5)},
+ {x(3),y(5),z(7)},
+ {x({"a","b","c"})},
+ {x({"a","b","c"}),y({"foo","bar"})},
+ {x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}
+ };
+ if (mixed(2 * 4)) {
+ layouts.push_back({x(3),y({"foo", "bar"}),z(7)});
+ layouts.push_back({x({"a","b","c"}),y(5),z({"i","j","k","l"})});
+ }
+ for (const Layout &layout: layouts) {
+ TensorSpec input = spec(layout, seq);
+ for (const Domain &domain: layout) {
+ Eval::Result expect = ImmediateReduce(op, domain.dimension).eval(ref_engine, input);
+ TEST_STATE(make_string("shape: %s, reduce dimension: %s",
+ infer_type(layout).c_str(), domain.dimension.c_str()).c_str());
+ if (!name.empty()) {
+ vespalib::string expr = make_string("%s(a,%s)", name.c_str(), domain.dimension.c_str());
+ TEST_DO(verify_reduce_result(Expr_T(expr), input, expect));
+ }
+ TEST_DO(verify_reduce_result(ImmediateReduce(op, domain.dimension), input, expect));
+ TEST_DO(verify_reduce_result(RetainedReduce(op, domain.dimension), input, expect));
+ }
+ {
+ Eval::Result expect = ImmediateReduce(op).eval(ref_engine, input);
+ TEST_STATE(make_string("shape: %s, reduce all dimensions",
+ infer_type(layout).c_str()).c_str());
+ if (!name.empty()) {
+ vespalib::string expr = make_string("%s(a)", name.c_str());
+ TEST_DO(verify_reduce_result(Expr_T(expr), input, expect));
+ }
+ TEST_DO(verify_reduce_result(ImmediateReduce(op), input, expect));
+ TEST_DO(verify_reduce_result(RetainedReduce(op), input, expect));
+ }
+ }
+ }
+
+ void test_tensor_reduce() {
+ TEST_DO(test_reduce_op("sum", operation::Add(), N()));
+ TEST_DO(test_reduce_op("", operation::Mul(), Sigmoid(N())));
+ TEST_DO(test_reduce_op("", operation::Min(), N()));
+ TEST_DO(test_reduce_op("", operation::Max(), N()));
}
+ //-------------------------------------------------------------------------
+
void test_map_op(const Eval &eval, const UnaryOperation &ref_op, const Sequence &seq) {
std::vector<Layout> layouts = {
{},
@@ -396,38 +660,121 @@ struct TestContext {
{x({"a","b","c"}),y({"foo","bar"})},
{x({"a","b","c"}),y({"foo","bar"}),z({"i","j","k","l"})}
};
- if (mixed()) {
+ if (mixed(2)) {
layouts.push_back({x(3),y({"foo", "bar"}),z(7)});
layouts.push_back({x({"a","b","c"}),y(5),z({"i","j","k","l"})});
}
for (const Layout &layout: layouts) {
- TEST_DO(eval.verify(engine, tensor(layout, seq), tensor(layout, OpSeq(seq, ref_op))));
+ EXPECT_EQUAL(eval.eval(engine, spec(layout, seq)).tensor(), spec(layout, OpSeq(seq, ref_op)));
}
}
+ void test_map_op(const vespalib::string &expr, const UnaryOperation &op, const Sequence &seq) {
+ TEST_DO(test_map_op(ImmediateMap(op), op, seq));
+ TEST_DO(test_map_op(RetainedMap(op), op, seq));
+ TEST_DO(test_map_op(Expr_T(expr), op, seq));
+ }
+
void test_tensor_map() {
- TEST_DO(test_map_op(ImmediateMap(operation::Floor()), operation::Floor(), Div10(N())));
- TEST_DO(test_map_op(RetainedMap(operation::Floor()), operation::Floor(), Div10(N())));
- TEST_DO(test_map_op(Expr_T_T("floor(a)"), operation::Floor(), Div10(N())));
- //---------------------------------------------------------------------
- TEST_DO(test_map_op(ImmediateMap(operation::Ceil()), operation::Ceil(), Div10(N())));
- TEST_DO(test_map_op(RetainedMap(operation::Ceil()), operation::Ceil(), Div10(N())));
- TEST_DO(test_map_op(Expr_T_T("ceil(a)"), operation::Ceil(), Div10(N())));
- //---------------------------------------------------------------------
- TEST_DO(test_map_op(ImmediateMap(operation::Sqrt()), operation::Sqrt(), Div10(N())));
- TEST_DO(test_map_op(RetainedMap(operation::Sqrt()), operation::Sqrt(), Div10(N())));
- TEST_DO(test_map_op(Expr_T_T("sqrt(a)"), operation::Sqrt(), Div10(N())));
- //---------------------------------------------------------------------
- TEST_DO(test_map_op(ImmediateMap(MyOp()), MyOp(), Div10(N())));
- TEST_DO(test_map_op(RetainedMap(MyOp()), MyOp(), Div10(N())));
- TEST_DO(test_map_op(Expr_T_T("(a+1)*2"), MyOp(), Div10(N())));
+ TEST_DO(test_map_op("-a", operation::Neg(), Sub2(Div10(N()))));
+ TEST_DO(test_map_op("!a", operation::Not(), Mask2Seq(SkipNth(3))));
+ TEST_DO(test_map_op("cos(a)", operation::Cos(), Div10(N())));
+ TEST_DO(test_map_op("sin(a)", operation::Sin(), Div10(N())));
+ TEST_DO(test_map_op("tan(a)", operation::Tan(), Div10(N())));
+ TEST_DO(test_map_op("cosh(a)", operation::Cosh(), Div10(N())));
+ TEST_DO(test_map_op("sinh(a)", operation::Sinh(), Div10(N())));
+ TEST_DO(test_map_op("tanh(a)", operation::Tanh(), Div10(N())));
+ TEST_DO(test_map_op("acos(a)", operation::Acos(), Sigmoid(Div10(N()))));
+ TEST_DO(test_map_op("asin(a)", operation::Asin(), Sigmoid(Div10(N()))));
+ TEST_DO(test_map_op("atan(a)", operation::Atan(), Div10(N())));
+ TEST_DO(test_map_op("exp(a)", operation::Exp(), Div10(N())));
+ TEST_DO(test_map_op("log10(a)", operation::Log10(), Div10(N())));
+ TEST_DO(test_map_op("log(a)", operation::Log(), Div10(N())));
+ TEST_DO(test_map_op("sqrt(a)", operation::Sqrt(), Div10(N())));
+ TEST_DO(test_map_op("ceil(a)", operation::Ceil(), Div10(N())));
+ TEST_DO(test_map_op("fabs(a)", operation::Fabs(), Div10(N())));
+ TEST_DO(test_map_op("floor(a)", operation::Floor(), Div10(N())));
+ TEST_DO(test_map_op("isNan(a)", operation::IsNan(), Mask2Seq(SkipNth(3), 1.0, my_nan)));
+ TEST_DO(test_map_op("relu(a)", operation::Relu(), Sub2(Div10(N()))));
+ TEST_DO(test_map_op("sigmoid(a)", operation::Sigmoid(), Sub2(Div10(N()))));
+ TEST_DO(test_map_op("(a+1)*2", MyOp(), Div10(N())));
+ }
+
+ //-------------------------------------------------------------------------
+
+ void test_apply_op(const Eval &eval, const BinaryOperation &op, const Sequence &seq) {
+ std::vector<Layout> layouts = {
+ {}, {},
+ {x(5)}, {x(5)},
+ {x(5)}, {x(3)},
+ {x(5)}, {y(5)},
+ {x(5)}, {x(5),y(5)},
+ {x(3),y(5)}, {x(4),y(4)},
+ {x(3),y(5)}, {y(5),z(7)},
+ {x({"a","b","c"})}, {x({"a","b","c"})},
+ {x({"a","b","c"})}, {x({"a","b"})},
+ {x({"a","b","c"})}, {y({"foo","bar","baz"})},
+ {x({"a","b","c"})}, {x({"a","b","c"}),y({"foo","bar","baz"})},
+ {x({"a","b"}),y({"foo","bar","baz"})}, {x({"a","b","c"}),y({"foo","bar"})},
+ {x({"a","b"}),y({"foo","bar","baz"})}, {y({"foo","bar"}),z({"i","j","k","l"})}
+ };
+ if (mixed(2)) {
+ layouts.push_back({x(3),y({"foo", "bar"})});
+ layouts.push_back({y({"foo", "bar"}),z(7)});
+ layouts.push_back({x({"a","b","c"}),y(5)});
+ layouts.push_back({y(5),z({"i","j","k","l"})});
+ }
+ ASSERT_TRUE((layouts.size() % 2) == 0);
+ for (size_t i = 0; i < layouts.size(); i += 2) {
+ TensorSpec lhs_input = spec(layouts[i], seq);
+ TensorSpec rhs_input = spec(layouts[i + 1], seq);
+ TEST_STATE(make_string("lhs shape: %s, rhs shape: %s",
+ lhs_input.type().c_str(),
+ rhs_input.type().c_str()).c_str());
+ TensorSpec expect = ImmediateApply(op).eval(ref_engine, lhs_input, rhs_input).tensor();
+ EXPECT_EQUAL(safe(eval).eval(engine, lhs_input, rhs_input).tensor(), expect);
+ }
}
+ void test_apply_op(const vespalib::string &expr, const BinaryOperation &op, const Sequence &seq) {
+ TEST_DO(test_apply_op(ImmediateApply(op), op, seq));
+ TEST_DO(test_apply_op(RetainedApply(op), op, seq));
+ TEST_DO(test_apply_op(Expr_TT(expr), op, seq));
+ }
+
+ void test_tensor_apply() {
+ TEST_DO(test_apply_op("a+b", operation::Add(), Div10(N())));
+ TEST_DO(test_apply_op("a-b", operation::Sub(), Div10(N())));
+ TEST_DO(test_apply_op("a*b", operation::Mul(), Div10(N())));
+ TEST_DO(test_apply_op("a/b", operation::Div(), Div10(N())));
+ TEST_DO(test_apply_op("a^b", operation::Pow(), Div10(N())));
+ TEST_DO(test_apply_op("pow(a,b)", operation::Pow(), Div10(N())));
+ TEST_DO(test_apply_op("a==b", operation::Equal(), Div10(N())));
+ TEST_DO(test_apply_op("a!=b", operation::NotEqual(), Div10(N())));
+ TEST_DO(test_apply_op("a~=b", operation::Approx(), Div10(N())));
+ TEST_DO(test_apply_op("a<b", operation::Less(), Div10(N())));
+ TEST_DO(test_apply_op("a<=b", operation::LessEqual(), Div10(N())));
+ TEST_DO(test_apply_op("a>b", operation::Greater(), Div10(N())));
+ TEST_DO(test_apply_op("a>=b", operation::GreaterEqual(), Div10(N())));
+ TEST_DO(test_apply_op("a&&b", operation::And(), Mask2Seq(SkipNth(3))));
+ TEST_DO(test_apply_op("a||b", operation::Or(), Mask2Seq(SkipNth(3))));
+ TEST_DO(test_apply_op("atan2(a,b)", operation::Atan2(), Div10(N())));
+ TEST_DO(test_apply_op("ldexp(a,b)", operation::Ldexp(), Div10(N())));
+ TEST_DO(test_apply_op("fmod(a,b)", operation::Fmod(), Div10(N())));
+ TEST_DO(test_apply_op("min(a,b)", operation::Min(), Div10(N())));
+ TEST_DO(test_apply_op("max(a,b)", operation::Max(), Div10(N())));
+ }
+
+ //-------------------------------------------------------------------------
+
void run_tests() {
TEST_DO(test_tensor_create_type());
+ TEST_DO(test_tensor_equality());
TEST_DO(test_tensor_inequality());
TEST_DO(test_verbatim_tensors());
+ TEST_DO(test_tensor_reduce());
TEST_DO(test_tensor_map());
+ TEST_DO(test_tensor_apply());
}
};
@@ -438,6 +785,9 @@ TensorConformance::run_tests(const TensorEngine &engine, bool test_mixed_cases)
{
TestContext ctx(engine, test_mixed_cases);
ctx.run_tests();
+ if (ctx.skip_count > 0) {
+ fprintf(stderr, "WARNING: skipped %zu mixed test cases\n", ctx.skip_count);
+ }
}
} // namespace vespalib::eval::test
diff --git a/vespalib/src/vespa/vespalib/eval/value.cpp b/vespalib/src/vespa/vespalib/eval/value.cpp
index 859c91a59f5..ff72ac4c85c 100644
--- a/vespalib/src/vespa/vespalib/eval/value.cpp
+++ b/vespalib/src/vespa/vespalib/eval/value.cpp
@@ -23,23 +23,23 @@ Value::apply(const BinaryOperation &, const Value &, Stash &stash) const
bool
TensorValue::equal(const Value &rhs) const
{
- return (rhs.is_tensor() && _tensor.engine().equal(_tensor, *rhs.as_tensor()));
+ return (rhs.is_tensor() && _value->engine().equal(*_value, *rhs.as_tensor()));
}
const Value &
TensorValue::apply(const UnaryOperation &op, Stash &stash) const
{
- return _tensor.engine().map(op, _tensor, stash);
+ return _value->engine().map(op, *_value, stash);
}
const Value &
TensorValue::apply(const BinaryOperation &op, const Value &rhs, Stash &stash) const
{
const Tensor *other = rhs.as_tensor();
- if ((other == nullptr) || (&other->engine() != &_tensor.engine())) {
+ if ((other == nullptr) || (&other->engine() != &_value->engine())) {
return stash.create<ErrorValue>();
}
- return _tensor.engine().apply(op, _tensor, *other, stash);
+ return _value->engine().apply(op, *_value, *other, stash);
}
} // namespace vespalib::eval
diff --git a/vespalib/src/vespa/vespalib/eval/value.h b/vespalib/src/vespa/vespalib/eval/value.h
index 659e9ac6ec2..22e90b9327f 100644
--- a/vespalib/src/vespa/vespalib/eval/value.h
+++ b/vespalib/src/vespa/vespalib/eval/value.h
@@ -59,13 +59,11 @@ public:
class TensorValue : public Value
{
private:
- const Tensor &_tensor;
- std::unique_ptr<Tensor> _stored;
+ std::unique_ptr<Tensor> _value;
public:
- TensorValue(const Tensor &value) : _tensor(value), _stored() {}
- TensorValue(std::unique_ptr<Tensor> value) : _tensor(*value), _stored(std::move(value)) {}
+ TensorValue(std::unique_ptr<Tensor> value) : _value(std::move(value)) {}
bool is_tensor() const override { return true; }
- const Tensor *as_tensor() const override { return &_tensor; }
+ const Tensor *as_tensor() const override { return _value.get(); }
bool equal(const Value &rhs) const override;
const Value &apply(const UnaryOperation &op, Stash &stash) const override;
const Value &apply(const BinaryOperation &op, const Value &rhs, Stash &stash) const override;
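The TensorValue change above replaces the reference-plus-optional-storage pair with a single owning pointer. A minimal standalone sketch of that single-owner pattern, using hypothetical stand-in types rather than the vespalib classes:

#include <memory>
#include <utility>

struct Tensor { /* payload omitted */ };

class TensorValue {
private:
    std::unique_ptr<Tensor> _value;   // sole owner of the wrapped tensor
public:
    explicit TensorValue(std::unique_ptr<Tensor> value) : _value(std::move(value)) {}
    const Tensor *as_tensor() const { return _value.get(); }
};

int main() {
    TensorValue v(std::make_unique<Tensor>());  // caller transfers ownership
    return (v.as_tensor() != nullptr) ? 0 : 1;
}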
diff --git a/vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h b/vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h
deleted file mode 100644
index b2d8d1b07ce..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/decoded_tensor_address_store.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * A utility class to store decoded tensor address based on data stored
- * in tensors.
- */
-template <class AddressT> class DecodedTensorAddressStore;
-
-/**
- * A utility class to store decoded tensor address. TensorAddress
- * doesn't need any decoding, just pass through the argument
- * (e.g. tensor address in tensor hash table).
- */
-template <> class DecodedTensorAddressStore<TensorAddress>
-{
-public:
- void set(const TensorAddress &) { }
- static const TensorAddress &get(const TensorAddress &rhs) { return rhs; }
-};
-
-/**
- * A utility class to store decoded tensor address.
- * CompactTensorAddress needs decoding.
- */
-template <> class DecodedTensorAddressStore<CompactTensorAddress>
-{
-private:
- CompactTensorAddress _address;
-public:
- void set(const CompactTensorAddressRef rhs)
- { _address.deserializeFromSparseAddressRef(rhs); }
- const CompactTensorAddress &get(const CompactTensorAddressRef &)
- { return _address; }
-};
-
-/**
- * A utility class to store decoded tensor address. Just pass through
- * the argument (e.g. tensor address ref in tensor hash table).
- * CompactTensorAddressRef is encoded, decoding is performed on the
- * fly while iterating.
- */
-template <> class DecodedTensorAddressStore<CompactTensorAddressRef>
-{
-public:
- void set(const CompactTensorAddressRef &) { }
- static CompactTensorAddressRef get(const CompactTensorAddressRef rhs)
- { return rhs; }
-};
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp
index c34cfb78bbb..a2bc118c00b 100644
--- a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.cpp
@@ -47,6 +47,14 @@ DefaultTensorEngine::to_string(const Tensor &tensor) const
return my_tensor.toString();
}
+eval::TensorSpec
+DefaultTensorEngine::to_spec(const Tensor &tensor) const
+{
+ assert(&tensor.engine() == this);
+ const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
+ return my_tensor.toSpec();
+}
+
struct IsAddOperation : public eval::DefaultOperationVisitor {
bool result = false;
void visitDefault(const eval::Operation &) override {}
@@ -107,11 +115,11 @@ DefaultTensorEngine::reduce(const Tensor &tensor, const BinaryOperation &op, con
const tensor::Tensor &my_tensor = static_cast<const tensor::Tensor &>(tensor);
IsAddOperation check;
op.accept(check);
+ tensor::Tensor::UP result;
if (check.result) {
if (dimensions.empty()) { // sum
return stash.create<eval::DoubleValue>(my_tensor.sum());
} else { // dimension sum
- tensor::Tensor::UP result;
for (const auto &dimension: dimensions) {
if (result) {
result = result->sum(dimension);
@@ -119,8 +127,18 @@ DefaultTensorEngine::reduce(const Tensor &tensor, const BinaryOperation &op, con
result = my_tensor.sum(dimension);
}
}
+ }
+ } else {
+ result = my_tensor.reduce(op, dimensions);
+ }
+ if (result) {
+ eval::ValueType result_type(result->getType());
+ if (result_type.is_tensor()) {
return stash.create<TensorValue>(std::move(result));
}
+ if (result_type.is_double()) {
+ return stash.create<eval::DoubleValue>(result->sum());
+ }
}
return stash.create<ErrorValue>();
}
@@ -147,8 +165,13 @@ struct TensorOperationOverride : eval::DefaultOperationVisitor {
TensorOperationOverride(const tensor::Tensor &lhs_in,
const tensor::Tensor &rhs_in)
: lhs(lhs_in), rhs(rhs_in), result() {}
- virtual void visitDefault(const eval::Operation &) override {
+ virtual void visitDefault(const eval::Operation &op) override {
// empty result indicates error
+ const eval::BinaryOperation *binaryOp =
+ dynamic_cast<const eval::BinaryOperation *>(&op);
+ if (binaryOp) {
+ result = lhs.apply(*binaryOp, rhs);
+ }
}
virtual void visit(const eval::operation::Add &) override {
result = lhs.add(rhs);
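The new visitDefault fallback applies any binary operation that lacks a specialized tensor override through the generic cell-wise path. A small self-contained analogue of that dynamic_cast dispatch, with hypothetical stand-in operation classes:

#include <cstdio>

struct Operation { virtual ~Operation() = default; };
struct BinaryOperation : Operation { virtual double eval(double a, double b) const = 0; };
struct Mul : BinaryOperation { double eval(double a, double b) const override { return a * b; } };

// Generic fallback: only handle the operation if it is in fact binary.
double visitDefault(const Operation &op, double lhs, double rhs, bool &ok) {
    const BinaryOperation *binaryOp = dynamic_cast<const BinaryOperation *>(&op);
    if (binaryOp) {
        ok = true;
        return binaryOp->eval(lhs, rhs);
    }
    ok = false;              // empty result signals an error in the real code
    return 0.0;
}

int main() {
    Mul mul;
    bool ok = false;
    std::printf("%.1f %d\n", visitDefault(mul, 2.0, 3.0, ok), ok);  // prints "6.0 1"
    return 0;
}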
diff --git a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h
index aba3665d98a..7e1bd903626 100644
--- a/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h
+++ b/vespalib/src/vespa/vespalib/tensor/default_tensor_engine.h
@@ -22,6 +22,7 @@ public:
ValueType type_of(const Tensor &tensor) const override;
bool equal(const Tensor &a, const Tensor &b) const override;
vespalib::string to_string(const Tensor &tensor) const override;
+ TensorSpec to_spec(const Tensor &tensor) const override;
std::unique_ptr<Tensor> create(const TensorSpec &spec) const override;
const Value &reduce(const Tensor &tensor, const BinaryOperation &op, const std::vector<vespalib::string> &dimensions, Stash &stash) const override;
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt
index e80083056ca..c965eb6609c 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/tensor/dense/CMakeLists.txt
@@ -1,9 +1,9 @@
# Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
vespa_add_library(vespalib_vespalib_tensor_dense OBJECT
SOURCES
+ direct_dense_tensor_builder.cpp
dense_tensor.cpp
+ dense_tensor_address_combiner.cpp
dense_tensor_builder.cpp
- dense_tensor_dimension_sum.cpp
- dense_tensor_product.cpp
DEPENDS
)
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp
index 5a160329e79..18506870354 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.cpp
@@ -2,15 +2,17 @@
#include <vespa/fastos/fastos.h>
#include "dense_tensor.h"
-#include "dense_tensor_dimension_sum.h"
-#include "dense_tensor_product.h"
+#include "dense_tensor_apply.hpp"
+#include "dense_tensor_reduce.hpp"
#include <vespa/vespalib/util/stringfmt.h>
#include <vespa/vespalib/util/exceptions.h>
#include <vespa/vespalib/stllike/asciistream.h>
#include <vespa/vespalib/tensor/tensor_address_builder.h>
#include <vespa/vespalib/tensor/tensor_visitor.h>
+#include <vespa/vespalib/eval/operation.h>
#include <sstream>
+using vespalib::eval::TensorSpec;
namespace vespalib {
namespace tensor {
@@ -121,6 +123,15 @@ joinDenseTensorsNegated(const DenseTensor &lhs,
std::move(cells));
}
+std::vector<vespalib::string>
+getDimensions(const DenseTensor &tensor)
+{
+ std::vector<vespalib::string> dimensions;
+ for (const auto &dimMeta : tensor.dimensionsMeta()) {
+ dimensions.emplace_back(dimMeta.dimension());
+ }
+ return dimensions;
+}
}
@@ -237,7 +248,8 @@ DenseTensor::multiply(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return DenseTensorProduct(*this, *rhs).result();
+ return dense::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return lhsValue * rhsValue; });
}
Tensor::UP
@@ -296,7 +308,9 @@ DenseTensor::apply(const CellFunction &func) const
Tensor::UP
DenseTensor::sum(const vespalib::string &dimension) const
{
- return DenseTensorDimensionSum(*this, dimension).result();
+ return dense::reduce(*this, { dimension },
+ [](double lhsValue, double rhsValue)
+ { return lhsValue + rhsValue; });
}
bool
@@ -323,6 +337,33 @@ DenseTensor::clone() const
return std::make_unique<DenseTensor>(_dimensionsMeta, _cells);
}
+namespace {
+
+void
+buildAddress(const DenseTensor::CellsIterator &itr, TensorSpec::Address &address)
+{
+ auto addressItr = itr.address().begin();
+ for (const auto &dim : itr.dimensions()) {
+ address.emplace(std::make_pair(dim.dimension(), TensorSpec::Label(*addressItr++)));
+ }
+ assert(addressItr == itr.address().end());
+}
+
+}
+
+TensorSpec
+DenseTensor::toSpec() const
+{
+ TensorSpec result(getType().to_spec());
+ TensorSpec::Address address;
+ for (CellsIterator itr(_dimensionsMeta, _cells); itr.valid(); itr.next()) {
+ buildAddress(itr, address);
+ result.add(address, itr.cell());
+ address.clear();
+ }
+ return result;
+}
+
void
DenseTensor::print(std::ostream &out) const
{
@@ -376,5 +417,27 @@ operator<<(std::ostream &out, const DenseTensor::DimensionMeta &value)
return out;
}
+Tensor::UP
+DenseTensor::apply(const eval::BinaryOperation &op, const Tensor &arg) const
+{
+ const DenseTensor *rhs = dynamic_cast<const DenseTensor *>(&arg);
+ if (!rhs) {
+ return Tensor::UP();
+ }
+ return dense::apply(*this, *rhs,
+ [&op](double lhsValue, double rhsValue)
+ { return op.eval(lhsValue, rhsValue); });
+}
+
+Tensor::UP
+DenseTensor::reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions) const
+{
+ return dense::reduce(*this,
+ (dimensions.empty() ? getDimensions(*this) : dimensions),
+ [&op](double lhsValue, double rhsValue)
+ { return op.eval(lhsValue, rhsValue); });
+}
+
} // namespace vespalib::tensor
} // namespace vespalib
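DenseTensor::toSpec above pairs each dimension name with the numeric label from the cells iterator and records one (address, value) entry per cell. A stripped-down analogue over standard containers, assuming a row-major cell layout as in DenseTensor:

#include <cassert>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

// One cell of a "spec": dimension name -> label index, plus the cell value.
using Address = std::map<std::string, size_t>;

int main() {
    // A 2x3 dense tensor x[2]y[3] stored row-major, as in DenseTensor::_cells.
    std::vector<std::string> dims = {"x", "y"};
    std::vector<size_t> sizes = {2, 3};
    std::vector<double> cells = {1, 2, 3, 4, 5, 6};

    std::vector<std::pair<Address, double>> spec;
    std::vector<size_t> idx(dims.size(), 0);
    for (double value : cells) {
        Address address;
        for (size_t d = 0; d < dims.size(); ++d) {
            address.emplace(dims[d], idx[d]);   // mirrors buildAddress() above
        }
        spec.emplace_back(address, value);
        // advance the row-major multi-index
        for (size_t d = dims.size(); d-- > 0; ) {
            if (++idx[d] < sizes[d]) break;
            idx[d] = 0;
        }
    }
    assert(spec.size() == cells.size());
    std::printf("cell {x:1,y:2} = %.1f\n", spec.back().second);  // prints 6.0
    return 0;
}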
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h
index 73d9c26c408..b7d911363ba 100644
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor.h
@@ -69,6 +69,7 @@ public:
void next();
double cell() const { return _cells[_cellIdx]; }
const std::vector<size_t> &address() const { return _address; }
+ const DimensionsMeta &dimensions() const { return _dimensionsMeta; }
};
@@ -99,10 +100,16 @@ public:
virtual Tensor::UP match(const Tensor &arg) const override;
virtual Tensor::UP apply(const CellFunction &func) const override;
virtual Tensor::UP sum(const vespalib::string &dimension) const override;
+ virtual Tensor::UP apply(const eval::BinaryOperation &op,
+ const Tensor &arg) const override;
+ virtual Tensor::UP reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions)
+ const override;
virtual bool equals(const Tensor &arg) const override;
virtual void print(std::ostream &out) const override;
virtual vespalib::string toString() const override;
virtual Tensor::UP clone() const override;
+ virtual eval::TensorSpec toSpec() const override;
virtual void accept(TensorVisitor &visitor) const override;
};
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp
new file mode 100644
index 00000000000..2ad4228e0ec
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.cpp
@@ -0,0 +1,124 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "dense_tensor_address_combiner.h"
+#include <vespa/vespalib/util/exceptions.h>
+#include <vespa/vespalib/util/stringfmt.h>
+
+namespace vespalib {
+namespace tensor {
+
+using Address = DenseTensorAddressCombiner::Address;
+using DimensionsMeta = DenseTensorAddressCombiner::DimensionsMeta;
+
+namespace {
+
+class AddressReader
+{
+private:
+ const Address &_address;
+ size_t _idx;
+
+public:
+ AddressReader(const Address &address)
+ : _address(address),
+ _idx(0)
+ {}
+ size_t nextLabel() {
+ return _address[_idx++];
+ }
+ bool valid() {
+ return _idx < _address.size();
+ }
+};
+
+}
+
+DenseTensorAddressCombiner::DenseTensorAddressCombiner(const DimensionsMeta &lhs,
+ const DimensionsMeta &rhs)
+ : _ops(),
+ _combinedAddress()
+{
+ auto rhsItr = rhs.cbegin();
+ auto rhsItrEnd = rhs.cend();
+ for (const auto &lhsDim : lhs) {
+ while ((rhsItr != rhsItrEnd) && (rhsItr->dimension() < lhsDim.dimension())) {
+ _ops.push_back(AddressOp::RHS);
+ ++rhsItr;
+ }
+ if ((rhsItr != rhsItrEnd) && (rhsItr->dimension() == lhsDim.dimension())) {
+ _ops.push_back(AddressOp::BOTH);
+ ++rhsItr;
+ } else {
+ _ops.push_back(AddressOp::LHS);
+ }
+ }
+ while (rhsItr != rhsItrEnd) {
+ _ops.push_back(AddressOp::RHS);
+ ++rhsItr;
+ }
+}
+
+bool
+DenseTensorAddressCombiner::combine(const CellsIterator &lhsItr,
+ const CellsIterator &rhsItr)
+{
+ _combinedAddress.clear();
+ AddressReader lhsReader(lhsItr.address());
+ AddressReader rhsReader(rhsItr.address());
+ for (const auto &op : _ops) {
+ switch (op) {
+ case AddressOp::LHS:
+ _combinedAddress.emplace_back(lhsReader.nextLabel());
+ break;
+ case AddressOp::RHS:
+ _combinedAddress.emplace_back(rhsReader.nextLabel());
+ break;
+ case AddressOp::BOTH:
+ size_t lhsLabel = lhsReader.nextLabel();
+ size_t rhsLabel = rhsReader.nextLabel();
+ if (lhsLabel != rhsLabel) {
+ return false;
+ }
+ _combinedAddress.emplace_back(lhsLabel);
+ }
+ }
+ assert(!lhsReader.valid());
+ assert(!rhsReader.valid());
+ return true;
+}
+
+namespace {
+
+void
+validateDimensionsMeta(const DimensionsMeta &dimensionsMeta)
+{
+ for (size_t i = 1; i < dimensionsMeta.size(); ++i) {
+ const auto &prevDimMeta = dimensionsMeta[i-1];
+ const auto &currDimMeta = dimensionsMeta[i];
+ if ((prevDimMeta.dimension() == currDimMeta.dimension()) &&
+ (prevDimMeta.size() != currDimMeta.size()))
+ {
+ throw IllegalArgumentException(make_string(
+ "Shared dimension '%s' has mis-matching label ranges: "
+ "[0, %zu> vs [0, %zu>. This is not supported.",
+ prevDimMeta.dimension().c_str(), prevDimMeta.size(), currDimMeta.size()));
+ }
+ }
+}
+
+}
+
+DimensionsMeta
+DenseTensorAddressCombiner::combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs)
+{
+ DimensionsMeta result;
+ std::set_union(lhs.cbegin(), lhs.cend(),
+ rhs.cbegin(), rhs.cend(),
+ std::back_inserter(result));
+ validateDimensionsMeta(result);
+ return result;
+}
+
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h
new file mode 100644
index 00000000000..2c7f9e61223
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_address_combiner.h
@@ -0,0 +1,46 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include <vespa/vespalib/tensor/dense/dense_tensor.h>
+
+namespace vespalib {
+namespace tensor {
+
+/**
+ * Combines two dense tensor addresses into a new tensor address.
+ * The resulting dimensions are the union of the input dimensions, and
+ * common dimensions must have matching labels.
+ */
+class DenseTensorAddressCombiner
+{
+public:
+ using Address = std::vector<size_t>;
+ using DimensionsMeta = DenseTensor::DimensionsMeta;
+
+private:
+ enum class AddressOp {
+ LHS,
+ RHS,
+ BOTH
+ };
+
+ using CellsIterator = DenseTensor::CellsIterator;
+
+ std::vector<AddressOp> _ops;
+ Address _combinedAddress;
+
+public:
+ DenseTensorAddressCombiner(const DimensionsMeta &lhs,
+ const DimensionsMeta &rhs);
+
+ bool combine(const CellsIterator &lhsItr,
+ const CellsIterator &rhsItr);
+ const Address &address() const { return _combinedAddress; }
+
+ static DimensionsMeta combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs);
+
+};
+
+} // namespace vespalib::tensor
+} // namespace vespalib
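The combiner precomputes one instruction per output dimension (take the label from LHS, from RHS, or require both labels to match) from the two sorted dimension lists, then replays that list for every address pair. A compact standalone sketch of the same merge, using plain vectors of dimension names and numeric labels:

#include <cstdio>
#include <string>
#include <vector>

enum class AddressOp { LHS, RHS, BOTH };

// Build the instruction list from two sorted dimension-name lists.
std::vector<AddressOp> buildOps(const std::vector<std::string> &lhs,
                                const std::vector<std::string> &rhs) {
    std::vector<AddressOp> ops;
    size_t r = 0;
    for (const auto &lhsDim : lhs) {
        while (r < rhs.size() && rhs[r] < lhsDim) { ops.push_back(AddressOp::RHS); ++r; }
        if (r < rhs.size() && rhs[r] == lhsDim) { ops.push_back(AddressOp::BOTH); ++r; }
        else { ops.push_back(AddressOp::LHS); }
    }
    while (r < rhs.size()) { ops.push_back(AddressOp::RHS); ++r; }
    return ops;
}

// Replay the instructions for one address pair; false means shared labels disagree.
bool combine(const std::vector<AddressOp> &ops,
             const std::vector<size_t> &lhs, const std::vector<size_t> &rhs,
             std::vector<size_t> &out) {
    out.clear();
    size_t l = 0, r = 0;
    for (AddressOp op : ops) {
        switch (op) {
        case AddressOp::LHS:  out.push_back(lhs[l++]); break;
        case AddressOp::RHS:  out.push_back(rhs[r++]); break;
        case AddressOp::BOTH:
            if (lhs[l] != rhs[r]) return false;
            out.push_back(lhs[l++]); ++r;
        }
    }
    return true;
}

int main() {
    auto ops = buildOps({"x", "y"}, {"y", "z"});        // common dimension: y
    std::vector<size_t> out;
    bool ok = combine(ops, {1, 2}, {2, 7}, out);        // y labels match (2 == 2)
    std::printf("%d -> {%zu,%zu,%zu}\n", ok, out[0], out[1], out[2]);  // 1 -> {1,2,7}
    return 0;
}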
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h
new file mode 100644
index 00000000000..307e1db43d3
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.h
@@ -0,0 +1,25 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace vespalib {
+namespace tensor {
+
+class Tensor;
+class DenseTensor;
+
+namespace dense {
+
+/**
+ * Creates a new tensor using all combinations of input tensor cells with matching
+ * labels for common dimensions, using func to calculate each new cell value
+ * based on the cell values in the input tensors.
+ */
+template <typename Function>
+std::unique_ptr<Tensor>
+apply(const DenseTensor &lhs, const DenseTensor &rhs, Function &&func);
+
+} // namespace vespalib::tensor::dense
+} // namespace vespalib::tensor
+} // namespace vespalib
+
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp
new file mode 100644
index 00000000000..3168089b941
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_apply.hpp
@@ -0,0 +1,32 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "dense_tensor_apply.h"
+#include "dense_tensor_address_combiner.h"
+#include "direct_dense_tensor_builder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace dense {
+
+template <typename Function>
+std::unique_ptr<Tensor>
+apply(const DenseTensor &lhs, const DenseTensor &rhs, Function &&func)
+{
+ DenseTensorAddressCombiner combiner(lhs.dimensionsMeta(), rhs.dimensionsMeta());
+ DirectDenseTensorBuilder builder(DenseTensorAddressCombiner::combineDimensions(lhs.dimensionsMeta(), rhs.dimensionsMeta()));
+ for (DenseTensor::CellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) {
+ for (DenseTensor::CellsIterator rhsItr = rhs.cellsIterator(); rhsItr.valid(); rhsItr.next()) {
+ bool combineSuccess = combiner.combine(lhsItr, rhsItr);
+ if (combineSuccess) {
+ builder.insertCell(combiner.address(), func(lhsItr.cell(), rhsItr.cell()));
+ }
+ }
+ }
+ return builder.build();
+}
+
+} // namespace vespalib::tensor::dense
+} // namespace vespalib::tensor
+} // namespace vespalib
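dense::apply is a brute-force join: every cell of the left tensor is paired with every cell of the right, pairs whose shared-dimension labels disagree are dropped, and the rest are written through func into a tensor over the union of the dimensions. A tiny analogue over sparse (address to value) maps that keeps the shape of the algorithm while leaving out the dense addressing:

#include <cstdio>
#include <functional>
#include <map>
#include <string>

using Address = std::map<std::string, int>;   // dimension -> label
using Cells = std::map<Address, double>;

// Join two cell addresses; fail if a shared dimension has different labels.
bool combine(const Address &a, const Address &b, Address &out) {
    out = a;
    for (const auto &kv : b) {
        auto ins = out.insert(kv);
        if (!ins.second && ins.first->second != kv.second) return false;
    }
    return true;
}

Cells apply(const Cells &lhs, const Cells &rhs,
            const std::function<double(double, double)> &func) {
    Cells result;
    for (const auto &l : lhs) {
        for (const auto &r : rhs) {            // brute-force pairing, as above
            Address addr;
            if (combine(l.first, r.first, addr)) {
                result[addr] = func(l.second, r.second);
            }
        }
    }
    return result;
}

int main() {
    Cells a = {{{{"x", 0}}, 2.0}, {{{"x", 1}}, 3.0}};
    Cells b = {{{{"y", 0}}, 10.0}};
    Cells prod = apply(a, b, [](double l, double r) { return l * r; });
    std::printf("%zu cells, first = %.1f\n", prod.size(), prod.begin()->second);  // 2 cells, 20.0
    return 0;
}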
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp
deleted file mode 100644
index f94c9137798..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "dense_tensor_dimension_sum.h"
-
-namespace vespalib {
-namespace tensor {
-
-using DimensionsMeta = DenseTensor::DimensionsMeta;
-
-namespace {
-
-DimensionsMeta
-removeDimension(const DimensionsMeta &dimensionsMeta,
- const string &dimension)
-{
- DimensionsMeta result = dimensionsMeta;
- auto itr = std::lower_bound(result.begin(), result.end(), dimension,
- [](const auto &dimMeta, const auto &dimension_in)
- { return dimMeta.dimension() < dimension_in; });
- if ((itr != result.end()) && (itr->dimension() == dimension)) {
- result.erase(itr);
- }
- return result;
-}
-
-size_t
-calcCellsSize(const DimensionsMeta &dimensionsMeta)
-{
- size_t cellsSize = 1;
- for (const auto &dimMeta : dimensionsMeta) {
- cellsSize *= dimMeta.size();
- }
- return cellsSize;
-}
-
-struct DimensionSummer
-{
- size_t _innerDimSize;
- size_t _sumDimSize;
- size_t _outerDimSize;
- using Cells = DenseTensor::Cells;
-
- DimensionSummer(const DimensionsMeta &dimensionsMeta,
- const string &dimension)
- : _innerDimSize(1),
- _sumDimSize(1),
- _outerDimSize(1)
- {
- auto itr = std::lower_bound(dimensionsMeta.cbegin(), dimensionsMeta.cend(), dimension,
- [](const auto &dimMeta, const auto &dimension_in)
- { return dimMeta.dimension() < dimension_in; });
- if ((itr != dimensionsMeta.end()) && (itr->dimension() == dimension)) {
- for (auto outerItr = dimensionsMeta.cbegin(); outerItr != itr; ++outerItr) {
- _outerDimSize *= outerItr->size();
- }
- _sumDimSize = itr->size();
- for (++itr; itr != dimensionsMeta.cend(); ++itr) {
- _innerDimSize *= itr->size();
- }
- } else {
- _outerDimSize = calcCellsSize(dimensionsMeta);
- }
- }
-
- void
- sumCells(Cells &cells, const Cells &cells_in) const
- {
- auto itr_in = cells_in.cbegin();
- auto itr = cells.begin();
- for (size_t outerDim = 0; outerDim < _outerDimSize;
- ++outerDim) {
- auto saved_itr = itr;
- for (size_t sumDim = 0; sumDim < _sumDimSize; ++sumDim) {
- itr = saved_itr;
- for (size_t innerDim = 0; innerDim < _innerDimSize;
- ++innerDim) {
- *itr += *itr_in;
- ++itr;
- ++itr_in;
- }
- }
- }
- assert(itr == cells.end());
- assert(itr_in == cells_in.cend());
- }
-};
-
-
-}
-
-
-DenseTensorDimensionSum::DenseTensorDimensionSum(const TensorImplType &tensor,
- const string &dimension)
- : _dimensionsMeta(removeDimension(tensor.dimensionsMeta(),
- dimension)),
- _cells(calcCellsSize(_dimensionsMeta))
-{
- DimensionSummer dimensionSummer(tensor.dimensionsMeta(),
- dimension);
- dimensionSummer.sumCells(_cells, tensor.cells());
-}
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h
deleted file mode 100644
index c61e07d5c3a..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_dimension_sum.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "dense_tensor.h"
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * Returns a tensor with the given dimension removed and the cell values in that dimension summed.
- */
-class DenseTensorDimensionSum
-{
-public:
- using TensorImplType = DenseTensor;
-private:
- using DimensionMeta = DenseTensor::DimensionMeta;
- using DimensionsMeta = DenseTensor::DimensionsMeta;
- using Cells = DenseTensor::Cells;
-
- DimensionsMeta _dimensionsMeta;
- Cells _cells;
-
-public:
- DenseTensorDimensionSum(const TensorImplType &tensor,
- const vespalib::string &dimension);
-
- Tensor::UP result() {
- return std::make_unique<DenseTensor>(std::move(_dimensionsMeta),
- std::move(_cells));
- }
-};
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp
deleted file mode 100644
index fff5f21d3d1..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.cpp
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "dense_tensor_product.h"
-#include <vespa/vespalib/util/exceptions.h>
-
-namespace vespalib {
-namespace tensor {
-
-using DimensionsMeta = DenseTensor::DimensionsMeta;
-using CellsIterator = DenseTensor::CellsIterator;
-using Address = std::vector<size_t>;
-
-using vespalib::IllegalArgumentException;
-using vespalib::make_string;
-
-namespace {
-
-enum class AddressCombineOp
-{
- LHS,
- RHS,
- BOTH
-};
-
-using AddressCombineOps = std::vector<AddressCombineOp>;
-
-class AddressReader
-{
-private:
- const Address &_address;
- size_t _idx;
-
-public:
- AddressReader(const Address &address)
- : _address(address),
- _idx(0)
- {}
- size_t nextLabel() {
- return _address[_idx++];
- }
- bool valid() {
- return _idx < _address.size();
- }
-};
-
-class CellsInserter
-{
-private:
- const DimensionsMeta &_dimensionsMeta;
- DenseTensor::Cells &_cells;
-
- size_t calculateCellAddress(const Address &address) {
- assert(address.size() == _dimensionsMeta.size());
- size_t result = 0;
- for (size_t i = 0; i < address.size(); ++i) {
- result *= _dimensionsMeta[i].size();
- result += address[i];
- }
- return result;
- }
-
-public:
- CellsInserter(const DimensionsMeta &dimensionsMeta,
- DenseTensor::Cells &cells)
- : _dimensionsMeta(dimensionsMeta),
- _cells(cells)
- {}
- void insertCell(const Address &address, double cellValue) {
- size_t cellAddress = calculateCellAddress(address);
- assert(cellAddress < _cells.size());
- _cells[cellAddress] = cellValue;
- }
-};
-
-void
-validateDimensionsMeta(const DimensionsMeta &dimensionsMeta)
-{
- for (size_t i = 1; i < dimensionsMeta.size(); ++i) {
- const auto &prevDimMeta = dimensionsMeta[i-1];
- const auto &currDimMeta = dimensionsMeta[i];
- if ((prevDimMeta.dimension() == currDimMeta.dimension()) &&
- (prevDimMeta.size() != currDimMeta.size())) {
- throw IllegalArgumentException(make_string(
- "Shared dimension '%s' in dense tensor product has mis-matching label ranges: "
- "[0, %zu> vs [0, %zu>. This is not supported.",
- prevDimMeta.dimension().c_str(), prevDimMeta.size(), currDimMeta.size()));
- }
- }
-}
-
-DimensionsMeta
-combineDimensions(const DimensionsMeta &lhs, const DimensionsMeta &rhs)
-{
- DimensionsMeta result;
- std::set_union(lhs.cbegin(), lhs.cend(),
- rhs.cbegin(), rhs.cend(),
- std::back_inserter(result));
- validateDimensionsMeta(result);
- return result;
-}
-
-size_t
-calculateCellsSize(const DimensionsMeta &dimensionsMeta)
-{
- size_t cellsSize = 1;
- for (const auto &dimMeta : dimensionsMeta) {
- cellsSize *= dimMeta.size();
- }
- return cellsSize;
-}
-
-AddressCombineOps
-buildCombineOps(const DimensionsMeta &lhs,
- const DimensionsMeta &rhs)
-{
- AddressCombineOps ops;
- auto rhsItr = rhs.cbegin();
- auto rhsItrEnd = rhs.cend();
- for (const auto &lhsDim : lhs) {
- while ((rhsItr != rhsItrEnd) && (rhsItr->dimension() < lhsDim.dimension())) {
- ops.push_back(AddressCombineOp::RHS);
- ++rhsItr;
- }
- if ((rhsItr != rhsItrEnd) && (rhsItr->dimension() == lhsDim.dimension())) {
- ops.push_back(AddressCombineOp::BOTH);
- ++rhsItr;
- } else {
- ops.push_back(AddressCombineOp::LHS);
- }
- }
- while (rhsItr != rhsItrEnd) {
- ops.push_back(AddressCombineOp::RHS);
- ++rhsItr;
- }
- return ops;
-}
-
-bool
-combineAddress(Address &combinedAddress,
- const CellsIterator &lhsItr,
- const CellsIterator &rhsItr,
- const AddressCombineOps &ops)
-{
- combinedAddress.clear();
- AddressReader lhsReader(lhsItr.address());
- AddressReader rhsReader(rhsItr.address());
- for (const auto &op : ops) {
- switch (op) {
- case AddressCombineOp::LHS:
- combinedAddress.emplace_back(lhsReader.nextLabel());
- break;
- case AddressCombineOp::RHS:
- combinedAddress.emplace_back(rhsReader.nextLabel());
- break;
- case AddressCombineOp::BOTH:
- size_t lhsLabel = lhsReader.nextLabel();
- size_t rhsLabel = rhsReader.nextLabel();
- if (lhsLabel != rhsLabel) {
- return false;
- }
- combinedAddress.emplace_back(lhsLabel);
- }
- }
- assert(!lhsReader.valid());
- assert(!rhsReader.valid());
- return true;
-}
-
-}
-
-void
-DenseTensorProduct::bruteForceProduct(const DenseTensor &lhs,
- const DenseTensor &rhs)
-{
- AddressCombineOps ops = buildCombineOps(lhs.dimensionsMeta(), rhs.dimensionsMeta());
- Address combinedAddress;
- CellsInserter cellsInserter(_dimensionsMeta, _cells);
- for (CellsIterator lhsItr = lhs.cellsIterator(); lhsItr.valid(); lhsItr.next()) {
- for (CellsIterator rhsItr = rhs.cellsIterator(); rhsItr.valid(); rhsItr.next()) {
- bool combineSuccess = combineAddress(combinedAddress, lhsItr, rhsItr, ops);
- if (combineSuccess) {
- cellsInserter.insertCell(combinedAddress, lhsItr.cell() * rhsItr.cell());
- }
- }
- }
-}
-
-DenseTensorProduct::DenseTensorProduct(const DenseTensor &lhs,
- const DenseTensor &rhs)
- : _dimensionsMeta(combineDimensions(lhs.dimensionsMeta(), rhs.dimensionsMeta())),
- _cells(calculateCellsSize(_dimensionsMeta))
-{
- bruteForceProduct(lhs, rhs);
-}
-
-Tensor::UP
-DenseTensorProduct::result()
-{
- return std::make_unique<DenseTensor>(std::move(_dimensionsMeta), std::move(_cells));
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h
deleted file mode 100644
index 5615067119b..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_product.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include "dense_tensor.h"
-#include <vespa/vespalib/tensor/tensor_operation.h>
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * Returns the tensor product of the two given dense tensors.
- * This is all combinations of all cells in the first tensor with all cells of
- * the second tensor.
- *
- * Shared dimensions must have the same label range from [0, dimSize>.
- */
-class DenseTensorProduct
-{
-private:
- DenseTensor::DimensionsMeta _dimensionsMeta;
- DenseTensor::Cells _cells;
-
- void bruteForceProduct(const DenseTensor &lhs, const DenseTensor &rhs);
-
-public:
- DenseTensorProduct(const DenseTensor &lhs, const DenseTensor &rhs);
- Tensor::UP result();
-};
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h
new file mode 100644
index 00000000000..ce3bf308fd3
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.h
@@ -0,0 +1,21 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "dense_tensor.h"
+
+namespace vespalib {
+namespace tensor {
+namespace dense {
+
+/**
+ * Returns a tensor with the given dimension(s) removed and the cell values across
+ * the removed dimension(s) combined using the given func.
+ */
+template<typename Function>
+std::unique_ptr<Tensor>
+reduce(const DenseTensor &tensor, const std::vector<vespalib::string> &dimensions, Function &&func);
+
+} // namespace dense
+} // namespace tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp
new file mode 100644
index 00000000000..0e890fa9bc4
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/dense_tensor_reduce.hpp
@@ -0,0 +1,133 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "dense_tensor_reduce.h"
+
+namespace vespalib {
+namespace tensor {
+namespace dense {
+
+using Cells = DenseTensor::Cells;
+using DimensionsMeta = DenseTensor::DimensionsMeta;
+
+namespace {
+
+DimensionsMeta
+removeDimension(const DimensionsMeta &dimensionsMeta,
+ const string &dimensionToRemove)
+{
+ DimensionsMeta result = dimensionsMeta;
+ auto itr = std::lower_bound(result.begin(), result.end(), dimensionToRemove,
+ [](const auto &dimMeta, const auto &dimension_in) {
+ return dimMeta.dimension() < dimension_in;
+ });
+ if ((itr != result.end()) && (itr->dimension() == dimensionToRemove)) {
+ result.erase(itr);
+ }
+ return result;
+}
+
+size_t
+calcCellsSize(const DimensionsMeta &dimensionsMeta)
+{
+ size_t cellsSize = 1;
+ for (const auto &dimMeta : dimensionsMeta) {
+ cellsSize *= dimMeta.size();
+ }
+ return cellsSize;
+}
+
+
+class DimensionReducer
+{
+private:
+ DimensionsMeta _dimensionsResult;
+ Cells _cellsResult;
+ size_t _innerDimSize;
+ size_t _sumDimSize;
+ size_t _outerDimSize;
+
+ void setup(const DimensionsMeta &dimensions,
+ const vespalib::string &dimensionToRemove) {
+ auto itr = std::lower_bound(dimensions.cbegin(), dimensions.cend(), dimensionToRemove,
+ [](const auto &dimMeta, const auto &dimension) {
+ return dimMeta.dimension() < dimension;
+ });
+ if ((itr != dimensions.end()) && (itr->dimension() == dimensionToRemove)) {
+ for (auto outerItr = dimensions.cbegin(); outerItr != itr; ++outerItr) {
+ _outerDimSize *= outerItr->size();
+ }
+ _sumDimSize = itr->size();
+ for (++itr; itr != dimensions.cend(); ++itr) {
+ _innerDimSize *= itr->size();
+ }
+ } else {
+ _outerDimSize = calcCellsSize(dimensions);
+ }
+ }
+
+public:
+ DimensionReducer(const DimensionsMeta &dimensions,
+ const string &dimensionToRemove)
+ : _dimensionsResult(removeDimension(dimensions, dimensionToRemove)),
+ _cellsResult(calcCellsSize(_dimensionsResult)),
+ _innerDimSize(1),
+ _sumDimSize(1),
+ _outerDimSize(1)
+ {
+ setup(dimensions, dimensionToRemove);
+ }
+
+ template <typename Function>
+ DenseTensor::UP
+ reduceCells(const Cells &cellsIn, Function &&func) {
+ auto itr_in = cellsIn.cbegin();
+ auto itr_out = _cellsResult.begin();
+ for (size_t outerDim = 0; outerDim < _outerDimSize; ++outerDim) {
+ auto saved_itr = itr_out;
+ for (size_t sumDim = 0; sumDim < _sumDimSize; ++sumDim) {
+ itr_out = saved_itr;
+ for (size_t innerDim = 0; innerDim < _innerDimSize; ++innerDim) {
+ *itr_out = func(*itr_out, *itr_in);
+ ++itr_out;
+ ++itr_in;
+ }
+ }
+ }
+ assert(itr_out == _cellsResult.end());
+ assert(itr_in == cellsIn.cend());
+ return std::make_unique<DenseTensor>(std::move(_dimensionsResult), std::move(_cellsResult));
+ }
+};
+
+template <typename Function>
+DenseTensor::UP
+reduce(const DenseTensor &tensor, const vespalib::string &dimensionToRemove, Function &&func)
+{
+ DimensionReducer reducer(tensor.dimensionsMeta(), dimensionToRemove);
+ return reducer.reduceCells(tensor.cells(), func);
+}
+
+}
+
+template <typename Function>
+std::unique_ptr<Tensor>
+reduce(const DenseTensor &tensor, const std::vector<vespalib::string> &dimensions, Function &&func)
+{
+ if (dimensions.size() == 1) {
+ return reduce(tensor, dimensions[0], func);
+ } else if (dimensions.size() > 0) {
+ DenseTensor::UP result = reduce(tensor, dimensions[0], func);
+ for (size_t i = 1; i < dimensions.size(); ++i) {
+ DenseTensor::UP tmpResult = reduce(*result, dimensions[i], func);
+ result = std::move(tmpResult);
+ }
+ return result;
+ } else {
+ return std::unique_ptr<Tensor>();
+ }
+}
+
+} // namespace dense
+} // namespace tensor
+} // namespace vespalib
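The reducer relies on the row-major layout: removing dimension k splits the flat cell array into outerDimSize blocks of sumDimSize runs of innerDimSize cells, and the runs within each block are folded together element-wise with func. A standalone sketch of that index arithmetic for one removed dimension (standard containers only; the real code folds into a zero-initialized result array rather than seeding from the first run):

#include <cassert>
#include <cstdio>
#include <functional>
#include <vector>

// Reduce away dimension `dim` of a row-major tensor with the given sizes.
std::vector<double> reduceDim(const std::vector<double> &cells,
                              const std::vector<size_t> &sizes, size_t dim,
                              const std::function<double(double, double)> &func) {
    size_t outer = 1, inner = 1;
    for (size_t d = 0; d < dim; ++d) outer *= sizes[d];
    for (size_t d = dim + 1; d < sizes.size(); ++d) inner *= sizes[d];
    size_t sum = sizes[dim];
    std::vector<double> out(outer * inner);
    auto in = cells.begin();
    auto itr = out.begin();
    for (size_t o = 0; o < outer; ++o) {
        auto saved = itr;
        for (size_t s = 0; s < sum; ++s) {
            itr = saved;
            for (size_t i = 0; i < inner; ++i) {
                *itr = (s == 0) ? *in : func(*itr, *in);   // first run seeds the result
                ++itr; ++in;
            }
        }
    }
    assert(in == cells.end());
    return out;
}

int main() {
    // x[2]y[3], row-major: x0:{1,2,3}, x1:{4,5,6}; summing over y gives {6, 15}.
    std::vector<double> cells = {1, 2, 3, 4, 5, 6};
    auto out = reduceDim(cells, {2, 3}, 1, [](double a, double b) { return a + b; });
    std::printf("%.1f %.1f\n", out[0], out[1]);
    return 0;
}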
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp
new file mode 100644
index 00000000000..dd1682fb451
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.cpp
@@ -0,0 +1,59 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "direct_dense_tensor_builder.h"
+
+namespace vespalib {
+namespace tensor {
+
+using Address = DirectDenseTensorBuilder::Address;
+using DimensionsMeta = DirectDenseTensorBuilder::DimensionsMeta;
+
+namespace {
+
+size_t
+calculateCellsSize(const DimensionsMeta &dimensionsMeta)
+{
+ size_t cellsSize = 1;
+ for (const auto &dimMeta : dimensionsMeta) {
+ cellsSize *= dimMeta.size();
+ }
+ return cellsSize;
+}
+
+size_t
+calculateCellAddress(const Address &address, const DimensionsMeta &dimensionsMeta)
+{
+ assert(address.size() == dimensionsMeta.size());
+ size_t result = 0;
+ for (size_t i = 0; i < address.size(); ++i) {
+ result *= dimensionsMeta[i].size();
+ result += address[i];
+ }
+ return result;
+}
+
+}
+
+DirectDenseTensorBuilder::DirectDenseTensorBuilder(const DimensionsMeta &dimensionsMeta)
+ : _dimensionsMeta(dimensionsMeta),
+ _cells(calculateCellsSize(_dimensionsMeta))
+{
+}
+
+void
+DirectDenseTensorBuilder::insertCell(const Address &address, double cellValue)
+{
+ size_t cellAddress = calculateCellAddress(address, _dimensionsMeta);
+ assert(cellAddress < _cells.size());
+ _cells[cellAddress] = cellValue;
+}
+
+Tensor::UP
+DirectDenseTensorBuilder::build()
+{
+ return std::make_unique<DenseTensor>(std::move(_dimensionsMeta), std::move(_cells));
+}
+
+} // namespace tensor
+} // namespace vespalib
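calculateCellAddress above is ordinary row-major flattening: with dimension sizes s0..sn, address (a0..an) maps to ((a0*s1 + a1)*s2 + a2) and so on. A minimal check of that formula:

#include <cassert>
#include <cstdio>
#include <vector>

// Row-major flattening, as in calculateCellAddress above.
size_t flatten(const std::vector<size_t> &address, const std::vector<size_t> &sizes) {
    assert(address.size() == sizes.size());
    size_t result = 0;
    for (size_t i = 0; i < address.size(); ++i) {
        result = result * sizes[i] + address[i];
    }
    return result;
}

int main() {
    // x[2]y[3]z[4]: address (1,2,3) -> (1*3 + 2)*4 + 3 = 23, the last cell.
    std::printf("%zu\n", flatten({1, 2, 3}, {2, 3, 4}));
    return 0;
}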
diff --git a/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h
new file mode 100644
index 00000000000..74234f1cabe
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/dense/direct_dense_tensor_builder.h
@@ -0,0 +1,31 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "dense_tensor.h"
+
+namespace vespalib {
+namespace tensor {
+
+/**
+ * Class for building a dense tensor by inserting cell values directly into the underlying array of cells.
+ */
+class DirectDenseTensorBuilder
+{
+public:
+ using DimensionsMeta = DenseTensor::DimensionsMeta;
+ using Cells = DenseTensor::Cells;
+ using Address = std::vector<size_t>;
+
+private:
+ DimensionsMeta _dimensionsMeta;
+ Cells _cells;
+
+public:
+ DirectDenseTensorBuilder(const DimensionsMeta &dimensionsMeta);
+ void insertCell(const Address &address, double cellValue);
+ Tensor::UP build();
+};
+
+} // namespace tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h b/vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h
deleted file mode 100644
index f23c4b6e20f..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/dimensions_vector_iterator.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace vespalib {
-namespace tensor {
-
-using DimensionsVector = std::vector<vespalib::stringref>;
-
-/**
- * An iterator for a dimensions vector used to simplify 3-way merge
- * between two tensor addresses and a dimension vector.
- */
-class DimensionsVectorIterator
-{
- using InnerIterator = DimensionsVector::const_iterator;
- InnerIterator _itr;
- InnerIterator _itrEnd;
-public:
- DimensionsVectorIterator(const DimensionsVector &dimensions)
- : _itr(dimensions.cbegin()),
- _itrEnd(dimensions.cend())
- {
- }
- bool valid() const { return (_itr != _itrEnd); }
- vespalib::stringref dimension() const { return *_itr; }
- template <typename Iterator>
- bool beforeDimension(const Iterator &rhs) const {
- if (!valid()) {
- return false;
- }
- if (!rhs.valid()) {
- return true;
- }
- return (*_itr < rhs.dimension());
- }
- bool atDimension(vespalib::stringref rhsDimension) const
- {
- return (valid() && (*_itr == rhsDimension));
- }
- void next() { ++_itr; }
-};
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h b/vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h
deleted file mode 100644
index d691732b800..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/join_tensor_addresses.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace vespalib {
-namespace tensor {
-
-/*
- * Combine two tensor addresses, but fail if dimension label doesn't match
- * for common dimensions. Use 3-way merge between two tensors and a vector
- * of dimensions. To be used when we have few common dimensions.
- * The commonDimensions parameter is the intersection of the
- * dimensions in the two input tensors.
- */
-template <class AddressBuilder, class LhsAddress, class RhsAddress>
-bool
-joinTensorAddresses(AddressBuilder &combined,
- const DimensionsVector &commonDimensions,
- const LhsAddress &lhs,
- const RhsAddress &rhs)
-{
- TensorAddressElementIterator<LhsAddress> lhsItr(lhs);
- TensorAddressElementIterator<RhsAddress> rhsItr(rhs);
- DimensionsVectorIterator dimsItr(commonDimensions);
- combined.clear();
- while (lhsItr.valid()) {
- while (dimsItr.beforeDimension(lhsItr)) {
- rhsItr.addElements(combined, dimsItr);
- if (rhsItr.atDimension(dimsItr.dimension())) {
- // needed dimension missing from lhs
- return false;
- }
- dimsItr.next();
- }
- if (dimsItr.atDimension(lhsItr.dimension())) {
- rhsItr.addElements(combined, dimsItr);
- if (!rhsItr.atDimension(dimsItr.dimension())) {
- // needed dimension missing from rhs
- return false;
- }
- if (lhsItr.label() != rhsItr.label()) {
- // dimension exists in both rhs and lhs, but labels don't match
- return false;
- }
- // common dimension, labels match
- lhsItr.addElement(combined);
- lhsItr.next();
- rhsItr.next();
- dimsItr.next();
- continue;
- }
- rhsItr.addElements(combined, lhsItr);
- assert(lhsItr.beforeDimension(rhsItr));
- lhsItr.addElement(combined);
- lhsItr.next();
- }
- while (dimsItr.valid()) {
- rhsItr.addElements(combined, dimsItr);
- if (rhsItr.atDimension(dimsItr.dimension())) {
- // needed dimension missing from lhs
- return false;
- }
- dimsItr.next();
- }
- rhsItr.addElements(combined);
- // All matching
- return true;
-}
-
-/*
- * Combine two tensor addresses, but fail if dimension label doesn't match
- * for common dimensions. Use 3-way merge between two tensors and a vector
- * of dimensions. To be used when we have many common dimensions.
- * The commonDimensions parameter is the intersection of the
- * dimensions in the two input tensors.
- */
-template <class AddressBuilder, class LhsAddress, class RhsAddress>
-bool
-joinTensorAddresses(AddressBuilder &combined,
- const DimensionsSet &commonDimensions,
- const LhsAddress &lhs,
- const RhsAddress &rhs)
-{
- TensorAddressElementIterator<LhsAddress> lhsItr(lhs);
- TensorAddressElementIterator<RhsAddress> rhsItr(rhs);
- combined.clear();
- if (lhsItr.valid() && rhsItr.valid()) {
- for (;;) {
- if (lhsItr.beforeDimension(rhsItr)) {
- if (!lhsItr.addElements(combined, commonDimensions, rhsItr)) {
- return false;
- }
- if (!lhsItr.valid()) {
- break;
- }
- }
- if (lhsItr.dimension() == rhsItr.dimension()) {
- if (lhsItr.label() != rhsItr.label()) {
- return false;
- }
- lhsItr.addElement(combined);
- lhsItr.next();
- rhsItr.next();
- if (!lhsItr.valid() || !rhsItr.valid()) {
- break;
- }
- continue;
- }
- if (!rhsItr.addElements(combined, commonDimensions, lhsItr)) {
- return false;
- }
- if (!rhsItr.valid()) {
- break;
- }
- }
- }
- if (!lhsItr.addElements(combined, commonDimensions)) {
- return false;
- }
- if (!rhsItr.addElements(combined, commonDimensions)) {
- return false;
- }
- // All matching
- return true;
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt b/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt
index aa2cc7869e5..7d8725ad610 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/CMakeLists.txt
@@ -2,12 +2,10 @@
vespa_add_library(vespalib_vespalib_tensor_sparse OBJECT
SOURCES
sparse_tensor.cpp
- sparse_tensor_dimension_sum.cpp
+ sparse_tensor_address_combiner.cpp
+ sparse_tensor_address_reducer.cpp
sparse_tensor_match.cpp
- sparse_tensor_product.cpp
- compact_tensor_address.cpp
- compact_tensor_address_builder.cpp
sparse_tensor_builder.cpp
- compact_tensor_unsorted_address_builder.cpp
+ sparse_tensor_unsorted_address_builder.cpp
DEPENDS
)
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.cpp
deleted file mode 100644
index 97415e81e29..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "compact_tensor_address.h"
-#include "sparse_tensor_address_decoder.h"
-#include <algorithm>
-
-namespace vespalib {
-namespace tensor {
-
-namespace
-{
-
-void
-setupElements(CompactTensorAddress::Elements &elements,
- CompactTensorAddressRef ref)
-{
- const char *cur = static_cast<const char *>(ref.start());
- const char *end = cur + ref.size();
- while (cur != end) {
- const char *dim = cur;
- while (*cur) {
- ++cur;
- }
- ++cur;
- const char *label = cur;
- while (*cur) {
- ++cur;
- }
- ++cur;
- elements.emplace_back(vespalib::stringref(dim, label - 1 - dim),
- vespalib::stringref(label, cur - 1 - label));
- }
-}
-
-
-}
-
-
-
-CompactTensorAddress::CompactTensorAddress()
- : _elements()
-{
-}
-
-CompactTensorAddress::CompactTensorAddress(const Elements &elements_in)
- : _elements(elements_in)
-{
-}
-
-bool
-CompactTensorAddress::hasDimension(const vespalib::string &dimension) const
-{
- for (const auto &elem : _elements) {
- if (elem.dimension() == dimension) {
- return true;
- }
- }
- return false;
-}
-
-bool
-CompactTensorAddress::operator<(const CompactTensorAddress &rhs) const
-{
- size_t minSize = std::min(_elements.size(), rhs._elements.size());
- for (size_t i = 0; i < minSize; ++i) {
- if (_elements[i] != rhs._elements[i]) {
- return _elements[i] < rhs._elements[i];
- }
- }
- return _elements.size() < rhs._elements.size();
-}
-
-bool
-CompactTensorAddress::operator==(const CompactTensorAddress &rhs) const
-{
- return _elements == rhs._elements;
-}
-
-
-void
-CompactTensorAddress::deserializeFromSparseAddressRef(CompactTensorAddressRef
- ref)
-{
- _elements.clear();
- setupElements(_elements, ref);
-}
-
-
-void
-CompactTensorAddress::deserializeFromAddressRefV2(CompactTensorAddressRef ref,
- const TensorDimensions &
- dimensions)
-{
- _elements.clear();
- SparseTensorAddressDecoder addr(ref);
- for (auto &dim : dimensions) {
- auto label = addr.decodeLabel();
- if (label.size() != 0u) {
- _elements.emplace_back(dim, label);
- }
- }
- assert(!addr.valid());
-}
-
-
-
-std::ostream &
-operator<<(std::ostream &out, const CompactTensorAddress::Elements &elements)
-{
- out << "{";
- bool first = true;
- for (const auto &elem : elements) {
- if (!first) {
- out << ",";
- }
- out << elem.dimension() << ":" << elem.label();
- first = false;
- }
- out << "}";
- return out;
-}
-
-std::ostream &
-operator<<(std::ostream &out, const CompactTensorAddress &value)
-{
- out << value.elements();
- return out;
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.h b/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.h
deleted file mode 100644
index 509c267323c..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address.h
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/stllike/string.h>
-#include <iostream>
-#include <vector>
-#include "compact_tensor_address_ref.h"
-#include <vespa/vespalib/tensor/types.h>
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * A compact sparse immutable address to a tensor cell.
- *
- * Only dimensions which have a different label than "undefined" are
- * explicitly included.
- *
- * Tensor addresses are ordered by the natural order of the elements
- * in sorted order.
- */
-class CompactTensorAddress
-{
-public:
- class Element
- {
- private:
- vespalib::stringref _dimension;
- vespalib::stringref _label;
-
- public:
- Element(vespalib::stringref dimension_in,
- vespalib::stringref label_in)
- : _dimension(dimension_in), _label(label_in)
- {}
- vespalib::stringref dimension() const { return _dimension; }
- vespalib::stringref label() const { return _label; }
- bool operator<(const Element &rhs) const {
- if (_dimension == rhs._dimension) {
- // Define sort order when dimension is the same to be able
- // to do set operations over element vectors.
- return _label < rhs._label;
- }
- return _dimension < rhs._dimension;
- }
- bool operator==(const Element &rhs) const {
- return (_dimension == rhs._dimension) && (_label == rhs._label);
- }
- bool operator!=(const Element &rhs) const {
- return !(*this == rhs);
- }
- };
-
- typedef std::vector<Element> Elements;
-
-private:
- Elements _elements;
-
-public:
- CompactTensorAddress();
- explicit CompactTensorAddress(const Elements &elements_in);
- const Elements &elements() const { return _elements; }
- bool hasDimension(const vespalib::string &dimension) const;
- bool operator<(const CompactTensorAddress &rhs) const;
- bool operator==(const CompactTensorAddress &rhs) const;
- void deserializeFromSparseAddressRef(CompactTensorAddressRef ref);
- void deserializeFromAddressRefV2(CompactTensorAddressRef ref,
- const TensorDimensions &dimensions);
-};
-
-std::ostream &operator<<(std::ostream &out, const CompactTensorAddress::Elements &elements);
-std::ostream &operator<<(std::ostream &out, const CompactTensorAddress &value);
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.cpp
deleted file mode 100644
index 03f2ec0fd15..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "compact_tensor_address_builder.h"
-#include <algorithm>
-
-namespace vespalib {
-namespace tensor {
-
-namespace
-{
-
-void
-append(std::vector<char> &address, vespalib::stringref str)
-{
- const char *cstr = str.c_str();
- address.insert(address.end(), cstr, cstr + str.size() + 1);
-}
-
-}
-
-CompactTensorAddressBuilder::CompactTensorAddressBuilder()
- : _address()
-{
-}
-
-
-void
-CompactTensorAddressBuilder::add(vespalib::stringref dimension,
- vespalib::stringref label)
-{
- append(_address, dimension);
- append(_address, label);
-}
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h
deleted file mode 100644
index 2981352eef5..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_builder.h
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/stllike/string.h>
-#include <vector>
-#include "compact_tensor_address_ref.h"
-
-namespace vespalib {
-namespace tensor {
-
-
-class CompactTensorAddress;
-
-/**
- * A writer to serialize tensor addresses into a compact representation.
- *
- * Format: (dimStr NUL labelStr NUL)*
- */
-class CompactTensorAddressBuilder
-{
-private:
- std::vector<char> _address;
-public:
- CompactTensorAddressBuilder();
- void add(vespalib::stringref dimension, vespalib::stringref label);
- void clear() { _address.clear(); }
- CompactTensorAddressRef getAddressRef() const {
- return CompactTensorAddressRef(&_address[0], _address.size());
- }
- bool empty() const { return _address.empty(); }
-};
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h
index 5e51a750fc2..1d5b4b550a4 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/direct_sparse_tensor_builder.h
@@ -21,8 +21,7 @@ public:
using Dimensions = typename TensorImplType::Dimensions;
using Cells = typename TensorImplType::Cells;
using AddressBuilderType = SparseTensorAddressBuilder;
- using AddressRefType = CompactTensorAddressRef;
- using AddressType = CompactTensorAddress;
+ using AddressRefType = SparseTensorAddressRef;
private:
Stash _stash;
@@ -34,8 +33,8 @@ public:
copyCells(const Cells &cells_in)
{
for (const auto &cell : cells_in) {
- CompactTensorAddressRef oldRef = cell.first;
- CompactTensorAddressRef newRef(oldRef, _stash);
+ SparseTensorAddressRef oldRef = cell.first;
+ SparseTensorAddressRef newRef(oldRef, _stash);
_cells[newRef] = cell.second;
}
}
@@ -47,8 +46,8 @@ public:
cells_in_dimensions);
for (const auto &cell : cells_in) {
addressPadder.padAddress(cell.first);
- CompactTensorAddressRef oldRef = addressPadder.getAddressRef();
- CompactTensorAddressRef newRef(oldRef, _stash);
+ SparseTensorAddressRef oldRef = addressPadder.getAddressRef();
+ SparseTensorAddressRef newRef(oldRef, _stash);
_cells[newRef] = cell.second;
}
}
@@ -97,20 +96,20 @@ public:
}
template <class Function>
- void insertCell(CompactTensorAddressRef address, double value,
+ void insertCell(SparseTensorAddressRef address, double value,
Function &&func)
{
- CompactTensorAddressRef oldRef(address);
+ SparseTensorAddressRef oldRef(address);
auto res = _cells.insert(std::make_pair(oldRef, value));
if (res.second) {
// Replace key with own copy
- res.first->first = CompactTensorAddressRef(oldRef, _stash);
+ res.first->first = SparseTensorAddressRef(oldRef, _stash);
} else {
res.first->second = func(res.first->second, value);
}
}
- void insertCell(CompactTensorAddressRef address, double value) {
+ void insertCell(SparseTensorAddressRef address, double value) {
// This address should not already exist and a new cell should be inserted.
insertCell(address, value, [](double, double) -> double { abort(); });
}
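The insertCell overloads implement insert-or-combine: a new address stores the value (and copies the key into the builder's stash), while an existing address has the old and new values merged through func; the overload without a function asserts via abort that the address is new. The same pattern over a plain std::map, where key ownership is handled by the map (hypothetical standalone sketch):

#include <cstdio>
#include <cstdlib>
#include <map>
#include <string>

using Cells = std::map<std::string, double>;

// Insert value at address, or merge with the existing value using func.
template <class Function>
void insertCell(Cells &cells, const std::string &address, double value, Function &&func) {
    auto res = cells.insert(std::make_pair(address, value));
    if (!res.second) {                       // address already present
        res.first->second = func(res.first->second, value);
    }
}

int main() {
    Cells cells;
    // First insert must not hit the merge path, mirroring the aborting overload above.
    insertCell(cells, "x:0", 2.0, [](double, double) -> double { std::abort(); });
    insertCell(cells, "x:0", 3.0, [](double oldVal, double newVal) { return oldVal + newVal; });
    std::printf("%.1f\n", cells["x:0"]);     // prints 5.0
    return 0;
}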
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/join_sparse_tensors.h b/vespalib/src/vespa/vespalib/tensor/sparse/join_sparse_tensors.h
deleted file mode 100644
index e17f1812533..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/join_sparse_tensors.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-namespace vespalib {
-namespace tensor {
-
-/*
- * Join the cells of two tensors.
- * The given function is used to calculate the resulting cell value for overlapping cells.
- */
-template <typename Function>
-Tensor::UP
-joinSparseTensors(const SparseTensor &lhs, const SparseTensor &rhs,
- Function &&func)
-{
- DirectTensorBuilder<SparseTensor> builder(lhs.combineDimensionsWith(rhs),
- lhs.cells(), lhs.dimensions());
- if (builder.dimensions().size() == rhs.dimensions().size()) {
- for (const auto &rhsCell : rhs.cells()) {
- builder.insertCell(rhsCell.first, rhsCell.second, func);
- }
- } else {
- SparseTensorAddressPadder addressPadder(builder.dimensions(),
- rhs.dimensions());
- for (const auto &rhsCell : rhs.cells()) {
- addressPadder.padAddress(rhsCell.first);
- builder.insertCell(addressPadder, rhsCell.second, func);
- }
- }
- return builder.build();
-}
-
-/*
- * Join the cells of two tensors, where the rhs values are treated as negated values.
- * The given function is used to calculate the resulting cell value for overlapping cells.
- */
-template <typename Function>
-Tensor::UP
-joinSparseTensorsNegated(const SparseTensor &lhs,
- const SparseTensor &rhs,
- Function &&func)
-{
- DirectTensorBuilder<SparseTensor> builder(lhs.combineDimensionsWith(rhs),
- lhs.cells(), lhs.dimensions());
- if (builder.dimensions().size() == rhs.dimensions().size()) {
- for (const auto &rhsCell : rhs.cells()) {
- builder.insertCell(rhsCell.first, -rhsCell.second, func);
- }
- } else {
- SparseTensorAddressPadder addressPadder(builder.dimensions(),
- rhs.dimensions());
- for (const auto &rhsCell : rhs.cells()) {
- addressPadder.padAddress(rhsCell.first);
- builder.insertCell(addressPadder, -rhsCell.second, func);
- }
- }
- return builder.build();
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp
index 7f833bf0bce..5e7ec5b1db3 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.cpp
@@ -3,15 +3,16 @@
#include <vespa/fastos/fastos.h>
#include "sparse_tensor.h"
#include "sparse_tensor_address_builder.h"
-#include "sparse_tensor_dimension_sum.h"
#include "sparse_tensor_match.h"
-#include "sparse_tensor_product.h"
-#include "join_sparse_tensors.h"
+#include "sparse_tensor_apply.hpp"
+#include "sparse_tensor_reduce.hpp"
#include <vespa/vespalib/tensor/tensor_address_builder.h>
#include <vespa/vespalib/tensor/tensor_apply.h>
#include <vespa/vespalib/tensor/tensor_visitor.h>
+#include <vespa/vespalib/eval/operation.h>
#include <sstream>
+using vespalib::eval::TensorSpec;
namespace vespalib {
namespace tensor {
@@ -24,12 +25,33 @@ void
copyCells(Cells &cells, const Cells &cells_in, Stash &stash)
{
for (const auto &cell : cells_in) {
- CompactTensorAddressRef oldRef = cell.first;
- CompactTensorAddressRef newRef(oldRef, stash);
+ SparseTensorAddressRef oldRef = cell.first;
+ SparseTensorAddressRef newRef(oldRef, stash);
cells[newRef] = cell.second;
}
}
+void
+printAddress(std::ostream &out, const SparseTensorAddressRef &ref,
+ const TensorDimensions &dimensions)
+{
+ out << "{";
+ bool first = true;
+ SparseTensorAddressDecoder addr(ref);
+ for (auto &dim : dimensions) {
+ auto label = addr.decodeLabel();
+ if (label.size() != 0u) {
+ if (!first) {
+ out << ",";
+ }
+ out << dim << ":" << label;
+ first = false;
+ }
+ }
+ assert(!addr.valid());
+ out << "}";
+}
+
}
SparseTensor::SparseTensor(const Dimensions &dimensions_in,
@@ -96,8 +118,8 @@ SparseTensor::add(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return joinSparseTensors(*this, *rhs,
- [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; });
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return lhsValue + rhsValue; });
}
Tensor::UP
@@ -107,9 +129,8 @@ SparseTensor::subtract(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- // Note that -rhsCell.second is passed to the lambda function, that is why we do addition.
- return joinSparseTensorsNegated(*this, *rhs,
- [](double lhsValue, double rhsValue) { return lhsValue + rhsValue; });
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return lhsValue - rhsValue; });
}
Tensor::UP
@@ -119,7 +140,8 @@ SparseTensor::multiply(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return SparseTensorProduct(*this, *rhs).result();
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return lhsValue * rhsValue; });
}
Tensor::UP
@@ -129,8 +151,8 @@ SparseTensor::min(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return joinSparseTensors(*this, *rhs,
- [](double lhsValue, double rhsValue) { return std::min(lhsValue, rhsValue); });
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return std::min(lhsValue, rhsValue); });
}
Tensor::UP
@@ -140,8 +162,8 @@ SparseTensor::max(const Tensor &arg) const
if (!rhs) {
return Tensor::UP();
}
- return joinSparseTensors(*this, *rhs,
- [](double lhsValue, double rhsValue) { return std::max(lhsValue, rhsValue); });
+ return sparse::apply(*this, *rhs, [](double lhsValue, double rhsValue)
+ { return std::max(lhsValue, rhsValue); });
}
Tensor::UP
@@ -163,7 +185,9 @@ SparseTensor::apply(const CellFunction &func) const
Tensor::UP
SparseTensor::sum(const vespalib::string &dimension) const
{
- return SparseTensorDimensionSum(*this, dimension).result();
+ return sparse::reduce(*this, { dimension },
+ [](double lhsValue, double rhsValue)
+ { return lhsValue + rhsValue; });
}
bool
@@ -190,18 +214,52 @@ SparseTensor::clone() const
return std::make_unique<SparseTensor>(_dimensions, _cells);
}
+namespace {
+
+void
+buildAddress(const SparseTensor::Dimensions &dimensions,
+ SparseTensorAddressDecoder &decoder,
+ TensorSpec::Address &address)
+{
+ for (const auto &dimension : dimensions) {
+ auto label = decoder.decodeLabel();
+ if (!label.empty()) {
+ address.emplace(std::make_pair(dimension, TensorSpec::Label(label)));
+ }
+ }
+ assert(!decoder.valid());
+}
+
+}
+
+TensorSpec
+SparseTensor::toSpec() const
+{
+ TensorSpec result(getType().to_spec());
+ TensorSpec::Address address;
+ for (const auto &cell : _cells) {
+ SparseTensorAddressDecoder decoder(cell.first);
+ buildAddress(_dimensions, decoder, address);
+ result.add(address, cell.second);
+ address.clear();
+ }
+ if (_dimensions.empty() && _cells.empty()) {
+ result.add(address, 0.0);
+ }
+ return result;
+}
+
void
SparseTensor::print(std::ostream &out) const
{
out << "{ ";
bool first = true;
- CompactTensorAddress addr;
for (const auto &cell : cells()) {
if (!first) {
out << ", ";
}
- addr.deserializeFromAddressRefV2(cell.first, _dimensions);
- out << addr << ":" << cell.second;
+ printAddress(out, cell.first, _dimensions);
+ out << ":" << cell.second;
first = false;
}
out << " }";
@@ -227,5 +285,27 @@ SparseTensor::accept(TensorVisitor &visitor) const
}
}
+Tensor::UP
+SparseTensor::apply(const eval::BinaryOperation &op, const Tensor &arg) const
+{
+ const SparseTensor *rhs = dynamic_cast<const SparseTensor *>(&arg);
+ if (!rhs) {
+ return Tensor::UP();
+ }
+ return sparse::apply(*this, *rhs,
+ [&op](double lhsValue, double rhsValue)
+ { return op.eval(lhsValue, rhsValue); });
+}
+
+Tensor::UP
+SparseTensor::reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions) const
+{
+ return sparse::reduce(*this,
+ (dimensions.empty() ? _dimensions : dimensions),
+ [&op](double lhsValue, double rhsValue)
+ { return op.eval(lhsValue, rhsValue); });
+}
+
} // namespace vespalib::tensor
} // namespace vespalib
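A minimal sketch of the new entry points, not part of the patch; lhs is a SparseTensor, rhs any Tensor, and op stands for some eval::BinaryOperation implementation from <vespa/vespalib/eval/operation.h> (assumed).

// Cell-wise combine; an empty UP is returned when rhs has a different concrete type.
Tensor::UP joined = lhs.apply(op, rhs);
// Fold away dimension "x"; an empty dimension list reduces over all dimensions.
Tensor::UP folded = lhs.reduce(op, {"x"});
Tensor::UP scalarLike = lhs.reduce(op, {});
// Concrete-type-independent representation, convenient for comparing tensors in tests.
eval::TensorSpec spec = joined->toSpec();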
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h
index 8427e51ffd1..d788a55885e 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor.h
@@ -5,7 +5,7 @@
#include <vespa/vespalib/tensor/cell_function.h>
#include <vespa/vespalib/tensor/tensor.h>
#include <vespa/vespalib/tensor/tensor_address.h>
-#include "compact_tensor_address.h"
+#include "sparse_tensor_address_ref.h"
#include <vespa/vespalib/tensor/types.h>
#include <vespa/vespalib/stllike/hash_map.h>
#include <vespa/vespalib/stllike/string.h>
@@ -22,7 +22,7 @@ namespace tensor {
class SparseTensor : public Tensor
{
public:
- typedef vespalib::hash_map<CompactTensorAddressRef, double> Cells;
+ typedef vespalib::hash_map<SparseTensorAddressRef, double> Cells;
typedef TensorDimensions Dimensions;
static constexpr size_t STASH_CHUNK_SIZE = 16384u;
@@ -52,10 +52,16 @@ public:
virtual Tensor::UP match(const Tensor &arg) const override;
virtual Tensor::UP apply(const CellFunction &func) const override;
virtual Tensor::UP sum(const vespalib::string &dimension) const override;
+ virtual Tensor::UP apply(const eval::BinaryOperation &op,
+ const Tensor &arg) const override;
+ virtual Tensor::UP reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions)
+ const override;
virtual bool equals(const Tensor &arg) const override;
virtual void print(std::ostream &out) const override;
virtual vespalib::string toString() const override;
virtual Tensor::UP clone() const override;
+ virtual eval::TensorSpec toSpec() const override;
virtual void accept(TensorVisitor &visitor) const override;
};
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h
index 239b405fe4c..c1678d89018 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_builder.h
@@ -4,14 +4,12 @@
#include <vespa/vespalib/stllike/string.h>
#include <vector>
-#include "compact_tensor_address_ref.h"
+#include "sparse_tensor_address_ref.h"
namespace vespalib {
namespace tensor {
-class CompactTensorAddress;
-
/**
* A writer to serialize tensor addresses into a compact representation.
* All dimensions in the tensors are present, empty label is the "undefined"
@@ -38,8 +36,8 @@ public:
void add(vespalib::stringref label) { append(label); }
void addUndefined() { _address.emplace_back('\0'); }
void clear() { _address.clear(); }
- CompactTensorAddressRef getAddressRef() const {
- return CompactTensorAddressRef(&_address[0], _address.size());
+ SparseTensorAddressRef getAddressRef() const {
+ return SparseTensorAddressRef(&_address[0], _address.size());
}
bool empty() const { return _address.empty(); }
};
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp
new file mode 100644
index 00000000000..53cf90e2db0
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.cpp
@@ -0,0 +1,69 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "sparse_tensor_address_combiner.h"
+#include "sparse_tensor_address_decoder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+TensorAddressCombiner::TensorAddressCombiner(const TensorDimensions &lhs,
+ const TensorDimensions &rhs)
+{
+ auto rhsItr = rhs.cbegin();
+ auto rhsItrEnd = rhs.cend();
+ for (auto &lhsDim : lhs) {
+ while (rhsItr != rhsItrEnd && *rhsItr < lhsDim) {
+ _ops.push_back(AddressOp::RHS);
+ ++rhsItr;
+ }
+ if (rhsItr != rhsItrEnd && *rhsItr == lhsDim) {
+ _ops.push_back(AddressOp::BOTH);
+ ++rhsItr;
+ } else {
+ _ops.push_back(AddressOp::LHS);
+ }
+ }
+ while (rhsItr != rhsItrEnd) {
+ _ops.push_back(AddressOp::RHS);
+ ++rhsItr;
+ }
+}
+
+TensorAddressCombiner::~TensorAddressCombiner()
+{
+}
+
+bool
+TensorAddressCombiner::combine(SparseTensorAddressRef lhsRef,
+ SparseTensorAddressRef rhsRef)
+{
+ clear();
+ SparseTensorAddressDecoder lhs(lhsRef);
+ SparseTensorAddressDecoder rhs(rhsRef);
+ for (auto op : _ops) {
+ switch (op) {
+ case AddressOp::LHS:
+ add(lhs.decodeLabel());
+ break;
+ case AddressOp::RHS:
+ add(rhs.decodeLabel());
+ break;
+ case AddressOp::BOTH:
+ auto lhsLabel(lhs.decodeLabel());
+ auto rhsLabel(rhs.decodeLabel());
+ if (lhsLabel != rhsLabel) {
+ return false;
+ }
+ add(lhsLabel);
+ }
+ }
+ assert(!lhs.valid());
+ assert(!rhs.valid());
+ return true;
+}
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h
new file mode 100644
index 00000000000..72717396a02
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_combiner.h
@@ -0,0 +1,39 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "sparse_tensor_address_builder.h"
+#include <vespa/vespalib/tensor/types.h>
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+/**
+ * Combine two tensor addresses to a new tensor address. Common dimensions
+ * must have matching labels.
+ */
+class TensorAddressCombiner : public SparseTensorAddressBuilder
+{
+ enum class AddressOp
+ {
+ LHS,
+ RHS,
+ BOTH
+ };
+
+ std::vector<AddressOp> _ops;
+
+public:
+ TensorAddressCombiner(const TensorDimensions &lhs,
+ const TensorDimensions &rhs);
+
+ ~TensorAddressCombiner();
+
+ bool combine(SparseTensorAddressRef lhsRef, SparseTensorAddressRef rhsRef);
+};
+
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
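A small illustration, not part of the patch, of how the op sequence drives combine(); lhsDims = {x, y} and rhsDims = {y, z} are assumed TensorDimensions, lhsAddr and rhsAddr assumed SparseTensorAddressRef values.

// The constructor walks both sorted dimension lists and records, per output
// dimension, which side supplies the label: here [LHS(x), BOTH(y), RHS(z)].
TensorAddressCombiner combiner(lhsDims, rhsDims);
if (combiner.combine(lhsAddr, rhsAddr)) {        // false when the y labels differ
    SparseTensorAddressRef combined = combiner.getAddressRef();
    // combined points into the combiner's internal buffer and is invalidated by the
    // next combine(); copy it (e.g. via the Stash constructor) before storing it.
}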
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h
index bac864b53f3..94cb9373bc2 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_decoder.h
@@ -3,7 +3,7 @@
#pragma once
#include <vespa/vespalib/stllike/string.h>
-#include "compact_tensor_address_ref.h"
+#include "sparse_tensor_address_ref.h"
namespace vespalib {
@@ -18,7 +18,7 @@ class SparseTensorAddressDecoder
const char *_cur;
const char *_end;
public:
- SparseTensorAddressDecoder(CompactTensorAddressRef ref)
+ SparseTensorAddressDecoder(SparseTensorAddressRef ref)
: _cur(static_cast<const char *>(ref.start())),
_end(_cur + ref.size())
{
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h
index 5de4bd00404..5f0c95033b3 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_padder.h
@@ -47,7 +47,7 @@ public:
}
void
- padAddress(CompactTensorAddressRef ref)
+ padAddress(SparseTensorAddressRef ref)
{
clear();
SparseTensorAddressDecoder addr(ref);
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp
new file mode 100644
index 00000000000..2d3bbaef043
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.cpp
@@ -0,0 +1,51 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#include <vespa/fastos/fastos.h>
+#include "sparse_tensor_address_reducer.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+TensorAddressReducer::TensorAddressReducer(const TensorDimensions &dims,
+ const std::vector<vespalib::string> &
+ removeDimensions)
+ : SparseTensorAddressBuilder(),
+ _ops()
+{
+ TensorDimensionsSet removeSet(removeDimensions.cbegin(),
+ removeDimensions.cend());
+ _ops.reserve(dims.size());
+ for (auto &dim : dims) {
+ if (removeSet.find(dim) != removeSet.end()) {
+ _ops.push_back(AddressOp::REMOVE);
+ } else {
+ _ops.push_back(AddressOp::COPY);
+ }
+ }
+}
+
+TensorDimensions
+TensorAddressReducer::remainingDimensions(const TensorDimensions &dimensions,
+ const std::vector<vespalib::string> &
+ removeDimensions)
+{
+ TensorDimensionsSet removeSet(removeDimensions.cbegin(),
+ removeDimensions.cend());
+ TensorDimensions result;
+ result.reserve(dimensions.size());
+ for (auto &dim : dimensions) {
+ if (removeSet.find(dim) == removeSet.end()) {
+ result.push_back(dim);
+ }
+ }
+ return std::move(result);
+}
+
+TensorAddressReducer::~TensorAddressReducer()
+{
+}
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h
new file mode 100644
index 00000000000..775607ca059
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_reducer.h
@@ -0,0 +1,58 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "sparse_tensor_address_builder.h"
+#include <vespa/vespalib/tensor/types.h>
+#include "sparse_tensor_address_decoder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+/**
+ * Reduce sparse tensor address by removing one or more dimensions.
+ */
+class TensorAddressReducer : public SparseTensorAddressBuilder
+{
+ enum AddressOp
+ {
+ REMOVE,
+ COPY
+ };
+
+ using AddressOps = std::vector<AddressOp>;
+
+ AddressOps _ops;
+
+public:
+ TensorAddressReducer(const TensorDimensions &dims,
+ const std::vector<vespalib::string> &removeDimensions);
+
+ ~TensorAddressReducer();
+
+ static TensorDimensions
+ remainingDimensions(const TensorDimensions &dimensions,
+ const std::vector<vespalib::string> &removeDimensions);
+
+ void reduce(SparseTensorAddressRef ref)
+ {
+ clear();
+ SparseTensorAddressDecoder decoder(ref);
+ for (auto op : _ops) {
+ switch (op) {
+ case AddressOp::REMOVE:
+ decoder.skipLabel();
+ break;
+ case AddressOp::COPY:
+ add(decoder.decodeLabel());
+ }
+ }
+ assert(!decoder.valid());
+ }
+};
+
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
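A minimal sketch, not part of the patch, assuming a sparse tensor over dimensions {x, y, z} from which dimension "y" is removed.

std::vector<vespalib::string> remove{"y"};
// Dimensions of the result tensor: {x, z}.
TensorDimensions resultDims =
    TensorAddressReducer::remainingDimensions(tensor.dimensions(), remove);
TensorAddressReducer reducer(tensor.dimensions(), remove);
for (const auto &cell : tensor.cells()) {
    reducer.reduce(cell.first);   // rewrites the address into the builder buffer
    // Several source cells may now share the same reduced address; merge them with a
    // combining insertCell, as sparse_tensor_reduce.hpp later in this patch does.
}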
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_ref.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_ref.h
index fa49e2fd39c..4358ce501a2 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_address_ref.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_address_ref.h
@@ -16,24 +16,24 @@ namespace tensor {
/**
* A reference to a compact sparse immutable address to a tensor cell.
*/
-class CompactTensorAddressRef
+class SparseTensorAddressRef
{
const void *_start;
size_t _size;
size_t _hash;
public:
- CompactTensorAddressRef()
+ SparseTensorAddressRef()
: _start(nullptr), _size(0u), _hash(0u)
{
}
- CompactTensorAddressRef(const void *start_in, size_t size_in)
+ SparseTensorAddressRef(const void *start_in, size_t size_in)
: _start(start_in), _size(size_in),
_hash(calcHash())
{
}
- CompactTensorAddressRef(const CompactTensorAddressRef rhs, Stash &stash)
+ SparseTensorAddressRef(const SparseTensorAddressRef rhs, Stash &stash)
: _start(nullptr),
_size(rhs._size),
_hash(rhs._hash)
@@ -47,7 +47,7 @@ public:
size_t calcHash() const { return hashValue(_start, _size); }
- bool operator<(const CompactTensorAddressRef &rhs) const {
+ bool operator<(const SparseTensorAddressRef &rhs) const {
size_t minSize = std::min(_size, rhs._size);
int res = memcmp(_start, rhs._start, minSize);
if (res != 0) {
@@ -56,7 +56,7 @@ public:
return _size < rhs._size;
}
- bool operator==(const CompactTensorAddressRef &rhs) const
+ bool operator==(const SparseTensorAddressRef &rhs) const
{
if (_size != rhs._size || _hash != rhs._hash) {
return false;
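A minimal sketch, not part of the patch, of the ownership convention the renamed class keeps; builder is an assumed SparseTensorAddressBuilder, stash a Stash, and cells a SparseTensor::Cells map.

// getAddressRef() points into the builder's internal buffer and stays valid only
// until the builder is cleared or reused.
SparseTensorAddressRef transientRef = builder.getAddressRef();
// The (ref, stash) constructor copies the bytes into the Stash, so the result can
// safely serve as a long-lived hash_map key, as copyCells() and insertCell() do.
SparseTensorAddressRef ownedRef(transientRef, stash);
cells[ownedRef] = 42.0;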
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h
new file mode 100644
index 00000000000..e0a8b2cee5b
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.h
@@ -0,0 +1,23 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+namespace vespalib {
+namespace tensor {
+class Tensor;
+class SparseTensor;
+namespace sparse {
+
+/**
+ * Create new tensor using all combinations of input tensor cells with matching
+ * labels for common dimensions, using func to calculate new cell value
+ * based on the cell values in the input tensors.
+ */
+template <typename Function>
+std::unique_ptr<Tensor>
+apply(const SparseTensor &lhs, const SparseTensor &rhs, Function &&func);
+
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp
new file mode 100644
index 00000000000..6c055d8547b
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_apply.hpp
@@ -0,0 +1,35 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "sparse_tensor_apply.h"
+#include "sparse_tensor_address_combiner.h"
+#include <vespa/vespalib/tensor/direct_tensor_builder.h>
+#include "direct_sparse_tensor_builder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+template <typename Function>
+std::unique_ptr<Tensor>
+apply(const SparseTensor &lhs, const SparseTensor &rhs, Function &&func)
+{
+ DirectTensorBuilder<SparseTensor> builder(lhs.combineDimensionsWith(rhs));
+ TensorAddressCombiner addressCombiner(lhs.dimensions(), rhs.dimensions());
+ for (const auto &lhsCell : lhs.cells()) {
+ for (const auto &rhsCell : rhs.cells()) {
+ bool combineSuccess = addressCombiner.combine(lhsCell.first,
+ rhsCell.first);
+ if (combineSuccess) {
+ builder.insertCell(addressCombiner.getAddressRef(),
+ func(lhsCell.second, rhsCell.second));
+ }
+ }
+ }
+ return builder.build();
+}
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
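A usage sketch, not part of the patch: the generic apply covers the old tensor product as one special case. Note that the loop above visits every lhs/rhs cell pair, i.e. the brute-force path of the SparseTensorProduct removed later in this patch; there is no fast path for identical dimension sets here.

// Hadamard-style product of two sparse tensors, mirroring SparseTensor::multiply().
Tensor::UP product = sparse::apply(lhs, rhs,
                                   [](double a, double b) { return a * b; });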
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp
index a16774707b4..bb00d9b2e19 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.cpp
@@ -65,9 +65,9 @@ SparseTensorBuilder::add_cell(double value)
makeSortedDimensions();
}
_addressBuilder.buildTo(_normalizedAddressBuilder, _sortedDimensions);
- CompactTensorAddressRef taddress(_normalizedAddressBuilder.getAddressRef());
+ SparseTensorAddressRef taddress(_normalizedAddressBuilder.getAddressRef());
// Make a persistent copy of sparse tensor address owned by _stash
- CompactTensorAddressRef address(taddress, _stash);
+ SparseTensorAddressRef address(taddress, _stash);
_cells[address] = value;
_addressBuilder.clear();
_normalizedAddressBuilder.clear();
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h
index c7a7e8a5a9e..be0791a59c1 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_builder.h
@@ -4,7 +4,7 @@
#include "sparse_tensor.h"
#include "sparse_tensor_address_builder.h"
-#include "compact_tensor_unsorted_address_builder.h"
+#include "sparse_tensor_unsorted_address_builder.h"
#include <vespa/vespalib/tensor/tensor_builder.h>
#include <vespa/vespalib/tensor/tensor_address.h>
#include <vespa/vespalib/stllike/hash_map.h>
@@ -18,7 +18,7 @@ namespace tensor {
*/
class SparseTensorBuilder : public TensorBuilder
{
- CompactTensorUnsortedAddressBuilder _addressBuilder; // unsorted dimensions
+ SparseTensorUnsortedAddressBuilder _addressBuilder; // unsorted dimensions
SparseTensorAddressBuilder _normalizedAddressBuilder; // sorted dimensions
SparseTensor::Cells _cells;
Stash _stash;
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.cpp
deleted file mode 100644
index 54c8d9b175a..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.cpp
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "sparse_tensor_dimension_sum.h"
-#include "sparse_tensor_address_decoder.h"
-
-namespace vespalib {
-namespace tensor {
-
-namespace {
-
-enum class AddressOp
-{
- REMOVE,
- COPY
-};
-
-using ReduceOps = std::vector<AddressOp>;
-
-
-ReduceOps
-buildReduceOps(const TensorDimensions &dims,
- const vespalib::stringref &dimension)
-{
- ReduceOps ops;
- for (auto &dim : dims) {
- if (dim == dimension) {
- ops.push_back(AddressOp::REMOVE);
- } else {
- ops.push_back(AddressOp::COPY);
- }
- }
- return ops;
-}
-
-
-void
-reduceAddress(SparseTensorAddressBuilder &builder,
- CompactTensorAddressRef ref,
- const ReduceOps &ops)
-{
- builder.clear();
- SparseTensorAddressDecoder addr(ref);
- for (auto op : ops) {
- switch (op) {
- case AddressOp::REMOVE:
- addr.skipLabel();
- break;
- case AddressOp::COPY:
- builder.add(addr.decodeLabel());
- break;
- }
- }
- assert(!addr.valid());
-}
-
-TensorDimensions
-removeDimension(const TensorDimensions &dimensions,
- const vespalib::string &dimension)
-{
- TensorDimensions result = dimensions;
- auto itr = std::lower_bound(result.begin(), result.end(), dimension);
- if (itr != result.end() && *itr == dimension) {
- result.erase(itr);
- }
- return result;
-}
-
-}
-
-SparseTensorDimensionSum::SparseTensorDimensionSum(const TensorImplType &
- tensor,
- const
- vespalib::string &
- dimension)
- : Parent(removeDimension(tensor.dimensions(), dimension))
-{
- ReduceOps ops(buildReduceOps(tensor.dimensions(), dimension));
- AddressBuilderType reducedAddress;
- for (const auto &cell : tensor.cells()) {
- reduceAddress(reducedAddress, cell.first, ops);
- _builder.insertCell(reducedAddress, cell.second,
- [](double cellValue, double rhsValue) { return cellValue + rhsValue; });
- }
-}
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.h
deleted file mode 100644
index f88239834e9..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_dimension_sum.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/tensor/tensor_operation.h>
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * Returns a tensor with the given dimension removed and the cell values in that dimension summed.
- */
-class SparseTensorDimensionSum : public TensorOperation<SparseTensor>
-{
-public:
- using TensorImplType = SparseTensor;
- using Parent = TensorOperation<SparseTensor>;
- using AddressBuilderType = typename Parent::AddressBuilderType;
- using AddressType = typename Parent::AddressType;
- using Parent::_builder;
- SparseTensorDimensionSum(const TensorImplType &tensor,
- const vespalib::string &dimension);
-};
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp
index 27cede44ff4..35da291bbee 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_match.cpp
@@ -45,7 +45,7 @@ buildTransformOps(std::vector<AddressOp> &ops,
bool
transformAddress(SparseTensorAddressBuilder &builder,
- CompactTensorAddressRef ref,
+ SparseTensorAddressRef ref,
const std::vector<AddressOp> &ops)
{
builder.clear();
@@ -99,7 +99,7 @@ SparseTensorMatch::slowMatch(const TensorImplType &lhs,
if (!transformAddress(addressBuilder, lhsCell.first, ops)) {
continue;
}
- CompactTensorAddressRef ref(addressBuilder.getAddressRef());
+ SparseTensorAddressRef ref(addressBuilder.getAddressRef());
auto rhsItr = rhs.cells().find(ref);
if (rhsItr != rhs.cells().end()) {
addressPadder.padAddress(lhsCell.first);
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.cpp
deleted file mode 100644
index 1a276ad55dd..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#include <vespa/fastos/fastos.h>
-#include "sparse_tensor_product.h"
-#include "sparse_tensor_address_decoder.h"
-#include <type_traits>
-
-namespace vespalib {
-namespace tensor {
-
-namespace {
-
-enum class AddressOp
-{
- LHS,
- RHS,
- BOTH
-};
-
-using CombineOps = std::vector<AddressOp>;
-
-CombineOps
-buildCombineOps(const TensorDimensions &lhs,
- const TensorDimensions &rhs)
-{
- CombineOps ops;
- auto rhsItr = rhs.cbegin();
- auto rhsItrEnd = rhs.cend();
- for (auto &lhsDim : lhs) {
- while (rhsItr != rhsItrEnd && *rhsItr < lhsDim) {
- ops.push_back(AddressOp::RHS);
- ++rhsItr;
- }
- if (rhsItr != rhsItrEnd && *rhsItr == lhsDim) {
- ops.push_back(AddressOp::BOTH);
- ++rhsItr;
- } else {
- ops.push_back(AddressOp::LHS);
- }
- }
- while (rhsItr != rhsItrEnd) {
- ops.push_back(AddressOp::RHS);
- ++rhsItr;
- }
- return ops;
-}
-
-
-bool
-combineAddresses(SparseTensorAddressBuilder &builder,
- CompactTensorAddressRef lhsRef,
- CompactTensorAddressRef rhsRef,
- const CombineOps &ops)
-{
- builder.clear();
- SparseTensorAddressDecoder lhs(lhsRef);
- SparseTensorAddressDecoder rhs(rhsRef);
- for (auto op : ops) {
- switch (op) {
- case AddressOp::LHS:
- builder.add(lhs.decodeLabel());
- break;
- case AddressOp::RHS:
- builder.add(rhs.decodeLabel());
- break;
- case AddressOp::BOTH:
- auto lhsLabel(lhs.decodeLabel());
- auto rhsLabel(rhs.decodeLabel());
- if (lhsLabel != rhsLabel) {
- return false;
- }
- builder.add(lhsLabel);
- }
- }
- assert(!lhs.valid());
- assert(!rhs.valid());
- return true;
-}
-
-}
-
-
-void
-SparseTensorProduct::bruteForceProduct(const TensorImplType &lhs,
- const TensorImplType &rhs)
-{
- CombineOps ops(buildCombineOps(lhs.dimensions(), rhs.dimensions()));
- SparseTensorAddressBuilder addressBuilder;
- for (const auto &lhsCell : lhs.cells()) {
- for (const auto &rhsCell : rhs.cells()) {
- bool combineSuccess = combineAddresses(addressBuilder,
- lhsCell.first, rhsCell.first,
- ops);
- if (combineSuccess) {
- _builder.insertCell(addressBuilder.getAddressRef(),
- lhsCell.second * rhsCell.second);
- }
- }
- }
-}
-
-
-void
-SparseTensorProduct::fastProduct(const TensorImplType &lhs,
- const TensorImplType &rhs)
-{
- const typename TensorImplType::Cells &rhsCells = rhs.cells();
- for (const auto &lhsCell : lhs.cells()) {
- auto itr = rhsCells.find(lhsCell.first);
- if (itr != rhsCells.end()) {
- _builder.insertCell(lhsCell.first,
- lhsCell.second * itr->second);
- }
- }
-}
-
-
-SparseTensorProduct::SparseTensorProduct(const TensorImplType &lhs,
- const TensorImplType &rhs)
- : Parent(lhs.combineDimensionsWith(rhs))
-{
-#if 0
- /* Commented ut for now since we want to see brute force performance. */
- // All dimensions are common
- if (lhs.dimensions().size() == rhs.dimensions().size() &&
- lhs.dimensions().size() == _builder.dimensions().size()) {
- fastProduct(lhs, rhs);
- return;
- }
- // TODO: Handle zero cells or zero dimensions cases
- // No dimensions are common
- if (lhs.dimensions().size() + rhs.dimensions().size() ==
- _builder.dimensions().size()) {
- bruteForceNoCommonDimensionProduct(lhs, rhs);
- return;
- }
- // lhs dimensions equals common dimensions
- if (rhs.dimensions().size() == _builder.dimensions().size()) {
- }
- // rhs dimensions equals common dimensions
- if (lhs.dimensions().size() == _builder.dimensions().size()) {
- }
-#endif
- bruteForceProduct(lhs, rhs);
-}
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.h
deleted file mode 100644
index 6aa84e83541..00000000000
--- a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_product.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
-
-#pragma once
-
-#include <vespa/vespalib/tensor/tensor_operation.h>
-
-namespace vespalib {
-namespace tensor {
-
-/**
- * Returns the sparse tensor product of the two given tensors.
- * This is all combinations of all cells in the first tensor with all cells of
- * the second tensor, except the combinations which would have multiple labels
- * for the same dimension due to shared dimensions between the two tensors.
- *
- * If there are no overlapping dimensions this is the regular tensor product.
- * If the two tensors have exactly the same dimensions this is the Hadamard product.
- *
- * The sparse tensor is associative and commutative. Its dimensions are the
- * set of the dimensions of the two input tensors.
- */
-class SparseTensorProduct : public TensorOperation<SparseTensor>
-{
-public:
- using TensorImplType = SparseTensor;
- using Parent = TensorOperation<SparseTensor>;
- using Dimensions = typename Parent::Dimensions;
- using AddressBuilderType = typename Parent::AddressBuilderType;
- using AddressRefType = typename Parent::AddressRefType;
- using AddressType = typename Parent::AddressType;
- using Parent::_builder;
-
-private:
- void
- bruteForceProduct(const TensorImplType &lhs, const TensorImplType &rhs);
-
- void
- fastProduct(const TensorImplType &lhs, const TensorImplType &rhs);
-
-public:
- SparseTensorProduct(const TensorImplType &lhs,
- const TensorImplType &rhs);
-};
-
-
-} // namespace vespalib::tensor
-} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp
new file mode 100644
index 00000000000..06c5deade5e
--- /dev/null
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_reduce.hpp
@@ -0,0 +1,29 @@
+// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
+
+#pragma once
+
+#include "sparse_tensor_address_reducer.h"
+#include <vespa/vespalib/tensor/direct_tensor_builder.h>
+#include "direct_sparse_tensor_builder.h"
+
+namespace vespalib {
+namespace tensor {
+namespace sparse {
+
+template <typename Function>
+std::unique_ptr<Tensor>
+reduce(const SparseTensor &tensor,
+ const std::vector<vespalib::string> &dimensions, Function &&func)
+{
+ DirectTensorBuilder<SparseTensor> builder(TensorAddressReducer::remainingDimensions(tensor.dimensions(), dimensions));
+ TensorAddressReducer addressReducer(tensor.dimensions(), dimensions);
+ for (const auto &cell : tensor.cells()) {
+ addressReducer.reduce(cell.first);
+ builder.insertCell(addressReducer.getAddressRef(), cell.second, func);
+ }
+ return builder.build();
+}
+
+} // namespace vespalib::tensor::sparse
+} // namespace vespalib::tensor
+} // namespace vespalib
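A usage sketch, not part of the patch, mirroring SparseTensor::sum(dimension) earlier in this patch; tensor is an assumed SparseTensor with a dimension "y".

// Sum away dimension "y"; cells whose addresses collapse onto the same reduced
// address are merged through the combining insertCell with the given function.
Tensor::UP summed = sparse::reduce(tensor, {"y"},
                                   [](double a, double b) { return a + b; });
// Passing every dimension (or, at the SparseTensor::reduce level, an empty list)
// should collapse the tensor to a single cell holding the fold of all values.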
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.cpp b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.cpp
index 1496ed0e5d5..57db0902396 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.cpp
@@ -1,15 +1,14 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#include <vespa/fastos/fastos.h>
-#include "compact_tensor_unsorted_address_builder.h"
-#include "compact_tensor_address_builder.h"
+#include "sparse_tensor_unsorted_address_builder.h"
#include "sparse_tensor_address_builder.h"
#include <algorithm>
namespace vespalib {
namespace tensor {
-CompactTensorUnsortedAddressBuilder::CompactTensorUnsortedAddressBuilder()
+SparseTensorUnsortedAddressBuilder::SparseTensorUnsortedAddressBuilder()
: _elementStrings(),
_elements()
{
@@ -17,21 +16,7 @@ CompactTensorUnsortedAddressBuilder::CompactTensorUnsortedAddressBuilder()
void
-CompactTensorUnsortedAddressBuilder::buildTo(CompactTensorAddressBuilder &
- builder)
-{
- const char *base = &_elementStrings[0];
- std::sort(_elements.begin(), _elements.end(),
- [=](const ElementRef &lhs, const ElementRef &rhs)
- { return lhs.getDimension(base) < rhs.getDimension(base); });
- // build normalized address with sorted dimensions
- for (const auto &element : _elements) {
- builder.add(element.getDimension(base), element.getLabel(base));
- }
-}
-
-void
-CompactTensorUnsortedAddressBuilder::buildTo(SparseTensorAddressBuilder &
+SparseTensorUnsortedAddressBuilder::buildTo(SparseTensorAddressBuilder &
builder,
const TensorDimensions &
dimensions)
diff --git a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.h b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.h
index 1ee7ccf0b60..914f7d6ce2f 100644
--- a/vespalib/src/vespa/vespalib/tensor/sparse/compact_tensor_unsorted_address_builder.h
+++ b/vespalib/src/vespa/vespalib/tensor/sparse/sparse_tensor_unsorted_address_builder.h
@@ -9,14 +9,13 @@
namespace vespalib {
namespace tensor {
-class CompactTensorAddressBuilder;
class SparseTensorAddressBuilder;
/**
* A builder that buffers up a tensor address with unsorted
* dimensions.
*/
-class CompactTensorUnsortedAddressBuilder
+class SparseTensorUnsortedAddressBuilder
{
struct ElementStringRef
{
@@ -62,7 +61,7 @@ class CompactTensorUnsortedAddressBuilder
}
public:
- CompactTensorUnsortedAddressBuilder();
+ SparseTensorUnsortedAddressBuilder();
bool empty() const { return _elementStrings.empty(); }
void add(vespalib::stringref dimension, vespalib::stringref label)
{
@@ -72,7 +71,6 @@ public:
* Sort the stored tensor address and pass it over to a strict
* tensor address builder in sorted order.
*/
- void buildTo(CompactTensorAddressBuilder &builder);
void buildTo(SparseTensorAddressBuilder &builder,
const TensorDimensions &dimensions);
void clear() { _elementStrings.clear(); _elements.clear(); }
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor.h b/vespalib/src/vespa/vespalib/tensor/tensor.h
index 4128a27d9a7..9e4f4a9bff0 100644
--- a/vespalib/src/vespa/vespalib/tensor/tensor.h
+++ b/vespalib/src/vespa/vespalib/tensor/tensor.h
@@ -6,9 +6,11 @@
#include "tensor_address.h"
#include <vespa/vespalib/stllike/string.h>
#include <vespa/vespalib/eval/tensor.h>
+#include <vespa/vespalib/eval/tensor_spec.h>
#include <vespa/vespalib/eval/value_type.h>
namespace vespalib {
+namespace eval { class BinaryOperation; }
namespace tensor {
class TensorVisitor;
@@ -37,10 +39,16 @@ struct Tensor : public eval::Tensor
virtual Tensor::UP match(const Tensor &arg) const = 0;
virtual Tensor::UP apply(const CellFunction &func) const = 0;
virtual Tensor::UP sum(const vespalib::string &dimension) const = 0;
+ virtual Tensor::UP apply(const eval::BinaryOperation &op,
+ const Tensor &arg) const = 0;
+ virtual Tensor::UP reduce(const eval::BinaryOperation &op,
+ const std::vector<vespalib::string> &dimensions)
+ const = 0;
virtual bool equals(const Tensor &arg) const = 0;
virtual void print(std::ostream &out) const = 0;
virtual vespalib::string toString() const = 0;
virtual Tensor::UP clone() const = 0;
+ virtual eval::TensorSpec toSpec() const = 0;
virtual void accept(TensorVisitor &visitor) const = 0;
};
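A minimal sketch, not part of the patch, of code written against the widened interface; op again stands for any eval::BinaryOperation implementation (assumed).

// Works for any concrete tensor type implementing the interface; the sparse
// implementation earlier in this patch returns an empty UP when the two concrete
// types do not match.
Tensor::UP combine(const Tensor &a, const Tensor &b, const eval::BinaryOperation &op) {
    return a.apply(op, b);
}
// toSpec() enables concrete-type-agnostic comparisons, e.g. in a test
// (EXPECT_EQUAL from the vespalib test kit, assuming TensorSpec is comparable):
//   EXPECT_EQUAL(expected.toSpec(), actual.toSpec());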
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h b/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h
index 3a260c7c693..a250331de5f 100644
--- a/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h
+++ b/vespalib/src/vespa/vespalib/tensor/tensor_address_element_iterator.h
@@ -25,68 +25,7 @@ public:
bool valid() const { return (_itr != _itrEnd); }
vespalib::stringref dimension() const { return _itr->dimension(); }
vespalib::stringref label() const { return _itr->label(); }
- template <class Iterator>
- bool beforeDimension(const Iterator &rhs) const {
- if (!valid()) {
- return false;
- }
- if (!rhs.valid()) {
- return true;
- }
- return (_itr->dimension() < rhs.dimension());
- }
- bool atDimension(vespalib::stringref rhsDimension) const
- {
- return (valid() && (_itr->dimension() == rhsDimension));
- }
void next() { ++_itr; }
- template <class AddressBuilder>
- void
- addElement(AddressBuilder &builder) {
- builder.add(_itr->dimension(), _itr->label());
- }
- template <class AddressBuilder, class Iterator>
- void addElements(AddressBuilder &builder, const Iterator &limit)
- {
- while (beforeDimension(limit)) {
- addElement(builder);
- next();
- }
- }
- template <class AddressBuilder, class Iterator>
- bool addElements(AddressBuilder &builder, const DimensionsSet &dims,
- const Iterator &limit)
- {
- do {
- if (dims.find(_itr->dimension()) != dims.end()) {
- return false;
- }
- addElement(builder);
- next();
- } while (beforeDimension(limit));
- return true;
- }
- template <class AddressBuilder>
- void addElements(AddressBuilder &builder)
- {
- while (valid()) {
- addElement(builder);
- next();
- }
- }
- template <class AddressBuilder>
- bool addElements(AddressBuilder &builder, const DimensionsSet &dims)
- {
- while (valid()) {
- if (dims.find(_itr->dimension()) != dims.end()) {
- return false;
- }
- addElement(builder);
- next();
- }
- return true;
- }
-
bool skipToDimension(vespalib::stringref rhsDimension) {
for (;;) {
if (!valid()) {
@@ -101,118 +40,5 @@ public:
}
};
-
-/**
- * An iterator for tensor address elements used to simplify 3-way merge
- * between two tensor addresses and a dimension vector.
- * This is a specialization to perform decoding on the fly while iterating.
- */
-template <>
-class TensorAddressElementIterator<CompactTensorAddressRef> {
- const char *_itr;
- const char *_itrEnd;
- vespalib::stringref _dimension;
- vespalib::stringref _label;
-
- size_t
- simple_strlen(const char *str) {
- const char *strend = str;
- for (; *strend != '\0'; ++strend) {
- }
- return (strend - str);
- }
-
- void decodeElement()
- {
- _dimension = vespalib::stringref(_itr, simple_strlen(_itr));
- const char *labelp = _dimension.c_str() + _dimension.size() + 1;
- _label = vespalib::stringref(labelp, simple_strlen(labelp));
- _itr = _label.c_str() + _label.size() + 1;
- }
-public:
- TensorAddressElementIterator(CompactTensorAddressRef address)
- : _itr(static_cast<const char *>(address.start())),
- _itrEnd(_itr + address.size()),
- _dimension(),
- _label()
- {
- if (_itr != _itrEnd) {
- decodeElement();
- }
- }
- bool valid() const { return (_dimension.size() != 0u); }
- vespalib::stringref dimension() const { return _dimension; }
- vespalib::stringref label() const { return _label; }
- template <class Iterator>
- bool beforeDimension(const Iterator &rhs) const {
- if (!valid()) {
- return false;
- }
- if (!rhs.valid()) {
- return true;
- }
- return (_dimension < rhs.dimension());
- }
- bool atDimension(vespalib::stringref rhsDimension) const
- {
- return (_dimension == rhsDimension);
- }
- void next() {
- if (_itr != _itrEnd) {
- decodeElement();
- } else {
- _dimension = vespalib::stringref();
- _label = vespalib::stringref();
- }
- }
- template <class AddressBuilder>
- void
- addElement(AddressBuilder &builder) {
- builder.add(_dimension, _label);
- }
- template <class AddressBuilder, class Iterator>
- void addElements(AddressBuilder &builder, const Iterator &limit)
- {
- while (beforeDimension(limit)) {
- addElement(builder);
- next();
- }
- }
- template <class AddressBuilder, class Iterator>
- bool addElements(AddressBuilder &builder, const DimensionsSet &dims,
- const Iterator &limit)
- {
- do {
- if (dims.find(_dimension) != dims.end()) {
- return false;
- }
- addElement(builder);
- next();
- } while (beforeDimension(limit));
- return true;
- }
- template <class AddressBuilder>
- void addElements(AddressBuilder &builder)
- {
- while (valid()) {
- addElement(builder);
- next();
- }
- }
- template <class AddressBuilder>
- bool addElements(AddressBuilder &builder, const DimensionsSet &dims)
- {
- while (valid()) {
- if (dims.find(_dimension) != dims.end()) {
- return false;
- }
- addElement(builder);
- next();
- }
- return true;
- }
-};
-
-
} // namespace vespalib::tensor
} // namespace vespalib
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp b/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp
index 460cb8f8bb4..a527627d786 100644
--- a/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp
+++ b/vespalib/src/vespa/vespalib/tensor/tensor_mapper.cpp
@@ -98,22 +98,6 @@ mapAddress(const TensorAddress &address)
template <class TensorT>
void
-SparseTensorMapper<TensorT>::mapAddress(const TensorAddress &address)
-{
- _addressBuilder.clear();
- TensorAddressElementIterator<TensorAddress> addressIterator(address);
- for (const auto &dimension : _builder.dimensions()) {
- if (addressIterator.skipToDimension(dimension)) {
- _addressBuilder.add(dimension, addressIterator.label());
- addressIterator.next();
- } else {
- // output dimension not in input
- }
- }
-}
-
-template <class TensorT>
-void
SparseTensorMapper<TensorT>::visit(const TensorAddress &address, double value)
{
mapAddress(address);
diff --git a/vespalib/src/vespa/vespalib/tensor/tensor_operation.h b/vespalib/src/vespa/vespalib/tensor/tensor_operation.h
index f74f7a7990d..350dfcc8abc 100644
--- a/vespalib/src/vespa/vespalib/tensor/tensor_operation.h
+++ b/vespalib/src/vespa/vespalib/tensor/tensor_operation.h
@@ -21,7 +21,6 @@ public:
using Cells = typename TensorImplType::Cells;
using AddressBuilderType = typename MyTensorBuilder::AddressBuilderType;
using AddressRefType = typename MyTensorBuilder::AddressRefType;
- using AddressType = typename MyTensorBuilder::AddressType;
protected:
MyTensorBuilder _builder;
Dimensions &_dimensions;
diff --git a/vespalib/src/vespa/vespalib/test/insertion_operators.h b/vespalib/src/vespa/vespalib/test/insertion_operators.h
index 8ed52062281..ac4fa3541e3 100644
--- a/vespalib/src/vespa/vespalib/test/insertion_operators.h
+++ b/vespalib/src/vespa/vespalib/test/insertion_operators.h
@@ -1,6 +1,7 @@
// Copyright 2016 Yahoo Inc. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#pragma once
+#include <map>
#include <ostream>
#include <set>
#include <vector>
@@ -41,5 +42,22 @@ operator<<(std::ostream &os, const std::vector<T> &set)
return os;
}
+template <typename K, typename V>
+std::ostream &
+operator<<(std::ostream &os, const std::map<K, V> &map)
+{
+ os << "{";
+ bool first = true;
+ for (const auto &entry : map) {
+ if (!first) {
+ os << ",";
+ }
+ os << "{" << entry.first << "," << entry.second << "}";
+ first = false;
+ }
+ os << "}";
+ return os;
+}
+
} // namespace std
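A usage sketch, not part of the patch, assuming the usual <vespa/vespalib/...> include path.

#include <vespa/vespalib/test/insertion_operators.h>
#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, int> counts{{"bar", 2}, {"foo", 1}};
    std::cout << counts << std::endl;   // prints {{bar,2},{foo,1}}
    return 0;
}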
diff --git a/vsm/src/tests/searcher/searcher.cpp b/vsm/src/tests/searcher/searcher.cpp
index dbf458a0c32..dfcad223701 100644
--- a/vsm/src/tests/searcher/searcher.cpp
+++ b/vsm/src/tests/searcher/searcher.cpp
@@ -545,10 +545,18 @@ SearcherTest::testUTF8ExactStringFieldSearcher()
{
UTF8ExactStringFieldSearcher fs(0);
// regular
- assertString(fs, "vespa", "vespa", Hits().add(0));
- assertString(fs, "vespa", "vespa vespa", Hits());
- assertString(fs, "vesp", "vespa", Hits());
- assertString(fs, "vesp*", "vespa", Hits().add(0));
+ TEST_DO(assertString(fs, "vespa", "vespa", Hits().add(0)));
+ TEST_DO(assertString(fs, "vespar", "vespa", Hits()));
+ TEST_DO(assertString(fs, "vespa", "vespar", Hits()));
+ TEST_DO(assertString(fs, "vespa", "vespa vespa", Hits()));
+ TEST_DO(assertString(fs, "vesp", "vespa", Hits()));
+ TEST_DO(assertString(fs, "vesp*", "vespa", Hits().add(0)));
+ TEST_DO(assertString(fs, "hutte", "hutte", Hits().add(0)));
+ TEST_DO(assertString(fs, "hütte", "hütte", Hits().add(0)));
+ TEST_DO(assertString(fs, "hutte", "hütte", Hits()));
+ TEST_DO(assertString(fs, "hütte", "hutte", Hits()));
+ TEST_DO(assertString(fs, "hütter", "hütte", Hits()));
+ TEST_DO(assertString(fs, "hütte", "hütter", Hits()));
}
void
diff --git a/vsm/src/vespa/vsm/searcher/utf8stringfieldsearcherbase.cpp b/vsm/src/vespa/vsm/searcher/utf8stringfieldsearcherbase.cpp
index 3cba7587598..a19829773b1 100644
--- a/vsm/src/vespa/vsm/searcher/utf8stringfieldsearcherbase.cpp
+++ b/vsm/src/vespa/vsm/searcher/utf8stringfieldsearcherbase.cpp
@@ -139,7 +139,7 @@ UTF8StringFieldSearcherBase::matchTermExact(const FieldRef & f, QueryTerm & qt)
termsize_t tsz = qt.term(term);
const cmptype_t * eterm = term+tsz;
const byte * e = n + f.size();
- if ((tsz == f.size()) || ((tsz < f.size()) && qt.isPrefix())) {
+ if (tsz <= f.size()) {
bool equal(true);
for (; equal && (n < e) && (term < eterm); term++) {
if (*term < 0x80) {
@@ -149,7 +149,7 @@ UTF8StringFieldSearcherBase::matchTermExact(const FieldRef & f, QueryTerm & qt)
equal = (*term == c);
}
}
- if (equal && (qt.isPrefix() || (n == e))) {
+ if (equal && (term == eterm) && (qt.isPrefix() || (n == e))) {
addHit(qt,0);
}
}
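A brief reading of the change above, not part of the patch: tsz counts decoded term characters while f.size() counts field bytes, so the old tsz == f.size() test could never hold for exact matches against multi-byte UTF-8 fields, and the missing term == eterm check could report a hit when the field ran out of bytes before the term ran out of characters. The hütte/hütter cases added to testUTF8ExactStringFieldSearcher() exercise both.

// Illustrative trace with assumed sizes (ü is two bytes in UTF-8, one decoded character):
//   term "hütte"  vs field "hütte" : tsz = 5 chars, f.size() = 6 bytes
//     old: 5 == 6 fails                                        -> exact match missed
//     new: 5 <= 6, all characters equal, term == eterm, n == e -> hit
//   term "hütter" vs field "hütte" : the field bytes run out before the term does,
//     so term != eterm                                         -> correctly no hit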